//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true, and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(CmpPredicate, Value *, Value *,
                              const SimplifyQuery &, unsigned);
static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>,
                              GEPNoWrapFlags, const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}
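
// Illustrative example (assumed IR, not from this file): a compare and its
// operand-swapped form are treated as equivalent, so for
//   %c = icmp slt i32 %a, %b
// both isSameCompare(%c, slt, %a, %b) and isSameCompare(%c, sgt, %b, %a)
// return true, since "icmp sgt %b, %a" is the swapped-predicate form.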

/// Simplify comparison with true or false branch of select:
/// %sel = select i1 %cond, i32 %tv, i32 %fv
/// %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                 Value *Cond, const SimplifyQuery &Q,
                                 unsigned MaxRecurse, Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                     Value *Cond, const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS,
                                      Value *Cond, const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}
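
// Example (illustrative): if TCmp simplified to true and FCmp to false, the
// compare equals the select condition itself; the "Cond && TCmp" case above
// then returns Cond directly, because the and folds once TCmp is true.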

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke
  // or callbr, then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}
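
// Sketch of the expansion (illustrative): with Opcode = And and
// OpcodeToExpand = Or, V = (B0 | B1), and OtherOp = X, the routine checks
// whether "(B0 & X) | (B1 & X)" folds completely; both halves must simplify
// before anything is returned, so no new instructions are ever created.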

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}
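
// Worked example (illustrative): for "(A + B) + C" with C == -B, the inner
// query "B + C" folds to 0, and "A + 0" folds to A, so the whole expression
// reassociates to A and NumReassoc is bumped; only existing values are
// returned, never freshly created instructions.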

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
        !Simplified->hasPoisonGeneratingFlags()) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}
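
// Example of the asymmetric case above (assumed IR):
//   %fv = and i32 %x, %z
//   %s  = select i1 %c, i32 %x, i32 %fv
//   %r  = and i32 %s, %z
// The true arm "%x & %z" does not simplify, but the false arm
// "(%x & %z) & %z" simplifies to %fv, whose opcode and operands match the
// unsimplified arm's operation, so %r folds to the existing %fv.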

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
/// %tmp = select i1 %cmp, i32 1, i32 2
/// %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS,
                                  const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
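
// Illustrative fold (assumed IR): with
//   %p = phi i32 [ 0, %entry ], [ %p, %loop ]
//   %r = add i32 %p, %x
// the self-incoming value is skipped and "0 + %x" simplifies to %x for the
// remaining edge, so %r threads to %x, provided %x dominates the phi.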

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
static Value *threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate comparing with the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than before, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}
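
// Minimal usage sketch (assumptions: A is a non-constant Value, C a Constant):
//   Value *L = C, *R = A;
//   foldOrCommuteConstant(Instruction::Add, L, R, Q);
// returns null (only one constant operand), but swaps L and R in place so the
// constant sits on the RHS, letting later matchers check only one side.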

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}
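
// Usage sketch for callers (illustrative, not part of this file):
//   const SimplifyQuery SQ(DL, &TLI, &DT, &AC);
//   if (Value *V = simplifyAddInst(I.getOperand(0), I.getOperand(1),
//                                  /*IsNSW=*/false, /*IsNUW=*/false, SQ))
//     I.replaceAllUsesWith(V); // fold without creating new instructions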

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset,
                                           /*AllowNonInbounds=*/true);
  // As the strip may trace through `addrspacecast`, the computed offset may
  // need to be sign-extended or truncated to the stripped pointer's width.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive run cannot gain any benefit here.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}
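
// Example (illustrative): in source like
//   if (a == b) { return a - b; }
// the dominating "a == b" condition lets simplifyByDomEq fold "a - b" to 0
// inside the guarded block, and would likewise fold a / b to 1 or "a & b"
// to b.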

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToIntOrAddr(m_Value(X))) &&
      match(Op1, m_PtrToIntOrAddr(m_Value(Y)))) {
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
                                     Q.DL);
  }

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  // (sub nuw C_Mask, (xor X, C_Mask)) -> X
  if (IsNUW) {
    Value *X;
    if (match(Op1, m_Xor(m_Value(X), m_Specific(Op0))) &&
        match(Op0, m_LowBitMask()))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
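
// Illustrative pointer-difference fold handled above (assumed IR):
//   %p1 = getelementptr i8, ptr %base, i64 8
//   %i1 = ptrtoint ptr %p1 to i64
//   %i0 = ptrtoint ptr %base to i64
//   %d  = sub i64 %i1, %i0
// Both operands strip to %base with offsets 8 and 0, so %d folds to 8.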

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
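
// Example of the exact-division fold above (assumed IR):
//   %q = udiv exact i32 %x, %y
//   %m = mul i32 %q, %y
// "exact" guarantees the division discarded no bits, so %m folds back to %x.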

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  // ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) && computeKnownBits(X, Q).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}
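
// Example (illustrative): for "udiv i8 %x, 16" where %x is known to have its
// top four bits clear, getMaxValue() is at most 15 < 16, so isDivZero returns
// true and the division folds to 0 (or the remainder folds to %x).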

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, Q);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it's provable indirectly that the denominator is zero but
  // not trivially simplifiable (e.g. known zero only through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1.
  // e.g. 1, zext (i8 X), sdiv X (Y and 1)
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  // X * Y / Y -> X
  // X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}
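
// Illustrative no-overflow fold from above (assumed IR):
//   %m = mul nuw i32 %x, %y
//   %q = udiv i32 %m, %y
//   %r = urem i32 %m, %y
// Since the multiplication cannot wrap, %q folds to %x and %r folds to 0.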

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC))) {
    // If this is an exact divide by a constant, then the dividend (Op0) must
    // have at least as many trailing zeros as the divisor to divide evenly. If
    // it has fewer trailing zeros, then the result must be poison.
    if (DivC->countr_zero()) {
      KnownBits KnownOp0 = computeKnownBits(Op0, Q);
      if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
        return PoisonValue::get(Op0->getType());
    }

    // udiv exact (mul nsw X, C), C --> X
    // sdiv exact (mul nuw X, C), C --> X
    // where C is not a power of 2.
    Value *X;
    if (!DivC->isPowerOf2() &&
        (Opcode == Instruction::UDiv
             ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
             : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
      return X;
  }

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo) {
    if ((Opcode == Instruction::SRem &&
         match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
        (Opcode == Instruction::URem &&
         match(Op0, m_NUWShl(m_Specific(Op1), m_Value()))))
      return Constant::getNullValue(Op0->getType());

    const APInt *C0;
    if (match(Op1, m_APInt(C0))) {
      // (srem (mul nsw X, C1), C0) -> 0 if C1 s% C0 == 0
      // (urem (mul nuw X, C1), C0) -> 0 if C1 u% C0 == 0
      if (Opcode == Instruction::SRem
              ? match(Op0,
                      m_NSWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.srem(*C0).isZero();
                               })))
              : match(Op0,
                      m_NUWMul(m_Value(), m_CheckedInt([C0](const APInt &C) {
                                 return C.urem(*C0).isZero();
                               }))))
        return Constant::getNullValue(Op0->getType());
    }
  }
  return nullptr;
}
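
// Example of the shl-based remainder fold (assumed IR):
//   %s = shl nuw i32 %x, %a
//   %r = urem i32 %s, %x
// "nuw" guarantees %s is exactly %x * 2^%a, a multiple of %x, so %r folds
// to 0.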

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison. This covers scalars and
  // fixed/scalable vectors with splat constants.
  const APInt *AmountC;
  if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
    return true;

  // Try harder for fixed-length vectors:
  // If all lanes of a vector shift are poison, the whole shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}
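
// Example (illustrative): "shl i8 %x, 9" shifts by at least the bit width
// (9 >= 8), so isPoisonShift returns true and the caller folds the whole
// shift to poison rather than relying on target-dependent behavior.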

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // poison shift by X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits KnownAmt = computeKnownBits(Op1, Q);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, Q);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}
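
// Example of the trailing-zeros case above (illustrative):
//   %amt = and i32 %a, 96      ; only bits 5-6 can be set
//   %s   = shl i32 %x, %amt
// All five valid shift-amount bits are known zero, so %amt is either 0 or at
// least 32 (poison); in every well-defined execution %s equals %x.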

/// Given operands for an LShr or AShr, see if we can fold the result. If not,
/// this returns null.
static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool IsExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return IsExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  // TODO: Generalize by counting trailing zeros (see fold for exact division).
  if (IsExact) {
    KnownBits Op0Known = computeKnownBits(Op0, Q);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
    return V;

  Type *Ty = Op0->getType();
  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
  if (Q.isUndefValue(Op0))
    return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);

  // (X >> A) << A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo &&
      match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
    return X;

  // shl nuw i8 C, %x -> C iff C has sign bit set.
  if (IsNUW && match(Op0, m_Negative()))
    return Op0;
  // NOTE: could use computeKnownBits() / LazyValueInfo,
  // but the cost-benefit analysis suggests it isn't worth it.

  // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
  // that the sign-bit does not change, so the only input that does not
  // produce poison is 0, and "0 << (bitwidth-1) --> 0".
  if (IsNSW && IsNUW &&
      match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
    return Constant::getNullValue(Ty);

  return nullptr;
}

Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}
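
// Example of the sign-bit fold above (assumed IR):
//   %s = shl nuw i8 -100, %x
// Any nonzero shift would move the already-set sign bit out, violating nuw,
// so %x must be 0 in a well-defined program and %s folds to the constant.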

/// Given operands for an LShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
  // We can return X as we do in the above case since OR alters no bits in X.
  // SimplifyDemandedBits in InstCombine can do more general optimization for
  // bit manipulation. This pattern aims to provide opportunities for other
  // optimizers by supporting a simple but common case in InstSimplify.
  Value *Y;
  const APInt *ShRAmt, *ShLAmt;
  if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
      match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
      *ShRAmt == *ShLAmt) {
    const KnownBits YKnown = computeKnownBits(Y, Q);
    const unsigned EffWidthY = YKnown.countMaxActiveBits();
    if (ShRAmt->uge(EffWidthY))
      return X;
  }

  return nullptr;
}

Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an AShr, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
                                    MaxRecurse))
    return V;

  // -1 >>a X --> -1
  // (-1 << X) a>> X --> -1
  // We could return the original -1 constant to preserve poison elements.
  if (match(Op0, m_AllOnes()) ||
      match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
    return Constant::getAllOnesValue(Op0->getType());

  // (X << A) >> A -> X
  Value *X;
  if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
    return X;

  // Arithmetic shifting an all-sign-bit value is a no-op.
  unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, Q.AC, Q.CxtI, Q.DT);
  if (NumSignBits == Op0->getType()->getScalarSizeInBits())
    return Op0;

  return nullptr;
}

Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
}
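
// Example of the sign-bit no-op above (assumed IR):
//   %e = sext i1 %b to i32     ; all 32 bits are copies of the sign bit
//   %r = ashr i32 %e, %n
// ComputeNumSignBits reports 32 sign bits, so the arithmetic shift cannot
// change the value and %r folds to %e.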
1508
1509/// Commuted variants are assumed to be handled by calling this function again
1510/// with the parameters swapped.
1512 ICmpInst *UnsignedICmp, bool IsAnd,
1513 const SimplifyQuery &Q) {
1514 Value *X, *Y;
1515
1516 CmpPredicate EqPred;
1517 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1518 !ICmpInst::isEquality(EqPred))
1519 return nullptr;
1520
1521 CmpPredicate UnsignedPred;
1522
1523 Value *A, *B;
1524 // Y = (A - B);
1525 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1526 if (match(UnsignedICmp,
1527 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1528 ICmpInst::isUnsigned(UnsignedPred)) {
1529 // A >=/<= B || (A - B) != 0 <--> true
1530 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1531 UnsignedPred == ICmpInst::ICMP_ULE) &&
1532 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1533 return ConstantInt::getTrue(UnsignedICmp->getType());
1534 // A </> B && (A - B) == 0 <--> false
1535 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1536 UnsignedPred == ICmpInst::ICMP_UGT) &&
1537 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1538 return ConstantInt::getFalse(UnsignedICmp->getType());
1539
1540 // A </> B && (A - B) != 0 <--> A </> B
1541 // A </> B || (A - B) != 0 <--> (A - B) != 0
1542 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1543 UnsignedPred == ICmpInst::ICMP_UGT))
1544 return IsAnd ? UnsignedICmp : ZeroICmp;
1545
1546 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1547 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1548 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1549 UnsignedPred == ICmpInst::ICMP_UGE))
1550 return IsAnd ? ZeroICmp : UnsignedICmp;
1551 }
1552
1553 // Given Y = (A - B)
1554 // Y >= A && Y != 0 --> Y >= A iff B != 0
1555 // Y < A || Y == 0 --> Y < A iff B != 0
1556 if (match(UnsignedICmp,
1557 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1558 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1559 EqPred == ICmpInst::ICMP_NE && isKnownNonZero(B, Q))
1560 return UnsignedICmp;
1561 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1562 EqPred == ICmpInst::ICMP_EQ && isKnownNonZero(B, Q))
1563 return UnsignedICmp;
1564 }
1565 }
1566
1567 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1568 ICmpInst::isUnsigned(UnsignedPred))
1569 ;
1570 else if (match(UnsignedICmp,
1571 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1572 ICmpInst::isUnsigned(UnsignedPred))
1573 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1574 else
1575 return nullptr;
1576
1577 // X > Y && Y == 0 --> Y == 0 iff X != 0
1578 // X > Y || Y == 0 --> X > Y iff X != 0
1579 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1580 isKnownNonZero(X, Q))
1581 return IsAnd ? ZeroICmp : UnsignedICmp;
1582
1583 // X <= Y && Y != 0 --> X <= Y iff X != 0
1584 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1585 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1586 isKnownNonZero(X, Q))
1587 return IsAnd ? UnsignedICmp : ZeroICmp;
1588
1589 // The transforms below here are expected to be handled more generally with
1590 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1591 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1592 // these are candidates for removal.
1593
1594 // X < Y && Y != 0 --> X < Y
1595 // X < Y || Y != 0 --> Y != 0
1596 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1597 return IsAnd ? UnsignedICmp : ZeroICmp;
1598
1599 // X >= Y && Y == 0 --> Y == 0
1600 // X >= Y || Y == 0 --> X >= Y
1601 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1602 return IsAnd ? ZeroICmp : UnsignedICmp;
1603
1604 // X < Y && Y == 0 --> false
1605 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1606 IsAnd)
1607 return getFalse(UnsignedICmp->getType());
1608
1609 // X >= Y || Y != 0 --> true
1610 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1611 !IsAnd)
1612 return getTrue(UnsignedICmp->getType());
1613
1614 return nullptr;
1615}
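// A worked instance of the range-check folds above (illustrative LLVM IR,
// i8 operands assumed, not taken from the source):
//   %y  = sub i8 %a, %b
//   %c1 = icmp uge i8 %a, %b  ; A >= B
//   %c2 = icmp ne i8 %y, 0    ; (A - B) != 0
//   %r  = or i1 %c1, %c2      ; --> true: if %a <u %b then %a != %b, so the
//                             ; subtraction is already nonzero.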
1616
1617/// Test if a pair of compares with a shared operand and 2 constants has an
1618/// empty set intersection, full set union, or if one compare is a superset of
1619/// the other.
1620static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1621 bool IsAnd) {
1622 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1).
1623 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1624 return nullptr;
1625
1626 const APInt *C0, *C1;
1627 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1628 !match(Cmp1->getOperand(1), m_APInt(C1)))
1629 return nullptr;
1630
1631 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1632 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1633
1634 // For and-of-compares, check if the intersection is empty:
1635 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1636 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1637 return getFalse(Cmp0->getType());
1638
1639 // For or-of-compares, check if the union is full:
1640 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1641 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1642 return getTrue(Cmp0->getType());
1643
1644 // Is one range a superset of the other?
1645 // If this is and-of-compares, take the smaller set:
1646 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1647 // If this is or-of-compares, take the larger set:
1648 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1649 if (Range0.contains(Range1))
1650 return IsAnd ? Cmp1 : Cmp0;
1651 if (Range1.contains(Range0))
1652 return IsAnd ? Cmp0 : Cmp1;
1653
1654 return nullptr;
1655}
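// Concrete instances of the range checks above (illustrative, i8):
//   (icmp ult X, 4) && (icmp ugt X, 10) --> false (empty intersection)
//   (icmp ult X, 10) || (icmp uge X, 4) --> true  (full union)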
1656
1657static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1658 const InstrInfoQuery &IIQ) {
1659 // (icmp (add V, C0), C1) & (icmp V, C0)
1660 CmpPredicate Pred0, Pred1;
1661 const APInt *C0, *C1;
1662 Value *V;
1663 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1664 return nullptr;
1665
1666 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1667 return nullptr;
1668
1669 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1670 if (AddInst->getOperand(1) != Op1->getOperand(1))
1671 return nullptr;
1672
1673 Type *ITy = Op0->getType();
1674 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1675 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1676
1677 const APInt Delta = *C1 - *C0;
1678 if (C0->isStrictlyPositive()) {
1679 if (Delta == 2) {
1680 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1681 return getFalse(ITy);
1682 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1683 return getFalse(ITy);
1684 }
1685 if (Delta == 1) {
1686 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1687 return getFalse(ITy);
1688 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1689 return getFalse(ITy);
1690 }
1691 }
1692 if (C0->getBoolValue() && IsNUW) {
1693 if (Delta == 2)
1694 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1695 return getFalse(ITy);
1696 if (Delta == 1)
1697 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1698 return getFalse(ITy);
1699 }
1700
1701 return nullptr;
1702}
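// Sketch of the Delta == 1 case above (illustrative, i8, C0 = 1, C1 = 2):
//   (icmp ule (add X, 1), 2) && (icmp sgt X, 1) --> false
// The first compare restricts X to {-1, 0, 1}, none of which is >s 1.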
1703
1704/// Try to simplify and/or of icmp with ctpop intrinsic.
1705static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1706 bool IsAnd) {
1707 CmpPredicate Pred0, Pred1;
1708 Value *X;
1709 const APInt *C;
1710 if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1711 m_APInt(C))) ||
1712 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1713 return nullptr;
1714
1715 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1716 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1717 return Cmp1;
1718 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1719 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1720 return Cmp1;
1721
1722 return nullptr;
1723}
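// E.g. (illustrative): (ctpop(X) == 4) || (X != 0) --> X != 0, since a
// nonzero popcount already implies X != 0.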
1724
1725static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1726 const SimplifyQuery &Q) {
1727 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1728 return X;
1729 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1730 return X;
1731
1732 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1733 return X;
1734
1735 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1736 return X;
1737 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1738 return X;
1739
1740 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1741 return X;
1742 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1743 return X;
1744
1745 return nullptr;
1746}
1747
1748static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1749 const InstrInfoQuery &IIQ) {
1750 // (icmp (add V, C0), C1) | (icmp V, C0)
1751 CmpPredicate Pred0, Pred1;
1752 const APInt *C0, *C1;
1753 Value *V;
1754 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1755 return nullptr;
1756
1757 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1758 return nullptr;
1759
1760 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1761 if (AddInst->getOperand(1) != Op1->getOperand(1))
1762 return nullptr;
1763
1764 Type *ITy = Op0->getType();
1765 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1766 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1767
1768 const APInt Delta = *C1 - *C0;
1769 if (C0->isStrictlyPositive()) {
1770 if (Delta == 2) {
1771 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1772 return getTrue(ITy);
1773 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1774 return getTrue(ITy);
1775 }
1776 if (Delta == 1) {
1777 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1778 return getTrue(ITy);
1779 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1780 return getTrue(ITy);
1781 }
1782 }
1783 if (C0->getBoolValue() && IsNUW) {
1784 if (Delta == 2)
1785 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1786 return getTrue(ITy);
1787 if (Delta == 1)
1788 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1789 return getTrue(ITy);
1790 }
1791
1792 return nullptr;
1793}
1794
1795static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1796 const SimplifyQuery &Q) {
1797 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1798 return X;
1799 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1800 return X;
1801
1802 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1803 return X;
1804
1805 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1806 return X;
1807 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1808 return X;
1809
1810 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1811 return X;
1812 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1813 return X;
1814
1815 return nullptr;
1816}
1817
1818/// Test if a pair of compares with a shared operand and 2 constants has an
1819/// empty set intersection, full set union, or if one compare is a superset of
1820/// the other.
1821static Value *simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1,
1822 bool IsAnd) {
1823 // Look for this pattern: {and/or} (fcmp X, C0), (fcmp X, C1).
1824 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1825 return nullptr;
1826
1827 const APFloat *C0, *C1;
1828 if (!match(Cmp0->getOperand(1), m_APFloat(C0)) ||
1829 !match(Cmp1->getOperand(1), m_APFloat(C1)))
1830 return nullptr;
1831
1832 std::optional<ConstantFPRange> Range0 = ConstantFPRange::makeExactFCmpRegion(
1833 IsAnd ? Cmp0->getPredicate() : Cmp0->getInversePredicate(), *C0);
1834 std::optional<ConstantFPRange> Range1 = ConstantFPRange::makeExactFCmpRegion(
1835 IsAnd ? Cmp1->getPredicate() : Cmp1->getInversePredicate(), *C1);
1836
1837 if (!Range0 || !Range1)
1838 return nullptr;
1839
1840 // For and-of-compares, check if the intersection is empty:
1841 // (fcmp X, C0) && (fcmp X, C1) --> empty set --> false
1842 if (Range0->intersectWith(*Range1).isEmptySet())
1843 return ConstantInt::getBool(Cmp0->getType(), !IsAnd);
1844
1845 // Is one range a superset of the other?
1846 // If this is and-of-compares, take the smaller set:
1847 // (fcmp ogt X, 4) && (fcmp ogt X, 42) --> fcmp ogt X, 42
1848 // If this is or-of-compares, take the larger set:
1849 // (fcmp ogt X, 4) || (fcmp ogt X, 42) --> fcmp ogt X, 4
1850 if (Range0->contains(*Range1))
1851 return Cmp1;
1852 if (Range1->contains(*Range0))
1853 return Cmp0;
1854
1855 return nullptr;
1856}
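// Concrete instances of the float-range checks above (illustrative):
//   (fcmp ogt X, 4.0) && (fcmp olt X, 1.0)  --> false (disjoint ranges)
//   (fcmp ogt X, 4.0) && (fcmp ogt X, 42.0) --> fcmp ogt X, 42.0 (subset)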
1857
1858static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1859 FCmpInst *RHS, bool IsAnd) {
1860 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1861 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1862 if (LHS0->getType() != RHS0->getType())
1863 return nullptr;
1864
1865 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1866 auto AbsOrSelfLHS0 = m_CombineOr(m_Specific(LHS0), m_FAbs(m_Specific(LHS0)));
1867 if ((PredL == FCmpInst::FCMP_ORD || PredL == FCmpInst::FCMP_UNO) &&
1868 ((FCmpInst::isOrdered(PredR) && IsAnd) ||
1869 (FCmpInst::isUnordered(PredR) && !IsAnd))) {
1870 // (fcmp ord X, 0) & (fcmp o** X/abs(X), Y) --> fcmp o** X/abs(X), Y
1871 // (fcmp uno X, 0) & (fcmp o** X/abs(X), Y) --> false
1872 // (fcmp uno X, 0) | (fcmp u** X/abs(X), Y) --> fcmp u** X/abs(X), Y
1873 // (fcmp ord X, 0) | (fcmp u** X/abs(X), Y) --> true
1874 if ((match(RHS0, AbsOrSelfLHS0) || match(RHS1, AbsOrSelfLHS0)) &&
1875 match(LHS1, m_PosZeroFP()))
1876 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1877 ? static_cast<Value *>(RHS)
1878 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1879 }
1880
1881 auto AbsOrSelfRHS0 = m_CombineOr(m_Specific(RHS0), m_FAbs(m_Specific(RHS0)));
1882 if ((PredR == FCmpInst::FCMP_ORD || PredR == FCmpInst::FCMP_UNO) &&
1883 ((FCmpInst::isOrdered(PredL) && IsAnd) ||
1884 (FCmpInst::isUnordered(PredL) && !IsAnd))) {
1885 // (fcmp o** X/abs(X), Y) & (fcmp ord X, 0) --> fcmp o** X/abs(X), Y
1886 // (fcmp o** X/abs(X), Y) & (fcmp uno X, 0) --> false
1887 // (fcmp u** X/abs(X), Y) | (fcmp uno X, 0) --> fcmp u** X/abs(X), Y
1888 // (fcmp u** X/abs(X), Y) | (fcmp ord X, 0) --> true
1889 if ((match(LHS0, AbsOrSelfRHS0) || match(LHS1, AbsOrSelfRHS0)) &&
1890 match(RHS1, m_PosZeroFP()))
1891 return FCmpInst::isOrdered(PredL) == FCmpInst::isOrdered(PredR)
1892 ? static_cast<Value *>(LHS)
1893 : ConstantInt::getBool(LHS->getType(), !IsAnd);
1894 }
1895
1896 if (auto *V = simplifyAndOrOfFCmpsWithConstants(LHS, RHS, IsAnd))
1897 return V;
1898
1899 return nullptr;
1900}
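// E.g. (illustrative): (fcmp ord X, 0.0) & (fcmp olt X, 1.0) --> fcmp olt
// X, 1.0, because any ordered compare of X already requires X to not be NaN.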
1901
1902static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1903 Value *Op1, bool IsAnd) {
1904 // Look through casts of the 'and' operands to find compares.
1905 auto *Cast0 = dyn_cast<CastInst>(Op0);
1906 auto *Cast1 = dyn_cast<CastInst>(Op1);
1907 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1908 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1909 Op0 = Cast0->getOperand(0);
1910 Op1 = Cast1->getOperand(0);
1911 }
1912
1913 Value *V = nullptr;
1914 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1915 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1916 if (ICmp0 && ICmp1)
1917 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1918 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1919
1920 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1921 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1922 if (FCmp0 && FCmp1)
1923 V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1924
1925 if (!V)
1926 return nullptr;
1927 if (!Cast0)
1928 return V;
1929
1930 // If we looked through casts, we can only handle a constant simplification
1931 // because we are not allowed to create a cast instruction here.
1932 if (auto *C = dyn_cast<Constant>(V))
1933 return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1934 Q.DL);
1935
1936 return nullptr;
1937}
1938
1939static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1940 const SimplifyQuery &Q,
1941 bool AllowRefinement,
1942 SmallVectorImpl<Instruction *> *DropFlags,
1943 unsigned MaxRecurse);
1944
1945static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1946 const SimplifyQuery &Q,
1947 unsigned MaxRecurse) {
1948 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1949 "Must be and/or");
1950 CmpPredicate Pred;
1951 Value *A, *B;
1952 if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1953 !ICmpInst::isEquality(Pred))
1954 return nullptr;
1955
1956 auto Simplify = [&](Value *Res) -> Value * {
1957 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1958
1959 // and (icmp eq a, b), x implies (a==b) inside x.
1960 // or (icmp ne a, b), x implies (a==b) inside x.
1961 // If x simplifies to true/false, we can simplify the and/or.
1962 if (Pred ==
1963 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1964 if (Res == Absorber)
1965 return Absorber;
1966 if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1967 return Op0;
1968 return nullptr;
1969 }
1970
1971 // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1972 // then we can drop the icmp, as x will already be false in the case where
1973 // the icmp is false. Similar for or and true.
1974 if (Res == Absorber)
1975 return Op1;
1976 return nullptr;
1977 };
1978
1979 // In the final case (Res == Absorber with inverted predicate), it is safe to
1980 // refine poison during simplification, but not undef. For simplicity always
1981 // disable undef-based folds here.
1982 if (Value *Res = simplifyWithOpReplaced(Op1, A, B, Q.getWithoutUndef(),
1983 /* AllowRefinement */ true,
1984 /* DropFlags */ nullptr, MaxRecurse))
1985 return Simplify(Res);
1986 if (Value *Res = simplifyWithOpReplaced(Op1, B, A, Q.getWithoutUndef(),
1987 /* AllowRefinement */ true,
1988 /* DropFlags */ nullptr, MaxRecurse))
1989 return Simplify(Res);
1990
1991 return nullptr;
1992}
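// A worked example of the equality substitution above (illustrative IR):
//   %c = icmp eq i32 %a, %b
//   %d = icmp ult i32 %a, %b
//   %r = and i1 %c, %d
// Substituting %a == %b into %d yields "icmp ult %b, %b" == false, the
// 'and' absorber, so %r folds to false.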
1993
1994/// Given a bitwise logic op, check if the operands are add/sub with a common
1995/// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1996static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1997 Instruction::BinaryOps Opcode) {
1998 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1999 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
2000 Value *X;
2001 Constant *C1, *C2;
2002 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
2003 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
2004 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2005 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2006 if (ConstantExpr::getNot(C1) == C2) {
2007 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2008 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2009 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
2010 Type *Ty = Op0->getType();
2011 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2012 : Constant::getAllOnesValue(Ty);
2013 }
2014 }
2015 return nullptr;
2016}
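// Sanity check of the identity above (illustrative, i8, C = 5, ~C = -6):
// (-6 - X) == ~(X + 5) for all X, since ~V == -V - 1. Hence the AND of the
// two values is 0 and the OR/XOR is -1.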
2017
2018// Commutative patterns for and that will be tried with both operand orders.
2019static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
2020 const SimplifyQuery &Q,
2021 unsigned MaxRecurse) {
2022 // ~A & A = 0
2023 if (match(Op0, m_Not(m_Specific(Op1))))
2024 return Constant::getNullValue(Op0->getType());
2025
2026 // (A | ?) & A = A
2027 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2028 return Op1;
2029
2030 // (X | ~Y) & (X | Y) --> X
2031 Value *X, *Y;
2032 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2033 match(Op1, m_c_Or(m_Specific(X), m_Specific(Y))))
2034 return X;
2035
2036 // If we have a multiplication overflow check that is being 'and'ed with a
2037 // check that one of the multipliers is not zero, we can omit the 'and', and
2038 // only keep the overflow check.
2039 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2040 return Op1;
2041
2042 // -A & A = A if A is a power of two or zero.
2043 if (match(Op0, m_Neg(m_Specific(Op1))) &&
2044 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2045 return Op1;
2046
2047 // This is a similar pattern used for checking if a value is a power-of-2:
2048 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2049 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2050 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI, Q.DT))
2051 return Constant::getNullValue(Op1->getType());
2052
2053 // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2054 // M <= N.
2055 const APInt *Shift1, *Shift2;
2056 if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2057 match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2058 isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, Q.AC, Q.CxtI) &&
2059 Shift1->uge(*Shift2))
2060 return Constant::getNullValue(Op0->getType());
2061
2062 if (Value *V =
2063 simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2064 return V;
2065
2066 return nullptr;
2067}
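// E.g. the power-of-two check above (illustrative, i8, A = 8):
// (8 - 1) & 8 == 0b0111 & 0b1000 == 0.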
2068
2069/// Given operands for an And, see if we can fold the result.
2070/// If not, this returns null.
2071static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2072 unsigned MaxRecurse) {
2073 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2074 return C;
2075
2076 // X & poison -> poison
2077 if (isa<PoisonValue>(Op1))
2078 return Op1;
2079
2080 // X & undef -> 0
2081 if (Q.isUndefValue(Op1))
2082 return Constant::getNullValue(Op0->getType());
2083
2084 // X & X = X
2085 if (Op0 == Op1)
2086 return Op0;
2087
2088 // X & 0 = 0
2089 if (match(Op1, m_Zero()))
2090 return Constant::getNullValue(Op0->getType());
2091
2092 // X & -1 = X
2093 if (match(Op1, m_AllOnes()))
2094 return Op0;
2095
2096 if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2097 return Res;
2098 if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2099 return Res;
2100
2101 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2102 return V;
2103
2104 // A mask that only clears known zeros of a shifted value is a no-op.
2105 const APInt *Mask;
2106 const APInt *ShAmt;
2107 Value *X, *Y;
2108 if (match(Op1, m_APInt(Mask))) {
2109 // If all bits in the inverted and shifted mask are clear:
2110 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2111 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2112 (~(*Mask)).lshr(*ShAmt).isZero())
2113 return Op0;
2114
2115 // If all bits in the inverted and shifted mask are clear:
2116 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2117 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2118 (~(*Mask)).shl(*ShAmt).isZero())
2119 return Op0;
2120 }
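// E.g. (illustrative, i16): and (shl X, 8), 0xFF00 --> shl X, 8, because
// ~0xFF00 == 0x00FF and 0x00FF lshr 8 == 0, so the mask can only clear
// bits the shift already zeroed.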
2121
2122 // and 2^x-1, 2^C --> 0 where x <= C.
2123 const APInt *PowerC;
2124 Value *Shift;
2125 if (match(Op1, m_Power2(PowerC)) &&
2126 match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2127 isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI,
2128 Q.DT)) {
2129 KnownBits Known = computeKnownBits(Shift, Q);
2130 // Use getActiveBits() to make use of the additional power of two knowledge
2131 if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2132 return ConstantInt::getNullValue(Op1->getType());
2133 }
2134
2135 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2136 return V;
2137
2138 // Try some generic simplifications for associative operations.
2139 if (Value *V =
2140 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2141 return V;
2142
2143 // And distributes over Or. Try some generic simplifications based on this.
2144 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2145 Instruction::Or, Q, MaxRecurse))
2146 return V;
2147
2148 // And distributes over Xor. Try some generic simplifications based on this.
2149 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2150 Instruction::Xor, Q, MaxRecurse))
2151 return V;
2152
2153 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2154 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2155 // A & (A && B) -> A && B
2156 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2157 return Op1;
2158 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2159 return Op0;
2160 }
2161 // If the operation is with the result of a select instruction, check
2162 // whether operating on either branch of the select always yields the same
2163 // value.
2164 if (Value *V =
2165 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2166 return V;
2167 }
2168
2169 // If the operation is with the result of a phi instruction, check whether
2170 // operating on all incoming values of the phi always yields the same value.
2171 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2172 if (Value *V =
2173 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2174 return V;
2175
2176 // Assuming the effective width of Y is not larger than A, i.e. all bits
2177 // from X and Y are disjoint in (X << A) | Y,
2178 // if the mask of this AND op covers all bits of X or Y, while it covers
2179 // no bits from the other, we can bypass this AND op. E.g.,
2180 // ((X << A) | Y) & Mask -> Y,
2181 // if Mask = ((1 << effective_width_of(Y)) - 1)
2182 // ((X << A) | Y) & Mask -> X << A,
2183 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2184 // SimplifyDemandedBits in InstCombine can optimize the general case.
2185 // This pattern aims to help other passes for a common case.
2186 Value *XShifted;
2187 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
2188 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2189 m_Value(XShifted)),
2190 m_Value(Y)))) {
2191 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2192 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2193 const KnownBits YKnown = computeKnownBits(Y, Q);
2194 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2195 if (EffWidthY <= ShftCnt) {
2196 const KnownBits XKnown = computeKnownBits(X, Q);
2197 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2198 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2199 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2200 // If the mask is extracting all bits from X or Y as is, we can skip
2201 // this AND op.
2202 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2203 return Y;
2204 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2205 return XShifted;
2206 }
2207 }
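// Sketch of the bypass above (illustrative, i8, ShftCnt = 4): if Y is known
// to fit in 4 bits, ((X << 4) | Y) & 0x0F --> Y, since the mask covers all
// possible bits of Y and none of (X << 4).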
2208
2209 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2210 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2211 BinaryOperator *Or;
2212 if (match(Op0, m_c_Xor(m_Value(X),
2213 m_CombineAnd(m_BinOp(Or),
2214 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2215 match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2216 return Constant::getNullValue(Op0->getType());
2217
2218 const APInt *C1;
2219 Value *A;
2220 // (A ^ C) & (A ^ ~C) -> 0
2221 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2222 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2223 return Constant::getNullValue(Op0->getType());
2224
2225 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2226 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2227 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2228 if (*Implied == true)
2229 return Op0;
2230 // If Op0 is true implies Op1 is false, then they are not true together.
2231 if (*Implied == false)
2232 return ConstantInt::getFalse(Op0->getType());
2233 }
2234 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2235 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2236 if (*Implied)
2237 return Op1;
2238 // If Op1 is true implies Op0 is false, then they are not true together.
2239 if (!*Implied)
2240 return ConstantInt::getFalse(Op1->getType());
2241 }
2242 }
2243
2244 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2245 return V;
2246
2247 return nullptr;
2248}
2249
2250Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2251 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2252}
2253
2254// TODO: Many of these folds could use LogicalAnd/LogicalOr.
2255static Value *simplifyOrLogic(Value *X, Value *Y) {
2256 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2257 Type *Ty = X->getType();
2258
2259 // X | ~X --> -1
2260 if (match(Y, m_Not(m_Specific(X))))
2261 return ConstantInt::getAllOnesValue(Ty);
2262
2263 // X | ~(X & ?) = -1
2264 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2265 return ConstantInt::getAllOnesValue(Ty);
2266
2267 // X | (X & ?) --> X
2268 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2269 return X;
2270
2271 Value *A, *B;
2272
2273 // (A ^ B) | (A | B) --> A | B
2274 // (A ^ B) | (B | A) --> B | A
2275 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2276 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2277 return Y;
2278
2279 // ~(A ^ B) | (A | B) --> -1
2280 // ~(A ^ B) | (B | A) --> -1
2281 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2282 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2283 return ConstantInt::getAllOnesValue(Ty);
2284
2285 // (A & ~B) | (A ^ B) --> A ^ B
2286 // (~B & A) | (A ^ B) --> A ^ B
2287 // (A & ~B) | (B ^ A) --> B ^ A
2288 // (~B & A) | (B ^ A) --> B ^ A
2289 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2290 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2291 return Y;
2292
2293 // (~A ^ B) | (A & B) --> ~A ^ B
2294 // (B ^ ~A) | (A & B) --> B ^ ~A
2295 // (~A ^ B) | (B & A) --> ~A ^ B
2296 // (B ^ ~A) | (B & A) --> B ^ ~A
2297 if (match(X, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2298 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2299 return X;
2300
2301 // (~A | B) | (A ^ B) --> -1
2302 // (~A | B) | (B ^ A) --> -1
2303 // (B | ~A) | (A ^ B) --> -1
2304 // (B | ~A) | (B ^ A) --> -1
2305 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2306 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2307 return ConstantInt::getAllOnesValue(Ty);
2308
2309 // (~A & B) | ~(A | B) --> ~A
2310 // (~A & B) | ~(B | A) --> ~A
2311 // (B & ~A) | ~(A | B) --> ~A
2312 // (B & ~A) | ~(B | A) --> ~A
2313 Value *NotA;
2314 if (match(X, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2315 m_Value(B))) &&
2316 match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2317 return NotA;
2318 // The same is true of Logical And
2319 // TODO: This could share the logic of the version above if there was a
2320 // version of LogicalAnd that allowed more than just i1 types.
2321 if (match(X, m_c_LogicalAnd(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2322 m_Value(B))) &&
2323 match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2324 return NotA;
2325
2326 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2327 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2328 Value *NotAB;
2329 if (match(X, m_CombineAnd(m_Not(m_Xor(m_Value(A), m_Value(B))),
2330 m_Value(NotAB))) &&
2331 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2332 return NotAB;
2333
2334 // ~(A & B) | (A ^ B) --> ~(A & B)
2335 // ~(A & B) | (B ^ A) --> ~(A & B)
2336 if (match(X, m_CombineAnd(m_Not(m_And(m_Value(A), m_Value(B))),
2337 m_Value(NotAB))) &&
2338 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2339 return NotAB;
2340
2341 return nullptr;
2342}
2343
2344/// Given operands for an Or, see if we can fold the result.
2345/// If not, this returns null.
2346static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2347 unsigned MaxRecurse) {
2348 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2349 return C;
2350
2351 // X | poison -> poison
2352 if (isa<PoisonValue>(Op1))
2353 return Op1;
2354
2355 // X | undef -> -1
2356 // X | -1 = -1
2357 // Do not return Op1 because it may contain undef elements if it's a vector.
2358 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2359 return Constant::getAllOnesValue(Op0->getType());
2360
2361 // X | X = X
2362 // X | 0 = X
2363 if (Op0 == Op1 || match(Op1, m_Zero()))
2364 return Op0;
2365
2366 if (Value *R = simplifyOrLogic(Op0, Op1))
2367 return R;
2368 if (Value *R = simplifyOrLogic(Op1, Op0))
2369 return R;
2370
2371 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2372 return V;
2373
2374 // Rotated -1 is still -1:
2375 // (-1 << X) | (-1 >> (C - X)) --> -1
2376 // (-1 >> X) | (-1 << (C - X)) --> -1
2377 // ...with C <= bitwidth (and commuted variants).
2378 Value *X, *Y;
2379 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2380 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2381 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2382 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2383 const APInt *C;
2384 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2385 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2386 C->ule(X->getType()->getScalarSizeInBits())) {
2387 return ConstantInt::getAllOnesValue(X->getType());
2388 }
2389 }
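// E.g. (illustrative, i8): (-1 << X) | (-1 >> (8 - X)) --> -1: the shl
// keeps the high 8 - X bits set and the lshr keeps the low X bits set,
// which together cover all 8 bits.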
2390
2391 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2392 // are mixing in another shift that is redundant with the funnel shift.
2393
2394 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2395 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2396 if (match(Op0,
2397 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2398 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2399 return Op0;
2400 if (match(Op1,
2401 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2402 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2403 return Op1;
2404
2405 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2406 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2407 if (match(Op0,
2408 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2409 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2410 return Op0;
2411 if (match(Op1,
2412 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2413 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2414 return Op1;
2415
2416 if (Value *V =
2417 simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2418 return V;
2419 if (Value *V =
2420 simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2421 return V;
2422
2423 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2424 return V;
2425
2426 // If we have a multiplication overflow check that is being 'and'ed with a
2427 // check that one of the multipliers is not zero, we can omit the 'and', and
2428 // only keep the overflow check.
2429 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2430 return Op1;
2431 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2432 return Op0;
2433
2434 // Try some generic simplifications for associative operations.
2435 if (Value *V =
2436 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2437 return V;
2438
2439 // Or distributes over And. Try some generic simplifications based on this.
2440 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2441 Instruction::And, Q, MaxRecurse))
2442 return V;
2443
2444 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2445 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2446 // A | (A || B) -> A || B
2447 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2448 return Op1;
2449 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2450 return Op0;
2451 }
2452 // If the operation is with the result of a select instruction, check
2453 // whether operating on either branch of the select always yields the same
2454 // value.
2455 if (Value *V =
2456 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2457 return V;
2458 }
2459
2460 // (A & C1)|(B & C2)
2461 Value *A, *B;
2462 const APInt *C1, *C2;
2463 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2464 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2465 if (*C1 == ~*C2) {
2466 // (A & C1)|(B & C2)
2467 // If we have: ((V + N) & C1) | (V & C2)
2468 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2469 // replace with V+N.
2470 Value *N;
2471 if (C2->isMask() && // C2 == 0+1+
2472 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2473 // Add commutes, try both ways.
2474 if (MaskedValueIsZero(N, *C2, Q))
2475 return A;
2476 }
2477 // Or commutes, try both ways.
2478 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2479 // Add commutes, try both ways.
2480 if (MaskedValueIsZero(N, *C1, Q))
2481 return B;
2482 }
2483 }
2484 }
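// Worked instance of the mask merge above (illustrative, i8, C1 = 0xF0,
// C2 = 0x0F): ((V + N) & 0xF0) | (V & 0x0F) --> V + N when N & 0x0F == 0,
// since adding such an N cannot change the low four bits of V.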
2485
2486 // If the operation is with the result of a phi instruction, check whether
2487 // operating on all incoming values of the phi always yields the same value.
2488 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2489 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2490 return V;
2491
2492 // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2493 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2494 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2495 return Constant::getAllOnesValue(Op0->getType());
2496
2497 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2498 if (std::optional<bool> Implied =
2499 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2500 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2501 if (*Implied == false)
2502 return Op0;
2503 // If Op0 is false implies Op1 is true, then at least one is always true.
2504 if (*Implied == true)
2505 return ConstantInt::getTrue(Op0->getType());
2506 }
2507 if (std::optional<bool> Implied =
2508 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2509 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2510 if (*Implied == false)
2511 return Op1;
2512 // If Op1 is false implies Op0 is true, then at least one is always true.
2513 if (*Implied == true)
2514 return ConstantInt::getTrue(Op1->getType());
2515 }
2516 }
2517
2518 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2519 return V;
2520
2521 return nullptr;
2522}
2523
2524Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2525 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2526}
2527
2528/// Given operands for a Xor, see if we can fold the result.
2529/// If not, this returns null.
2530static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2531 unsigned MaxRecurse) {
2532 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2533 return C;
2534
2535 // X ^ poison -> poison
2536 if (isa<PoisonValue>(Op1))
2537 return Op1;
2538
2539 // A ^ undef -> undef
2540 if (Q.isUndefValue(Op1))
2541 return Op1;
2542
2543 // A ^ 0 = A
2544 if (match(Op1, m_Zero()))
2545 return Op0;
2546
2547 // A ^ A = 0
2548 if (Op0 == Op1)
2549 return Constant::getNullValue(Op0->getType());
2550
2551 // A ^ ~A = ~A ^ A = -1
2552 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2553 return Constant::getAllOnesValue(Op0->getType());
2554
2555 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2556 Value *A, *B;
2557 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2558 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2559 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2560 return A;
2561
2562 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2563 // The 'not' op must contain a complete -1 operand (no undef elements for
2564 // vector) for the transform to be safe.
2565 Value *NotA;
2566 if (match(X, m_c_Or(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2567 m_Value(B))) &&
2568 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2569 return NotA;
2570
2571 return nullptr;
2572 };
2573 if (Value *R = foldAndOrNot(Op0, Op1))
2574 return R;
2575 if (Value *R = foldAndOrNot(Op1, Op0))
2576 return R;
2577
2578 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2579 return V;
2580
2581 // Try some generic simplifications for associative operations.
2582 if (Value *V =
2583 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2584 return V;
2585
2586 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2587 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2588 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2589 // only if B and C are equal. If B and C are equal then (since we assume
2590 // that operands have already been simplified) "select(cond, B, C)" should
2591 // have been simplified to the common value of B and C already. Analysing
2592 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2593 // for threading over phi nodes.
2594
2595 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2596 return V;
2597
2598 // (xor (sub nuw C_Mask, X), C_Mask) -> X
2599 {
2600 Value *X;
2601 if (match(Op0, m_NUWSub(m_Specific(Op1), m_Value(X))) &&
2602 match(Op1, m_LowBitMask()))
2603 return X;
2604 }
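// E.g. (illustrative, i8, mask = 7): for X <= 7 (guaranteed by nuw),
// 7 - X == 7 ^ X because no bit borrows, so (7 - X) ^ 7 == X.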
2605
2606 return nullptr;
2607}
2608
2609Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2610 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2611}
2612
2613static Type *getCompareTy(Value *Op) {
2614 return CmpInst::makeCmpResultType(Op->getType());
2615}
2616
2617/// Rummage around inside V looking for something equivalent to the comparison
2618/// "LHS Pred RHS". Return such a value if found, otherwise return null.
2619/// Helper function for analyzing max/min idioms.
2620static Value *extractEquivalentCondition(Value *V, CmpPredicate Pred,
2621 Value *LHS, Value *RHS) {
2622 SelectInst *SI = dyn_cast<SelectInst>(V);
2623 if (!SI)
2624 return nullptr;
2625 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2626 if (!Cmp)
2627 return nullptr;
2628 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2629 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2630 return Cmp;
2631 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2632 LHS == CmpRHS && RHS == CmpLHS)
2633 return Cmp;
2634 return nullptr;
2635}
2636
2637/// Return true if the underlying object (storage) must be disjoint from
2638/// storage returned by any noalias return call.
2639static bool isAllocDisjoint(const Value *V) {
2640 // For allocas, we consider only static ones (dynamic
2641 // allocas might be transformed into calls to malloc not simultaneously
2642 // live with the compared-to allocation). For globals, we exclude symbols
2643 // that might be resolved lazily to symbols in another dynamically-loaded
2644 // library (and, thus, could be malloc'ed by the implementation).
2645 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2646 return AI->isStaticAlloca();
2647 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2648 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2649 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2650 !GV->isThreadLocal();
2651 if (const Argument *A = dyn_cast<Argument>(V))
2652 return A->hasByValAttr();
2653 return false;
2654}
2655
2656/// Return true if V1 and V2 are each the base of some distinct storage region
2657/// [V, object_size(V)] which do not overlap. Note that zero-sized regions
2658/// *are* possible, and that zero-sized regions do not overlap with any other.
2659static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2660 // Global variables always exist, so they always exist during the lifetime
2661 // of each other and all allocas. Global variables themselves usually have
2662 // non-overlapping storage, but since their addresses are constants, the
2663 // case involving two globals does not reach here and is instead handled in
2664 // constant folding.
2665 //
2666 // Two different allocas usually have different addresses...
2667 //
2668 // However, if there's an @llvm.stackrestore dynamically in between two
2669 // allocas, they may have the same address. It's tempting to reduce the
2670 // scope of the problem by only looking at *static* allocas here. That would
2671 // cover the majority of allocas while significantly reducing the likelihood
2672 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2673 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2674 // an entry block. Also, if we have a block that's not attached to a
2675 // function, we can't tell if it's "static" under the current definition.
2676 // Theoretically, this problem could be fixed by creating a new kind of
2677 // instruction specifically for static allocas. Such a new instruction
2678 // could be required to be at the top of the entry block, thus preventing it
2679 // from being subject to a @llvm.stackrestore. Instcombine could even
2680 // convert regular allocas into these special allocas. It'd be nifty.
2681 // However, until then, this problem remains open.
2682 //
2683 // So, we'll assume that two non-empty allocas have different addresses
2684 // for now.
2685 auto isByValArg = [](const Value *V) {
2686 const Argument *A = dyn_cast<Argument>(V);
2687 return A && A->hasByValAttr();
2688 };
2689
2690 // Byval args are backed by storage that does not overlap with other byval
2691 // args, allocas, or globals.
2692 if (isByValArg(V1))
2693 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2694 if (isByValArg(V2))
2695 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2696
2697 return isa<AllocaInst>(V1) &&
2698 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2699}
2700
2701// A significant optimization not implemented here is assuming that alloca
2702// addresses are not equal to incoming argument values. They don't *alias*,
2703// as we say, but that doesn't mean they aren't equal, so we take a
2704// conservative approach.
2705//
2706// This is inspired in part by C++11 5.10p1:
2707// "Two pointers of the same type compare equal if and only if they are both
2708// null, both point to the same function, or both represent the same
2709// address."
2710//
2711// This is pretty permissive.
2712//
2713// It's also partly due to C11 6.5.9p6:
2714// "Two pointers compare equal if and only if both are null pointers, both are
2715// pointers to the same object (including a pointer to an object and a
2716// subobject at its beginning) or function, both are pointers to one past the
2717// last element of the same array object, or one is a pointer to one past the
2718// end of one array object and the other is a pointer to the start of a
2719// different array object that happens to immediately follow the first array
2720// object in the address space."
2721//
2722// C11's version is more restrictive; however, there's no reason why an argument
2723// couldn't be a one-past-the-end value for a stack object in the caller and be
2724// equal to the beginning of a stack object in the callee.
2725//
2726// If the C and C++ standards are ever made sufficiently restrictive in this
2727// area, it may be possible to update LLVM's semantics accordingly and reinstate
2728// this optimization.
2729static Constant *computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS,
2730 const SimplifyQuery &Q) {
2731 assert(LHS->getType() == RHS->getType() && "Must have same types");
2732 const DataLayout &DL = Q.DL;
2733 const TargetLibraryInfo *TLI = Q.TLI;
2734
2735 // We fold equality and unsigned predicates on pointer comparisons, but forbid
2736 // signed predicates since a GEP with inbounds could cross the sign boundary.
2737 if (CmpInst::isSigned(Pred))
2738 return nullptr;
2739
2740 // We have to switch to a signed predicate to handle negative indices from
2741 // the base pointer.
2742 Pred = ICmpInst::getSignedPredicate(Pred);
2743
2744 // Strip off any constant offsets so that we can reason about them.
2745 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2746 // here and compare base addresses like AliasAnalysis does, however there are
2747 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2748 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2749 // doesn't need to guarantee pointer inequality when it says NoAlias.
2750
2751 // Even if a non-inbounds GEP occurs along the path we can still optimize
2752 // equality comparisons concerning the result.
2753 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2754 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2755 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2756 LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2757 RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2758
2759 // If LHS and RHS are related via constant offsets to the same base
2760 // value, we can replace it with an icmp which just compares the offsets.
2761 if (LHS == RHS)
2762 return ConstantInt::get(getCompareTy(LHS),
2763 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2764
2765 // Various optimizations for (in)equality comparisons.
2766 if (ICmpInst::isEquality(Pred)) {
2767 // Different non-empty allocations that exist at the same time have
2768 // different addresses (if the program can tell). If the offsets are
2769 // within the bounds of their allocations (and not one-past-the-end!
2770 // so we can't use inbounds!), and their allocations aren't the same,
2771 // the pointers are not equal.
2772 if (haveNonOverlappingStorage(LHS, RHS)) {
2773 uint64_t LHSSize, RHSSize;
2774 ObjectSizeOpts Opts;
2775 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2776 auto *F = [](Value *V) -> Function * {
2777 if (auto *I = dyn_cast<Instruction>(V))
2778 return I->getFunction();
2779 if (auto *A = dyn_cast<Argument>(V))
2780 return A->getParent();
2781 return nullptr;
2782 }(LHS);
2783 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2784 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) && LHSSize != 0 &&
2785 getObjectSize(RHS, RHSSize, DL, TLI, Opts) && RHSSize != 0) {
2786 APInt Dist = LHSOffset - RHSOffset;
2787 if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2788 return ConstantInt::get(getCompareTy(LHS),
2789 !CmpInst::isTrueWhenEqual(Pred));
2790 }
2791 }
2792
2793 // If one side of the equality comparison must come from a noalias call
2794 // (meaning a system memory allocation function), and the other side must
2795 // come from a pointer that cannot overlap with dynamically-allocated
2796 // memory within the lifetime of the current function (allocas, byval
2797 // arguments, globals), then determine the comparison result here.
2798 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2799 getUnderlyingObjects(LHS, LHSUObjs);
2800 getUnderlyingObjects(RHS, RHSUObjs);
2801
2802 // Is the set of underlying objects all noalias calls?
2803 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2804 return all_of(Objects, isNoAliasCall);
2805 };
2806
2807 // Is the set of underlying objects all things which must be disjoint from
2808 // noalias calls. We assume that indexing from such disjoint storage
2809 // into the heap is undefined, and thus offsets can be safely ignored.
2810 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2811 return all_of(Objects, ::isAllocDisjoint);
2812 };
2813
2814 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2815 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2816 return ConstantInt::get(getCompareTy(LHS),
2817 !CmpInst::isTrueWhenEqual(Pred));
2818
2819 // Fold comparisons for non-escaping pointer even if the allocation call
2820 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2821 // dynamic allocation call could be either of the operands. Note that
2822 // the other operand cannot be based on the alloc - if it were, then
2823 // the cmp itself would be a capture.
2824 Value *MI = nullptr;
2825 if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonZero(RHS, Q))
2826 MI = LHS;
2827 else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonZero(LHS, Q))
2828 MI = RHS;
2829 if (MI) {
2830 // FIXME: This is incorrect, see PR54002. While we can assume that the
2831 // allocation is at an address that makes the comparison false, this
2832 // requires that *all* comparisons to that address be false, which
2833 // InstSimplify cannot guarantee.
2834 struct CustomCaptureTracker : public CaptureTracker {
2835 bool Captured = false;
2836 void tooManyUses() override { Captured = true; }
2837 Action captured(const Use *U, UseCaptureInfo CI) override {
2838 // TODO(captures): Use UseCaptureInfo.
2839 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2840 // Comparison against value stored in global variable. Given the
2841 // pointer does not escape, its value cannot be guessed and stored
2842 // separately in a global variable.
2843 unsigned OtherIdx = 1 - U->getOperandNo();
2844 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2845 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2846 return Continue;
2847 }
2848
2849 Captured = true;
2850 return Stop;
2851 }
2852 };
2853 CustomCaptureTracker Tracker;
2854 PointerMayBeCaptured(MI, &Tracker);
2855 if (!Tracker.Captured)
2856 return ConstantInt::get(getCompareTy(LHS),
2857 CmpInst::isFalseWhenEqual(Pred));
2858 }
2859 }
2860
2861 // Otherwise, fail.
2862 return nullptr;
2863}
2864
2865/// Fold an icmp when its operands have i1 scalar type.
2866static Value *simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS,
2867 const SimplifyQuery &Q) {
2868 Type *ITy = getCompareTy(LHS); // The return type.
2869 Type *OpTy = LHS->getType(); // The operand type.
2870 if (!OpTy->isIntOrIntVectorTy(1))
2871 return nullptr;
2872
2873 // A boolean compared to true/false can be reduced in 14 out of the 20
2874 // (10 predicates * 2 constants) possible combinations. The other
2875 // 6 cases require a 'not' of the LHS.
2876
2877 auto ExtractNotLHS = [](Value *V) -> Value * {
2878 Value *X;
2879 if (match(V, m_Not(m_Value(X))))
2880 return X;
2881 return nullptr;
2882 };
2883
2884 if (match(RHS, m_Zero())) {
2885 switch (Pred) {
2886 case CmpInst::ICMP_NE: // X != 0 -> X
2887 case CmpInst::ICMP_UGT: // X >u 0 -> X
2888 case CmpInst::ICMP_SLT: // X <s 0 -> X
2889 return LHS;
2890
2891 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2892 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2893 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2894 if (Value *X = ExtractNotLHS(LHS))
2895 return X;
2896 break;
2897
2898 case CmpInst::ICMP_ULT: // X <u 0 -> false
2899 case CmpInst::ICMP_SGT: // X >s 0 -> false
2900 return getFalse(ITy);
2901
2902 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2903 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2904 return getTrue(ITy);
2905
2906 default:
2907 break;
2908 }
2909 } else if (match(RHS, m_One())) {
2910 switch (Pred) {
2911 case CmpInst::ICMP_EQ: // X == 1 -> X
2912 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2913 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2914 return LHS;
2915
2916 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
2917 case CmpInst::ICMP_ULT: // not(X) <=u 1 -> X >=u 1 -> X
2918 case CmpInst::ICMP_SGT: // not(X) >s 1 -> X <=s -1 -> X
2919 if (Value *X = ExtractNotLHS(LHS))
2920 return X;
2921 break;
2922
2923 case CmpInst::ICMP_UGT: // X >u 1 -> false
2924 case CmpInst::ICMP_SLT: // X <s -1 -> false
2925 return getFalse(ITy);
2926
2927 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2928 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2929 return getTrue(ITy);
2930
2931 default:
2932 break;
2933 }
2934 }
2935
2936 switch (Pred) {
2937 default:
2938 break;
2939 case ICmpInst::ICMP_UGE:
2940 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2941 return getTrue(ITy);
2942 break;
2943 case ICmpInst::ICMP_SGE:
2944 /// For signed comparison, the values for an i1 are 0 and -1
2945 /// respectively. This maps into a truth table of:
2946 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2947 /// 0 | 0 | 1 (0 >= 0) | 1
2948 /// 0 | 1 | 1 (0 >= -1) | 1
2949 /// 1 | 0 | 0 (-1 >= 0) | 0
2950 /// 1 | 1 | 1 (-1 >= -1) | 1
2951 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2952 return getTrue(ITy);
2953 break;
2954 case ICmpInst::ICMP_ULE:
2955 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2956 return getTrue(ITy);
2957 break;
2958 case ICmpInst::ICMP_SLE:
2959 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2960 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2961 return getTrue(ITy);
2962 break;
2963 }
2964
2965 return nullptr;
2966}
2967
2968/// Try hard to fold icmp with zero RHS because this is a common case.
2969static Value *simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS,
2970 const SimplifyQuery &Q) {
2971 if (!match(RHS, m_Zero()))
2972 return nullptr;
2973
2974 Type *ITy = getCompareTy(LHS); // The return type.
2975 switch (Pred) {
2976 default:
2977 llvm_unreachable("Unknown ICmp predicate!");
2978 case ICmpInst::ICMP_ULT:
2979 return getFalse(ITy);
2980 case ICmpInst::ICMP_UGE:
2981 return getTrue(ITy);
2982 case ICmpInst::ICMP_EQ:
2983 case ICmpInst::ICMP_ULE:
2984 if (isKnownNonZero(LHS, Q))
2985 return getFalse(ITy);
2986 break;
2987 case ICmpInst::ICMP_NE:
2988 case ICmpInst::ICMP_UGT:
2989 if (isKnownNonZero(LHS, Q))
2990 return getTrue(ITy);
2991 break;
2992 case ICmpInst::ICMP_SLT: {
2993 KnownBits LHSKnown = computeKnownBits(LHS, Q);
2994 if (LHSKnown.isNegative())
2995 return getTrue(ITy);
2996 if (LHSKnown.isNonNegative())
2997 return getFalse(ITy);
2998 break;
2999 }
3000 case ICmpInst::ICMP_SLE: {
3001 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3002 if (LHSKnown.isNegative())
3003 return getTrue(ITy);
3004 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3005 return getFalse(ITy);
3006 break;
3007 }
3008 case ICmpInst::ICMP_SGE: {
3009 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3010 if (LHSKnown.isNegative())
3011 return getFalse(ITy);
3012 if (LHSKnown.isNonNegative())
3013 return getTrue(ITy);
3014 break;
3015 }
3016 case ICmpInst::ICMP_SGT: {
3017 KnownBits LHSKnown = computeKnownBits(LHS, Q);
3018 if (LHSKnown.isNegative())
3019 return getFalse(ITy);
3020 if (LHSKnown.isNonNegative() && isKnownNonZero(LHS, Q))
3021 return getTrue(ITy);
3022 break;
3023 }
3024 }
3025
3026 return nullptr;
3027}
3028
3029static Value *simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS,
3030 Value *RHS, const SimplifyQuery &Q) {
3031 Type *ITy = getCompareTy(RHS); // The return type.
3032
3033 Value *X;
3034 const APInt *C;
3035 if (!match(RHS, m_APIntAllowPoison(C)))
3036 return nullptr;
3037
3038 // Sign-bit checks can be optimized to true/false after unsigned
3039 // floating-point casts:
3040 // icmp slt (bitcast (uitofp X)), 0 --> false
3041 // icmp sgt (bitcast (uitofp X)), -1 --> true
3042 if (match(LHS, m_ElementWiseBitCast(m_UIToFP(m_Value(X))))) {
3043 bool TrueIfSigned;
3044 if (isSignBitCheck(Pred, *C, TrueIfSigned))
3045 return ConstantInt::getBool(ITy, !TrueIfSigned);
3046 }
3047
3048 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3049 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3050 if (RHS_CR.isEmptySet())
3051 return ConstantInt::getFalse(ITy);
3052 if (RHS_CR.isFullSet())
3053 return ConstantInt::getTrue(ITy);
3054
3055 ConstantRange LHS_CR =
3056 computeConstantRange(LHS, CmpInst::isSigned(Pred), Q.IIQ.UseInstrInfo);
3057 if (!LHS_CR.isFullSet()) {
3058 if (RHS_CR.contains(LHS_CR))
3059 return ConstantInt::getTrue(ITy);
3060 if (RHS_CR.inverse().contains(LHS_CR))
3061 return ConstantInt::getFalse(ITy);
3062 }
3063
3064 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3065 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3066 const APInt *MulC;
3067 if (Q.IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
3068 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
3069 *MulC != 0 && C->urem(*MulC) != 0) ||
3070 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowPoison(MulC))) &&
3071 *MulC != 0 && C->srem(*MulC) != 0)))
3072 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3073
3074 if (Pred == ICmpInst::ICMP_UGE && C->isOne() && isKnownNonZero(LHS, Q))
3075 return ConstantInt::getTrue(ITy);
3076
3077 return nullptr;
3078}
3079
3080enum class MonotonicType { GreaterEq, LowerEq };
3081
3082/// Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
3083static void getUnsignedMonotonicValues(SmallPtrSetImpl<Value *> &Res, Value *V,
3084 MonotonicType Type,
3085 const SimplifyQuery &Q,
3086 unsigned Depth = 0) {
3087 if (!Res.insert(V).second)
3088 return;
3089
3090 // Can be increased if useful.
3091 if (++Depth > 1)
3092 return;
3093
3094 auto *I = dyn_cast<Instruction>(V);
3095 if (!I)
3096 return;
3097
3098 Value *X, *Y;
3099 if (Type == MonotonicType::GreaterEq) {
3100 if (match(I, m_Or(m_Value(X), m_Value(Y))) ||
3101 match(I, m_Intrinsic<Intrinsic::uadd_sat>(m_Value(X), m_Value(Y)))) {
3102 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3103 getUnsignedMonotonicValues(Res, Y, Type, Q, Depth);
3104 }
3105 // X * Y >= X --> true
3106 if (match(I, m_NUWMul(m_Value(X), m_Value(Y)))) {
3107 if (isKnownNonZero(X, Q))
3108 getUnsignedMonotonicValues(Res, Y, Type, Q, Depth);
3109 if (isKnownNonZero(Y, Q))
3110 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3111 }
3112 } else {
3113 assert(Type == MonotonicType::LowerEq);
3114 switch (I->getOpcode()) {
3115 case Instruction::And:
3116 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3117 getUnsignedMonotonicValues(Res, I->getOperand(1), Type, Q, Depth);
3118 break;
3119 case Instruction::URem:
3120 case Instruction::UDiv:
3121 case Instruction::LShr:
3122 getUnsignedMonotonicValues(Res, I->getOperand(0), Type, Q, Depth);
3123 break;
3124 case Instruction::Call:
3125 if (match(I, m_Intrinsic<Intrinsic::usub_sat>(m_Value(X))))
3126 getUnsignedMonotonicValues(Res, X, Type, Q, Depth);
3127 break;
3128 default:
3129 break;
3130 }
3131 }
3132}
3133
3134static Value *simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS,
3135 Value *RHS,
3136 const SimplifyQuery &Q) {
3137 if (Pred != ICmpInst::ICMP_UGE && Pred != ICmpInst::ICMP_ULT)
3138 return nullptr;
3139
3140 // We have LHS uge GreaterValues and LowerValues uge RHS. If any of the
3141 // GreaterValues and LowerValues are the same, it follows that LHS uge RHS.
3142 SmallPtrSet<Value *, 4> GreaterValues;
3143 SmallPtrSet<Value *, 4> LowerValues;
3144 getUnsignedMonotonicValues(GreaterValues, LHS, MonotonicType::GreaterEq, Q);
3145 getUnsignedMonotonicValues(LowerValues, RHS, MonotonicType::LowerEq, Q);
3146 for (Value *GV : GreaterValues)
3147 if (LowerValues.contains(GV))
3148 return ConstantInt::getBool(getCompareTy(LHS),
3149 Pred == ICmpInst::ICMP_UGE);
3150 return nullptr;
3151}
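// E.g. (illustrative): icmp uge (or X, Y), (and X, Z) --> true, since
// (or X, Y) uge X and X uge (and X, Z) meet at the shared value X.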
3152
3153static Value *simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO,
3154 Value *RHS, const SimplifyQuery &Q,
3155 unsigned MaxRecurse) {
3156 Type *ITy = getCompareTy(RHS); // The return type.
3157
3158 Value *Y = nullptr;
3159 // icmp pred (or X, Y), X
3160 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3161 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3162 KnownBits RHSKnown = computeKnownBits(RHS, Q);
3163 KnownBits YKnown = computeKnownBits(Y, Q);
3164 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3165 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3166 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3167 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3168 }
3169 }
3170
3171 // icmp pred (urem X, Y), Y
3172 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3173 switch (Pred) {
3174 default:
3175 break;
3176 case ICmpInst::ICMP_SGT:
3177 case ICmpInst::ICMP_SGE: {
3178 KnownBits Known = computeKnownBits(RHS, Q);
3179 if (!Known.isNonNegative())
3180 break;
3181 [[fallthrough]];
3182 }
3183 case ICmpInst::ICMP_EQ:
3184 case ICmpInst::ICMP_UGT:
3185 case ICmpInst::ICMP_UGE:
3186 return getFalse(ITy);
3187 case ICmpInst::ICMP_SLT:
3188 case ICmpInst::ICMP_SLE: {
3189 KnownBits Known = computeKnownBits(RHS, Q);
3190 if (!Known.isNonNegative())
3191 break;
3192 [[fallthrough]];
3193 }
3194 case ICmpInst::ICMP_NE:
3195 case ICmpInst::ICMP_ULT:
3196 case ICmpInst::ICMP_ULE:
3197 return getTrue(ITy);
3198 }
3199 }
3200
3201 // If x is nonzero:
3202 // x >>u C <u x --> true for C != 0.
3203 // x >>u C != x --> true for C != 0.
3204 // x >>u C >=u x --> false for C != 0.
3205 // x >>u C == x --> false for C != 0.
3206 // x udiv C <u x --> true for C != 1.
3207 // x udiv C != x --> true for C != 1.
3208 // x udiv C >=u x --> false for C != 1.
3209 // x udiv C == x --> false for C != 1.
3210 // TODO: allow non-constant shift amount/divisor
3211 const APInt *C;
3212 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3213 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3214 if (isKnownNonZero(RHS, Q)) {
3215 switch (Pred) {
3216 default:
3217 break;
3218 case ICmpInst::ICMP_EQ:
3219 case ICmpInst::ICMP_UGE:
3220 case ICmpInst::ICMP_UGT:
3221 return getFalse(ITy);
3222 case ICmpInst::ICMP_NE:
3223 case ICmpInst::ICMP_ULT:
3224 case ICmpInst::ICMP_ULE:
3225 return getTrue(ITy);
3226 }
3227 }
3228 }
3229
3230 // (x*C1)/C2 <= x for C1 <= C2.
3231 // This holds even if the multiplication overflows: Assume that x != 0 and
3232 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3233 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3234 //
3235 // Additionally, either the multiplication or the division (or both) might
3236 // be represented as a shift:
3237 // (x*C1)>>C2 <= x for C1 < 2**C2.
3238 // (x<<C1)/C2 <= x for 2**C1 < C2.
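// Illustrative example: "icmp ule (udiv i32 (mul i32 %x, 3), 4), %x" folds
// to true because C1 (3) <= C2 (4); the shift forms fold analogously.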
3239 const APInt *C1, *C2;
3240 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3241 C1->ule(*C2)) ||
3242 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3243 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3244 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3245 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3246 if (Pred == ICmpInst::ICMP_UGT)
3247 return getFalse(ITy);
3248 if (Pred == ICmpInst::ICMP_ULE)
3249 return getTrue(ITy);
3250 }
3251
3252 // (sub C, X) == X, C is odd --> false
3253 // (sub C, X) != X, C is odd --> true
// (C - X == X would require C == 2 * X, which cannot hold for odd C.)
3254 if (match(LBO, m_Sub(m_APIntAllowPoison(C), m_Specific(RHS))) &&
3255 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3256 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3257
3258 return nullptr;
3259}
3260
3261// If only one of the icmp's operands has NSW flags, try to prove that:
3262//
3263// icmp slt (x + C1), (x +nsw C2)
3264//
3265// is equivalent to:
3266//
3267// icmp slt C1, C2
3268//
3269// which is true if x + C2 has the NSW flags set and:
3270// *) C1 < C2 && C1 >= 0, or
3271// *) C2 < C1 && C1 <= 0.
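// Illustrative example: "icmp slt (add i32 %x, 1), (add nsw i32 %x, 2)"
// matches the first case (C1 = 1 < C2 = 2 and C1 >= 0), so it folds the
// same way as "icmp slt i32 1, 2", i.e. to true.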
3272//
3273 static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS,
3274 const InstrInfoQuery &IIQ) {
3275 // TODO: only support icmp slt for now.
3276 if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
3277 return false;
3278
3279 // Canonicalize nsw add as RHS.
3280 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3281 std::swap(LHS, RHS);
3282 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3283 return false;
3284
3285 Value *X;
3286 const APInt *C1, *C2;
3287 if (!match(LHS, m_Add(m_Value(X), m_APInt(C1))) ||
3288 !match(RHS, m_Add(m_Specific(X), m_APInt(C2))))
3289 return false;
3290
3291 return (C1->slt(*C2) && C1->isNonNegative()) ||
3292 (C2->slt(*C1) && C1->isNonPositive());
3293}
3294
3295/// TODO: A large part of this logic is duplicated in InstCombine's
3296/// foldICmpBinOp(). We should be able to share that and avoid the code
3297/// duplication.
3298 static Value *simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS,
3299 const SimplifyQuery &Q,
3300 unsigned MaxRecurse) {
3301 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3302 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3303 if (MaxRecurse && (LBO || RBO)) {
3304 // Analyze the case when either LHS or RHS is an add instruction.
3305 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3306 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3307 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3308 if (LBO && LBO->getOpcode() == Instruction::Add) {
3309 A = LBO->getOperand(0);
3310 B = LBO->getOperand(1);
3311 NoLHSWrapProblem =
3312 ICmpInst::isEquality(Pred) ||
3313 (CmpInst::isUnsigned(Pred) &&
3314 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3315 (CmpInst::isSigned(Pred) &&
3316 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3317 }
3318 if (RBO && RBO->getOpcode() == Instruction::Add) {
3319 C = RBO->getOperand(0);
3320 D = RBO->getOperand(1);
3321 NoRHSWrapProblem =
3322 ICmpInst::isEquality(Pred) ||
3323 (CmpInst::isUnsigned(Pred) &&
3324 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3325 (CmpInst::isSigned(Pred) &&
3326 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3327 }
3328
3329 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3330 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3331 if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3332 Constant::getNullValue(RHS->getType()), Q,
3333 MaxRecurse - 1))
3334 return V;
3335
3336 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3337 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3338 if (Value *V =
3339 simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3340 C == LHS ? D : C, Q, MaxRecurse - 1))
3341 return V;
3342
3343 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3344 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3345 trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
3346 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3347 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3348 Value *Y, *Z;
3349 if (A == C) {
3350 // C + B == C + D -> B == D
3351 Y = B;
3352 Z = D;
3353 } else if (A == D) {
3354 // D + B == C + D -> B == C
3355 Y = B;
3356 Z = C;
3357 } else if (B == C) {
3358 // A + C == C + D -> A == D
3359 Y = A;
3360 Z = D;
3361 } else {
3362 assert(B == D);
3363 // A + D == C + D -> A == C
3364 Y = A;
3365 Z = C;
3366 }
3367 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3368 return V;
3369 }
3370 }
3371
3372 if (LBO)
3373 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3374 return V;
3375
3376 if (RBO)
3377 if (Value *V = simplifyICmpWithBinOpOnLHS(
3378 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3379 return V;
3380
3381 // 0 - (zext X) pred C
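// Illustrative example: %n = sub i32 0, (zext i8 %b to i32) always lies in
// [-255, 0], so "icmp slt i32 %n, 1" folds to true and "icmp sgt i32 %n, 0"
// folds to false.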
3382 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3383 const APInt *C;
3384 if (match(RHS, m_APInt(C))) {
3385 if (C->isStrictlyPositive()) {
3386 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3387 return ConstantInt::getTrue(getCompareTy(RHS));
3388 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3389 return ConstantInt::getFalse(getCompareTy(RHS));
3390 }
3391 if (C->isNonNegative()) {
3392 if (Pred == ICmpInst::ICMP_SLE)
3393 return ConstantInt::getTrue(getCompareTy(RHS));
3394 if (Pred == ICmpInst::ICMP_SGT)
3395 return ConstantInt::getFalse(getCompareTy(RHS));
3396 }
3397 }
3398 }
3399
3400 // If C2 is a power-of-2 and C is not:
3401 // (C2 << X) == C --> false
3402 // (C2 << X) != C --> true
3403 const APInt *C;
3404 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3405 match(RHS, m_APIntAllowPoison(C)) && !C->isPowerOf2()) {
3406 // C2 << X can equal zero in some circumstances.
3407 // This simplification might be unsafe if C is zero.
3408 //
3409 // We know it is safe if:
3410 // - The shift is nsw. We can't shift out the one bit.
3411 // - The shift is nuw. We can't shift out the one bit.
3412 // - C2 is one.
3413 // - C isn't zero.
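// Illustrative example: "icmp eq (shl nuw i32 4, %x), 6" folds to false;
// the nuw shift keeps exactly one bit set, so the result can never equal
// the non-power-of-2 constant 6.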
3414 if (Q.IIQ.hasNoSignedWrap(cast<ShlOperator>(LHS)) ||
3415 Q.IIQ.hasNoUnsignedWrap(cast<ShlOperator>(LHS)) ||
3416 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3417 if (Pred == ICmpInst::ICMP_EQ)
3418 return ConstantInt::getFalse(getCompareTy(RHS));
3419 if (Pred == ICmpInst::ICMP_NE)
3420 return ConstantInt::getTrue(getCompareTy(RHS));
3421 }
3422 }
3423
3424 // If C is a power-of-2:
3425 // (C << X) >u 0x8000 --> false
3426 // (C << X) <=u 0x8000 --> true
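// Illustrative example (i8): (shl i8 2, %x) can only be 0, 2, 4, ..., or
// 0x80, so comparing it ugt against the sign mask 0x80 folds to false and
// ule folds to true.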
3427 if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3428 if (Pred == ICmpInst::ICMP_UGT)
3429 return ConstantInt::getFalse(getCompareTy(RHS));
3430 if (Pred == ICmpInst::ICMP_ULE)
3431 return ConstantInt::getTrue(getCompareTy(RHS));
3432 }
3433
3434 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3435 return nullptr;
3436
3437 if (LBO->getOperand(0) == RBO->getOperand(0)) {
3438 switch (LBO->getOpcode()) {
3439 default:
3440 break;
3441 case Instruction::Shl: {
3442 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3443 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3444 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3445 !isKnownNonZero(LBO->getOperand(0), Q))
3446 break;
3447 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3448 RBO->getOperand(1), Q, MaxRecurse - 1))
3449 return V;
3450 break;
3451 }
3452 // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3453 // icmp ule A, B -> true
3454 // icmp ugt A, B -> false
3455 // icmp sle A, B -> true (C1 and C2 are the same sign)
3456 // icmp sgt A, B -> false (C1 and C2 are the same sign)
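// Illustrative example: "icmp ule (and i32 %x, 8), (and i32 %x, 12)" folds
// to true, since C1 = 8 is a subset of C2 = 12 (8 & 12 == 8).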
3457 case Instruction::And:
3458 case Instruction::Or: {
3459 const APInt *C1, *C2;
3460 if (ICmpInst::isRelational(Pred) &&
3461 match(LBO->getOperand(1), m_APInt(C1)) &&
3462 match(RBO->getOperand(1), m_APInt(C2))) {
3463 if (!C1->isSubsetOf(*C2)) {
3464 std::swap(C1, C2);
3465 Pred = ICmpInst::getSwappedPredicate(Pred);
3466 }
3467 if (C1->isSubsetOf(*C2)) {
3468 if (Pred == ICmpInst::ICMP_ULE)
3469 return ConstantInt::getTrue(getCompareTy(LHS));
3470 if (Pred == ICmpInst::ICMP_UGT)
3471 return ConstantInt::getFalse(getCompareTy(LHS));
3472 if (C1->isNonNegative() == C2->isNonNegative()) {
3473 if (Pred == ICmpInst::ICMP_SLE)
3474 return ConstantInt::getTrue(getCompareTy(LHS));
3475 if (Pred == ICmpInst::ICMP_SGT)
3476 return ConstantInt::getFalse(getCompareTy(LHS));
3477 }
3478 }
3479 }
3480 break;
3481 }
3482 }
3483 }
3484
3485 if (LBO->getOperand(1) == RBO->getOperand(1)) {
3486 switch (LBO->getOpcode()) {
3487 default:
3488 break;
3489 case Instruction::UDiv:
3490 case Instruction::LShr:
3491 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3492 !Q.IIQ.isExact(RBO))
3493 break;
3494 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3495 RBO->getOperand(0), Q, MaxRecurse - 1))
3496 return V;
3497 break;
3498 case Instruction::SDiv:
3499 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3500 !Q.IIQ.isExact(RBO))
3501 break;
3502 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3503 RBO->getOperand(0), Q, MaxRecurse - 1))
3504 return V;
3505 break;
3506 case Instruction::AShr:
3507 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3508 break;
3509 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3510 RBO->getOperand(0), Q, MaxRecurse - 1))
3511 return V;
3512 break;
3513 case Instruction::Shl: {
3514 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3515 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3516 if (!NUW && !NSW)
3517 break;
3518 if (!NSW && ICmpInst::isSigned(Pred))
3519 break;
3520 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3521 RBO->getOperand(0), Q, MaxRecurse - 1))
3522 return V;
3523 break;
3524 }
3525 }
3526 }
3527 return nullptr;
3528}
3529
3530 /// Simplify integer comparisons where at least one operand of the compare
3531 /// matches an integer min/max idiom.
3532 static Value *simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS,
3533 const SimplifyQuery &Q,
3534 unsigned MaxRecurse) {
3535 Type *ITy = getCompareTy(LHS); // The return type.
3536 Value *A, *B;
3537 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3538 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3539
3540 // Signed variants on "max(a,b)>=a -> true".
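// Illustrative example: "icmp sge (smax i32 %a, %b), %a" folds to true and
// "icmp slt (smax i32 %a, %b), %a" folds to false via the cases below.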
3541 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3542 if (A != RHS)
3543 std::swap(A, B); // smax(A, B) pred A.
3544 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3545 // We analyze this as smax(A, B) pred A.
3546 P = Pred;
3547 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3548 (A == LHS || B == LHS)) {
3549 if (A != LHS)
3550 std::swap(A, B); // A pred smax(A, B).
3551 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3552 // We analyze this as smax(A, B) swapped-pred A.
3553 P = CmpInst::getSwappedPredicate(Pred);
3554 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3555 (A == RHS || B == RHS)) {
3556 if (A != RHS)
3557 std::swap(A, B); // smin(A, B) pred A.
3558 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3559 // We analyze this as smax(-A, -B) swapped-pred -A.
3560 // Note that we do not need to actually form -A or -B thanks to EqP.
3561 P = CmpInst::getSwappedPredicate(Pred);
3562 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3563 (A == LHS || B == LHS)) {
3564 if (A != LHS)
3565 std::swap(A, B); // A pred smin(A, B).
3566 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3567 // We analyze this as smax(-A, -B) pred -A.
3568 // Note that we do not need to actually form -A or -B thanks to EqP.
3569 P = Pred;
3570 }
3571 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3572 // Cases correspond to "max(A, B) p A".
3573 switch (P) {
3574 default:
3575 break;
3576 case CmpInst::ICMP_EQ:
3577 case CmpInst::ICMP_SLE:
3578 // Equivalent to "A EqP B". This may be the same as the condition tested
3579 // in the max/min; if so, we can just return that.
3580 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3581 return V;
3582 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3583 return V;
3584 // Otherwise, see if "A EqP B" simplifies.
3585 if (MaxRecurse)
3586 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3587 return V;
3588 break;
3589 case CmpInst::ICMP_NE:
3590 case CmpInst::ICMP_SGT: {
3591 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3592 // Equivalent to "A InvEqP B". This may be the same as the condition
3593 // tested in the max/min; if so, we can just return that.
3594 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3595 return V;
3596 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3597 return V;
3598 // Otherwise, see if "A InvEqP B" simplifies.
3599 if (MaxRecurse)
3600 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3601 return V;
3602 break;
3603 }
3604 case CmpInst::ICMP_SGE:
3605 // Always true.
3606 return getTrue(ITy);
3607 case CmpInst::ICMP_SLT:
3608 // Always false.
3609 return getFalse(ITy);
3610 }
3611 }
3612
3613 // Unsigned variants on "max(a,b)>=a -> true".
3614 P = CmpInst::BAD_ICMP_PREDICATE;
3615 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3616 if (A != RHS)
3617 std::swap(A, B); // umax(A, B) pred A.
3618 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3619 // We analyze this as umax(A, B) pred A.
3620 P = Pred;
3621 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3622 (A == LHS || B == LHS)) {
3623 if (A != LHS)
3624 std::swap(A, B); // A pred umax(A, B).
3625 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3626 // We analyze this as umax(A, B) swapped-pred A.
3627 P = CmpInst::getSwappedPredicate(Pred);
3628 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3629 (A == RHS || B == RHS)) {
3630 if (A != RHS)
3631 std::swap(A, B); // umin(A, B) pred A.
3632 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3633 // We analyze this as umax(-A, -B) swapped-pred -A.
3634 // Note that we do not need to actually form -A or -B thanks to EqP.
3635 P = CmpInst::getSwappedPredicate(Pred);
3636 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3637 (A == LHS || B == LHS)) {
3638 if (A != LHS)
3639 std::swap(A, B); // A pred umin(A, B).
3640 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3641 // We analyze this as umax(-A, -B) pred -A.
3642 // Note that we do not need to actually form -A or -B thanks to EqP.
3643 P = Pred;
3644 }
3645 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3646 // Cases correspond to "max(A, B) p A".
3647 switch (P) {
3648 default:
3649 break;
3650 case CmpInst::ICMP_EQ:
3651 case CmpInst::ICMP_ULE:
3652 // Equivalent to "A EqP B". This may be the same as the condition tested
3653 // in the max/min; if so, we can just return that.
3654 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3655 return V;
3656 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3657 return V;
3658 // Otherwise, see if "A EqP B" simplifies.
3659 if (MaxRecurse)
3660 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3661 return V;
3662 break;
3663 case CmpInst::ICMP_NE:
3664 case CmpInst::ICMP_UGT: {
3665 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3666 // Equivalent to "A InvEqP B". This may be the same as the condition
3667 // tested in the max/min; if so, we can just return that.
3668 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3669 return V;
3670 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3671 return V;
3672 // Otherwise, see if "A InvEqP B" simplifies.
3673 if (MaxRecurse)
3674 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3675 return V;
3676 break;
3677 }
3678 case CmpInst::ICMP_UGE:
3679 return getTrue(ITy);
3680 case CmpInst::ICMP_ULT:
3681 return getFalse(ITy);
3682 }
3683 }
3684
3685 // Comparing one min and one max that share a common operand?
3686 // Canonicalize min operand to RHS.
3687 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3688 match(LHS, m_SMin(m_Value(), m_Value()))) {
3689 std::swap(LHS, RHS);
3690 Pred = ICmpInst::getSwappedPredicate(Pred);
3691 }
3692
3693 Value *C, *D;
3694 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3695 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3696 (A == C || A == D || B == C || B == D)) {
3697 // smax(A, B) >=s smin(A, D) --> true
3698 if (Pred == CmpInst::ICMP_SGE)
3699 return getTrue(ITy);
3700 // smax(A, B) <s smin(A, D) --> false
3701 if (Pred == CmpInst::ICMP_SLT)
3702 return getFalse(ITy);
3703 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3704 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3705 (A == C || A == D || B == C || B == D)) {
3706 // umax(A, B) >=u umin(A, D) --> true
3707 if (Pred == CmpInst::ICMP_UGE)
3708 return getTrue(ITy);
3709 // umax(A, B) <u umin(A, D) --> false
3710 if (Pred == CmpInst::ICMP_ULT)
3711 return getFalse(ITy);
3712 }
3713
3714 return nullptr;
3715}
3716
3717 static Value *simplifyICmpWithDominatingAssume(CmpPredicate Predicate,
3718 Value *LHS, Value *RHS,
3719 const SimplifyQuery &Q) {
3720 // Gracefully handle instructions that have not been inserted yet.
3721 if (!Q.AC || !Q.CxtI)
3722 return nullptr;
3723
3724 for (Value *AssumeBaseOp : {LHS, RHS}) {
3725 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3726 if (!AssumeVH)
3727 continue;
3728
3729 CallInst *Assume = cast<CallInst>(AssumeVH);
3730 if (std::optional<bool> Imp = isImpliedCondition(
3731 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3732 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3733 return ConstantInt::get(getCompareTy(LHS), *Imp);
3734 }
3735 }
3736
3737 return nullptr;
3738}
3739
3740 static Value *simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS,
3741 Value *RHS) {
3742 auto *II = dyn_cast<IntrinsicInst>(LHS);
3743 if (!II)
3744 return nullptr;
3745
3746 switch (II->getIntrinsicID()) {
3747 case Intrinsic::uadd_sat:
3748 // uadd.sat(X, Y) uge X + Y
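// Illustrative example: "icmp uge (uadd.sat i32 %x, %y), (add i32 %x, %y)"
// folds to true: the saturating add either equals the wrapping add or
// clamps to all-ones, which is uge everything.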
3749 if (match(RHS, m_c_Add(m_Specific(II->getArgOperand(0)),
3750 m_Specific(II->getArgOperand(1))))) {
3751 if (Pred == ICmpInst::ICMP_UGE)
3752 return ConstantInt::getTrue(getCompareTy(II));
3753 if (Pred == ICmpInst::ICMP_ULT)
3754 return ConstantInt::getFalse(getCompareTy(II));
3755 }
3756 return nullptr;
3757 case Intrinsic::usub_sat:
3758 // usub.sat(X, Y) ule X - Y
3759 if (match(RHS, m_Sub(m_Specific(II->getArgOperand(0)),
3760 m_Specific(II->getArgOperand(1))))) {
3761 if (Pred == ICmpInst::ICMP_ULE)
3762 return ConstantInt::getTrue(getCompareTy(II));
3763 if (Pred == ICmpInst::ICMP_UGT)
3764 return ConstantInt::getFalse(getCompareTy(II));
3765 }
3766 return nullptr;
3767 default:
3768 return nullptr;
3769 }
3770}
3771
3772/// Helper method to get range from metadata or attribute.
3773static std::optional<ConstantRange> getRange(Value *V,
3774 const InstrInfoQuery &IIQ) {
3775 if (Instruction *I = dyn_cast<Instruction>(V))
3776 if (MDNode *MD = IIQ.getMetadata(I, LLVMContext::MD_range))
3777 return getConstantRangeFromMetadata(*MD);
3778
3779 if (const Argument *A = dyn_cast<Argument>(V))
3780 return A->getRange();
3781 else if (const CallBase *CB = dyn_cast<CallBase>(V))
3782 return CB->getRange();
3783
3784 return std::nullopt;
3785}
3786
3787/// Given operands for an ICmpInst, see if we can fold the result.
3788/// If not, this returns null.
3789 static Value *simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
3790 const SimplifyQuery &Q, unsigned MaxRecurse) {
3791 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3792
3793 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3794 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3795 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3796
3797 // If we have a constant, make sure it is on the RHS.
3798 std::swap(LHS, RHS);
3799 Pred = CmpInst::getSwappedPredicate(Pred);
3800 }
3801 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3802
3803 Type *ITy = getCompareTy(LHS); // The return type.
3804
3805 // icmp poison, X -> poison
3806 if (isa<PoisonValue>(RHS))
3807 return PoisonValue::get(ITy);
3808
3809 // For EQ and NE, we can always pick a value for the undef to make the
3810 // predicate pass or fail, so we can return undef.
3811 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3812 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3813 return UndefValue::get(ITy);
3814
3815 // icmp X, X -> true/false
3816 // icmp X, undef -> true/false because undef could be X.
3817 if (LHS == RHS || Q.isUndefValue(RHS))
3818 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3819
3820 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3821 return V;
3822
3823 // TODO: Sink/common this with other potentially expensive calls that use
3824 // ValueTracking? See comment below for isKnownNonEqual().
3825 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3826 return V;
3827
3828 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q))
3829 return V;
3830
3831 // If both operands have a known range (from metadata or an attribute),
3832 // use the ranges to simplify the comparison.
3833 if (std::optional<ConstantRange> RhsCr = getRange(RHS, Q.IIQ))
3834 if (std::optional<ConstantRange> LhsCr = getRange(LHS, Q.IIQ)) {
3835 if (LhsCr->icmp(Pred, *RhsCr))
3836 return ConstantInt::getTrue(ITy);
3837
3838 if (LhsCr->icmp(CmpInst::getInversePredicate(Pred), *RhsCr))
3839 return ConstantInt::getFalse(ITy);
3840 }
3841
3842 // Compare of cast, for example (zext X) != 0 -> X != 0
3843 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3844 Instruction *LI = cast<CastInst>(LHS);
3845 Value *SrcOp = LI->getOperand(0);
3846 Type *SrcTy = SrcOp->getType();
3847 Type *DstTy = LI->getType();
3848
3849 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3850 // if the integer type is the same size as the pointer type.
3851 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3852 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3853 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3854 // Transfer the cast to the constant.
3855 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3856 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3857 Q, MaxRecurse - 1))
3858 return V;
3859 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3860 if (RI->getOperand(0)->getType() == SrcTy)
3861 // Compare without the cast.
3862 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3863 MaxRecurse - 1))
3864 return V;
3865 }
3866 }
3867
3868 if (isa<ZExtInst>(LHS)) {
3869 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3870 // same type.
3871 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3872 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3873 // Compare X and Y. Note that signed predicates become unsigned.
3874 if (Value *V =
3875 simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3876 RI->getOperand(0), Q, MaxRecurse - 1))
3877 return V;
3878 }
3879 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3880 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3881 if (SrcOp == RI->getOperand(0)) {
3882 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3883 return ConstantInt::getTrue(ITy);
3884 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3885 return ConstantInt::getFalse(ITy);
3886 }
3887 }
3888 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3889 // too. If not, then try to deduce the result of the comparison.
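// Illustrative example: for "icmp ult (zext i8 %b to i32), 256" the constant
// changes when truncated to i8 and re-extended (256 -> 0), so the comparison
// folds to true: a zext'd i8 is always less than 256.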
3890 else if (match(RHS, m_ImmConstant())) {
3891 Constant *C = dyn_cast<Constant>(RHS);
3892 assert(C != nullptr);
3893
3894 // Compute the constant that would happen if we truncated to SrcTy then
3895 // reextended to DstTy.
3896 Constant *Trunc =
3897 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3898 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3899 Constant *RExt =
3900 ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3901 assert(RExt && "Constant-fold of ImmConstant should not fail");
3902 Constant *AnyEq =
3903 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3904 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3905
3906 // If the re-extended constant didn't change any of the elements then
3907 // this is effectively also a case of comparing two zero-extended
3908 // values.
3909 if (AnyEq->isAllOnesValue() && MaxRecurse)
3910 if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3911 SrcOp, Trunc, Q, MaxRecurse - 1))
3912 return V;
3913
3914 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3915 // there. Use this to work out the result of the comparison.
3916 if (AnyEq->isNullValue()) {
3917 switch (Pred) {
3918 default:
3919 llvm_unreachable("Unknown ICmp predicate!");
3920 // LHS <u RHS.
3921 case ICmpInst::ICMP_EQ:
3922 case ICmpInst::ICMP_UGT:
3923 case ICmpInst::ICMP_UGE:
3924 return Constant::getNullValue(ITy);
3925
3926 case ICmpInst::ICMP_NE:
3927 case ICmpInst::ICMP_ULT:
3928 case ICmpInst::ICMP_ULE:
3929 return Constant::getAllOnesValue(ITy);
3930
3931 // LHS is non-negative. If RHS is negative then LHS >s LHS. If RHS
3932 // is non-negative then LHS <s RHS.
3933 case ICmpInst::ICMP_SGT:
3934 case ICmpInst::ICMP_SGE:
3935 return ConstantFoldCompareInstOperands(
3936 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3937 Q.DL);
3938 case ICmpInst::ICMP_SLT:
3939 case ICmpInst::ICMP_SLE:
3940 return ConstantFoldCompareInstOperands(
3941 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3942 Q.DL);
3943 }
3944 }
3945 }
3946 }
3947
3948 if (isa<SExtInst>(LHS)) {
3949 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3950 // same type.
3951 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3952 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3953 // Compare X and Y. Note that the predicate does not change.
3954 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3955 MaxRecurse - 1))
3956 return V;
3957 }
3958 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3959 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3960 if (SrcOp == RI->getOperand(0)) {
3961 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3962 return ConstantInt::getTrue(ITy);
3963 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3964 return ConstantInt::getFalse(ITy);
3965 }
3966 }
3967 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3968 // too. If not, then try to deduce the result of the comparison.
3969 else if (match(RHS, m_ImmConstant())) {
3970 Constant *C = cast<Constant>(RHS);
3971
3972 // Compute the constant that would happen if we truncated to SrcTy then
3973 // reextended to DstTy.
3974 Constant *Trunc =
3975 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3976 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3977 Constant *RExt =
3978 ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3979 assert(RExt && "Constant-fold of ImmConstant should not fail");
3980 Constant *AnyEq =
3981 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3982 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3983
3984 // If the re-extended constant didn't change then this is effectively
3985 // also a case of comparing two sign-extended values.
3986 if (AnyEq->isAllOnesValue() && MaxRecurse)
3987 if (Value *V =
3988 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3989 return V;
3990
3991 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3992 // bits there. Use this to work out the result of the comparison.
3993 if (AnyEq->isNullValue()) {
3994 switch (Pred) {
3995 default:
3996 llvm_unreachable("Unknown ICmp predicate!");
3997 case ICmpInst::ICMP_EQ:
3998 return Constant::getNullValue(ITy);
3999 case ICmpInst::ICMP_NE:
4000 return Constant::getAllOnesValue(ITy);
4001
4002 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
4003 // LHS >s RHS.
4004 case ICmpInst::ICMP_SGT:
4005 case ICmpInst::ICMP_SGE:
4006 return ConstantFoldCompareInstOperands(
4007 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
4008 Q.DL);
4009 case ICmpInst::ICMP_SLT:
4010 case ICmpInst::ICMP_SLE:
4011 return ConstantFoldCompareInstOperands(
4012 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
4013 Q.DL);
4014
4015 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
4016 // LHS >u RHS.
4017 case ICmpInst::ICMP_UGT:
4018 case ICmpInst::ICMP_UGE:
4019 // Comparison is true iff the LHS <s 0.
4020 if (MaxRecurse)
4021 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
4022 Constant::getNullValue(SrcTy), Q,
4023 MaxRecurse - 1))
4024 return V;
4025 break;
4026 case ICmpInst::ICMP_ULT:
4027 case ICmpInst::ICMP_ULE:
4028 // Comparison is true iff the LHS >=s 0.
4029 if (MaxRecurse)
4030 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
4031 Constant::getNullValue(SrcTy), Q,
4032 MaxRecurse - 1))
4033 return V;
4034 break;
4035 }
4036 }
4037 }
4038 }
4039 }
4040
4041 // icmp eq|ne X, Y -> false|true if X != Y
4042 // This is potentially expensive, and we have already computed known bits
4043 // for compares with 0 above, so only try this for a non-zero compare.
4044 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
4045 isKnownNonEqual(LHS, RHS, Q)) {
4046 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
4047 }
4048
4049 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4050 return V;
4051
4052 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4053 return V;
4054
4055 if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
4056 return V;
4057 if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4058 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4059 return V;
4060
4061 if (Value *V = simplifyICmpUsingMonotonicValues(Pred, LHS, RHS, Q))
4062 return V;
4063 if (Value *V = simplifyICmpUsingMonotonicValues(
4064 ICmpInst::getSwappedPredicate(Pred), RHS, LHS, Q))
4065 return V;
4066
4067 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4068 return V;
4069
4070 if (std::optional<bool> Res =
4071 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4072 return ConstantInt::getBool(ITy, *Res);
4073
4074 // Simplify comparisons of related pointers using a powerful, recursive
4075 // GEP-walk when we have target data available.
4076 if (LHS->getType()->isPointerTy())
4077 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4078 return C;
4079 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4080 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4081 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4082 Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4083 Q.DL.getTypeSizeInBits(CLHS->getType()))
4084 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
4085 CRHS->getPointerOperand(), Q))
4086 return C;
4087
4088 // If the comparison is with the result of a select instruction, check whether
4089 // comparing with either branch of the select always yields the same value.
4090 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4091 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4092 return V;
4093
4094 // If the comparison is with the result of a phi instruction, check whether
4095 // doing the compare with each incoming phi value yields a common result.
4096 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4097 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4098 return V;
4099
4100 return nullptr;
4101}
4102
4103 Value *llvm::simplifyICmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4104 const SimplifyQuery &Q) {
4105 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4106}
4107
4108/// Given operands for an FCmpInst, see if we can fold the result.
4109/// If not, this returns null.
4110 static Value *simplifyFCmpInst(CmpPredicate Pred, Value *LHS, Value *RHS,
4111 FastMathFlags FMF, const SimplifyQuery &Q,
4112 unsigned MaxRecurse) {
4113 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4114
4115 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4116 if (Constant *CRHS = dyn_cast<Constant>(RHS))
4117 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4118 Q.CxtI);
4119
4120 // If we have a constant, make sure it is on the RHS.
4121 std::swap(LHS, RHS);
4122 Pred = CmpInst::getSwappedPredicate(Pred);
4123 }
4124
4125 // Fold trivial predicates.
4126 Type *RetTy = getCompareTy(LHS);
4127 if (Pred == FCmpInst::FCMP_FALSE)
4128 return getFalse(RetTy);
4129 if (Pred == FCmpInst::FCMP_TRUE)
4130 return getTrue(RetTy);
4131
4132 // fcmp pred x, poison and fcmp pred poison, x
4133 // fold to poison
4134 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4135 return PoisonValue::get(RetTy);
4136
4137 // fcmp pred x, undef and fcmp pred undef, x
4138 // fold to true if unordered, false if ordered
4139 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4140 // Choosing NaN for the undef will always make unordered comparison succeed
4141 // and ordered comparison fail.
4142 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4143 }
4144
4145 // fcmp x,x -> true/false. Not all compares are foldable.
4146 if (LHS == RHS) {
4147 if (CmpInst::isTrueWhenEqual(Pred))
4148 return getTrue(RetTy);
4149 if (CmpInst::isFalseWhenEqual(Pred))
4150 return getFalse(RetTy);
4151 }
4152
4153 // Fold (un)ordered comparison if we can determine there are no NaNs.
4154 //
4155 // This catches the two-variable input case; constants are handled below as a
4156 // class-like compare.
4157 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4158 KnownFPClass RHSClass = computeKnownFPClass(RHS, fcAllFlags, Q);
4159 KnownFPClass LHSClass = computeKnownFPClass(LHS, fcAllFlags, Q);
4160
4161 if (FMF.noNaNs() ||
4162 (RHSClass.isKnownNeverNaN() && LHSClass.isKnownNeverNaN()))
4163 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4164
4165 if (RHSClass.isKnownAlwaysNaN() || LHSClass.isKnownAlwaysNaN())
4166 return ConstantInt::get(RetTy, Pred == CmpInst::FCMP_UNO);
4167 }
4168
4169 if (std::optional<bool> Res =
4170 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4171 return ConstantInt::getBool(RetTy, *Res);
4172
4173 const APFloat *C = nullptr;
4174 match(RHS, m_APFloatAllowPoison(C));
4175 std::optional<KnownFPClass> FullKnownClassLHS;
4176
4177 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4178 // RHS is a 0.
4179 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4180 fcAllFlags) {
4181 if (FullKnownClassLHS)
4182 return *FullKnownClassLHS;
4183 return computeKnownFPClass(LHS, FMF, InterestedFlags, Q);
4184 };
4185
4186 if (C && Q.CxtI) {
4187 // Fold out compares that express a class test.
4188 //
4189 // FIXME: Should be able to perform folds without context
4190 // instruction. Always pass in the context function?
4191
4192 const Function *ParentF = Q.CxtI->getFunction();
4193 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4194 if (ClassVal) {
4195 FullKnownClassLHS = computeLHSClass();
4196 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4197 return getFalse(RetTy);
4198 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4199 return getTrue(RetTy);
4200 }
4201 }
4202
4203 // Handle fcmp with constant RHS.
4204 if (C) {
4205 // TODO: If we always required a context function, we wouldn't need to
4206 // special case nans.
4207 if (C->isNaN())
4208 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4209
4210 // TODO: Need version fcmpToClassTest which returns implied class when the
4211 // compare isn't a complete class test. e.g. > 1.0 implies fcPositive, but
4212 // isn't implementable as a class call.
4213 if (C->isNegative() && !C->isNegZero()) {
4214 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4215
4216 // TODO: We can catch more cases by using a range check rather than
4217 // relying on CannotBeOrderedLessThanZero.
4218 switch (Pred) {
4219 case FCmpInst::FCMP_UGE:
4220 case FCmpInst::FCMP_UGT:
4221 case FCmpInst::FCMP_UNE: {
4222 KnownFPClass KnownClass = computeLHSClass(Interested);
4223
4224 // (X >= 0) implies (X > C) when (C < 0)
4225 if (KnownClass.cannotBeOrderedLessThanZero())
4226 return getTrue(RetTy);
4227 break;
4228 }
4229 case FCmpInst::FCMP_OEQ:
4230 case FCmpInst::FCMP_OLE:
4231 case FCmpInst::FCMP_OLT: {
4232 KnownFPClass KnownClass = computeLHSClass(Interested);
4233
4234 // (X >= 0) implies !(X < C) when (C < 0)
4235 if (KnownClass.cannotBeOrderedLessThanZero())
4236 return getFalse(RetTy);
4237 break;
4238 }
4239 default:
4240 break;
4241 }
4242 }
4243 // Check comparison of [minnum/maxnum with constant] with other constant.
4244 const APFloat *C2;
4245 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4246 *C2 < *C) ||
4247 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4248 *C2 > *C)) {
4249 bool IsMaxNum =
4250 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4251 // The ordered relationship and minnum/maxnum guarantee that we do not
4252 // have NaN constants, so ordered/unordered preds are handled the same.
4253 switch (Pred) {
4254 case FCmpInst::FCMP_OEQ:
4255 case FCmpInst::FCMP_UEQ:
4256 // minnum(X, LesserC) == C --> false
4257 // maxnum(X, GreaterC) == C --> false
4258 return getFalse(RetTy);
4259 case FCmpInst::FCMP_ONE:
4260 case FCmpInst::FCMP_UNE:
4261 // minnum(X, LesserC) != C --> true
4262 // maxnum(X, GreaterC) != C --> true
4263 return getTrue(RetTy);
4264 case FCmpInst::FCMP_OGE:
4265 case FCmpInst::FCMP_UGE:
4266 case FCmpInst::FCMP_OGT:
4267 case FCmpInst::FCMP_UGT:
4268 // minnum(X, LesserC) >= C --> false
4269 // minnum(X, LesserC) > C --> false
4270 // maxnum(X, GreaterC) >= C --> true
4271 // maxnum(X, GreaterC) > C --> true
4272 return ConstantInt::get(RetTy, IsMaxNum);
4273 case FCmpInst::FCMP_OLE:
4274 case FCmpInst::FCMP_ULE:
4275 case FCmpInst::FCMP_OLT:
4276 case FCmpInst::FCMP_ULT:
4277 // minnum(X, LesserC) <= C --> true
4278 // minnum(X, LesserC) < C --> true
4279 // maxnum(X, GreaterC) <= C --> false
4280 // maxnum(X, GreaterC) < C --> false
4281 return ConstantInt::get(RetTy, !IsMaxNum);
4282 default:
4283 // TRUE/FALSE/ORD/UNO should be handled before this.
4284 llvm_unreachable("Unexpected fcmp predicate");
4285 }
4286 }
4287 }
4288
4289 // TODO: Could fold this with above if there were a matcher which returned all
4290 // classes in a non-splat vector.
4291 if (match(RHS, m_AnyZeroFP())) {
4292 switch (Pred) {
4293 case FCmpInst::FCMP_OGE:
4294 case FCmpInst::FCMP_ULT: {
4295 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4296 if (!FMF.noNaNs())
4297 Interested |= fcNan;
4298
4299 KnownFPClass Known = computeLHSClass(Interested);
4300
4301 // Positive or zero X >= 0.0 --> true
4302 // Positive or zero X < 0.0 --> false
4303 if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4304 Known.cannotBeOrderedLessThanZero())
4305 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4306 break;
4307 }
4308 case FCmpInst::FCMP_UGE:
4309 case FCmpInst::FCMP_OLT: {
4310 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4311 KnownFPClass Known = computeLHSClass(Interested);
4312
4313 // Positive or zero or nan X >= 0.0 --> true
4314 // Positive or zero or nan X < 0.0 --> false
4315 if (Known.cannotBeOrderedLessThanZero())
4316 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4317 break;
4318 }
4319 default:
4320 break;
4321 }
4322 }
4323
4324 // If the comparison is with the result of a select instruction, check whether
4325 // comparing with either branch of the select always yields the same value.
4326 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4327 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4328 return V;
4329
4330 // If the comparison is with the result of a phi instruction, check whether
4331 // doing the compare with each incoming phi value yields a common result.
4332 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4333 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4334 return V;
4335
4336 return nullptr;
4337}
4338
4339 Value *llvm::simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
4340 FastMathFlags FMF, const SimplifyQuery &Q) {
4341 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4342}
4343
4344 static Value *simplifyWithOpsReplaced(Value *V,
4345 ArrayRef<std::pair<Value *, Value *>> Ops,
4346 const SimplifyQuery &Q,
4347 bool AllowRefinement,
4348 SmallVectorImpl<Instruction *> *DropFlags,
4349 unsigned MaxRecurse) {
4350 assert((AllowRefinement || !Q.CanUseUndef) &&
4351 "If AllowRefinement=false then CanUseUndef=false");
4352 for (const auto &OpAndRepOp : Ops) {
4353 // We cannot replace a constant, and shouldn't even try.
4354 if (isa<Constant>(OpAndRepOp.first))
4355 return nullptr;
4356
4357 // Trivial replacement.
4358 if (V == OpAndRepOp.first)
4359 return OpAndRepOp.second;
4360 }
4361
4362 if (!MaxRecurse--)
4363 return nullptr;
4364
4365 auto *I = dyn_cast<Instruction>(V);
4366 if (!I)
4367 return nullptr;
4368
4369 // The arguments of a phi node might refer to a value from a previous
4370 // cycle iteration.
4371 if (isa<PHINode>(I))
4372 return nullptr;
4373
4374 // Don't fold away llvm.is.constant checks based on assumptions.
4375 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4376 return nullptr;
4377
4378 // Don't simplify freeze.
4379 if (isa<FreezeInst>(I))
4380 return nullptr;
4381
4382 for (const auto &OpAndRepOp : Ops) {
4383 // For vector types, the simplification must hold per-lane, so forbid
4384 // potentially cross-lane operations like shufflevector.
4385 if (OpAndRepOp.first->getType()->isVectorTy() &&
4386 !isNotCrossLaneOperation(I))
4387 return nullptr;
4388 }
4389
4390 // Replace Op with RepOp in instruction operands.
4391 SmallVector<Value *, 8> NewOps;
4392 bool AnyReplaced = false;
4393 for (Value *InstOp : I->operands()) {
4394 if (Value *NewInstOp = simplifyWithOpsReplaced(
4395 InstOp, Ops, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4396 NewOps.push_back(NewInstOp);
4397 AnyReplaced = InstOp != NewInstOp;
4398 } else {
4399 NewOps.push_back(InstOp);
4400 }
4401
4402 // Bail out if any operand is undef and SimplifyQuery disables undef
4403 // simplification. Constant folding currently doesn't respect this option.
4404 if (isa<UndefValue>(NewOps.back()) && !Q.CanUseUndef)
4405 return nullptr;
4406 }
4407
4408 if (!AnyReplaced)
4409 return nullptr;
4410
4411 if (!AllowRefinement) {
4412 // General InstSimplify functions may refine the result, e.g. by returning
4413 // a constant for a potentially poison value. To avoid this, implement only
4414 // a few non-refining but profitable transforms here.
4415
4416 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4417 unsigned Opcode = BO->getOpcode();
4418 // id op x -> x, x op id -> x
4419 // Exclude floats, because x op id may produce a different NaN value.
4420 if (!BO->getType()->isFPOrFPVectorTy()) {
4421 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4422 return NewOps[1];
4423 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4424 /* RHS */ true))
4425 return NewOps[0];
4426 }
4427
4428 // x & x -> x, x | x -> x
4429 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4430 NewOps[0] == NewOps[1]) {
4431 // or disjoint x, x results in poison.
4432 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4433 if (PDI->isDisjoint()) {
4434 if (!DropFlags)
4435 return nullptr;
4436 DropFlags->push_back(BO);
4437 }
4438 }
4439 return NewOps[0];
4440 }
4441
4442 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4443 // by assumption and this case never wraps, so nowrap flags can be
4444 // ignored.
4445 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4446 NewOps[0] == NewOps[1] &&
4447 any_of(Ops, [=](const auto &Rep) { return NewOps[0] == Rep.second; }))
4448 return Constant::getNullValue(I->getType());
4449
4450 // If we are substituting an absorber constant into a binop and extra
4451 // poison can't leak if we remove the select -- because both operands of
4452 // the binop are based on the same value -- then it may be safe to replace
4453 // the value with the absorber constant. Examples:
4454 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4455 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4456 // (Op == -1) ? -1 : (Op | (binop C, Op)) --> Op | (binop C, Op)
4457 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4458 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4459 any_of(Ops,
4460 [=](const auto &Rep) { return impliesPoison(BO, Rep.first); }))
4461 return Absorber;
4462 }
4463
4464 if (isa<GetElementPtrInst>(I)) {
4465 // getelementptr x, 0 -> x.
4466 // This never returns poison, even if inbounds is set.
4467 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4468 return NewOps[0];
4469 }
4470 } else {
4471 // The simplification queries below may return the original value. Consider:
4472 // %div = udiv i32 %arg, %arg2
4473 // %mul = mul nsw i32 %div, %arg2
4474 // %cmp = icmp eq i32 %mul, %arg
4475 // %sel = select i1 %cmp, i32 %div, i32 undef
4476 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4477 // simplifies back to %arg. This can only happen because %mul does not
4478 // dominate %div. To ensure a consistent return value contract, we make sure
4479 // that this case returns nullptr as well.
4480 auto PreventSelfSimplify = [V](Value *Simplified) {
4481 return Simplified != V ? Simplified : nullptr;
4482 };
4483
4484 return PreventSelfSimplify(
4485 ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4486 }
4487
4488 // If all operands are constant after substituting Op for RepOp then we can
4489 // constant fold the instruction.
4490 SmallVector<Constant *, 8> ConstOps;
4491 for (Value *NewOp : NewOps) {
4492 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4493 ConstOps.push_back(ConstOp);
4494 else
4495 return nullptr;
4496 }
4497
4498 // Consider:
4499 // %cmp = icmp eq i32 %x, 2147483647
4500 // %add = add nsw i32 %x, 1
4501 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4502 //
4503 // We can't replace %sel with %add unless we strip away the flags (which
4504 // will be done in InstCombine).
4505 // TODO: This may be unsound, because it only catches some forms of
4506 // refinement.
4507 if (!AllowRefinement) {
4508 if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4509 // abs cannot create poison if the value is known to never be int_min.
4510 if (auto *II = dyn_cast<IntrinsicInst>(I);
4511 II && II->getIntrinsicID() == Intrinsic::abs) {
4512 if (!ConstOps[0]->isNotMinSignedValue())
4513 return nullptr;
4514 } else
4515 return nullptr;
4516 }
4517 Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4518 /*AllowNonDeterministic=*/false);
4519 if (DropFlags && Res && I->hasPoisonGeneratingAnnotations())
4520 DropFlags->push_back(I);
4521 return Res;
4522 }
4523
4524 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI,
4525 /*AllowNonDeterministic=*/false);
4526}
4527
4528 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4529 const SimplifyQuery &Q,
4530 bool AllowRefinement,
4531 SmallVectorImpl<Instruction *> *DropFlags,
4532 unsigned MaxRecurse) {
4533 return simplifyWithOpsReplaced(V, {{Op, RepOp}}, Q, AllowRefinement,
4534 DropFlags, MaxRecurse);
4535}
4536
4537 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4538 const SimplifyQuery &Q,
4539 bool AllowRefinement,
4540 SmallVectorImpl<Instruction *> *DropFlags) {
4541 // If refinement is disabled, also disable undef simplifications (which are
4542 // always refinements) in SimplifyQuery.
4543 if (!AllowRefinement)
4544 return ::simplifyWithOpReplaced(V, Op, RepOp, Q.getWithoutUndef(),
4545 AllowRefinement, DropFlags, RecursionLimit);
4546 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4547 RecursionLimit);
4548}
4549
4550/// Try to simplify a select instruction when its condition operand is an
4551/// integer comparison where one operand of the compare is a constant.
4552static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4553 const APInt *Y, bool TrueWhenUnset) {
4554 const APInt *C;
4555
4556 // (X & Y) == 0 ? X & ~Y : X --> X
4557 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4558 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4559 *Y == ~*C)
4560 return TrueWhenUnset ? FalseVal : TrueVal;
4561
4562 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4563 // (X & Y) != 0 ? X : X & ~Y --> X
4564 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4565 *Y == ~*C)
4566 return TrueWhenUnset ? FalseVal : TrueVal;
4567
4568 if (Y->isPowerOf2()) {
4569 // (X & Y) == 0 ? X | Y : X --> X | Y
4570 // (X & Y) != 0 ? X | Y : X --> X
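// Illustrative example: "(and i32 %x, 4) == 0 ? (or i32 %x, 4) : %x" yields
// a value with bit 2 set on both paths, so it folds to "or i32 %x, 4"
// (unless the or carries the disjoint flag; see the checks below).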
4571 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4572 *Y == *C) {
4573 // We can't return the or if it has the disjoint flag.
4574 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4575 return nullptr;
4576 return TrueWhenUnset ? TrueVal : FalseVal;
4577 }
4578
4579 // (X & Y) == 0 ? X : X | Y --> X
4580 // (X & Y) != 0 ? X : X | Y --> X | Y
4581 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4582 *Y == *C) {
4583 // We can't return the or if it has the disjoint flag.
4584 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4585 return nullptr;
4586 return TrueWhenUnset ? TrueVal : FalseVal;
4587 }
4588 }
4589
4590 return nullptr;
4591}
4592
4593static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4594 CmpPredicate Pred, Value *TVal,
4595 Value *FVal) {
4596 // Canonicalize common cmp+sel operand as CmpLHS.
4597 if (CmpRHS == TVal || CmpRHS == FVal) {
4598 std::swap(CmpLHS, CmpRHS);
4599 Pred = ICmpInst::getSwappedPredicate(Pred);
4600 }
4601
4602 // Canonicalize common cmp+sel operand as TVal.
4603 if (CmpLHS == FVal) {
4604 std::swap(TVal, FVal);
4605 Pred = ICmpInst::getInversePredicate(Pred);
4606 }
4607
4608 // A vector select may be shuffling together elements that are equivalent
4609 // based on the max/min/select relationship.
4610 Value *X = CmpLHS, *Y = CmpRHS;
4611 bool PeekedThroughSelectShuffle = false;
4612 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4613 if (Shuf && Shuf->isSelect()) {
4614 if (Shuf->getOperand(0) == Y)
4615 FVal = Shuf->getOperand(1);
4616 else if (Shuf->getOperand(1) == Y)
4617 FVal = Shuf->getOperand(0);
4618 else
4619 return nullptr;
4620 PeekedThroughSelectShuffle = true;
4621 }
4622
4623 // (X pred Y) ? X : max/min(X, Y)
4624 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4625 if (!MMI || TVal != X ||
4626 !match(MMI, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4627 return nullptr;
4628
4629 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4630 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4631 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4632 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4633 //
4634 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4635 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4636 // If Z is true, this reduces as above, and if Z is false:
4637 // (X > Y) ? X : Y --> max(X, Y)
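// Illustrative example: "(icmp sgt i32 %x, %y) ? %x : (smax i32 %x, %y)"
// folds to the smax, while with slt the select folds to %x instead.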
4638 ICmpInst::Predicate MMPred = MMI->getPredicate();
4639 if (MMPred == CmpInst::getStrictPredicate(Pred))
4640 return MMI;
4641
4642 // Other transforms are not valid with a shuffle.
4643 if (PeekedThroughSelectShuffle)
4644 return nullptr;
4645
4646 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4647 if (Pred == CmpInst::ICMP_EQ)
4648 return MMI;
4649
4650 // (X != Y) ? X : max/min(X, Y) --> X
4651 if (Pred == CmpInst::ICMP_NE)
4652 return X;
4653
4654 // (X < Y) ? X : max(X, Y) --> X
4655 // (X <= Y) ? X : max(X, Y) --> X
4656 // (X > Y) ? X : min(X, Y) --> X
4657 // (X >= Y) ? X : min(X, Y) --> X
4658 CmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
4659 if (MMPred == CmpInst::getStrictPredicate(InvPred))
4660 return X;
4661
4662 return nullptr;
4663}
4664
4665/// An alternative way to test if a bit is set or not.
4666 /// Uses e.g. sgt/slt or trunc instead of eq/ne.
4667static Value *simplifySelectWithBitTest(Value *CondVal, Value *TrueVal,
4668 Value *FalseVal) {
4669 if (auto Res = decomposeBitTest(CondVal))
4670 return simplifySelectBitTest(TrueVal, FalseVal, Res->X, &Res->Mask,
4671 Res->Pred == ICmpInst::ICMP_EQ);
4672
4673 return nullptr;
4674}
4675
4676/// Try to simplify a select instruction when its condition operand is an
4677/// integer equality or floating-point equivalence comparison.
4678 static Value *simplifySelectWithEquivalence(
4679 ArrayRef<std::pair<Value *, Value *>> Replacements, Value *TrueVal,
4680 Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse) {
4681 Value *SimplifiedFalseVal =
4682 simplifyWithOpsReplaced(FalseVal, Replacements, Q.getWithoutUndef(),
4683 /* AllowRefinement */ false,
4684 /* DropFlags */ nullptr, MaxRecurse);
4685 if (!SimplifiedFalseVal)
4686 SimplifiedFalseVal = FalseVal;
4687
4688 Value *SimplifiedTrueVal =
4689 simplifyWithOpsReplaced(TrueVal, Replacements, Q,
4690 /* AllowRefinement */ true,
4691 /* DropFlags */ nullptr, MaxRecurse);
4692 if (!SimplifiedTrueVal)
4693 SimplifiedTrueVal = TrueVal;
4694
4695 if (SimplifiedFalseVal == SimplifiedTrueVal)
4696 return FalseVal;
4697
4698 return nullptr;
4699}
4700
4701/// Try to simplify a select instruction when its condition operand is an
4702/// integer comparison.
4703static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4704 Value *FalseVal,
4705 const SimplifyQuery &Q,
4706 unsigned MaxRecurse) {
4707 CmpPredicate Pred;
4708 Value *CmpLHS, *CmpRHS;
4709 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4710 return nullptr;
4711
4712 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4713 return V;
4714
4715 // Canonicalize ne to eq predicate.
4716 if (Pred == ICmpInst::ICMP_NE) {
4717 Pred = ICmpInst::ICMP_EQ;
4718 std::swap(TrueVal, FalseVal);
4719 }
4720
4721 // Check for integer min/max with a limit constant:
4722 // X > MIN_INT ? X : MIN_INT --> X
4723 // X < MAX_INT ? X : MAX_INT --> X
4724 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4725 Value *X, *Y;
4726 SelectPatternFlavor SPF =
4727 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4728 X, Y)
4729 .Flavor;
4730 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4731 APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4732 X->getType()->getScalarSizeInBits());
4733 if (match(Y, m_SpecificInt(LimitC)))
4734 return X;
4735 }
4736 }
4737
4738 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4739 Value *X;
4740 const APInt *Y;
4741 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4742 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4743 /*TrueWhenUnset=*/true))
4744 return V;
4745
4746 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4747 Value *ShAmt;
4748 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4749 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4750 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4751 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4752 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4753 return X;
4754
4755 // Test for a zero-shift-guard-op around rotates. These are used to
4756 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4757 // intrinsics do not have that problem.
4758 // We do not allow this transform for the general funnel shift case because
4759 // that would not preserve the poison safety of the original code.
4760 auto isRotate =
4761 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4762 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4763 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4764 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4765 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4766 Pred == ICmpInst::ICMP_EQ)
4767 return FalseVal;
4768
4769 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4770 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4771 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4772 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4773 return FalseVal;
4774 if (match(TrueVal,
4775 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4776 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4777 return FalseVal;
4778 }
4779
4780 // If we have a scalar equality comparison, then we know the value in one of
4781 // the arms of the select. See if substituting this value into the arm and
4782 // simplifying the result yields the same value as the other arm.
4783 if (Pred == ICmpInst::ICMP_EQ) {
4784 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4785 canReplacePointersIfEqual(CmpLHS, CmpRHS, Q.DL))
4786 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, TrueVal,
4787 FalseVal, Q, MaxRecurse))
4788 return V;
4789 if (CmpLHS->getType()->isIntOrIntVectorTy() ||
4790 canReplacePointersIfEqual(CmpRHS, CmpLHS, Q.DL))
4791 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, TrueVal,
4792 FalseVal, Q, MaxRecurse))
4793 return V;
4794
4795 Value *X;
4796 Value *Y;
4797 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4798 if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4799 match(CmpRHS, m_Zero())) {
4800 // (X | Y) == 0 implies X == 0 and Y == 0.
4801 if (Value *V = simplifySelectWithEquivalence(
4802 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4803 return V;
4804 }
4805
4806 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4807 if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4808 match(CmpRHS, m_AllOnes())) {
4809 // (X & Y) == -1 implies X == -1 and Y == -1.
4810 if (Value *V = simplifySelectWithEquivalence(
4811 {{X, CmpRHS}, {Y, CmpRHS}}, TrueVal, FalseVal, Q, MaxRecurse))
4812 return V;
4813 }
4814 }
4815
4816 return nullptr;
4817}
4818
4819/// Try to simplify a select instruction when its condition operand is a
4820/// floating-point comparison.
4821 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4822 const SimplifyQuery &Q,
4823 unsigned MaxRecurse) {
4824 CmpPredicate Pred;
4825 Value *CmpLHS, *CmpRHS;
4826 if (!match(Cond, m_FCmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4827 return nullptr;
4828 FCmpInst *I = cast<FCmpInst>(Cond);
4829
4830 bool IsEquiv = I->isEquivalence();
4831 if (I->isEquivalence(/*Invert=*/true)) {
4832 std::swap(T, F);
4833 Pred = FCmpInst::getInversePredicate(Pred);
4834 IsEquiv = true;
4835 }
4836
4837 // This transform is safe if at least one operand is known to not be zero.
4838 // Otherwise, the select can change the sign of a zero operand.
4839 if (IsEquiv) {
4840 if (Value *V = simplifySelectWithEquivalence({{CmpLHS, CmpRHS}}, T, F, Q,
4841 MaxRecurse))
4842 return V;
4843 if (Value *V = simplifySelectWithEquivalence({{CmpRHS, CmpLHS}}, T, F, Q,
4844 MaxRecurse))
4845 return V;
4846 }
4847
4848 // Canonicalize CmpLHS to be T, and CmpRHS to be F, if they're swapped.
4849 if (CmpLHS == F && CmpRHS == T)
4850 std::swap(CmpLHS, CmpRHS);
4851
4852 if (CmpLHS != T || CmpRHS != F)
4853 return nullptr;
4854
4855 // This transform is also safe if we do not have (do not care about) -0.0.
4856 if (Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros()) {
4857 // (T == F) ? T : F --> F
4858 if (Pred == FCmpInst::FCMP_OEQ)
4859 return F;
4860
4861 // (T != F) ? T : F --> T
4862 if (Pred == FCmpInst::FCMP_UNE)
4863 return T;
4864 }
4865
4866 return nullptr;
4867}
4868
4869/// Given operands for a SelectInst, see if we can fold the result.
4870/// If not, this returns null.
4871static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4872 const SimplifyQuery &Q, unsigned MaxRecurse) {
4873 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4874 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4875 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4876 if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4877 return C;
4878
4879 // select poison, X, Y -> poison
4880 if (isa<PoisonValue>(CondC))
4881 return PoisonValue::get(TrueVal->getType());
4882
4883 // select undef, X, Y -> X or Y
4884 if (Q.isUndefValue(CondC))
4885 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4886
4887 // select true, X, Y --> X
4888 // select false, X, Y --> Y
4889 // For vectors, allow undef/poison elements in the condition to match the
4890 // defined elements, so we can eliminate the select.
4891 if (match(CondC, m_One()))
4892 return TrueVal;
4893 if (match(CondC, m_Zero()))
4894 return FalseVal;
4895 }
4896
4897 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4898 "Select must have bool or bool vector condition");
4899 assert(TrueVal->getType() == FalseVal->getType() &&
4900 "Select must have same types for true/false ops");
4901
4902 if (Cond->getType() == TrueVal->getType()) {
4903 // select i1 Cond, i1 true, i1 false --> i1 Cond
4904 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4905 return Cond;
4906
4907 // (X && Y) ? X : Y --> Y (commuted 2 ways)
4908 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4909 return FalseVal;
4910
4911 // (X || Y) ? X : Y --> X (commuted 2 ways)
4912 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4913 return TrueVal;
4914
4915 // (X || Y) ? false : X --> false (commuted 2 ways)
4916 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4917 match(TrueVal, m_ZeroInt()))
4918 return ConstantInt::getFalse(Cond->getType());
4919
4920 // Match patterns that end in logical-and.
4921 if (match(FalseVal, m_ZeroInt())) {
4922 // !(X || Y) && X --> false (commuted 2 ways)
4923 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4924 return ConstantInt::getFalse(Cond->getType());
4925 // X && !(X || Y) --> false (commuted 2 ways)
4926 if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
4927 return ConstantInt::getFalse(Cond->getType());
4928
4929 // (X || Y) && Y --> Y (commuted 2 ways)
4930 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4931 return TrueVal;
4932 // Y && (X || Y) --> Y (commuted 2 ways)
4933 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4934 return Cond;
4935
4936 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4937 Value *X, *Y;
4938 if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4939 match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4940 return X;
4941 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4942 match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4943 return X;
4944 }
4945
4946 // Match patterns that end in logical-or.
4947 if (match(TrueVal, m_One())) {
4948 // !(X && Y) || X --> true (commuted 2 ways)
4949 if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
4950 return ConstantInt::getTrue(Cond->getType());
4951 // X || !(X && Y) --> true (commuted 2 ways)
4952 if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
4953 return ConstantInt::getTrue(Cond->getType());
4954
4955 // (X && Y) || Y --> Y (commuted 2 ways)
4956 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4957 return FalseVal;
4958 // Y || (X && Y) --> Y (commuted 2 ways)
4959 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4960 return Cond;
4961 }
4962 }
4963
4964 // select ?, X, X -> X
4965 if (TrueVal == FalseVal)
4966 return TrueVal;
4967
4968 if (Cond == TrueVal) {
4969 // select i1 X, i1 X, i1 false --> X (logical-and)
4970 if (match(FalseVal, m_ZeroInt()))
4971 return Cond;
4972 // select i1 X, i1 X, i1 true --> true
4973 if (match(FalseVal, m_One()))
4974 return ConstantInt::getTrue(Cond->getType());
4975 }
4976 if (Cond == FalseVal) {
4977 // select i1 X, i1 true, i1 X --> X (logical-or)
4978 if (match(TrueVal, m_One()))
4979 return Cond;
4980 // select i1 X, i1 false, i1 X --> false
4981 if (match(TrueVal, m_ZeroInt()))
4982 return ConstantInt::getFalse(Cond->getType());
4983 }
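 // E.g. the folds above give:
 //   select i1 %x, i1 %x, i1 false --> %x (logical-and)
 //   select i1 %x, i1 true, i1 %x --> %x (logical-or)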
4984
4985 // If the true or false value is poison, we can fold to the other value.
4986 // If the true or false value is undef, we can fold to the other value as
4987 // long as the other value isn't poison.
4988 // select ?, poison, X -> X
4989 // select ?, undef, X -> X
4990 if (isa<PoisonValue>(TrueVal) ||
4991 (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
4992 return FalseVal;
4993 // select ?, X, poison -> X
4994 // select ?, X, undef -> X
4995 if (isa<PoisonValue>(FalseVal) ||
4996 (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
4997 return TrueVal;
4998
4999 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
5000 Constant *TrueC, *FalseC;
5001 if (isa<FixedVectorType>(TrueVal->getType()) &&
5002 match(TrueVal, m_Constant(TrueC)) &&
5003 match(FalseVal, m_Constant(FalseC))) {
5004 unsigned NumElts =
5005 cast<FixedVectorType>(TrueC->getType())->getNumElements();
5006 SmallVector<Constant *, 16> NewC;
5007 for (unsigned i = 0; i != NumElts; ++i) {
5008 // Bail out on incomplete vector constants.
5009 Constant *TEltC = TrueC->getAggregateElement(i);
5010 Constant *FEltC = FalseC->getAggregateElement(i);
5011 if (!TEltC || !FEltC)
5012 break;
5013
5014 // If the elements match (undef or not), that value is the result. If only
5015 // one element is undef, choose the defined element as the safe result.
5016 if (TEltC == FEltC)
5017 NewC.push_back(TEltC);
5018 else if (isa<PoisonValue>(TEltC) ||
5019 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
5020 NewC.push_back(FEltC);
5021 else if (isa<PoisonValue>(FEltC) ||
5022 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
5023 NewC.push_back(TEltC);
5024 else
5025 break;
5026 }
5027 if (NewC.size() == NumElts)
5028 return ConstantVector::get(NewC);
5029 }
5030
5031 if (Value *V =
5032 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5033 return V;
5034
5035 if (Value *V = simplifySelectWithBitTest(Cond, TrueVal, FalseVal))
5036 return V;
5037
5038 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q, MaxRecurse))
5039 return V;
5040
5041 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
5042 if (Imp)
5043 return *Imp ? TrueVal : FalseVal;
5044
5045 return nullptr;
5046}
5047
5048Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
5049 const SimplifyQuery &Q) {
5050 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
5051}
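// Typical usage from a pass (a sketch; assumes SQ is a SimplifyQuery built
// from the pass's analyses):
//   if (Value *V = simplifySelectInst(SI->getCondition(), SI->getTrueValue(),
//                                     SI->getFalseValue(),
//                                     SQ.getWithInstruction(SI)))
//     SI->replaceAllUsesWith(V);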
5052
5053/// Given operands for a GetElementPtrInst, see if we can fold the result.
5054/// If not, this returns null.
5055static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
5056 ArrayRef<Value *> Indices, GEPNoWrapFlags NW,
5057 const SimplifyQuery &Q, unsigned) {
5058 // The address space of the GEP pointer operand.
5059 unsigned AS =
5060 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
5061
5062 // getelementptr P -> P.
5063 if (Indices.empty())
5064 return Ptr;
5065
5066 // Compute the (pointer) type returned by the GEP instruction.
5067 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
5068 Type *GEPTy = Ptr->getType();
5069 if (!GEPTy->isVectorTy()) {
5070 for (Value *Op : Indices) {
5071 // If one of the operands is a vector, the result type is a vector of
5072 // pointers. All vector operands must have the same number of elements.
5073 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
5074 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
5075 break;
5076 }
5077 }
5078 }
5079
5080 // All-zero GEP is a no-op, unless it performs a vector splat.
5081 if (Ptr->getType() == GEPTy && all_of(Indices, match_fn(m_Zero())))
5082 return Ptr;
5083
5084 // getelementptr poison, idx -> poison
5085 // getelementptr baseptr, poison -> poison
5086 if (isa<PoisonValue>(Ptr) || any_of(Indices, IsaPred<PoisonValue>))
5087 return PoisonValue::get(GEPTy);
5088
5089 // getelementptr undef, idx -> undef
5090 if (Q.isUndefValue(Ptr))
5091 return UndefValue::get(GEPTy);
5092
5093 bool IsScalableVec =
5094 SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
5095 return isa<ScalableVectorType>(V->getType());
5096 });
5097
5098 if (Indices.size() == 1) {
5099 Type *Ty = SrcTy;
5100 if (!IsScalableVec && Ty->isSized()) {
5101 Value *P;
5102 uint64_t C;
5103 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5104 // getelementptr P, N -> P if P points to a type of zero size.
5105 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5106 return Ptr;
5107
5108 // The following transforms are only safe if the ptrtoint cast
5109 // doesn't truncate the pointers.
5110 if (Indices[0]->getType()->getScalarSizeInBits() ==
5111 Q.DL.getPointerSizeInBits(AS)) {
5112 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5113 return P->getType() == GEPTy &&
5114 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
5115 };
5116 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5117 if (TyAllocSize == 1 &&
5118 match(Indices[0],
5119 m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
5120 CanSimplify())
5121 return P;
5122
5123 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5124 // size 1 << C.
5125 if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
5126 m_PtrToInt(m_Specific(Ptr))),
5127 m_ConstantInt(C))) &&
5128 TyAllocSize == 1ULL << C && CanSimplify())
5129 return P;
5130
5131 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5132 // size C.
5133 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
5134 m_PtrToInt(m_Specific(Ptr))),
5135 m_SpecificInt(TyAllocSize))) &&
5136 CanSimplify())
5137 return P;
5138 }
5139 }
5140 }
5141
5142 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5143 all_of(Indices.drop_back(1), match_fn(m_Zero()))) {
5144 unsigned IdxWidth =
5145 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5146 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5147 APInt BasePtrOffset(IdxWidth, 0);
5148 Value *StrippedBasePtr =
5149 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5150
5151 // Avoid creating inttoptr of zero here: While LLVM's treatment of
5152 // inttoptr is generally conservative, this particular case is folded to
5153 // a null pointer, which will have incorrect provenance.
5154
5155 // gep (gep V, C), (sub 0, V) -> C
5156 if (match(Indices.back(),
5157 m_Neg(m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5158 !BasePtrOffset.isZero()) {
5159 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5160 return ConstantExpr::getIntToPtr(CI, GEPTy);
5161 }
5162 // gep (gep V, C), (xor V, -1) -> C-1
5163 if (match(Indices.back(),
5164 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5165 !BasePtrOffset.isOne()) {
5166 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5167 return ConstantExpr::getIntToPtr(CI, GEPTy);
5168 }
5169 }
5170 }
5171
5172 // Check to see if this is constant foldable.
5173 if (!isa<Constant>(Ptr) || !all_of(Indices, IsaPred<Constant>))
5174 return nullptr;
5175
5176 if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5177 return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), std::nullopt,
5178 Indices);
5179
5180 auto *CE =
5181 ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices, NW);
5182 return ConstantFoldConstant(CE, Q.DL);
5183}
5184
5185Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5186 GEPNoWrapFlags NW, const SimplifyQuery &Q) {
5187 return ::simplifyGEPInst(SrcTy, Ptr, Indices, NW, Q, RecursionLimit);
5188}
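// E.g. the all-zero GEP fold above gives:
//   getelementptr i8, ptr %p, i64 0 --> %p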
5189
5190/// Given operands for an InsertValueInst, see if we can fold the result.
5191/// If not, this returns null.
5192static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5193 ArrayRef<unsigned> Idxs,
5194 const SimplifyQuery &Q, unsigned) {
5195 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5196 if (Constant *CVal = dyn_cast<Constant>(Val))
5197 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5198
5199 // insertvalue x, poison, n -> x
5200 // insertvalue x, undef, n -> x if x cannot be poison
5201 if (isa<PoisonValue>(Val) ||
5202 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5203 return Agg;
5204
5205 // insertvalue x, (extractvalue y, n), n
5206 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5207 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5208 EV->getIndices() == Idxs) {
5209 // insertvalue poison, (extractvalue y, n), n -> y
5210 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5211 if (isa<PoisonValue>(Agg) ||
5212 (Q.isUndefValue(Agg) &&
5213 isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5214 return EV->getAggregateOperand();
5215
5216 // insertvalue y, (extractvalue y, n), n -> y
5217 if (Agg == EV->getAggregateOperand())
5218 return Agg;
5219 }
5220
5221 return nullptr;
5222}
5223
5224Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5225 ArrayRef<unsigned> Idxs,
5226 const SimplifyQuery &Q) {
5227 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5228}
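// E.g. the insertvalue/extractvalue round-trip fold above gives:
//   %e = extractvalue {i32, i32} %agg, 0
//   insertvalue {i32, i32} %agg, i32 %e, 0 --> %agg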
5229
5230Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5231 const SimplifyQuery &Q) {
5232 // Try to constant fold.
5233 auto *VecC = dyn_cast<Constant>(Vec);
5234 auto *ValC = dyn_cast<Constant>(Val);
5235 auto *IdxC = dyn_cast<Constant>(Idx);
5236 if (VecC && ValC && IdxC)
5237 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5238
5239 // For fixed-length vector, fold into poison if index is out of bounds.
5240 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5241 if (isa<FixedVectorType>(Vec->getType()) &&
5242 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5243 return PoisonValue::get(Vec->getType());
5244 }
5245
5246 // If index is undef, it might be out of bounds (see above case)
5247 if (Q.isUndefValue(Idx))
5248 return PoisonValue::get(Vec->getType());
5249
5250 // If the scalar is poison, or it is undef and there is no risk of
5251 // propagating poison from the vector value, simplify to the vector value.
5252 if (isa<PoisonValue>(Val) ||
5253 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5254 return Vec;
5255
5256 // Inserting the splatted value into a constant splat does nothing.
5257 if (VecC && ValC && VecC->getSplatValue() == ValC)
5258 return Vec;
5259
5260 // If we are extracting a value from a vector, then inserting it into the same
5261 // place, that's the input vector:
5262 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5263 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5264 return Vec;
5265
5266 return nullptr;
5267}
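// E.g. the extract/insert round-trip fold above gives:
//   %e = extractelement <4 x float> %v, i64 %i
//   insertelement <4 x float> %v, float %e, i64 %i --> %v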
5268
5269/// Given operands for an ExtractValueInst, see if we can fold the result.
5270/// If not, this returns null.
5271static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5272 const SimplifyQuery &, unsigned) {
5273 if (auto *CAgg = dyn_cast<Constant>(Agg))
5274 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5275
5276 // extractvalue x, (insertvalue y, elt, n), n -> elt
5277 unsigned NumIdxs = Idxs.size();
5278 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5279 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5280 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5281 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5282 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5283 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5284 Idxs.slice(0, NumCommonIdxs)) {
5285 if (NumIdxs == NumInsertValueIdxs)
5286 return IVI->getInsertedValueOperand();
5287 break;
5288 }
5289 }
5290
5291 // Simplify umul_with_overflow where one operand is 1.
5292 Value *V;
5293 if (Idxs.size() == 1 &&
5294 (match(Agg,
5295 m_Intrinsic<Intrinsic::umul_with_overflow>(m_Value(V), m_One())) ||
5296 match(Agg, m_Intrinsic<Intrinsic::umul_with_overflow>(m_One(),
5297 m_Value(V))))) {
5298 if (Idxs[0] == 0)
5299 return V;
5300 assert(Idxs[0] == 1 && "invalid index");
5301 return getFalse(CmpInst::makeCmpResultType(V->getType()));
5302 }
5303
5304 return nullptr;
5305}
5306
5307Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5308 const SimplifyQuery &Q) {
5309 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5310}
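// E.g. the umul_with_overflow fold above gives:
//   %m = call {i32, i1} @llvm.umul.with.overflow.i32(i32 %x, i32 1)
//   extractvalue {i32, i1} %m, 0 --> %x
//   extractvalue {i32, i1} %m, 1 --> false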
5311
5312/// Given operands for an ExtractElementInst, see if we can fold the result.
5313/// If not, this returns null.
5314static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5315 const SimplifyQuery &Q, unsigned) {
5316 auto *VecVTy = cast<VectorType>(Vec->getType());
5317 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5318 if (auto *CIdx = dyn_cast<Constant>(Idx))
5319 return ConstantExpr::getExtractElement(CVec, CIdx);
5320
5321 if (Q.isUndefValue(Vec))
5322 return UndefValue::get(VecVTy->getElementType());
5323 }
5324
5325 // An undef extract index can be arbitrarily chosen to be an out-of-range
5326 // index value, which would result in the instruction being poison.
5327 if (Q.isUndefValue(Idx))
5328 return PoisonValue::get(VecVTy->getElementType());
5329
5330 // If extracting a specified index from the vector, see if we can recursively
5331 // find a previously computed scalar that was inserted into the vector.
5332 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5333 // For fixed-length vector, fold into undef if index is out of bounds.
5334 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5335 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5336 return PoisonValue::get(VecVTy->getElementType());
5337 // Handle case where an element is extracted from a splat.
5338 if (IdxC->getValue().ult(MinNumElts))
5339 if (auto *Splat = getSplatValue(Vec))
5340 return Splat;
5341 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5342 return Elt;
5343 } else {
5344 // extractelt x, (insertelt y, elt, n), n -> elt
5345 // If the possibly-variable indices are trivially known to be equal
5346 // (because they are the same operand) then use the value that was
5347 // inserted directly.
5348 auto *IE = dyn_cast<InsertElementInst>(Vec);
5349 if (IE && IE->getOperand(2) == Idx)
5350 return IE->getOperand(1);
5351
5352 // The index is not relevant if our vector is a splat.
5353 if (Value *Splat = getSplatValue(Vec))
5354 return Splat;
5355 }
5356 return nullptr;
5357}
5358
5359Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5360 const SimplifyQuery &Q) {
5361 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5362}
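// E.g. the splat handling above gives (even for a variable index %n):
//   %v = insertelement <4 x i32> poison, i32 %x, i64 0
//   %s = shufflevector <4 x i32> %v, <4 x i32> poison, <4 x i32> zeroinitializer
//   extractelement <4 x i32> %s, i64 %n --> %x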
5363
5364/// See if we can fold the given phi. If not, returns null.
5365static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5366 const SimplifyQuery &Q) {
5367 // WARNING: no matter how worthwhile it may seem, we can not perform PHI CSE
5368 // here, because the PHI we may succeed simplifying to was not
5369 // def-reachable from the original PHI!
5370
5371 // If all of the PHI's incoming values are the same then replace the PHI node
5372 // with the common value.
5373 Value *CommonValue = nullptr;
5374 bool HasPoisonInput = false;
5375 bool HasUndefInput = false;
5376 for (Value *Incoming : IncomingValues) {
5377 // If the incoming value is the phi node itself, it can safely be skipped.
5378 if (Incoming == PN)
5379 continue;
5380 if (isa<PoisonValue>(Incoming)) {
5381 HasPoisonInput = true;
5382 continue;
5383 }
5384 if (Q.isUndefValue(Incoming)) {
5385 // Remember that we saw an undef value, but otherwise ignore them.
5386 HasUndefInput = true;
5387 continue;
5388 }
5389 if (CommonValue && Incoming != CommonValue)
5390 return nullptr; // Not the same, bail out.
5391 CommonValue = Incoming;
5392 }
5393
5394 // If CommonValue is null then all of the incoming values were either undef,
5395 // poison or equal to the phi node itself.
5396 if (!CommonValue)
5397 return HasUndefInput ? UndefValue::get(PN->getType())
5398 : PoisonValue::get(PN->getType());
5399
5400 if (HasPoisonInput || HasUndefInput) {
5401 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5402 // instruction, we cannot return X as the result of the PHI node unless it
5403 // dominates the PHI block.
5404 if (!valueDominatesPHI(CommonValue, PN, Q.DT))
5405 return nullptr;
5406
5407 // Make sure we do not replace an undef value with poison.
5408 if (HasUndefInput &&
5409 !isGuaranteedNotToBePoison(CommonValue, Q.AC, Q.CxtI, Q.DT))
5410 return nullptr;
5411 return CommonValue;
5412 }
5413
5414 return CommonValue;
5415}
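// E.g. the common-value fold above gives (when %x dominates the phi block and
// cannot be poison):
//   %p = phi i32 [ %x, %bb0 ], [ undef, %bb1 ] --> %x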
5416
5417static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5418 const SimplifyQuery &Q, unsigned MaxRecurse) {
5419 if (auto *C = dyn_cast<Constant>(Op))
5420 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5421
5422 if (auto *CI = dyn_cast<CastInst>(Op)) {
5423 auto *Src = CI->getOperand(0);
5424 Type *SrcTy = Src->getType();
5425 Type *MidTy = CI->getType();
5426 Type *DstTy = Ty;
5427 if (Src->getType() == Ty) {
5428 auto FirstOp = CI->getOpcode();
5429 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5430 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5431 &Q.DL) == Instruction::BitCast)
5432 return Src;
5433 }
5434 }
5435
5436 // bitcast x -> x
5437 if (CastOpc == Instruction::BitCast)
5438 if (Op->getType() == Ty)
5439 return Op;
5440
5441 // ptrtoint (ptradd (Ptr, X - ptrtoint(Ptr))) -> X
5442 Value *Ptr, *X;
5443 if (CastOpc == Instruction::PtrToInt &&
5444 match(Op, m_PtrAdd(m_Value(Ptr),
5445 m_Sub(m_Value(X), m_PtrToInt(m_Deferred(Ptr))))) &&
5446 X->getType() == Ty && Ty == Q.DL.getIndexType(Ptr->getType()))
5447 return X;
5448
5449 return nullptr;
5450}
5451
5452Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5453 const SimplifyQuery &Q) {
5454 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5455}
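// E.g. the eliminable-cast-pair fold above gives:
//   %w = zext i32 %x to i64
//   trunc i64 %w to i32 --> %x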
5456
5457/// For the given destination element of a shuffle, peek through shuffles to
5458/// match a root vector source operand that contains that element in the same
5459/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5460static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5461 int MaskVal, Value *RootVec,
5462 unsigned MaxRecurse) {
5463 if (!MaxRecurse--)
5464 return nullptr;
5465
5466 // Bail out if any mask value is undefined. That kind of shuffle may be
5467 // simplified further based on demanded bits or other folds.
5468 if (MaskVal == -1)
5469 return nullptr;
5470
5471 // The mask value chooses which source operand we need to look at next.
5472 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5473 int RootElt = MaskVal;
5474 Value *SourceOp = Op0;
5475 if (MaskVal >= InVecNumElts) {
5476 RootElt = MaskVal - InVecNumElts;
5477 SourceOp = Op1;
5478 }
5479
5480 // If the source operand is a shuffle itself, look through it to find the
5481 // matching root vector.
5482 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5483 return foldIdentityShuffles(
5484 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5485 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5486 }
5487
5488 // The source operand is not a shuffle. Initialize the root vector value for
5489 // this shuffle if that has not been done yet.
5490 if (!RootVec)
5491 RootVec = SourceOp;
5492
5493 // Give up as soon as a source operand does not match the existing root value.
5494 if (RootVec != SourceOp)
5495 return nullptr;
5496
5497 // The element must be coming from the same lane in the source vector
5498 // (although it may have crossed lanes in intermediate shuffles).
5499 if (RootElt != DestElt)
5500 return nullptr;
5501
5502 return RootVec;
5503}
5504
5505static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5506 ArrayRef<int> Mask, Type *RetTy,
5507 const SimplifyQuery &Q,
5508 unsigned MaxRecurse) {
5509 if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5510 return PoisonValue::get(RetTy);
5511
5512 auto *InVecTy = cast<VectorType>(Op0->getType());
5513 unsigned MaskNumElts = Mask.size();
5514 ElementCount InVecEltCount = InVecTy->getElementCount();
5515
5516 bool Scalable = InVecEltCount.isScalable();
5517
5518 SmallVector<int, 32> Indices;
5519 Indices.assign(Mask.begin(), Mask.end());
5520
5521 // Canonicalization: If mask does not select elements from an input vector,
5522 // replace that input vector with poison.
5523 if (!Scalable) {
5524 bool MaskSelects0 = false, MaskSelects1 = false;
5525 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5526 for (unsigned i = 0; i != MaskNumElts; ++i) {
5527 if (Indices[i] == -1)
5528 continue;
5529 if ((unsigned)Indices[i] < InVecNumElts)
5530 MaskSelects0 = true;
5531 else
5532 MaskSelects1 = true;
5533 }
5534 if (!MaskSelects0)
5535 Op0 = PoisonValue::get(InVecTy);
5536 if (!MaskSelects1)
5537 Op1 = PoisonValue::get(InVecTy);
5538 }
5539
5540 auto *Op0Const = dyn_cast<Constant>(Op0);
5541 auto *Op1Const = dyn_cast<Constant>(Op1);
5542
5543 // If all operands are constant, constant fold the shuffle. This
5544 // transformation depends on the value of the mask which is not known at
5545 // compile time for scalable vectors
5546 if (Op0Const && Op1Const)
5547 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5548
5549 // Canonicalization: if only one input vector is constant, it shall be the
5550 // second one. This transformation depends on the value of the mask which
5551 // is not known at compile time for scalable vectors
5552 if (!Scalable && Op0Const && !Op1Const) {
5553 std::swap(Op0, Op1);
5554 ShuffleVectorInst::commuteShuffleMask(Indices,
5555 InVecEltCount.getKnownMinValue());
5556 }
5557
5558 // A splat of an inserted scalar constant becomes a vector constant:
5559 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5560 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5561 // original mask constant.
5562 // NOTE: This transformation depends on the value of the mask which is not
5563 // known at compile time for scalable vectors
5564 Constant *C;
5565 ConstantInt *IndexC;
5566 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5567 m_ConstantInt(IndexC)))) {
5568 // Match a splat shuffle mask of the insert index allowing undef elements.
5569 int InsertIndex = IndexC->getZExtValue();
5570 if (all_of(Indices, [InsertIndex](int MaskElt) {
5571 return MaskElt == InsertIndex || MaskElt == -1;
5572 })) {
5573 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5574
5575 // Shuffle mask poisons become poison constant result elements.
5576 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5577 for (unsigned i = 0; i != MaskNumElts; ++i)
5578 if (Indices[i] == -1)
5579 VecC[i] = PoisonValue::get(C->getType());
5580 return ConstantVector::get(VecC);
5581 }
5582 }
5583
5584 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5585 // value type is the same as the input vectors' type.
5586 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5587 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5588 all_equal(OpShuf->getShuffleMask()))
5589 return Op0;
5590
5591 // All remaining transformations depend on the value of the mask, which is
5592 // not known at compile time for scalable vectors.
5593 if (Scalable)
5594 return nullptr;
5595
5596 // Don't fold a shuffle with undef mask elements. This may get folded in a
5597 // better way using demanded bits or other analysis.
5598 // TODO: Should we allow this?
5599 if (is_contained(Indices, -1))
5600 return nullptr;
5601
5602 // Check if every element of this shuffle can be mapped back to the
5603 // corresponding element of a single root vector. If so, we don't need this
5604 // shuffle. This handles simple identity shuffles as well as chains of
5605 // shuffles that may widen/narrow and/or move elements across lanes and back.
5606 Value *RootVec = nullptr;
5607 for (unsigned i = 0; i != MaskNumElts; ++i) {
5608 // Note that recursion is limited for each vector element, so if any element
5609 // exceeds the limit, this will fail to simplify.
5610 RootVec =
5611 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5612
5613 // We can't replace a widening/narrowing shuffle with one of its operands.
5614 if (!RootVec || RootVec->getType() != RetTy)
5615 return nullptr;
5616 }
5617 return RootVec;
5618}
5619
5620/// Given operands for a ShuffleVectorInst, fold the result or return null.
5621Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5622 ArrayRef<int> Mask, Type *RetTy,
5623 const SimplifyQuery &Q) {
5624 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5625}
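// E.g. the identity-shuffle fold above gives:
//   shufflevector <4 x i32> %v, <4 x i32> poison,
//                 <4 x i32> <i32 0, i32 1, i32 2, i32 3> --> %v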
5626
5627static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5628 const SimplifyQuery &Q) {
5629 if (auto *C = dyn_cast<Constant>(Op))
5630 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5631 return nullptr;
5632}
5633
5634/// Given the operand for an FNeg, see if we can fold the result. If not, this
5635/// returns null.
5636static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5637 const SimplifyQuery &Q, unsigned MaxRecurse) {
5638 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5639 return C;
5640
5641 Value *X;
5642 // fneg (fneg X) ==> X
5643 if (match(Op, m_FNeg(m_Value(X))))
5644 return X;
5645
5646 return nullptr;
5647}
5648
5649Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5650 const SimplifyQuery &Q) {
5651 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5652}
5653
5654/// Try to propagate existing NaN values when possible. If not, replace the
5655/// constant or elements in the constant with a canonical NaN.
5656static Constant *propagateNaN(Constant *In) {
5657 Type *Ty = In->getType();
5658 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5659 unsigned NumElts = VecTy->getNumElements();
5660 SmallVector<Constant *, 32> NewC(NumElts);
5661 for (unsigned i = 0; i != NumElts; ++i) {
5662 Constant *EltC = In->getAggregateElement(i);
5663 // Poison elements propagate. NaN propagates except signaling is quieted.
5664 // Replace unknown or undef elements with canonical NaN.
5665 if (EltC && isa<PoisonValue>(EltC))
5666 NewC[i] = EltC;
5667 else if (EltC && EltC->isNaN())
5668 NewC[i] = ConstantFP::get(
5669 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5670 else
5671 NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5672 }
5673 return ConstantVector::get(NewC);
5674 }
5675
5676 // If it is not a fixed vector, but not a simple NaN either, return a
5677 // canonical NaN.
5678 if (!In->isNaN())
5679 return ConstantFP::getNaN(Ty);
5680
5681 // If we know this is a NaN and it's a scalable vector, we must have a splat
5682 // on our hands. Grab that before splatting a QNaN constant.
5683 if (isa<ScalableVectorType>(Ty)) {
5684 auto *Splat = In->getSplatValue();
5685 assert(Splat && Splat->isNaN() &&
5686 "Found a scalable-vector NaN but not a splat");
5687 In = Splat;
5688 }
5689
5690 // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5691 // preserve the sign/payload.
5692 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5693}
5694
5695/// Perform folds that are common to any floating-point operation. This implies
5696/// transforms based on poison/undef/NaN because the operation itself makes no
5697/// difference to the result.
5698static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5699 const SimplifyQuery &Q,
5700 fp::ExceptionBehavior ExBehavior,
5701 RoundingMode Rounding) {
5702 // Poison is independent of anything else. It always propagates from an
5703 // operand to a math result.
5704 if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5705 return PoisonValue::get(Ops[0]->getType());
5706
5707 for (Value *V : Ops) {
5708 bool IsNan = match(V, m_NaN());
5709 bool IsInf = match(V, m_Inf());
5710 bool IsUndef = Q.isUndefValue(V);
5711
5712 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5713 // (an undef operand can be chosen to be Nan/Inf), then the result of
5714 // this operation is poison.
5715 if (FMF.noNaNs() && (IsNan || IsUndef))
5716 return PoisonValue::get(V->getType());
5717 if (FMF.noInfs() && (IsInf || IsUndef))
5718 return PoisonValue::get(V->getType());
5719
5720 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5721 // Undef does not propagate because undef means that all bits can take on
5722 // any value. If this is undef * NaN for example, then the result values
5723 // (at least the exponent bits) are limited. Assume the undef is a
5724 // canonical NaN and propagate that.
5725 if (IsUndef)
5726 return ConstantFP::getNaN(V->getType());
5727 if (IsNan)
5728 return propagateNaN(cast<Constant>(V));
5729 } else if (ExBehavior != fp::ebStrict) {
5730 if (IsNan)
5731 return propagateNaN(cast<Constant>(V));
5732 }
5733 }
5734 return nullptr;
5735}
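// E.g. with fast-math flags, the poison/NaN handling above gives:
//   fadd nnan double %x, 0x7FF8000000000000 --> poison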
5736
5737/// Given operands for an FAdd, see if we can fold the result. If not, this
5738/// returns null.
5739static Value *
5740simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5741 const SimplifyQuery &Q, unsigned MaxRecurse,
5742 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5743 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5744 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5745 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5746 return C;
5747
5748 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5749 return C;
5750
5751 // fadd X, -0 ==> X
5752 // With strict/constrained FP, we have these possible edge cases that do
5753 // not simplify to Op0:
5754 // fadd SNaN, -0.0 --> QNaN
5755 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5756 if (canIgnoreSNaN(ExBehavior, FMF) &&
5757 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5758 FMF.noSignedZeros()))
5759 if (match(Op1, m_NegZeroFP()))
5760 return Op0;
5761
5762 // fadd X, 0 ==> X, when we know X is not -0
5763 if (canIgnoreSNaN(ExBehavior, FMF))
5764 if (match(Op1, m_PosZeroFP()) &&
5765 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
5766 return Op0;
5767
5768 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5769 return nullptr;
5770
5771 if (FMF.noNaNs()) {
5772 // With nnan: X + {+/-}Inf --> {+/-}Inf
5773 if (match(Op1, m_Inf()))
5774 return Op1;
5775
5776 // With nnan: -X + X --> 0.0 (and commuted variant)
5777 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5778 // Negative zeros are allowed because we always end up with positive zero:
5779 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5780 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5781 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5782 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5783 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5784 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5785 return ConstantFP::getZero(Op0->getType());
5786
5787 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5788 match(Op1, m_FNeg(m_Specific(Op0))))
5789 return ConstantFP::getZero(Op0->getType());
5790 }
5791
5792 // (X - Y) + Y --> X
5793 // Y + (X - Y) --> X
5794 Value *X;
5795 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5796 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5797 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5798 return X;
5799
5800 return nullptr;
5801}
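// E.g. the reassociation fold above gives:
//   %d = fsub double %x, %y
//   fadd nsz reassoc double %d, %y --> %x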
5802
5803/// Given operands for an FSub, see if we can fold the result. If not, this
5804/// returns null.
5805static Value *
5806simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5807 const SimplifyQuery &Q, unsigned MaxRecurse,
5808 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5809 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5810 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5811 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5812 return C;
5813
5814 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5815 return C;
5816
5817 // fsub X, +0 ==> X
5818 if (canIgnoreSNaN(ExBehavior, FMF) &&
5819 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5820 FMF.noSignedZeros()))
5821 if (match(Op1, m_PosZeroFP()))
5822 return Op0;
5823
5824 // fsub X, -0 ==> X, when we know X is not -0
5825 if (canIgnoreSNaN(ExBehavior, FMF))
5826 if (match(Op1, m_NegZeroFP()) &&
5827 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q)))
5828 return Op0;
5829
5830 // fsub -0.0, (fsub -0.0, X) ==> X
5831 // fsub -0.0, (fneg X) ==> X
5832 Value *X;
5833 if (canIgnoreSNaN(ExBehavior, FMF))
5834 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5835 return X;
5836
5837 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5838 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5839 if (canIgnoreSNaN(ExBehavior, FMF))
5840 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5841 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5842 match(Op1, m_FNeg(m_Value(X)))))
5843 return X;
5844
5845 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5846 return nullptr;
5847
5848 if (FMF.noNaNs()) {
5849 // fsub nnan x, x ==> 0.0
5850 if (Op0 == Op1)
5851 return Constant::getNullValue(Op0->getType());
5852
5853 // With nnan: {+/-}Inf - X --> {+/-}Inf
5854 if (match(Op0, m_Inf()))
5855 return Op0;
5856
5857 // With nnan: X - {+/-}Inf --> {-/+}Inf
5858 if (match(Op1, m_Inf()))
5859 return foldConstant(Instruction::FNeg, Op1, Q);
5860 }
5861
5862 // Y - (Y - X) --> X
5863 // (X + Y) - Y --> X
5864 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5865 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5866 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5867 return X;
5868
5869 return nullptr;
5870}
5871
5872static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5873 const SimplifyQuery &Q, unsigned MaxRecurse,
5874 fp::ExceptionBehavior ExBehavior,
5875 RoundingMode Rounding) {
5876 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5877 return C;
5878
5879 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5880 return nullptr;
5881
5882 // Canonicalize special constants as operand 1.
5883 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5884 std::swap(Op0, Op1);
5885
5886 // X * 1.0 --> X
5887 if (match(Op1, m_FPOne()))
5888 return Op0;
5889
5890 if (match(Op1, m_AnyZeroFP())) {
5891 // X * 0.0 --> 0.0 (with nnan and nsz)
5892 if (FMF.noNaNs() && FMF.noSignedZeros())
5893 return ConstantFP::getZero(Op0->getType());
5894
5895 KnownFPClass Known = computeKnownFPClass(Op0, FMF, fcInf | fcNan, Q);
5896 if (Known.isKnownNever(fcInf | fcNan)) {
5897 // if nsz is set, return 0.0
5898 if (FMF.noSignedZeros())
5899 return ConstantFP::getZero(Op0->getType());
5900 // +normal number * (-)0.0 --> (-)0.0
5901 if (Known.SignBit == false)
5902 return Op1;
5903 // -normal number * (-)0.0 --> -(-)0.0
5904 if (Known.SignBit == true)
5905 return foldConstant(Instruction::FNeg, Op1, Q);
5906 }
5907 }
5908
5909 // sqrt(X) * sqrt(X) --> X, if we can:
5910 // 1. Remove the intermediate rounding (reassociate).
5911 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
5912 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5913 Value *X;
5914 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5915 FMF.noNaNs() && FMF.noSignedZeros())
5916 return X;
5917
5918 return nullptr;
5919}
5920
5921/// Given the operands for an FMul, see if we can fold the result
5922static Value *
5923simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5924 const SimplifyQuery &Q, unsigned MaxRecurse,
5925 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5926 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5927 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5928 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5929 return C;
5930
5931 // Now apply simplifications that do not require rounding.
5932 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5933}
5934
5935Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5936 const SimplifyQuery &Q,
5937 fp::ExceptionBehavior ExBehavior,
5938 RoundingMode Rounding) {
5939 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5940 Rounding);
5941}
5942
5943Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5944 const SimplifyQuery &Q,
5945 fp::ExceptionBehavior ExBehavior,
5946 RoundingMode Rounding) {
5947 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5948 Rounding);
5949}
5950
5951Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5952 const SimplifyQuery &Q,
5953 fp::ExceptionBehavior ExBehavior,
5954 RoundingMode Rounding) {
5955 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5956 Rounding);
5957}
5958
5959Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5960 const SimplifyQuery &Q,
5961 fp::ExceptionBehavior ExBehavior,
5962 RoundingMode Rounding) {
5963 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5964 Rounding);
5965}
5966
5967static Value *
5968simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5969 const SimplifyQuery &Q, unsigned,
5970 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5971 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5972 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5973 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5974 return C;
5975
5976 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5977 return C;
5978
5979 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5980 return nullptr;
5981
5982 // X / 1.0 -> X
5983 if (match(Op1, m_FPOne()))
5984 return Op0;
5985
5986 // 0 / X -> 0
5987 // Requires that NaNs are off (X could be zero) and signed zeroes are
5988 // ignored (X could be positive or negative, so the output sign is unknown).
5989 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5990 return ConstantFP::getZero(Op0->getType());
5991
5992 if (FMF.noNaNs()) {
5993 // X / X -> 1.0 is legal when NaNs are ignored.
5994 // We can ignore infinities because INF/INF is NaN.
5995 if (Op0 == Op1)
5996 return ConstantFP::get(Op0->getType(), 1.0);
5997
5998 // (X * Y) / Y --> X if we can reassociate to the above form.
5999 Value *X;
6000 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
6001 return X;
6002
6003 // -X / X -> -1.0 and
6004 // X / -X -> -1.0 are legal when NaNs are ignored.
6005 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
6006 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
6007 match(Op1, m_FNegNSZ(m_Specific(Op0))))
6008 return ConstantFP::get(Op0->getType(), -1.0);
6009
6010 // nnan ninf X / [-]0.0 -> poison
6011 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
6012 return PoisonValue::get(Op1->getType());
6013 }
6014
6015 return nullptr;
6016}
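// E.g. with nnan, the folds above give:
//   fdiv nnan double %x, %x --> 1.0
//   fdiv nnan nsz double 0.0, %x --> 0.0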
6017
6018Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6019 const SimplifyQuery &Q,
6020 fp::ExceptionBehavior ExBehavior,
6021 RoundingMode Rounding) {
6022 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6023 Rounding);
6024}
6025
6026static Value *
6027simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6028 const SimplifyQuery &Q, unsigned,
6029 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
6030 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
6031 if (isDefaultFPEnvironment(ExBehavior, Rounding))
6032 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
6033 return C;
6034
6035 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
6036 return C;
6037
6038 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
6039 return nullptr;
6040
6041 // Unlike fdiv, the result of frem always matches the sign of the dividend.
6042 // The constant match may include undef elements in a vector, so return a full
6043 // zero constant as the result.
6044 if (FMF.noNaNs()) {
6045 // +0 % X -> 0
6046 if (match(Op0, m_PosZeroFP()))
6047 return ConstantFP::getZero(Op0->getType());
6048 // -0 % X -> -0
6049 if (match(Op0, m_NegZeroFP()))
6050 return ConstantFP::getNegativeZero(Op0->getType());
6051 }
6052
6053 return nullptr;
6054}
6055
6056Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
6057 const SimplifyQuery &Q,
6058 fp::ExceptionBehavior ExBehavior,
6059 RoundingMode Rounding) {
6060 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
6061 Rounding);
6062}
6063
6064//=== Helper functions for higher up the class hierarchy.
6065
6066/// Given the operand for a UnaryOperator, see if we can fold the result.
6067/// If not, this returns null.
6068static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
6069 unsigned MaxRecurse) {
6070 switch (Opcode) {
6071 case Instruction::FNeg:
6072 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
6073 default:
6074 llvm_unreachable("Unexpected opcode");
6075 }
6076}
6077
6078/// Given the operand for a UnaryOperator, see if we can fold the result.
6079/// If not, this returns null.
6080/// Try to use FastMathFlags when folding the result.
6081static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
6082 const FastMathFlags &FMF, const SimplifyQuery &Q,
6083 unsigned MaxRecurse) {
6084 switch (Opcode) {
6085 case Instruction::FNeg:
6086 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
6087 default:
6088 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
6089 }
6090}
6091
6092Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
6093 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
6094}
6095
6096Value *llvm::simplifyFPUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
6097 const SimplifyQuery &Q) {
6098 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
6099}
6100
6101/// Given operands for a BinaryOperator, see if we can fold the result.
6102/// If not, this returns null.
6103static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6104 const SimplifyQuery &Q, unsigned MaxRecurse) {
6105 switch (Opcode) {
6106 case Instruction::Add:
6107 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6108 MaxRecurse);
6109 case Instruction::Sub:
6110 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6111 MaxRecurse);
6112 case Instruction::Mul:
6113 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6114 MaxRecurse);
6115 case Instruction::SDiv:
6116 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6117 case Instruction::UDiv:
6118 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6119 case Instruction::SRem:
6120 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
6121 case Instruction::URem:
6122 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
6123 case Instruction::Shl:
6124 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
6125 MaxRecurse);
6126 case Instruction::LShr:
6127 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6128 case Instruction::AShr:
6129 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
6130 case Instruction::And:
6131 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
6132 case Instruction::Or:
6133 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
6134 case Instruction::Xor:
6135 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
6136 case Instruction::FAdd:
6137 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6138 case Instruction::FSub:
6139 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6140 case Instruction::FMul:
6141 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6142 case Instruction::FDiv:
6143 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6144 case Instruction::FRem:
6145 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6146 default:
6147 llvm_unreachable("Unexpected opcode");
6148 }
6149}
6150
6151/// Given operands for a BinaryOperator, see if we can fold the result.
6152/// If not, this returns null.
6153/// Try to use FastMathFlags when folding the result.
6154static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6155 const FastMathFlags &FMF, const SimplifyQuery &Q,
6156 unsigned MaxRecurse) {
6157 switch (Opcode) {
6158 case Instruction::FAdd:
6159 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6160 case Instruction::FSub:
6161 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6162 case Instruction::FMul:
6163 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6164 case Instruction::FDiv:
6165 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6166 default:
6167 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6168 }
6169}
6170
6171Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6172 const SimplifyQuery &Q) {
6173 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6174}
6175
6176Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6177 FastMathFlags FMF, const SimplifyQuery &Q) {
6178 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6179}
6180
6181/// Given operands for a CmpInst, see if we can fold the result.
6182static Value *simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6183 const SimplifyQuery &Q, unsigned MaxRecurse) {
6184 if (CmpInst::isIntPredicate(Predicate))
6185 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6186 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6187}
6188
6189Value *llvm::simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS,
6190 const SimplifyQuery &Q) {
6191 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6192}
6193
6194static bool isIdempotent(Intrinsic::ID ID) {
6195 switch (ID) {
6196 default:
6197 return false;
6198
6199 // Unary idempotent: f(f(x)) = f(x)
6200 case Intrinsic::fabs:
6201 case Intrinsic::floor:
6202 case Intrinsic::ceil:
6203 case Intrinsic::trunc:
6204 case Intrinsic::rint:
6205 case Intrinsic::nearbyint:
6206 case Intrinsic::round:
6207 case Intrinsic::roundeven:
6208 case Intrinsic::canonicalize:
6209 case Intrinsic::arithmetic_fence:
6210 return true;
6211 }
6212}
6213
6214/// Return true if the intrinsic rounds a floating-point value to an integral
6215/// floating-point value (not an integer type).
6216static bool removesFPFraction(Intrinsic::ID ID) {
6217 switch (ID) {
6218 default:
6219 return false;
6220
6221 case Intrinsic::floor:
6222 case Intrinsic::ceil:
6223 case Intrinsic::trunc:
6224 case Intrinsic::rint:
6225 case Intrinsic::nearbyint:
6226 case Intrinsic::round:
6227 case Intrinsic::roundeven:
6228 return true;
6229 }
6230}
6231
6232static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6233 const DataLayout &DL) {
6234 GlobalValue *PtrSym;
6235 APInt PtrOffset;
6236 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6237 return nullptr;
6238
6239 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6240
6241 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6242 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6243 return nullptr;
6244
6245 APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6246 DL.getIndexTypeSizeInBits(Ptr->getType()));
6247 if (OffsetInt.srem(4) != 0)
6248 return nullptr;
6249
6250 Constant *Loaded =
6251 ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, std::move(OffsetInt), DL);
6252 if (!Loaded)
6253 return nullptr;
6254
6255 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6256 if (!LoadedCE)
6257 return nullptr;
6258
6259 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6260 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6261 if (!LoadedCE)
6262 return nullptr;
6263 }
6264
6265 if (LoadedCE->getOpcode() != Instruction::Sub)
6266 return nullptr;
6267
6268 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6269 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6270 return nullptr;
6271 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6272
6273 Constant *LoadedRHS = LoadedCE->getOperand(1);
6274 GlobalValue *LoadedRHSSym;
6275 APInt LoadedRHSOffset;
6276 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6277 DL) ||
6278 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6279 return nullptr;
6280
6281 return LoadedLHSPtr;
6282}
6283
6284// TODO: Need to pass in FastMathFlags
6285static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6286 bool IsStrict) {
6287 // ldexp(poison, x) -> poison
6288 // ldexp(x, poison) -> poison
6289 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6290 return Op0;
6291
6292 // ldexp(undef, x) -> nan
6293 if (Q.isUndefValue(Op0))
6294 return ConstantFP::getNaN(Op0->getType());
6295
6296 if (!IsStrict) {
6297 // TODO: Could insert a canonicalize for strict
6298
6299 // ldexp(x, undef) -> x
6300 if (Q.isUndefValue(Op1))
6301 return Op0;
6302 }
6303
6304 const APFloat *C = nullptr;
6305 match(Op0, m_APFloat(C));
6306
6307 // These cases should be safe, even with strictfp.
6308 // ldexp(0.0, x) -> 0.0
6309 // ldexp(-0.0, x) -> -0.0
6310 // ldexp(inf, x) -> inf
6311 // ldexp(-inf, x) -> -inf
6312 if (C && (C->isZero() || C->isInfinity()))
6313 return Op0;
6314
6315 // These folds would drop a canonicalization; we could do them if we knew
6316 // how to ignore denormal flushes and target handling of nan payload bits.
6317 if (IsStrict)
6318 return nullptr;
6319
6320 // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6321 if (C && C->isNaN())
6322 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6323
6324 // ldexp(x, 0) -> x
6325
6326 // TODO: Could fold this if we know the exception mode isn't
6327 // strict, we know the denormal mode and other target modes.
6328 if (match(Op1, PatternMatch::m_ZeroInt()))
6329 return Op0;
6330
6331 return nullptr;
6332}
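// E.g. in the non-strict case, the folds above give:
//   call double @llvm.ldexp.f64.i32(double %x, i32 0) --> %x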
6333
6334static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6335 const SimplifyQuery &Q,
6336 const CallBase *Call) {
6337 // Idempotent functions return the same result when called repeatedly.
6338 Intrinsic::ID IID = F->getIntrinsicID();
6339 if (isIdempotent(IID))
6340 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6341 if (II->getIntrinsicID() == IID)
6342 return II;
6343
6344 if (removesFPFraction(IID)) {
6345 // Converting from int or calling a rounding function always results in a
6346 // finite integral number or infinity. For those inputs, rounding functions
6347 // always return the same value, so the (2nd) rounding is eliminated. Ex:
6348 // floor (sitofp x) -> sitofp x
6349 // round (ceil x) -> ceil x
6350 auto *II = dyn_cast<IntrinsicInst>(Op0);
6351 if ((II && removesFPFraction(II->getIntrinsicID())) ||
6352 match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6353 return Op0;
6354 }
6355
6356 Value *X;
6357 switch (IID) {
6358 case Intrinsic::fabs:
6359 if (computeKnownFPSignBit(Op0, Q) == false)
6360 return Op0;
6361 break;
6362 case Intrinsic::bswap:
6363 // bswap(bswap(x)) -> x
6364 if (match(Op0, m_BSwap(m_Value(X))))
6365 return X;
6366 break;
6367 case Intrinsic::bitreverse:
6368 // bitreverse(bitreverse(x)) -> x
6369 if (match(Op0, m_BitReverse(m_Value(X))))
6370 return X;
6371 break;
6372 case Intrinsic::ctpop: {
6373 // ctpop(X) -> 1 iff X is non-zero power of 2.
6374 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, Q.AC, Q.CxtI, Q.DT))
6375 return ConstantInt::get(Op0->getType(), 1);
6376 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6377 // ctpop(and X, 1) --> and X, 1
6378 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6379 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6380 Q))
6381 return Op0;
6382 break;
6383 }
6384 case Intrinsic::exp:
6385 // exp(log(x)) -> x
6386 if (Call->hasAllowReassoc() &&
6387 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6388 return X;
6389 break;
6390 case Intrinsic::exp2:
6391 // exp2(log2(x)) -> x
6392 if (Call->hasAllowReassoc() &&
6393 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6394 return X;
6395 break;
6396 case Intrinsic::exp10:
6397 // exp10(log10(x)) -> x
6398 if (Call->hasAllowReassoc() &&
6399 match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6400 return X;
6401 break;
6402 case Intrinsic::log:
6403 // log(exp(x)) -> x
6404 if (Call->hasAllowReassoc() &&
6405 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6406 return X;
6407 break;
6408 case Intrinsic::log2:
6409 // log2(exp2(x)) -> x
6410 if (Call->hasAllowReassoc() &&
6411 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6412 match(Op0,
6413 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6414 return X;
6415 break;
6416 case Intrinsic::log10:
6417 // log10(pow(10.0, x)) -> x
6418 // log10(exp10(x)) -> x
6419 if (Call->hasAllowReassoc() &&
6420 (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6421 match(Op0,
6422 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
6423 return X;
6424 break;
6425 case Intrinsic::vector_reverse:
6426 // vector.reverse(vector.reverse(x)) -> x
6427 if (match(Op0, m_VecReverse(m_Value(X))))
6428 return X;
6429 // vector.reverse(splat(X)) -> splat(X)
6430 if (isSplatValue(Op0))
6431 return Op0;
6432 break;
6433 default:
6434 break;
6435 }
6436
6437 return nullptr;
6438}
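// E.g. the involution folds above give:
//   %b = call i32 @llvm.bswap.i32(i32 %x)
//   call i32 @llvm.bswap.i32(i32 %b) --> %x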
6439
6440/// Given a min/max intrinsic, see if it can be removed based on having an
6441/// operand that is another min/max intrinsic with shared operand(s). The caller
6442/// is expected to swap the operand arguments to handle commutation.
6443static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6444 Value *X, *Y;
6445 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6446 return nullptr;
6447
6448 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6449 if (!MM0)
6450 return nullptr;
6451 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6452
6453 if (Op1 == X || Op1 == Y ||
6454 match(Op1, m_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6455 // max (max X, Y), X --> max X, Y
6456 if (IID0 == IID)
6457 return MM0;
6458 // max (min X, Y), X --> X
6459 if (IID0 == getInverseMinMaxIntrinsic(IID))
6460 return Op1;
6461 }
6462 return nullptr;
6463}
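// E.g. with %m = umax(%x, %y), the folds above give:
//   call i8 @llvm.umax.i8(i8 %m, i8 %x) --> %m
//   call i8 @llvm.umin.i8(i8 %m, i8 %x) --> %x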
6464
6465/// Given a min/max intrinsic, see if it can be removed based on having an
6466/// operand that is another min/max intrinsic with shared operand(s). The caller
6467/// is expected to swap the operand arguments to handle commutation.
6468static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6469 Value *Op1) {
6470 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6471 IID == Intrinsic::maximum || IID == Intrinsic::minimum ||
6472 IID == Intrinsic::maximumnum || IID == Intrinsic::minimumnum) &&
6473 "Unsupported intrinsic");
6474
6475 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6476 // If Op0 is not the same intrinsic as IID, do not process.
6477 // This is a difference with integer min/max handling. We do not process the
6478 // case like max(min(X,Y),min(X,Y)) => min(X,Y). But it can be handled by GVN.
6479 if (!M0 || M0->getIntrinsicID() != IID)
6480 return nullptr;
6481 Value *X0 = M0->getOperand(0);
6482 Value *Y0 = M0->getOperand(1);
6483 // Simple case, m(m(X,Y), X) => m(X, Y)
6484 // m(m(X,Y), Y) => m(X, Y)
6485 // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6486 // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6487 // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6488 // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6489 if (X0 == Op1 || Y0 == Op1)
6490 return M0;
6491
6492 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6493 if (!M1)
6494 return nullptr;
6495 Value *X1 = M1->getOperand(0);
6496 Value *Y1 = M1->getOperand(1);
6497 Intrinsic::ID IID1 = M1->getIntrinsicID();
6498 // We have a case m(m(X,Y), m'(X,Y)), taking into account that m' is commutative.
6499 // if m' is m or inversion of m => m(m(X,Y),m'(X,Y)) == m(X,Y).
6500 // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6501 // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6502 // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6503 // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6504 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6505 if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6506 return M0;
6507
6508 return nullptr;
6509}
6510
6511enum class MinMaxOptResult {
6512 CannotOptimize = 0,
6513 UseNewConstVal = 1,
6514 UseOther = 2,
6515 // For undef/poison, we can choose to either propagate undef/poison or
6516 // use the LHS value depending on what will allow more optimization.
6517 UseEither = 3
6518};
6519// Get the optimized value for a min/max instruction with a single constant
6520// input (either undef or scalar constantFP). The result may indicate to
6521// use the non-const LHS value, use a new constant value instead (with NaNs
6522// quieted), or to choose either option in the case of undef/poison.
6523static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst,
6524 const Intrinsic::ID IID,
6525 const CallBase *Call,
6526 Constant **OutNewConstVal) {
6527 assert(OutNewConstVal != nullptr);
6528
6529 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6530 bool PropagateSNaN = IID == Intrinsic::minnum || IID == Intrinsic::maxnum;
6531 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum ||
6532 IID == Intrinsic::minimumnum;
6533
6534 // min/max(x, poison) -> either x or poison
6535 if (isa<UndefValue>(RHSConst)) {
6536 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6537 return MinMaxOptResult::UseEither;
6538 }
6539
6540 const ConstantFP *CFP = dyn_cast<ConstantFP>(RHSConst);
6541 if (!CFP)
6542 return MinMaxOptResult::CannotOptimize;
6543 APFloat CAPF = CFP->getValueAPF();
6544
6545 // minnum(x, qnan) -> x
6546 // maxnum(x, qnan) -> x
6547 // minnum(x, snan) -> qnan
6548 // maxnum(x, snan) -> qnan
6549 // minimum(X, nan) -> qnan
6550 // maximum(X, nan) -> qnan
6551 // minimumnum(X, nan) -> x
6552 // maximumnum(X, nan) -> x
6553 if (CAPF.isNaN()) {
6554 if (PropagateNaN || (PropagateSNaN && CAPF.isSignaling())) {
6555 *OutNewConstVal = ConstantFP::get(CFP->getType(), CAPF.makeQuiet());
6556 return MinMaxOptResult::UseNewConstVal;
6557 }
6558 return MinMaxOptResult::UseOther;
6559 }
6560
6561 if (CAPF.isInfinity() || (Call && Call->hasNoInfs() && CAPF.isLargest())) {
6562 // minnum(X, -inf) -> -inf (ignoring sNaN -> qNaN propagation)
6563 // maxnum(X, +inf) -> +inf (ignoring sNaN -> qNaN propagation)
6564 // minimum(X, -inf) -> -inf if nnan
6565 // maximum(X, +inf) -> +inf if nnan
6566 // minimumnum(X, -inf) -> -inf
6567 // maximumnum(X, +inf) -> +inf
6568 if (CAPF.isNegative() == IsMin &&
6569 (!PropagateNaN || (Call && Call->hasNoNaNs()))) {
6570 *OutNewConstVal = const_cast<Constant *>(RHSConst);
6571 return MinMaxOptResult::UseNewConstVal;
6572 }
6573
6574 // minnum(X, +inf) -> X if nnan
6575 // maxnum(X, -inf) -> X if nnan
6576 // minimum(X, +inf) -> X (ignoring quieting of sNaNs)
6577 // maximum(X, -inf) -> X (ignoring quieting of sNaNs)
6578 // minimumnum(X, +inf) -> X if nnan
6579 // maximumnum(X, -inf) -> X if nnan
6580 if (CAPF.isNegative() != IsMin &&
6581 (PropagateNaN || (Call && Call->hasNoNaNs())))
6582 return MinMaxOptResult::UseOther;
6583 }
6584 return MinMaxOptResult::CannotOptimize;
6585}
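// E.g. the NaN handling above gives:
//   maxnum(%x, qnan) --> %x
//   minimum(%x, qnan) --> qnan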
6586
6587static Value *simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType,
6588 Value *Op0, Value *Op1,
6589 const SimplifyQuery &Q,
6590 const CallBase *Call) {
6591 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6592 switch (IID) {
6593 case Intrinsic::get_active_lane_mask: {
6594 if (match(Op1, m_Zero()))
6595 return ConstantInt::getFalse(ReturnType);
6596
6597 const Function *F = Call->getFunction();
6598 auto *ScalableTy = dyn_cast<ScalableVectorType>(ReturnType);
6599 Attribute Attr = F->getFnAttribute(Attribute::VScaleRange);
6600 if (ScalableTy && Attr.isValid()) {
6601 std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
6602 if (!VScaleMax)
6603 break;
6604 uint64_t MaxPossibleMaskElements =
6605 (uint64_t)ScalableTy->getMinNumElements() * (*VScaleMax);
6606
6607 const APInt *Op1Val;
6608 if (match(Op0, m_Zero()) && match(Op1, m_APInt(Op1Val)) &&
6609 Op1Val->uge(MaxPossibleMaskElements))
6610 return ConstantInt::getAllOnesValue(ReturnType);
6611 }
6612 break;
6613 }
6614 case Intrinsic::abs:
6615 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6616 // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
6617 // on the outer abs.
6618 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6619 return Op0;
6620 break;
6621
6622 case Intrinsic::cttz: {
6623 Value *X;
6624 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6625 return X;
6626 break;
6627 }
6628 case Intrinsic::ctlz: {
6629 Value *X;
6630 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6631 return X;
6632 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6633 return Constant::getNullValue(ReturnType);
6634 break;
6635 }
6636 case Intrinsic::ptrmask: {
6637 // NOTE: We can't apply this simplification based on the value of Op1
6638 // because we need to preserve provenance.
6639 if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6640 return Constant::getNullValue(Op0->getType());
6641
6642 assert(Op1->getType()->getScalarSizeInBits() ==
6643 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6644 "Invalid mask width");
6645 // If index-width (mask size) is less than pointer-size then mask is
6646 // 1-extended.
6647 if (match(Op1, m_PtrToInt(m_Specific(Op0))))
6648 return Op0;
6649
6650 // NOTE: We may have attributes associated with the return value of the
6651 // llvm.ptrmask intrinsic that will be lost when we just return the
6652 // operand. We should try to preserve them.
6653 if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6654 return Op0;
6655
6656 Constant *C;
6657 if (match(Op1, m_ImmConstant(C))) {
6658 KnownBits PtrKnown = computeKnownBits(Op0, Q);
6659 // See if we are only masking off bits we know are already zero due to
6660 // alignment.
6661 APInt IrrelevantPtrBits =
6662 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6663 C = ConstantFoldBinaryOpOperands(
6664 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6665 Q.DL);
6666 if (C != nullptr && C->isAllOnesValue())
6667 return Op0;
6668 }
6669 break;
6670 }
6671 case Intrinsic::smax:
6672 case Intrinsic::smin:
6673 case Intrinsic::umax:
6674 case Intrinsic::umin: {
6675 // If the arguments are the same, this is a no-op.
6676 if (Op0 == Op1)
6677 return Op0;
6678
6679 // Canonicalize immediate constant operand as Op1.
6680 if (match(Op0, m_ImmConstant()))
6681 std::swap(Op0, Op1);
6682
6683 // Assume undef is the limit value.
6684 if (Q.isUndefValue(Op1))
6685 return ConstantInt::get(
6686 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6687
6688 const APInt *C;
6689 if (match(Op1, m_APIntAllowPoison(C))) {
6690 // Clamp to limit value. For example:
6691 // umax(i8 %x, i8 255) --> 255
6692 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6693 return ConstantInt::get(ReturnType, *C);
6694
6695 // If the constant op is the opposite of the limit value, the other must
6696 // be larger/smaller or equal. For example:
6697 // umin(i8 %x, i8 255) --> %x
6698 if (*C == MinMaxIntrinsic::getSaturationPoint(
6699 getInverseMinMaxIntrinsic(IID), BitWidth))
6700 return Op0;
6701
6702 // Remove nested call if constant operands allow it. Example:
6703 // max (max X, 7), 5 -> max X, 7
6704 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6705 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6706 // TODO: loosen undef/splat restrictions for vector constants.
6707 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6708 const APInt *InnerC;
6709 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6710 ICmpInst::compare(*InnerC, *C,
6711 ICmpInst::getNonStrictPredicate(
6712 MinMaxIntrinsic::getPredicate(IID))))
6713 return Op0;
6714 }
6715 }
6716
6717 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6718 return V;
6719 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6720 return V;
6721
6722 ICmpInst::Predicate Pred =
6723 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6724 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6725 return Op0;
6726 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6727 return Op1;
6728
6729 break;
6730 }
6731 case Intrinsic::scmp:
6732 case Intrinsic::ucmp: {
6733 // Fold to a constant if the relationship between operands can be
6734 // established with certainty
6735 if (isICmpTrue(CmpInst::ICMP_EQ, Op0, Op1, Q, RecursionLimit))
6736 return Constant::getNullValue(ReturnType);
6737
6738 ICmpInst::Predicate PredGT =
6739 IID == Intrinsic::scmp ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
6740 if (isICmpTrue(PredGT, Op0, Op1, Q, RecursionLimit))
6741 return ConstantInt::get(ReturnType, 1);
6742
6743 ICmpInst::Predicate PredLT =
6744 IID == Intrinsic::scmp ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
6745 if (isICmpTrue(PredLT, Op0, Op1, Q, RecursionLimit))
6746 return ConstantInt::getSigned(ReturnType, -1);
6747
6748 break;
6749 }
6750 case Intrinsic::usub_with_overflow:
6751 case Intrinsic::ssub_with_overflow:
6752 // X - X -> { 0, false }
6753 // X - undef -> { 0, false }
6754 // undef - X -> { 0, false }
6755 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6756 return Constant::getNullValue(ReturnType);
6757 break;
6758 case Intrinsic::uadd_with_overflow:
6759 case Intrinsic::sadd_with_overflow:
6760 // X + undef -> { -1, false }
6761 // undef + x -> { -1, false }
6762 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6763 return ConstantStruct::get(
6764 cast<StructType>(ReturnType),
6765 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6766 Constant::getNullValue(ReturnType->getStructElementType(1))});
6767 }
6768 break;
6769 case Intrinsic::umul_with_overflow:
6770 case Intrinsic::smul_with_overflow:
6771 // 0 * X -> { 0, false }
6772 // X * 0 -> { 0, false }
6773 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6774 return Constant::getNullValue(ReturnType);
6775 // undef * X -> { 0, false }
6776 // X * undef -> { 0, false }
6777 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6778 return Constant::getNullValue(ReturnType);
6779 break;
6780 case Intrinsic::uadd_sat:
6781 // sat(MAX + X) -> MAX
6782 // sat(X + MAX) -> MAX
6783 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6784 return Constant::getAllOnesValue(ReturnType);
6785 [[fallthrough]];
6786 case Intrinsic::sadd_sat:
6787 // sat(X + undef) -> -1
6788 // sat(undef + X) -> -1
6789 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6790 // For signed: Assume undef is ~X, in which case X + ~X = -1.
6791 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6792 return Constant::getAllOnesValue(ReturnType);
6793
6794 // X + 0 -> X
6795 if (match(Op1, m_Zero()))
6796 return Op0;
6797 // 0 + X -> X
6798 if (match(Op0, m_Zero()))
6799 return Op1;
6800 break;
6801 case Intrinsic::usub_sat:
6802 // sat(0 - X) -> 0, sat(X - MAX) -> 0
6803 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6804 return Constant::getNullValue(ReturnType);
6805 [[fallthrough]];
6806 case Intrinsic::ssub_sat:
6807 // X - X -> 0, X - undef -> 0, undef - X -> 0
6808 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6809 return Constant::getNullValue(ReturnType);
6810 // X - 0 -> X
6811 if (match(Op1, m_Zero()))
6812 return Op0;
6813 break;
6814 case Intrinsic::load_relative:
6815 if (auto *C0 = dyn_cast<Constant>(Op0))
6816 if (auto *C1 = dyn_cast<Constant>(Op1))
6817 return simplifyRelativeLoad(C0, C1, Q.DL);
6818 break;
6819 case Intrinsic::powi:
6820 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6821 // powi(x, 0) -> 1.0
6822 if (Power->isZero())
6823 return ConstantFP::get(Op0->getType(), 1.0);
6824 // powi(x, 1) -> x
6825 if (Power->isOne())
6826 return Op0;
6827 }
6828 break;
6829 case Intrinsic::ldexp:
6830 return simplifyLdexp(Op0, Op1, Q, false);
6831 case Intrinsic::copysign:
6832 // copysign X, X --> X
6833 if (Op0 == Op1)
6834 return Op0;
6835 // copysign -X, X --> X
6836 // copysign X, -X --> -X
6837 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6838 match(Op1, m_FNeg(m_Specific(Op0))))
6839 return Op1;
6840 break;
6841 case Intrinsic::is_fpclass: {
6842 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6843 // If all tests are made, it doesn't matter what the value is.
6844 if ((Mask & fcAllFlags) == fcAllFlags)
6845 return ConstantInt::get(ReturnType, true);
6846 if ((Mask & fcAllFlags) == 0)
6847 return ConstantInt::get(ReturnType, false);
6848 if (Q.isUndefValue(Op0))
6849 return UndefValue::get(ReturnType);
6850 break;
6851 }
6852 case Intrinsic::maxnum:
6853 case Intrinsic::minnum:
6854 case Intrinsic::maximum:
6855 case Intrinsic::minimum:
6856 case Intrinsic::maximumnum:
6857 case Intrinsic::minimumnum: {
6858 // In several cases here, we deviate from exact IEEE 754 semantics
6859 // to enable optimizations (as allowed by the LLVM IR spec).
6860 //
6861 // For instance, we may return one of the arguments unmodified instead of
6862 // inserting an llvm.canonicalize to transform input sNaNs into qNaNs,
6863 // or may assume all NaN inputs are qNaNs.
6864
6865 // If the arguments are the same, this is a no-op (ignoring NaN quieting)
6866 if (Op0 == Op1)
6867 return Op0;
6868
6869 // Canonicalize constant operand as Op1.
6870 if (isa<Constant>(Op0))
6871 std::swap(Op0, Op1);
6872
6873 if (Constant *C = dyn_cast<Constant>(Op1)) {
6874 MinMaxOptResult OptResult = MinMaxOptResult::CannotOptimize;
6875 Constant *NewConst = nullptr;
6876
6877 if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
6878 ElementCount ElemCount = VTy->getElementCount();
6879
6880 if (Constant *SplatVal = C->getSplatValue()) {
6881 // Handle splat vectors (including scalable vectors)
6882 OptResult = OptimizeConstMinMax(SplatVal, IID, Call, &NewConst);
6883 if (OptResult == MinMaxOptResult::UseNewConstVal)
6884 NewConst = ConstantVector::getSplat(ElemCount, NewConst);
6885
6886 } else if (ElemCount.isFixed()) {
6887 // Storage to build up new const return value (with NaNs quieted)
6888 SmallVector<Constant *> NewC(ElemCount.getFixedValue());
6889
6890 // Check elementwise whether we can optimize to either a constant
6891 // value or return the LHS value. We cannot mix and match LHS +
6892 // constant elements, as this would require inserting a new
6893 // VectorShuffle instruction, which is not allowed in simplifyBinOp.
6894 OptResult = MinMaxOptResult::UseEither;
6895 for (unsigned i = 0; i != ElemCount.getFixedValue(); ++i) {
6896 auto ElemResult = OptimizeConstMinMax(C->getAggregateElement(i),
6897 IID, Call, &NewConst);
6898 if (ElemResult == MinMaxOptResult::CannotOptimize ||
6899 (ElemResult != OptResult &&
6900 OptResult != MinMaxOptResult::UseEither &&
6901 ElemResult != MinMaxOptResult::UseEither)) {
6902 OptResult = MinMaxOptResult::CannotOptimize;
6903 break;
6904 }
6905 NewC[i] = NewConst;
6906 if (ElemResult != MinMaxOptResult::UseEither)
6907 OptResult = ElemResult;
6908 }
6909 if (OptResult == MinMaxOptResult::UseNewConstVal)
6910 NewConst = ConstantVector::get(NewC);
6911 }
6912 } else {
6913 // Handle scalar inputs
6914 OptResult = OptimizeConstMinMax(C, IID, Call, &NewConst);
6915 }
6916
6917 if (OptResult == MinMaxOptResult::UseOtherVal ||
6918 OptResult == MinMaxOptResult::UseEither)
6919 return Op0; // Return the other arg (ignoring NaN quieting)
6920 else if (OptResult == MinMaxOptResult::UseNewConstVal)
6921 return NewConst;
6922 }
6923
6924 // Min/max of the same operation with common operand:
6925 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6926 if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
6927 return V;
6928 if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6929 return V;
6930
6931 break;
6932 }
6933 case Intrinsic::vector_extract: {
6934 // (extract_vector (insert_vector _, X, 0), 0) -> X
6935 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6936 Value *X = nullptr;
6937 if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6938 m_Zero())) &&
6939 IdxN == 0 && X->getType() == ReturnType)
6940 return X;
6941
6942 break;
6943 }
6944 default:
6945 break;
6946 }
6947
6948 return nullptr;
6949}
6950
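The smax/smin/umax/umin clamp folds above compare the constant operand against the intrinsic's saturation point. A hedged sketch of that check in isolation (isClampToLimit is an illustrative name; it assumes the MinMaxIntrinsic::getSaturationPoint helper used above):

#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// True when min/max(X, C) folds to C outright, e.g. umax(i8 %x, i8 255):
// C equals the value at which the intrinsic saturates.
static bool isClampToLimit(Intrinsic::ID IID, const APInt &C) {
  return C == MinMaxIntrinsic::getSaturationPoint(IID, C.getBitWidth());
}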
6951static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
6952 ArrayRef<Value *> Args,
6953 const SimplifyQuery &Q) {
6954 // Operand bundles should not be in Args.
6955 assert(Call->arg_size() == Args.size());
6956 unsigned NumOperands = Args.size();
6957 Function *F = cast<Function>(Callee);
6958 Intrinsic::ID IID = F->getIntrinsicID();
6959
6960 // Poison propagates from operands to the result for these intrinsics.
6961 if (intrinsicPropagatesPoison(IID) && any_of(Args, IsaPred<PoisonValue>))
6962 return PoisonValue::get(F->getReturnType());
6963 // Most of the intrinsics with no operands have some kind of side effect.
6964 // Don't simplify.
6965 if (!NumOperands) {
6966 switch (IID) {
6967 case Intrinsic::vscale: {
6968 Type *RetTy = F->getReturnType();
6969 ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
6970 if (const APInt *C = CR.getSingleElement())
6971 return ConstantInt::get(RetTy, C->getZExtValue());
6972 return nullptr;
6973 }
6974 default:
6975 return nullptr;
6976 }
6977 }
6978
6979 if (NumOperands == 1)
6980 return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
6981
6982 if (NumOperands == 2)
6983 return simplifyBinaryIntrinsic(IID, F->getReturnType(), Args[0], Args[1], Q,
6984 Call);
6985
6986 // Handle intrinsics with 3 or more arguments.
6987 switch (IID) {
6988 case Intrinsic::masked_load:
6989 case Intrinsic::masked_gather: {
6990 Value *MaskArg = Args[2];
6991 Value *PassthruArg = Args[3];
6992 // If the mask is all zeros or undef, the "passthru" argument is the result.
6993 if (maskIsAllZeroOrUndef(MaskArg))
6994 return PassthruArg;
6995 return nullptr;
6996 }
6997 case Intrinsic::fshl:
6998 case Intrinsic::fshr: {
6999 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
7000
7001 // If both operands are undef, the result is undef.
7002 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
7003 return UndefValue::get(F->getReturnType());
7004
7005 // If shift amount is undef, assume it is zero.
7006 if (Q.isUndefValue(ShAmtArg))
7007 return Args[IID == Intrinsic::fshl ? 0 : 1];
7008
7009 const APInt *ShAmtC;
7010 if (match(ShAmtArg, m_APInt(ShAmtC))) {
7011 // If there's effectively no shift, return the 1st arg or 2nd arg.
7012 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
7013 if (ShAmtC->urem(BitWidth).isZero())
7014 return Args[IID == Intrinsic::fshl ? 0 : 1];
7015 }
7016
7017 // Rotating zero by anything is zero.
7018 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
7019 return ConstantInt::getNullValue(F->getReturnType());
7020
7021 // Rotating -1 by anything is -1.
7022 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
7023 return ConstantInt::getAllOnesValue(F->getReturnType());
7024
7025 return nullptr;
7026 }
7027 case Intrinsic::experimental_constrained_fma: {
7028 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7029 if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
7030 *FPI->getRoundingMode()))
7031 return V;
7032 return nullptr;
7033 }
7034 case Intrinsic::fma:
7035 case Intrinsic::fmuladd: {
7036 if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
7037 RoundingMode::NearestTiesToEven))
7038 return V;
7039 return nullptr;
7040 }
7041 case Intrinsic::smul_fix:
7042 case Intrinsic::smul_fix_sat: {
7043 Value *Op0 = Args[0];
7044 Value *Op1 = Args[1];
7045 Value *Op2 = Args[2];
7046 Type *ReturnType = F->getReturnType();
7047
7048 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
7049 // when both Op0 and Op1 are constant so we do not care about that special
7050 // case here).
7051 if (isa<Constant>(Op0))
7052 std::swap(Op0, Op1);
7053
7054 // X * 0 -> 0
7055 if (match(Op1, m_Zero()))
7056 return Constant::getNullValue(ReturnType);
7057
7058 // X * undef -> 0
7059 if (Q.isUndefValue(Op1))
7060 return Constant::getNullValue(ReturnType);
7061
7062 // X * (1 << Scale) -> X
7063 APInt ScaledOne =
7064 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
7065 cast<ConstantInt>(Op2)->getZExtValue());
7066 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
7067 return Op0;
7068
7069 return nullptr;
7070 }
7071 case Intrinsic::vector_insert: {
7072 Value *Vec = Args[0];
7073 Value *SubVec = Args[1];
7074 Value *Idx = Args[2];
7075 Type *ReturnType = F->getReturnType();
7076
7077 // (insert_vector Y, (extract_vector X, 0), 0) -> X
7078 // where: Y is X, or Y is undef
7079 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
7080 Value *X = nullptr;
7081 if (match(SubVec,
7082 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
7083 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
7084 X->getType() == ReturnType)
7085 return X;
7086
7087 return nullptr;
7088 }
7089 case Intrinsic::experimental_constrained_fadd: {
7090 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7091 return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7092 *FPI->getExceptionBehavior(),
7093 *FPI->getRoundingMode());
7094 }
7095 case Intrinsic::experimental_constrained_fsub: {
7096 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7097 return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7098 *FPI->getExceptionBehavior(),
7099 *FPI->getRoundingMode());
7100 }
7101 case Intrinsic::experimental_constrained_fmul: {
7102 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7103 return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7104 *FPI->getExceptionBehavior(),
7105 *FPI->getRoundingMode());
7106 }
7107 case Intrinsic::experimental_constrained_fdiv: {
7108 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7109 return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7110 *FPI->getExceptionBehavior(),
7111 *FPI->getRoundingMode());
7112 }
7113 case Intrinsic::experimental_constrained_frem: {
7114 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
7115 return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
7116 *FPI->getExceptionBehavior(),
7117 *FPI->getRoundingMode());
7118 }
7119 case Intrinsic::experimental_constrained_ldexp:
7120 return simplifyLdexp(Args[0], Args[1], Q, true);
7121 case Intrinsic::experimental_gc_relocate: {
7122 GCRelocateInst &GCR = *cast<GCRelocateInst>(Call);
7123 Value *DerivedPtr = GCR.getDerivedPtr();
7124 Value *BasePtr = GCR.getBasePtr();
7125
7126 // Undef is undef, even after relocation.
7127 if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
7128 return UndefValue::get(GCR.getType());
7129 }
7130
7131 if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
7132 // For now, the assumption is that the relocation of null will be null
7133 // for most any collector. If this ever changes, a corresponding hook
7134 // should be added to GCStrategy and this code should check it first.
7135 if (isa<ConstantPointerNull>(DerivedPtr)) {
7136 // Use null-pointer of gc_relocate's type to replace it.
7137 return ConstantPointerNull::get(PT);
7138 }
7139 }
7140 return nullptr;
7141 }
7142 case Intrinsic::experimental_vp_reverse: {
7143 Value *Vec = Call->getArgOperand(0);
7144 Value *Mask = Call->getArgOperand(1);
7145 Value *EVL = Call->getArgOperand(2);
7146
7147 Value *X;
7148 // vp.reverse(vp.reverse(X)) == X (with all ones mask and matching EVL)
7149 if (match(Mask, m_AllOnes()) &&
7150 match(Vec, m_Intrinsic<Intrinsic::experimental_vp_reverse>(
7151 m_Value(X), m_AllOnes(), m_Specific(EVL))))
7152 return X;
7153
7154 // vp.reverse(splat(X)) -> splat(X) (regardless of mask and EVL)
7155 if (isSplatValue(Vec))
7156 return Vec;
7157 return nullptr;
7158 }
7159 default:
7160 return nullptr;
7161 }
7162}
7163
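One detail of the fshl/fshr case above that is easy to miss: the shift amount is reduced modulo the bit width, so any multiple of the width is an identity. A small sketch of that modular test (isIdentityFunnelShiftAmount is an illustrative name):

#include "llvm/ADT/APInt.h"
using namespace llvm;

// A funnel shift by k * BitWidth returns the first (fshl) or second (fshr)
// operand unchanged, because only ShAmt % BitWidth bits are actually shifted.
static bool isIdentityFunnelShiftAmount(const APInt &ShAmt) {
  APInt BW(ShAmt.getBitWidth(), ShAmt.getBitWidth());
  return ShAmt.urem(BW).isZero();
}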
7164static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
7165 ArrayRef<Value *> Args,
7166 const SimplifyQuery &Q) {
7167 auto *F = dyn_cast<Function>(Callee);
7168 if (!F || !canConstantFoldCallTo(Call, F))
7169 return nullptr;
7170
7171 SmallVector<Constant *, 4> ConstantArgs;
7172 ConstantArgs.reserve(Args.size());
7173 for (Value *Arg : Args) {
7174 Constant *C = dyn_cast<Constant>(Arg);
7175 if (!C) {
7176 if (isa<MetadataAsValue>(Arg))
7177 continue;
7178 return nullptr;
7179 }
7180 ConstantArgs.push_back(C);
7181 }
7182
7183 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
7184}
7185
7186Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
7187 const SimplifyQuery &Q) {
7188 // Args should not contain operand bundle operands.
7189 assert(Call->arg_size() == Args.size());
7190
7191 // musttail calls can only be simplified if they are also DCEd.
7192 // As we can't guarantee this here, don't simplify them.
7193 if (Call->isMustTailCall())
7194 return nullptr;
7195
7196 // call undef -> poison
7197 // call null -> poison
7198 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
7199 return PoisonValue::get(Call->getType());
7200
7201 if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
7202 return V;
7203
7204 auto *F = dyn_cast<Function>(Callee);
7205 if (F && F->isIntrinsic())
7206 if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
7207 return Ret;
7208
7209 return nullptr;
7210}
7211
7212Value *llvm::simplifyConstrainedFPCall(CallBase *Call,
7213 const SimplifyQuery &Q) {
7214 SmallVector<Value *, 4> Args(Call->args());
7215 if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
7216 return V;
7217 if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
7218 return Ret;
7219 return nullptr;
7220}
7221
7222/// Given operands for a Freeze, see if we can fold the result.
7223static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7224 // Use a utility function defined in ValueTracking.
7225 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
7226 return Op0;
7227 // We have room for improvement.
7228 return nullptr;
7229}
7230
7231Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
7232 return ::simplifyFreezeInst(Op0, Q);
7233}
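The freeze fold above delegates entirely to ValueTracking. A hedged sketch of using the same predicate directly (freezeIsNoOp is an illustrative helper; it relies on the defaulted context arguments of isGuaranteedNotToBeUndefOrPoison):

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// freeze %v is a no-op whenever %v is provably neither undef nor poison,
// in which case the frozen value can simply be replaced by %v.
static bool freezeIsNoOp(const FreezeInst &FI) {
  return isGuaranteedNotToBeUndefOrPoison(FI.getOperand(0));
}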
7234
7235static Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp,
7236 const SimplifyQuery &Q) {
7237 if (LI->isVolatile())
7238 return nullptr;
7239
7240 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
7241 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
7242
7243 // We can only fold the load if it is from a constant global with definitive
7244 // initializer. Skip expensive logic if this is not the case.
7245 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
7246 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
7247 return nullptr;
7248
7249 // If GlobalVariable's initializer is uniform, then return the constant
7250 // regardless of its offset.
7251 if (Constant *C = ConstantFoldLoadFromUniformValue(GV->getInitializer(),
7252 LI->getType(), Q.DL))
7253 return C;
7254
7255 // Try to convert operand into a constant by stripping offsets while looking
7256 // through invariant.group intrinsics.
7257 APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
7258 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
7259 Q.DL, Offset, /* AllowNonInbounds */ true,
7260 /* AllowInvariantGroup */ true);
7261 if (PtrOp == GV) {
7262 // Index size may have changed due to address space casts.
7263 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
7264 return ConstantFoldLoadFromConstPtr(GV, LI->getType(), std::move(Offset),
7265 Q.DL);
7266 }
7267
7268 return nullptr;
7269}
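simplifyLoadInst bottoms out in the ConstantFolding helpers. A hedged sketch of the first case above taken on its own (foldLoadOfConstPtr is an illustrative name):

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Fold `load Ty, ptr %p` when %p is itself a Constant, e.g. a global with a
// known initializer; returns null when no folding is possible.
static Constant *foldLoadOfConstPtr(LoadInst &LI, const DataLayout &DL) {
  if (auto *PtrOpC = dyn_cast<Constant>(LI.getPointerOperand()))
    return ConstantFoldLoadFromConstPtr(PtrOpC, LI.getType(), DL);
  return nullptr;
}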
7270
7271/// See if we can compute a simplified version of this instruction.
7272/// If not, this returns null.
7273
7274static Value *simplifyInstructionWithOperands(Instruction *I,
7275 ArrayRef<Value *> NewOps,
7276 const SimplifyQuery &SQ,
7277 unsigned MaxRecurse) {
7278 assert(I->getFunction() && "instruction should be inserted in a function");
7279 assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
7280 "context instruction should be in the same function");
7281
7282 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
7283
7284 switch (I->getOpcode()) {
7285 default:
7286 if (all_of(NewOps, IsaPred<Constant>)) {
7287 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
7288 transform(NewOps, NewConstOps.begin(),
7289 [](Value *V) { return cast<Constant>(V); });
7290 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
7291 }
7292 return nullptr;
7293 case Instruction::FNeg:
7294 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
7295 case Instruction::FAdd:
7296 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7297 MaxRecurse);
7298 case Instruction::Add:
7299 return simplifyAddInst(
7300 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7301 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7302 case Instruction::FSub:
7303 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7304 MaxRecurse);
7305 case Instruction::Sub:
7306 return simplifySubInst(
7307 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7308 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7309 case Instruction::FMul:
7310 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7311 MaxRecurse);
7312 case Instruction::Mul:
7313 return simplifyMulInst(
7314 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7315 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7316 case Instruction::SDiv:
7317 return simplifySDivInst(NewOps[0], NewOps[1],
7318 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7319 MaxRecurse);
7320 case Instruction::UDiv:
7321 return simplifyUDivInst(NewOps[0], NewOps[1],
7322 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7323 MaxRecurse);
7324 case Instruction::FDiv:
7325 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7326 MaxRecurse);
7327 case Instruction::SRem:
7328 return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7329 case Instruction::URem:
7330 return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7331 case Instruction::FRem:
7332 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7333 MaxRecurse);
7334 case Instruction::Shl:
7335 return simplifyShlInst(
7336 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7337 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7338 case Instruction::LShr:
7339 return simplifyLShrInst(NewOps[0], NewOps[1],
7340 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7341 MaxRecurse);
7342 case Instruction::AShr:
7343 return simplifyAShrInst(NewOps[0], NewOps[1],
7344 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7345 MaxRecurse);
7346 case Instruction::And:
7347 return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7348 case Instruction::Or:
7349 return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7350 case Instruction::Xor:
7351 return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7352 case Instruction::ICmp:
7353 return simplifyICmpInst(cast<ICmpInst>(I)->getCmpPredicate(), NewOps[0],
7354 NewOps[1], Q, MaxRecurse);
7355 case Instruction::FCmp:
7356 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7357 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7358 case Instruction::Select:
7359 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7360 case Instruction::GetElementPtr: {
7361 auto *GEPI = cast<GetElementPtrInst>(I);
7362 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7363 ArrayRef(NewOps).slice(1), GEPI->getNoWrapFlags(), Q,
7364 MaxRecurse);
7365 }
7366 case Instruction::InsertValue: {
7367 auto *IV = cast<InsertValueInst>(I);
7368 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7369 MaxRecurse);
7370 }
7371 case Instruction::InsertElement:
7372 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7373 case Instruction::ExtractValue: {
7374 auto *EVI = cast<ExtractValueInst>(I);
7375 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7376 MaxRecurse);
7377 }
7378 case Instruction::ExtractElement:
7379 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7380 case Instruction::ShuffleVector: {
7381 auto *SVI = cast<ShuffleVectorInst>(I);
7382 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7383 SVI->getShuffleMask(), SVI->getType(), Q,
7384 MaxRecurse);
7385 }
7386 case Instruction::PHI:
7387 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7388 case Instruction::Call:
7389 return simplifyCall(
7390 cast<CallInst>(I), NewOps.back(),
7391 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7392 case Instruction::Freeze:
7393 return llvm::simplifyFreezeInst(NewOps[0], Q);
7394#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7395#include "llvm/IR/Instruction.def"
7396#undef HANDLE_CAST_INST
7397 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7398 MaxRecurse);
7399 case Instruction::Alloca:
7400 // No simplifications for Alloca and it can't be constant folded.
7401 return nullptr;
7402 case Instruction::Load:
7403 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7404 }
7405}
7406
7407Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7408 ArrayRef<Value *> NewOps,
7409 const SimplifyQuery &SQ) {
7410 assert(NewOps.size() == I->getNumOperands() &&
7411 "Number of operands should match the instruction!");
7412 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7413}
7414
7415Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
7416 SmallVector<Value *, 8> Ops(I->operands());
7417 Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
7418
7419 /// If called on unreachable code, the instruction may simplify to itself.
7420 /// Make life easier for users by detecting that case here, and returning a
7421 /// safe value instead.
7422 return Result == I ? PoisonValue::get(I->getType()) : Result;
7423}
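simplifyInstruction is the usual client entry point. A hedged usage sketch (simplifyAll is an illustrative helper, not part of this file) that reuses the erase conditions of the recursive driver further below:

#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstIterator.h"
using namespace llvm;

// Try to fold every instruction in F, RAUW the ones that simplify, and erase
// them when that is safe. Returns true if anything changed.
static bool simplifyAll(Function &F, const SimplifyQuery &SQ) {
  bool Changed = false;
  for (Instruction &I : make_early_inc_range(instructions(F))) {
    Value *V = simplifyInstruction(&I, SQ.getWithInstruction(&I));
    if (!V)
      continue;
    I.replaceAllUsesWith(V);
    if (!I.isEHPad() && !I.isTerminator() && !I.mayHaveSideEffects())
      I.eraseFromParent();
    Changed = true;
  }
  return Changed;
}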
7424
7425/// Implementation of recursive simplification through an instruction's
7426/// uses.
7427///
7428/// This is the common implementation of the recursive simplification routines.
7429/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7430/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7431/// instructions to process and attempt to simplify it using
7432/// InstructionSimplify. Recursively visited users which could not be
7433/// simplified themselves are added to the optional UnsimplifiedUsers set for
7434/// further processing by the caller.
7435///
7436/// This routine returns 'true' only when *it* simplifies something. The passed
7437/// in simplified value does not count toward this.
7438static bool replaceAndRecursivelySimplifyImpl(
7439 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7440 const DominatorTree *DT, AssumptionCache *AC,
7441 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7442 bool Simplified = false;
7443 SmallSetVector<Instruction *, 8> Worklist;
7444 const DataLayout &DL = I->getDataLayout();
7445
7446 // If we have an explicit value to collapse to, do that round of the
7447 // simplification loop by hand initially.
7448 if (SimpleV) {
7449 for (User *U : I->users())
7450 if (U != I)
7451 Worklist.insert(cast<Instruction>(U));
7452
7453 // Replace the instruction with its simplified value.
7454 I->replaceAllUsesWith(SimpleV);
7455
7456 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7457 I->eraseFromParent();
7458 } else {
7459 Worklist.insert(I);
7460 }
7461
7462 // Note that we must test the size on each iteration, the worklist can grow.
7463 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7464 I = Worklist[Idx];
7465
7466 // See if this instruction simplifies.
7467 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7468 if (!SimpleV) {
7469 if (UnsimplifiedUsers)
7470 UnsimplifiedUsers->insert(I);
7471 continue;
7472 }
7473
7474 Simplified = true;
7475
7476 // Stash away all the uses of the old instruction so we can check them for
7477 // recursive simplifications after a RAUW. This is cheaper than checking all
7478 // uses of To on the recursive step in most cases.
7479 for (User *U : I->users())
7480 Worklist.insert(cast<Instruction>(U));
7481
7482 // Replace the instruction with its simplified value.
7483 I->replaceAllUsesWith(SimpleV);
7484
7485 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7486 I->eraseFromParent();
7487 }
7488 return Simplified;
7489}
7490
7491bool llvm::replaceAndRecursivelySimplify(
7492 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7493 const DominatorTree *DT, AssumptionCache *AC,
7494 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
7495 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
7496 assert(SimpleV && "Must provide a simplified value.");
7497 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
7498 UnsimplifiedUsers);
7499}
7500
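A hedged sketch of how a caller hands an externally computed replacement to this driver (collapseTo is an illustrative wrapper; the asserted preconditions above require NewV to be non-null and distinct from I):

#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Replace I with NewV, then let the worklist loop above recursively simplify
// and clean up I's transitive users. Returns true if it simplified anything
// beyond the forced replacement itself.
static bool collapseTo(Instruction *I, Value *NewV,
                       const TargetLibraryInfo *TLI, const DominatorTree *DT,
                       AssumptionCache *AC) {
  return replaceAndRecursivelySimplify(I, NewV, TLI, DT, AC);
}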
7501namespace llvm {
7502const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
7503 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
7504 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
7505 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
7506 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
7507 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
7508 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
7509 return {F.getDataLayout(), TLI, DT, AC};
7510}
7511
7512const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
7513 const DataLayout &DL) {
7514 return {DL, &AR.TLI, &AR.DT, &AR.AC};
7515}
7516
7517template <class T, class... TArgs>
7518const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
7519 Function &F) {
7520 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
7521 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
7522 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
7523 return {F.getDataLayout(), TLI, DT, AC};
7524}
7525template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
7526 Function &);
7527
7528bool SimplifyQuery::isUndefValue(Value *V) const {
7529 if (!CanUseUndef)
7530 return false;
7531
7532 return match(V, m_Undef());
7533}
7534
7535} // namespace llvm
7536
7537void InstSimplifyFolder::anchor() {}
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
static GCRegistry::Add< ErlangGC > A("erlang", "erlang-compatible garbage collector")
static GCRegistry::Add< StatepointGC > D("statepoint-example", "an example strategy for statepoint")
static GCRegistry::Add< CoreCLRGC > E("coreclr", "CoreCLR-compatible GC")
static GCRegistry::Add< OcamlGC > B("ocaml", "ocaml 3.10-compatible GC")
IRTranslator LLVM IR MI
static Value * simplifyCmpSelFalseCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with false branch of select.
static Value * simplifyCmpSelCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse, Constant *TrueOrFalse)
Simplify comparison with true or false branch of select: sel = select i1 cond, i32 tv,...
static Value * foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L, Value *R, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify binops of form "A op (B op' C)" or the commuted variant by distributing op over op'.
static Constant * foldOrCommuteConstant(Instruction::BinaryOps Opcode, Value *&Op0, Value *&Op1, const SimplifyQuery &Q)
static bool haveNonOverlappingStorage(const Value *V1, const Value *V2)
Return true if V1 and V2 are each the base of some distict storage region [V, object_size(V)] which d...
static Constant * foldConstant(Instruction::UnaryOps Opcode, Value *&Op, const SimplifyQuery &Q)
static Value * handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
We know comparison with both branches of select can be simplified, but they are not equal.
static Value * threadCmpOverPHI(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a PHI instruction, try to simplify the comparison by seeing whether ...
static Constant * propagateNaN(Constant *In)
Try to propagate existing NaN values when possible.
static Value * simplifyICmpOfBools(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Fold an icmp when its operands have i1 scalar type.
static Value * simplifyICmpWithBinOpOnLHS(CmpPredicate Pred, BinaryOperator *LBO, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
static void getUnsignedMonotonicValues(SmallPtrSetImpl< Value * > &Res, Value *V, MonotonicType Type, const SimplifyQuery &Q, unsigned Depth=0)
Get values V_i such that V uge V_i (GreaterEq) or V ule V_i (LowerEq).
static Value * simplifyRelativeLoad(Constant *Ptr, Constant *Offset, const DataLayout &DL)
static Value * simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SDiv and UDiv.
static Value * simplifyPHINode(PHINode *PN, ArrayRef< Value * > IncomingValues, const SimplifyQuery &Q)
See if we can fold the given phi. If not, returns null.
@ RecursionLimit
static bool isSameCompare(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static Value * simplifyAndCommutative(Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static bool isIdempotent(Intrinsic::ID ID)
static std::optional< ConstantRange > getRange(Value *V, const InstrInfoQuery &IIQ)
Helper method to get range from metadata or attribute.
static Value * simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Try to simplify and/or of icmp with ctpop intrinsic.
static Value * simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp, ICmpInst *UnsignedICmp, bool IsAnd, const SimplifyQuery &Q)
Commuted variants are assumed to be handled by calling this function again with the parameters swappe...
static Value * tryConstantFoldCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyWithOpsReplaced(Value *V, ArrayRef< std::pair< Value *, Value * > > Ops, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags, unsigned MaxRecurse)
static Value * simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifyAndOrOfFCmpsWithConstants(FCmpInst *Cmp0, FCmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyICmpWithMinMax(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
simplify integer comparisons where at least one operand of the compare matches an integer min/max idi...
static Value * simplifyCmpSelTrueCase(CmpPredicate Pred, Value *LHS, Value *RHS, Value *Cond, const SimplifyQuery &Q, unsigned MaxRecurse)
Simplify comparison with true branch of select.
static Value * simplifyIntrinsic(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
static Value * simplifyICmpUsingMonotonicValues(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q)
Returns true if a shift by Amount always yields poison.
static Value * simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an LShr or AShr, see if we can fold the result.
static Value * simplifyICmpWithIntrinsicOnLHS(CmpPredicate Pred, Value *LHS, Value *RHS)
static Value * simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Test if there is a dominating equivalence condition for the two operands.
static Value * simplifyFPUnOp(unsigned, Value *, const FastMathFlags &, const SimplifyQuery &, unsigned)
Given the operand for a UnaryOperator, see if we can fold the result.
static Value * simplifyICmpWithBinOp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
TODO: A large part of this logic is duplicated in InstCombine's foldICmpBinOp().
static Value * simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static Value * expandBinOp(Instruction::BinaryOps Opcode, Value *V, Value *OtherOp, Instruction::BinaryOps OpcodeToExpand, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a binary operator of form "V op OtherOp" where V is "(B0 opex B1)" by distributing 'o...
static Value * simplifyICmpWithZero(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Try hard to fold icmp with zero RHS because this is a common case.
static Value * simplifySelectWithFCmp(Value *Cond, Value *T, Value *F, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is a floating-point comparison.
static Constant * getFalse(Type *Ty)
For a boolean type or a vector of boolean type, return false or a vector with every element false.
static Value * simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
Check for common or similar folds of integer division or integer remainder.
static bool removesFPFraction(Intrinsic::ID ID)
Return true if the intrinsic rounds a floating-point value to an integral floating-point value (not a...
static Value * simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1, const InstrInfoQuery &IIQ)
static Value * simplifySelectWithEquivalence(ArrayRef< std::pair< Value *, Value * > > Replacements, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer equality or floating-po...
static bool trySimplifyICmpWithAdds(CmpPredicate Pred, Value *LHS, Value *RHS, const InstrInfoQuery &IIQ)
static Value * simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X, const APInt *Y, bool TrueWhenUnset)
Try to simplify a select instruction when its condition operand is an integer comparison where one op...
static Value * simplifyAssociativeBinOp(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Generic simplifications for associative binary operations.
static Value * threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with an operand that is a PHI instruction, try to simplify the bino...
static Value * simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS, CmpPredicate Pred, Value *TVal, Value *FVal)
static Constant * simplifyFPOp(ArrayRef< Value * > Ops, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior, RoundingMode Rounding)
Perform folds that are common to any floating-point operation.
static Value * threadCmpOverSelect(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a comparison with a select instruction, try to simplify the comparison by seeing wheth...
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Implementation of recursive simplification through an instruction's uses.
static bool isAllocDisjoint(const Value *V)
Return true if the underlying object (storage) must be disjoint from storage returned by any noalias ...
static Constant * getTrue(Type *Ty)
For a boolean type or a vector of boolean type, return true or a vector with every element true.
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q, unsigned MaxRecurse, bool IsSigned)
Return true if we can simplify X / Y to 0.
static Value * simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q, bool IsStrict)
static Value * simplifyLogicOfAddSub(Value *Op0, Value *Op1, Instruction::BinaryOps Opcode)
Given a bitwise logic op, check if the operands are add/sub with a common source value and inverted c...
static Value * simplifySelectWithBitTest(Value *CondVal, Value *TrueVal, Value *FalseVal)
An alternative way to test if a bit is set or not.
static Value * simplifyOrLogic(Value *X, Value *Y)
static Type * getCompareTy(Value *Op)
static Value * simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1, const SimplifyQuery &Q)
static bool isICmpTrue(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
Given a predicate and two operands, return true if the comparison is true.
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V)
Compute the base pointer and cumulative constant offsets for V.
static Value * foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1, int MaskVal, Value *RootVec, unsigned MaxRecurse)
For the given destination element of a shuffle, peek through shuffles to match a root vector source o...
static Value * simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS, FCmpInst *RHS, bool IsAnd)
static MinMaxOptResult OptimizeConstMinMax(const Constant *RHSConst, const Intrinsic::ID IID, const CallBase *Call, Constant **OutNewConstVal)
static Value * simplifyICmpWithConstant(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * extractEquivalentCondition(Value *V, CmpPredicate Pred, Value *LHS, Value *RHS)
Rummage around inside V looking for something equivalent to the comparison "LHS Pred RHS".
static Value * simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0, Value *Op1, bool IsAnd)
static Value * threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q, unsigned MaxRecurse)
In the case of a binary operation with a select instruction as an operand, try to simplify the binop ...
static Constant * computePointerDifference(const DataLayout &DL, Value *LHS, Value *RHS)
Compute the constant difference between two pointer values.
static Value * simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd)
Test if a pair of compares with a shared operand and 2 constants has an empty set intersection,...
static Value * simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
static Value * simplifyICmpWithDominatingAssume(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyShift(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, bool IsNSW, const SimplifyQuery &Q, unsigned MaxRecurse)
Given operands for an Shl, LShr or AShr, see if we can fold the result.
static Constant * computePointerICmp(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
static Value * simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1, const SimplifyQuery &Q, unsigned MaxRecurse)
These are simplifications common to SRem and URem.
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT)
Does the given value dominate the specified phi node?
static Value * simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q, unsigned MaxRecurse)
Try to simplify a select instruction when its condition operand is an integer comparison.
static Value * foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1)
Given a min/max intrinsic, see if it can be removed based on having an operand that is another min/ma...
static Value * simplifyUnaryIntrinsic(Function *F, Value *Op0, const SimplifyQuery &Q, const CallBase *Call)
const AbstractManglingParser< Derived, Alloc >::OperatorInfo AbstractManglingParser< Derived, Alloc >::Ops[]
This header provides classes for managing per-loop analyses.
#define F(x, y, z)
Definition MD5.cpp:55
#define I(x, y, z)
Definition MD5.cpp:58
#define T
uint64_t IntrinsicInst * II
#define P(N)
const SmallVectorImpl< MachineOperand > & Cond
This file contains some templates that are useful if you are working with the STL at all.
This file implements a set that has insertion order iteration characteristics.
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
#define STATISTIC(VARNAME, DESC)
Definition Statistic.h:171
static unsigned getScalarSizeInBits(Type *Ty)
static TableGen::Emitter::Opt Y("gen-skeleton-entry", EmitSkeleton, "Generate example skeleton entry")
static TableGen::Emitter::OptClass< SkeletonEmitter > X("gen-skeleton-class", "Generate example skeleton class")
static SymbolRef::Type getType(const Symbol *Sym)
Definition TapiFile.cpp:39
Value * RHS
Value * LHS
BinaryOperator * Mul
static const uint32_t IV[8]
Definition blake3_impl.h:83
bool isNegative() const
Definition APFloat.h:1449
APFloat makeQuiet() const
Assuming this is an IEEE-754 NaN value, quiet its signaling bit.
Definition APFloat.h:1316
bool isNaN() const
Definition APFloat.h:1447
bool isSignaling() const
Definition APFloat.h:1451
bool isLargest() const
Definition APFloat.h:1465
bool isInfinity() const
Definition APFloat.h:1446
Class for arbitrary precision integers.
Definition APInt.h:78
LLVM_ABI APInt zextOrTrunc(unsigned width) const
Zero extend or truncate to width.
Definition APInt.cpp:1033
unsigned getActiveBits() const
Compute the number of active bits in the value.
Definition APInt.h:1512
bool isZero() const
Determine if this value is zero, i.e. all bits are clear.
Definition APInt.h:380
LLVM_ABI APInt urem(const APInt &RHS) const
Unsigned remainder operation.
Definition APInt.cpp:1666
void setSignBit()
Set the sign bit to 1.
Definition APInt.h:1340
unsigned getBitWidth() const
Return the number of bits in the APInt.
Definition APInt.h:1488
bool ult(const APInt &RHS) const
Unsigned less than comparison.
Definition APInt.h:1111
bool intersects(const APInt &RHS) const
This operation tests if there are any pairs of corresponding bits between this APInt and RHS that are...
Definition APInt.h:1249
unsigned countr_zero() const
Count the number of trailing zero bits.
Definition APInt.h:1639
bool isNonPositive() const
Determine if this APInt Value is non-positive (<= 0).
Definition APInt.h:361
LLVM_ABI APInt sextOrTrunc(unsigned width) const
Sign extend or truncate to width.
Definition APInt.cpp:1041
bool isStrictlyPositive() const
Determine if this APInt Value is positive.
Definition APInt.h:356
uint64_t getLimitedValue(uint64_t Limit=UINT64_MAX) const
If this value is smaller than the specified limit, return it, otherwise return the limit value.
Definition APInt.h:475
bool getBoolValue() const
Convert APInt to a boolean value.
Definition APInt.h:471
LLVM_ABI APInt srem(const APInt &RHS) const
Function for signed remainder operation.
Definition APInt.cpp:1736
bool isMask(unsigned numBits) const
Definition APInt.h:488
bool isMaxSignedValue() const
Determine if this is the largest signed value.
Definition APInt.h:405
bool isNonNegative() const
Determine if this APInt Value is non-negative (>= 0)
Definition APInt.h:334
bool ule(const APInt &RHS) const
Unsigned less or equal comparison.
Definition APInt.h:1150
bool isSubsetOf(const APInt &RHS) const
This operation checks that all bits set in this APInt are also set in RHS.
Definition APInt.h:1257
bool isPowerOf2() const
Check if this APInt's value is a power of two greater than zero.
Definition APInt.h:440
static APInt getLowBitsSet(unsigned numBits, unsigned loBitsSet)
Constructs an APInt value that has the bottom loBitsSet bits set.
Definition APInt.h:306
bool isSignBitSet() const
Determine if sign bit of this APInt is set.
Definition APInt.h:341
bool slt(const APInt &RHS) const
Signed less than comparison.
Definition APInt.h:1130
static APInt getHighBitsSet(unsigned numBits, unsigned hiBitsSet)
Constructs an APInt value that has the top hiBitsSet bits set.
Definition APInt.h:296
static APInt getZero(unsigned numBits)
Get the '0' value for the specified bit-width.
Definition APInt.h:200
bool isOne() const
Determine if this is a value of 1.
Definition APInt.h:389
static APInt getOneBitSet(unsigned numBits, unsigned BitNo)
Return an APInt with exactly one bit set in the result.
Definition APInt.h:239
bool uge(const APInt &RHS) const
Unsigned greater or equal comparison.
Definition APInt.h:1221
an instruction to allocate memory on the stack
A container for analyses that lazily runs them and caches their results.
This class represents an incoming formal argument to a Function.
Definition Argument.h:32
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition ArrayRef.h:41
const T & back() const
back - Get the last element.
Definition ArrayRef.h:156
size_t size() const
size - Get the array size.
Definition ArrayRef.h:147
ArrayRef< T > drop_back(size_t N=1) const
Drop the last N elements of the array.
Definition ArrayRef.h:206
bool empty() const
empty - Check if the array is empty.
Definition ArrayRef.h:142
ArrayRef< T > slice(size_t N, size_t M) const
slice(n, m) - Chop off the first N elements of the array, and keep M elements in the array.
Definition ArrayRef.h:191
An immutable pass that tracks lazily created AssumptionCache objects.
AssumptionCache & getAssumptionCache(Function &F)
Get the cached assumptions for a function.
A cache of @llvm.assume calls within a function.
MutableArrayRef< ResultElem > assumptionsFor(const Value *V)
Access the list of assumptions which affect this value.
Functions, function parameters, and return types can have attributes to indicate how they should be t...
Definition Attributes.h:69
LLVM_ABI std::optional< unsigned > getVScaleRangeMax() const
Returns the maximum value for the vscale_range attribute or std::nullopt when unknown.
bool isValid() const
Return true if the attribute is any kind of attribute.
Definition Attributes.h:223
const Instruction * getTerminator() const LLVM_READONLY
Returns the terminator instruction if the block is well formed or null if the block is not well forme...
Definition BasicBlock.h:233
BinaryOps getOpcode() const
Definition InstrTypes.h:374
Base class for all callable instructions (InvokeInst and CallInst) Holds everything related to callin...
This class represents a function call, abstracting a target machine's calling convention.
static LLVM_ABI unsigned isEliminableCastPair(Instruction::CastOps firstOpcode, Instruction::CastOps secondOpcode, Type *SrcTy, Type *MidTy, Type *DstTy, const DataLayout *DL)
Determine how a pair of casts can be eliminated, if they can be at all.
This class is the base class for the comparison instructions.
Definition InstrTypes.h:664
static Type * makeCmpResultType(Type *opnd_type)
Create a result type for fcmp/icmp.
Definition InstrTypes.h:982
Predicate getStrictPredicate() const
For example, SGE -> SGT, SLE -> SLT, ULE -> ULT, UGE -> UGT.
Definition InstrTypes.h:858
bool isFalseWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:948
Predicate
This enumeration lists the possible predicates for CmpInst subclasses.
Definition InstrTypes.h:676
@ FCMP_OEQ
0 0 0 1 True if ordered and equal
Definition InstrTypes.h:679
@ FCMP_TRUE
1 1 1 1 Always true (always folded)
Definition InstrTypes.h:693
@ ICMP_SLT
signed less than
Definition InstrTypes.h:705
@ ICMP_SLE
signed less or equal
Definition InstrTypes.h:706
@ FCMP_OLT
0 1 0 0 True if ordered and less than
Definition InstrTypes.h:682
@ FCMP_ULE
1 1 0 1 True if unordered, less than, or equal
Definition InstrTypes.h:691
@ FCMP_OGT
0 0 1 0 True if ordered and greater than
Definition InstrTypes.h:680
@ FCMP_OGE
0 0 1 1 True if ordered and greater than or equal
Definition InstrTypes.h:681
@ ICMP_UGE
unsigned greater or equal
Definition InstrTypes.h:700
@ ICMP_UGT
unsigned greater than
Definition InstrTypes.h:699
@ ICMP_SGT
signed greater than
Definition InstrTypes.h:703
@ FCMP_ULT
1 1 0 0 True if unordered or less than
Definition InstrTypes.h:690
@ FCMP_ONE
0 1 1 0 True if ordered and operands are unequal
Definition InstrTypes.h:684
@ FCMP_UEQ
1 0 0 1 True if unordered or equal
Definition InstrTypes.h:687
@ ICMP_ULT
unsigned less than
Definition InstrTypes.h:701
@ FCMP_UGT
1 0 1 0 True if unordered or greater than
Definition InstrTypes.h:688
@ FCMP_OLE
0 1 0 1 True if ordered and less than or equal
Definition InstrTypes.h:683
@ FCMP_ORD
0 1 1 1 True if ordered (no nans)
Definition InstrTypes.h:685
@ ICMP_NE
not equal
Definition InstrTypes.h:698
@ ICMP_SGE
signed greater or equal
Definition InstrTypes.h:704
@ FCMP_UNE
1 1 1 0 True if unordered or not equal
Definition InstrTypes.h:692
@ ICMP_ULE
unsigned less or equal
Definition InstrTypes.h:702
@ FCMP_UGE
1 0 1 1 True if unordered, greater than, or equal
Definition InstrTypes.h:689
@ FCMP_FALSE
0 0 0 0 Always false (always folded)
Definition InstrTypes.h:678
@ FCMP_UNO
1 0 0 0 True if unordered: isnan(X) | isnan(Y)
Definition InstrTypes.h:686
bool isSigned() const
Definition InstrTypes.h:930
Predicate getSwappedPredicate() const
For example, EQ->EQ, SLE->SGE, ULT->UGT, OEQ->OEQ, ULE->UGE, OLT->OGT, etc.
Definition InstrTypes.h:827
bool isTrueWhenEqual() const
This is just a convenience.
Definition InstrTypes.h:942
static bool isFPPredicate(Predicate P)
Definition InstrTypes.h:770
Predicate getNonStrictPredicate() const
For example, SGT -> SGE, SLT -> SLE, ULT -> ULE, UGT -> UGE.
Definition InstrTypes.h:871
Predicate getInversePredicate() const
For example, EQ -> NE, UGT -> ULE, SLT -> SGE, OEQ -> UNE, UGT -> OLE, OLT -> UGE,...
Definition InstrTypes.h:789
Predicate getPredicate() const
Return the predicate for this instruction.
Definition InstrTypes.h:765
static LLVM_ABI bool isUnordered(Predicate predicate)
Determine if the predicate is an unordered operation.
static bool isIntPredicate(Predicate P)
Definition InstrTypes.h:776
static LLVM_ABI bool isOrdered(Predicate predicate)
Determine if the predicate is an ordered operation.
bool isUnsigned() const
Definition InstrTypes.h:936
An abstraction over a floating-point predicate, and a pack of an integer predicate with samesign info...
static LLVM_ABI Constant * getIntToPtr(Constant *C, Type *Ty, bool OnlyIfReduced=false)
static LLVM_ABI Constant * getExtractElement(Constant *Vec, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getBinOpAbsorber(unsigned Opcode, Type *Ty, bool AllowLHSConstant=false)
Return the absorbing element for the given binary operation, i.e.
static LLVM_ABI Constant * getNot(Constant *C)
static LLVM_ABI Constant * getInsertElement(Constant *Vec, Constant *Elt, Constant *Idx, Type *OnlyIfReducedTy=nullptr)
static LLVM_ABI Constant * getShuffleVector(Constant *V1, Constant *V2, ArrayRef< int > Mask, Type *OnlyIfReducedTy=nullptr)
static bool isSupportedGetElementPtr(const Type *SrcElemTy)
Whether creating a constant expression for this getelementptr type is supported.
Definition Constants.h:1387
static Constant * getGetElementPtr(Type *Ty, Constant *C, ArrayRef< Constant * > IdxList, GEPNoWrapFlags NW=GEPNoWrapFlags::none(), std::optional< ConstantRange > InRange=std::nullopt, Type *OnlyIfReducedTy=nullptr)
Getelementptr form.
Definition Constants.h:1274
static LLVM_ABI Constant * getBinOpIdentity(unsigned Opcode, Type *Ty, bool AllowRHSConstant=false, bool NSZ=false)
Return the identity constant for a binary opcode.
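A minimal sketch of how an identity constant drives folds such as "add X, 0" -> X or "and X, -1" -> X; the helper name is hypothetical:

#include "llvm/IR/Constants.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// If RHS is the identity for Opc, then "X Opc RHS" simplifies to X.
// Constants are uniqued per context, so pointer equality suffices.
bool rhsIsIdentity(unsigned Opc, Constant *RHS) {
  Constant *Id = ConstantExpr::getBinOpIdentity(Opc, RHS->getType());
  return Id && Id == RHS;
}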
static LLVM_ABI std::optional< ConstantFPRange > makeExactFCmpRegion(FCmpInst::Predicate Pred, const APFloat &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
ConstantFP - Floating Point Values [float, double].
Definition Constants.h:277
const APFloat & getValueAPF() const
Definition Constants.h:320
static LLVM_ABI Constant * getZero(Type *Ty, bool Negative=false)
static Constant * getNegativeZero(Type *Ty)
Definition Constants.h:315
static LLVM_ABI Constant * getNaN(Type *Ty, bool Negative=false, uint64_t Payload=0)
This is the shared class of boolean and integer constants.
Definition Constants.h:87
static LLVM_ABI ConstantInt * getTrue(LLVMContext &Context)
static ConstantInt * getSigned(IntegerType *Ty, int64_t V)
Return a ConstantInt with the specified value for the specified type.
Definition Constants.h:131
static LLVM_ABI ConstantInt * getFalse(LLVMContext &Context)
uint64_t getZExtValue() const
Return the constant as a 64-bit unsigned integer value after it has been zero extended as appropriate for the type of this constant.
Definition Constants.h:163
static LLVM_ABI ConstantInt * getBool(LLVMContext &Context, bool V)
static LLVM_ABI ConstantPointerNull * get(PointerType *T)
Static factory methods - Return objects of the specified value.
This class represents a range of values.
const APInt * getSingleElement() const
If this set contains a single element, return it, otherwise return null.
LLVM_ABI bool isFullSet() const
Return true if this set contains all of the elements possible for this data-type.
LLVM_ABI bool isEmptySet() const
Return true if this set contains no members.
static LLVM_ABI ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred, const APInt &Other)
Produce the exact range such that all values in the returned range satisfy the given predicate with any value contained within Other.
LLVM_ABI ConstantRange inverse() const
Return a new range that is the logical not of the current set.
LLVM_ABI bool contains(const APInt &Val) const
Return true if the specified value is in the set.
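A minimal sketch of deciding an integer compare through an exact region; the wrapper is illustrative:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/InstrTypes.h"
using namespace llvm;

// makeExactICmpRegion(Pred, C) is precisely the set of LHS values for which
// "icmp Pred LHS, C" holds, so membership answers the comparison.
bool evalICmpAgainstConstant(CmpInst::Predicate Pred, const APInt &LHS,
                             const APInt &C) {
  return ConstantRange::makeExactICmpRegion(Pred, C).contains(LHS);
}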
static LLVM_ABI Constant * get(StructType *T, ArrayRef< Constant * > V)
static LLVM_ABI Constant * getSplat(ElementCount EC, Constant *Elt)
Return a ConstantVector with the specified constant in each element.
static LLVM_ABI Constant * get(ArrayRef< Constant * > V)
This is an important base class in LLVM.
Definition Constant.h:43
static LLVM_ABI Constant * getAllOnesValue(Type *Ty)
LLVM_ABI bool isAllOnesValue() const
Return true if this is the value that would be returned by getAllOnesValue.
static LLVM_ABI Constant * getNullValue(Type *Ty)
Constructor to create a '0' constant of arbitrary type.
LLVM_ABI bool isNaN() const
Return true if this is a floating-point NaN constant or a vector floating-point constant with all NaN elements.
LLVM_ABI Constant * getAggregateElement(unsigned Elt) const
For aggregates (struct/array/vector) return the constant that corresponds to the specified element if possible, or null if not.
LLVM_ABI bool isNullValue() const
Return true if this is the value that would be returned by getNullValue.
Definition Constants.cpp:90
A parsed version of the target data layout string and methods for querying it.
Definition DataLayout.h:63
unsigned getPointerSizeInBits(unsigned AS=0) const
The size in bits of the pointer representation in a given address space.
Definition DataLayout.h:479
LLVM_ABI unsigned getIndexTypeSizeInBits(Type *Ty) const
The size in bits of the index used in GEP calculation for this type.
LLVM_ABI IntegerType * getIndexType(LLVMContext &C, unsigned AddressSpace) const
Returns the type of a GEP index in AddressSpace.
TypeSize getTypeAllocSize(Type *Ty) const
Returns the offset in bytes between successive objects of the specified type, including alignment pad...
unsigned getIndexSizeInBits(unsigned AS) const
The size in bits of indices used for address calculation in getelementptr and for addresses in the given address space.
Definition DataLayout.h:487
TypeSize getTypeSizeInBits(Type *Ty) const
Size examples:
Definition DataLayout.h:760
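A minimal sketch of why offset arithmetic queries the index width rather than the pointer width; the helper is hypothetical:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// GEP offsets are computed in the index type of the pointer's address
// space, which can be narrower than the pointer itself on some targets.
unsigned gepOffsetWidth(const DataLayout &DL, Type *PtrTy) {
  return DL.getIndexTypeSizeInBits(PtrTy);
}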
Legacy analysis pass which computes a DominatorTree.
Definition Dominators.h:322
DominatorTree & getDomTree()
Definition Dominators.h:330
Concrete subclass of DominatorTreeBase that is used to compute a normal dominator tree.
Definition Dominators.h:165
LLVM_ABI bool dominates(const BasicBlock *BB, const Use &U) const
Return true if the (end of the) basic block BB dominates the use U.
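A minimal sketch of the dominance check that guards value substitution; the helper name is illustrative:

#include "llvm/IR/Dominators.h"
using namespace llvm;

// A value defined in BB may only replace use U when BB dominates U;
// otherwise the replacement would not be available at that point.
bool substitutionIsSafe(const DominatorTree &DT, const BasicBlock *BB,
                        const Use &U) {
  return DT.dominates(BB, U);
}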
This instruction extracts a struct member or array element value from an aggregate value.
This instruction compares its operands according to the predicate given to the constructor.
Convenience struct for specifying and reasoning about fast-math flags.
Definition FMF.h:22
bool noSignedZeros() const
Definition FMF.h:67
bool noInfs() const
Definition FMF.h:66
bool allowReassoc() const
Flag queries.
Definition FMF.h:64
bool noNaNs() const
Definition FMF.h:65
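A minimal sketch of how these flags gate a floating-point fold; the helper and its parameters are hypothetical:

#include "llvm/IR/FMF.h"
using namespace llvm;

// "fadd X, -0.0" always yields X, but "fadd X, +0.0" yields X only under
// nsz: if X is -0.0, then -0.0 + +0.0 evaluates to +0.0, flipping the sign.
bool canDropFAddZero(FastMathFlags FMF, bool ZeroIsNegative) {
  return ZeroIsNegative || FMF.noSignedZeros();
}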
Represents calls to the gc.relocate intrinsic.
LLVM_ABI Value * getBasePtr() const
LLVM_ABI Value * getDerivedPtr() const
Represents flags for the getelementptr instruction/expression.
static LLVM_ABI Type * getIndexedType(Type *Ty, ArrayRef< Value * > IdxList)
Returns the result type of a getelementptr with the given source element type and indexes.
This instruction compares its operands according to the predicate given to the constructor.
static LLVM_ABI bool compare(const APInt &LHS, const APInt &RHS, ICmpInst::Predicate Pred)
Return result of LHS Pred RHS comparison.
Predicate getSignedPredicate() const
For example, EQ->EQ, SLE->SLE, UGT->SGT, etc.
bool isEquality() const
Return true if this predicate is either EQ or NE.
static bool isEquality(Predicate P)
Return true if this predicate is either EQ or NE.
bool isRelational() const
Return true if the predicate is relational (not EQ or NE).
Predicate getUnsignedPredicate() const
For example, EQ->EQ, SLE->ULE, UGT->UGT, etc.
This instruction inserts a struct field or array element value into an aggregate value.
LLVM_ABI bool hasNoSignedZeros() const LLVM_READONLY
Determine whether the no-signed-zeros flag is set.
static bool isBitwiseLogicOp(unsigned Opcode)
Determine if the Opcode is and/or/xor.
LLVM_ABI bool isAssociative() const LLVM_READONLY
Return true if the instruction is associative:
LLVM_ABI bool isCommutative() const LLVM_READONLY
Return true if the instruction is commutative:
LLVM_ABI const Function * getFunction() const
Return the function this instruction belongs to.
An instruction for reading from memory.
bool isVolatile() const
Return true if this is a load from a volatile memory location.
Metadata node.
Definition Metadata.h:1078
static APInt getSaturationPoint(Intrinsic::ID ID, unsigned numBits)
Min/max intrinsics are monotonic: they operate on fixed-bitwidth values, so there is a certain threshold...
static ICmpInst::Predicate getPredicate(Intrinsic::ID ID)
Returns the comparison predicate underlying the intrinsic.
op_range incoming_values()
BasicBlock * getIncomingBlock(unsigned i) const
Return incoming basic block number i.
Value * getIncomingValue(unsigned i) const
Return incoming value number i.
unsigned getNumIncomingValues() const
Return the number of incoming edges.
Pass interface - Implemented by all 'passes'.
Definition Pass.h:99
static LLVM_ABI PoisonValue * get(Type *T)
Static factory methods - Return a 'poison' object of the specified type.
This class represents a cast from a pointer to an integer.
This class represents a sign extension of integer types.
This class represents the LLVM 'select' instruction.
size_type size() const
Determine the number of elements in the SetVector.
Definition SetVector.h:102
bool insert(const value_type &X)
Insert a new element into the SetVector.
Definition SetVector.h:150
static void commuteShuffleMask(MutableArrayRef< int > Mask, unsigned InVecNumElts)
Change values in a shuffle permute mask assuming the two vector operands of length InVecNumElts have swapped position.
A templated base class for SmallPtrSet which provides the typesafe interface that is common across al...
std::pair< iterator, bool > insert(PtrType Ptr)
Inserts Ptr if and only if there is no element in the container equal to Ptr.
bool contains(ConstPtrType Ptr) const
SmallPtrSet - This class implements a set which is optimized for holding SmallSize or less elements.
A SetVector that performs no allocations if smaller than a certain size.
Definition SetVector.h:338
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
void assign(size_type NumElts, ValueParamT Elt)
void reserve(size_type N)
void push_back(const T &Elt)
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
TargetLibraryInfo & getTLI(const Function &F)
Provides information about what library functions are available for the current target.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition Type.h:45
bool isVectorTy() const
True if this is an instance of VectorType.
Definition Type.h:273
static LLVM_ABI IntegerType * getInt32Ty(LLVMContext &C)
Definition Type.cpp:297
bool isIntOrIntVectorTy() const
Return true if this is an integer type or a vector of integer types.
Definition Type.h:246
LLVM_ABI TypeSize getPrimitiveSizeInBits() const LLVM_READONLY
Return the basic size of this type if it is a primitive type.
Definition Type.cpp:198
LLVMContext & getContext() const
Return the LLVMContext in which this type was uniqued.
Definition Type.h:128
LLVM_ABI unsigned getScalarSizeInBits() const LLVM_READONLY
If this is a vector type, return the getPrimitiveSizeInBits value for the element type.
Definition Type.cpp:231
static LLVM_ABI UndefValue * get(Type *T)
Static factory methods - Return an 'undef' object of the specified type.
A Use represents the edge between a Value definition and its users.
Definition Use.h:35
Value * getOperand(unsigned i) const
Definition User.h:232
LLVM Value Representation.
Definition Value.h:75
Type * getType() const
All values are typed, get the type of this value.
Definition Value.h:256
LLVM_ABI const Value * stripAndAccumulateConstantOffsets(const DataLayout &DL, APInt &Offset, bool AllowNonInbounds, bool AllowInvariantGroup=false, function_ref< bool(Value &Value, APInt &Offset)> ExternalAnalysis=nullptr, bool LookThroughIntToPtr=false) const
Accumulate the constant offset this value has compared to a base pointer.
Base class of all SIMD vector types.
static LLVM_ABI VectorType * get(Type *ElementType, ElementCount EC)
This static method is the primary way to construct an VectorType.
This class represents zero extension of integer types.
constexpr ScalarTy getFixedValue() const
Definition TypeSize.h:201
constexpr bool isScalable() const
Returns whether the quantity is scaled by a runtime quantity (vscale).
Definition TypeSize.h:169
constexpr bool isFixed() const
Returns true if the quantity is not scaled by vscale.
Definition TypeSize.h:172
constexpr ScalarTy getKnownMinValue() const
Returns the minimum value this quantity can represent.
Definition TypeSize.h:166
CallInst * Call
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
unsigned ID
LLVM IR allows the use of arbitrary numbers as calling convention identifiers.
Definition CallingConv.h:24
@ C
The default llvm calling convention, compatible with C.
Definition CallingConv.h:34
SpecificConstantMatch m_ZeroInt()
Convenience matchers for specific integer values.
BinaryOp_match< SpecificConstantMatch, SrcTy, TargetOpcode::G_SUB > m_Neg(const SrcTy &&Src)
Matches a register negated by a G_SUB.
BinaryOp_match< SrcTy, SpecificConstantMatch, TargetOpcode::G_XOR, true > m_Not(const SrcTy &&Src)
Matches a register not-ed by a G_XOR.
cst_pred_ty< is_all_ones > m_AllOnes()
Match an integer or vector with all bits set.
cst_pred_ty< is_lowbit_mask > m_LowBitMask()
Match an integer or vector with only the low bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::And > m_And(const LHS &L, const RHS &R)
PtrAdd_match< PointerOpTy, OffsetOpTy > m_PtrAdd(const PointerOpTy &PointerOp, const OffsetOpTy &OffsetOp)
Matches GEP with i8 source element type.
cst_pred_ty< is_negative > m_Negative()
Match an integer or vector of negative values.
BinaryOp_match< LHS, RHS, Instruction::Add > m_Add(const LHS &L, const RHS &R)
class_match< BinaryOperator > m_BinOp()
Match an arbitrary binary operation and ignore it.
CmpClass_match< LHS, RHS, FCmpInst > m_FCmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::FMul, true > m_c_FMul(const LHS &L, const RHS &R)
Matches FMul with LHS and RHS in either order.
cst_pred_ty< is_sign_mask > m_SignMask()
Match an integer or vector with only the sign bit(s) set.
BinaryOp_match< LHS, RHS, Instruction::AShr > m_AShr(const LHS &L, const RHS &R)
auto m_PtrToIntOrAddr(const OpTy &Op)
Matches PtrToInt or PtrToAddr.
cstfp_pred_ty< is_inf > m_Inf()
Match a positive or negative infinity FP constant.
m_Intrinsic_Ty< Opnd0 >::Ty m_BitReverse(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::FSub > m_FSub(const LHS &L, const RHS &R)
cst_pred_ty< is_power2 > m_Power2()
Match an integer or vector power-of-2.
BinaryOp_match< cstfp_pred_ty< is_any_zero_fp >, RHS, Instruction::FSub > m_FNegNSZ(const RHS &X)
Match 'fneg X' as 'fsub +-0.0, X'.
BinaryOp_match< LHS, RHS, Instruction::URem > m_URem(const LHS &L, const RHS &R)
class_match< Constant > m_Constant()
Match an arbitrary Constant and ignore it.
ap_match< APInt > m_APInt(const APInt *&Res)
Match a ConstantInt or splatted ConstantVector, binding the specified pointer to the contained APInt.
BinaryOp_match< LHS, RHS, Instruction::And, true > m_c_And(const LHS &L, const RHS &R)
Matches an And with LHS and RHS in either order.
CastInst_match< OpTy, TruncInst > m_Trunc(const OpTy &Op)
Matches Trunc.
BinaryOp_match< LHS, RHS, Instruction::Xor > m_Xor(const LHS &L, const RHS &R)
ap_match< APInt > m_APIntAllowPoison(const APInt *&Res)
Match APInt while allowing poison in splat vector constants.
specific_intval< false > m_SpecificInt(const APInt &V)
Match a specific integer value or vector with all elements equal to the value.
bool match(Val *V, const Pattern &P)
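A minimal sketch of the match() idiom this file relies on; the helper is illustrative:

#include "llvm/ADT/APInt.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Recognize "add X, C" and bind both pieces. m_APInt also accepts splat
// vector constants, so one pattern covers scalar and vector IR alike.
bool matchAddOfConstant(Value *V, Value *&X, const APInt *&C) {
  return match(V, m_Add(m_Value(X), m_APInt(C)));
}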
BinOpPred_match< LHS, RHS, is_idiv_op > m_IDiv(const LHS &L, const RHS &R)
Matches integer division operations.
cstfp_pred_ty< is_any_zero_fp > m_AnyZeroFP()
Match a floating-point negative zero or positive zero.
specificval_ty m_Specific(const Value *V)
Match if we have a specific specified value.
BinOpPred_match< LHS, RHS, is_right_shift_op > m_Shr(const LHS &L, const RHS &R)
Matches logical shift operations.
ap_match< APFloat > m_APFloat(const APFloat *&Res)
Match a ConstantFP or splatted ConstantVector, binding the specified pointer to the contained APFloat...
ap_match< APFloat > m_APFloatAllowPoison(const APFloat *&Res)
Match APFloat while allowing poison in splat vector constants.
CmpClass_match< LHS, RHS, ICmpInst, true > m_c_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
Matches an ICmp with a predicate over LHS and RHS in either order.
TwoOps_match< Val_t, Idx_t, Instruction::ExtractElement > m_ExtractElt(const Val_t &Val, const Idx_t &Idx)
Matches ExtractElementInst.
class_match< ConstantInt > m_ConstantInt()
Match an arbitrary ConstantInt and ignore it.
cst_pred_ty< is_one > m_One()
Match an integer 1 or a vector with all elements equal to 1.
IntrinsicID_match m_Intrinsic()
Match intrinsic calls like this: m_Intrinsic<Intrinsic::fabs>(m_Value(X))
ThreeOps_match< Cond, LHS, RHS, Instruction::Select > m_Select(const Cond &C, const LHS &L, const RHS &R)
Matches SelectInst.
cstfp_pred_ty< is_neg_zero_fp > m_NegZeroFP()
Match a floating-point negative zero.
specific_fpval m_SpecificFP(double V)
Match a specific floating point value or vector with all elements equal to the value.
match_combine_and< LTy, RTy > m_CombineAnd(const LTy &L, const RTy &R)
Combine two pattern matchers matching L && R.
MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > m_SMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_Sqrt(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Xor, true > m_c_Xor(const LHS &L, const RHS &R)
Matches an Xor with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Mul > m_Mul(const LHS &L, const RHS &R)
deferredval_ty< Value > m_Deferred(Value *const &V)
Like m_Specific(), but works if the specific value to match is determined as part of the same match() expression.
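A short hypothetical example of m_Deferred in action:

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// m_Deferred reuses the binding made by m_Value earlier in the same
// match() call, so this fires only when both xor operands are one value.
bool isSelfXor(Value *V) {
  Value *X;
  return match(V, m_Xor(m_Value(X), m_Deferred(X)));
}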
cst_pred_ty< is_zero_int > m_ZeroInt()
Match an integer 0 or a vector with all elements equal to 0.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoSignedWrap > m_NSWShl(const LHS &L, const RHS &R)
CastInst_match< OpTy, ZExtInst > m_ZExt(const OpTy &Op)
Matches ZExt.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Shl, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWShl(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::UDiv > m_UDiv(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty > m_UMax(const LHS &L, const RHS &R)
match_immconstant_ty m_ImmConstant()
Match an arbitrary immediate Constant and ignore it.
cst_pred_ty< custom_checkfn< APInt > > m_CheckedInt(function_ref< bool(const APInt &)> CheckFn)
Match an integer or vector where CheckFn(ele) for each element is true.
specific_fpval m_FPOne()
Match a float 1.0 or vector with all elements equal to 1.0.
BinaryOp_match< LHS, RHS, Instruction::Add, true > m_c_Add(const LHS &L, const RHS &R)
Matches a Add with LHS and RHS in either order.
CastInst_match< OpTy, UIToFPInst > m_UIToFP(const OpTy &Op)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShl(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty, true > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty, true >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty, true > > > m_c_MaxOrMin(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::SDiv > m_SDiv(const LHS &L, const RHS &R)
OverflowingBinaryOp_match< LHS, RHS, Instruction::Sub, OverflowingBinaryOperator::NoUnsignedWrap > m_NUWSub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty > m_SMax(const LHS &L, const RHS &R)
class_match< Value > m_Value()
Match an arbitrary value and ignore it.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Add, OverflowingBinaryOperator::NoSignedWrap > m_NSWAdd(const LHS &L, const RHS &R)
CastInst_match< OpTy, SIToFPInst > m_SIToFP(const OpTy &Op)
BinaryOp_match< LHS, RHS, Instruction::LShr > m_LShr(const LHS &L, const RHS &R)
CmpClass_match< LHS, RHS, ICmpInst > m_ICmp(CmpPredicate &Pred, const LHS &L, const RHS &R)
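A minimal sketch of the commutative-compare idiom; the helper is illustrative:

#include "llvm/IR/CmpPredicate.h"
#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// m_c_ICmp tries both operand orders and swaps the predicate when it
// commutes, so "icmp eq 0, X" and "icmp eq X, 0" both match here.
bool isCompareWithZero(Value *V, Value *&X, CmpPredicate &Pred) {
  return match(V, m_c_ICmp(Pred, m_Value(X), m_Zero()));
}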
Exact_match< T > m_Exact(const T &SubPattern)
FNeg_match< OpTy > m_FNeg(const OpTy &X)
Match 'fneg X' as 'fsub -0.0, X'.
cstfp_pred_ty< is_pos_zero_fp > m_PosZeroFP()
Match a floating-point positive zero.
BinaryOp_match< LHS, RHS, Instruction::FAdd, true > m_c_FAdd(const LHS &L, const RHS &R)
Matches FAdd with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::And, true > m_c_LogicalAnd(const LHS &L, const RHS &R)
Matches L && R with LHS and RHS in either order.
BinaryOp_match< LHS, RHS, Instruction::Shl > m_Shl(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_VecReverse(const Opnd0 &Op0)
match_combine_or< match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, smax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, smin_pred_ty > >, match_combine_or< MaxMin_match< ICmpInst, LHS, RHS, umax_pred_ty >, MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > > > m_MaxOrMin(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0, Opnd1, Opnd2 >::Ty m_FShr(const Opnd0 &Op0, const Opnd1 &Op1, const Opnd2 &Op2)
BinaryOp_match< LHS, RHS, Instruction::SRem > m_SRem(const LHS &L, const RHS &R)
auto m_Undef()
Match an arbitrary undef constant.
cstfp_pred_ty< is_nan > m_NaN()
Match an arbitrary NaN constant.
BinaryOp_match< LHS, RHS, Instruction::Or > m_Or(const LHS &L, const RHS &R)
m_Intrinsic_Ty< Opnd0 >::Ty m_BSwap(const Opnd0 &Op0)
CastInst_match< OpTy, SExtInst > m_SExt(const OpTy &Op)
Matches SExt.
is_zero m_Zero()
Match any null constant or a vector with all elements equal to 0.
BinaryOp_match< LHS, RHS, Instruction::Or, true > m_c_Or(const LHS &L, const RHS &R)
Matches an Or with LHS and RHS in either order.
LogicalOp_match< LHS, RHS, Instruction::Or, true > m_c_LogicalOr(const LHS &L, const RHS &R)
Matches L || R with LHS and RHS in either order.
ThreeOps_match< Val_t, Elt_t, Idx_t, Instruction::InsertElement > m_InsertElt(const Val_t &Val, const Elt_t &Elt, const Idx_t &Idx)
Matches InsertElementInst.
ElementWiseBitCast_match< OpTy > m_ElementWiseBitCast(const OpTy &Op)
m_Intrinsic_Ty< Opnd0 >::Ty m_FAbs(const Opnd0 &Op0)
BinaryOp_match< LHS, RHS, Instruction::Mul, true > m_c_Mul(const LHS &L, const RHS &R)
Matches a Mul with LHS and RHS in either order.
CastOperator_match< OpTy, Instruction::PtrToInt > m_PtrToInt(const OpTy &Op)
Matches PtrToInt.
MatchFunctor< Val, Pattern > match_fn(const Pattern &P)
A match functor that can be used as a UnaryPredicate in functional algorithms like all_of.
OverflowingBinaryOp_match< LHS, RHS, Instruction::Mul, OverflowingBinaryOperator::NoSignedWrap > m_NSWMul(const LHS &L, const RHS &R)
BinaryOp_match< LHS, RHS, Instruction::Sub > m_Sub(const LHS &L, const RHS &R)
MaxMin_match< ICmpInst, LHS, RHS, umin_pred_ty > m_UMin(const LHS &L, const RHS &R)
match_combine_or< LTy, RTy > m_CombineOr(const LTy &L, const RTy &R)
Combine two pattern matchers matching L || R.
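A minimal sketch of folding two alternative shapes into one pattern (hypothetical helper):

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Accept either "shl X, C" or "mul X, C" as a scaled index in one match.
bool isScaledIndex(Value *V, Value *&X) {
  return match(V, m_CombineOr(m_Shl(m_Value(X), m_ConstantInt()),
                              m_Mul(m_Value(X), m_ConstantInt())));
}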
ExceptionBehavior
Exception behavior used for floating point operations.
Definition FPEnv.h:39
@ ebStrict
This corresponds to "fpexcept.strict".
Definition FPEnv.h:42
@ ebIgnore
This corresponds to "fpexcept.ignore".
Definition FPEnv.h:40
This is an optimization pass for GlobalISel generic memory operations.
LLVM_ABI Intrinsic::ID getInverseMinMaxIntrinsic(Intrinsic::ID MinMaxID)
LLVM_ABI Value * simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for an AShr, fold the result or return null.
unsigned Log2_32_Ceil(uint32_t Value)
Return the ceil log base 2 of the specified value, 32 if the value is zero.
Definition MathExtras.h:355
@ Offset
Definition DWP.cpp:477
LLVM_ABI KnownFPClass computeKnownFPClass(const Value *V, const APInt &DemandedElts, FPClassTest InterestedClasses, const SimplifyQuery &SQ, unsigned Depth=0)
Determine which floating-point classes are valid for V, and return them in KnownFPClass bit sets.
bool all_of(R &&range, UnaryPredicate P)
Provide wrappers to std::all_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1727
LLVM_ABI Value * simplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FMul, fold the result or return null.
LLVM_ABI Value * simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef< Value * > Indices, GEPNoWrapFlags NW, const SimplifyQuery &Q)
Given operands for a GetElementPtrInst, fold the result or return null.
LLVM_ABI bool isValidAssumeForContext(const Instruction *I, const Instruction *CxtI, const DominatorTree *DT=nullptr, bool AllowEphemerals=false)
Return true if it is valid to use the assumptions provided by an assume intrinsic,...
LLVM_ABI bool canCreatePoison(const Operator *Op, bool ConsiderFlagsAndMetadata=true)
LLVM_ABI Constant * ConstantFoldSelectInstruction(Constant *Cond, Constant *V1, Constant *V2)
Attempt to constant fold a select instruction with the specified operands.
LLVM_ABI Value * simplifyFreezeInst(Value *Op, const SimplifyQuery &Q)
Given an operand for a Freeze, see if we can fold the result.
LLVM_ABI Constant * ConstantFoldFPInstOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL, const Instruction *I, bool AllowNonDeterministic=true)
Attempt to constant fold a floating point binary operation with the specified operands,...
LLVM_ABI bool isSignBitCheck(ICmpInst::Predicate Pred, const APInt &RHS, bool &TrueIfSigned)
Given an exploded icmp instruction, return true if the comparison only checks the sign bit.
LLVM_ABI bool canConstantFoldCallTo(const CallBase *Call, const Function *F)
canConstantFoldCallTo - Return true if it's even possible to fold a call to the specified function.
LLVM_ABI APInt getMinMaxLimit(SelectPatternFlavor SPF, unsigned BitWidth)
Return the minimum or maximum constant value for the specified integer min/max flavor and type.
decltype(auto) dyn_cast(const From &Val)
dyn_cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:644
LLVM_ABI Value * simplifySDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for an SDiv, fold the result or return null.
LLVM_ABI Value * simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q)
Given operand for a UnaryOperator, fold the result or return null.
bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM)
Returns true if the exception handling behavior and rounding mode match what is used in the default floating-point environment.
Definition FPEnv.h:68
LLVM_ABI Value * simplifyMulInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Mul, fold the result or return null.
LLVM_ABI bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, APInt &Offset, const DataLayout &DL, DSOLocalEquivalent **DSOEquiv=nullptr)
If this constant is a constant offset from a global, return the global and the constant.
LLVM_ABI Value * simplifyInstructionWithOperands(Instruction *I, ArrayRef< Value * > NewOps, const SimplifyQuery &Q)
Like simplifyInstruction but the operands of I are replaced with NewOps.
LLVM_ABI Value * simplifyCall(CallBase *Call, Value *Callee, ArrayRef< Value * > Args, const SimplifyQuery &Q)
Given a callsite, callee, and arguments, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCompareInstOperands(unsigned Predicate, Constant *LHS, Constant *RHS, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, const Instruction *I=nullptr)
Attempt to constant fold a compare instruction (icmp/fcmp) with the specified operands.
bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM)
Returns true if the rounding mode RM may be QRM at compile time or at run time.
Definition FPEnv.h:80
LLVM_ABI bool isNoAliasCall(const Value *V)
Return true if this pointer is returned by a noalias function.
LLVM_ABI Value * simplifyFCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q)
Given operands for an FCmpInst, fold the result or return null.
LLVM_ABI Value * getSplatValue(const Value *V)
Get splat value if the input is a splat vector or return nullptr.
LLVM_ABI Constant * ConstantFoldGetElementPtr(Type *Ty, Constant *C, std::optional< ConstantRange > InRange, ArrayRef< Value * > Idxs)
LLVM_ABI CmpInst::Predicate getMinMaxPred(SelectPatternFlavor SPF, bool Ordered=false)
Return the canonical comparison predicate for the specified minimum/maximum flavor.
LLVM_ABI Value * simplifyShuffleVectorInst(Value *Op0, Value *Op1, ArrayRef< int > Mask, Type *RetTy, const SimplifyQuery &Q)
Given operands for a ShuffleVectorInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldCall(const CallBase *Call, Function *F, ArrayRef< Constant * > Operands, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldCall - Attempt to constant fold a call to the specified function with the specified arguments, returning null if unsuccessful.
LLVM_ABI Value * simplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Or, fold the result or return null.
LLVM_ABI Value * simplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an Xor, fold the result or return null.
LLVM_ABI ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD)
Parse out a conservative ConstantRange from !range metadata.
LLVM_ABI ConstantRange computeConstantRange(const Value *V, bool ForSigned, bool UseInstrInfo=true, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Determine the possible constant range of an integer or vector of integer value.
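A minimal sketch of a range-based fold; the helper is hypothetical:

#include "llvm/ADT/APInt.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/ConstantRange.h"
using namespace llvm;

// If V's computed range excludes zero, "icmp ne V, 0" folds to true
// without inspecting V's defining instruction any further.
bool rangeExcludesZero(const Value *V) {
  ConstantRange CR = computeConstantRange(V, /*ForSigned=*/false);
  return !CR.contains(APInt::getZero(CR.getBitWidth()));
}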
LLVM_ABI Constant * ConstantFoldExtractValueInstruction(Constant *Agg, ArrayRef< unsigned > Idxs)
Attempt to constant fold an extractvalue instruction with the specified operands and indices.
LLVM_ABI bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI)
Tests if a value is a call or invoke to a library function that allocates memory (either malloc,...
LLVM_ABI bool MaskedValueIsZero(const Value *V, const APInt &Mask, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if 'V & Mask' is known to be zero.
LLVM_ABI Value * simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty, const SimplifyQuery &Q)
Given operands for a CastInst, fold the result or return null.
LLVM_ABI Value * simplifyInstruction(Instruction *I, const SimplifyQuery &Q)
See if we can compute a simplified version of this instruction.
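A minimal sketch of the usual caller-side protocol; the wrapper is illustrative:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// simplifyInstruction never creates IR: it returns an existing value or a
// constant, or null. The caller decides what to do with a successful fold.
bool tryFoldAway(Instruction *I, const SimplifyQuery &Q) {
  if (Value *V = simplifyInstruction(I, Q)) {
    I->replaceAllUsesWith(V);
    I->eraseFromParent();
    return true;
  }
  return false;
}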
unsigned M1(unsigned Val)
Definition VE.h:377
LLVM_ABI Value * simplifySubInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Sub, fold the result or return null.
LLVM_ABI Value * simplifyAddInst(Value *LHS, Value *RHS, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for an Add, fold the result or return null.
LLVM_ABI Constant * ConstantFoldConstant(const Constant *C, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr)
ConstantFoldConstant - Fold the constant using the specified DataLayout.
auto dyn_cast_or_null(const Y &Val)
Definition Casting.h:754
OutputIt transform(R &&Range, OutputIt d_first, UnaryFunction F)
Wrapper function around std::transform to apply a function to a range and store the result elsewhere.
Definition STLExtras.h:1970
bool any_of(R &&range, UnaryPredicate P)
Provide wrappers to std::any_of which take ranges instead of having to pass begin/end explicitly.
Definition STLExtras.h:1734
LLVM_ABI bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout &DL, const TargetLibraryInfo *TLI, ObjectSizeOpts Opts={})
Compute the size of the object pointed by Ptr.
LLVM_ABI bool isSplatValue(const Value *V, int Index=-1, unsigned Depth=0)
Return true if each element of the vector value V is poisoned or equal to every other non-poisoned element.
LLVM_ABI Constant * ConstantFoldLoadFromUniformValue(Constant *C, Type *Ty, const DataLayout &DL)
If C is a uniform value where all bits are the same (either all zero, all ones, all undef or all poison), return the corresponding uniform value in the new type.
LLVM_ABI SelectPatternFlavor getInverseMinMaxFlavor(SelectPatternFlavor SPF)
Return the inverse minimum/maximum flavor of the specified flavor.
LLVM_ABI bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI=nullptr, const DominatorTree *DT=nullptr, AssumptionCache *AC=nullptr, SmallSetVector< Instruction *, 8 > *UnsimplifiedUsers=nullptr)
Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
LLVM_ABI Constant * ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op, const DataLayout &DL)
Attempt to constant fold a unary operation with the specified operand.
SelectPatternFlavor
Specific patterns of select instructions we can match.
LLVM_ABI Value * simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW, const SimplifyQuery &Q)
Given operands for a Shl, fold the result or return null.
LLVM_ABI Value * simplifyFNegInst(Value *Op, FastMathFlags FMF, const SimplifyQuery &Q)
Given operand for an FNeg, fold the result or return null.
LLVM_ABI Value * simplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FSub, fold the result or return null.
LLVM_ABI bool canReplacePointersIfEqual(const Value *From, const Value *To, const DataLayout &DL)
Returns true if a pointer value From can be replaced with another pointer value To if they are deemed equal.
Definition Loads.cpp:859
LLVM_ABI bool impliesPoison(const Value *ValAssumedPoison, const Value *V)
Return true if V is poison given that ValAssumedPoison is already poison.
LLVM_ABI Value * simplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FRem, fold the result or return null.
LLVM_ABI Value * simplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FAdd, fold the result or return null.
FPClassTest
Floating-point class tests, supported by 'is_fpclass' intrinsic.
LLVM_ABI void computeKnownBits(const Value *V, KnownBits &Known, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Determine which bits of V are known to be either zero or one and return them in the KnownZero/KnownOne bit sets.
LLVM_ABI Value * simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact, const SimplifyQuery &Q)
Given operands for a LShr, fold the result or return null.
LLVM_ABI bool NullPointerIsDefined(const Function *F, unsigned AS=0)
Check whether null pointer dereferencing is considered undefined behavior for a given function or an address space.
LLVM_ABI bool cannotBeNegativeZero(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if we can prove that the specified FP value is never equal to -0.0.
LLVM_ABI Value * simplifyICmpInst(CmpPredicate Pred, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an ICmpInst, fold the result or return null.
LLVM_ABI ConstantRange getVScaleRange(const Function *F, unsigned BitWidth)
Determine the possible constant range of vscale with the given bit width, based on the vscale_range f...
LLVM_ABI Constant * ConstantFoldCastOperand(unsigned Opcode, Constant *C, Type *DestTy, const DataLayout &DL)
Attempt to constant fold a cast with the specified operand.
LLVM_ABI Value * simplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an And, fold the result or return null.
bool isa(const From &Val)
isa<X> - Return true if the parameter to the template is an instance of one of the template type arguments.
Definition Casting.h:548
LLVM_ABI bool intrinsicPropagatesPoison(Intrinsic::ID IID)
Return whether this intrinsic propagates poison for all operands.
LLVM_ABI Value * simplifyExtractValueInst(Value *Agg, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an ExtractValueInst, fold the result or return null.
LLVM_ABI bool isNotCrossLaneOperation(const Instruction *I)
Return true if the instruction doesn't potentially cross vector lanes.
LLVM_ABI Value * simplifyInsertValueInst(Value *Agg, Value *Val, ArrayRef< unsigned > Idxs, const SimplifyQuery &Q)
Given operands for an InsertValueInst, fold the result or return null.
LLVM_ABI Constant * ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS, Constant *RHS, const DataLayout &DL)
Attempt to constant fold a binary operation with the specified operands.
LLVM_ABI Value * simplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for an FDiv, fold the result or return null.
LLVM_ABI bool isKnownNonZero(const Value *V, const SimplifyQuery &Q, unsigned Depth=0)
Return true if the given value is known to be non-zero when defined.
constexpr int PoisonMaskElem
LLVM_ABI Value * simplifyLoadInst(LoadInst *LI, Value *PtrOp, const SimplifyQuery &Q)
Given a load instruction and its pointer operand, fold the result or return null.
LLVM_ABI Value * simplifyFMAFMul(Value *LHS, Value *RHS, FastMathFlags FMF, const SimplifyQuery &Q, fp::ExceptionBehavior ExBehavior=fp::ebIgnore, RoundingMode Rounding=RoundingMode::NearestTiesToEven)
Given operands for the multiplication of an FMA, fold the result or return null.
LLVM_ABI SelectPatternResult matchDecomposedSelectPattern(CmpInst *CmpI, Value *TrueVal, Value *FalseVal, Value *&LHS, Value *&RHS, FastMathFlags FMF=FastMathFlags(), Instruction::CastOps *CastOp=nullptr, unsigned Depth=0)
Determine the pattern that a select with the given compare as its predicate and given values as its true/false operands would match.
LLVM_ABI Value * simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q)
Given a constrained FP intrinsic call, tries to compute its simplified version.
LLVM_ABI Value * simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a BinaryOperator, fold the result or return null.
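A minimal sketch of the shared fold-or-null contract (hypothetical wrapper):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Every simplify* entry point has this shape: fold if possible, otherwise
// return null so the caller can fall through to its next strategy.
Value *foldBinOpOperands(Instruction &I, const SimplifyQuery &Q) {
  return simplifyBinOp(I.getOpcode(), I.getOperand(0), I.getOperand(1), Q);
}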
std::optional< DecomposedBitTest > decomposeBitTest(Value *Cond, bool LookThroughTrunc=true, bool AllowNonZeroC=false, bool DecomposeAnd=false)
Decompose an icmp into the form ((X & Mask) pred C) if possible.
LLVM_ABI Value * findScalarElement(Value *V, unsigned EltNo)
Given a vector and an element number, see if the scalar value is already around as a register,...
LLVM_ABI bool isKnownNonEqual(const Value *V1, const Value *V2, const SimplifyQuery &SQ, unsigned Depth=0)
Return true if the given values are known to be non-equal when defined.
LLVM_ABI Value * simplifyUDivInst(Value *LHS, Value *RHS, bool IsExact, const SimplifyQuery &Q)
Given operands for a UDiv, fold the result or return null.
DWARFExpression::Operation Op
LLVM_ABI bool PointerMayBeCaptured(const Value *V, bool ReturnCaptures, unsigned MaxUsesToExplore=0)
PointerMayBeCaptured - Return true if this pointer value may be captured by the enclosing function (which is required to exist).
LLVM_ABI Value * simplifyBinaryIntrinsic(Intrinsic::ID IID, Type *ReturnType, Value *Op0, Value *Op1, const SimplifyQuery &Q, const CallBase *Call)
Given operands for a BinaryIntrinsic, fold the result or return null.
RoundingMode
Rounding mode.
@ NearestTiesToEven
roundTiesToEven.
@ TowardNegative
roundTowardNegative.
LLVM_ABI bool isGuaranteedNotToBeUndefOrPoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Return true if this function can prove that V does not have undef bits and is never poison.
unsigned M0(unsigned Val)
Definition VE.h:376
LLVM_ABI unsigned ComputeNumSignBits(const Value *Op, const DataLayout &DL, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return the number of times the sign bit of the register is replicated into the other bits.
LLVM_ABI Value * simplifyInsertElementInst(Value *Vec, Value *Elt, Value *Idx, const SimplifyQuery &Q)
Given operands for an InsertElement, fold the result or return null.
constexpr unsigned BitWidth
LLVM_ABI Value * simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp, const SimplifyQuery &Q, bool AllowRefinement, SmallVectorImpl< Instruction * > *DropFlags=nullptr)
See if V simplifies when its operand Op is replaced with RepOp.
LLVM_ABI bool maskIsAllZeroOrUndef(Value *Mask)
Given a mask vector of i1, return true if all of the elements of this predicate mask are known to be false or undef.
decltype(auto) cast(const From &Val)
cast<X> - Return the argument parameter cast to the specified type.
Definition Casting.h:560
LLVM_ABI Value * simplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for an SRem, fold the result or return null.
bool is_contained(R &&Range, const E &Element)
Returns true if Element is found in Range.
Definition STLExtras.h:1899
bool all_equal(std::initializer_list< T > Values)
Returns true if all Values in the initializer lists are equal or the list is empty.
Definition STLExtras.h:2110
LLVM_ABI Constant * ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val, ArrayRef< unsigned > Idxs)
ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue instruction with the spe...
LLVM_ABI Constant * ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty, APInt Offset, const DataLayout &DL)
Return the value that a load from C with offset Offset would produce if it is constant and determinable.
LLVM_ABI bool isKnownToBeAPowerOfTwo(const Value *V, const DataLayout &DL, bool OrZero=false, AssumptionCache *AC=nullptr, const Instruction *CxtI=nullptr, const DominatorTree *DT=nullptr, bool UseInstrInfo=true, unsigned Depth=0)
Return true if the given value is known to have exactly one bit set when defined.
@ Continue
Definition DWP.h:22
LLVM_ABI std::optional< bool > isImpliedByDomCondition(const Value *Cond, const Instruction *ContextI, const DataLayout &DL)
Return the boolean condition value in the context of the given instruction if it is known based on dominating conditions.
LLVM_ABI Value * simplifyCmpInst(CmpPredicate Predicate, Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a CmpInst, fold the result or return null.
LLVM_ABI bool isGuaranteedNotToBePoison(const Value *V, AssumptionCache *AC=nullptr, const Instruction *CtxI=nullptr, const DominatorTree *DT=nullptr, unsigned Depth=0)
Returns true if V cannot be poison, but may be undef.
LLVM_ABI Constant * ConstantFoldInstOperands(const Instruction *I, ArrayRef< Constant * > Ops, const DataLayout &DL, const TargetLibraryInfo *TLI=nullptr, bool AllowNonDeterministic=true)
ConstantFoldInstOperands - Attempt to constant fold an instruction with the specified operands.
LLVM_ABI bool isKnownNegation(const Value *X, const Value *Y, bool NeedNSW=false, bool AllowPoison=true)
Return true if the two given values are negation.
LLVM_ABI const Value * getUnderlyingObject(const Value *V, unsigned MaxLookup=MaxLookupSearchDepth)
This method strips off any GEP address adjustments, pointer casts or llvm.threadlocal....
LLVM_ABI Constant * ConstantFoldIntegerCast(Constant *C, Type *DestTy, bool IsSigned, const DataLayout &DL)
Constant fold a zext, sext or trunc, depending on IsSigned and whether the DestTy is wider or narrowe...
LLVM_ABI const SimplifyQuery getBestSimplifyQuery(Pass &, Function &)
std::pair< Value *, FPClassTest > fcmpToClassTest(FCmpInst::Predicate Pred, const Function &F, Value *LHS, Value *RHS, bool LookThroughSrc=true)
Returns a pair of values, which if passed to llvm.is.fpclass, returns the same result as an fcmp with...
LLVM_ABI void getUnderlyingObjects(const Value *V, SmallVectorImpl< const Value * > &Objects, const LoopInfo *LI=nullptr, unsigned MaxLookup=MaxLookupSearchDepth)
This method is similar to getUnderlyingObject except that it can look through phi and select instruct...
bool isCheckForZeroAndMulWithOverflow(Value *Op0, Value *Op1, bool IsAnd, Use *&Y)
Match one of the patterns up to the select/logic op: Op0 = icmp ne i4 X, 0 Agg = call { i4,...
bool canIgnoreSNaN(fp::ExceptionBehavior EB, FastMathFlags FMF)
Returns true if the possibility of a signaling NaN can be safely ignored.
Definition FPEnv.h:86
LLVM_ABI Value * simplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q)
Given operands for a URem, fold the result or return null.
LLVM_ABI Value * simplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQuery &Q)
Given operands for an ExtractElementInst, fold the result or return null.
LLVM_ABI Value * simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, const SimplifyQuery &Q)
Given operands for a SelectInst, fold the result or return null.
constexpr detail::IsaCheckPredicate< Types... > IsaPred
Function object wrapper for the llvm::isa type check.
Definition Casting.h:831
LLVM_ABI std::optional< bool > isImpliedCondition(const Value *LHS, const Value *RHS, const DataLayout &DL, bool LHSIsTrue=true, unsigned Depth=0)
Return true if RHS is known to be implied true by LHS.
LLVM_ABI std::optional< bool > computeKnownFPSignBit(const Value *V, const SimplifyQuery &SQ, unsigned Depth=0)
Return false if we can prove that the specified FP value's sign bit is 0.
void swap(llvm::BitVector &LHS, llvm::BitVector &RHS)
Implement std::swap in terms of BitVector swap.
Definition BitVector.h:869
#define N
This callback is used in conjunction with PointerMayBeCaptured.
virtual Action captured(const Use *U, UseCaptureInfo CI)=0
Use U directly captures CI.UseCC and additionally CI.ResultCC through the return value of the user of U.
virtual void tooManyUses()=0
tooManyUses - The depth of traversal has breached a limit.
Incoming for lane mask phi as machine instruction, incoming register Reg and incoming block Block are...
InstrInfoQuery provides an interface to query additional information for instructions like metadata or keywords like nsw, which depends on the value of UseInstrInfo.
bool isExact(const BinaryOperator *Op) const
MDNode * getMetadata(const Instruction *I, unsigned KindID) const
bool hasNoSignedWrap(const InstT *Op) const
bool hasNoUnsignedWrap(const InstT *Op) const
bool isNonNegative() const
Returns true if this value is known to be non-negative.
Definition KnownBits.h:108
bool isZero() const
Returns true if value is all zero.
Definition KnownBits.h:80
unsigned countMinTrailingZeros() const
Returns the minimum number of trailing zero bits.
Definition KnownBits.h:242
unsigned countMaxTrailingZeros() const
Returns the maximum number of trailing zero bits possible.
Definition KnownBits.h:274
bool hasConflict() const
Returns true if there is conflicting information.
Definition KnownBits.h:51
unsigned getBitWidth() const
Get the bit width of this value.
Definition KnownBits.h:44
unsigned countMaxActiveBits() const
Returns the maximum number of bits needed to represent all possible unsigned values with these known bits.
Definition KnownBits.h:296
unsigned countMinLeadingZeros() const
Returns the minimum number of leading zero bits.
Definition KnownBits.h:248
APInt getMaxValue() const
Return the maximal unsigned value possible given these KnownBits.
Definition KnownBits.h:145
APInt getMinValue() const
Return the minimal unsigned value possible given these KnownBits.
Definition KnownBits.h:129
bool isNegative() const
Returns true if this value is known to be negative.
Definition KnownBits.h:105
static LLVM_ABI KnownBits shl(const KnownBits &LHS, const KnownBits &RHS, bool NUW=false, bool NSW=false, bool ShAmtNonZero=false)
Compute known bits for shl(LHS, RHS).
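A minimal sketch of settling a compare from known bits; the helper is hypothetical:

#include "llvm/Support/KnownBits.h"
using namespace llvm;

// If LHS's largest possible value is still below RHS's smallest,
// "icmp ult LHS, RHS" is always true regardless of the unknown bits.
bool provenULT(const KnownBits &LHS, const KnownBits &RHS) {
  return LHS.getMaxValue().ult(RHS.getMinValue());
}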
bool isKnownAlwaysNaN() const
Return true if it's known this must always be a nan.
static constexpr FPClassTest OrderedLessThanZeroMask
std::optional< bool > SignBit
std::nullopt if the sign bit is unknown, true if the sign bit is definitely set, or false if the sign bit is definitely unset.
bool isKnownNeverNaN() const
Return true if it's known this can never be a nan.
bool isKnownNever(FPClassTest Mask) const
Return true if it's known this can never be one of the mask entries.
bool cannotBeOrderedLessThanZero() const
Return true if we can prove that the analyzed floating-point value is either NaN or never less than -0.0.
The adaptor from a function pass to a loop pass computes these analyses and makes them available to t...
Various options to control the behavior of getObjectSize.
bool NullIsUnknownSize
If this is true, null pointers in address space 0 will be treated as though they can't be evaluated.
Mode EvalMode
How we want to evaluate this object's size.
@ Min
Evaluate all branches of an unknown condition.
SelectPatternFlavor Flavor
static bool isMinOrMax(SelectPatternFlavor SPF)
When implementing this min/max pattern as fcmp; select, does the fcmp have to be ordered?
const DataLayout & DL
const Instruction * CxtI
bool CanUseUndef
Controls whether simplifications are allowed to constrain the range of possible values for uses of undef.
const DominatorTree * DT
SimplifyQuery getWithInstruction(const Instruction *I) const
LLVM_ABI bool isUndefValue(Value *V) const
If CanUseUndef is true, returns whether V is undef.
AssumptionCache * AC
const TargetLibraryInfo * TLI
SimplifyQuery getWithoutUndef() const
const InstrInfoQuery IIQ
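A minimal sketch of re-anchoring a query at the instruction being visited (hypothetical helper):

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/SimplifyQuery.h"
#include "llvm/IR/Instruction.h"
using namespace llvm;

// Re-anchoring lets context-sensitive reasoning (dominating conditions,
// assumptions) apply at the point where I executes.
Value *simplifyAt(Instruction *I, const SimplifyQuery &Q) {
  return simplifyInstruction(I, Q.getWithInstruction(I));
}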
Capture information for a specific Use.