LLVM 21.0.0git
SystemZInstrInfo.cpp
Go to the documentation of this file.
1//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains the SystemZ implementation of the TargetInstrInfo class.
10//
11//===----------------------------------------------------------------------===//
12
13#include "SystemZInstrInfo.h"
15#include "SystemZ.h"
16#include "SystemZInstrBuilder.h"
17#include "SystemZSubtarget.h"
18#include "llvm/ADT/Statistic.h"
36#include "llvm/MC/MCInstrDesc.h"
42#include <cassert>
43#include <cstdint>
44#include <iterator>
45
46using namespace llvm;
47
48#define GET_INSTRINFO_CTOR_DTOR
49#define GET_INSTRMAP_INFO
50#include "SystemZGenInstrInfo.inc"
51
52#define DEBUG_TYPE "systemz-II"
53
// Build a 64-bit mask whose Count least-significant bits are set.
// Count may be anything from 0 to 64 inclusive.
static uint64_t allOnes(unsigned int Count) {
  if (Count == 0)
    return 0;
  // Shift a fully-set word right instead of shifting 1 left; for
  // Count in [1, 64] the shift amount 64 - Count stays in [0, 63],
  // so no undefined behavior is possible.
  return ~uint64_t(0) >> (64 - Count);
}
58
// Pin the vtable to this file. Defining one out-of-line virtual method
// gives the class's vtable a single home translation unit instead of
// having the compiler emit it in every user of the header.
void SystemZInstrInfo::anchor() {}
61
63 : SystemZGenInstrInfo(-1, -1),
64 RI(sti.getSpecialRegisters()->getReturnFunctionAddressRegister()),
65 STI(sti) {}
66
// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
// Operand layout of MI: 0 = data register, 1 = base register,
// 2 = displacement, 3 = index register.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();

  // Get two load or store instructions. Use the original instruction for
  // one of them and create a clone for the other.
  // NOTE(review): MF (the enclosing MachineFunction) is used below; its
  // declaration appears to have been lost in extraction.
  MachineInstr *HighPartMI = MF.CloneMachineInstr(&*MI);
  MachineInstr *LowPartMI = &*MI;
  MBB->insert(LowPartMI, HighPartMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = HighPartMI->getOperand(0);
  MachineOperand &LowRegOp = LowPartMI->getOperand(0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = HighPartMI->getOperand(2);
  MachineOperand &LowOffsetOp = LowPartMI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Set the opcodes. getOpcodeForOffset picks a variant whose displacement
  // encoding can hold the given offset.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");
  HighPartMI->setDesc(get(HighOpcode));
  LowPartMI->setDesc(get(LowOpcode));

  MachineInstr *FirstMI = HighPartMI;
  if (MI->mayStore()) {
    FirstMI->getOperand(0).setIsKill(false);
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, HighPartMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, LowPartMI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
  } else {
    // If HighPartMI clobbers any of the address registers, it needs to come
    // after LowPartMI.
    auto overlapsAddressReg = [&](Register Reg) -> bool {
      return RI.regsOverlap(Reg, MI->getOperand(1).getReg()) ||
             RI.regsOverlap(Reg, MI->getOperand(3).getReg());
    };
    if (overlapsAddressReg(HighRegOp.getReg())) {
      assert(!overlapsAddressReg(LowRegOp.getReg()) &&
             "Both loads clobber address!");
      MBB->splice(HighPartMI, MBB, LowPartMI);
      FirstMI = LowPartMI;
    }
  }

  // Clear the kill flags on the address registers in the first instruction:
  // the second instruction still reads them.
  FirstMI->getOperand(1).setIsKill(false);
  FirstMI->getOperand(3).setIsKill(false);
}
131
// Split ADJDYNALLOC instruction MI.
// Rewrites the pseudo into an LA-style instruction whose immediate folds in
// the maximum call frame size, the ABI call frame size and the stack bias.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);
  // NOTE(review): Regs (the subtarget's special-register information) is
  // used below; its declaration appears to have been lost in extraction.

  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     Regs->getCallFrameSize() +
                     Regs->getStackPointerBias() +
                     OffsetMO.getImm());
  // Pick an LA variant whose displacement encoding can hold Offset.
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}
149
150// MI is an RI-style pseudo instruction. Replace it with LowOpcode
151// if the first operand is a low GR32 and HighOpcode if the first operand
152// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
153// and HighOpcode takes an unsigned 32-bit operand. In those cases,
154// MI has the same kind of operand as LowOpcode, so needs to be converted
155// if HighOpcode is used.
156void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
157 unsigned HighOpcode,
158 bool ConvertHigh) const {
159 Register Reg = MI.getOperand(0).getReg();
160 bool IsHigh = SystemZ::isHighReg(Reg);
161 MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
162 if (IsHigh && ConvertHigh)
163 MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
164}
165
166// MI is a three-operand RIE-style pseudo instruction. Replace it with
167// LowOpcodeK if the registers are both low GR32s, otherwise use a move
168// followed by HighOpcode or LowOpcode, depending on whether the target
169// is a high or low GR32.
170void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
171 unsigned LowOpcodeK,
172 unsigned HighOpcode) const {
173 Register DestReg = MI.getOperand(0).getReg();
174 Register SrcReg = MI.getOperand(1).getReg();
175 bool DestIsHigh = SystemZ::isHighReg(DestReg);
176 bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
177 if (!DestIsHigh && !SrcIsHigh)
178 MI.setDesc(get(LowOpcodeK));
179 else {
180 if (DestReg != SrcReg) {
181 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
182 SystemZ::LR, 32, MI.getOperand(1).isKill(),
183 MI.getOperand(1).isUndef());
184 MI.getOperand(1).setReg(DestReg);
185 }
186 MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
187 MI.tieOperands(0, 1);
188 }
189}
190
191// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
192// if the first operand is a low GR32 and HighOpcode if the first operand
193// is a high GR32.
194void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
195 unsigned HighOpcode) const {
196 Register Reg = MI.getOperand(0).getReg();
197 unsigned Opcode = getOpcodeForOffset(
198 SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
199 MI.getOperand(2).getImm());
200 MI.setDesc(get(Opcode));
201}
202
203// MI is a load-on-condition pseudo instruction with a single register
204// (source or destination) operand. Replace it with LowOpcode if the
205// register is a low GR32 and HighOpcode if the register is a high GR32.
206void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
207 unsigned HighOpcode) const {
208 Register Reg = MI.getOperand(0).getReg();
209 unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
210 MI.setDesc(get(Opcode));
211}
212
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  // NOTE(review): the builder MIB used below wraps the instruction created
  // by emitGRX32Move; its declaration appears to have been lost in
  // extraction.
  emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
                Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (const MachineOperand &MO : llvm::drop_begin(MI.operands(), 2))
    MIB.add(MO);

  // The replacement has been emitted; drop the pseudo.
  MI.eraseFromParent();
}
229
// Expand a load-stack-guard pseudo: assemble the 64-bit pointer held in
// access registers %a0/%a1 into Reg64, then reuse MI as a load of the
// guard value at offset 40 off that pointer.
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(0).getReg();
  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  // NOTE(review): the continuation that terminates this builder expression
  // appears to have been lost in extraction.
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A0)

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
      .addReg(Reg64)
      .addReg(0)
      .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  // Reuse MI itself as the final load rather than creating a new one.
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}
258
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  // NOTE(review): the return-type line and the MBBI parameter line of this
  // signature appear to have been lost in extraction.
  unsigned Opcode;
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  // Pick the rotate-then-insert variant matching which halves are involved.
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    // Both registers are low: a plain 32-bit move suffices.
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  // Rotate by 32 when moving between a high and a low half. The 128 added
  // to the end-bit operand presumably sets the RISBG "zero remaining bits"
  // flag so the move zero-extends — confirm against the ISA description.
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
290
                                                   bool NewMI,
                                                   unsigned OpIdx1,
                                                   unsigned OpIdx2) const {
  // Work on a clone when the caller asked for a new instruction (NewMI);
  // otherwise commute MI in place.
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    // Select / load-on-condition: swapping the two value operands also
    // requires inverting the CC mask (operand 4) within CCValid (operand 3).
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    // WorkingMI is already a clone if one was requested, so pass NewMI=false.
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}
322
323// If MI is a simple load or store for a frame object, return the register
324// it loads or stores and set FrameIndex to the index of the frame object.
325// Return 0 otherwise.
326//
327// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
328static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
329 unsigned Flag) {
330 const MCInstrDesc &MCID = MI.getDesc();
331 if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
332 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
333 FrameIndex = MI.getOperand(1).getIndex();
334 return MI.getOperand(0).getReg();
335 }
336 return 0;
337}
338
                                               int &FrameIndex) const {
  // A simple BDX load from a frame slot; returns the loaded register, or 0
  // if MI is not such a load.
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}
343
                                              int &FrameIndex) const {
  // A simple BDX store to a frame slot; returns the stored register, or 0
  // if MI is not such a store.
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
348
                                          int &DestFrameIndex,
                                          int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  // Both displacements must be zero and both addresses must be frame
  // indices for this to count as a slot-to-slot copy.
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}
371
                                     MachineBasicBlock *&FBB,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.
  // Returning true means the branch structure could not be analyzed;
  // returning false means TBB/FBB/Cond describe the block's terminators.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  // NOTE(review): the declarations of the iterator I and of Branch appear
  // to have been lost in extraction.
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      MBB.erase(std::next(I), MBB.end());

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      // Cond holds (CCValid, CCMask) for the conditional branch.
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}
462
                                      int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  // NOTE(review): the declaration of the iterator I appears to have been
  // lost in extraction.
  unsigned Count = 0;

  // Walk up from the bottom, deleting analyzable branch terminators.
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    // Leave indirect branches alone; we only remove ones with a known
    // basic-block target.
    if (!getBranchInfo(*I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  // Number of branches removed.
  return Count;
}
487
  assert(Cond.size() == 2 && "Invalid condition");
  // Cond is (CCValid, CCMask). XOR-ing the mask with the valid bits yields
  // the complementary condition within the same CCValid set.
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  // Reversal always succeeds for SystemZ conditions.
  return false;
}
494
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch. Cond is (CCValid, CCMask).
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  // Number of branch instructions inserted.
  return Count;
}
533
                                      Register &SrcReg2, int64_t &Mask,
                                      int64_t &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  // Only register-immediate compares are reported. SrcReg2 is set to 0 and
  // Mask to all ones in that case.
  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}
550
                                       Register DstReg, Register TrueReg,
                                       Register FalseReg, int &CondCycles,
                                       int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  // NOTE(review): the declaration of MRI appears to have been lost in
  // extraction.
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    // Report a uniform cost of 2 cycles for the select and both operands.
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}
584
                                    const DebugLoc &DL, Register DstReg,
                                    Register TrueReg,
                                    Register FalseReg) const {
  // NOTE(review): the signature head (including the Pred parameter) and the
  // declaration of MRI appear to have been lost in extraction.
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  // Pick the best conditional-move opcode the subtarget offers for DstReg's
  // register class.
  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      // Plain LOCR is restricted to GR32, so constrain the destination and
      // copy the inputs into fresh GR32 virtual registers first.
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class");

  // Note operand order: false value first, then true value, then condition.
  BuildMI(MBB, I, DL, get(Opc), DstReg)
    .addReg(FalseReg).addReg(TrueReg)
    .addImm(CCValid).addImm(CCMask);
}
626
                                       Register &FoldAsLoadDefReg,
                                       MachineInstr *&DefMI) const {
  // Check whether we can move the DefMI load, and that it only has one use.
  DefMI = MRI->getVRegDef(FoldAsLoadDefReg);
  assert(DefMI);
  bool SawStore = false;
  if (!DefMI->isSafeToMove(SawStore) || !MRI->hasOneNonDBGUse(FoldAsLoadDefReg))
    return nullptr;

  int UseOpIdx =
    MI.findRegisterUseOperandIdx(FoldAsLoadDefReg, /*TRI=*/nullptr);
  assert(UseOpIdx != -1 && "Expected FoldAsLoadDefReg to be used by MI.");

  // Check whether we can fold the load.
  if (MachineInstr *FoldMI =
          foldMemoryOperand(MI, {((unsigned)UseOpIdx)}, *DefMI)) {
    // Success: zero the register to tell the caller the fold happened.
    FoldAsLoadDefReg = 0;
    return FoldMI;
  }

  return nullptr;
}
651
                                     Register Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();

  if (DefOpc == SystemZ::VGBM) {
    int64_t ImmVal = DefMI.getOperand(1).getImm();
    if (ImmVal != 0) // TODO: Handle other values
      return false;

    // Fold gr128 = COPY (vr128 VGBM imm)
    //
    // %tmp:gr64 = LGHI 0
    // to gr128 = REG_SEQUENCE %tmp, %tmp
    assert(DefMI.getOperand(0).getReg() == Reg);

    if (!UseMI.isCopy())
      return false;

    Register CopyDstReg = UseMI.getOperand(0).getReg();
    if (CopyDstReg.isVirtual() &&
        MRI->getRegClass(CopyDstReg) == &SystemZ::GR128BitRegClass &&
        MRI->hasOneNonDBGUse(Reg)) {
      // TODO: Handle physical registers
      // TODO: Handle gr64 uses with subregister indexes
      // TODO: Should this multi-use cases?
      Register TmpReg = MRI->createVirtualRegister(&SystemZ::GR64BitRegClass);
      MachineBasicBlock &MBB = *UseMI.getParent();

      // Materialize the zero in a 64-bit register just before the use.
      loadImmediate(MBB, UseMI.getIterator(), TmpReg, ImmVal);

      // Rewrite the COPY into a REG_SEQUENCE built from the two halves.
      // NOTE(review): the MachineInstrBuilder line that begins the operand
      // append below appears to have been lost in extraction.
      UseMI.setDesc(get(SystemZ::REG_SEQUENCE));
      UseMI.getOperand(1).setReg(TmpReg);
          .addImm(SystemZ::subreg_h64)
          .addReg(TmpReg)
          .addImm(SystemZ::subreg_l64);

      if (MRI->use_nodbg_empty(Reg))
        DefMI.eraseFromParent();
      return true;
    }

    return false;
  }

  // Otherwise only fold immediates produced by LHI-style loads.
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    [[fallthrough]];
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  // If the constant feeds operand 1, commute first so it lands in the slot
  // that becomes the immediate.
  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(0, 1);
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
757
  unsigned Opcode = MI.getOpcode();
  // Only returns, traps and these two call forms can be predicated on CC
  // (see PredicateInstruction below for the corresponding rewrites).
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Return_XPLINK ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}
768
                   unsigned NumCycles, unsigned ExtraPredCycles,
                   BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer). This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}
787
                   unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                   MachineBasicBlock &FMBB,
                   unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                   BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  // Diamond (if/else) if-conversion is never considered profitable here.
  return false;
}
797
                      BranchProbability Probability) const {
  // For now only duplicate single instructions.
  // Larger blocks are never worth duplicating for if-conversion.
  return NumCycles == 1;
}
804
  assert(Pred.size() == 2 && "Invalid condition");
  // Pred is (CCValid, CCMask); the mask must select a strict, non-empty
  // subset of conditions.
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  // Trap -> conditional trap.
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  // Return -> conditional return (ELF or XPLINK flavor).
  if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
    MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
                                             : SystemZ::CondReturn_XPLINK));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  // Direct call -> conditional BRCL call; rebuild operands so the condition
  // immediates come first.
  if (Opcode == SystemZ::CallJG) {
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .add(FirstOp)
        .addRegMask(RegMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  // Indirect call -> conditional BCR call, with the same operand reshuffle.
  if (Opcode == SystemZ::CallBR) {
    MachineOperand Target = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.removeOperand(1);
    MI.removeOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .add(Target)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}
857
                                   const DebugLoc &DL, MCRegister DestReg,
                                   MCRegister SrcReg, bool KillSrc,
                                   bool RenamableDest,
                                   bool RenamableSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    // Only the second (final) half-move carries the kill of the 128-bit reg.
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    // Merge the two 64-bit FP halves into the vector register with VMRHG.
    MCRegister SrcRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister SrcRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
      .addReg(SrcRegHi, getKillRegState(KillSrc))
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    // Copy the high element, then replicate element 1 into the low half.
    MCRegister DestRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    MCRegister DestRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
      .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    // Move each 64-bit GPR half into the matching FP half with LDGR.
    MCRegister DestRegHi = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestRegLo = RI.getSubReg(DestReg, SystemZ::subreg_l64);
    MCRegister SrcRegHi = RI.getSubReg(SrcReg, SystemZ::subreg_h64);
    MCRegister SrcRegLo = RI.getSubReg(SrcReg, SystemZ::subreg_l64);

    // NOTE(review): the continuation that terminates this builder
    // expression appears to have been lost in extraction.
    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegHi)
      .addReg(SrcRegHi)

    BuildMI(MBB, MBBI, DL, get(SystemZ::LDGR), DestRegLo)
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }

  // Move CC value from a GR32.
  if (DestReg == SystemZ::CC) {
    // Use a test-under-mask of the IPM condition-code bits to set CC.
    unsigned Opcode =
      SystemZ::GR32BitRegClass.contains(SrcReg) ? SystemZ::TMLH : SystemZ::TMHH;
    BuildMI(MBB, MBBI, DL, get(Opcode))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  if (SystemZ::GR128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    // Extract both 64-bit vector elements with VLGVG.
    MCRegister DestH64 = RI.getSubReg(DestReg, SystemZ::subreg_h64);
    MCRegister DestL64 = RI.getSubReg(DestReg, SystemZ::subreg_l64);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestH64)
        .addReg(SrcReg)
        .addReg(SystemZ::NoRegister)
        .addImm(0)
        .addDef(DestReg, RegState::Implicit);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLGVG), DestL64)
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addReg(SystemZ::NoRegister)
        .addImm(1);
    return;
  }

  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::GR128BitRegClass.contains(SrcReg)) {
    // Pack the two GPR halves into a vector register with VLVGP.
    BuildMI(MBB, MBBI, DL, get(SystemZ::VLVGP), DestReg)
      .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64))
      .addReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else if (SystemZ::GR64BitRegClass.contains(DestReg) &&
           SystemZ::FP64BitRegClass.contains(SrcReg))
    Opcode = SystemZ::LGDR;
  else if (SystemZ::FP64BitRegClass.contains(DestReg) &&
           SystemZ::GR64BitRegClass.contains(SrcReg))
    Opcode = SystemZ::LDGR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}
998
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI, Register VReg,
    MachineInstr::MIFlag Flags) const {
  // Use the debug location of the instruction we are inserting before, if any.
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  // Emit the store of SrcReg into the frame slot; addFrameReference appends
  // the frame-index addressing operands to the partially built instruction.
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                        .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}
1014
    int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI,
    Register VReg, MachineInstr::MIFlag Flags) const {
  // Use the debug location of the instruction we are inserting before, if any.
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  // Emit the reload into DestReg; addFrameReference appends the frame-index
  // addressing operands to the partially built instruction.
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}
1028
1029// Return true if MI is a simple load or store with a 12-bit displacement
1030// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
1031static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
1032 const MCInstrDesc &MCID = MI->getDesc();
1033 return ((MCID.TSFlags & Flag) &&
1034 isUInt<12>(MI->getOperand(2).getImm()) &&
1035 MI->getOperand(3).getReg() == 0);
1036}
1037
namespace {

// Describes an AND-immediate operation: the width of the register being
// operated on, plus the bit position and width of the immediate field
// within it. A default-constructed LogicOp (RegSize == 0) means "not an
// AND immediate" and converts to false.
struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned RS, unsigned LSB, unsigned Size)
      : RegSize(RS), ImmLSB(LSB), ImmSize(Size) {}

  explicit operator bool() const { return RegSize != 0; }

  unsigned RegSize = 0;
  unsigned ImmLSB = 0;
  unsigned ImmSize = 0;
};

} // end anonymous namespace
1053
1054static LogicOp interpretAndImmediate(unsigned Opcode) {
1055 switch (Opcode) {
1056 case SystemZ::NILMux: return LogicOp(32, 0, 16);
1057 case SystemZ::NIHMux: return LogicOp(32, 16, 16);
1058 case SystemZ::NILL64: return LogicOp(64, 0, 16);
1059 case SystemZ::NILH64: return LogicOp(64, 16, 16);
1060 case SystemZ::NIHL64: return LogicOp(64, 32, 16);
1061 case SystemZ::NIHH64: return LogicOp(64, 48, 16);
1062 case SystemZ::NIFMux: return LogicOp(32, 0, 32);
1063 case SystemZ::NILF64: return LogicOp(64, 0, 32);
1064 case SystemZ::NIHF64: return LogicOp(64, 32, 32);
1065 default: return LogicOp();
1066 }
1067}
1068
1069static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
1070 if (OldMI->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr)) {
1071 MachineOperand *CCDef =
1072 NewMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr);
1073 if (CCDef != nullptr)
1074 CCDef->setIsDead(true);
1075 }
1076}
1077
1078static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI,
1079 MachineInstr::MIFlag Flag) {
1080 if (OldMI->getFlag(Flag))
1081 NewMI->setFlag(Flag);
1082}
1083
1086 LiveIntervals *LIS) const {
1087 MachineBasicBlock *MBB = MI.getParent();
1088
1089 // Try to convert an AND into an RISBG-type instruction.
1090 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
1091 if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
1092 uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
1093 // AND IMMEDIATE leaves the other bits of the register unchanged.
1094 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
1095 unsigned Start, End;
1096 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
1097 unsigned NewOpcode;
1098 if (And.RegSize == 64) {
1099 NewOpcode = SystemZ::RISBG;
1100 // Prefer RISBGN if available, since it does not clobber CC.
1101 if (STI.hasMiscellaneousExtensions())
1102 NewOpcode = SystemZ::RISBGN;
1103 } else {
1104 NewOpcode = SystemZ::RISBMux;
1105 Start &= 31;
1106 End &= 31;
1107 }
1108 MachineOperand &Dest = MI.getOperand(0);
1109 MachineOperand &Src = MI.getOperand(1);
1111 BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
1112 .add(Dest)
1113 .addReg(0)
1114 .addReg(Src.getReg(), getKillRegState(Src.isKill()),
1115 Src.getSubReg())
1116 .addImm(Start)
1117 .addImm(End + 128)
1118 .addImm(0);
1119 if (LV) {
1120 unsigned NumOps = MI.getNumOperands();
1121 for (unsigned I = 1; I < NumOps; ++I) {
1122 MachineOperand &Op = MI.getOperand(I);
1123 if (Op.isReg() && Op.isKill())
1124 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1125 }
1126 }
1127 if (LIS)
1128 LIS->ReplaceMachineInstrInMaps(MI, *MIB);
1129 transferDeadCC(&MI, MIB);
1130 return MIB;
1131 }
1132 }
1133 return nullptr;
1134}
1135
1137 bool Invert) const {
1138 unsigned Opc = Inst.getOpcode();
1139 if (Invert) {
1140 auto InverseOpcode = getInverseOpcode(Opc);
1141 if (!InverseOpcode)
1142 return false;
1143 Opc = *InverseOpcode;
1144 }
1145
1146 switch (Opc) {
1147 default:
1148 break;
1149 // Adds and multiplications.
1150 case SystemZ::WFADB:
1151 case SystemZ::WFASB:
1152 case SystemZ::WFAXB:
1153 case SystemZ::VFADB:
1154 case SystemZ::VFASB:
1155 case SystemZ::WFMDB:
1156 case SystemZ::WFMSB:
1157 case SystemZ::WFMXB:
1158 case SystemZ::VFMDB:
1159 case SystemZ::VFMSB:
1162 }
1163
1164 return false;
1165}
1166
// Return the opcode performing the inverse operation (fadd <-> fsub) of
// Opcode, or std::nullopt if Opcode has no inverse here. Only the
// vector/scalar BFP add and subtract opcodes are covered.
std::optional<unsigned>
  // fadd => fsub
  switch (Opcode) {
  case SystemZ::WFADB:
    return SystemZ::WFSDB;
  case SystemZ::WFASB:
    return SystemZ::WFSSB;
  case SystemZ::WFAXB:
    return SystemZ::WFSXB;
  case SystemZ::VFADB:
    return SystemZ::VFSDB;
  case SystemZ::VFASB:
    return SystemZ::VFSSB;
  // fsub => fadd
  case SystemZ::WFSDB:
    return SystemZ::WFADB;
  case SystemZ::WFSSB:
    return SystemZ::WFASB;
  case SystemZ::WFSXB:
    return SystemZ::WFAXB;
  case SystemZ::VFSDB:
    return SystemZ::VFADB;
  case SystemZ::VFSSB:
    return SystemZ::VFASB;
  default:
    return std::nullopt;
  }
}
1196
1199 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1200 LiveIntervals *LIS, VirtRegMap *VRM) const {
1203 const MachineFrameInfo &MFI = MF.getFrameInfo();
1204 unsigned Size = MFI.getObjectSize(FrameIndex);
1205 unsigned Opcode = MI.getOpcode();
1206
1207 // Check CC liveness if new instruction introduces a dead def of CC.
1208 SlotIndex MISlot = SlotIndex();
1209 LiveRange *CCLiveRange = nullptr;
1210 bool CCLiveAtMI = true;
1211 if (LIS) {
1212 MISlot = LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1213 auto CCUnits = TRI->regunits(MCRegister::from(SystemZ::CC));
1214 assert(range_size(CCUnits) == 1 && "CC only has one reg unit.");
1215 CCLiveRange = &LIS->getRegUnit(*CCUnits.begin());
1216 CCLiveAtMI = CCLiveRange->liveAt(MISlot);
1217 }
1218
1219 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1220 if (!CCLiveAtMI && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1221 isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1222 // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1223 MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1224 MI.getDebugLoc(), get(SystemZ::AGSI))
1225 .addFrameIndex(FrameIndex)
1226 .addImm(0)
1227 .addImm(MI.getOperand(2).getImm());
1228 BuiltMI->findRegisterDefOperand(SystemZ::CC, /*TRI=*/nullptr)
1229 ->setIsDead(true);
1230 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1231 return BuiltMI;
1232 }
1233 return nullptr;
1234 }
1235
1236 // All other cases require a single operand.
1237 if (Ops.size() != 1)
1238 return nullptr;
1239
1240 unsigned OpNum = Ops[0];
1241 assert(Size * 8 ==
1242 TRI->getRegSizeInBits(*MF.getRegInfo()
1243 .getRegClass(MI.getOperand(OpNum).getReg())) &&
1244 "Invalid size combination");
1245
1246 if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1247 isInt<8>(MI.getOperand(2).getImm())) {
1248 // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1249 Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1250 MachineInstr *BuiltMI =
1251 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1252 .addFrameIndex(FrameIndex)
1253 .addImm(0)
1254 .addImm(MI.getOperand(2).getImm());
1255 transferDeadCC(&MI, BuiltMI);
1257 return BuiltMI;
1258 }
1259
1260 if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1261 isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1262 (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1263 isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1264 // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1265 Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1266 MachineInstr *BuiltMI =
1267 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1268 .addFrameIndex(FrameIndex)
1269 .addImm(0)
1270 .addImm((int8_t)MI.getOperand(2).getImm());
1271 transferDeadCC(&MI, BuiltMI);
1272 return BuiltMI;
1273 }
1274
1275 if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1276 isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1277 (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1278 isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1279 // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1280 Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1281 MachineInstr *BuiltMI =
1282 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1283 .addFrameIndex(FrameIndex)
1284 .addImm(0)
1285 .addImm((int8_t)-MI.getOperand(2).getImm());
1286 transferDeadCC(&MI, BuiltMI);
1287 return BuiltMI;
1288 }
1289
1290 unsigned MemImmOpc = 0;
1291 switch (Opcode) {
1292 case SystemZ::LHIMux:
1293 case SystemZ::LHI: MemImmOpc = SystemZ::MVHI; break;
1294 case SystemZ::LGHI: MemImmOpc = SystemZ::MVGHI; break;
1295 case SystemZ::CHIMux:
1296 case SystemZ::CHI: MemImmOpc = SystemZ::CHSI; break;
1297 case SystemZ::CGHI: MemImmOpc = SystemZ::CGHSI; break;
1298 case SystemZ::CLFIMux:
1299 case SystemZ::CLFI:
1300 if (isUInt<16>(MI.getOperand(1).getImm()))
1301 MemImmOpc = SystemZ::CLFHSI;
1302 break;
1303 case SystemZ::CLGFI:
1304 if (isUInt<16>(MI.getOperand(1).getImm()))
1305 MemImmOpc = SystemZ::CLGHSI;
1306 break;
1307 default: break;
1308 }
1309 if (MemImmOpc)
1310 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1311 get(MemImmOpc))
1312 .addFrameIndex(FrameIndex)
1313 .addImm(0)
1314 .addImm(MI.getOperand(1).getImm());
1315
1316 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1317 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1318 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1319 // If we're spilling the destination of an LDGR or LGDR, store the
1320 // source register instead.
1321 if (OpNum == 0) {
1322 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1323 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1324 get(StoreOpcode))
1325 .add(MI.getOperand(1))
1326 .addFrameIndex(FrameIndex)
1327 .addImm(0)
1328 .addReg(0);
1329 }
1330 // If we're spilling the source of an LDGR or LGDR, load the
1331 // destination register instead.
1332 if (OpNum == 1) {
1333 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1334 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1335 get(LoadOpcode))
1336 .add(MI.getOperand(0))
1337 .addFrameIndex(FrameIndex)
1338 .addImm(0)
1339 .addReg(0);
1340 }
1341 }
1342
1343 // Look for cases where the source of a simple store or the destination
1344 // of a simple load is being spilled. Try to use MVC instead.
1345 //
1346 // Although MVC is in practice a fast choice in these cases, it is still
1347 // logically a bytewise copy. This means that we cannot use it if the
1348 // load or store is volatile. We also wouldn't be able to use MVC if
1349 // the two memories partially overlap, but that case cannot occur here,
1350 // because we know that one of the memories is a full frame index.
1351 //
1352 // For performance reasons, we also want to avoid using MVC if the addresses
1353 // might be equal. We don't worry about that case here, because spill slot
1354 // coloring happens later, and because we have special code to remove
1355 // MVCs that turn out to be redundant.
1356 if (OpNum == 0 && MI.hasOneMemOperand()) {
1357 MachineMemOperand *MMO = *MI.memoperands_begin();
1358 if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1359 // Handle conversion of loads.
1361 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1362 get(SystemZ::MVC))
1363 .addFrameIndex(FrameIndex)
1364 .addImm(0)
1365 .addImm(Size)
1366 .add(MI.getOperand(1))
1367 .addImm(MI.getOperand(2).getImm())
1368 .addMemOperand(MMO);
1369 }
1370 // Handle conversion of stores.
1372 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1373 get(SystemZ::MVC))
1374 .add(MI.getOperand(1))
1375 .addImm(MI.getOperand(2).getImm())
1376 .addImm(Size)
1377 .addFrameIndex(FrameIndex)
1378 .addImm(0)
1379 .addMemOperand(MMO);
1380 }
1381 }
1382 }
1383
1384 // If the spilled operand is the final one or the instruction is
1385 // commutable, try to change <INSN>R into <INSN>. Don't introduce a def of
1386 // CC if it is live and MI does not define it.
1387 unsigned NumOps = MI.getNumExplicitOperands();
1388 int MemOpcode = SystemZ::getMemOpcode(Opcode);
1389 if (MemOpcode == -1 ||
1390 (CCLiveAtMI && !MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1391 get(MemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)))
1392 return nullptr;
1393
1394 // Check if all other vregs have a usable allocation in the case of vector
1395 // to FP conversion.
1396 const MCInstrDesc &MCID = MI.getDesc();
1397 for (unsigned I = 0, E = MCID.getNumOperands(); I != E; ++I) {
1398 const MCOperandInfo &MCOI = MCID.operands()[I];
1399 if (MCOI.OperandType != MCOI::OPERAND_REGISTER || I == OpNum)
1400 continue;
1401 const TargetRegisterClass *RC = TRI->getRegClass(MCOI.RegClass);
1402 if (RC == &SystemZ::VR32BitRegClass || RC == &SystemZ::VR64BitRegClass) {
1403 Register Reg = MI.getOperand(I).getReg();
1404 Register PhysReg = Reg.isVirtual()
1405 ? (VRM ? Register(VRM->getPhys(Reg)) : Register())
1406 : Reg;
1407 if (!PhysReg ||
1408 !(SystemZ::FP32BitRegClass.contains(PhysReg) ||
1409 SystemZ::FP64BitRegClass.contains(PhysReg) ||
1410 SystemZ::VF128BitRegClass.contains(PhysReg)))
1411 return nullptr;
1412 }
1413 }
1414 // Fused multiply and add/sub need to have the same dst and accumulator reg.
1415 bool FusedFPOp = (Opcode == SystemZ::WFMADB || Opcode == SystemZ::WFMASB ||
1416 Opcode == SystemZ::WFMSDB || Opcode == SystemZ::WFMSSB);
1417 if (FusedFPOp) {
1418 Register DstReg = VRM->getPhys(MI.getOperand(0).getReg());
1419 Register AccReg = VRM->getPhys(MI.getOperand(3).getReg());
1420 if (OpNum == 0 || OpNum == 3 || DstReg != AccReg)
1421 return nullptr;
1422 }
1423
1424 // Try to swap compare operands if possible.
1425 bool NeedsCommute = false;
1426 if ((MI.getOpcode() == SystemZ::CR || MI.getOpcode() == SystemZ::CGR ||
1427 MI.getOpcode() == SystemZ::CLR || MI.getOpcode() == SystemZ::CLGR ||
1428 MI.getOpcode() == SystemZ::WFCDB || MI.getOpcode() == SystemZ::WFCSB ||
1429 MI.getOpcode() == SystemZ::WFKDB || MI.getOpcode() == SystemZ::WFKSB) &&
1430 OpNum == 0 && prepareCompareSwapOperands(MI))
1431 NeedsCommute = true;
1432
1433 bool CCOperands = false;
1434 if (MI.getOpcode() == SystemZ::LOCRMux || MI.getOpcode() == SystemZ::LOCGR ||
1435 MI.getOpcode() == SystemZ::SELRMux || MI.getOpcode() == SystemZ::SELGR) {
1436 assert(MI.getNumOperands() == 6 && NumOps == 5 &&
1437 "LOCR/SELR instruction operands corrupt?");
1438 NumOps -= 2;
1439 CCOperands = true;
1440 }
1441
1442 // See if this is a 3-address instruction that is convertible to 2-address
1443 // and suitable for folding below. Only try this with virtual registers
1444 // and a provided VRM (during regalloc).
1445 if (NumOps == 3 && SystemZ::getTargetMemOpcode(MemOpcode) != -1) {
1446 if (VRM == nullptr)
1447 return nullptr;
1448 else {
1449 Register DstReg = MI.getOperand(0).getReg();
1450 Register DstPhys =
1451 (DstReg.isVirtual() ? Register(VRM->getPhys(DstReg)) : DstReg);
1452 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1453 : ((OpNum == 1 && MI.isCommutable())
1454 ? MI.getOperand(2).getReg()
1455 : Register()));
1456 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1457 SrcReg.isVirtual() && DstPhys == VRM->getPhys(SrcReg))
1458 NeedsCommute = (OpNum == 1);
1459 else
1460 return nullptr;
1461 }
1462 }
1463
1464 if ((OpNum == NumOps - 1) || NeedsCommute || FusedFPOp) {
1465 const MCInstrDesc &MemDesc = get(MemOpcode);
1466 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1467 assert(AccessBytes != 0 && "Size of access should be known");
1468 assert(AccessBytes <= Size && "Access outside the frame index");
1469 uint64_t Offset = Size - AccessBytes;
1470 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1471 MI.getDebugLoc(), get(MemOpcode));
1472 if (MI.isCompare()) {
1473 assert(NumOps == 2 && "Expected 2 register operands for a compare.");
1474 MIB.add(MI.getOperand(NeedsCommute ? 1 : 0));
1475 }
1476 else if (FusedFPOp) {
1477 MIB.add(MI.getOperand(0));
1478 MIB.add(MI.getOperand(3));
1479 MIB.add(MI.getOperand(OpNum == 1 ? 2 : 1));
1480 }
1481 else {
1482 MIB.add(MI.getOperand(0));
1483 if (NeedsCommute)
1484 MIB.add(MI.getOperand(2));
1485 else
1486 for (unsigned I = 1; I < OpNum; ++I)
1487 MIB.add(MI.getOperand(I));
1488 }
1489 MIB.addFrameIndex(FrameIndex).addImm(Offset);
1490 if (MemDesc.TSFlags & SystemZII::HasIndex)
1491 MIB.addReg(0);
1492 if (CCOperands) {
1493 unsigned CCValid = MI.getOperand(NumOps).getImm();
1494 unsigned CCMask = MI.getOperand(NumOps + 1).getImm();
1495 MIB.addImm(CCValid);
1496 MIB.addImm(NeedsCommute ? CCMask ^ CCValid : CCMask);
1497 }
1498 if (MIB->definesRegister(SystemZ::CC, /*TRI=*/nullptr) &&
1499 (!MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr) ||
1500 MI.registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))) {
1501 MIB->addRegisterDead(SystemZ::CC, TRI);
1502 if (CCLiveRange)
1503 CCLiveRange->createDeadDef(MISlot, LIS->getVNInfoAllocator());
1504 }
1505 // Constrain the register classes if converted from a vector opcode. The
1506 // allocated regs are in an FP reg-class per previous check above.
1507 for (const MachineOperand &MO : MIB->operands())
1508 if (MO.isReg() && MO.getReg().isVirtual()) {
1509 Register Reg = MO.getReg();
1510 if (MRI.getRegClass(Reg) == &SystemZ::VR32BitRegClass)
1511 MRI.setRegClass(Reg, &SystemZ::FP32BitRegClass);
1512 else if (MRI.getRegClass(Reg) == &SystemZ::VR64BitRegClass)
1513 MRI.setRegClass(Reg, &SystemZ::FP64BitRegClass);
1514 else if (MRI.getRegClass(Reg) == &SystemZ::VR128BitRegClass)
1515 MRI.setRegClass(Reg, &SystemZ::VF128BitRegClass);
1516 }
1517
1518 transferDeadCC(&MI, MIB);
1521 return MIB;
1522 }
1523
1524 return nullptr;
1525}
1526
1529 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1530 LiveIntervals *LIS) const {
1532 MachineBasicBlock *MBB = MI.getParent();
1533
1534 // For reassociable FP operations, any loads have been purposefully left
1535 // unfolded so that MachineCombiner can do its work on reg/reg
1536 // opcodes. After that, as many loads as possible are now folded.
1537 // TODO: This may be beneficial with other opcodes as well as machine-sink
1538 // can move loads close to their user in a different MBB, which the isel
1539 // matcher did not see.
1540 unsigned LoadOpc = 0;
1541 unsigned RegMemOpcode = 0;
1542 const TargetRegisterClass *FPRC = nullptr;
1543 RegMemOpcode = MI.getOpcode() == SystemZ::WFADB ? SystemZ::ADB
1544 : MI.getOpcode() == SystemZ::WFSDB ? SystemZ::SDB
1545 : MI.getOpcode() == SystemZ::WFMDB ? SystemZ::MDB
1546 : 0;
1547 if (RegMemOpcode) {
1548 LoadOpc = SystemZ::VL64;
1549 FPRC = &SystemZ::FP64BitRegClass;
1550 } else {
1551 RegMemOpcode = MI.getOpcode() == SystemZ::WFASB ? SystemZ::AEB
1552 : MI.getOpcode() == SystemZ::WFSSB ? SystemZ::SEB
1553 : MI.getOpcode() == SystemZ::WFMSB ? SystemZ::MEEB
1554 : 0;
1555 if (RegMemOpcode) {
1556 LoadOpc = SystemZ::VL32;
1557 FPRC = &SystemZ::FP32BitRegClass;
1558 }
1559 }
1560 if (!RegMemOpcode || LoadMI.getOpcode() != LoadOpc)
1561 return nullptr;
1562
1563 // If RegMemOpcode clobbers CC, first make sure CC is not live at this point.
1564 if (get(RegMemOpcode).hasImplicitDefOfPhysReg(SystemZ::CC)) {
1565 assert(LoadMI.getParent() == MI.getParent() && "Assuming a local fold.");
1566 assert(LoadMI != InsertPt && "Assuming InsertPt not to be first in MBB.");
1567 for (MachineBasicBlock::iterator MII = std::prev(InsertPt);;
1568 --MII) {
1569 if (MII->definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
1570 if (!MII->registerDefIsDead(SystemZ::CC, /*TRI=*/nullptr))
1571 return nullptr;
1572 break;
1573 }
1574 if (MII == MBB->begin()) {
1575 if (MBB->isLiveIn(SystemZ::CC))
1576 return nullptr;
1577 break;
1578 }
1579 }
1580 }
1581
1582 Register FoldAsLoadDefReg = LoadMI.getOperand(0).getReg();
1583 if (Ops.size() != 1 || FoldAsLoadDefReg != MI.getOperand(Ops[0]).getReg())
1584 return nullptr;
1585 Register DstReg = MI.getOperand(0).getReg();
1586 MachineOperand LHS = MI.getOperand(1);
1587 MachineOperand RHS = MI.getOperand(2);
1588 MachineOperand &RegMO = RHS.getReg() == FoldAsLoadDefReg ? LHS : RHS;
1589 if ((RegMemOpcode == SystemZ::SDB || RegMemOpcode == SystemZ::SEB) &&
1590 FoldAsLoadDefReg != RHS.getReg())
1591 return nullptr;
1592
1593 MachineOperand &Base = LoadMI.getOperand(1);
1594 MachineOperand &Disp = LoadMI.getOperand(2);
1595 MachineOperand &Indx = LoadMI.getOperand(3);
1597 BuildMI(*MI.getParent(), InsertPt, MI.getDebugLoc(), get(RegMemOpcode), DstReg)
1598 .add(RegMO)
1599 .add(Base)
1600 .add(Disp)
1601 .add(Indx);
1602 MIB->addRegisterDead(SystemZ::CC, &RI);
1603 MRI->setRegClass(DstReg, FPRC);
1604 MRI->setRegClass(RegMO.getReg(), FPRC);
1606
1607 return MIB;
1608}
1609
  // Dispatch on the pseudo opcode and rewrite MI in place (or replace it)
  // using the expand*/split* helpers. Returns true if MI was a pseudo that
  // has been expanded, false otherwise.
  switch (MI.getOpcode()) {
  // 128-bit loads/stores are split into two 64-bit operations (see
  // splitMove).
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  // Mux memory pseudos: pick between the two real opcodes (presumably the
  // low-half vs. high-half form, by allocated register — see
  // expandRXYPseudo).
  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  // Conditional load/store pseudos.
  case SystemZ::LOCMux:
    expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
    return true;

  case SystemZ::LOCHIMux:
    expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::STOCMux:
    expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
    return true;

  // Register-immediate Mux pseudos (insert, AND, OR, XOR, test, add,
  // compare against either half).
  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CHIMux:
    expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  case SystemZ::RISBMux: {
    // Select the RISB variant that matches which halves (low/high) the
    // destination and source registers were allocated to. When they
    // differ, flip bit 5 of the last immediate (the rotate count) to
    // compensate for the 32-bit offset between the halves.
    bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
    bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(&MI);
    return true;

  default:
    return false;
  }
}
1788
1790 if (MI.isInlineAsm()) {
1791 const MachineFunction *MF = MI.getParent()->getParent();
1792 const char *AsmStr = MI.getOperand(0).getSymbolName();
1793 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1794 }
1795 else if (MI.getOpcode() == SystemZ::PATCHPOINT)
1797 else if (MI.getOpcode() == SystemZ::STACKMAP)
1798 return MI.getOperand(1).getImm();
1799 else if (MI.getOpcode() == SystemZ::FENTRY_CALL)
1800 return 6;
1801 if (MI.getOpcode() == TargetOpcode::PATCHABLE_FUNCTION_ENTER)
1802 return 18;
1803 if (MI.getOpcode() == TargetOpcode::PATCHABLE_RET)
1804 return 18 + (MI.getOperand(0).getImm() == SystemZ::CondReturn ? 4 : 0);
1805
1806 return MI.getDesc().getSize();
1807}
1808
1811 switch (MI.getOpcode()) {
1812 case SystemZ::BR:
1813 case SystemZ::BI:
1814 case SystemZ::J:
1815 case SystemZ::JG:
1817 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1818
1819 case SystemZ::BRC:
1820 case SystemZ::BRCL:
1821 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1822 MI.getOperand(1).getImm(), &MI.getOperand(2));
1823
1824 case SystemZ::BRCT:
1825 case SystemZ::BRCTH:
1827 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1828
1829 case SystemZ::BRCTG:
1831 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1832
1833 case SystemZ::CIJ:
1834 case SystemZ::CRJ:
1836 MI.getOperand(2).getImm(), &MI.getOperand(3));
1837
1838 case SystemZ::CLIJ:
1839 case SystemZ::CLRJ:
1841 MI.getOperand(2).getImm(), &MI.getOperand(3));
1842
1843 case SystemZ::CGIJ:
1844 case SystemZ::CGRJ:
1846 MI.getOperand(2).getImm(), &MI.getOperand(3));
1847
1848 case SystemZ::CLGIJ:
1849 case SystemZ::CLGRJ:
1851 MI.getOperand(2).getImm(), &MI.getOperand(3));
1852
1853 case SystemZ::INLINEASM_BR:
1854 // Don't try to analyze asm goto, so pass nullptr as branch target argument.
1855 return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);
1856
1857 default:
1858 llvm_unreachable("Unrecognized branch opcode");
1859 }
1860}
1861
                                             unsigned &LoadOpcode,
                                             unsigned &StoreOpcode) const {
  // Select the spill/reload opcode pair for register class RC. Classes
  // that may live in either half of the register file (GRX32) or in
  // register pairs (GR128/FP128) get pseudos (LMux/STMux, L128/ST128,
  // LX/STX) that are expanded after register allocation (see
  // expandPostRAPseudo).
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else if (RC == &SystemZ::VR32BitRegClass) {
    LoadOpcode = SystemZ::VL32;
    StoreOpcode = SystemZ::VST32;
  } else if (RC == &SystemZ::VR64BitRegClass) {
    LoadOpcode = SystemZ::VL64;
    StoreOpcode = SystemZ::VST64;
  } else if (RC == &SystemZ::VF128BitRegClass ||
             RC == &SystemZ::VR128BitRegClass) {
    LoadOpcode = SystemZ::VL;
    StoreOpcode = SystemZ::VST;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}
1904
1906 int64_t Offset,
1907 const MachineInstr *MI) const {
1908 const MCInstrDesc &MCID = get(Opcode);
1909 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1910 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1911 // Get the instruction to use for unsigned 12-bit displacements.
1912 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1913 if (Disp12Opcode >= 0)
1914 return Disp12Opcode;
1915
1916 // All address-related instructions can use unsigned 12-bit
1917 // displacements.
1918 return Opcode;
1919 }
1920 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1921 // Get the instruction to use for signed 20-bit displacements.
1922 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1923 if (Disp20Opcode >= 0)
1924 return Disp20Opcode;
1925
1926 // Check whether Opcode allows signed 20-bit displacements.
1928 return Opcode;
1929
1930 // If a VR32/VR64 reg ended up in an FP register, use the FP opcode.
1931 if (MI && MI->getOperand(0).isReg()) {
1932 Register Reg = MI->getOperand(0).getReg();
1933 if (Reg.isPhysical() && SystemZMC::getFirstReg(Reg) < 16) {
1934 switch (Opcode) {
1935 case SystemZ::VL32:
1936 return SystemZ::LEY;
1937 case SystemZ::VST32:
1938 return SystemZ::STEY;
1939 case SystemZ::VL64:
1940 return SystemZ::LDY;
1941 case SystemZ::VST64:
1942 return SystemZ::STDY;
1943 default: break;
1944 }
1945 }
1946 }
1947 }
1948 return 0;
1949}
1950
1952 const MCInstrDesc &MCID = get(Opcode);
1954 return SystemZ::getDisp12Opcode(Opcode) >= 0;
1955 return SystemZ::getDisp20Opcode(Opcode) >= 0;
1956}
1957
1958unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
1959 switch (Opcode) {
1960 case SystemZ::L: return SystemZ::LT;
1961 case SystemZ::LY: return SystemZ::LT;
1962 case SystemZ::LG: return SystemZ::LTG;
1963 case SystemZ::LGF: return SystemZ::LTGF;
1964 case SystemZ::LR: return SystemZ::LTR;
1965 case SystemZ::LGFR: return SystemZ::LTGFR;
1966 case SystemZ::LGR: return SystemZ::LTGR;
1967 case SystemZ::LCDFR: return SystemZ::LCDBR;
1968 case SystemZ::LPDFR: return SystemZ::LPDBR;
1969 case SystemZ::LNDFR: return SystemZ::LNDBR;
1970 case SystemZ::LCDFR_32: return SystemZ::LCEBR;
1971 case SystemZ::LPDFR_32: return SystemZ::LPEBR;
1972 case SystemZ::LNDFR_32: return SystemZ::LNEBR;
1973 // On zEC12 we prefer to use RISBGN. But if there is a chance to
1974 // actually use the condition code, we may turn it back into RISGB.
1975 // Note that RISBG is not really a "load-and-test" instruction,
1976 // but sets the same condition code values, so is OK to use here.
1977 case SystemZ::RISBGN: return SystemZ::RISBG;
1978 default: return 0;
1979 }
1980}
1981
1982bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
1983 unsigned &Start, unsigned &End) const {
1984 // Reject trivial all-zero masks.
1985 Mask &= allOnes(BitSize);
1986 if (Mask == 0)
1987 return false;
1988
1989 // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
1990 // the msb and End specifies the index of the lsb.
1991 unsigned LSB, Length;
1992 if (isShiftedMask_64(Mask, LSB, Length)) {
1993 Start = 63 - (LSB + Length - 1);
1994 End = 63 - LSB;
1995 return true;
1996 }
1997
1998 // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
1999 // of the low 1s and End specifies the lsb of the high 1s.
2000 if (isShiftedMask_64(Mask ^ allOnes(BitSize), LSB, Length)) {
2001 assert(LSB > 0 && "Bottom bit must be set");
2002 assert(LSB + Length < BitSize && "Top bit must be set");
2003 Start = 63 - (LSB - 1);
2004 End = 63 - (LSB + Length);
2005 return true;
2006 }
2007
2008 return false;
2009}
2010
2011unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
2013 const MachineInstr *MI) const {
2014 switch (Opcode) {
2015 case SystemZ::CHI:
2016 case SystemZ::CGHI:
2017 if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
2018 return 0;
2019 break;
2020 case SystemZ::CLFI:
2021 case SystemZ::CLGFI:
2022 if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
2023 return 0;
2024 break;
2025 case SystemZ::CL:
2026 case SystemZ::CLG:
2027 if (!STI.hasMiscellaneousExtensions())
2028 return 0;
2029 if (!(MI && MI->getOperand(3).getReg() == 0))
2030 return 0;
2031 break;
2032 }
2033 switch (Type) {
2035 switch (Opcode) {
2036 case SystemZ::CR:
2037 return SystemZ::CRJ;
2038 case SystemZ::CGR:
2039 return SystemZ::CGRJ;
2040 case SystemZ::CHI:
2041 return SystemZ::CIJ;
2042 case SystemZ::CGHI:
2043 return SystemZ::CGIJ;
2044 case SystemZ::CLR:
2045 return SystemZ::CLRJ;
2046 case SystemZ::CLGR:
2047 return SystemZ::CLGRJ;
2048 case SystemZ::CLFI:
2049 return SystemZ::CLIJ;
2050 case SystemZ::CLGFI:
2051 return SystemZ::CLGIJ;
2052 default:
2053 return 0;
2054 }
2056 switch (Opcode) {
2057 case SystemZ::CR:
2058 return SystemZ::CRBReturn;
2059 case SystemZ::CGR:
2060 return SystemZ::CGRBReturn;
2061 case SystemZ::CHI:
2062 return SystemZ::CIBReturn;
2063 case SystemZ::CGHI:
2064 return SystemZ::CGIBReturn;
2065 case SystemZ::CLR:
2066 return SystemZ::CLRBReturn;
2067 case SystemZ::CLGR:
2068 return SystemZ::CLGRBReturn;
2069 case SystemZ::CLFI:
2070 return SystemZ::CLIBReturn;
2071 case SystemZ::CLGFI:
2072 return SystemZ::CLGIBReturn;
2073 default:
2074 return 0;
2075 }
2077 switch (Opcode) {
2078 case SystemZ::CR:
2079 return SystemZ::CRBCall;
2080 case SystemZ::CGR:
2081 return SystemZ::CGRBCall;
2082 case SystemZ::CHI:
2083 return SystemZ::CIBCall;
2084 case SystemZ::CGHI:
2085 return SystemZ::CGIBCall;
2086 case SystemZ::CLR:
2087 return SystemZ::CLRBCall;
2088 case SystemZ::CLGR:
2089 return SystemZ::CLGRBCall;
2090 case SystemZ::CLFI:
2091 return SystemZ::CLIBCall;
2092 case SystemZ::CLGFI:
2093 return SystemZ::CLGIBCall;
2094 default:
2095 return 0;
2096 }
2098 switch (Opcode) {
2099 case SystemZ::CR:
2100 return SystemZ::CRT;
2101 case SystemZ::CGR:
2102 return SystemZ::CGRT;
2103 case SystemZ::CHI:
2104 return SystemZ::CIT;
2105 case SystemZ::CGHI:
2106 return SystemZ::CGIT;
2107 case SystemZ::CLR:
2108 return SystemZ::CLRT;
2109 case SystemZ::CLGR:
2110 return SystemZ::CLGRT;
2111 case SystemZ::CLFI:
2112 return SystemZ::CLFIT;
2113 case SystemZ::CLGFI:
2114 return SystemZ::CLGIT;
2115 case SystemZ::CL:
2116 return SystemZ::CLT;
2117 case SystemZ::CLG:
2118 return SystemZ::CLGT;
2119 default:
2120 return 0;
2121 }
2122 }
2123 return 0;
2124}
2125
2128 assert(MBBI->isCompare() && MBBI->getOperand(0).isReg() &&
2129 MBBI->getOperand(1).isReg() && !MBBI->mayLoad() &&
2130 "Not a compare reg/reg.");
2131
2133 bool CCLive = true;
2135 for (MachineInstr &MI : llvm::make_range(std::next(MBBI), MBB->end())) {
2136 if (MI.readsRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2137 unsigned Flags = MI.getDesc().TSFlags;
2138 if ((Flags & SystemZII::CCMaskFirst) || (Flags & SystemZII::CCMaskLast))
2139 CCUsers.push_back(&MI);
2140 else
2141 return false;
2142 }
2143 if (MI.definesRegister(SystemZ::CC, /*TRI=*/nullptr)) {
2144 CCLive = false;
2145 break;
2146 }
2147 }
2148 if (CCLive) {
2150 LiveRegs.addLiveOuts(*MBB);
2151 if (!LiveRegs.available(SystemZ::CC))
2152 return false;
2153 }
2154
2155 // Update all CC users.
2156 for (unsigned Idx = 0; Idx < CCUsers.size(); ++Idx) {
2157 unsigned Flags = CCUsers[Idx]->getDesc().TSFlags;
2158 unsigned FirstOpNum = ((Flags & SystemZII::CCMaskFirst) ?
2159 0 : CCUsers[Idx]->getNumExplicitOperands() - 2);
2160 MachineOperand &CCMaskMO = CCUsers[Idx]->getOperand(FirstOpNum + 1);
2161 unsigned NewCCMask = SystemZ::reverseCCMask(CCMaskMO.getImm());
2162 CCMaskMO.setImm(NewCCMask);
2163 }
2164
2165 return true;
2166}
2167
2168unsigned SystemZ::reverseCCMask(unsigned CCMask) {
2169 return ((CCMask & SystemZ::CCMASK_CMP_EQ) |
2172 (CCMask & SystemZ::CCMASK_CMP_UO));
2173}
2174
2176 MachineFunction &MF = *MBB->getParent();
2178 MF.insert(std::next(MachineFunction::iterator(MBB)), NewMBB);
2179 return NewMBB;
2180}
2181
2185 NewMBB->splice(NewMBB->begin(), MBB,
2186 std::next(MachineBasicBlock::iterator(MI)), MBB->end());
2188 return NewMBB;
2189}
2190
2194 NewMBB->splice(NewMBB->begin(), MBB, MI, MBB->end());
2196 return NewMBB;
2197}
2198
2199unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
2200 if (!STI.hasLoadAndTrap())
2201 return 0;
2202 switch (Opcode) {
2203 case SystemZ::L:
2204 case SystemZ::LY:
2205 return SystemZ::LAT;
2206 case SystemZ::LG:
2207 return SystemZ::LGAT;
2208 case SystemZ::LFH:
2209 return SystemZ::LFHAT;
2210 case SystemZ::LLGF:
2211 return SystemZ::LLGFAT;
2212 case SystemZ::LLGT:
2213 return SystemZ::LLGTAT;
2214 }
2215 return 0;
2216}
2217
2220 unsigned Reg, uint64_t Value) const {
2221 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
2222 unsigned Opcode = 0;
2223 if (isInt<16>(Value))
2224 Opcode = SystemZ::LGHI;
2225 else if (SystemZ::isImmLL(Value))
2226 Opcode = SystemZ::LLILL;
2227 else if (SystemZ::isImmLH(Value)) {
2228 Opcode = SystemZ::LLILH;
2229 Value >>= 16;
2230 }
2231 else if (isInt<32>(Value))
2232 Opcode = SystemZ::LGFI;
2233 if (Opcode) {
2234 BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
2235 return;
2236 }
2237
2239 assert (MRI.isSSA() && "Huge values only handled before reg-alloc .");
2240 Register Reg0 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2241 Register Reg1 = MRI.createVirtualRegister(&SystemZ::GR64BitRegClass);
2242 BuildMI(MBB, MBBI, DL, get(SystemZ::IMPLICIT_DEF), Reg0);
2243 BuildMI(MBB, MBBI, DL, get(SystemZ::IIHF64), Reg1)
2244 .addReg(Reg0).addImm(Value >> 32);
2245 BuildMI(MBB, MBBI, DL, get(SystemZ::IILF64), Reg)
2246 .addReg(Reg1).addImm(Value & ((uint64_t(1) << 32) - 1));
2247}
2248
2250 StringRef &ErrInfo) const {
2251 const MCInstrDesc &MCID = MI.getDesc();
2252 for (unsigned I = 0, E = MI.getNumOperands(); I != E; ++I) {
2253 if (I >= MCID.getNumOperands())
2254 break;
2255 const MachineOperand &Op = MI.getOperand(I);
2256 const MCOperandInfo &MCOI = MCID.operands()[I];
2257 // Addressing modes have register and immediate operands. Op should be a
2258 // register (or frame index) operand if MCOI.RegClass contains a valid
2259 // register class, or an immediate otherwise.
2260 if (MCOI.OperandType == MCOI::OPERAND_MEMORY &&
2261 ((MCOI.RegClass != -1 && !Op.isReg() && !Op.isFI()) ||
2262 (MCOI.RegClass == -1 && !Op.isImm()))) {
2263 ErrInfo = "Addressing mode operands corrupt!";
2264 return false;
2265 }
2266 }
2267
2268 return true;
2269}
2270
2273 const MachineInstr &MIb) const {
2274
2275 if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
2276 return false;
2277
2278 // If mem-operands show that the same address Value is used by both
2279 // instructions, check for non-overlapping offsets and widths. Not
2280 // sure if a register based analysis would be an improvement...
2281
2282 MachineMemOperand *MMOa = *MIa.memoperands_begin();
2283 MachineMemOperand *MMOb = *MIb.memoperands_begin();
2284 const Value *VALa = MMOa->getValue();
2285 const Value *VALb = MMOb->getValue();
2286 bool SameVal = (VALa && VALb && (VALa == VALb));
2287 if (!SameVal) {
2288 const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
2289 const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
2290 if (PSVa && PSVb && (PSVa == PSVb))
2291 SameVal = true;
2292 }
2293 if (SameVal) {
2294 int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
2295 LocationSize WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
2296 int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
2297 int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
2298 LocationSize LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
2299 if (LowWidth.hasValue() &&
2300 LowOffset + (int)LowWidth.getValue() <= HighOffset)
2301 return true;
2302 }
2303
2304 return false;
2305}
2306
2308 const Register Reg,
2309 int64_t &ImmVal) const {
2310
2311 if (MI.getOpcode() == SystemZ::VGBM && Reg == MI.getOperand(0).getReg()) {
2312 ImmVal = MI.getOperand(1).getImm();
2313 // TODO: Handle non-0 values
2314 return ImmVal == 0;
2315 }
2316
2317 return false;
2318}
unsigned const MachineRegisterInfo * MRI
MachineInstrBuilder & UseMI
MachineInstrBuilder MachineInstrBuilder & DefMI
unsigned RegSize
MachineBasicBlock & MBB
MachineBasicBlock MachineBasicBlock::iterator DebugLoc DL
MachineBasicBlock MachineBasicBlock::iterator MBBI
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
uint64_t Size
bool End
Definition: ELF_riscv.cpp:480
IRTranslator LLVM IR MI
A set of register units.
#define I(x, y, z)
Definition: MD5.cpp:58
unsigned const TargetRegisterInfo * TRI
const SmallVectorImpl< MachineOperand > MachineBasicBlock * TBB
const SmallVectorImpl< MachineOperand > & Cond
assert(ImpDefSCC.getReg()==AMDGPU::SCC &&ImpDefSCC.isDef())
static bool contains(SmallPtrSetImpl< ConstantExpr * > &Cache, ConstantExpr *Expr, Constant *C)
Definition: Value.cpp:469
This file defines the 'Statistic' class, which is designed to be an easy way to expose various metric...
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag)
static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI)
static void transferMIFlag(MachineInstr *OldMI, MachineInstr *NewMI, MachineInstr::MIFlag Flag)
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex, unsigned Flag)
static LogicOp interpretAndImmediate(unsigned Opcode)
static uint64_t allOnes(unsigned int Count)
Value * RHS
Value * LHS
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:168
This class represents an Operation in the Expression.
A debug info location.
Definition: DebugLoc.h:33
SlotIndexes * getSlotIndexes() const
VNInfo::Allocator & getVNInfoAllocator()
LiveRange & getRegUnit(unsigned Unit)
Return the live range for register unit Unit.
SlotIndex ReplaceMachineInstrInMaps(MachineInstr &MI, MachineInstr &NewMI)
This class represents the liveness of a register, stack slot, etc.
Definition: LiveInterval.h:157
bool liveAt(SlotIndex index) const
Definition: LiveInterval.h:401
VNInfo * createDeadDef(SlotIndex Def, VNInfo::Allocator &VNIAlloc)
createDeadDef - Make sure the range has a value defined at Def.
A set of register units used to track register liveness.
Definition: LiveRegUnits.h:30
bool available(MCPhysReg Reg) const
Returns true if no part of physical register Reg is live.
Definition: LiveRegUnits.h:116
void addLiveOuts(const MachineBasicBlock &MBB)
Adds registers living out of block MBB.
void replaceKillInstruction(Register Reg, MachineInstr &OldMI, MachineInstr &NewMI)
replaceKillInstruction - Update register kill info by replacing a kill instruction with a new one.
bool hasValue() const
TypeSize getValue() const
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:198
unsigned getNumOperands() const
Return the number of declared MachineOperands for this MachineInstruction.
Definition: MCInstrDesc.h:237
ArrayRef< MCOperandInfo > operands() const
Definition: MCInstrDesc.h:239
This holds information about one operand of a machine instruction, indicating the register class for ...
Definition: MCInstrDesc.h:85
uint8_t OperandType
Information about the type of the operand.
Definition: MCInstrDesc.h:97
int16_t RegClass
This specifies the register class enumeration of the operand if the operand is a register.
Definition: MCInstrDesc.h:91
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
static MCRegister from(unsigned Val)
Check the provided unsigned value is a valid MCRegister.
Definition: MCRegister.h:78
void transferSuccessorsAndUpdatePHIs(MachineBasicBlock *FromMBB)
Transfers all the successors, as in transferSuccessors, and update PHI operands in the successor bloc...
instr_iterator insert(instr_iterator I, MachineInstr *M)
Insert MI into the instruction list before I, possibly inside a bundle.
const BasicBlock * getBasicBlock() const
Return the LLVM basic block that this instance corresponded to originally.
iterator getLastNonDebugInstr(bool SkipPseudoOp=true)
Returns an iterator to the last non-debug instruction in the basic block, or end().
bool isLayoutSuccessor(const MachineBasicBlock *MBB) const
Return true if the specified MBB will be emitted immediately after this block, such that if this bloc...
const MachineFunction * getParent() const
Return the MachineFunction containing this basic block.
instr_iterator erase(instr_iterator I)
Remove an instruction from the instruction list and delete it.
void splice(iterator Where, MachineBasicBlock *Other, iterator From)
Take an instruction from MBB 'Other' at the position From, and insert it into this MBB right before '...
bool isLiveIn(MCRegister Reg, LaneBitmask LaneMask=LaneBitmask::getAll()) const
Return true if the specified register is in the live in set.
The MachineFrameInfo class represents an abstract stack frame until prolog/epilog code is inserted.
uint64_t getMaxCallFrameSize() const
Return the maximum size of a call frame that must be allocated for an outgoing function call.
int64_t getObjectSize(int ObjectIdx) const
Return the size of the specified object.
const TargetSubtargetInfo & getSubtarget() const
getSubtarget - Return the subtarget for which this machine code is being compiled.
MachineFrameInfo & getFrameInfo()
getFrameInfo - Return the frame info object for the current function.
MachineRegisterInfo & getRegInfo()
getRegInfo - Return information about the registers currently in use.
MachineInstr * CloneMachineInstr(const MachineInstr *Orig)
Create a new MachineInstr which is a copy of Orig, identical in all ways except the instruction has n...
MachineBasicBlock * CreateMachineBasicBlock(const BasicBlock *BB=nullptr, std::optional< UniqueBBID > BBID=std::nullopt)
CreateMachineBasicBlock - Allocate a new MachineBasicBlock.
void insert(iterator MBBI, MachineBasicBlock *MBB)
const TargetMachine & getTarget() const
getTarget - Return the target machine this machine code is compiled with
Register getReg(unsigned Idx) const
Get the register for the operand index.
const MachineInstrBuilder & addImm(int64_t Val) const
Add a new immediate operand.
const MachineInstrBuilder & add(const MachineOperand &MO) const
const MachineInstrBuilder & addFrameIndex(int Idx) const
const MachineInstrBuilder & addRegMask(const uint32_t *Mask) const
const MachineInstrBuilder & addReg(Register RegNo, unsigned flags=0, unsigned SubReg=0) const
Add a new virtual register operand.
const MachineInstrBuilder & addMBB(MachineBasicBlock *MBB, unsigned TargetFlags=0) const
const MachineInstrBuilder & addMemOperand(MachineMemOperand *MMO) const
const MachineInstrBuilder & addDef(Register RegNo, unsigned Flags=0, unsigned SubReg=0) const
Add a virtual register definition operand.
Representation of each machine instruction.
Definition: MachineInstr.h:71
unsigned getOpcode() const
Returns the opcode of this MachineInstr.
Definition: MachineInstr.h:577
const MachineBasicBlock * getParent() const
Definition: MachineInstr.h:349
bool getFlag(MIFlag Flag) const
Return whether an MI flag is set.
Definition: MachineInstr.h:399
bool isSafeToMove(bool &SawStore) const
Return true if it is safe to move this instruction.
bool registerDefIsDead(Register Reg, const TargetRegisterInfo *TRI) const
Returns true if the register is dead in this machine instruction.
bool definesRegister(Register Reg, const TargetRegisterInfo *TRI) const
Return true if the MachineInstr fully defines the specified register.
void setDesc(const MCInstrDesc &TID)
Replace the instruction descriptor (thus opcode) of the current instruction with a new one.
bool hasOneMemOperand() const
Return true if this instruction has exactly one MachineMemOperand.
Definition: MachineInstr.h:823
iterator_range< mop_iterator > operands()
Definition: MachineInstr.h:693
mmo_iterator memoperands_begin() const
Access to memory operands of the instruction.
Definition: MachineInstr.h:808
void setFlag(MIFlag Flag)
Set a MI flag.
Definition: MachineInstr.h:406
const MachineOperand & getOperand(unsigned i) const
Definition: MachineInstr.h:587
MachineOperand * findRegisterDefOperand(Register Reg, const TargetRegisterInfo *TRI, bool isDead=false, bool Overlap=false)
Wrapper for findRegisterDefOperandIdx, it returns a pointer to the MachineOperand rather than an inde...
bool addRegisterDead(Register Reg, const TargetRegisterInfo *RegInfo, bool AddIfNotFound=false)
We have determined MI defined a register without a use.
A description of a memory reference used in the backend.
LocationSize getSize() const
Return the size in bytes of the memory reference.
const PseudoSourceValue * getPseudoValue() const
bool isAtomic() const
Returns true if this operation has an atomic ordering requirement of unordered or higher,...
const Value * getValue() const
Return the base address of the memory access.
int64_t getOffset() const
For normal values, this is a byte offset added to the base address.
MachineOperand class - Representation of each machine instruction operand.
void setImm(int64_t immVal)
int64_t getImm() const
void setIsDead(bool Val=true)
void setReg(Register Reg)
Change the register this operand corresponds to.
void setIsKill(bool Val=true)
static MachineOperand CreateImm(int64_t Val)
Register getReg() const
getReg - Returns the register number.
MachineRegisterInfo - Keep track of information for virtual and physical registers,...
const TargetRegisterClass * getRegClass(Register Reg) const
Return the register class of the specified virtual register.
MI-level patchpoint operands.
Definition: StackMaps.h:76
uint32_t getNumPatchBytes() const
Return the number of patchable bytes the given patchpoint should emit.
Definition: StackMaps.h:104
Special value supplied for machine level alias analysis.
Wrapper class representing virtual and physical registers.
Definition: Register.h:19
constexpr bool isVirtual() const
Return true if the specified register number is in the virtual register namespace.
Definition: Register.h:91
SlotIndex - An opaque wrapper around machine indexes.
Definition: SlotIndexes.h:65
SlotIndex getRegSlot(bool EC=false) const
Returns the register use/def slot in the current instruction for a normal or early-clobber def.
Definition: SlotIndexes.h:237
SlotIndex getInstructionIndex(const MachineInstr &MI, bool IgnoreBundle=false) const
Returns the base index for the given instruction.
Definition: SlotIndexes.h:379
size_t size() const
Definition: SmallVector.h:78
This class consists of common code factored out of the SmallVector class to reduce code duplication b...
Definition: SmallVector.h:573
void push_back(const T &Elt)
Definition: SmallVector.h:413
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1196
StringRef - Represent a constant reference to a string, i.e.
Definition: StringRef.h:51
A SystemZ-specific class detailing special use registers particular for calling conventions.
unsigned getLoadAndTrap(unsigned Opcode) const
unsigned insertBranch(MachineBasicBlock &MBB, MachineBasicBlock *TBB, MachineBasicBlock *FBB, ArrayRef< MachineOperand > Cond, const DebugLoc &DL, int *BytesAdded=nullptr) const override
bool isProfitableToIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, unsigned ExtraPredCycles, BranchProbability Probability) const override
unsigned getLoadAndTest(unsigned Opcode) const
void loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register DestReg, int FrameIdx, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
bool isPredicable(const MachineInstr &MI) const override
bool isStackSlotCopy(const MachineInstr &MI, int &DestFrameIndex, int &SrcFrameIndex) const override
MachineInstr * convertToThreeAddress(MachineInstr &MI, LiveVariables *LV, LiveIntervals *LIS) const override
unsigned getOpcodeForOffset(unsigned Opcode, int64_t Offset, const MachineInstr *MI=nullptr) const
unsigned getInstSizeInBytes(const MachineInstr &MI) const override
unsigned removeBranch(MachineBasicBlock &MBB, int *BytesRemoved=nullptr) const override
Register isStoreToStackSlot(const MachineInstr &MI, int &FrameIndex) const override
bool getConstValDefinedInReg(const MachineInstr &MI, const Register Reg, int64_t &ImmVal) const override
bool isAssociativeAndCommutative(const MachineInstr &Inst, bool Invert) const override
void storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, Register SrcReg, bool isKill, int FrameIndex, const TargetRegisterClass *RC, const TargetRegisterInfo *TRI, Register VReg, MachineInstr::MIFlag Flags=MachineInstr::NoFlags) const override
MachineInstr * optimizeLoadInstr(MachineInstr &MI, const MachineRegisterInfo *MRI, Register &FoldAsLoadDefReg, MachineInstr *&DefMI) const override
SystemZInstrInfo(SystemZSubtarget &STI)
bool hasDisplacementPairInsn(unsigned Opcode) const
MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned CommuteOpIdx1, unsigned CommuteOpIdx2) const override
Commutes the operands in the given instruction by changing the operands order and/or changing the ins...
void insertSelect(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, const DebugLoc &DL, Register DstReg, ArrayRef< MachineOperand > Cond, Register TrueReg, Register FalseReg) const override
std::optional< unsigned > getInverseOpcode(unsigned Opcode) const override
bool isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles, BranchProbability Probability) const override
SystemZII::Branch getBranchInfo(const MachineInstr &MI) const
bool areMemAccessesTriviallyDisjoint(const MachineInstr &MIa, const MachineInstr &MIb) const override
bool reverseBranchCondition(SmallVectorImpl< MachineOperand > &Cond) const override
unsigned getFusedCompare(unsigned Opcode, SystemZII::FusedCompareType Type, const MachineInstr *MI=nullptr) const
bool expandPostRAPseudo(MachineInstr &MBBI) const override
bool analyzeCompare(const MachineInstr &MI, Register &SrcReg, Register &SrcReg2, int64_t &Mask, int64_t &Value) const override
bool verifyInstruction(const MachineInstr &MI, StringRef &ErrInfo) const override
void getLoadStoreOpcodes(const TargetRegisterClass *RC, unsigned &LoadOpcode, unsigned &StoreOpcode) const
bool isRxSBGMask(uint64_t Mask, unsigned BitSize, unsigned &Start, unsigned &End) const
bool foldImmediate(MachineInstr &UseMI, MachineInstr &DefMI, Register Reg, MachineRegisterInfo *MRI) const override
bool canInsertSelect(const MachineBasicBlock &, ArrayRef< MachineOperand > Cond, Register, Register, Register, int &, int &, int &) const override
bool prepareCompareSwapOperands(MachineBasicBlock::iterator MBBI) const
Register isLoadFromStackSlot(const MachineInstr &MI, int &FrameIndex) const override
MachineInstr * foldMemoryOperandImpl(MachineFunction &MF, MachineInstr &MI, ArrayRef< unsigned > Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex, LiveIntervals *LIS=nullptr, VirtRegMap *VRM=nullptr) const override
bool analyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB, SmallVectorImpl< MachineOperand > &Cond, bool AllowModify) const override
void copyPhysReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, const DebugLoc &DL, MCRegister DestReg, MCRegister SrcReg, bool KillSrc, bool RenamableDest=false, bool RenamableSrc=false) const override
void loadImmediate(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned Reg, uint64_t Value) const
bool PredicateInstruction(MachineInstr &MI, ArrayRef< MachineOperand > Pred) const override
SystemZCallingConventionRegisters * getSpecialRegisters() const
virtual MachineInstr * commuteInstructionImpl(MachineInstr &MI, bool NewMI, unsigned OpIdx1, unsigned OpIdx2) const
This method commutes the operands of the given machine instruction MI.
const MCAsmInfo * getMCAsmInfo() const
Return target specific asm information.
TargetRegisterInfo base class - We assume that the target defines a static array of TargetRegisterDes...
virtual const TargetRegisterInfo * getRegisterInfo() const
getRegisterInfo - If register information is available, return it.
Target - Wrapper for Target specific information.
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:74
MCRegister getPhys(Register virtReg) const
returns the physical register mapped to the specified virtual register
Definition: VirtRegMap.h:90
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
@ OPERAND_REGISTER
Definition: MCInstrDesc.h:61
@ OPERAND_MEMORY
Definition: MCInstrDesc.h:62
@ Implicit
Not emitted register (e.g. carry, or temporary result).
@ Undef
Value of the register doesn't matter.
static unsigned getAccessSize(unsigned int Flags)
unsigned getFirstReg(unsigned Reg)
MachineBasicBlock * splitBlockBefore(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
int getTargetMemOpcode(uint16_t Opcode)
const unsigned CCMASK_CMP_GT
Definition: SystemZ.h:37
const unsigned CCMASK_ANY
Definition: SystemZ.h:31
static bool isImmLL(uint64_t Val)
Definition: SystemZ.h:161
static bool isImmLH(uint64_t Val)
Definition: SystemZ.h:166
MachineBasicBlock * emitBlockAfter(MachineBasicBlock *MBB)
unsigned reverseCCMask(unsigned CCMask)
const unsigned IPM_CC
Definition: SystemZ.h:112
const unsigned CCMASK_CMP_EQ
Definition: SystemZ.h:35
const unsigned CCMASK_ICMP
Definition: SystemZ.h:47
MachineBasicBlock * splitBlockAfter(MachineBasicBlock::iterator MI, MachineBasicBlock *MBB)
const unsigned CCMASK_CMP_LT
Definition: SystemZ.h:36
const unsigned CCMASK_CMP_NE
Definition: SystemZ.h:38
bool isHighReg(unsigned int Reg)
const unsigned CCMASK_CMP_UO
Definition: SystemZ.h:43
Reg
All possible values of the reg field in the ModR/M byte.
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
auto drop_begin(T &&RangeOrContainer, size_t N=1)
Return a range covering RangeOrContainer with the first N elements excluded.
Definition: STLExtras.h:329
@ Offset
Definition: DWP.cpp:480
@ Length
Definition: DWP.cpp:480
MachineInstrBuilder BuildMI(MachineFunction &MF, const MIMetadata &MIMD, const MCInstrDesc &MCID)
Builder interface. Specify how to create the initial instruction itself.
iterator_range< T > make_range(T x, T y)
Convenience function for iterating over sub-ranges.
static const MachineInstrBuilder & addFrameReference(const MachineInstrBuilder &MIB, int FI, int Offset=0, bool mem=true)
addFrameReference - This function is used to add a reference to the base of an abstract object on the...
constexpr bool isShiftedMask_64(uint64_t Value)
Return true if the argument contains a non-empty sequence of ones with the remainder zero (64 bit ver...
Definition: MathExtras.h:286
constexpr size_t range_size(R &&Range)
Returns the size of the Range, i.e., the number of elements.
Definition: STLExtras.h:1722
decltype(auto) get(const PointerIntPair< PointerTy, IntBits, IntType, PtrTraits, Info > &Pair)
unsigned getUndefRegState(bool B)
@ And
Bitwise or logical AND of integers.
unsigned getKillRegState(bool B)