1//===- AArch64AsmPrinter.cpp - AArch64 LLVM assembly writer ---------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// This file contains a printer that converts from our internal representation
10// of machine-dependent LLVM code to the AArch64 assembly language.
11//
12//===----------------------------------------------------------------------===//
13
14#include "AArch64.h"
15#include "AArch64MCInstLower.h"
17#include "AArch64RegisterInfo.h"
18#include "AArch64Subtarget.h"
27#include "llvm/ADT/DenseMap.h"
28#include "llvm/ADT/ScopeExit.h"
31#include "llvm/ADT/StringRef.h"
32#include "llvm/ADT/Twine.h"
46#include "llvm/IR/DataLayout.h"
48#include "llvm/IR/Mangler.h"
49#include "llvm/IR/Module.h"
50#include "llvm/MC/MCAsmInfo.h"
51#include "llvm/MC/MCContext.h"
52#include "llvm/MC/MCInst.h"
56#include "llvm/MC/MCStreamer.h"
57#include "llvm/MC/MCSymbol.h"
67#include <cassert>
68#include <cstdint>
69#include <map>
70#include <memory>
71
72using namespace llvm;
73
74enum PtrauthCheckMode { Default, Unchecked, Poison, Trap };
75static cl::opt<PtrauthCheckMode> PtrauthAuthChecks(
76    "aarch64-ptrauth-auth-checks", cl::Hidden,
77    cl::values(clEnumValN(Unchecked, "none", "don't test for failure"),
78               clEnumValN(Poison, "poison", "poison on failure"),
79               clEnumValN(Trap, "trap", "trap on failure")),
80    cl::desc("Check pointer authentication auth/resign failures"),
81    cl::init(Default));
82
83#define DEBUG_TYPE "asm-printer"
84
85namespace {
86
87class AArch64AsmPrinter : public AsmPrinter {
88 AArch64MCInstLower MCInstLowering;
89 FaultMaps FM;
90 const AArch64Subtarget *STI;
91 bool ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = false;
92#ifndef NDEBUG
93 unsigned InstsEmitted;
94#endif
95 bool EnableImportCallOptimization = false;
96  DenseMap<MCSection *, std::vector<std::pair<MCSymbol *, MCSymbol *>>>
97      SectionToImportedFunctionCalls;
98
99public:
100 static char ID;
101
102 AArch64AsmPrinter(TargetMachine &TM, std::unique_ptr<MCStreamer> Streamer)
103 : AsmPrinter(TM, std::move(Streamer), ID),
104 MCInstLowering(OutContext, *this), FM(*this) {}
105
106 StringRef getPassName() const override { return "AArch64 Assembly Printer"; }
107
108 /// Wrapper for MCInstLowering.lowerOperand() for the
109 /// tblgen'erated pseudo lowering.
110 bool lowerOperand(const MachineOperand &MO, MCOperand &MCOp) const {
111 return MCInstLowering.lowerOperand(MO, MCOp);
112 }
113
114 const MCExpr *lowerConstantPtrAuth(const ConstantPtrAuth &CPA) override;
115
116 const MCExpr *lowerBlockAddressConstant(const BlockAddress &BA) override;
117
118 void emitStartOfAsmFile(Module &M) override;
119 void emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
120 ArrayRef<unsigned> JumpTableIndices) override;
121 std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
122             codeview::JumpTableEntrySize>
123  getCodeViewJumpTableInfo(int JTI, const MachineInstr *BranchInstr,
124 const MCSymbol *BranchLabel) const override;
125
126 void emitFunctionEntryLabel() override;
127
128 void emitXXStructor(const DataLayout &DL, const Constant *CV) override;
129
130 void LowerJumpTableDest(MCStreamer &OutStreamer, const MachineInstr &MI);
131
132 void LowerHardenedBRJumpTable(const MachineInstr &MI);
133
134 void LowerMOPS(MCStreamer &OutStreamer, const MachineInstr &MI);
135
136 void LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
137 const MachineInstr &MI);
138 void LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
139 const MachineInstr &MI);
140 void LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
141 const MachineInstr &MI);
142 void LowerFAULTING_OP(const MachineInstr &MI);
143
144 void LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI);
145 void LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI);
146 void LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI);
147 void LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI, bool Typed);
148
149 typedef std::tuple<unsigned, bool, uint32_t, bool, uint64_t>
150 HwasanMemaccessTuple;
151 std::map<HwasanMemaccessTuple, MCSymbol *> HwasanMemaccessSymbols;
152 void LowerKCFI_CHECK(const MachineInstr &MI);
153 void LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI);
154 void emitHwasanMemaccessSymbols(Module &M);
155
156 void emitSled(const MachineInstr &MI, SledKind Kind);
157
158 // Emit the sequence for BRA/BLRA (authenticate + branch/call).
159 void emitPtrauthBranch(const MachineInstr *MI);
160
161 void emitPtrauthCheckAuthenticatedValue(Register TestedReg,
162 Register ScratchReg,
163                                          AArch64PACKey::ID Key,
164                                          AArch64PAuth::AuthCheckMethod Method,
165                                          bool ShouldTrap,
166 const MCSymbol *OnFailure);
167
168 // Check authenticated LR before tail calling.
169 void emitPtrauthTailCallHardening(const MachineInstr *TC);
170
171 // Emit the sequence for AUT or AUTPAC.
172 void emitPtrauthAuthResign(Register AUTVal, AArch64PACKey::ID AUTKey,
173 uint64_t AUTDisc,
174 const MachineOperand *AUTAddrDisc,
175 Register Scratch,
176 std::optional<AArch64PACKey::ID> PACKey,
177 uint64_t PACDisc, Register PACAddrDisc);
178
179 // Emit the sequence for PAC.
180 void emitPtrauthSign(const MachineInstr *MI);
181
182 // Emit the sequence to compute the discriminator.
183 //
184 // The returned register is either unmodified AddrDisc or ScratchReg.
185 //
186 // If the expanded pseudo is allowed to clobber AddrDisc register, setting
187 // MayUseAddrAsScratch may save one MOV instruction, provided the address
188 // is already in x16/x17 (i.e. return x16/x17 which is the *modified* AddrDisc
189 // register at the same time) or the OS doesn't make it safer to use x16/x17
190 // (see AArch64Subtarget::isX16X17Safer()):
191 //
192 // mov x17, x16
193 // movk x17, #1234, lsl #48
194 // ; x16 is not used anymore
195 //
196 // can be replaced by
197 //
198 // movk x16, #1234, lsl #48
199 Register emitPtrauthDiscriminator(uint16_t Disc, Register AddrDisc,
200 Register ScratchReg,
201 bool MayUseAddrAsScratch = false);
202
203 // Emit the sequence for LOADauthptrstatic
204 void LowerLOADauthptrstatic(const MachineInstr &MI);
205
206 // Emit the sequence for LOADgotPAC/MOVaddrPAC (either GOT adrp-ldr or
207 // adrp-add followed by PAC sign)
208 void LowerMOVaddrPAC(const MachineInstr &MI);
209
210 // Emit the sequence for LOADgotAUTH (load signed pointer from signed ELF GOT
211  // and authenticate it; if the FPAC bit is not set, a check+trap sequence
212  // follows authentication)
213 void LowerLOADgotAUTH(const MachineInstr &MI);
214
215 /// tblgen'erated driver function for lowering simple MI->MC
216 /// pseudo instructions.
217 bool lowerPseudoInstExpansion(const MachineInstr *MI, MCInst &Inst);
218
219 // Emit Build Attributes
220 void emitAttributes(unsigned Flags, uint64_t PAuthABIPlatform,
221 uint64_t PAuthABIVersion, AArch64TargetStreamer *TS);
222
223 // Emit expansion of Compare-and-branch pseudo instructions
224 void emitCBPseudoExpansion(const MachineInstr *MI);
225
226 void EmitToStreamer(MCStreamer &S, const MCInst &Inst);
227 void EmitToStreamer(const MCInst &Inst) {
228 EmitToStreamer(*OutStreamer, Inst);
229 }
230
231 void emitInstruction(const MachineInstr *MI) override;
232
233 void emitFunctionHeaderComment() override;
234
235 void getAnalysisUsage(AnalysisUsage &AU) const override {
236    AsmPrinter::getAnalysisUsage(AU);
237    AU.setPreservesAll();
238 }
239
240 bool runOnMachineFunction(MachineFunction &MF) override {
241 if (auto *PSIW = getAnalysisIfAvailable<ProfileSummaryInfoWrapperPass>())
242 PSI = &PSIW->getPSI();
243 if (auto *SDPIW =
244 getAnalysisIfAvailable<StaticDataProfileInfoWrapperPass>())
245 SDPI = &SDPIW->getStaticDataProfileInfo();
246
247 AArch64FI = MF.getInfo<AArch64FunctionInfo>();
248 STI = &MF.getSubtarget<AArch64Subtarget>();
249
250    SetupMachineFunction(MF);
251
252 if (STI->isTargetCOFF()) {
253 bool Local = MF.getFunction().hasLocalLinkage();
254      COFF::SymbolStorageClass Scl =
255          Local ? COFF::IMAGE_SYM_CLASS_STATIC : COFF::IMAGE_SYM_CLASS_EXTERNAL;
256      int Type =
257          COFF::IMAGE_SYM_DTYPE_FUNCTION << COFF::SCT_COMPLEX_TYPE_SHIFT;
258
259 OutStreamer->beginCOFFSymbolDef(CurrentFnSym);
260 OutStreamer->emitCOFFSymbolStorageClass(Scl);
261 OutStreamer->emitCOFFSymbolType(Type);
262 OutStreamer->endCOFFSymbolDef();
263 }
264
265    // Emit the rest of the function body.
266    emitFunctionBody();
267
268    // Emit the XRay table for this function.
269    emitXRayTable();
270
271 // We didn't modify anything.
272 return false;
273 }
274
275 const MCExpr *lowerConstant(const Constant *CV,
276 const Constant *BaseCV = nullptr,
277 uint64_t Offset = 0) override;
278
279private:
280 void printOperand(const MachineInstr *MI, unsigned OpNum, raw_ostream &O);
281 bool printAsmMRegister(const MachineOperand &MO, char Mode, raw_ostream &O);
282 bool printAsmRegInClass(const MachineOperand &MO,
283 const TargetRegisterClass *RC, unsigned AltName,
284 raw_ostream &O);
285
286 bool PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
287 const char *ExtraCode, raw_ostream &O) override;
288 bool PrintAsmMemoryOperand(const MachineInstr *MI, unsigned OpNum,
289 const char *ExtraCode, raw_ostream &O) override;
290
291 void PrintDebugValueComment(const MachineInstr *MI, raw_ostream &OS);
292
293 void emitFunctionBodyEnd() override;
294 void emitGlobalAlias(const Module &M, const GlobalAlias &GA) override;
295
296 MCSymbol *GetCPISymbol(unsigned CPID) const override;
297 void emitEndOfAsmFile(Module &M) override;
298
299 AArch64FunctionInfo *AArch64FI = nullptr;
300
301 /// Emit the LOHs contained in AArch64FI.
302 void emitLOHs();
303
304 void emitMovXReg(Register Dest, Register Src);
305 void emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift);
306 void emitMOVK(Register Dest, uint64_t Imm, unsigned Shift);
307
308 /// Emit instruction to set float register to zero.
309 void emitFMov0(const MachineInstr &MI);
310
311 using MInstToMCSymbol = std::map<const MachineInstr *, MCSymbol *>;
312
313 MInstToMCSymbol LOHInstToLabel;
314
315  bool shouldEmitWeakSwiftAsyncExtendedFramePointerFlags() const override {
316    return ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags;
317 }
318
319 const MCSubtargetInfo *getIFuncMCSubtargetInfo() const override {
320 assert(STI);
321 return STI;
322 }
323 void emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
324 MCSymbol *LazyPointer) override;
325  void emitMachOIFuncStubHelperBody(Module &M, const GlobalIFunc &GI,
326                                    MCSymbol *LazyPointer) override;
327
328  /// Checks if this instruction is part of a sequence that is eligible for import
329 /// call optimization and, if so, records it to be emitted in the import call
330 /// section.
331 void recordIfImportCall(const MachineInstr *BranchInst);
332};
333
334} // end anonymous namespace
335
336void AArch64AsmPrinter::emitStartOfAsmFile(Module &M) {
337 const Triple &TT = TM.getTargetTriple();
338
339 if (TT.isOSBinFormatCOFF()) {
340 emitCOFFFeatureSymbol(M);
341 emitCOFFReplaceableFunctionData(M);
342
343 if (M.getModuleFlag("import-call-optimization"))
344 EnableImportCallOptimization = true;
345 }
346
347 if (!TT.isOSBinFormatELF())
348 return;
349
350 // For emitting build attributes and .note.gnu.property section
351 auto *TS =
352 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
353 // Assemble feature flags that may require creation of build attributes and a
354 // note section.
355 unsigned BAFlags = 0;
356 unsigned GNUFlags = 0;
357 if (const auto *BTE = mdconst::extract_or_null<ConstantInt>(
358 M.getModuleFlag("branch-target-enforcement"))) {
359 if (!BTE->isZero()) {
360 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_BTI_Flag;
361      GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_BTI;
362    }
363 }
364
365 if (const auto *GCS = mdconst::extract_or_null<ConstantInt>(
366 M.getModuleFlag("guarded-control-stack"))) {
367 if (!GCS->isZero()) {
368 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_GCS_Flag;
369      GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
370    }
371 }
372
373 if (const auto *Sign = mdconst::extract_or_null<ConstantInt>(
374 M.getModuleFlag("sign-return-address"))) {
375 if (!Sign->isZero()) {
376 BAFlags |= AArch64BuildAttributes::FeatureAndBitsFlag::Feature_PAC_Flag;
377      GNUFlags |= ELF::GNU_PROPERTY_AARCH64_FEATURE_1_PAC;
378    }
379 }
380
381 uint64_t PAuthABIPlatform = -1;
382 if (const auto *PAP = mdconst::extract_or_null<ConstantInt>(
383 M.getModuleFlag("aarch64-elf-pauthabi-platform"))) {
384 PAuthABIPlatform = PAP->getZExtValue();
385 }
386
387 uint64_t PAuthABIVersion = -1;
388 if (const auto *PAV = mdconst::extract_or_null<ConstantInt>(
389 M.getModuleFlag("aarch64-elf-pauthabi-version"))) {
390 PAuthABIVersion = PAV->getZExtValue();
391 }
392
393 // Emit AArch64 Build Attributes
394 emitAttributes(BAFlags, PAuthABIPlatform, PAuthABIVersion, TS);
395 // Emit a .note.gnu.property section with the flags.
396 TS->emitNoteSection(GNUFlags, PAuthABIPlatform, PAuthABIVersion);
397}
398
399void AArch64AsmPrinter::emitFunctionHeaderComment() {
400 const AArch64FunctionInfo *FI = MF->getInfo<AArch64FunctionInfo>();
401 std::optional<std::string> OutlinerString = FI->getOutliningStyle();
402 if (OutlinerString != std::nullopt)
403 OutStreamer->getCommentOS() << ' ' << OutlinerString;
404}
405
406void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_ENTER(const MachineInstr &MI)
407{
408 const Function &F = MF->getFunction();
409 if (F.hasFnAttribute("patchable-function-entry")) {
410 unsigned Num;
411 if (F.getFnAttribute("patchable-function-entry")
412 .getValueAsString()
413 .getAsInteger(10, Num))
414 return;
415 emitNops(Num);
416 return;
417 }
418
419 emitSled(MI, SledKind::FUNCTION_ENTER);
420}
421
422void AArch64AsmPrinter::LowerPATCHABLE_FUNCTION_EXIT(const MachineInstr &MI) {
423 emitSled(MI, SledKind::FUNCTION_EXIT);
424}
425
426void AArch64AsmPrinter::LowerPATCHABLE_TAIL_CALL(const MachineInstr &MI) {
427 emitSled(MI, SledKind::TAIL_CALL);
428}
429
430void AArch64AsmPrinter::emitSled(const MachineInstr &MI, SledKind Kind) {
431 static const int8_t NoopsInSledCount = 7;
432 // We want to emit the following pattern:
433 //
434 // .Lxray_sled_N:
435 // ALIGN
436 // B #32
437 // ; 7 NOP instructions (28 bytes)
438 // .tmpN
439 //
440 // We need the 28 bytes (7 instructions) because at runtime, we'd be patching
441 // over the full 32 bytes (8 instructions) with the following pattern:
442 //
443 // STP X0, X30, [SP, #-16]! ; push X0 and the link register to the stack
444 // LDR W17, #12 ; W17 := function ID
445 // LDR X16,#12 ; X16 := addr of __xray_FunctionEntry or __xray_FunctionExit
446 // BLR X16 ; call the tracing trampoline
447 // ;DATA: 32 bits of function ID
448 // ;DATA: lower 32 bits of the address of the trampoline
449 // ;DATA: higher 32 bits of the address of the trampoline
450 // LDP X0, X30, [SP], #16 ; pop X0 and the link register from the stack
451 //
452 OutStreamer->emitCodeAlignment(Align(4), &getSubtargetInfo());
453 auto CurSled = OutContext.createTempSymbol("xray_sled_", true);
454 OutStreamer->emitLabel(CurSled);
455 auto Target = OutContext.createTempSymbol();
456
457 // Emit "B #32" instruction, which jumps over the next 28 bytes.
458 // The operand has to be the number of 4-byte instructions to jump over,
459 // including the current instruction.
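  // (Illustrative note: the immediate below is 8 because the branch itself is
  // counted, i.e. 8 instructions * 4 bytes = 32 bytes covering this B plus the
  // 7 NOPs that follow.)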
460 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::B).addImm(8));
461
462 for (int8_t I = 0; I < NoopsInSledCount; I++)
463 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
464
465 OutStreamer->emitLabel(Target);
466 recordSled(CurSled, MI, Kind, 2);
467}
468
469void AArch64AsmPrinter::emitAttributes(unsigned Flags,
470 uint64_t PAuthABIPlatform,
471 uint64_t PAuthABIVersion,
472                                        AArch64TargetStreamer *TS) {
473
474 PAuthABIPlatform = (uint64_t(-1) == PAuthABIPlatform) ? 0 : PAuthABIPlatform;
475 PAuthABIVersion = (uint64_t(-1) == PAuthABIVersion) ? 0 : PAuthABIVersion;
476
477 if (PAuthABIPlatform || PAuthABIVersion) {
481 AArch64BuildAttributes::SubsectionOptional::REQUIRED,
482 AArch64BuildAttributes::SubsectionType::ULEB128);
486 PAuthABIPlatform, "");
490 "");
491 }
492
493  unsigned BTIValue =
494      (Flags & AArch64BuildAttributes::Feature_BTI_Flag) ? 1 : 0;
495  unsigned PACValue =
496      (Flags & AArch64BuildAttributes::Feature_PAC_Flag) ? 1 : 0;
497  unsigned GCSValue =
498      (Flags & AArch64BuildAttributes::Feature_GCS_Flag) ? 1 : 0;
499
500 if (BTIValue || PACValue || GCSValue) {
504 AArch64BuildAttributes::SubsectionOptional::OPTIONAL,
505 AArch64BuildAttributes::SubsectionType::ULEB128);
515 }
516}
517
518// Emit the following code for Intrinsic::{xray_customevent,xray_typedevent}
519// (built-in functions __xray_customevent/__xray_typedevent).
520//
521// .Lxray_event_sled_N:
522// b 1f
523// save x0 and x1 (and also x2 for TYPED_EVENT_CALL)
524// set up x0 and x1 (and also x2 for TYPED_EVENT_CALL)
525// bl __xray_CustomEvent or __xray_TypedEvent
526// restore x0 and x1 (and also x2 for TYPED_EVENT_CALL)
527// 1:
528//
529// There are 6 instructions for EVENT_CALL and 9 for TYPED_EVENT_CALL.
530//
531// Then record a sled of kind CUSTOM_EVENT or TYPED_EVENT.
532// After patching, b .+N will become a nop.
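// Note (illustrative): the branch immediates used below (6 for the
// custom-event sled, 9 for the typed-event sled) count 4-byte instructions
// including the branch itself, matching the instruction counts listed above.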
533void AArch64AsmPrinter::LowerPATCHABLE_EVENT_CALL(const MachineInstr &MI,
534 bool Typed) {
535 auto &O = *OutStreamer;
536 MCSymbol *CurSled = OutContext.createTempSymbol("xray_sled_", true);
537 O.emitLabel(CurSled);
538 bool MachO = TM.getTargetTriple().isOSBinFormatMachO();
539  auto *Sym = MCSymbolRefExpr::create(
540      OutContext.getOrCreateSymbol(
541 Twine(MachO ? "_" : "") +
542 (Typed ? "__xray_TypedEvent" : "__xray_CustomEvent")),
543 OutContext);
544 if (Typed) {
545 O.AddComment("Begin XRay typed event");
546 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(9));
547 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
548 .addReg(AArch64::SP)
549 .addReg(AArch64::X0)
550 .addReg(AArch64::X1)
551 .addReg(AArch64::SP)
552 .addImm(-4));
553 EmitToStreamer(O, MCInstBuilder(AArch64::STRXui)
554 .addReg(AArch64::X2)
555 .addReg(AArch64::SP)
556 .addImm(2));
557 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
558 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
559 emitMovXReg(AArch64::X2, MI.getOperand(2).getReg());
560 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
561 EmitToStreamer(O, MCInstBuilder(AArch64::LDRXui)
562 .addReg(AArch64::X2)
563 .addReg(AArch64::SP)
564 .addImm(2));
565 O.AddComment("End XRay typed event");
566 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
567 .addReg(AArch64::SP)
568 .addReg(AArch64::X0)
569 .addReg(AArch64::X1)
570 .addReg(AArch64::SP)
571 .addImm(4));
572
573 recordSled(CurSled, MI, SledKind::TYPED_EVENT, 2);
574 } else {
575 O.AddComment("Begin XRay custom event");
576 EmitToStreamer(O, MCInstBuilder(AArch64::B).addImm(6));
577 EmitToStreamer(O, MCInstBuilder(AArch64::STPXpre)
578 .addReg(AArch64::SP)
579 .addReg(AArch64::X0)
580 .addReg(AArch64::X1)
581 .addReg(AArch64::SP)
582 .addImm(-2));
583 emitMovXReg(AArch64::X0, MI.getOperand(0).getReg());
584 emitMovXReg(AArch64::X1, MI.getOperand(1).getReg());
585 EmitToStreamer(O, MCInstBuilder(AArch64::BL).addExpr(Sym));
586 O.AddComment("End XRay custom event");
587 EmitToStreamer(O, MCInstBuilder(AArch64::LDPXpost)
588 .addReg(AArch64::SP)
589 .addReg(AArch64::X0)
590 .addReg(AArch64::X1)
591 .addReg(AArch64::SP)
592 .addImm(2));
593
594 recordSled(CurSled, MI, SledKind::CUSTOM_EVENT, 2);
595 }
596}
597
598void AArch64AsmPrinter::LowerKCFI_CHECK(const MachineInstr &MI) {
599 Register AddrReg = MI.getOperand(0).getReg();
600 assert(std::next(MI.getIterator())->isCall() &&
601 "KCFI_CHECK not followed by a call instruction");
602 assert(std::next(MI.getIterator())->getOperand(0).getReg() == AddrReg &&
603 "KCFI_CHECK call target doesn't match call operand");
604
605 // Default to using the intra-procedure-call temporary registers for
606 // comparing the hashes.
607 unsigned ScratchRegs[] = {AArch64::W16, AArch64::W17};
608 if (AddrReg == AArch64::XZR) {
609 // Checking XZR makes no sense. Instead of emitting a load, zero
610 // ScratchRegs[0] and use it for the ESR AddrIndex below.
611 AddrReg = getXRegFromWReg(ScratchRegs[0]);
612 emitMovXReg(AddrReg, AArch64::XZR);
613 } else {
614 // If one of the scratch registers is used for the call target (e.g.
615 // with AArch64::TCRETURNriBTI), we can clobber another caller-saved
616 // temporary register instead (in this case, AArch64::W9) as the check
617 // is immediately followed by the call instruction.
618 for (auto &Reg : ScratchRegs) {
619 if (Reg == getWRegFromXReg(AddrReg)) {
620 Reg = AArch64::W9;
621 break;
622 }
623 }
624 assert(ScratchRegs[0] != AddrReg && ScratchRegs[1] != AddrReg &&
625 "Invalid scratch registers for KCFI_CHECK");
626
627 // Adjust the offset for patchable-function-prefix. This assumes that
628 // patchable-function-prefix is the same for all functions.
629 int64_t PrefixNops = 0;
630 (void)MI.getMF()
631 ->getFunction()
632 .getFnAttribute("patchable-function-prefix")
633 .getValueAsString()
634 .getAsInteger(10, PrefixNops);
635
636 // Load the target function type hash.
637 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDURWi)
638 .addReg(ScratchRegs[0])
639 .addReg(AddrReg)
640 .addImm(-(PrefixNops * 4 + 4)));
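    // Worked example (illustrative): with "patchable-function-prefix"="2" the
    // type hash is loaded from [AddrReg, #-12], i.e. 2 prefix NOPs * 4 bytes
    // plus the 4-byte hash word itself.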
641 }
642
643 // Load the expected type hash.
644 const int64_t Type = MI.getOperand(1).getImm();
645 emitMOVK(ScratchRegs[1], Type & 0xFFFF, 0);
646 emitMOVK(ScratchRegs[1], (Type >> 16) & 0xFFFF, 16);
647
648 // Compare the hashes and trap if there's a mismatch.
649 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSWrs)
650 .addReg(AArch64::WZR)
651 .addReg(ScratchRegs[0])
652 .addReg(ScratchRegs[1])
653 .addImm(0));
654
655 MCSymbol *Pass = OutContext.createTempSymbol();
656 EmitToStreamer(*OutStreamer,
657 MCInstBuilder(AArch64::Bcc)
658 .addImm(AArch64CC::EQ)
659 .addExpr(MCSymbolRefExpr::create(Pass, OutContext)));
660
661 // The base ESR is 0x8000 and the register information is encoded in bits
662 // 0-9 as follows:
663 // - 0-4: n, where the register Xn contains the target address
664 // - 5-9: m, where the register Wm contains the expected type hash
665 // Where n, m are in [0, 30].
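  // Worked example (illustrative): a target address in X3 and the expected
  // hash in W17 give ESR = 0x8000 | (17 << 5) | 3 = 0x8223, i.e. `brk #0x8223`.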
666 unsigned TypeIndex = ScratchRegs[1] - AArch64::W0;
667 unsigned AddrIndex;
668 switch (AddrReg) {
669 default:
670 AddrIndex = AddrReg - AArch64::X0;
671 break;
672 case AArch64::FP:
673 AddrIndex = 29;
674 break;
675 case AArch64::LR:
676 AddrIndex = 30;
677 break;
678 }
679
680 assert(AddrIndex < 31 && TypeIndex < 31);
681
682 unsigned ESR = 0x8000 | ((TypeIndex & 31) << 5) | (AddrIndex & 31);
683 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BRK).addImm(ESR));
684 OutStreamer->emitLabel(Pass);
685}
686
687void AArch64AsmPrinter::LowerHWASAN_CHECK_MEMACCESS(const MachineInstr &MI) {
688 Register Reg = MI.getOperand(0).getReg();
689
690 // The HWASan pass won't emit a CHECK_MEMACCESS intrinsic with a pointer
691 // statically known to be zero. However, conceivably, the HWASan pass may
692 // encounter a "cannot currently statically prove to be null" pointer (and is
693 // therefore unable to omit the intrinsic) that later optimization passes
694 // convert into a statically known-null pointer.
695 if (Reg == AArch64::XZR)
696 return;
697
698 bool IsShort =
699 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES) ||
700 (MI.getOpcode() ==
701 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
702 uint32_t AccessInfo = MI.getOperand(1).getImm();
703 bool IsFixedShadow =
704 ((MI.getOpcode() == AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW) ||
705 (MI.getOpcode() ==
706 AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW));
707 uint64_t FixedShadowOffset = IsFixedShadow ? MI.getOperand(2).getImm() : 0;
708
709 MCSymbol *&Sym = HwasanMemaccessSymbols[HwasanMemaccessTuple(
710 Reg, IsShort, AccessInfo, IsFixedShadow, FixedShadowOffset)];
711 if (!Sym) {
712 // FIXME: Make this work on non-ELF.
713 if (!TM.getTargetTriple().isOSBinFormatELF())
714 report_fatal_error("llvm.hwasan.check.memaccess only supported on ELF");
715
716 std::string SymName = "__hwasan_check_x" + utostr(Reg - AArch64::X0) + "_" +
717 utostr(AccessInfo);
718 if (IsFixedShadow)
719 SymName += "_fixed_" + utostr(FixedShadowOffset);
720 if (IsShort)
721 SymName += "_short_v2";
722 Sym = OutContext.getOrCreateSymbol(SymName);
723 }
724
725 EmitToStreamer(*OutStreamer,
726 MCInstBuilder(AArch64::BL)
727 .addExpr(MCSymbolRefExpr::create(Sym, OutContext)));
728}
729
730void AArch64AsmPrinter::emitHwasanMemaccessSymbols(Module &M) {
731 if (HwasanMemaccessSymbols.empty())
732 return;
733
734 const Triple &TT = TM.getTargetTriple();
735 assert(TT.isOSBinFormatELF());
736 std::unique_ptr<MCSubtargetInfo> STI(
737 TM.getTarget().createMCSubtargetInfo(TT.str(), "", ""));
738 assert(STI && "Unable to create subtarget info");
739 this->STI = static_cast<const AArch64Subtarget *>(&*STI);
740
741 MCSymbol *HwasanTagMismatchV1Sym =
742 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch");
743 MCSymbol *HwasanTagMismatchV2Sym =
744 OutContext.getOrCreateSymbol("__hwasan_tag_mismatch_v2");
745
746 const MCSymbolRefExpr *HwasanTagMismatchV1Ref =
747 MCSymbolRefExpr::create(HwasanTagMismatchV1Sym, OutContext);
748 const MCSymbolRefExpr *HwasanTagMismatchV2Ref =
749 MCSymbolRefExpr::create(HwasanTagMismatchV2Sym, OutContext);
750
751 for (auto &P : HwasanMemaccessSymbols) {
752 unsigned Reg = std::get<0>(P.first);
753 bool IsShort = std::get<1>(P.first);
754 uint32_t AccessInfo = std::get<2>(P.first);
755 bool IsFixedShadow = std::get<3>(P.first);
756 uint64_t FixedShadowOffset = std::get<4>(P.first);
757 const MCSymbolRefExpr *HwasanTagMismatchRef =
758 IsShort ? HwasanTagMismatchV2Ref : HwasanTagMismatchV1Ref;
759 MCSymbol *Sym = P.second;
760
761 bool HasMatchAllTag =
762 (AccessInfo >> HWASanAccessInfo::HasMatchAllShift) & 1;
763 uint8_t MatchAllTag =
764 (AccessInfo >> HWASanAccessInfo::MatchAllShift) & 0xff;
765 unsigned Size =
766 1 << ((AccessInfo >> HWASanAccessInfo::AccessSizeShift) & 0xf);
767 bool CompileKernel =
768 (AccessInfo >> HWASanAccessInfo::CompileKernelShift) & 1;
769
770 OutStreamer->switchSection(OutContext.getELFSection(
771 ".text.hot", ELF::SHT_PROGBITS,
772        ELF::SHF_EXECINSTR | ELF::SHF_ALLOC | ELF::SHF_GROUP, 0, Sym->getName(),
773        /*IsComdat=*/true));
774
775 OutStreamer->emitSymbolAttribute(Sym, MCSA_ELF_TypeFunction);
776 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
777 OutStreamer->emitSymbolAttribute(Sym, MCSA_Hidden);
778 OutStreamer->emitLabel(Sym);
779
780 EmitToStreamer(MCInstBuilder(AArch64::SBFMXri)
781 .addReg(AArch64::X16)
782 .addReg(Reg)
783 .addImm(4)
784 .addImm(55));
785
786 if (IsFixedShadow) {
787      // AArch64 makes it difficult to embed large constants in the code.
788 // Fortuitously, kShadowBaseAlignment == 32, so we use the 32-bit
789 // left-shift option in the MOV instruction. Combined with the 16-bit
790 // immediate, this is enough to represent any offset up to 2**48.
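      // Worked example (illustrative): a fixed shadow base of 0x100000000000
      // (1 << 44) needs only `movz x17, #0x1000, lsl #32`.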
791 emitMOVZ(AArch64::X17, FixedShadowOffset >> 32, 32);
792 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
793 .addReg(AArch64::W16)
794 .addReg(AArch64::X17)
795 .addReg(AArch64::X16)
796 .addImm(0)
797 .addImm(0));
798 } else {
799 EmitToStreamer(MCInstBuilder(AArch64::LDRBBroX)
800 .addReg(AArch64::W16)
801 .addReg(IsShort ? AArch64::X20 : AArch64::X9)
802 .addReg(AArch64::X16)
803 .addImm(0)
804 .addImm(0));
805 }
806
807 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
808 .addReg(AArch64::XZR)
809 .addReg(AArch64::X16)
810 .addReg(Reg)
811                       .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
812    MCSymbol *HandleMismatchOrPartialSym = OutContext.createTempSymbol();
813 EmitToStreamer(MCInstBuilder(AArch64::Bcc)
814 .addImm(AArch64CC::NE)
815                       .addExpr(MCSymbolRefExpr::create(
816                           HandleMismatchOrPartialSym, OutContext)));
817 MCSymbol *ReturnSym = OutContext.createTempSymbol();
818 OutStreamer->emitLabel(ReturnSym);
819 EmitToStreamer(MCInstBuilder(AArch64::RET).addReg(AArch64::LR));
820 OutStreamer->emitLabel(HandleMismatchOrPartialSym);
821
822 if (HasMatchAllTag) {
823 EmitToStreamer(MCInstBuilder(AArch64::UBFMXri)
824 .addReg(AArch64::X17)
825 .addReg(Reg)
826 .addImm(56)
827 .addImm(63));
828 EmitToStreamer(MCInstBuilder(AArch64::SUBSXri)
829 .addReg(AArch64::XZR)
830 .addReg(AArch64::X17)
831 .addImm(MatchAllTag)
832 .addImm(0));
833 EmitToStreamer(
834 MCInstBuilder(AArch64::Bcc)
835 .addImm(AArch64CC::EQ)
836 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
837 }
838
839 if (IsShort) {
840 EmitToStreamer(MCInstBuilder(AArch64::SUBSWri)
841 .addReg(AArch64::WZR)
842 .addReg(AArch64::W16)
843 .addImm(15)
844 .addImm(0));
845 MCSymbol *HandleMismatchSym = OutContext.createTempSymbol();
846 EmitToStreamer(
847 MCInstBuilder(AArch64::Bcc)
848 .addImm(AArch64CC::HI)
849 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
850
851 EmitToStreamer(MCInstBuilder(AArch64::ANDXri)
852 .addReg(AArch64::X17)
853 .addReg(Reg)
854 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
855 if (Size != 1)
856 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
857 .addReg(AArch64::X17)
858 .addReg(AArch64::X17)
859 .addImm(Size - 1)
860 .addImm(0));
861 EmitToStreamer(MCInstBuilder(AArch64::SUBSWrs)
862 .addReg(AArch64::WZR)
863 .addReg(AArch64::W16)
864 .addReg(AArch64::W17)
865 .addImm(0));
866 EmitToStreamer(
867 MCInstBuilder(AArch64::Bcc)
868 .addImm(AArch64CC::LS)
869 .addExpr(MCSymbolRefExpr::create(HandleMismatchSym, OutContext)));
870
871 EmitToStreamer(MCInstBuilder(AArch64::ORRXri)
872 .addReg(AArch64::X16)
873 .addReg(Reg)
874 .addImm(AArch64_AM::encodeLogicalImmediate(0xf, 64)));
875 EmitToStreamer(MCInstBuilder(AArch64::LDRBBui)
876 .addReg(AArch64::W16)
877 .addReg(AArch64::X16)
878 .addImm(0));
879 EmitToStreamer(
880 MCInstBuilder(AArch64::SUBSXrs)
881 .addReg(AArch64::XZR)
882 .addReg(AArch64::X16)
883 .addReg(Reg)
884                         .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSR, 56)));
885      EmitToStreamer(
886 MCInstBuilder(AArch64::Bcc)
887 .addImm(AArch64CC::EQ)
888 .addExpr(MCSymbolRefExpr::create(ReturnSym, OutContext)));
889
890 OutStreamer->emitLabel(HandleMismatchSym);
891 }
892
893 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
894 .addReg(AArch64::SP)
895 .addReg(AArch64::X0)
896 .addReg(AArch64::X1)
897 .addReg(AArch64::SP)
898 .addImm(-32));
899 EmitToStreamer(MCInstBuilder(AArch64::STPXi)
900 .addReg(AArch64::FP)
901 .addReg(AArch64::LR)
902 .addReg(AArch64::SP)
903 .addImm(29));
904
905 if (Reg != AArch64::X0)
906 emitMovXReg(AArch64::X0, Reg);
907 emitMOVZ(AArch64::X1, AccessInfo & HWASanAccessInfo::RuntimeMask, 0);
908
909 if (CompileKernel) {
910 // The Linux kernel's dynamic loader doesn't support GOT relative
911 // relocations, but it doesn't support late binding either, so just call
912 // the function directly.
913 EmitToStreamer(MCInstBuilder(AArch64::B).addExpr(HwasanTagMismatchRef));
914 } else {
915 // Intentionally load the GOT entry and branch to it, rather than possibly
916 // late binding the function, which may clobber the registers before we
917 // have a chance to save them.
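      // The result is the standard GOT-indirect sequence (illustrative, shown
      // for the short-granules variant):
      //   adrp x16, :got:__hwasan_tag_mismatch_v2
      //   ldr  x16, [x16, :got_lo12:__hwasan_tag_mismatch_v2]
      //   br   x16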
918 EmitToStreamer(MCInstBuilder(AArch64::ADRP)
919 .addReg(AArch64::X16)
920 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
922 OutContext)));
923 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
924 .addReg(AArch64::X16)
925 .addReg(AArch64::X16)
926 .addExpr(MCSpecifierExpr::create(HwasanTagMismatchRef,
928 OutContext)));
929 EmitToStreamer(MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
930 }
931 }
932 this->STI = nullptr;
933}
934
935static void emitAuthenticatedPointer(MCStreamer &OutStreamer,
936 MCSymbol *StubLabel,
937 const MCExpr *StubAuthPtrRef) {
938 // sym$auth_ptr$key$disc:
939 OutStreamer.emitLabel(StubLabel);
940 OutStreamer.emitValue(StubAuthPtrRef, /*size=*/8);
941}
942
943void AArch64AsmPrinter::emitEndOfAsmFile(Module &M) {
944 emitHwasanMemaccessSymbols(M);
945
946 const Triple &TT = TM.getTargetTriple();
947 if (TT.isOSBinFormatMachO()) {
948 // Output authenticated pointers as indirect symbols, if we have any.
949 MachineModuleInfoMachO &MMIMacho =
950 MMI->getObjFileInfo<MachineModuleInfoMachO>();
951
952 auto Stubs = MMIMacho.getAuthGVStubList();
953
954 if (!Stubs.empty()) {
955 // Switch to the "__auth_ptr" section.
956 OutStreamer->switchSection(
957 OutContext.getMachOSection("__DATA", "__auth_ptr", MachO::S_REGULAR,
958                                     SectionKind::getMetadata()));
959      emitAlignment(Align(8));
960
961 for (const auto &Stub : Stubs)
962 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
963
964 OutStreamer->addBlankLine();
965 }
966
967 // Funny Darwin hack: This flag tells the linker that no global symbols
968 // contain code that falls through to other global symbols (e.g. the obvious
969 // implementation of multiple entry points). If this doesn't occur, the
970 // linker can safely perform dead code stripping. Since LLVM never
971 // generates code that does this, it is always safe to set.
972 OutStreamer->emitSubsectionsViaSymbols();
973 }
974
975 if (TT.isOSBinFormatELF()) {
976 // Output authenticated pointers as indirect symbols, if we have any.
977 MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
978
979 auto Stubs = MMIELF.getAuthGVStubList();
980
981 if (!Stubs.empty()) {
982 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
983 OutStreamer->switchSection(TLOF.getDataSection());
984 emitAlignment(Align(8));
985
986 for (const auto &Stub : Stubs)
987 emitAuthenticatedPointer(*OutStreamer, Stub.first, Stub.second);
988
989 OutStreamer->addBlankLine();
990 }
991
992 // With signed ELF GOT enabled, the linker looks at the symbol type to
993 // choose between keys IA (for STT_FUNC) and DA (for other types). Symbols
994 // for functions not defined in the module have STT_NOTYPE type by default.
995    // This makes the linker emit a signing schema with the DA key (instead of IA)
996    // for the corresponding R_AARCH64_AUTH_GLOB_DAT dynamic reloc. To avoid that,
997    // force all function symbols used in the module to have STT_FUNC type. See
998 // https://github.com/ARM-software/abi-aa/blob/main/pauthabielf64/pauthabielf64.rst#default-signing-schema
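    // (Illustrative note: forcing the symbol type to STT_FUNC below corresponds
    // to emitting a `.type <symbol>, %function` directive for each such symbol.)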
999 const auto *PtrAuthELFGOTFlag = mdconst::extract_or_null<ConstantInt>(
1000 M.getModuleFlag("ptrauth-elf-got"));
1001 if (PtrAuthELFGOTFlag && PtrAuthELFGOTFlag->getZExtValue() == 1)
1002 for (const GlobalValue &GV : M.global_values())
1003 if (!GV.use_empty() && isa<Function>(GV) &&
1004 !GV.getName().starts_with("llvm."))
1005 OutStreamer->emitSymbolAttribute(getSymbol(&GV),
1006                                         MCSA_ELF_TypeFunction);
1007  }
1008
1009 // Emit stack and fault map information.
1010 FM.serializeToFaultMapSection();
1011
1012 // If import call optimization is enabled, emit the appropriate section.
1013 // We do this whether or not we recorded any import calls.
1014 if (EnableImportCallOptimization && TT.isOSBinFormatCOFF()) {
1015 OutStreamer->switchSection(getObjFileLowering().getImportCallSection());
1016
1017 // Section always starts with some magic.
1018 constexpr char ImpCallMagic[12] = "Imp_Call_V1";
1019 OutStreamer->emitBytes(StringRef{ImpCallMagic, sizeof(ImpCallMagic)});
1020
1021 // Layout of this section is:
1022 // Per section that contains calls to imported functions:
1023 // uint32_t SectionSize: Size in bytes for information in this section.
1024 // uint32_t Section Number
1025 // Per call to imported function in section:
1026 // uint32_t Kind: the kind of imported function.
1027 // uint32_t BranchOffset: the offset of the branch instruction in its
1028 // parent section.
1029 // uint32_t TargetSymbolId: the symbol id of the called function.
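    // Worked example (illustrative): a section containing two calls to imported
    // functions emits SectionSize = 4 * (2 + 3 * 2) = 32 bytes of payload.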
1030 for (auto &[Section, CallsToImportedFuncs] :
1031 SectionToImportedFunctionCalls) {
1032 unsigned SectionSize =
1033 sizeof(uint32_t) * (2 + 3 * CallsToImportedFuncs.size());
1034 OutStreamer->emitInt32(SectionSize);
1035 OutStreamer->emitCOFFSecNumber(Section->getBeginSymbol());
1036 for (auto &[CallsiteSymbol, CalledSymbol] : CallsToImportedFuncs) {
1037 // Kind is always IMAGE_REL_ARM64_DYNAMIC_IMPORT_CALL (0x13).
1038 OutStreamer->emitInt32(0x13);
1039 OutStreamer->emitCOFFSecOffset(CallsiteSymbol);
1040 OutStreamer->emitCOFFSymbolIndex(CalledSymbol);
1041 }
1042 }
1043 }
1044}
1045
1046void AArch64AsmPrinter::emitLOHs() {
1047  SmallVector<MCSymbol *, 3> MCArgs;
1048
1049 for (const auto &D : AArch64FI->getLOHContainer()) {
1050 for (const MachineInstr *MI : D.getArgs()) {
1051 MInstToMCSymbol::iterator LabelIt = LOHInstToLabel.find(MI);
1052 assert(LabelIt != LOHInstToLabel.end() &&
1053 "Label hasn't been inserted for LOH related instruction");
1054 MCArgs.push_back(LabelIt->second);
1055 }
1056 OutStreamer->emitLOHDirective(D.getKind(), MCArgs);
1057 MCArgs.clear();
1058 }
1059}
1060
1061void AArch64AsmPrinter::emitFunctionBodyEnd() {
1062 if (!AArch64FI->getLOHRelated().empty())
1063 emitLOHs();
1064}
1065
1066/// GetCPISymbol - Return the symbol for the specified constant pool entry.
1067MCSymbol *AArch64AsmPrinter::GetCPISymbol(unsigned CPID) const {
1068 // Darwin uses a linker-private symbol name for constant-pools (to
1069 // avoid addends on the relocation?), ELF has no such concept and
1070 // uses a normal private symbol.
1071 if (!getDataLayout().getLinkerPrivateGlobalPrefix().empty())
1072 return OutContext.getOrCreateSymbol(
1073 Twine(getDataLayout().getLinkerPrivateGlobalPrefix()) + "CPI" +
1074 Twine(getFunctionNumber()) + "_" + Twine(CPID));
1075
1076 return AsmPrinter::GetCPISymbol(CPID);
1077}
1078
1079void AArch64AsmPrinter::printOperand(const MachineInstr *MI, unsigned OpNum,
1080 raw_ostream &O) {
1081 const MachineOperand &MO = MI->getOperand(OpNum);
1082 switch (MO.getType()) {
1083 default:
1084 llvm_unreachable("<unknown operand type>");
1085  case MachineOperand::MO_Register: {
1086    Register Reg = MO.getReg();
1087    assert(Reg.isPhysical());
1088    assert(!MO.getSubReg() && "Subregs should be eliminated!");
1089    O << AArch64InstPrinter::getRegisterName(Reg);
1090    break;
1091  }
1092  case MachineOperand::MO_Immediate: {
1093    O << MO.getImm();
1094    break;
1095  }
1096  case MachineOperand::MO_GlobalAddress: {
1097    PrintSymbolOperand(MO, O);
1098    break;
1099  }
1100  case MachineOperand::MO_BlockAddress: {
1101    MCSymbol *Sym = GetBlockAddressSymbol(MO.getBlockAddress());
1102    Sym->print(O, MAI);
1103    break;
1104  }
1105 }
1106}
1107
1108bool AArch64AsmPrinter::printAsmMRegister(const MachineOperand &MO, char Mode,
1109 raw_ostream &O) {
1110 Register Reg = MO.getReg();
1111 switch (Mode) {
1112 default:
1113 return true; // Unknown mode.
1114 case 'w':
1115 Reg = getWRegFromXReg(Reg);
1116 break;
1117 case 'x':
1118 Reg = getXRegFromWReg(Reg);
1119 break;
1120 case 't':
1122 break;
1123 }
1124
1125  O << AArch64InstPrinter::getRegisterName(Reg);
1126  return false;
1127}
1128
1129// Prints the register in MO using class RC using the offset in the
1130// new register class. This should not be used for cross class
1131// printing.
1132bool AArch64AsmPrinter::printAsmRegInClass(const MachineOperand &MO,
1133 const TargetRegisterClass *RC,
1134 unsigned AltName, raw_ostream &O) {
1135 assert(MO.isReg() && "Should only get here with a register!");
1136 const TargetRegisterInfo *RI = STI->getRegisterInfo();
1137 Register Reg = MO.getReg();
1138 MCRegister RegToPrint = RC->getRegister(RI->getEncodingValue(Reg));
1139 if (!RI->regsOverlap(RegToPrint, Reg))
1140 return true;
1141 O << AArch64InstPrinter::getRegisterName(RegToPrint, AltName);
1142 return false;
1143}
1144
1145bool AArch64AsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum,
1146 const char *ExtraCode, raw_ostream &O) {
1147 const MachineOperand &MO = MI->getOperand(OpNum);
1148
1149 // First try the generic code, which knows about modifiers like 'c' and 'n'.
1150 if (!AsmPrinter::PrintAsmOperand(MI, OpNum, ExtraCode, O))
1151 return false;
1152
1153 // Does this asm operand have a single letter operand modifier?
1154 if (ExtraCode && ExtraCode[0]) {
1155 if (ExtraCode[1] != 0)
1156 return true; // Unknown modifier.
1157
1158 switch (ExtraCode[0]) {
1159 default:
1160 return true; // Unknown modifier.
1161 case 'w': // Print W register
1162 case 'x': // Print X register
1163 if (MO.isReg())
1164 return printAsmMRegister(MO, ExtraCode[0], O);
1165 if (MO.isImm() && MO.getImm() == 0) {
1166 unsigned Reg = ExtraCode[0] == 'w' ? AArch64::WZR : AArch64::XZR;
1167        O << AArch64InstPrinter::getRegisterName(Reg);
1168        return false;
1169 }
1170 printOperand(MI, OpNum, O);
1171 return false;
1172 case 'b': // Print B register.
1173 case 'h': // Print H register.
1174 case 's': // Print S register.
1175 case 'd': // Print D register.
1176 case 'q': // Print Q register.
1177 case 'z': // Print Z register.
1178 if (MO.isReg()) {
1179 const TargetRegisterClass *RC;
1180 switch (ExtraCode[0]) {
1181 case 'b':
1182 RC = &AArch64::FPR8RegClass;
1183 break;
1184 case 'h':
1185 RC = &AArch64::FPR16RegClass;
1186 break;
1187 case 's':
1188 RC = &AArch64::FPR32RegClass;
1189 break;
1190 case 'd':
1191 RC = &AArch64::FPR64RegClass;
1192 break;
1193 case 'q':
1194 RC = &AArch64::FPR128RegClass;
1195 break;
1196 case 'z':
1197 RC = &AArch64::ZPRRegClass;
1198 break;
1199 default:
1200 return true;
1201 }
1202 return printAsmRegInClass(MO, RC, AArch64::NoRegAltName, O);
1203 }
1204 printOperand(MI, OpNum, O);
1205 return false;
1206 }
1207 }
1208
1209 // According to ARM, we should emit x and v registers unless we have a
1210 // modifier.
1211 if (MO.isReg()) {
1212 Register Reg = MO.getReg();
1213
1214 // If this is a w or x register, print an x register.
1215 if (AArch64::GPR32allRegClass.contains(Reg) ||
1216 AArch64::GPR64allRegClass.contains(Reg))
1217 return printAsmMRegister(MO, 'x', O);
1218
1219 // If this is an x register tuple, print an x register.
1220 if (AArch64::GPR64x8ClassRegClass.contains(Reg))
1221 return printAsmMRegister(MO, 't', O);
1222
1223 unsigned AltName = AArch64::NoRegAltName;
1224 const TargetRegisterClass *RegClass;
1225 if (AArch64::ZPRRegClass.contains(Reg)) {
1226 RegClass = &AArch64::ZPRRegClass;
1227 } else if (AArch64::PPRRegClass.contains(Reg)) {
1228 RegClass = &AArch64::PPRRegClass;
1229 } else if (AArch64::PNRRegClass.contains(Reg)) {
1230 RegClass = &AArch64::PNRRegClass;
1231 } else {
1232 RegClass = &AArch64::FPR128RegClass;
1233 AltName = AArch64::vreg;
1234 }
1235
1236 // If this is a b, h, s, d, or q register, print it as a v register.
1237 return printAsmRegInClass(MO, RegClass, AltName, O);
1238 }
1239
1240 printOperand(MI, OpNum, O);
1241 return false;
1242}
1243
1244bool AArch64AsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI,
1245 unsigned OpNum,
1246 const char *ExtraCode,
1247 raw_ostream &O) {
1248 if (ExtraCode && ExtraCode[0] && ExtraCode[0] != 'a')
1249 return true; // Unknown modifier.
1250
1251 const MachineOperand &MO = MI->getOperand(OpNum);
1252 assert(MO.isReg() && "unexpected inline asm memory operand");
1253 O << "[" << AArch64InstPrinter::getRegisterName(MO.getReg()) << "]";
1254 return false;
1255}
1256
1257void AArch64AsmPrinter::PrintDebugValueComment(const MachineInstr *MI,
1258 raw_ostream &OS) {
1259 unsigned NOps = MI->getNumOperands();
1260 assert(NOps == 4);
1261 OS << '\t' << MAI->getCommentString() << "DEBUG_VALUE: ";
1262 // cast away const; DIetc do not take const operands for some reason.
1263 OS << MI->getDebugVariable()->getName();
1264 OS << " <- ";
1265 // Frame address. Currently handles register +- offset only.
1266 assert(MI->isIndirectDebugValue());
1267 OS << '[';
1268 for (unsigned I = 0, E = std::distance(MI->debug_operands().begin(),
1269 MI->debug_operands().end());
1270 I < E; ++I) {
1271 if (I != 0)
1272 OS << ", ";
1273 printOperand(MI, I, OS);
1274 }
1275 OS << ']';
1276 OS << "+";
1277 printOperand(MI, NOps - 2, OS);
1278}
1279
1280void AArch64AsmPrinter::emitJumpTableImpl(const MachineJumpTableInfo &MJTI,
1281 ArrayRef<unsigned> JumpTableIndices) {
1282 // Fast return if there is nothing to emit to avoid creating empty sections.
1283 if (JumpTableIndices.empty())
1284 return;
1285 const TargetLoweringObjectFile &TLOF = getObjFileLowering();
1286 const auto &F = MF->getFunction();
1287  const std::vector<MachineJumpTableEntry> &JT = MJTI.getJumpTables();
1288
1289 MCSection *ReadOnlySec = nullptr;
1290 if (TM.Options.EnableStaticDataPartitioning) {
1291 ReadOnlySec =
1292 TLOF.getSectionForJumpTable(F, TM, &JT[JumpTableIndices.front()]);
1293 } else {
1294 ReadOnlySec = TLOF.getSectionForJumpTable(F, TM);
1295 }
1296 OutStreamer->switchSection(ReadOnlySec);
1297
1298 auto AFI = MF->getInfo<AArch64FunctionInfo>();
1299 for (unsigned JTI : JumpTableIndices) {
1300 const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
1301
1302 // If this jump table was deleted, ignore it.
1303 if (JTBBs.empty()) continue;
1304
1305 unsigned Size = AFI->getJumpTableEntrySize(JTI);
1306 emitAlignment(Align(Size));
1307 OutStreamer->emitLabel(GetJTISymbol(JTI));
1308
1309 const MCSymbol *BaseSym = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1310 const MCExpr *Base = MCSymbolRefExpr::create(BaseSym, OutContext);
1311
1312 for (auto *JTBB : JTBBs) {
1313 const MCExpr *Value =
1314 MCSymbolRefExpr::create(JTBB->getSymbol(), OutContext);
1315
1316 // Each entry is:
1317 // .byte/.hword (LBB - Lbase)>>2
1318 // or plain:
1319 // .word LBB - Lbase
1320 Value = MCBinaryExpr::createSub(Value, Base, OutContext);
1321 if (Size != 4)
1322        Value = MCBinaryExpr::createLShr(
1323            Value, MCConstantExpr::create(2, OutContext), OutContext);
1324
1325 OutStreamer->emitValue(Value, Size);
1326 }
1327 }
1328}
1329
1330std::tuple<const MCSymbol *, uint64_t, const MCSymbol *,
1331           codeview::JumpTableEntrySize>
1332AArch64AsmPrinter::getCodeViewJumpTableInfo(int JTI,
1333 const MachineInstr *BranchInstr,
1334 const MCSymbol *BranchLabel) const {
1335 const auto AFI = MF->getInfo<AArch64FunctionInfo>();
1336 const auto Base = AArch64FI->getJumpTableEntryPCRelSymbol(JTI);
1337  codeview::JumpTableEntrySize EntrySize;
1338  switch (AFI->getJumpTableEntrySize(JTI)) {
1339 case 1:
1340 EntrySize = codeview::JumpTableEntrySize::UInt8ShiftLeft;
1341 break;
1342 case 2:
1343 EntrySize = codeview::JumpTableEntrySize::UInt16ShiftLeft;
1344 break;
1345 case 4:
1346 EntrySize = codeview::JumpTableEntrySize::Int32;
1347 break;
1348 default:
1349 llvm_unreachable("Unexpected jump table entry size");
1350 }
1351 return std::make_tuple(Base, 0, BranchLabel, EntrySize);
1352}
1353
1354void AArch64AsmPrinter::emitFunctionEntryLabel() {
1355 const Triple &TT = TM.getTargetTriple();
1356 if (TT.isOSBinFormatELF() &&
1357 (MF->getFunction().getCallingConv() == CallingConv::AArch64_VectorCall ||
1358 MF->getFunction().getCallingConv() ==
1359           CallingConv::AArch64_SVE_VectorCall ||
1360       MF->getInfo<AArch64FunctionInfo>()->isSVECC())) {
1361 auto *TS =
1362 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
1363 TS->emitDirectiveVariantPCS(CurrentFnSym);
1364 }
1365
1366  AsmPrinter::emitFunctionEntryLabel();
1367
1368 if (TT.isWindowsArm64EC() && !MF->getFunction().hasLocalLinkage()) {
1369 // For ARM64EC targets, a function definition's name is mangled differently
1370 // from the normal symbol, emit required aliases here.
1371 auto emitFunctionAlias = [&](MCSymbol *Src, MCSymbol *Dst) {
1372 OutStreamer->emitSymbolAttribute(Src, MCSA_WeakAntiDep);
1373 OutStreamer->emitAssignment(
1374 Src, MCSymbolRefExpr::create(Dst, MMI->getContext()));
1375 };
1376
1377 auto getSymbolFromMetadata = [&](StringRef Name) {
1378 MCSymbol *Sym = nullptr;
1379 if (MDNode *Node = MF->getFunction().getMetadata(Name)) {
1380 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1381 Sym = MMI->getContext().getOrCreateSymbol(NameStr);
1382 }
1383 return Sym;
1384 };
1385
1386 SmallVector<MDNode *> UnmangledNames;
1387 MF->getFunction().getMetadata("arm64ec_unmangled_name", UnmangledNames);
1388 for (MDNode *Node : UnmangledNames) {
1389 StringRef NameStr = cast<MDString>(Node->getOperand(0))->getString();
1390 MCSymbol *UnmangledSym = MMI->getContext().getOrCreateSymbol(NameStr);
1391 if (std::optional<std::string> MangledName =
1392 getArm64ECMangledFunctionName(UnmangledSym->getName())) {
1393 MCSymbol *ECMangledSym =
1394 MMI->getContext().getOrCreateSymbol(*MangledName);
1395 emitFunctionAlias(UnmangledSym, ECMangledSym);
1396 }
1397 }
1398 if (MCSymbol *ECMangledSym =
1399 getSymbolFromMetadata("arm64ec_ecmangled_name"))
1400 emitFunctionAlias(ECMangledSym, CurrentFnSym);
1401 }
1402}
1403
1404void AArch64AsmPrinter::emitXXStructor(const DataLayout &DL,
1405 const Constant *CV) {
1406 if (const auto *CPA = dyn_cast<ConstantPtrAuth>(CV))
1407 if (CPA->hasAddressDiscriminator() &&
1408 !CPA->hasSpecialAddressDiscriminator(
1409            ConstantPtrAuth::AddrDiscriminator_CtorsDtors))
1410      report_fatal_error(
1411          "unexpected address discrimination value for ctors/dtors entry, only "
1412 "'ptr inttoptr (i64 1 to ptr)' is allowed");
1413 // If we have signed pointers in xxstructors list, they'll be lowered to @AUTH
1414 // MCExpr's via AArch64AsmPrinter::lowerConstantPtrAuth. It does not look at
1415 // actual address discrimination value and only checks
1416 // hasAddressDiscriminator(), so it's OK to leave special address
1417 // discrimination value here.
1418  AsmPrinter::emitXXStructor(DL, CV);
1419}
1420
1421void AArch64AsmPrinter::emitGlobalAlias(const Module &M,
1422 const GlobalAlias &GA) {
1423 if (auto F = dyn_cast_or_null<Function>(GA.getAliasee())) {
1424 // Global aliases must point to a definition, but unmangled patchable
1425 // symbols are special and need to point to an undefined symbol with "EXP+"
1426    // prefix. Such an undefined symbol is resolved by the linker by creating
1427    // an x86 thunk that jumps back to the actual EC target.
1428 if (MDNode *Node = F->getMetadata("arm64ec_exp_name")) {
1429 StringRef ExpStr = cast<MDString>(Node->getOperand(0))->getString();
1430 MCSymbol *ExpSym = MMI->getContext().getOrCreateSymbol(ExpStr);
1431 MCSymbol *Sym = MMI->getContext().getOrCreateSymbol(GA.getName());
1432
1433 OutStreamer->beginCOFFSymbolDef(ExpSym);
1434 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1435 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1436                                      << COFF::SCT_COMPLEX_TYPE_SHIFT);
1437      OutStreamer->endCOFFSymbolDef();
1438
1439 OutStreamer->beginCOFFSymbolDef(Sym);
1440 OutStreamer->emitCOFFSymbolStorageClass(COFF::IMAGE_SYM_CLASS_EXTERNAL);
1441 OutStreamer->emitCOFFSymbolType(COFF::IMAGE_SYM_DTYPE_FUNCTION
1442                                      << COFF::SCT_COMPLEX_TYPE_SHIFT);
1443      OutStreamer->endCOFFSymbolDef();
1444 OutStreamer->emitSymbolAttribute(Sym, MCSA_Weak);
1445 OutStreamer->emitAssignment(
1446 Sym, MCSymbolRefExpr::create(ExpSym, MMI->getContext()));
1447 return;
1448 }
1449 }
1450  AsmPrinter::emitGlobalAlias(M, GA);
1451}
1452
1453/// Small jump tables contain an unsigned byte or half, representing the offset
1454/// from the lowest-addressed possible destination to the desired basic
1455/// block. Since all instructions are 4-byte aligned, this is further compressed
1456/// by counting in instructions rather than bytes (i.e. divided by 4). So, to
1457/// materialize the correct destination we need:
1458///
1459/// adr xDest, .LBB0_0
1460/// ldrb wScratch, [xTable, xEntry] (with "lsl #1" for ldrh).
1461/// add xDest, xDest, xScratch (with "lsl #2" for smaller entries)
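/// For example (illustrative): with byte-sized entries, a table value of 5
/// selects a block that starts 5 * 4 = 20 bytes after the base label.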
1462void AArch64AsmPrinter::LowerJumpTableDest(llvm::MCStreamer &OutStreamer,
1463 const llvm::MachineInstr &MI) {
1464 Register DestReg = MI.getOperand(0).getReg();
1465 Register ScratchReg = MI.getOperand(1).getReg();
1466 Register ScratchRegW =
1467 STI->getRegisterInfo()->getSubReg(ScratchReg, AArch64::sub_32);
1468 Register TableReg = MI.getOperand(2).getReg();
1469 Register EntryReg = MI.getOperand(3).getReg();
1470 int JTIdx = MI.getOperand(4).getIndex();
1471 int Size = AArch64FI->getJumpTableEntrySize(JTIdx);
1472
1473 // This has to be first because the compression pass based its reachability
1474 // calculations on the start of the JumpTableDest instruction.
1475 auto Label =
1476 MF->getInfo<AArch64FunctionInfo>()->getJumpTableEntryPCRelSymbol(JTIdx);
1477
1478 // If we don't already have a symbol to use as the base, use the ADR
1479 // instruction itself.
1480 if (!Label) {
1481 Label = MF->getContext().createTempSymbol();
1482 AArch64FI->setJumpTableEntryInfo(JTIdx, Size, Label);
1483 OutStreamer.emitLabel(Label);
1484 }
1485
1486 auto LabelExpr = MCSymbolRefExpr::create(Label, MF->getContext());
1487 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADR)
1488 .addReg(DestReg)
1489 .addExpr(LabelExpr));
1490
1491 // Load the number of instruction-steps to offset from the label.
1492 unsigned LdrOpcode;
1493 switch (Size) {
1494 case 1: LdrOpcode = AArch64::LDRBBroX; break;
1495 case 2: LdrOpcode = AArch64::LDRHHroX; break;
1496 case 4: LdrOpcode = AArch64::LDRSWroX; break;
1497 default:
1498 llvm_unreachable("Unknown jump table size");
1499 }
1500
1501 EmitToStreamer(OutStreamer, MCInstBuilder(LdrOpcode)
1502 .addReg(Size == 4 ? ScratchReg : ScratchRegW)
1503 .addReg(TableReg)
1504 .addReg(EntryReg)
1505 .addImm(0)
1506 .addImm(Size == 1 ? 0 : 1));
1507
1508 // Add to the already materialized base label address, multiplying by 4 if
1509 // compressed.
1510 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1511 .addReg(DestReg)
1512 .addReg(DestReg)
1513 .addReg(ScratchReg)
1514 .addImm(Size == 4 ? 0 : 2));
1515}
1516
1517void AArch64AsmPrinter::LowerHardenedBRJumpTable(const MachineInstr &MI) {
1518 const MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
1519 assert(MJTI && "Can't lower jump-table dispatch without JTI");
1520
1521 const std::vector<MachineJumpTableEntry> &JTs = MJTI->getJumpTables();
1522 assert(!JTs.empty() && "Invalid JT index for jump-table dispatch");
1523
1524 // Emit:
1525 // mov x17, #<size of table> ; depending on table size, with MOVKs
1526 // cmp x16, x17 ; or #imm if table size fits in 12-bit
1527 // csel x16, x16, xzr, ls ; check for index overflow
1528 //
1529 // adrp x17, Ltable@PAGE ; materialize table address
1530 // add x17, Ltable@PAGEOFF
1531 // ldrsw x16, [x17, x16, lsl #2] ; load table entry
1532 //
1533 // Lanchor:
1534 // adr x17, Lanchor ; compute target address
1535 // add x16, x17, x16
1536 // br x16 ; branch to target
1537
1538 MachineOperand JTOp = MI.getOperand(0);
1539
1540 unsigned JTI = JTOp.getIndex();
1541 assert(!AArch64FI->getJumpTableEntryPCRelSymbol(JTI) &&
1542 "unsupported compressed jump table");
1543
1544 const uint64_t NumTableEntries = JTs[JTI].MBBs.size();
1545
1546 // cmp only supports a 12-bit immediate. If we need more, materialize the
1547 // immediate, using x17 as a scratch register.
1548 uint64_t MaxTableEntry = NumTableEntries - 1;
1549 if (isUInt<12>(MaxTableEntry)) {
1550 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXri)
1551 .addReg(AArch64::XZR)
1552 .addReg(AArch64::X16)
1553 .addImm(MaxTableEntry)
1554 .addImm(0));
1555 } else {
1556 emitMOVZ(AArch64::X17, static_cast<uint16_t>(MaxTableEntry), 0);
1557 // It's sad that we have to manually materialize instructions, but we can't
1558 // trivially reuse the main pseudo expansion logic.
1559 // A MOVK sequence is easy enough to generate and handles the general case.
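    // Worked example (illustrative): MaxTableEntry = 0x12345 becomes
    // `movz x17, #0x2345` followed by `movk x17, #0x1, lsl #16`.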
1560 for (int Offset = 16; Offset < 64; Offset += 16) {
1561 if ((MaxTableEntry >> Offset) == 0)
1562 break;
1563 emitMOVK(AArch64::X17, static_cast<uint16_t>(MaxTableEntry >> Offset),
1564 Offset);
1565 }
1566 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::SUBSXrs)
1567 .addReg(AArch64::XZR)
1568 .addReg(AArch64::X16)
1569 .addReg(AArch64::X17)
1570 .addImm(0));
1571 }
1572
1573 // This picks entry #0 on failure.
1574 // We might want to trap instead.
1575 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::CSELXr)
1576 .addReg(AArch64::X16)
1577 .addReg(AArch64::X16)
1578 .addReg(AArch64::XZR)
1579 .addImm(AArch64CC::LS));
1580
1581 // Prepare the @PAGE/@PAGEOFF low/high operands.
1582 MachineOperand JTMOHi(JTOp), JTMOLo(JTOp);
1583 MCOperand JTMCHi, JTMCLo;
1584
1585 JTMOHi.setTargetFlags(AArch64II::MO_PAGE);
1586 JTMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
1587
1588 MCInstLowering.lowerOperand(JTMOHi, JTMCHi);
1589 MCInstLowering.lowerOperand(JTMOLo, JTMCLo);
1590
1591 EmitToStreamer(
1592 *OutStreamer,
1593 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(JTMCHi));
1594
1595 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXri)
1596 .addReg(AArch64::X17)
1597 .addReg(AArch64::X17)
1598 .addOperand(JTMCLo)
1599 .addImm(0));
1600
1601 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRSWroX)
1602 .addReg(AArch64::X16)
1603 .addReg(AArch64::X17)
1604 .addReg(AArch64::X16)
1605 .addImm(0)
1606 .addImm(1));
1607
1608 MCSymbol *AdrLabel = MF->getContext().createTempSymbol();
1609 const auto *AdrLabelE = MCSymbolRefExpr::create(AdrLabel, MF->getContext());
1610 AArch64FI->setJumpTableEntryInfo(JTI, 4, AdrLabel);
1611
1612 OutStreamer->emitLabel(AdrLabel);
1613 EmitToStreamer(
1614 *OutStreamer,
1615 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addExpr(AdrLabelE));
1616
1617 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ADDXrs)
1618 .addReg(AArch64::X16)
1619 .addReg(AArch64::X17)
1620 .addReg(AArch64::X16)
1621 .addImm(0));
1622
1623 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::BR).addReg(AArch64::X16));
1624}
1625
1626void AArch64AsmPrinter::LowerMOPS(llvm::MCStreamer &OutStreamer,
1627 const llvm::MachineInstr &MI) {
1628 unsigned Opcode = MI.getOpcode();
1629 assert(STI->hasMOPS());
1630 assert(STI->hasMTE() || Opcode != AArch64::MOPSMemorySetTaggingPseudo);
1631
1632 const auto Ops = [Opcode]() -> std::array<unsigned, 3> {
1633 if (Opcode == AArch64::MOPSMemoryCopyPseudo)
1634 return {AArch64::CPYFP, AArch64::CPYFM, AArch64::CPYFE};
1635 if (Opcode == AArch64::MOPSMemoryMovePseudo)
1636 return {AArch64::CPYP, AArch64::CPYM, AArch64::CPYE};
1637 if (Opcode == AArch64::MOPSMemorySetPseudo)
1638 return {AArch64::SETP, AArch64::SETM, AArch64::SETE};
1639 if (Opcode == AArch64::MOPSMemorySetTaggingPseudo)
1640 return {AArch64::SETGP, AArch64::SETGM, AArch64::MOPSSETGE};
1641 llvm_unreachable("Unhandled memory operation pseudo");
1642 }();
1643 const bool IsSet = Opcode == AArch64::MOPSMemorySetPseudo ||
1644 Opcode == AArch64::MOPSMemorySetTaggingPseudo;
1645
1646 for (auto Op : Ops) {
1647 int i = 0;
1648 auto MCIB = MCInstBuilder(Op);
1649 // Destination registers
1650 MCIB.addReg(MI.getOperand(i++).getReg());
1651 MCIB.addReg(MI.getOperand(i++).getReg());
1652 if (!IsSet)
1653 MCIB.addReg(MI.getOperand(i++).getReg());
1654 // Input registers
1655 MCIB.addReg(MI.getOperand(i++).getReg());
1656 MCIB.addReg(MI.getOperand(i++).getReg());
1657 MCIB.addReg(MI.getOperand(i++).getReg());
1658
1659 EmitToStreamer(OutStreamer, MCIB);
1660 }
1661}
1662
1663void AArch64AsmPrinter::LowerSTACKMAP(MCStreamer &OutStreamer, StackMaps &SM,
1664 const MachineInstr &MI) {
1665 unsigned NumNOPBytes = StackMapOpers(&MI).getNumPatchBytes();
1666
1667 auto &Ctx = OutStreamer.getContext();
1668 MCSymbol *MILabel = Ctx.createTempSymbol();
1669 OutStreamer.emitLabel(MILabel);
1670
1671 SM.recordStackMap(*MILabel, MI);
1672 assert(NumNOPBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1673
1674 // Scan ahead to trim the shadow.
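// Only bytes not already covered by real instructions need NOP padding, so
// instructions up to the next call/stackmap/patchpoint are counted against the
// requested shadow size first.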
1675 const MachineBasicBlock &MBB = *MI.getParent();
1676 MachineBasicBlock::const_iterator MII(MI);
1677 ++MII;
1678 while (NumNOPBytes > 0) {
1679 if (MII == MBB.end() || MII->isCall() ||
1680 MII->getOpcode() == AArch64::DBG_VALUE ||
1681 MII->getOpcode() == TargetOpcode::PATCHPOINT ||
1682 MII->getOpcode() == TargetOpcode::STACKMAP)
1683 break;
1684 ++MII;
1685 NumNOPBytes -= 4;
1686 }
1687
1688 // Emit nops.
1689 for (unsigned i = 0; i < NumNOPBytes; i += 4)
1690 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1691}
1692
1693// Lower a patchpoint of the form:
1694// [<def>], <id>, <numBytes>, <target>, <numArgs>
1695void AArch64AsmPrinter::LowerPATCHPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1696 const MachineInstr &MI) {
1697 auto &Ctx = OutStreamer.getContext();
1698 MCSymbol *MILabel = Ctx.createTempSymbol();
1699 OutStreamer.emitLabel(MILabel);
1700 SM.recordPatchPoint(*MILabel, MI);
1701
1702 PatchPointOpers Opers(&MI);
1703
1704 int64_t CallTarget = Opers.getCallTarget().getImm();
1705 unsigned EncodedBytes = 0;
1706 if (CallTarget) {
1707 assert((CallTarget & 0xFFFFFFFFFFFF) == CallTarget &&
1708 "High 16 bits of call target should be zero.");
1709 Register ScratchReg = MI.getOperand(Opers.getNextScratchIdx()).getReg();
1710 EncodedBytes = 16;
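// The materialization below is movz + two movk followed by blr: four 4-byte
// instructions, which is what the 16 encoded bytes above account for.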
1711 // Materialize the jump address:
1712 emitMOVZ(ScratchReg, (CallTarget >> 32) & 0xFFFF, 32);
1713 emitMOVK(ScratchReg, (CallTarget >> 16) & 0xFFFF, 16);
1714 emitMOVK(ScratchReg, CallTarget & 0xFFFF, 0);
1715 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::BLR).addReg(ScratchReg));
1716 }
1717 // Emit padding.
1718 unsigned NumBytes = Opers.getNumPatchBytes();
1719 assert(NumBytes >= EncodedBytes &&
1720 "Patchpoint can't request size less than the length of a call.");
1721 assert((NumBytes - EncodedBytes) % 4 == 0 &&
1722 "Invalid number of NOP bytes requested!");
1723 for (unsigned i = EncodedBytes; i < NumBytes; i += 4)
1724 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1725}
1726
1727void AArch64AsmPrinter::LowerSTATEPOINT(MCStreamer &OutStreamer, StackMaps &SM,
1728 const MachineInstr &MI) {
1729 StatepointOpers SOpers(&MI);
1730 if (unsigned PatchBytes = SOpers.getNumPatchBytes()) {
1731 assert(PatchBytes % 4 == 0 && "Invalid number of NOP bytes requested!");
1732 for (unsigned i = 0; i < PatchBytes; i += 4)
1733 EmitToStreamer(OutStreamer, MCInstBuilder(AArch64::HINT).addImm(0));
1734 } else {
1735 // Lower call target and choose correct opcode
1736 const MachineOperand &CallTarget = SOpers.getCallTarget();
1737 MCOperand CallTargetMCOp;
1738 unsigned CallOpcode;
1739 switch (CallTarget.getType()) {
1740 case MachineOperand::MO_GlobalAddress:
1741 case MachineOperand::MO_ExternalSymbol:
1742 MCInstLowering.lowerOperand(CallTarget, CallTargetMCOp);
1743 CallOpcode = AArch64::BL;
1744 break;
1745 case MachineOperand::MO_Immediate:
1746 CallTargetMCOp = MCOperand::createImm(CallTarget.getImm());
1747 CallOpcode = AArch64::BL;
1748 break;
1749 case MachineOperand::MO_Register:
1750 CallTargetMCOp = MCOperand::createReg(CallTarget.getReg());
1751 CallOpcode = AArch64::BLR;
1752 break;
1753 default:
1754 llvm_unreachable("Unsupported operand type in statepoint call target");
1755 break;
1756 }
1757
1758 EmitToStreamer(OutStreamer,
1759 MCInstBuilder(CallOpcode).addOperand(CallTargetMCOp));
1760 }
1761
1762 auto &Ctx = OutStreamer.getContext();
1763 MCSymbol *MILabel = Ctx.createTempSymbol();
1764 OutStreamer.emitLabel(MILabel);
1765 SM.recordStatepoint(*MILabel, MI);
1766}
1767
1768void AArch64AsmPrinter::LowerFAULTING_OP(const MachineInstr &FaultingMI) {
1769 // FAULTING_OP <def>, <faulting type>, <MBB handler>,
1770 // <opcode>, <operands>
1771
1772 Register DefRegister = FaultingMI.getOperand(0).getReg();
1773 FaultMaps::FaultKind FK =
1774 static_cast<FaultMaps::FaultKind>(FaultingMI.getOperand(1).getImm());
1775 MCSymbol *HandlerLabel = FaultingMI.getOperand(2).getMBB()->getSymbol();
1776 unsigned Opcode = FaultingMI.getOperand(3).getImm();
1777 unsigned OperandsBeginIdx = 4;
1778
1779 auto &Ctx = OutStreamer->getContext();
1780 MCSymbol *FaultingLabel = Ctx.createTempSymbol();
1781 OutStreamer->emitLabel(FaultingLabel);
1782
1783 assert(FK < FaultMaps::FaultKindMax && "Invalid Faulting Kind!");
1784 FM.recordFaultingOp(FK, FaultingLabel, HandlerLabel);
1785
1786 MCInst MI;
1787 MI.setOpcode(Opcode);
1788
1789 if (DefRegister != (Register)0)
1790 MI.addOperand(MCOperand::createReg(DefRegister));
1791
1792 for (const MachineOperand &MO :
1793 llvm::drop_begin(FaultingMI.operands(), OperandsBeginIdx)) {
1794 MCOperand Dest;
1795 lowerOperand(MO, Dest);
1796 MI.addOperand(Dest);
1797 }
1798
1799 OutStreamer->AddComment("on-fault: " + HandlerLabel->getName());
1800 EmitToStreamer(MI);
1801}
1802
1803void AArch64AsmPrinter::emitMovXReg(Register Dest, Register Src) {
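// A GPR64-to-GPR64 move is emitted as ORR Xd, XZR, Xs with a zero shift, the
// canonical encoding of "mov Xd, Xs".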
1804 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::ORRXrs)
1805 .addReg(Dest)
1806 .addReg(AArch64::XZR)
1807 .addReg(Src)
1808 .addImm(0));
1809}
1810
1811void AArch64AsmPrinter::emitMOVZ(Register Dest, uint64_t Imm, unsigned Shift) {
1812 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1813 EmitToStreamer(*OutStreamer,
1814 MCInstBuilder(Is64Bit ? AArch64::MOVZXi : AArch64::MOVZWi)
1815 .addReg(Dest)
1816 .addImm(Imm)
1817 .addImm(Shift));
1818}
1819
1820void AArch64AsmPrinter::emitMOVK(Register Dest, uint64_t Imm, unsigned Shift) {
1821 bool Is64Bit = AArch64::GPR64RegClass.contains(Dest);
1822 EmitToStreamer(*OutStreamer,
1823 MCInstBuilder(Is64Bit ? AArch64::MOVKXi : AArch64::MOVKWi)
1824 .addReg(Dest)
1825 .addReg(Dest)
1826 .addImm(Imm)
1827 .addImm(Shift));
1828}
1829
1830void AArch64AsmPrinter::emitFMov0(const MachineInstr &MI) {
1831 Register DestReg = MI.getOperand(0).getReg();
1832 if (STI->hasZeroCycleZeroingFPR64() &&
1833 !STI->hasZeroCycleZeroingFPWorkaround() && STI->isNeonAvailable()) {
1834 // Convert H/S register to corresponding D register
1835 if (AArch64::H0 <= DestReg && DestReg <= AArch64::H31)
1836 DestReg = AArch64::D0 + (DestReg - AArch64::H0);
1837 else if (AArch64::S0 <= DestReg && DestReg <= AArch64::S31)
1838 DestReg = AArch64::D0 + (DestReg - AArch64::S0);
1839 else
1840 assert(AArch64::D0 <= DestReg && DestReg <= AArch64::D31);
1841
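// MOVI Dd, #0 clears the full 64-bit register, and therefore the overlapping
// H/S views as well; this is the form that subtargets with zero-cycle FPR64
// zeroing handle cheaply.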
1842 MCInst MOVI;
1843 MOVI.setOpcode(AArch64::MOVID);
1844 MOVI.addOperand(MCOperand::createReg(DestReg));
1845 MOVI.addOperand(MCOperand::createImm(0));
1846 EmitToStreamer(*OutStreamer, MOVI);
1847 } else {
1848 MCInst FMov;
1849 switch (MI.getOpcode()) {
1850 default: llvm_unreachable("Unexpected opcode");
1851 case AArch64::FMOVH0:
1852 FMov.setOpcode(STI->hasFullFP16() ? AArch64::FMOVWHr : AArch64::FMOVWSr);
1853 if (!STI->hasFullFP16())
1854 DestReg = (AArch64::S0 + (DestReg - AArch64::H0));
1855 FMov.addOperand(MCOperand::createReg(DestReg));
1856 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1857 break;
1858 case AArch64::FMOVS0:
1859 FMov.setOpcode(AArch64::FMOVWSr);
1860 FMov.addOperand(MCOperand::createReg(DestReg));
1861 FMov.addOperand(MCOperand::createReg(AArch64::WZR));
1862 break;
1863 case AArch64::FMOVD0:
1864 FMov.setOpcode(AArch64::FMOVXDr);
1865 FMov.addOperand(MCOperand::createReg(DestReg));
1866 FMov.addOperand(MCOperand::createReg(AArch64::XZR));
1867 break;
1868 }
1869 EmitToStreamer(*OutStreamer, FMov);
1870 }
1871}
1872
1873Register AArch64AsmPrinter::emitPtrauthDiscriminator(uint16_t Disc,
1874 Register AddrDisc,
1875 Register ScratchReg,
1876 bool MayUseAddrAsScratch) {
1877 assert(ScratchReg == AArch64::X16 || ScratchReg == AArch64::X17 ||
1878 !STI->isX16X17Safer());
1879 // So far we've used NoRegister in pseudos. Now we need real encodings.
1880 if (AddrDisc == AArch64::NoRegister)
1881 AddrDisc = AArch64::XZR;
1882
1883 // If there is no constant discriminator, there's no blend involved:
1884 // just use the address discriminator register as-is (XZR or not).
1885 if (!Disc)
1886 return AddrDisc;
1887
1888 // If there's only a constant discriminator, MOV it into the scratch register.
1889 if (AddrDisc == AArch64::XZR) {
1890 emitMOVZ(ScratchReg, Disc, 0);
1891 return ScratchReg;
1892 }
1893
1894 // If there are both, emit a blend into the scratch register.
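// The blend places the 16-bit constant discriminator in the top 16 bits of
// the address discriminator (MOVK with shift #48), mirroring how discriminator
// blending is defined for pointer authentication.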
1895
1896 // Check if we can save one MOV instruction.
1897 assert(MayUseAddrAsScratch || ScratchReg != AddrDisc);
1898 bool AddrDiscIsSafe = AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17 ||
1899 !STI->isX16X17Safer();
1900 if (MayUseAddrAsScratch && AddrDiscIsSafe)
1901 ScratchReg = AddrDisc;
1902 else
1903 emitMovXReg(ScratchReg, AddrDisc);
1904
1905 emitMOVK(ScratchReg, Disc, 48);
1906 return ScratchReg;
1907}
1908
1909/// Emits a code sequence to check an authenticated pointer value.
1910///
1911 /// If the OnFailure argument is passed, jump there on check failure instead
1912/// of proceeding to the next instruction (only if ShouldTrap is false).
1913void AArch64AsmPrinter::emitPtrauthCheckAuthenticatedValue(
1914 Register TestedReg, Register ScratchReg, AArch64PACKey::ID Key,
1915 AArch64PAuth::AuthCheckMethod Method, bool ShouldTrap,
1916 const MCSymbol *OnFailure) {
1917 // Insert a sequence to check if authentication of TestedReg succeeded,
1918 // such as:
1919 //
1920 // - checked and clearing:
1921 // ; x16 is TestedReg, x17 is ScratchReg
1922 // mov x17, x16
1923 // xpaci x17
1924 // cmp x16, x17
1925 // b.eq Lsuccess
1926 // mov x16, x17
1927 // b Lend
1928 // Lsuccess:
1929 // ; skipped if authentication failed
1930 // Lend:
1931 // ...
1932 //
1933 // - checked and trapping:
1934 // mov x17, x16
1935 // xpaci x17
1936 // cmp x16, x17
1937 // b.eq Lsuccess
1938 // brk #<0xc470 + aut key>
1939 // Lsuccess:
1940 // ...
1941 //
1942 // See the documentation on AuthCheckMethod enumeration constants for
1943 // the specific code sequences that can be used to perform the check.
1944 using AArch64PAuth::AuthCheckMethod;
1945
1946 if (Method == AuthCheckMethod::None)
1947 return;
1948 if (Method == AuthCheckMethod::DummyLoad) {
1949 EmitToStreamer(MCInstBuilder(AArch64::LDRWui)
1950 .addReg(getWRegFromXReg(ScratchReg))
1951 .addReg(TestedReg)
1952 .addImm(0));
1953 assert(ShouldTrap && !OnFailure && "DummyLoad always traps on error");
1954 return;
1955 }
1956
1957 MCSymbol *SuccessSym = createTempSymbol("auth_success_");
1958 if (Method == AuthCheckMethod::XPAC || Method == AuthCheckMethod::XPACHint) {
1959 // mov Xscratch, Xtested
1960 emitMovXReg(ScratchReg, TestedReg);
1961
1962 if (Method == AuthCheckMethod::XPAC) {
1963 // xpac(i|d) Xscratch
1964 unsigned XPACOpc = getXPACOpcodeForKey(Key);
1965 EmitToStreamer(
1966 MCInstBuilder(XPACOpc).addReg(ScratchReg).addReg(ScratchReg));
1967 } else {
1968 // xpaclri
1969
1970 // Note that this method applies XPAC to TestedReg instead of ScratchReg.
1971 assert(TestedReg == AArch64::LR &&
1972 "XPACHint mode is only compatible with checking the LR register");
1973 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
1974 "XPACHint mode is only compatible with I-keys");
1975 EmitToStreamer(MCInstBuilder(AArch64::XPACLRI));
1976 }
1977
1978 // cmp Xtested, Xscratch
1979 EmitToStreamer(MCInstBuilder(AArch64::SUBSXrs)
1980 .addReg(AArch64::XZR)
1981 .addReg(TestedReg)
1982 .addReg(ScratchReg)
1983 .addImm(0));
1984
1985 // b.eq Lsuccess
1986 EmitToStreamer(
1987 MCInstBuilder(AArch64::Bcc)
1988 .addImm(AArch64CC::EQ)
1989 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
1990 } else if (Method == AuthCheckMethod::HighBitsNoTBI) {
1991 // eor Xscratch, Xtested, Xtested, lsl #1
1992 EmitToStreamer(MCInstBuilder(AArch64::EORXrs)
1993 .addReg(ScratchReg)
1994 .addReg(TestedReg)
1995 .addReg(TestedReg)
1996 .addImm(1));
1997 // tbz Xscratch, #62, Lsuccess
1998 EmitToStreamer(
1999 MCInstBuilder(AArch64::TBZX)
2000 .addReg(ScratchReg)
2001 .addImm(62)
2002 .addExpr(MCSymbolRefExpr::create(SuccessSym, OutContext)));
2003 } else {
2004 llvm_unreachable("Unsupported check method");
2005 }
2006
2007 if (ShouldTrap) {
2008 assert(!OnFailure && "Cannot specify OnFailure with ShouldTrap");
2009 // Trapping sequences do a 'brk'.
2010 // brk #<0xc470 + aut key>
2011 EmitToStreamer(MCInstBuilder(AArch64::BRK).addImm(0xc470 | Key));
2012 } else {
2013 // Non-trapping checked sequences return the stripped result in TestedReg,
2014 // skipping over success-only code (such as re-signing the pointer) if
2015 // there is one.
2016 // Note that this can introduce an authentication oracle (such as one based on
2017 // the high bits of the re-signed value).
2018
2019 // FIXME: The XPAC method can be optimized by applying XPAC to TestedReg
2020 // instead of ScratchReg, thus eliminating one `mov` instruction.
2021 // Both XPAC and XPACHint can be further optimized by not using a
2022 // conditional branch jumping over an unconditional one.
2023
2024 switch (Method) {
2025 case AuthCheckMethod::XPACHint:
2026 // LR is already XPAC-ed at this point.
2027 break;
2028 case AuthCheckMethod::XPAC:
2029 // mov Xtested, Xscratch
2030 emitMovXReg(TestedReg, ScratchReg);
2031 break;
2032 default:
2033 // If Xtested was not XPAC-ed so far, emit XPAC here.
2034 // xpac(i|d) Xtested
2035 unsigned XPACOpc = getXPACOpcodeForKey(Key);
2036 EmitToStreamer(
2037 MCInstBuilder(XPACOpc).addReg(TestedReg).addReg(TestedReg));
2038 }
2039
2040 if (OnFailure) {
2041 // b Lend
2042 EmitToStreamer(
2043 MCInstBuilder(AArch64::B)
2044 .addExpr(MCSymbolRefExpr::create(OnFailure, OutContext)));
2045 }
2046 }
2047
2048 // If the auth check succeeds, we can continue.
2049 // Lsuccess:
2050 OutStreamer->emitLabel(SuccessSym);
2051}
2052
2053 // With Pointer Authentication, it may be necessary to explicitly check the
2054// authenticated value in LR before performing a tail call.
2055// Otherwise, the callee may re-sign the invalid return address,
2056// introducing a signing oracle.
2057void AArch64AsmPrinter::emitPtrauthTailCallHardening(const MachineInstr *TC) {
2058 if (!AArch64FI->shouldSignReturnAddress(*MF))
2059 return;
2060
2061 auto LRCheckMethod = STI->getAuthenticatedLRCheckMethod(*MF);
2062 if (LRCheckMethod == AArch64PAuth::AuthCheckMethod::None)
2063 return;
2064
2065 const AArch64RegisterInfo *TRI = STI->getRegisterInfo();
2066 Register ScratchReg =
2067 TC->readsRegister(AArch64::X16, TRI) ? AArch64::X17 : AArch64::X16;
2068 assert(!TC->readsRegister(ScratchReg, TRI) &&
2069 "Neither x16 nor x17 is available as a scratch register");
2070 AArch64PACKey::ID Key =
2071 AArch64FI->shouldSignWithBKey() ? AArch64PACKey::IB : AArch64PACKey::IA;
2072 emitPtrauthCheckAuthenticatedValue(
2073 AArch64::LR, ScratchReg, Key, LRCheckMethod,
2074 /*ShouldTrap=*/true, /*OnFailure=*/nullptr);
2075}
2076
2077void AArch64AsmPrinter::emitPtrauthAuthResign(
2078 Register AUTVal, AArch64PACKey::ID AUTKey, uint64_t AUTDisc,
2079 const MachineOperand *AUTAddrDisc, Register Scratch,
2080 std::optional<AArch64PACKey::ID> PACKey, uint64_t PACDisc,
2081 Register PACAddrDisc) {
2082 const bool IsAUTPAC = PACKey.has_value();
2083
2084 // We expand AUT/AUTPAC into a sequence of the form
2085 //
2086 // ; authenticate x16
2087 // ; check pointer in x16
2088 // Lsuccess:
2089 // ; sign x16 (if AUTPAC)
2090 // Lend: ; if not trapping on failure
2091 //
2092 // with the checking sequence chosen depending on whether/how we should check
2093 // the pointer and whether we should trap on failure.
2094
2095 // By default, auth/resign sequences check for auth failures.
2096 bool ShouldCheck = true;
2097 // In the checked sequence, we only trap if explicitly requested.
2098 bool ShouldTrap = MF->getFunction().hasFnAttribute("ptrauth-auth-traps");
2099
2100 // On an FPAC CPU, you get traps whether you want them or not: there's
2101 // no point in emitting checks or traps.
2102 if (STI->hasFPAC())
2103 ShouldCheck = ShouldTrap = false;
2104
2105 // However, command-line flags can override this, for experimentation.
2106 switch (PtrauthAuthChecks) {
2107 case PtrauthCheckMode::Default:
2108 break;
2109 case PtrauthCheckMode::Unchecked:
2110 ShouldCheck = ShouldTrap = false;
2111 break;
2112 case PtrauthCheckMode::Poison:
2113 ShouldCheck = true;
2114 ShouldTrap = false;
2115 break;
2116 case PtrauthCheckMode::Trap:
2117 ShouldCheck = ShouldTrap = true;
2118 break;
2119 }
2120
2121 // Compute aut discriminator
2122 assert(isUInt<16>(AUTDisc));
2123 Register AUTDiscReg = emitPtrauthDiscriminator(
2124 AUTDisc, AUTAddrDisc->getReg(), Scratch, AUTAddrDisc->isKill());
2125 bool AUTZero = AUTDiscReg == AArch64::XZR;
2126 unsigned AUTOpc = getAUTOpcodeForKey(AUTKey, AUTZero);
2127
2128 // autiza x16 ; if AUTZero
2129 // autia x16, x17 ; if !AUTZero
2130 MCInst AUTInst;
2131 AUTInst.setOpcode(AUTOpc);
2132 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2133 AUTInst.addOperand(MCOperand::createReg(AUTVal));
2134 if (!AUTZero)
2135 AUTInst.addOperand(MCOperand::createReg(AUTDiscReg));
2136 EmitToStreamer(*OutStreamer, AUTInst);
2137
2138 // Unchecked or checked-but-non-trapping AUT is just an "AUT": we're done.
2139 if (!IsAUTPAC && (!ShouldCheck || !ShouldTrap))
2140 return;
2141
2142 MCSymbol *EndSym = nullptr;
2143
2144 if (ShouldCheck) {
2145 if (IsAUTPAC && !ShouldTrap)
2146 EndSym = createTempSymbol("resign_end_");
2147
2148 emitPtrauthCheckAuthenticatedValue(AUTVal, Scratch, AUTKey,
2149 AArch64PAuth::AuthCheckMethod::XPAC,
2150 ShouldTrap, EndSym);
2151 }
2152
2153 // We already emitted unchecked and checked-but-non-trapping AUTs.
2154 // That left us with trapping AUTs, and AUTPACs.
2155 // Trapping AUTs don't need PAC: we're done.
2156 if (!IsAUTPAC)
2157 return;
2158
2159 // Compute pac discriminator
2160 assert(isUInt<16>(PACDisc));
2161 Register PACDiscReg =
2162 emitPtrauthDiscriminator(PACDisc, PACAddrDisc, Scratch);
2163 bool PACZero = PACDiscReg == AArch64::XZR;
2164 unsigned PACOpc = getPACOpcodeForKey(*PACKey, PACZero);
2165
2166 // pacizb x16 ; if PACZero
2167 // pacib x16, x17 ; if !PACZero
2168 MCInst PACInst;
2169 PACInst.setOpcode(PACOpc);
2170 PACInst.addOperand(MCOperand::createReg(AUTVal));
2171 PACInst.addOperand(MCOperand::createReg(AUTVal));
2172 if (!PACZero)
2173 PACInst.addOperand(MCOperand::createReg(PACDiscReg));
2174 EmitToStreamer(*OutStreamer, PACInst);
2175
2176 // Lend:
2177 if (EndSym)
2178 OutStreamer->emitLabel(EndSym);
2179}
2180
2181void AArch64AsmPrinter::emitPtrauthSign(const MachineInstr *MI) {
2182 Register Val = MI->getOperand(1).getReg();
2183 auto Key = (AArch64PACKey::ID)MI->getOperand(2).getImm();
2184 uint64_t Disc = MI->getOperand(3).getImm();
2185 Register AddrDisc = MI->getOperand(4).getReg();
2186 bool AddrDiscKilled = MI->getOperand(4).isKill();
2187
2188 // As long as at least one of Val and AddrDisc is in GPR64noip, a scratch
2189 // register is available.
2190 Register ScratchReg = Val == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2191 assert(ScratchReg != AddrDisc &&
2192 "Neither X16 nor X17 is available as a scratch register");
2193
2194 // Compute pac discriminator
2195 assert(isUInt<16>(Disc));
2196 Register DiscReg = emitPtrauthDiscriminator(
2197 Disc, AddrDisc, ScratchReg, /*MayUseAddrAsScratch=*/AddrDiscKilled);
2198 bool IsZeroDisc = DiscReg == AArch64::XZR;
2199 unsigned Opc = getPACOpcodeForKey(Key, IsZeroDisc);
2200
2201 // paciza x16 ; if IsZeroDisc
2202 // pacia x16, x17 ; if !IsZeroDisc
2203 MCInst PACInst;
2204 PACInst.setOpcode(Opc);
2205 PACInst.addOperand(MCOperand::createReg(Val));
2206 PACInst.addOperand(MCOperand::createReg(Val));
2207 if (!IsZeroDisc)
2208 PACInst.addOperand(MCOperand::createReg(DiscReg));
2209 EmitToStreamer(*OutStreamer, PACInst);
2210}
2211
2212void AArch64AsmPrinter::emitPtrauthBranch(const MachineInstr *MI) {
2213 bool IsCall = MI->getOpcode() == AArch64::BLRA;
2214 unsigned BrTarget = MI->getOperand(0).getReg();
2215
2216 auto Key = (AArch64PACKey::ID)MI->getOperand(1).getImm();
2217 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2218 "Invalid auth call key");
2219
2220 uint64_t Disc = MI->getOperand(2).getImm();
2221 assert(isUInt<16>(Disc));
2222
2223 unsigned AddrDisc = MI->getOperand(3).getReg();
2224
2225 // Make sure AddrDisc is solely used to compute the discriminator.
2226 // While hardly meaningful, it is still possible to describe an authentication
2227 // of a pointer against its own value (instead of storage address) with
2228 // intrinsics, so use report_fatal_error instead of assert.
2229 if (BrTarget == AddrDisc)
2230 report_fatal_error("Branch target is signed with its own value");
2231
2232 // If we are printing BLRA pseudo, try to save one MOV by making use of the
2233 // fact that x16 and x17 are described as clobbered by the MI instruction and
2234 // AddrDisc is not used as any other input.
2235 //
2236 // Back in the day, emitPtrauthDiscriminator was restricted to only returning
2237 // either x16 or x17, meaning the returned register is always among the
2238 // implicit-def'ed registers of the BLRA pseudo. Now this property can be
2239 // violated if the isX16X17Safer predicate is false, so manually check whether
2240 // AddrDisc is x16 or x17 to avoid clobbering unexpected registers.
2241 //
2242 // Unlike BLRA, BRA pseudo is used to perform computed goto, and thus not
2243 // declared as clobbering x16/x17.
2244 //
2245 // FIXME: Make use of `killed` flags and register masks instead.
2246 bool AddrDiscIsImplicitDef =
2247 IsCall && (AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17);
2248 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17,
2249 AddrDiscIsImplicitDef);
2250 bool IsZeroDisc = DiscReg == AArch64::XZR;
2251
2252 unsigned Opc;
2253 if (IsCall) {
2254 if (Key == AArch64PACKey::IA)
2255 Opc = IsZeroDisc ? AArch64::BLRAAZ : AArch64::BLRAA;
2256 else
2257 Opc = IsZeroDisc ? AArch64::BLRABZ : AArch64::BLRAB;
2258 } else {
2259 if (Key == AArch64PACKey::IA)
2260 Opc = IsZeroDisc ? AArch64::BRAAZ : AArch64::BRAA;
2261 else
2262 Opc = IsZeroDisc ? AArch64::BRABZ : AArch64::BRAB;
2263 }
2264
2265 MCInst BRInst;
2266 BRInst.setOpcode(Opc);
2267 BRInst.addOperand(MCOperand::createReg(BrTarget));
2268 if (!IsZeroDisc)
2269 BRInst.addOperand(MCOperand::createReg(DiscReg));
2270 EmitToStreamer(*OutStreamer, BRInst);
2271}
2272
2273const MCExpr *
2274AArch64AsmPrinter::lowerConstantPtrAuth(const ConstantPtrAuth &CPA) {
2275 MCContext &Ctx = OutContext;
2276
2277 // Figure out the base symbol and the addend, if any.
2278 APInt Offset(64, 0);
2279 const Value *BaseGV = CPA.getPointer()->stripAndAccumulateConstantOffsets(
2280 getDataLayout(), Offset, /*AllowNonInbounds=*/true);
2281
2282 auto *BaseGVB = dyn_cast<GlobalValue>(BaseGV);
2283
2284 // If we can't understand the referenced ConstantExpr, there's nothing
2285 // else we can do: emit an error.
2286 if (!BaseGVB) {
2287 BaseGV->getContext().emitError(
2288 "cannot resolve target base/addend of ptrauth constant");
2289 return nullptr;
2290 }
2291
2292 // If there is an addend, turn that into the appropriate MCExpr.
2293 const MCExpr *Sym = MCSymbolRefExpr::create(getSymbol(BaseGVB), Ctx);
2294 if (Offset.sgt(0))
2295 Sym = MCBinaryExpr::createAdd(
2296 Sym, MCConstantExpr::create(Offset.getSExtValue(), Ctx), Ctx);
2297 else if (Offset.slt(0))
2298 Sym = MCBinaryExpr::createSub(
2299 Sym, MCConstantExpr::create((-Offset).getSExtValue(), Ctx), Ctx);
2300
2301 uint64_t KeyID = CPA.getKey()->getZExtValue();
2302 // We later rely on the KeyID value being valid in the AArch64PACKeyIDToString
2303 // call from AArch64AuthMCExpr::printImpl, so fail fast.
2304 if (KeyID > AArch64PACKey::LAST) {
2305 CPA.getContext().emitError("AArch64 PAC Key ID '" + Twine(KeyID) +
2306 "' out of range [0, " +
2307 Twine((unsigned)AArch64PACKey::LAST) + "]");
2308 KeyID = 0;
2309 }
2310
2311 uint64_t Disc = CPA.getDiscriminator()->getZExtValue();
2312 if (!isUInt<16>(Disc)) {
2313 CPA.getContext().emitError("AArch64 PAC Discriminator '" + Twine(Disc) +
2314 "' out of range [0, 0xFFFF]");
2315 Disc = 0;
2316 }
2317
2318 // Finally build the complete @AUTH expr.
2319 return AArch64AuthMCExpr::create(Sym, Disc, AArch64PACKey::ID(KeyID),
2320 CPA.hasAddressDiscriminator(), Ctx);
2321}
2322
2323void AArch64AsmPrinter::LowerLOADauthptrstatic(const MachineInstr &MI) {
2324 unsigned DstReg = MI.getOperand(0).getReg();
2325 const MachineOperand &GAOp = MI.getOperand(1);
2326 const uint64_t KeyC = MI.getOperand(2).getImm();
2327 assert(KeyC <= AArch64PACKey::LAST &&
2328 "key is out of range [0, AArch64PACKey::LAST]");
2329 const auto Key = (AArch64PACKey::ID)KeyC;
2330 const uint64_t Disc = MI.getOperand(3).getImm();
2331 assert(isUInt<16>(Disc) &&
2332 "constant discriminator is out of range [0, 0xffff]");
2333
2334 // Emit instruction sequence like the following:
2335 // ADRP x16, symbol$auth_ptr$key$disc
2336 // LDR x16, [x16, :lo12:symbol$auth_ptr$key$disc]
2337 //
2338 // Where the $auth_ptr$ symbol is the stub slot containing the signed pointer
2339 // to symbol.
2340 MCSymbol *AuthPtrStubSym;
2341 if (TM.getTargetTriple().isOSBinFormatELF()) {
2342 const auto &TLOF =
2343 static_cast<const AArch64_ELFTargetObjectFile &>(getObjFileLowering());
2344
2345 assert(GAOp.getOffset() == 0 &&
2346 "non-zero offset for $auth_ptr$ stub slots is not supported");
2347 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2348 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2349 } else {
2350 assert(TM.getTargetTriple().isOSBinFormatMachO() &&
2351 "LOADauthptrstatic is implemented only for MachO/ELF");
2352
2353 const auto &TLOF = static_cast<const AArch64_MachoTargetObjectFile &>(
2354 getObjFileLowering());
2355
2356 assert(GAOp.getOffset() == 0 &&
2357 "non-zero offset for $auth_ptr$ stub slots is not supported");
2358 const MCSymbol *GASym = TM.getSymbol(GAOp.getGlobal());
2359 AuthPtrStubSym = TLOF.getAuthPtrSlotSymbol(TM, MMI, GASym, Key, Disc);
2360 }
2361
2362 MachineOperand StubMOHi =
2363 MachineOperand::CreateMCSymbol(AuthPtrStubSym, AArch64II::MO_PAGE);
2364 MachineOperand StubMOLo = MachineOperand::CreateMCSymbol(
2365 AuthPtrStubSym, AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2366 MCOperand StubMCHi, StubMCLo;
2367
2368 MCInstLowering.lowerOperand(StubMOHi, StubMCHi);
2369 MCInstLowering.lowerOperand(StubMOLo, StubMCLo);
2370
2371 EmitToStreamer(
2372 *OutStreamer,
2373 MCInstBuilder(AArch64::ADRP).addReg(DstReg).addOperand(StubMCHi));
2374
2375 EmitToStreamer(*OutStreamer, MCInstBuilder(AArch64::LDRXui)
2376 .addReg(DstReg)
2377 .addReg(DstReg)
2378 .addOperand(StubMCLo));
2379}
2380
2381void AArch64AsmPrinter::LowerMOVaddrPAC(const MachineInstr &MI) {
2382 const bool IsGOTLoad = MI.getOpcode() == AArch64::LOADgotPAC;
2383 const bool IsELFSignedGOT = MI.getParent()
2384 ->getParent()
2385 ->getInfo<AArch64FunctionInfo>()
2386 ->hasELFSignedGOT();
2387 MachineOperand GAOp = MI.getOperand(0);
2388 const uint64_t KeyC = MI.getOperand(1).getImm();
2389 assert(KeyC <= AArch64PACKey::LAST &&
2390 "key is out of range [0, AArch64PACKey::LAST]");
2391 const auto Key = (AArch64PACKey::ID)KeyC;
2392 const unsigned AddrDisc = MI.getOperand(2).getReg();
2393 const uint64_t Disc = MI.getOperand(3).getImm();
2394 assert(isUInt<16>(Disc) &&
2395 "constant discriminator is out of range [0, 0xffff]");
2396
2397 const int64_t Offset = GAOp.getOffset();
2398 GAOp.setOffset(0);
2399
2400 // Emit:
2401 // target materialization:
2402 // - via GOT:
2403 // - unsigned GOT:
2404 // adrp x16, :got:target
2405 // ldr x16, [x16, :got_lo12:target]
2406 // add offset to x16 if offset != 0
2407 // - ELF signed GOT:
2408 // adrp x17, :got:target
2409 // add x17, x17, :got_auth_lo12:target
2410 // ldr x16, [x17]
2411 // aut{i|d}a x16, x17
2412 // check+trap sequence (if no FPAC)
2413 // add offset to x16 if offset != 0
2414 //
2415 // - direct:
2416 // adrp x16, target
2417 // add x16, x16, :lo12:target
2418 // add offset to x16 if offset != 0
2419 //
2420 // add offset to x16:
2421 // - abs(offset) fits 24 bits:
2422 // add/sub x16, x16, #<offset>[, #lsl 12] (up to 2 instructions)
2423 // - abs(offset) does not fit 24 bits:
2424 // - offset < 0:
2425 // movn+movk sequence filling x17 register with the offset (up to 4
2426 // instructions)
2427 // add x16, x16, x17
2428 // - offset > 0:
2429 // movz+movk sequence filling x17 register with the offset (up to 4
2430 // instructions)
2431 // add x16, x16, x17
2432 //
2433 // signing:
2434 // - 0 discriminator:
2435 // paciza x16
2436 // - Non-0 discriminator, no address discriminator:
2437 // mov x17, #Disc
2438 // pacia x16, x17
2439 // - address discriminator (with potentially folded immediate discriminator):
2440 // pacia x16, xAddrDisc
2441
2442 MachineOperand GAMOHi(GAOp), GAMOLo(GAOp);
2443 MCOperand GAMCHi, GAMCLo;
2444
2445 GAMOHi.setTargetFlags(AArch64II::MO_PAGE);
2446 GAMOLo.setTargetFlags(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2447 if (IsGOTLoad) {
2448 GAMOHi.addTargetFlag(AArch64II::MO_GOT);
2449 GAMOLo.addTargetFlag(AArch64II::MO_GOT);
2450 }
2451
2452 MCInstLowering.lowerOperand(GAMOHi, GAMCHi);
2453 MCInstLowering.lowerOperand(GAMOLo, GAMCLo);
2454
2455 EmitToStreamer(
2456 MCInstBuilder(AArch64::ADRP)
2457 .addReg(IsGOTLoad && IsELFSignedGOT ? AArch64::X17 : AArch64::X16)
2458 .addOperand(GAMCHi));
2459
2460 if (IsGOTLoad) {
2461 if (IsELFSignedGOT) {
2462 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2463 .addReg(AArch64::X17)
2464 .addReg(AArch64::X17)
2465 .addOperand(GAMCLo)
2466 .addImm(0));
2467
2468 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2469 .addReg(AArch64::X16)
2470 .addReg(AArch64::X17)
2471 .addImm(0));
2472
2473 assert(GAOp.isGlobal());
2474 assert(GAOp.getGlobal()->getValueType() != nullptr);
2475 unsigned AuthOpcode = GAOp.getGlobal()->getValueType()->isFunctionTy()
2476 ? AArch64::AUTIA
2477 : AArch64::AUTDA;
2478
2479 EmitToStreamer(MCInstBuilder(AuthOpcode)
2480 .addReg(AArch64::X16)
2481 .addReg(AArch64::X16)
2482 .addReg(AArch64::X17));
2483
2484 if (!STI->hasFPAC()) {
2485 auto AuthKey = (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA
2486 : AArch64PACKey::DA);
2487
2488 emitPtrauthCheckAuthenticatedValue(AArch64::X16, AArch64::X17, AuthKey,
2489 AArch64PAuth::AuthCheckMethod::XPAC,
2490 /*ShouldTrap=*/true,
2491 /*OnFailure=*/nullptr);
2492 }
2493 } else {
2494 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2495 .addReg(AArch64::X16)
2496 .addReg(AArch64::X16)
2497 .addOperand(GAMCLo));
2498 }
2499 } else {
2500 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2501 .addReg(AArch64::X16)
2502 .addReg(AArch64::X16)
2503 .addOperand(GAMCLo)
2504 .addImm(0));
2505 }
2506
2507 if (Offset != 0) {
2508 const uint64_t AbsOffset = (Offset > 0 ? Offset : -((uint64_t)Offset));
2509 const bool IsNeg = Offset < 0;
2510 if (isUInt<24>(AbsOffset)) {
2511 for (int BitPos = 0; BitPos != 24 && (AbsOffset >> BitPos);
2512 BitPos += 12) {
2513 EmitToStreamer(
2514 MCInstBuilder(IsNeg ? AArch64::SUBXri : AArch64::ADDXri)
2515 .addReg(AArch64::X16)
2516 .addReg(AArch64::X16)
2517 .addImm((AbsOffset >> BitPos) & 0xfff)
2518 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, BitPos)));
2519 }
2520 } else {
2521 const uint64_t UOffset = Offset;
2522 EmitToStreamer(MCInstBuilder(IsNeg ? AArch64::MOVNXi : AArch64::MOVZXi)
2523 .addReg(AArch64::X17)
2524 .addImm((IsNeg ? ~UOffset : UOffset) & 0xffff)
2525 .addImm(/*shift=*/0));
2526 auto NeedMovk = [IsNeg, UOffset](int BitPos) -> bool {
2527 assert(BitPos == 16 || BitPos == 32 || BitPos == 48);
2528 uint64_t Shifted = UOffset >> BitPos;
2529 if (!IsNeg)
2530 return Shifted != 0;
2531 for (int I = 0; I != 64 - BitPos; I += 16)
2532 if (((Shifted >> I) & 0xffff) != 0xffff)
2533 return true;
2534 return false;
2535 };
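// For a negative offset, the initial MOVN leaves every 16-bit chunk above bit
// 15 as 0xffff, so MOVKs are only emitted for chunks of the offset that differ
// from 0xffff; for a positive offset they stop once the remaining chunks are
// zero.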
2536 for (int BitPos = 16; BitPos != 64 && NeedMovk(BitPos); BitPos += 16)
2537 emitMOVK(AArch64::X17, (UOffset >> BitPos) & 0xffff, BitPos);
2538
2539 EmitToStreamer(MCInstBuilder(AArch64::ADDXrs)
2540 .addReg(AArch64::X16)
2541 .addReg(AArch64::X16)
2542 .addReg(AArch64::X17)
2543 .addImm(/*shift=*/0));
2544 }
2545 }
2546
2547 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, AArch64::X17);
2548
2549 auto MIB = MCInstBuilder(getPACOpcodeForKey(Key, DiscReg == AArch64::XZR))
2550 .addReg(AArch64::X16)
2551 .addReg(AArch64::X16);
2552 if (DiscReg != AArch64::XZR)
2553 MIB.addReg(DiscReg);
2554 EmitToStreamer(MIB);
2555}
2556
2557void AArch64AsmPrinter::LowerLOADgotAUTH(const MachineInstr &MI) {
2558 Register DstReg = MI.getOperand(0).getReg();
2559 Register AuthResultReg = STI->hasFPAC() ? DstReg : AArch64::X16;
2560 const MachineOperand &GAMO = MI.getOperand(1);
2561 assert(GAMO.getOffset() == 0);
2562
2563 if (MI.getMF()->getTarget().getCodeModel() == CodeModel::Tiny) {
2564 MCOperand GAMC;
2565 MCInstLowering.lowerOperand(GAMO, GAMC);
2566 EmitToStreamer(
2567 MCInstBuilder(AArch64::ADR).addReg(AArch64::X17).addOperand(GAMC));
2568 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2569 .addReg(AuthResultReg)
2570 .addReg(AArch64::X17)
2571 .addImm(0));
2572 } else {
2573 MachineOperand GAHiOp(GAMO);
2574 MachineOperand GALoOp(GAMO);
2575 GAHiOp.addTargetFlag(AArch64II::MO_PAGE);
2576 GALoOp.addTargetFlag(AArch64II::MO_PAGEOFF | AArch64II::MO_NC);
2577
2578 MCOperand GAMCHi, GAMCLo;
2579 MCInstLowering.lowerOperand(GAHiOp, GAMCHi);
2580 MCInstLowering.lowerOperand(GALoOp, GAMCLo);
2581
2582 EmitToStreamer(
2583 MCInstBuilder(AArch64::ADRP).addReg(AArch64::X17).addOperand(GAMCHi));
2584
2585 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
2586 .addReg(AArch64::X17)
2587 .addReg(AArch64::X17)
2588 .addOperand(GAMCLo)
2589 .addImm(0));
2590
2591 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
2592 .addReg(AuthResultReg)
2593 .addReg(AArch64::X17)
2594 .addImm(0));
2595 }
2596
2597 assert(GAMO.isGlobal());
2598 MCSymbol *UndefWeakSym;
2599 if (GAMO.getGlobal()->hasExternalWeakLinkage()) {
2600 UndefWeakSym = createTempSymbol("undef_weak");
2601 EmitToStreamer(
2602 MCInstBuilder(AArch64::CBZX)
2603 .addReg(AuthResultReg)
2604 .addExpr(MCSymbolRefExpr::create(UndefWeakSym, OutContext)));
2605 }
2606
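// Code pointers loaded through the authenticated GOT are signed with the IA
// key and data pointers with the DA key, so the AUT opcode is selected from
// the pointee type.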
2607 assert(GAMO.getGlobal()->getValueType() != nullptr);
2608 unsigned AuthOpcode = GAMO.getGlobal()->getValueType()->isFunctionTy()
2609 ? AArch64::AUTIA
2610 : AArch64::AUTDA;
2611 EmitToStreamer(MCInstBuilder(AuthOpcode)
2612 .addReg(AuthResultReg)
2613 .addReg(AuthResultReg)
2614 .addReg(AArch64::X17));
2615
2616 if (GAMO.getGlobal()->hasExternalWeakLinkage())
2617 OutStreamer->emitLabel(UndefWeakSym);
2618
2619 if (!STI->hasFPAC()) {
2620 auto AuthKey =
2621 (AuthOpcode == AArch64::AUTIA ? AArch64PACKey::IA : AArch64PACKey::DA);
2622
2623 emitPtrauthCheckAuthenticatedValue(AuthResultReg, AArch64::X17, AuthKey,
2624 AArch64PAuth::AuthCheckMethod::XPAC,
2625 /*ShouldTrap=*/true,
2626 /*OnFailure=*/nullptr);
2627
2628 emitMovXReg(DstReg, AuthResultReg);
2629 }
2630}
2631
2632const MCExpr *
2633AArch64AsmPrinter::lowerBlockAddressConstant(const BlockAddress &BA) {
2634 const MCExpr *BAE = AsmPrinter::lowerBlockAddressConstant(BA);
2635 const Function &Fn = *BA.getFunction();
2636
2637 if (std::optional<uint16_t> BADisc =
2638 STI->getPtrAuthBlockAddressDiscriminatorIfEnabled(Fn))
2639 return AArch64AuthMCExpr::create(BAE, *BADisc, AArch64PACKey::IA,
2640 /*HasAddressDiversity=*/false, OutContext);
2641
2642 return BAE;
2643}
2644
2645void AArch64AsmPrinter::emitCBPseudoExpansion(const MachineInstr *MI) {
2646 bool IsImm = false;
2647 bool Is32Bit = false;
2648
2649 switch (MI->getOpcode()) {
2650 default:
2651 llvm_unreachable("This is not a CB pseudo instruction");
2652 case AArch64::CBWPrr:
2653 Is32Bit = true;
2654 break;
2655 case AArch64::CBXPrr:
2656 Is32Bit = false;
2657 break;
2658 case AArch64::CBWPri:
2659 IsImm = true;
2660 Is32Bit = true;
2661 break;
2662 case AArch64::CBXPri:
2663 IsImm = true;
2664 break;
2665 }
2666
2667 AArch64CC::CondCode CC =
2668 static_cast<AArch64CC::CondCode>(MI->getOperand(0).getImm());
2669 bool NeedsRegSwap = false;
2670 bool NeedsImmDec = false;
2671 bool NeedsImmInc = false;
2672
2673 // Decide if we need to either swap register operands or increment/decrement
2674 // immediate operands
2675 unsigned MCOpC;
2676 switch (CC) {
2677 default:
2678 llvm_unreachable("Invalid CB condition code");
2679 case AArch64CC::EQ:
2680 MCOpC = IsImm ? (Is32Bit ? AArch64::CBEQWri : AArch64::CBEQXri)
2681 : (Is32Bit ? AArch64::CBEQWrr : AArch64::CBEQXrr);
2682 break;
2683 case AArch64CC::NE:
2684 MCOpC = IsImm ? (Is32Bit ? AArch64::CBNEWri : AArch64::CBNEXri)
2685 : (Is32Bit ? AArch64::CBNEWrr : AArch64::CBNEXrr);
2686 break;
2687 case AArch64CC::HS:
2688 MCOpC = IsImm ? (Is32Bit ? AArch64::CBHIWri : AArch64::CBHIXri)
2689 : (Is32Bit ? AArch64::CBHSWrr : AArch64::CBHSXrr);
2690 NeedsImmDec = IsImm;
2691 break;
2692 case AArch64CC::LO:
2693 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLOWri : AArch64::CBLOXri)
2694 : (Is32Bit ? AArch64::CBHIWrr : AArch64::CBHIXrr);
2695 NeedsRegSwap = !IsImm;
2696 break;
2697 case AArch64CC::HI:
2698 MCOpC = IsImm ? (Is32Bit ? AArch64::CBHIWri : AArch64::CBHIXri)
2699 : (Is32Bit ? AArch64::CBHIWrr : AArch64::CBHIXrr);
2700 break;
2701 case AArch64CC::LS:
2702 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLOWri : AArch64::CBLOXri)
2703 : (Is32Bit ? AArch64::CBHSWrr : AArch64::CBHSXrr);
2704 NeedsRegSwap = !IsImm;
2705 NeedsImmInc = IsImm;
2706 break;
2707 case AArch64CC::GE:
2708 MCOpC = IsImm ? (Is32Bit ? AArch64::CBGTWri : AArch64::CBGTXri)
2709 : (Is32Bit ? AArch64::CBGEWrr : AArch64::CBGEXrr);
2710 NeedsImmDec = IsImm;
2711 break;
2712 case AArch64CC::LT:
2713 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLTWri : AArch64::CBLTXri)
2714 : (Is32Bit ? AArch64::CBGTWrr : AArch64::CBGTXrr);
2715 NeedsRegSwap = !IsImm;
2716 break;
2717 case AArch64CC::GT:
2718 MCOpC = IsImm ? (Is32Bit ? AArch64::CBGTWri : AArch64::CBGTXri)
2719 : (Is32Bit ? AArch64::CBGTWrr : AArch64::CBGTXrr);
2720 break;
2721 case AArch64CC::LE:
2722 MCOpC = IsImm ? (Is32Bit ? AArch64::CBLTWri : AArch64::CBLTXri)
2723 : (Is32Bit ? AArch64::CBGEWrr : AArch64::CBGEXrr);
2724 NeedsRegSwap = !IsImm;
2725 NeedsImmInc = IsImm;
2726 break;
2727 }
2728
2729 MCInst Inst;
2730 Inst.setOpcode(MCOpC);
2731
2732 MCOperand Lhs, Rhs, Trgt;
2733 lowerOperand(MI->getOperand(1), Lhs);
2734 lowerOperand(MI->getOperand(2), Rhs);
2735 lowerOperand(MI->getOperand(3), Trgt);
2736
2737 // Now swap, increment or decrement
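// E.g. ">= #imm" has no direct CB encoding, so it is emitted as "> #(imm-1)";
// "<= #imm" becomes "< #(imm+1)"; register compares without an encoding swap
// their operands and use the converse condition.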
2738 if (NeedsRegSwap) {
2739 assert(Lhs.isReg() && "Expected register operand for CB");
2740 assert(Rhs.isReg() && "Expected register operand for CB");
2741 Inst.addOperand(Rhs);
2742 Inst.addOperand(Lhs);
2743 } else if (NeedsImmDec) {
2744 Rhs.setImm(Rhs.getImm() - 1);
2745 Inst.addOperand(Lhs);
2746 Inst.addOperand(Rhs);
2747 } else if (NeedsImmInc) {
2748 Rhs.setImm(Rhs.getImm() + 1);
2749 Inst.addOperand(Lhs);
2750 Inst.addOperand(Rhs);
2751 } else {
2752 Inst.addOperand(Lhs);
2753 Inst.addOperand(Rhs);
2754 }
2755
2756 assert((!IsImm || (Rhs.getImm() >= 0 && Rhs.getImm() < 64)) &&
2757 "CB immediate operand out-of-bounds");
2758
2759 Inst.addOperand(Trgt);
2760 EmitToStreamer(*OutStreamer, Inst);
2761}
2762
2763// Simple pseudo-instructions have their lowering (with expansion to real
2764// instructions) auto-generated.
2765#include "AArch64GenMCPseudoLowering.inc"
2766
2767void AArch64AsmPrinter::EmitToStreamer(MCStreamer &S, const MCInst &Inst) {
2768 S.emitInstruction(Inst, *STI);
2769#ifndef NDEBUG
2770 ++InstsEmitted;
2771#endif
2772}
2773
2774void AArch64AsmPrinter::emitInstruction(const MachineInstr *MI) {
2775 AArch64_MC::verifyInstructionPredicates(MI->getOpcode(), STI->getFeatureBits());
2776
2777#ifndef NDEBUG
2778 InstsEmitted = 0;
2779 auto CheckMISize = make_scope_exit([&]() {
2780 assert(STI->getInstrInfo()->getInstSizeInBytes(*MI) >= InstsEmitted * 4);
2781 });
2782#endif
2783
2784 // Do any auto-generated pseudo lowerings.
2785 if (MCInst OutInst; lowerPseudoInstExpansion(MI, OutInst)) {
2786 EmitToStreamer(*OutStreamer, OutInst);
2787 return;
2788 }
2789
2790 if (MI->getOpcode() == AArch64::ADRP) {
2791 for (auto &Opd : MI->operands()) {
2792 if (Opd.isSymbol() && StringRef(Opd.getSymbolName()) ==
2793 "swift_async_extendedFramePointerFlags") {
2794 ShouldEmitWeakSwiftAsyncExtendedFramePointerFlags = true;
2795 }
2796 }
2797 }
2798
2799 if (AArch64FI->getLOHRelated().count(MI)) {
2800 // Generate a label for LOH related instruction
2801 MCSymbol *LOHLabel = createTempSymbol("loh");
2802 // Associate the instruction with the label
2803 LOHInstToLabel[MI] = LOHLabel;
2804 OutStreamer->emitLabel(LOHLabel);
2805 }
2806
2807 AArch64TargetStreamer *TS =
2808 static_cast<AArch64TargetStreamer *>(OutStreamer->getTargetStreamer());
2809 // Do any manual lowerings.
2810 switch (MI->getOpcode()) {
2811 default:
2812 assert(!AArch64InstrInfo::isTailCallReturnInst(*MI) &&
2813 "Unhandled tail call instruction");
2814 break;
2815 case AArch64::HINT: {
2816 // CurrentPatchableFunctionEntrySym can be CurrentFnBegin only for
2817 // -fpatchable-function-entry=N,0. The entry MBB is guaranteed to be
2818 // non-empty. If MI is the initial BTI, place the
2819 // __patchable_function_entries label after BTI.
2820 if (CurrentPatchableFunctionEntrySym &&
2821 CurrentPatchableFunctionEntrySym == CurrentFnBegin &&
2822 MI == &MF->front().front()) {
2823 int64_t Imm = MI->getOperand(0).getImm();
2824 if ((Imm & 32) && (Imm & 6)) {
2825 MCInst Inst;
2826 MCInstLowering.Lower(MI, Inst);
2827 EmitToStreamer(*OutStreamer, Inst);
2828 CurrentPatchableFunctionEntrySym = createTempSymbol("patch");
2829 OutStreamer->emitLabel(CurrentPatchableFunctionEntrySym);
2830 return;
2831 }
2832 }
2833 break;
2834 }
2835 case AArch64::MOVMCSym: {
2836 Register DestReg = MI->getOperand(0).getReg();
2837 const MachineOperand &MO_Sym = MI->getOperand(1);
2838 MachineOperand Hi_MOSym(MO_Sym), Lo_MOSym(MO_Sym);
2839 MCOperand Hi_MCSym, Lo_MCSym;
2840
2841 Hi_MOSym.setTargetFlags(AArch64II::MO_G1 | AArch64II::MO_S);
2842 Lo_MOSym.setTargetFlags(AArch64II::MO_G0 | AArch64II::MO_NC);
2843
2844 MCInstLowering.lowerOperand(Hi_MOSym, Hi_MCSym);
2845 MCInstLowering.lowerOperand(Lo_MOSym, Lo_MCSym);
2846
2847 MCInst MovZ;
2848 MovZ.setOpcode(AArch64::MOVZXi);
2849 MovZ.addOperand(MCOperand::createReg(DestReg));
2850 MovZ.addOperand(Hi_MCSym);
2851 MovZ.addOperand(MCOperand::createImm(16));
2852 EmitToStreamer(*OutStreamer, MovZ);
2853
2854 MCInst MovK;
2855 MovK.setOpcode(AArch64::MOVKXi);
2856 MovK.addOperand(MCOperand::createReg(DestReg));
2857 MovK.addOperand(MCOperand::createReg(DestReg));
2858 MovK.addOperand(Lo_MCSym);
2859 MovK.addOperand(MCOperand::createImm(0));
2860 EmitToStreamer(*OutStreamer, MovK);
2861 return;
2862 }
2863 case AArch64::MOVIv2d_ns:
2864 // It is generally beneficial to rewrite "fmov s0, wzr" to "movi d0, #0",
2865 // as movi is more efficient across all cores. Newer cores can eliminate
2866 // fmovs early and there is no difference with movi, but this is not true
2867 // for all implementations.
2868 //
2869 // The floating-point version doesn't quite work in rare cases on older
2870 // CPUs, so on those targets we lower this instruction to movi.16b instead.
2871 if (STI->hasZeroCycleZeroingFPWorkaround() &&
2872 MI->getOperand(1).getImm() == 0) {
2873 MCInst TmpInst;
2874 TmpInst.setOpcode(AArch64::MOVIv16b_ns);
2875 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
2876 TmpInst.addOperand(MCOperand::createImm(0));
2877 EmitToStreamer(*OutStreamer, TmpInst);
2878 return;
2879 }
2880 break;
2881
2882 case AArch64::DBG_VALUE:
2883 case AArch64::DBG_VALUE_LIST:
2884 if (isVerbose() && OutStreamer->hasRawTextSupport()) {
2885 SmallString<128> TmpStr;
2886 raw_svector_ostream OS(TmpStr);
2887 PrintDebugValueComment(MI, OS);
2888 OutStreamer->emitRawText(StringRef(OS.str()));
2889 }
2890 return;
2891
2892 case AArch64::EMITBKEY: {
2893 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2894 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2895 ExceptionHandlingType != ExceptionHandling::ARM)
2896 return;
2897
2898 if (getFunctionCFISectionType(*MF) == CFISection::None)
2899 return;
2900
2901 OutStreamer->emitCFIBKeyFrame();
2902 return;
2903 }
2904
2905 case AArch64::EMITMTETAGGED: {
2906 ExceptionHandling ExceptionHandlingType = MAI->getExceptionHandlingType();
2907 if (ExceptionHandlingType != ExceptionHandling::DwarfCFI &&
2908 ExceptionHandlingType != ExceptionHandling::ARM)
2909 return;
2910
2911 if (getFunctionCFISectionType(*MF) != CFISection::None)
2912 OutStreamer->emitCFIMTETaggedFrame();
2913 return;
2914 }
2915
2916 case AArch64::AUTx16x17:
2917 emitPtrauthAuthResign(AArch64::X16,
2918 (AArch64PACKey::ID)MI->getOperand(0).getImm(),
2919 MI->getOperand(1).getImm(), &MI->getOperand(2),
2920 AArch64::X17, std::nullopt, 0, 0);
2921 return;
2922
2923 case AArch64::AUTxMxN:
2924 emitPtrauthAuthResign(MI->getOperand(0).getReg(),
2925 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
2926 MI->getOperand(4).getImm(), &MI->getOperand(5),
2927 MI->getOperand(1).getReg(), std::nullopt, 0, 0);
2928 return;
2929
2930 case AArch64::AUTPAC:
2931 emitPtrauthAuthResign(
2932 AArch64::X16, (AArch64PACKey::ID)MI->getOperand(0).getImm(),
2933 MI->getOperand(1).getImm(), &MI->getOperand(2), AArch64::X17,
2934 (AArch64PACKey::ID)MI->getOperand(3).getImm(),
2935 MI->getOperand(4).getImm(), MI->getOperand(5).getReg());
2936 return;
2937
2938 case AArch64::PAC:
2939 emitPtrauthSign(MI);
2940 return;
2941
2942 case AArch64::LOADauthptrstatic:
2943 LowerLOADauthptrstatic(*MI);
2944 return;
2945
2946 case AArch64::LOADgotPAC:
2947 case AArch64::MOVaddrPAC:
2948 LowerMOVaddrPAC(*MI);
2949 return;
2950
2951 case AArch64::LOADgotAUTH:
2952 LowerLOADgotAUTH(*MI);
2953 return;
2954
2955 case AArch64::BRA:
2956 case AArch64::BLRA:
2957 emitPtrauthBranch(MI);
2958 return;
2959
2960 // Tail calls use pseudo instructions so they have the proper code-gen
2961 // attributes (isCall, isReturn, etc.). We lower them to the real
2962 // instruction here.
2963 case AArch64::AUTH_TCRETURN:
2964 case AArch64::AUTH_TCRETURN_BTI: {
2965 Register Callee = MI->getOperand(0).getReg();
2966 const uint64_t Key = MI->getOperand(2).getImm();
2967 assert((Key == AArch64PACKey::IA || Key == AArch64PACKey::IB) &&
2968 "Invalid auth key for tail-call return");
2969
2970 const uint64_t Disc = MI->getOperand(3).getImm();
2971 assert(isUInt<16>(Disc) && "Integer discriminator is too wide");
2972
2973 Register AddrDisc = MI->getOperand(4).getReg();
2974
2975 Register ScratchReg = Callee == AArch64::X16 ? AArch64::X17 : AArch64::X16;
2976
2977 emitPtrauthTailCallHardening(MI);
2978
2979 // See the comments in emitPtrauthBranch.
2980 if (Callee == AddrDisc)
2981 report_fatal_error("Call target is signed with its own value");
2982
2983 // After the isX16X17Safer predicate was introduced, emitPtrauthDiscriminator
2984 // is no longer restricted to reusing AddrDisc only when it is X16 or X17
2985 // (which are implicit-def'ed by the AUTH_TCRETURN pseudos), so impose this
2986 // restriction manually to avoid clobbering an unexpected register.
2987 bool AddrDiscIsImplicitDef =
2988 AddrDisc == AArch64::X16 || AddrDisc == AArch64::X17;
2989 Register DiscReg = emitPtrauthDiscriminator(Disc, AddrDisc, ScratchReg,
2990 AddrDiscIsImplicitDef);
2991
2992 const bool IsZero = DiscReg == AArch64::XZR;
2993 const unsigned Opcodes[2][2] = {{AArch64::BRAA, AArch64::BRAAZ},
2994 {AArch64::BRAB, AArch64::BRABZ}};
2995
2996 MCInst TmpInst;
2997 TmpInst.setOpcode(Opcodes[Key][IsZero]);
2998 TmpInst.addOperand(MCOperand::createReg(Callee));
2999 if (!IsZero)
3000 TmpInst.addOperand(MCOperand::createReg(DiscReg));
3001 EmitToStreamer(*OutStreamer, TmpInst);
3002 return;
3003 }
3004
3005 case AArch64::TCRETURNri:
3006 case AArch64::TCRETURNrix16x17:
3007 case AArch64::TCRETURNrix17:
3008 case AArch64::TCRETURNrinotx16:
3009 case AArch64::TCRETURNriALL: {
3010 emitPtrauthTailCallHardening(MI);
3011
3012 recordIfImportCall(MI);
3013 MCInst TmpInst;
3014 TmpInst.setOpcode(AArch64::BR);
3015 TmpInst.addOperand(MCOperand::createReg(MI->getOperand(0).getReg()));
3016 EmitToStreamer(*OutStreamer, TmpInst);
3017 return;
3018 }
3019 case AArch64::TCRETURNdi: {
3020 emitPtrauthTailCallHardening(MI);
3021
3022 MCOperand Dest;
3023 MCInstLowering.lowerOperand(MI->getOperand(0), Dest);
3024 recordIfImportCall(MI);
3025 MCInst TmpInst;
3026 TmpInst.setOpcode(AArch64::B);
3027 TmpInst.addOperand(Dest);
3028 EmitToStreamer(*OutStreamer, TmpInst);
3029 return;
3030 }
3031 case AArch64::SpeculationBarrierISBDSBEndBB: {
3032 // Print DSB SYS + ISB
3033 MCInst TmpInstDSB;
3034 TmpInstDSB.setOpcode(AArch64::DSB);
3035 TmpInstDSB.addOperand(MCOperand::createImm(0xf));
3036 EmitToStreamer(*OutStreamer, TmpInstDSB);
3037 MCInst TmpInstISB;
3038 TmpInstISB.setOpcode(AArch64::ISB);
3039 TmpInstISB.addOperand(MCOperand::createImm(0xf));
3040 EmitToStreamer(*OutStreamer, TmpInstISB);
3041 return;
3042 }
3043 case AArch64::SpeculationBarrierSBEndBB: {
3044 // Print SB
3045 MCInst TmpInstSB;
3046 TmpInstSB.setOpcode(AArch64::SB);
3047 EmitToStreamer(*OutStreamer, TmpInstSB);
3048 return;
3049 }
3050 case AArch64::TLSDESC_AUTH_CALLSEQ: {
3051 /// lower this to:
3052 /// adrp x0, :tlsdesc_auth:var
3053 /// ldr x16, [x0, #:tlsdesc_auth_lo12:var]
3054 /// add x0, x0, #:tlsdesc_auth_lo12:var
3055 /// blraa x16, x0
3056 /// (TPIDR_EL0 offset now in x0)
3057 const MachineOperand &MO_Sym = MI->getOperand(0);
3058 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3059 MCOperand SymTLSDescLo12, SymTLSDesc;
3060 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3061 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3062 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3063 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3064
3065 MCInst Adrp;
3066 Adrp.setOpcode(AArch64::ADRP);
3067 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3068 Adrp.addOperand(SymTLSDesc);
3069 EmitToStreamer(*OutStreamer, Adrp);
3070
3071 MCInst Ldr;
3072 Ldr.setOpcode(AArch64::LDRXui);
3073 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3074 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3075 Ldr.addOperand(SymTLSDescLo12);
3076 Ldr.addOperand(MCOperand::createImm(0));
3077 EmitToStreamer(*OutStreamer, Ldr);
3078
3079 MCInst Add;
3080 Add.setOpcode(AArch64::ADDXri);
3081 Add.addOperand(MCOperand::createReg(AArch64::X0));
3082 Add.addOperand(MCOperand::createReg(AArch64::X0));
3083 Add.addOperand(SymTLSDescLo12);
3084 Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
3085 EmitToStreamer(*OutStreamer, Add);
3086
3087 // Authenticated TLSDESC accesses are not relaxed.
3088 // Thus, do not emit .tlsdesccall for AUTH TLSDESC.
3089
3090 MCInst Blraa;
3091 Blraa.setOpcode(AArch64::BLRAA);
3092 Blraa.addOperand(MCOperand::createReg(AArch64::X16));
3093 Blraa.addOperand(MCOperand::createReg(AArch64::X0));
3094 EmitToStreamer(*OutStreamer, Blraa);
3095
3096 return;
3097 }
3098 case AArch64::TLSDESC_CALLSEQ: {
3099 /// lower this to:
3100 /// adrp x0, :tlsdesc:var
3101 /// ldr x1, [x0, #:tlsdesc_lo12:var]
3102 /// add x0, x0, #:tlsdesc_lo12:var
3103 /// .tlsdesccall var
3104 /// blr x1
3105 /// (TPIDR_EL0 offset now in x0)
3106 const MachineOperand &MO_Sym = MI->getOperand(0);
3107 MachineOperand MO_TLSDESC_LO12(MO_Sym), MO_TLSDESC(MO_Sym);
3108 MCOperand Sym, SymTLSDescLo12, SymTLSDesc;
3109 MO_TLSDESC_LO12.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGEOFF);
3110 MO_TLSDESC.setTargetFlags(AArch64II::MO_TLS | AArch64II::MO_PAGE);
3111 MCInstLowering.lowerOperand(MO_Sym, Sym);
3112 MCInstLowering.lowerOperand(MO_TLSDESC_LO12, SymTLSDescLo12);
3113 MCInstLowering.lowerOperand(MO_TLSDESC, SymTLSDesc);
3114
3115 MCInst Adrp;
3116 Adrp.setOpcode(AArch64::ADRP);
3117 Adrp.addOperand(MCOperand::createReg(AArch64::X0));
3118 Adrp.addOperand(SymTLSDesc);
3119 EmitToStreamer(*OutStreamer, Adrp);
3120
3121 MCInst Ldr;
3122 if (STI->isTargetILP32()) {
3123 Ldr.setOpcode(AArch64::LDRWui);
3124 Ldr.addOperand(MCOperand::createReg(AArch64::W1));
3125 } else {
3126 Ldr.setOpcode(AArch64::LDRXui);
3127 Ldr.addOperand(MCOperand::createReg(AArch64::X1));
3128 }
3129 Ldr.addOperand(MCOperand::createReg(AArch64::X0));
3130 Ldr.addOperand(SymTLSDescLo12);
3131 Ldr.addOperand(MCOperand::createImm(0));
3132 EmitToStreamer(*OutStreamer, Ldr);
3133
3134 MCInst Add;
3135 if (STI->isTargetILP32()) {
3136 Add.setOpcode(AArch64::ADDWri);
3137 Add.addOperand(MCOperand::createReg(AArch64::W0));
3138 Add.addOperand(MCOperand::createReg(AArch64::W0));
3139 } else {
3140 Add.setOpcode(AArch64::ADDXri);
3141 Add.addOperand(MCOperand::createReg(AArch64::X0));
3142 Add.addOperand(MCOperand::createReg(AArch64::X0));
3143 }
3144 Add.addOperand(SymTLSDescLo12);
3145 Add.addOperand(MCOperand::createImm(AArch64_AM::getShiftValue(0)));
3146 EmitToStreamer(*OutStreamer, Add);
3147
3148 // Emit a relocation annotation. This expands to no code, but requests that
3149 // the following instruction get an R_AARCH64_TLSDESC_CALL relocation.
3150 MCInst TLSDescCall;
3151 TLSDescCall.setOpcode(AArch64::TLSDESCCALL);
3152 TLSDescCall.addOperand(Sym);
3153 EmitToStreamer(*OutStreamer, TLSDescCall);
3154#ifndef NDEBUG
3155 --InstsEmitted; // no code emitted
3156#endif
3157
3158 MCInst Blr;
3159 Blr.setOpcode(AArch64::BLR);
3160 Blr.addOperand(MCOperand::createReg(AArch64::X1));
3161 EmitToStreamer(*OutStreamer, Blr);
3162
3163 return;
3164 }
3165
3166 case AArch64::JumpTableDest32:
3167 case AArch64::JumpTableDest16:
3168 case AArch64::JumpTableDest8:
3169 LowerJumpTableDest(*OutStreamer, *MI);
3170 return;
3171
3172 case AArch64::BR_JumpTable:
3173 LowerHardenedBRJumpTable(*MI);
3174 return;
3175
3176 case AArch64::FMOVH0:
3177 case AArch64::FMOVS0:
3178 case AArch64::FMOVD0:
3179 emitFMov0(*MI);
3180 return;
3181
3182 case AArch64::MOPSMemoryCopyPseudo:
3183 case AArch64::MOPSMemoryMovePseudo:
3184 case AArch64::MOPSMemorySetPseudo:
3185 case AArch64::MOPSMemorySetTaggingPseudo:
3186 LowerMOPS(*OutStreamer, *MI);
3187 return;
3188
3189 case TargetOpcode::STACKMAP:
3190 return LowerSTACKMAP(*OutStreamer, SM, *MI);
3191
3192 case TargetOpcode::PATCHPOINT:
3193 return LowerPATCHPOINT(*OutStreamer, SM, *MI);
3194
3195 case TargetOpcode::STATEPOINT:
3196 return LowerSTATEPOINT(*OutStreamer, SM, *MI);
3197
3198 case TargetOpcode::FAULTING_OP:
3199 return LowerFAULTING_OP(*MI);
3200
3201 case TargetOpcode::PATCHABLE_FUNCTION_ENTER:
3202 LowerPATCHABLE_FUNCTION_ENTER(*MI);
3203 return;
3204
3205 case TargetOpcode::PATCHABLE_FUNCTION_EXIT:
3206 LowerPATCHABLE_FUNCTION_EXIT(*MI);
3207 return;
3208
3209 case TargetOpcode::PATCHABLE_TAIL_CALL:
3210 LowerPATCHABLE_TAIL_CALL(*MI);
3211 return;
3212 case TargetOpcode::PATCHABLE_EVENT_CALL:
3213 return LowerPATCHABLE_EVENT_CALL(*MI, false);
3214 case TargetOpcode::PATCHABLE_TYPED_EVENT_CALL:
3215 return LowerPATCHABLE_EVENT_CALL(*MI, true);
3216
3217 case AArch64::KCFI_CHECK:
3218 LowerKCFI_CHECK(*MI);
3219 return;
3220
3221 case AArch64::HWASAN_CHECK_MEMACCESS:
3222 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES:
3223 case AArch64::HWASAN_CHECK_MEMACCESS_FIXEDSHADOW:
3224 case AArch64::HWASAN_CHECK_MEMACCESS_SHORTGRANULES_FIXEDSHADOW:
3225 LowerHWASAN_CHECK_MEMACCESS(*MI);
3226 return;
3227
3228 case AArch64::SEH_StackAlloc:
3229 TS->emitARM64WinCFIAllocStack(MI->getOperand(0).getImm());
3230 return;
3231
3232 case AArch64::SEH_SaveFPLR:
3233 TS->emitARM64WinCFISaveFPLR(MI->getOperand(0).getImm());
3234 return;
3235
3236 case AArch64::SEH_SaveFPLR_X:
3237 assert(MI->getOperand(0).getImm() < 0 &&
3238 "Pre increment SEH opcode must have a negative offset");
3239 TS->emitARM64WinCFISaveFPLRX(-MI->getOperand(0).getImm());
3240 return;
3241
3242 case AArch64::SEH_SaveReg:
3243 TS->emitARM64WinCFISaveReg(MI->getOperand(0).getImm(),
3244 MI->getOperand(1).getImm());
3245 return;
3246
3247 case AArch64::SEH_SaveReg_X:
3248 assert(MI->getOperand(1).getImm() < 0 &&
3249 "Pre increment SEH opcode must have a negative offset");
3250 TS->emitARM64WinCFISaveRegX(MI->getOperand(0).getImm(),
3251 -MI->getOperand(1).getImm());
3252 return;
3253
3254 case AArch64::SEH_SaveRegP:
3255 if (MI->getOperand(1).getImm() == 30 && MI->getOperand(0).getImm() >= 19 &&
3256 MI->getOperand(0).getImm() <= 28) {
3257 assert((MI->getOperand(0).getImm() - 19) % 2 == 0 &&
3258 "Register paired with LR must be odd");
3259 TS->emitARM64WinCFISaveLRPair(MI->getOperand(0).getImm(),
3260 MI->getOperand(2).getImm());
3261 return;
3262 }
3263 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3264 "Non-consecutive registers not allowed for save_regp");
3265 TS->emitARM64WinCFISaveRegP(MI->getOperand(0).getImm(),
3266 MI->getOperand(2).getImm());
3267 return;
3268
3269 case AArch64::SEH_SaveRegP_X:
3270 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3271 "Non-consecutive registers not allowed for save_regp_x");
3272 assert(MI->getOperand(2).getImm() < 0 &&
3273 "Pre increment SEH opcode must have a negative offset");
3274 TS->emitARM64WinCFISaveRegPX(MI->getOperand(0).getImm(),
3275 -MI->getOperand(2).getImm());
3276 return;
3277
3278 case AArch64::SEH_SaveFReg:
3279 TS->emitARM64WinCFISaveFReg(MI->getOperand(0).getImm(),
3280 MI->getOperand(1).getImm());
3281 return;
3282
3283 case AArch64::SEH_SaveFReg_X:
3284 assert(MI->getOperand(1).getImm() < 0 &&
3285 "Pre increment SEH opcode must have a negative offset");
3286 TS->emitARM64WinCFISaveFRegX(MI->getOperand(0).getImm(),
3287 -MI->getOperand(1).getImm());
3288 return;
3289
3290 case AArch64::SEH_SaveFRegP:
3291 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3292 "Non-consecutive registers not allowed for save_regp");
3293 TS->emitARM64WinCFISaveFRegP(MI->getOperand(0).getImm(),
3294 MI->getOperand(2).getImm());
3295 return;
3296
3297 case AArch64::SEH_SaveFRegP_X:
3298 assert((MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1) &&
3299 "Non-consecutive registers not allowed for save_regp_x");
3300 assert(MI->getOperand(2).getImm() < 0 &&
3301 "Pre increment SEH opcode must have a negative offset");
3302 TS->emitARM64WinCFISaveFRegPX(MI->getOperand(0).getImm(),
3303 -MI->getOperand(2).getImm());
3304 return;
3305
3306 case AArch64::SEH_SetFP:
3307 TS->emitARM64WinCFISetFP();
3308 return;
3309
3310 case AArch64::SEH_AddFP:
3311 TS->emitARM64WinCFIAddFP(MI->getOperand(0).getImm());
3312 return;
3313
3314 case AArch64::SEH_Nop:
3315 TS->emitARM64WinCFINop();
3316 return;
3317
3318 case AArch64::SEH_PrologEnd:
3319 TS->emitARM64WinCFIPrologEnd();
3320 return;
3321
3322 case AArch64::SEH_EpilogStart:
3323 TS->emitARM64WinCFIEpilogStart();
3324 return;
3325
3326 case AArch64::SEH_EpilogEnd:
3327 TS->emitARM64WinCFIEpilogEnd();
3328 return;
3329
3330 case AArch64::SEH_PACSignLR:
3331 TS->emitARM64WinCFIPACSignLR();
3332 return;
3333
3334 case AArch64::SEH_SaveAnyRegQP:
3335 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3336 "Non-consecutive registers not allowed for save_any_reg");
3337 assert(MI->getOperand(2).getImm() >= 0 &&
3338 "SaveAnyRegQP SEH opcode offset must be non-negative");
3339 assert(MI->getOperand(2).getImm() <= 1008 &&
3340 "SaveAnyRegQP SEH opcode offset must fit into 6 bits");
3341 TS->emitARM64WinCFISaveAnyRegQP(MI->getOperand(0).getImm(),
3342 MI->getOperand(2).getImm());
3343 return;
3344
3345 case AArch64::SEH_SaveAnyRegQPX:
3346 assert(MI->getOperand(1).getImm() - MI->getOperand(0).getImm() == 1 &&
3347 "Non-consecutive registers not allowed for save_any_reg");
3348 assert(MI->getOperand(2).getImm() < 0 &&
3349 "SaveAnyRegQPX SEH opcode offset must be negative");
3350 assert(MI->getOperand(2).getImm() >= -1008 &&
3351 "SaveAnyRegQPX SEH opcode offset must fit into 6 bits");
3352 TS->emitARM64WinCFISaveAnyRegQPX(MI->getOperand(0).getImm(),
3353 -MI->getOperand(2).getImm());
3354 return;
3355
3356 case AArch64::SEH_AllocZ:
3357 assert(MI->getOperand(0).getImm() >= 0 &&
3358 "AllocZ SEH opcode offset must be non-negative");
3359 assert(MI->getOperand(0).getImm() <= 255 &&
3360 "AllocZ SEH opcode offset must fit into 8 bits");
3361 TS->emitARM64WinCFIAllocZ(MI->getOperand(0).getImm());
3362 return;
3363
3364 case AArch64::SEH_SaveZReg:
3365 assert(MI->getOperand(1).getImm() >= 0 &&
3366 "SaveZReg SEH opcode offset must be non-negative");
3367 assert(MI->getOperand(1).getImm() <= 255 &&
3368 "SaveZReg SEH opcode offset must fit into 8 bits");
3369 TS->emitARM64WinCFISaveZReg(MI->getOperand(0).getImm(),
3370 MI->getOperand(1).getImm());
3371 return;
3372
3373 case AArch64::SEH_SavePReg:
3374 assert(MI->getOperand(1).getImm() >= 0 &&
3375 "SavePReg SEH opcode offset must be non-negative");
3376 assert(MI->getOperand(1).getImm() <= 255 &&
3377 "SavePReg SEH opcode offset must fit into 8 bits");
3378 TS->emitARM64WinCFISavePReg(MI->getOperand(0).getImm(),
3379 MI->getOperand(1).getImm());
3380 return;
3381
3382 case AArch64::BLR:
3383 case AArch64::BR: {
3384 recordIfImportCall(MI);
3385 MCInst TmpInst;
3386 MCInstLowering.Lower(MI, TmpInst);
3387 EmitToStreamer(*OutStreamer, TmpInst);
3388 return;
3389 }
3390 case AArch64::CBWPri:
3391 case AArch64::CBXPri:
3392 case AArch64::CBWPrr:
3393 case AArch64::CBXPrr:
3394 emitCBPseudoExpansion(MI);
3395 return;
3396 }
3397
3398 // Finally, do the automated lowerings for everything else.
3399 MCInst TmpInst;
3400 MCInstLowering.Lower(MI, TmpInst);
3401 EmitToStreamer(*OutStreamer, TmpInst);
3402}
3403
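
// Illustrative sketch, not emitted verbatim by this file: the SEH_* cases in
// emitInstruction above turn prologue/epilogue pseudo-instructions into .seh_*
// unwind directives for Windows on AArch64. For a function that saves FP/LR
// with a pre-decrement and then establishes the frame pointer, the printed
// assembly interleaves roughly as follows (the offsets are made-up examples):
//
//   foo:
//     .seh_proc foo
//     stp x29, x30, [sp, #-16]!    ; paired with SEH_SaveFPLR_X (offset -16)
//     .seh_save_fplr_x 16
//     mov x29, sp                  ; paired with SEH_SetFP
//     .seh_set_fp
//     .seh_endprologue
//     ...
//     .seh_endproc
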
3404 void AArch64AsmPrinter::recordIfImportCall(
3405 const MachineInstr *BranchInst) {
3406 if (!EnableImportCallOptimization)
3407 return;
3408
3409 auto [GV, OpFlags] = BranchInst->getMF()->tryGetCalledGlobal(BranchInst);
3410 if (GV && GV->hasDLLImportStorageClass()) {
3411 auto *CallSiteSymbol = MMI->getContext().createNamedTempSymbol("impcall");
3412 OutStreamer->emitLabel(CallSiteSymbol);
3413
3414 auto *CalledSymbol = MCInstLowering.GetGlobalValueSymbol(GV, OpFlags);
3415 SectionToImportedFunctionCalls[OutStreamer->getCurrentSectionOnly()]
3416 .push_back({CallSiteSymbol, CalledSymbol});
3417 }
3418}
3419
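
// Illustrative sketch, not part of the upstream file: the (call-site label,
// callee symbol) pairs that recordIfImportCall gathers per section are only
// meaningful once the whole module has been printed, so a consumer would walk
// the map at end-of-file and describe each call to an imported function to the
// linker. A hypothetical shape of that walk (describeImportCall is an assumed
// helper, not an LLVM API):
//
//   for (auto &[Section, Calls] : SectionToImportedFunctionCalls)
//     for (auto &[CallSiteSym, CalleeSym] : Calls)
//       describeImportCall(Section, CallSiteSym, CalleeSym);
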
3420void AArch64AsmPrinter::emitMachOIFuncStubBody(Module &M, const GlobalIFunc &GI,
3421 MCSymbol *LazyPointer) {
3422 // _ifunc:
3423 // adrp x16, lazy_pointer@GOTPAGE
3424 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3425 // ldr x16, [x16]
3426 // br x16
3427
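// X16 is one of the intra-procedure-call scratch registers (IP0/IP1) that
// AAPCS64 reserves for linker-generated veneers and stubs, so a stub like this
// can clobber it without disturbing the argument, return, or callee-saved
// registers seen by the eventual target.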
3428 {
3429 MCInst Adrp;
3430 Adrp.setOpcode(AArch64::ADRP);
3431 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3432 MCOperand SymPage;
3433 MCInstLowering.lowerOperand(
3434 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3435 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3436 SymPage);
3437 Adrp.addOperand(SymPage);
3438 EmitToStreamer(Adrp);
3439 }
3440
3441 {
3442 MCInst Ldr;
3443 Ldr.setOpcode(AArch64::LDRXui);
3444 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3445 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3446 MCOperand SymPageOff;
3447 MCInstLowering.lowerOperand(
3448 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3449 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
3450 SymPageOff);
3451 Ldr.addOperand(SymPageOff);
3452 Ldr.addOperand(MCOperand::createImm(0));
3453 EmitToStreamer(Ldr);
3454 }
3455
3456 EmitToStreamer(MCInstBuilder(AArch64::LDRXui)
3457 .addReg(AArch64::X16)
3458 .addReg(AArch64::X16)
3459 .addImm(0));
3460
3461 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3462 : AArch64::BR)
3463 .addReg(AArch64::X16));
3464}
3465
3466void AArch64AsmPrinter::emitMachOIFuncStubHelperBody(Module &M,
3467 const GlobalIFunc &GI,
3468 MCSymbol *LazyPointer) {
3469 // These stub helpers are only ever called once, so here we're optimizing for
3470 // minimum size by using the pre-indexed store variants, which saves a few
3471 // bytes of instructions to bump & restore sp.
3472
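// Concretely: FP/LR plus the eight argument-register pairs make nine 16-byte
// pairs (144 bytes). Folding the stack bump into nine pre-indexed stp and nine
// post-indexed ldp instructions avoids a separate "sub sp, sp, #144" and
// "add sp, sp, #144", i.e. two extra 4-byte instructions, in a helper that
// runs exactly once.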
3473 // _ifunc.stub_helper:
3474 // stp fp, lr, [sp, #-16]!
3475 // mov fp, sp
3476 // stp x1, x0, [sp, #-16]!
3477 // stp x3, x2, [sp, #-16]!
3478 // stp x5, x4, [sp, #-16]!
3479 // stp x7, x6, [sp, #-16]!
3480 // stp d1, d0, [sp, #-16]!
3481 // stp d3, d2, [sp, #-16]!
3482 // stp d5, d4, [sp, #-16]!
3483 // stp d7, d6, [sp, #-16]!
3484 // bl _resolver
3485 // adrp x16, lazy_pointer@GOTPAGE
3486 // ldr x16, [x16, lazy_pointer@GOTPAGEOFF]
3487 // str x0, [x16]
3488 // mov x16, x0
3489 // ldp d7, d6, [sp], #16
3490 // ldp d5, d4, [sp], #16
3491 // ldp d3, d2, [sp], #16
3492 // ldp d1, d0, [sp], #16
3493 // ldp x7, x6, [sp], #16
3494 // ldp x5, x4, [sp], #16
3495 // ldp x3, x2, [sp], #16
3496 // ldp x1, x0, [sp], #16
3497 // ldp fp, lr, [sp], #16
3498 // br x16
3499
3500 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3501 .addReg(AArch64::SP)
3502 .addReg(AArch64::FP)
3503 .addReg(AArch64::LR)
3504 .addReg(AArch64::SP)
3505 .addImm(-2));
3506
3507 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3508 .addReg(AArch64::FP)
3509 .addReg(AArch64::SP)
3510 .addImm(0)
3511 .addImm(0));
3512
3513 for (int I = 0; I != 4; ++I)
3514 EmitToStreamer(MCInstBuilder(AArch64::STPXpre)
3515 .addReg(AArch64::SP)
3516 .addReg(AArch64::X1 + 2 * I)
3517 .addReg(AArch64::X0 + 2 * I)
3518 .addReg(AArch64::SP)
3519 .addImm(-2));
3520
3521 for (int I = 0; I != 4; ++I)
3522 EmitToStreamer(MCInstBuilder(AArch64::STPDpre)
3523 .addReg(AArch64::SP)
3524 .addReg(AArch64::D1 + 2 * I)
3525 .addReg(AArch64::D0 + 2 * I)
3526 .addReg(AArch64::SP)
3527 .addImm(-2));
3528
3529 EmitToStreamer(
3530 MCInstBuilder(AArch64::BL)
3531 .addOperand(MCOperand::createExpr(lowerConstant(GI.getResolver()))));
3532
3533 {
3534 MCInst Adrp;
3535 Adrp.setOpcode(AArch64::ADRP);
3536 Adrp.addOperand(MCOperand::createReg(AArch64::X16));
3537 MCOperand SymPage;
3538 MCInstLowering.lowerOperand(
3539 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3540 AArch64II::MO_GOT | AArch64II::MO_PAGE),
3541 SymPage);
3542 Adrp.addOperand(SymPage);
3543 EmitToStreamer(Adrp);
3544 }
3545
3546 {
3547 MCInst Ldr;
3548 Ldr.setOpcode(AArch64::LDRXui);
3549 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3550 Ldr.addOperand(MCOperand::createReg(AArch64::X16));
3551 MCOperand SymPageOff;
3552 MCInstLowering.lowerOperand(
3553 MachineOperand::CreateES(LazyPointer->getName().data() + 1,
3554 AArch64II::MO_GOT | AArch64II::MO_PAGEOFF),
3555 SymPageOff);
3556 Ldr.addOperand(SymPageOff);
3557 Ldr.addOperand(MCOperand::createImm(0));
3558 EmitToStreamer(Ldr);
3559 }
3560
3561 EmitToStreamer(MCInstBuilder(AArch64::STRXui)
3562 .addReg(AArch64::X0)
3563 .addReg(AArch64::X16)
3564 .addImm(0));
3565
3566 EmitToStreamer(MCInstBuilder(AArch64::ADDXri)
3567 .addReg(AArch64::X16)
3568 .addReg(AArch64::X0)
3569 .addImm(0)
3570 .addImm(0));
3571
3572 for (int I = 3; I != -1; --I)
3573 EmitToStreamer(MCInstBuilder(AArch64::LDPDpost)
3574 .addReg(AArch64::SP)
3575 .addReg(AArch64::D1 + 2 * I)
3576 .addReg(AArch64::D0 + 2 * I)
3577 .addReg(AArch64::SP)
3578 .addImm(2));
3579
3580 for (int I = 3; I != -1; --I)
3581 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3582 .addReg(AArch64::SP)
3583 .addReg(AArch64::X1 + 2 * I)
3584 .addReg(AArch64::X0 + 2 * I)
3585 .addReg(AArch64::SP)
3586 .addImm(2));
3587
3588 EmitToStreamer(MCInstBuilder(AArch64::LDPXpost)
3589 .addReg(AArch64::SP)
3590 .addReg(AArch64::FP)
3591 .addReg(AArch64::LR)
3592 .addReg(AArch64::SP)
3593 .addImm(2));
3594
3595 EmitToStreamer(MCInstBuilder(TM.getTargetTriple().isArm64e() ? AArch64::BRAAZ
3596 : AArch64::BR)
3597 .addReg(AArch64::X16));
3598}
3599
3600const MCExpr *AArch64AsmPrinter::lowerConstant(const Constant *CV,
3601 const Constant *BaseCV,
3602 uint64_t Offset) {
3603 if (const GlobalValue *GV = dyn_cast<GlobalValue>(CV)) {
3604 return MCSymbolRefExpr::create(MCInstLowering.GetGlobalValueSymbol(GV, 0),
3605 OutContext);
3606 }
3607
3608 return AsmPrinter::lowerConstant(CV, BaseCV, Offset);
3609}
3610
3611char AArch64AsmPrinter::ID = 0;
3612
3613INITIALIZE_PASS(AArch64AsmPrinter, "aarch64-asm-printer",
3614 "AArch64 Assembly Printer", false, false)
3615
3616 // Force static initialization.
3617 extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
3618 LLVMInitializeAArch64AsmPrinter() {
3619 RegisterAsmPrinter<AArch64AsmPrinter> X(getTheAArch64leTarget());
3620 RegisterAsmPrinter<AArch64AsmPrinter> Y(getTheAArch64beTarget());
3621 RegisterAsmPrinter<AArch64AsmPrinter> Z(getTheAArch64_32Target());
3622 RegisterAsmPrinter<AArch64AsmPrinter> W(getTheARM64Target());
3623 RegisterAsmPrinter<AArch64AsmPrinter> V(getTheARM64_32Target());
3624}