1//===-- X86AsmBackend.cpp - X86 Assembler Backend -------------------------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
17#include "llvm/MC/MCAssembler.h"
19#include "llvm/MC/MCContext.h"
20#include "llvm/MC/MCDwarf.h"
23#include "llvm/MC/MCExpr.h"
24#include "llvm/MC/MCInst.h"
25#include "llvm/MC/MCInstrInfo.h"
29#include "llvm/MC/MCSection.h"
31#include "llvm/MC/MCValue.h"
36
37using namespace llvm;
38
39namespace {
40/// A wrapper for holding a mask of the values from X86::AlignBranchBoundaryKind
41class X86AlignBranchKind {
42private:
43 uint8_t AlignBranchKind = 0;
44
45public:
46 void operator=(const std::string &Val) {
47 if (Val.empty())
48 return;
49 SmallVector<StringRef, 6> BranchTypes;
50 StringRef(Val).split(BranchTypes, '+', -1, false);
51 for (auto BranchType : BranchTypes) {
52 if (BranchType == "fused")
53 addKind(X86::AlignBranchFused);
54 else if (BranchType == "jcc")
55 addKind(X86::AlignBranchJcc);
56 else if (BranchType == "jmp")
57 addKind(X86::AlignBranchJmp);
58 else if (BranchType == "call")
59 addKind(X86::AlignBranchCall);
60 else if (BranchType == "ret")
61 addKind(X86::AlignBranchRet);
62 else if (BranchType == "indirect")
63 addKind(X86::AlignBranchIndirect);
64 else {
65 errs() << "invalid argument " << BranchType.str()
66 << " to -x86-align-branch=; each element must be one of: fused, "
67 "jcc, jmp, call, ret, indirect.(plus separated)\n";
68 }
69 }
70 }
71
72 operator uint8_t() const { return AlignBranchKind; }
73 void addKind(X86::AlignBranchBoundaryKind Value) { AlignBranchKind |= Value; }
74};
75
76X86AlignBranchKind X86AlignBranchKindLoc;
77
78cl::opt<unsigned> X86AlignBranchBoundary(
79 "x86-align-branch-boundary", cl::init(0),
81 "Control how the assembler should align branches with NOP. If the "
82 "boundary's size is not 0, it should be a power of 2 and no less "
83 "than 32. Branches will be aligned to prevent from being across or "
84 "against the boundary of specified size. The default value 0 does not "
85 "align branches."));
86
88 "x86-align-branch",
90 "Specify types of branches to align (plus separated list of types):"
91 "\njcc indicates conditional jumps"
92 "\nfused indicates fused conditional jumps"
93 "\njmp indicates direct unconditional jumps"
94 "\ncall indicates direct and indirect calls"
95 "\nret indicates rets"
96 "\nindirect indicates indirect unconditional jumps"),
97 cl::location(X86AlignBranchKindLoc));
98
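// NOTE (editorial, not part of the original source): the two options above are
// typically used together. An illustrative invocation (assuming the standalone
// llvm-mc assembler) that keeps fused jumps, conditional jumps and
// unconditional jumps from crossing a 32-byte boundary might look like:
//
//   llvm-mc -filetype=obj -triple=x86_64 \
//     -x86-align-branch-boundary=32 -x86-align-branch=fused+jcc+jmp \
//     foo.s -o foo.o
//
// The '+'-separated list is parsed by X86AlignBranchKind::operator= above.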
99cl::opt<bool> X86AlignBranchWithin32BBoundaries(
100 "x86-branches-within-32B-boundaries", cl::init(false),
101 cl::desc(
102 "Align selected instructions to mitigate negative performance impact "
103 "of Intel's micro code update for errata skx102. May break "
104 "assumptions about labels corresponding to particular instructions, "
105 "and should be used with caution."));
106
107cl::opt<unsigned> X86PadMaxPrefixSize(
108 "x86-pad-max-prefix-size", cl::init(0),
109 cl::desc("Maximum number of prefixes to use for padding"));
110
111cl::opt<bool> X86PadForAlign(
112 "x86-pad-for-align", cl::init(false), cl::Hidden,
113 cl::desc("Pad previous instructions to implement align directives"));
114
115cl::opt<bool> X86PadForBranchAlign(
116 "x86-pad-for-branch-align", cl::init(true), cl::Hidden,
117 cl::desc("Pad previous instructions to implement branch alignment"));
118
119class X86AsmBackend : public MCAsmBackend {
120 const MCSubtargetInfo &STI;
121 std::unique_ptr<const MCInstrInfo> MCII;
122 X86AlignBranchKind AlignBranchType;
123 Align AlignBoundary;
124 unsigned TargetPrefixMax = 0;
125
126 MCInst PrevInst;
127 unsigned PrevInstOpcode = 0;
128 MCBoundaryAlignFragment *PendingBA = nullptr;
129 std::pair<MCFragment *, size_t> PrevInstPosition;
130
131 uint8_t determinePaddingPrefix(const MCInst &Inst) const;
132 bool isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const;
133 bool needAlign(const MCInst &Inst) const;
134 bool canPadBranches(MCObjectStreamer &OS) const;
135 bool canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const;
136
137public:
138 X86AsmBackend(const Target &T, const MCSubtargetInfo &STI)
139 : MCAsmBackend(llvm::endianness::little), STI(STI),
140 MCII(T.createMCInstrInfo()) {
141 if (X86AlignBranchWithin32BBoundaries) {
142 // At the moment, this defaults to aligning fused branches, unconditional
143 // jumps, and (unfused) conditional jumps with nops. Both the
144 // instructions aligned and the alignment method (nop vs prefix) may
145 // change in the future.
146 AlignBoundary = assumeAligned(32);
147 AlignBranchType.addKind(X86::AlignBranchFused);
148 AlignBranchType.addKind(X86::AlignBranchJcc);
149 AlignBranchType.addKind(X86::AlignBranchJmp);
150 }
151 // Allow overriding defaults set by main flag
152 if (X86AlignBranchBoundary.getNumOccurrences())
153 AlignBoundary = assumeAligned(X86AlignBranchBoundary);
154 if (X86AlignBranch.getNumOccurrences())
155 AlignBranchType = X86AlignBranchKindLoc;
156 if (X86PadMaxPrefixSize.getNumOccurrences())
157 TargetPrefixMax = X86PadMaxPrefixSize;
158
159 AllowAutoPadding =
160 AlignBoundary != Align(1) && AlignBranchType != X86::AlignBranchNone;
161 AllowEnhancedRelaxation =
162 AllowAutoPadding && TargetPrefixMax != 0 && X86PadForBranchAlign;
163 }
164
165 void emitInstructionBegin(MCObjectStreamer &OS, const MCInst &Inst,
166 const MCSubtargetInfo &STI);
167 void emitInstructionEnd(MCObjectStreamer &OS, const MCInst &Inst);
168
169
170 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override;
171
172 MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const override;
173
174 std::optional<bool> evaluateFixup(const MCFragment &, MCFixup &, MCValue &,
175 uint64_t &) override;
176 void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target,
177 uint8_t *Data, uint64_t Value, bool IsResolved) override;
178
179 bool mayNeedRelaxation(unsigned Opcode, ArrayRef<MCOperand> Operands,
180 const MCSubtargetInfo &STI) const override;
181
182 bool fixupNeedsRelaxationAdvanced(const MCFragment &, const MCFixup &,
183 const MCValue &, uint64_t,
184 bool) const override;
185
186 void relaxInstruction(MCInst &Inst,
187 const MCSubtargetInfo &STI) const override;
188
189 bool padInstructionViaRelaxation(MCFragment &RF, MCCodeEmitter &Emitter,
190 unsigned &RemainingSize) const;
191
192 bool padInstructionViaPrefix(MCFragment &RF, MCCodeEmitter &Emitter,
193 unsigned &RemainingSize) const;
194
195 bool padInstructionEncoding(MCFragment &RF, MCCodeEmitter &Emitter,
196 unsigned &RemainingSize) const;
197
198 bool finishLayout(const MCAssembler &Asm) const override;
199
200 unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const override;
201
202 bool writeNopData(raw_ostream &OS, uint64_t Count,
203 const MCSubtargetInfo *STI) const override;
204};
205} // end anonymous namespace
206
207static bool isRelaxableBranch(unsigned Opcode) {
208 return Opcode == X86::JCC_1 || Opcode == X86::JMP_1;
209}
210
211static unsigned getRelaxedOpcodeBranch(unsigned Opcode,
212 bool Is16BitMode = false) {
213 switch (Opcode) {
214 default:
215 llvm_unreachable("invalid opcode for branch");
216 case X86::JCC_1:
217 return (Is16BitMode) ? X86::JCC_2 : X86::JCC_4;
218 case X86::JMP_1:
219 return (Is16BitMode) ? X86::JMP_2 : X86::JMP_4;
220 }
221}
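// NOTE (editorial, illustrative example, not part of the original source):
// relaxation here only widens a branch displacement. For instance, a
// conditional jump emitted as JCC_1 (a two-byte rel8 form such as
// "jne .Ltarget") is rewritten to JCC_4 (the six-byte rel32 form) in
// 32/64-bit mode, or to JCC_2 (rel16) in 16-bit mode, once the displacement
// no longer fits in a signed 8-bit immediate.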
222
223static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode) {
224 unsigned Opcode = MI.getOpcode();
225 return isRelaxableBranch(Opcode) ? getRelaxedOpcodeBranch(Opcode, Is16BitMode)
226 : X86::getOpcodeForLongImmediateForm(Opcode);
227}
228
229 static X86::CondCode getCondFromBranch(const MCInst &MI,
230 const MCInstrInfo &MCII) {
231 unsigned Opcode = MI.getOpcode();
232 switch (Opcode) {
233 default:
234 return X86::COND_INVALID;
235 case X86::JCC_1: {
236 const MCInstrDesc &Desc = MCII.get(Opcode);
237 return static_cast<X86::CondCode>(
238 MI.getOperand(Desc.getNumOperands() - 1).getImm());
239 }
240 }
241}
242
243 static X86::SecondMacroFusionInstKind
244 classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII) {
245 X86::CondCode CC = getCondFromBranch(MI, MCII);
246 return classifySecondCondCodeInMacroFusion(CC);
247}
248
249/// Check if the instruction uses RIP relative addressing.
250static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII) {
251 unsigned Opcode = MI.getOpcode();
252 const MCInstrDesc &Desc = MCII.get(Opcode);
253 uint64_t TSFlags = Desc.TSFlags;
254 unsigned CurOp = X86II::getOperandBias(Desc);
255 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
256 if (MemoryOperand < 0)
257 return false;
258 unsigned BaseRegNum = MemoryOperand + CurOp + X86::AddrBaseReg;
259 MCRegister BaseReg = MI.getOperand(BaseRegNum).getReg();
260 return (BaseReg == X86::RIP);
261}
262
263/// Check if the instruction is a prefix.
264static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII) {
265 return X86II::isPrefix(MCII.get(Opcode).TSFlags);
266}
267
268/// Check if the instruction is valid as the first instruction in macro fusion.
269static bool isFirstMacroFusibleInst(const MCInst &Inst,
270 const MCInstrInfo &MCII) {
271 // An Intel instruction with RIP relative addressing is not macro fusible.
272 if (isRIPRelative(Inst, MCII))
273 return false;
274 X86::FirstMacroFusionInstKind FIK =
275 X86::classifyFirstOpcodeInMacroFusion(Inst.getOpcode());
276 return FIK != X86::FirstMacroFusionInstKind::Invalid;
277}
278
279/// X86 can reduce the bytes of NOP by padding instructions with prefixes to
280/// get better performance in some cases. Here, we determine which prefix is
281/// the most suitable.
282///
283/// If the instruction has a segment override prefix, use the existing one.
284/// If the target is 64-bit, use the CS.
285/// If the target is 32-bit,
286/// - If the instruction has a ESP/EBP base register, use SS.
287/// - Otherwise use DS.
288uint8_t X86AsmBackend::determinePaddingPrefix(const MCInst &Inst) const {
289 assert((STI.hasFeature(X86::Is32Bit) || STI.hasFeature(X86::Is64Bit)) &&
290 "Prefixes can be added only in 32-bit or 64-bit mode.");
291 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
292 uint64_t TSFlags = Desc.TSFlags;
293
294 // Determine where the memory operand starts, if present.
295 int MemoryOperand = X86II::getMemoryOperandNo(TSFlags);
296 if (MemoryOperand != -1)
297 MemoryOperand += X86II::getOperandBias(Desc);
298
299 MCRegister SegmentReg;
300 if (MemoryOperand >= 0) {
301 // Check for explicit segment override on memory operand.
302 SegmentReg = Inst.getOperand(MemoryOperand + X86::AddrSegmentReg).getReg();
303 }
304
305 switch (TSFlags & X86II::FormMask) {
306 default:
307 break;
308 case X86II::RawFrmDstSrc: {
309 // Check segment override opcode prefix as needed (not for %ds).
310 if (Inst.getOperand(2).getReg() != X86::DS)
311 SegmentReg = Inst.getOperand(2).getReg();
312 break;
313 }
314 case X86II::RawFrmSrc: {
315 // Check segment override opcode prefix as needed (not for %ds).
316 if (Inst.getOperand(1).getReg() != X86::DS)
317 SegmentReg = Inst.getOperand(1).getReg();
318 break;
319 }
320 case X86II::RawFrmMemOffs: {
321 // Check segment override opcode prefix as needed.
322 SegmentReg = Inst.getOperand(1).getReg();
323 break;
324 }
325 }
326
327 if (SegmentReg)
328 return X86::getSegmentOverridePrefixForReg(SegmentReg);
329
330 if (STI.hasFeature(X86::Is64Bit))
331 return X86::CS_Encoding;
332
333 if (MemoryOperand >= 0) {
334 unsigned BaseRegNum = MemoryOperand + X86::AddrBaseReg;
335 MCRegister BaseReg = Inst.getOperand(BaseRegNum).getReg();
336 if (BaseReg == X86::ESP || BaseReg == X86::EBP)
337 return X86::SS_Encoding;
338 }
339 return X86::DS_Encoding;
340}
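// NOTE (editorial, illustrative examples of the rules above, not part of the
// original source). Using the standard x86 segment-override encodings
// (CS = 0x2E, SS = 0x36, DS = 0x3E):
//   64-bit:                addq %rax, (%rdi)  -> pad with CS prefixes (0x2E)
//   32-bit, EBP/ESP base:  movl %eax, 8(%ebp) -> pad with SS prefixes (0x36)
//   32-bit, other base:    movl %eax, (%ecx)  -> pad with DS prefixes (0x3E)
// An instruction that already carries an explicit segment override (e.g.
// %gs:(%rdi)) simply repeats that same prefix.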
341
342/// Check if the two instructions will be macro-fused on the target cpu.
343bool X86AsmBackend::isMacroFused(const MCInst &Cmp, const MCInst &Jcc) const {
344 const MCInstrDesc &InstDesc = MCII->get(Jcc.getOpcode());
345 if (!InstDesc.isConditionalBranch())
346 return false;
347 if (!isFirstMacroFusibleInst(Cmp, *MCII))
348 return false;
349 const X86::FirstMacroFusionInstKind CmpKind =
350 X86::classifyFirstOpcodeInMacroFusion(Cmp.getOpcode());
351 const X86::SecondMacroFusionInstKind BranchKind =
352 classifySecondInstInMacroFusion(Jcc, *MCII);
353 return X86::isMacroFused(CmpKind, BranchKind);
354}
355
356/// Check if the instruction has a variant symbol operand.
357static bool hasVariantSymbol(const MCInst &MI) {
358 for (auto &Operand : MI) {
359 if (!Operand.isExpr())
360 continue;
361 const MCExpr &Expr = *Operand.getExpr();
362 if (Expr.getKind() == MCExpr::SymbolRef &&
363 cast<MCSymbolRefExpr>(&Expr)->getSpecifier())
364 return true;
365 }
366 return false;
367}
368
369/// X86 has certain instructions which enable interrupts exactly one
370/// instruction *after* the instruction which stores to SS. Return true if the
371/// given instruction may have such an interrupt delay slot.
372static bool mayHaveInterruptDelaySlot(unsigned InstOpcode) {
373 switch (InstOpcode) {
374 case X86::POPSS16:
375 case X86::POPSS32:
376 case X86::STI:
377 return true;
378
379 case X86::MOV16sr:
380 case X86::MOV32sr:
381 case X86::MOV64sr:
382 case X86::MOV16sm:
383 // In fact, this is only the case if the first operand is SS. However, as
384 // segment moves occur extremely rarely, this is just a minor pessimization.
385 return true;
386 }
387 return false;
388}
389
390/// Return true if we can insert NOP or prefixes automatically before the
391/// instruction to be emitted.
392bool X86AsmBackend::canPadInst(const MCInst &Inst, MCObjectStreamer &OS) const {
393 if (hasVariantSymbol(Inst))
394 // The linker may rewrite the instruction with a variant symbol operand (e.g.
395 // TLSCALL).
396 return false;
397
398 if (mayHaveInterruptDelaySlot(PrevInstOpcode))
399 // If this instruction follows an interrupt enabling instruction with a one
400 // instruction delay, inserting a nop would change behavior.
401 return false;
402
403 if (isPrefix(PrevInstOpcode, *MCII))
404 // If this instruction follows a prefix, inserting a nop/prefix would change
405 // semantics.
406 return false;
407
408 if (isPrefix(Inst.getOpcode(), *MCII))
409 // If this instruction is a prefix, inserting a prefix would change
410 // semantics.
411 return false;
412
413 // If this instruction follows any data, there is no clear instruction
414 // boundary, inserting a nop/prefix would change semantics.
415 auto Offset = OS.getCurFragSize();
416 if (Offset && (OS.getCurrentFragment() != PrevInstPosition.first ||
417 Offset != PrevInstPosition.second))
418 return false;
419
420 return true;
421}
422
423bool X86AsmBackend::canPadBranches(MCObjectStreamer &OS) const {
424 if (!OS.getAllowAutoPadding())
425 return false;
426 assert(allowAutoPadding() && "incorrect initialization!");
427
428 // We only pad in text section.
429 if (!OS.getCurrentSectionOnly()->isText())
430 return false;
431
432 // Branches only need to be aligned in 32-bit or 64-bit mode.
433 if (!(STI.hasFeature(X86::Is64Bit) || STI.hasFeature(X86::Is32Bit)))
434 return false;
435
436 return true;
437}
438
439/// Check if the instruction operand needs to be aligned.
440bool X86AsmBackend::needAlign(const MCInst &Inst) const {
441 const MCInstrDesc &Desc = MCII->get(Inst.getOpcode());
442 return (Desc.isConditionalBranch() &&
443 (AlignBranchType & X86::AlignBranchJcc)) ||
444 (Desc.isUnconditionalBranch() &&
445 (AlignBranchType & X86::AlignBranchJmp)) ||
446 (Desc.isCall() && (AlignBranchType & X86::AlignBranchCall)) ||
447 (Desc.isReturn() && (AlignBranchType & X86::AlignBranchRet)) ||
448 (Desc.isIndirectBranch() &&
449 (AlignBranchType & X86::AlignBranchIndirect));
450}
451
452 void X86_MC::emitInstruction(MCObjectStreamer &S, const MCInst &Inst,
453 const MCSubtargetInfo &STI) {
454 bool AutoPadding = S.getAllowAutoPadding();
455 if (LLVM_LIKELY(!AutoPadding && !X86PadForAlign)) {
456 S.MCObjectStreamer::emitInstruction(Inst, STI);
457 return;
458 }
459
460 auto &Backend = static_cast<X86AsmBackend &>(S.getAssembler().getBackend());
461 Backend.emitInstructionBegin(S, Inst, STI);
462 S.MCObjectStreamer::emitInstruction(Inst, STI);
463 Backend.emitInstructionEnd(S, Inst);
464}
465
466/// Insert BoundaryAlignFragment before instructions to align branches.
467void X86AsmBackend::emitInstructionBegin(MCObjectStreamer &OS,
468 const MCInst &Inst, const MCSubtargetInfo &STI) {
469 bool CanPadInst = canPadInst(Inst, OS);
470 if (CanPadInst)
471 OS.getCurrentFragment()->setAllowAutoPadding(true);
472
473 if (!canPadBranches(OS))
474 return;
475
476 // NB: PrevInst only valid if canPadBranches is true.
477 if (!isMacroFused(PrevInst, Inst))
478 // Macro fusion doesn't actually happen; clear the pending fragment.
479 PendingBA = nullptr;
480
481 // When branch padding is enabled (basically the skx102 erratum => unlikely),
482 // we call canPadInst (not cheap) twice. However, in the common case, we can
483 // avoid unnecessary calls to that, as this is otherwise only used for
484 // relaxable fragments.
485 if (!CanPadInst)
486 return;
487
488 if (PendingBA) {
489 auto *NextFragment = PendingBA->getNext();
490 assert(NextFragment && "NextFragment should not be null");
491 if (NextFragment == OS.getCurrentFragment())
492 return;
493 // We eagerly create an empty fragment when inserting a fragment
494 // with a variable-size tail.
495 if (NextFragment->getNext() == OS.getCurrentFragment())
496 return;
497
498 // Macro fusion actually happens and there is no other fragment inserted
499 // after the previous instruction.
500 //
501 // Do nothing here since we already inserted a BoundaryAlign fragment when
502 // we met the first instruction in the fused pair and we'll tie them
503 // together in emitInstructionEnd.
504 //
505 // Note: When there is at least one fragment, such as MCAlignFragment,
506 // inserted after the previous instruction, e.g.
507 //
508 // \code
509 // cmp %rax %rcx
510 // .align 16
511 // je .Label0
512 // \endcode
513 //
514 // We will treat the JCC as an unfused branch although it may be fused
515 // with the CMP.
516 return;
517 }
518
519 if (needAlign(Inst) || ((AlignBranchType & X86::AlignBranchFused) &&
520 isFirstMacroFusibleInst(Inst, *MCII))) {
521 // If we meet an unfused branch or the first instruction in a fusible pair,
522 // insert a BoundaryAlign fragment.
523 PendingBA =
524 OS.newSpecialFragment<MCBoundaryAlignFragment>(AlignBoundary, STI);
525 }
526}
527
528/// Set the last fragment to be aligned for the BoundaryAlignFragment.
529void X86AsmBackend::emitInstructionEnd(MCObjectStreamer &OS,
530 const MCInst &Inst) {
531 // Update PrevInstOpcode here, canPadInst() reads that.
532 MCFragment *CF = OS.getCurrentFragment();
533 PrevInstOpcode = Inst.getOpcode();
534 PrevInstPosition = std::make_pair(CF, OS.getCurFragSize());
535
536 if (!canPadBranches(OS))
537 return;
538
539 // PrevInst is only needed if canPadBranches. Copying an MCInst isn't cheap.
540 PrevInst = Inst;
541
542 if (!needAlign(Inst) || !PendingBA)
543 return;
544
545 // Tie the aligned instructions into a pending BoundaryAlign.
546 PendingBA->setLastFragment(CF);
547 PendingBA = nullptr;
548
549 // We need to ensure that further data isn't added to the current
550 // DataFragment, so that we can get the size of instructions later in
551 // MCAssembler::relaxBoundaryAlign. The easiest way is to insert a new empty
552 // DataFragment.
553 OS.newFragment();
554
555 // Update the maximum alignment on the current section if necessary.
556 CF->getParent()->ensureMinAlignment(AlignBoundary);
557}
558
559std::optional<MCFixupKind> X86AsmBackend::getFixupKind(StringRef Name) const {
560 if (STI.getTargetTriple().isOSBinFormatELF()) {
561 unsigned Type;
562 if (STI.getTargetTriple().getArch() == Triple::x86_64) {
563 Type = llvm::StringSwitch<unsigned>(Name)
564#define ELF_RELOC(X, Y) .Case(#X, Y)
565#include "llvm/BinaryFormat/ELFRelocs/x86_64.def"
566#undef ELF_RELOC
567 .Case("BFD_RELOC_NONE", ELF::R_X86_64_NONE)
568 .Case("BFD_RELOC_8", ELF::R_X86_64_8)
569 .Case("BFD_RELOC_16", ELF::R_X86_64_16)
570 .Case("BFD_RELOC_32", ELF::R_X86_64_32)
571 .Case("BFD_RELOC_64", ELF::R_X86_64_64)
572 .Default(-1u);
573 } else {
574 Type = llvm::StringSwitch<unsigned>(Name)
575#define ELF_RELOC(X, Y) .Case(#X, Y)
576#include "llvm/BinaryFormat/ELFRelocs/i386.def"
577#undef ELF_RELOC
578 .Case("BFD_RELOC_NONE", ELF::R_386_NONE)
579 .Case("BFD_RELOC_8", ELF::R_386_8)
580 .Case("BFD_RELOC_16", ELF::R_386_16)
581 .Case("BFD_RELOC_32", ELF::R_386_32)
582 .Default(-1u);
583 }
584 if (Type == -1u)
585 return std::nullopt;
586 return static_cast<MCFixupKind>(FirstLiteralRelocationKind + Type);
587 }
588 return MCAsmBackend::getFixupKind(Name);
589}
590
591MCFixupKindInfo X86AsmBackend::getFixupKindInfo(MCFixupKind Kind) const {
592 const static MCFixupKindInfo Infos[X86::NumTargetFixupKinds] = {
593 // clang-format off
594 {"reloc_riprel_4byte", 0, 32, 0},
595 {"reloc_riprel_4byte_movq_load", 0, 32, 0},
596 {"reloc_riprel_4byte_movq_load_rex2", 0, 32, 0},
597 {"reloc_riprel_4byte_relax", 0, 32, 0},
598 {"reloc_riprel_4byte_relax_rex", 0, 32, 0},
599 {"reloc_riprel_4byte_relax_rex2", 0, 32, 0},
600 {"reloc_riprel_4byte_relax_evex", 0, 32, 0},
601 {"reloc_signed_4byte", 0, 32, 0},
602 {"reloc_signed_4byte_relax", 0, 32, 0},
603 {"reloc_global_offset_table", 0, 32, 0},
604 {"reloc_branch_4byte_pcrel", 0, 32, 0},
605 // clang-format on
606 };
607
608 // Fixup kinds from .reloc directive are like R_386_NONE/R_X86_64_NONE. They
609 // do not require any extra processing.
610 if (mc::isRelocation(Kind))
611 return {};
612
613 if (Kind < FirstTargetFixupKind)
614 return MCAsmBackend::getFixupKindInfo(Kind);
615
616 assert(unsigned(Kind - FirstTargetFixupKind) < X86::NumTargetFixupKinds &&
617 "Invalid kind!");
618 assert(Infos[Kind - FirstTargetFixupKind].Name && "Empty fixup name!");
619 return Infos[Kind - FirstTargetFixupKind];
620}
621
622static unsigned getFixupKindSize(unsigned Kind) {
623 switch (Kind) {
624 default:
625 llvm_unreachable("invalid fixup kind!");
626 case FK_NONE:
627 return 0;
628 case FK_SecRel_1:
629 case FK_Data_1:
630 return 1;
631 case FK_SecRel_2:
632 case FK_Data_2:
633 return 2;
634 case X86::reloc_riprel_4byte:
635 case X86::reloc_riprel_4byte_relax:
636 case X86::reloc_riprel_4byte_relax_rex:
637 case X86::reloc_riprel_4byte_relax_rex2:
638 case X86::reloc_riprel_4byte_movq_load:
639 case X86::reloc_riprel_4byte_movq_load_rex2:
640 case X86::reloc_riprel_4byte_relax_evex:
641 case X86::reloc_signed_4byte:
642 case X86::reloc_signed_4byte_relax:
643 case X86::reloc_global_offset_table:
644 case X86::reloc_branch_4byte_pcrel:
645 case FK_SecRel_4:
646 case FK_Data_4:
647 return 4;
648 case FK_SecRel_8:
649 case FK_Data_8:
650 return 8;
651 }
652}
653
654constexpr char GotSymName[] = "_GLOBAL_OFFSET_TABLE_";
655
656// Adjust PC-relative fixup offsets, which are calculated from the start of the
657// next instruction.
658std::optional<bool> X86AsmBackend::evaluateFixup(const MCFragment &,
659 MCFixup &Fixup,
660 MCValue &Target, uint64_t &) {
661 if (Fixup.isPCRel()) {
662 switch (Fixup.getKind()) {
663 case FK_Data_1:
664 Target.setConstant(Target.getConstant() - 1);
665 break;
666 case FK_Data_2:
667 Target.setConstant(Target.getConstant() - 2);
668 break;
669 default: {
670 Target.setConstant(Target.getConstant() - 4);
671 auto *Add = Target.getAddSym();
672 // If this is a pc-relative load off _GLOBAL_OFFSET_TABLE_:
673 // leaq _GLOBAL_OFFSET_TABLE_(%rip), %r15
674 // this needs to be a GOTPC32 relocation.
675 if (Add && Add->getName() == GotSymName)
676 Fixup = MCFixup::create(Fixup.getOffset(), Fixup.getValue(),
677 X86::reloc_global_offset_table, Fixup.isPCRel());
678 } break;
679 }
680 }
681 // Use default handling for `Value` and `IsResolved`.
682 return {};
683}
684
685void X86AsmBackend::applyFixup(const MCFragment &F, const MCFixup &Fixup,
686 const MCValue &Target, uint8_t *Data,
687 uint64_t Value, bool IsResolved) {
688 // Force relocation when there is a specifier. This might be too conservative
689 // - GAS doesn't emit a relocation for call local@plt; local:.
690 if (Target.getSpecifier())
691 IsResolved = false;
692 maybeAddReloc(F, Fixup, Target, Value, IsResolved);
693
694 auto Kind = Fixup.getKind();
695 if (mc::isRelocation(Kind))
696 return;
697 unsigned Size = getFixupKindSize(Kind);
698
699 assert(Fixup.getOffset() + Size <= F.getSize() && "Invalid fixup offset!");
700
701 int64_t SignedValue = static_cast<int64_t>(Value);
702 if (IsResolved && Fixup.isPCRel()) {
703 // check that PC relative fixup fits into the fixup size.
704 if (Size > 0 && !isIntN(Size * 8, SignedValue))
705 getContext().reportError(Fixup.getLoc(),
706 "value of " + Twine(SignedValue) +
707 " is too large for field of " + Twine(Size) +
708 ((Size == 1) ? " byte." : " bytes."));
709 } else {
710 // Check that upper bits are either all zeros or all ones.
711 // Specifically ignore overflow/underflow as long as the leakage is
712 // limited to the lower bits. This is to remain compatible with
713 // other assemblers.
714 assert((Size == 0 || isIntN(Size * 8 + 1, SignedValue)) &&
715 "Value does not fit in the Fixup field");
716 }
717
718 for (unsigned i = 0; i != Size; ++i)
719 Data[i] = uint8_t(Value >> (i * 8));
720}
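// NOTE (editorial, worked example, not part of the original source): the loop
// above patches the fixup bytes in little-endian order. For a 4-byte fixup
// with Value = 0x12345678, the four bytes written at the fixup location are
// 0x78 0x56 0x34 0x12.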
721
722bool X86AsmBackend::mayNeedRelaxation(unsigned Opcode,
723 ArrayRef<MCOperand> Operands,
724 const MCSubtargetInfo &STI) const {
725 unsigned SkipOperands = X86::isCCMPCC(Opcode) ? 2 : 0;
726 return isRelaxableBranch(Opcode) ||
727 (X86::getOpcodeForLongImmediateForm(Opcode) != Opcode &&
728 Operands[Operands.size() - 1 - SkipOperands].isExpr());
729}
730
731bool X86AsmBackend::fixupNeedsRelaxationAdvanced(const MCFragment &,
732 const MCFixup &Fixup,
733 const MCValue &Target,
734 uint64_t Value,
735 bool Resolved) const {
736 // If resolved, relax if the value is too big for a (signed) i8.
737 //
738 // Currently, `jmp local@plt` relaxes JMP even if the offset is small,
739 // different from gas.
740 if (Resolved)
741 return !isInt<8>(Value) || Target.getSpecifier();
742
743 // Otherwise, relax unless there is a @ABS8 specifier.
744 if (Fixup.getKind() == FK_Data_1 && Target.getAddSym() &&
745 Target.getSpecifier() == X86::S_ABS8)
746 return false;
747 return true;
748}
749
750// FIXME: Can tblgen help at all here to verify there aren't other instructions
751// we can relax?
752void X86AsmBackend::relaxInstruction(MCInst &Inst,
753 const MCSubtargetInfo &STI) const {
754 // The only relaxation X86 does is from a 1-byte pcrel to a 4-byte pcrel.
755 bool Is16BitMode = STI.hasFeature(X86::Is16Bit);
756 unsigned RelaxedOp = getRelaxedOpcode(Inst, Is16BitMode);
757 assert(RelaxedOp != Inst.getOpcode());
758 Inst.setOpcode(RelaxedOp);
759}
760
761bool X86AsmBackend::padInstructionViaPrefix(MCFragment &RF,
762 MCCodeEmitter &Emitter,
763 unsigned &RemainingSize) const {
764 if (!RF.getAllowAutoPadding())
765 return false;
766 // If the instruction isn't fully relaxed, shifting it around might require a
767 // larger value for one of the fixups than can be encoded. The outer loop
768 // will also catch this before moving to the next instruction, but we need to
769 // prevent padding this single instruction as well.
770 if (mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
771 *RF.getSubtargetInfo()))
772 return false;
773
774 const unsigned OldSize = RF.getVarSize();
775 if (OldSize == 15)
776 return false;
777
778 const unsigned MaxPossiblePad = std::min(15 - OldSize, RemainingSize);
779 const unsigned RemainingPrefixSize = [&]() -> unsigned {
780 SmallString<15> Code;
781 X86_MC::emitPrefix(Emitter, RF.getInst(), Code, STI);
782 assert(Code.size() < 15 && "The number of prefixes must be less than 15.");
783
784 // TODO: It turns out we need a decent amount of plumbing for the target
785 // specific bits to determine number of prefixes its safe to add. Various
786 // targets (older chips mostly, but also Atom family) encounter decoder
787 // stalls with too many prefixes. For testing purposes, we set the value
788 // externally for the moment.
789 unsigned ExistingPrefixSize = Code.size();
790 if (TargetPrefixMax <= ExistingPrefixSize)
791 return 0;
792 return TargetPrefixMax - ExistingPrefixSize;
793 }();
794 const unsigned PrefixBytesToAdd =
795 std::min(MaxPossiblePad, RemainingPrefixSize);
796 if (PrefixBytesToAdd == 0)
797 return false;
798
799 const uint8_t Prefix = determinePaddingPrefix(RF.getInst());
800
801 SmallString<256> Code;
802 Code.append(PrefixBytesToAdd, Prefix);
803 Code.append(RF.getVarContents().begin(), RF.getVarContents().end());
804 RF.setVarContents(Code);
805
806 // Adjust the fixups for the change in offsets
807 for (auto &F : RF.getVarFixups())
808 F.setOffset(PrefixBytesToAdd + F.getOffset());
809
810 RemainingSize -= PrefixBytesToAdd;
811 return true;
812}
813
814bool X86AsmBackend::padInstructionViaRelaxation(MCFragment &RF,
815 MCCodeEmitter &Emitter,
816 unsigned &RemainingSize) const {
817 if (!mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
818 *RF.getSubtargetInfo()))
819 // TODO: There are lots of other tricks we could apply for increasing
820 // encoding size without impacting performance.
821 return false;
822
823 MCInst Relaxed = RF.getInst();
824 relaxInstruction(Relaxed, *RF.getSubtargetInfo());
825
826 SmallVector<MCFixup, 4> Fixups;
827 SmallString<15> Code;
828 Emitter.encodeInstruction(Relaxed, Code, Fixups, *RF.getSubtargetInfo());
829 const unsigned OldSize = RF.getVarContents().size();
830 const unsigned NewSize = Code.size();
831 assert(NewSize >= OldSize && "size decrease during relaxation?");
832 unsigned Delta = NewSize - OldSize;
833 if (Delta > RemainingSize)
834 return false;
835 RF.setInst(Relaxed);
836 RF.setVarContents(Code);
837 RF.setVarFixups(Fixups);
838 RemainingSize -= Delta;
839 return true;
840}
841
842bool X86AsmBackend::padInstructionEncoding(MCFragment &RF,
843 MCCodeEmitter &Emitter,
844 unsigned &RemainingSize) const {
845 bool Changed = false;
846 if (RemainingSize != 0)
847 Changed |= padInstructionViaRelaxation(RF, Emitter, RemainingSize);
848 if (RemainingSize != 0)
849 Changed |= padInstructionViaPrefix(RF, Emitter, RemainingSize);
850 return Changed;
851}
852
853bool X86AsmBackend::finishLayout(const MCAssembler &Asm) const {
854 // See if we can further relax some instructions to cut down on the number of
855 // nop bytes required for code alignment. The actual win is in reducing
856 // instruction count, not number of bytes. Modern X86-64 can easily end up
857 // decode limited. It is often better to reduce the number of instructions
858 // (i.e. eliminate nops) even at the cost of increasing the size and
859 // complexity of others.
860 if (!X86PadForAlign && !X86PadForBranchAlign)
861 return false;
862
863 // The processed regions are delimited by LabeledFragments. -g may have more
864 // MCSymbols and therefore different relaxation results. X86PadForAlign is
865 // disabled by default to eliminate the -g vs non -g difference.
866 DenseSet<MCFragment *> LabeledFragments;
867 for (const MCSymbol &S : Asm.symbols())
868 LabeledFragments.insert(S.getFragment());
869
870 bool Changed = false;
871 for (MCSection &Sec : Asm) {
872 if (!Sec.isText())
873 continue;
874
875 SmallVector<MCFragment *, 4> Relaxable;
876 for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
877 MCFragment &F = *I;
878
879 if (LabeledFragments.count(&F))
880 Relaxable.clear();
881
882 if (F.getKind() == MCFragment::FT_Data) // Skip and ignore
883 continue;
884
885 if (F.getKind() == MCFragment::FT_Relaxable) {
886 auto &RF = cast<MCFragment>(*I);
887 Relaxable.push_back(&RF);
888 continue;
889 }
890
891 auto canHandle = [](MCFragment &F) -> bool {
892 switch (F.getKind()) {
893 default:
894 return false;
895 case MCFragment::FT_Align:
896 return X86PadForAlign;
897 case MCFragment::FT_BoundaryAlign:
898 return X86PadForBranchAlign;
899 }
900 };
901 // For any unhandled kind, assume we can't change layout.
902 if (!canHandle(F)) {
903 Relaxable.clear();
904 continue;
905 }
906
907 // To keep the effects local, prefer to relax instructions closest to
908 // the align directive. This is purely about human understandability
909 // of the resulting code. If we later find a reason to expand
910 // particular instructions over others, we can adjust.
911 unsigned RemainingSize = Asm.computeFragmentSize(F) - F.getFixedSize();
912 while (!Relaxable.empty() && RemainingSize != 0) {
913 auto &RF = *Relaxable.pop_back_val();
914 // Give the backend a chance to play any tricks it wishes to increase
915 // the encoding size of the given instruction. Target independent code
916 // will try further relaxation, but targets may play further tricks.
917 Changed |= padInstructionEncoding(RF, Asm.getEmitter(), RemainingSize);
918
919 // If we have an instruction which hasn't been fully relaxed, we can't
920 // skip past it and insert bytes before it. Changing its starting
921 // offset might require a larger negative offset than it can encode.
922 // We don't need to worry about larger positive offsets as none of the
923 // possible offsets between this and our align are visible, and the
924 // ones afterwards aren't changing.
925 if (mayNeedRelaxation(RF.getOpcode(), RF.getOperands(),
926 *RF.getSubtargetInfo()))
927 break;
928 }
929 Relaxable.clear();
930
931 // If we're looking at a boundary align, make sure we don't try to pad
932 // its target instructions for some following directive. Doing so would
933 // break the alignment of the current boundary align.
934 if (auto *BF = dyn_cast<MCBoundaryAlignFragment>(&F)) {
935 cast<MCBoundaryAlignFragment>(F).setSize(RemainingSize);
936 Changed = true;
937 const MCFragment *LastFragment = BF->getLastFragment();
938 if (!LastFragment)
939 continue;
940 while (&*I != LastFragment)
941 ++I;
942 }
943 }
944 }
945
946 return Changed;
947}
948
949unsigned X86AsmBackend::getMaximumNopSize(const MCSubtargetInfo &STI) const {
950 if (STI.hasFeature(X86::Is16Bit))
951 return 4;
952 if (!STI.hasFeature(X86::FeatureNOPL) && !STI.hasFeature(X86::Is64Bit))
953 return 1;
954 if (STI.hasFeature(X86::TuningFast7ByteNOP))
955 return 7;
956 if (STI.hasFeature(X86::TuningFast15ByteNOP))
957 return 15;
958 if (STI.hasFeature(X86::TuningFast11ByteNOP))
959 return 11;
960 // FIXME: handle 32-bit mode
961 // 15 bytes is the longest single NOP instruction, but 10 bytes is
962 // commonly the longest that can be efficiently decoded.
963 return 10;
964}
965
966/// Write a sequence of optimal nops to the output, covering \p Count
967/// bytes.
968/// \return - true on success, false on failure
969bool X86AsmBackend::writeNopData(raw_ostream &OS, uint64_t Count,
970 const MCSubtargetInfo *STI) const {
971 static const char Nops32Bit[10][11] = {
972 // nop
973 "\x90",
974 // xchg %ax,%ax
975 "\x66\x90",
976 // nopl (%[re]ax)
977 "\x0f\x1f\x00",
978 // nopl 0(%[re]ax)
979 "\x0f\x1f\x40\x00",
980 // nopl 0(%[re]ax,%[re]ax,1)
981 "\x0f\x1f\x44\x00\x00",
982 // nopw 0(%[re]ax,%[re]ax,1)
983 "\x66\x0f\x1f\x44\x00\x00",
984 // nopl 0L(%[re]ax)
985 "\x0f\x1f\x80\x00\x00\x00\x00",
986 // nopl 0L(%[re]ax,%[re]ax,1)
987 "\x0f\x1f\x84\x00\x00\x00\x00\x00",
988 // nopw 0L(%[re]ax,%[re]ax,1)
989 "\x66\x0f\x1f\x84\x00\x00\x00\x00\x00",
990 // nopw %cs:0L(%[re]ax,%[re]ax,1)
991 "\x66\x2e\x0f\x1f\x84\x00\x00\x00\x00\x00",
992 };
993
994 // 16-bit mode uses different nop patterns than 32-bit.
995 static const char Nops16Bit[4][11] = {
996 // nop
997 "\x90",
998 // xchg %eax,%eax
999 "\x66\x90",
1000 // lea 0(%si),%si
1001 "\x8d\x74\x00",
1002 // lea 0w(%si),%si
1003 "\x8d\xb4\x00\x00",
1004 };
1005
1006 const char(*Nops)[11] =
1007 STI->hasFeature(X86::Is16Bit) ? Nops16Bit : Nops32Bit;
1008
1009 uint64_t MaxNopLength = (uint64_t)getMaximumNopSize(*STI);
1010
1011 // Emit as many MaxNopLength NOPs as needed, then emit a NOP of the remaining
1012 // length.
1013 do {
1014 const uint8_t ThisNopLength = (uint8_t) std::min(Count, MaxNopLength);
1015 const uint8_t Prefixes = ThisNopLength <= 10 ? 0 : ThisNopLength - 10;
1016 for (uint8_t i = 0; i < Prefixes; i++)
1017 OS << '\x66';
1018 const uint8_t Rest = ThisNopLength - Prefixes;
1019 if (Rest != 0)
1020 OS.write(Nops[Rest - 1], Rest);
1021 Count -= ThisNopLength;
1022 } while (Count != 0);
1023
1024 return true;
1025}
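// NOTE (editorial, worked example, not part of the original source): on a
// subtarget where getMaximumNopSize() returns 15, a request for Count = 17
// bytes is emitted as one 15-byte NOP (five 0x66 prefixes followed by the
// 10-byte "nopw %cs:0L(%[re]ax,%[re]ax,1)" pattern) and then a 2-byte
// "xchg %ax,%ax".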
1026
1027/* *** */
1028
1029namespace {
1030
1031class ELFX86AsmBackend : public X86AsmBackend {
1032public:
1033 uint8_t OSABI;
1034 ELFX86AsmBackend(const Target &T, uint8_t OSABI, const MCSubtargetInfo &STI)
1035 : X86AsmBackend(T, STI), OSABI(OSABI) {}
1036};
1037
1038class ELFX86_32AsmBackend : public ELFX86AsmBackend {
1039public:
1040 ELFX86_32AsmBackend(const Target &T, uint8_t OSABI,
1041 const MCSubtargetInfo &STI)
1042 : ELFX86AsmBackend(T, OSABI, STI) {}
1043
1044 std::unique_ptr<MCObjectTargetWriter>
1045 createObjectTargetWriter() const override {
1046 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI, ELF::EM_386);
1047 }
1048};
1049
1050class ELFX86_X32AsmBackend : public ELFX86AsmBackend {
1051public:
1052 ELFX86_X32AsmBackend(const Target &T, uint8_t OSABI,
1053 const MCSubtargetInfo &STI)
1054 : ELFX86AsmBackend(T, OSABI, STI) {}
1055
1056 std::unique_ptr<MCObjectTargetWriter>
1057 createObjectTargetWriter() const override {
1058 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1059 ELF::EM_X86_64);
1060 }
1061};
1062
1063class ELFX86_IAMCUAsmBackend : public ELFX86AsmBackend {
1064public:
1065 ELFX86_IAMCUAsmBackend(const Target &T, uint8_t OSABI,
1066 const MCSubtargetInfo &STI)
1067 : ELFX86AsmBackend(T, OSABI, STI) {}
1068
1069 std::unique_ptr<MCObjectTargetWriter>
1070 createObjectTargetWriter() const override {
1071 return createX86ELFObjectWriter(/*IsELF64*/ false, OSABI,
1072 ELF::EM_IAMCU);
1073 }
1074};
1075
1076class ELFX86_64AsmBackend : public ELFX86AsmBackend {
1077public:
1078 ELFX86_64AsmBackend(const Target &T, uint8_t OSABI,
1079 const MCSubtargetInfo &STI)
1080 : ELFX86AsmBackend(T, OSABI, STI) {}
1081
1082 std::unique_ptr<MCObjectTargetWriter>
1083 createObjectTargetWriter() const override {
1084 return createX86ELFObjectWriter(/*IsELF64*/ true, OSABI, ELF::EM_X86_64);
1085 }
1086};
1087
1088class WindowsX86AsmBackend : public X86AsmBackend {
1089 bool Is64Bit;
1090
1091public:
1092 WindowsX86AsmBackend(const Target &T, bool is64Bit,
1093 const MCSubtargetInfo &STI)
1094 : X86AsmBackend(T, STI)
1095 , Is64Bit(is64Bit) {
1096 }
1097
1098 std::optional<MCFixupKind> getFixupKind(StringRef Name) const override {
1099 return StringSwitch<std::optional<MCFixupKind>>(Name)
1100 .Case("dir32", FK_Data_4)
1101 .Case("secrel32", FK_SecRel_4)
1102 .Case("secidx", FK_SecRel_2)
1103 .Default(MCAsmBackend::getFixupKind(Name));
1104 }
1105
1106 std::unique_ptr<MCObjectTargetWriter>
1107 createObjectTargetWriter() const override {
1108 return createX86WinCOFFObjectWriter(Is64Bit);
1109 }
1110};
1111
1112namespace CU {
1113
1114 /// Compact unwind encoding values.
1115 enum CompactUnwindEncodings {
1116 /// [RE]BP based frame where [RE]BP is pushed on the stack immediately after
1117 /// the return address, then [RE]SP is moved to [RE]BP.
1118 UNWIND_MODE_BP_FRAME = 0x01000000,
1119
1120 /// A frameless function with a small constant stack size.
1121 UNWIND_MODE_STACK_IMMD = 0x02000000,
1122
1123 /// A frameless function with a large constant stack size.
1124 UNWIND_MODE_STACK_IND = 0x03000000,
1125
1126 /// No compact unwind encoding is available.
1127 UNWIND_MODE_DWARF = 0x04000000,
1128
1129 /// Mask for encoding the frame registers.
1130 UNWIND_BP_FRAME_REGISTERS = 0x00007FFF,
1131
1132 /// Mask for encoding the frameless registers.
1133 UNWIND_FRAMELESS_STACK_REG_PERMUTATION = 0x000003FF
1134 };
1135
1136} // namespace CU
1137
1138class DarwinX86AsmBackend : public X86AsmBackend {
1139 const MCRegisterInfo &MRI;
1140
1141 /// Number of registers that can be saved in a compact unwind encoding.
1142 enum { CU_NUM_SAVED_REGS = 6 };
1143
1144 mutable unsigned SavedRegs[CU_NUM_SAVED_REGS];
1145 Triple TT;
1146 bool Is64Bit;
1147
1148 unsigned OffsetSize; ///< Offset of a "push" instruction.
1149 unsigned MoveInstrSize; ///< Size of a "move" instruction.
1150 unsigned StackDivide; ///< Amount to adjust stack size by.
1151protected:
1152 /// Size of a "push" instruction for the given register.
1153 unsigned PushInstrSize(MCRegister Reg) const {
1154 switch (Reg.id()) {
1155 case X86::EBX:
1156 case X86::ECX:
1157 case X86::EDX:
1158 case X86::EDI:
1159 case X86::ESI:
1160 case X86::EBP:
1161 case X86::RBX:
1162 case X86::RBP:
1163 return 1;
1164 case X86::R12:
1165 case X86::R13:
1166 case X86::R14:
1167 case X86::R15:
1168 return 2;
1169 }
1170 return 1;
1171 }
1172
1173private:
1174 /// Get the compact unwind number for a given register. The number
1175 /// corresponds to the enum lists in compact_unwind_encoding.h.
1176 int getCompactUnwindRegNum(unsigned Reg) const {
1177 static const MCPhysReg CU32BitRegs[7] = {
1178 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
1179 };
1180 static const MCPhysReg CU64BitRegs[] = {
1181 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
1182 };
1183 const MCPhysReg *CURegs = Is64Bit ? CU64BitRegs : CU32BitRegs;
1184 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
1185 if (*CURegs == Reg)
1186 return Idx;
1187
1188 return -1;
1189 }
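// NOTE (editorial, illustrative, not part of the original source): the index
// returned is the 1-based position in the table above. In 64-bit mode, for
// example, getCompactUnwindRegNum(X86::RBX) == 1 and
// getCompactUnwindRegNum(X86::RBP) == 6, while a register outside the table
// (e.g. X86::RAX) yields -1, which makes the callers fall back to
// CU::UNWIND_MODE_DWARF.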
1190
1191 /// Return the registers encoded for a compact encoding with a frame
1192 /// pointer.
1193 uint32_t encodeCompactUnwindRegistersWithFrame() const {
1194 // Encode the registers in the order they were saved --- 3-bits per
1195 // register. The list of saved registers is assumed to be in reverse
1196 // order. The registers are numbered from 1 to CU_NUM_SAVED_REGS.
1197 uint32_t RegEnc = 0;
1198 for (int i = 0, Idx = 0; i != CU_NUM_SAVED_REGS; ++i) {
1199 unsigned Reg = SavedRegs[i];
1200 if (Reg == 0) break;
1201
1202 int CURegNum = getCompactUnwindRegNum(Reg);
1203 if (CURegNum == -1) return ~0U;
1204
1205 // Encode the 3-bit register number in order, skipping over 3-bits for
1206 // each register.
1207 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
1208 }
1209
1210 assert((RegEnc & 0x3FFFF) == RegEnc &&
1211 "Invalid compact register encoding!");
1212 return RegEnc;
1213 }
1214
1215 /// Create the permutation encoding used with frameless stacks. It is
1216 /// passed the number of registers to be saved and an array of the registers
1217 /// saved.
1218 uint32_t encodeCompactUnwindRegistersWithoutFrame(unsigned RegCount) const {
1219 // The saved registers are numbered from 1 to 6. In order to encode the
1220 // order in which they were saved, we re-number them according to their
1221 // place in the register order. The re-numbering is relative to the last
1222 // re-numbered register. E.g., if we have registers {6, 2, 4, 5} saved in
1223 // that order:
1224 //
1225 // Orig Re-Num
1226 // ---- ------
1227 // 6 6
1228 // 2 2
1229 // 4 3
1230 // 5 3
1231 //
1232 for (unsigned i = 0; i < RegCount; ++i) {
1233 int CUReg = getCompactUnwindRegNum(SavedRegs[i]);
1234 if (CUReg == -1) return ~0U;
1235 SavedRegs[i] = CUReg;
1236 }
1237
1238 // Reverse the list.
1239 std::reverse(&SavedRegs[0], &SavedRegs[CU_NUM_SAVED_REGS]);
1240
1241 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
1242 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i){
1243 unsigned Countless = 0;
1244 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
1245 if (SavedRegs[j] < SavedRegs[i])
1246 ++Countless;
1247
1248 RenumRegs[i] = SavedRegs[i] - Countless - 1;
1249 }
1250
1251 // Take the renumbered values and encode them into a 10-bit number.
1252 uint32_t permutationEncoding = 0;
1253 switch (RegCount) {
1254 case 6:
1255 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
1256 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
1257 + RenumRegs[4];
1258 break;
1259 case 5:
1260 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
1261 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
1262 + RenumRegs[5];
1263 break;
1264 case 4:
1265 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
1266 + 3 * RenumRegs[4] + RenumRegs[5];
1267 break;
1268 case 3:
1269 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
1270 + RenumRegs[5];
1271 break;
1272 case 2:
1273 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
1274 break;
1275 case 1:
1276 permutationEncoding |= RenumRegs[5];
1277 break;
1278 }
1279
1280 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
1281 "Invalid compact register encoding!");
1282 return permutationEncoding;
1283 }
1284
1285public:
1286 DarwinX86AsmBackend(const Target &T, const MCRegisterInfo &MRI,
1287 const MCSubtargetInfo &STI)
1288 : X86AsmBackend(T, STI), MRI(MRI), TT(STI.getTargetTriple()),
1289 Is64Bit(TT.isArch64Bit()) {
1290 memset(SavedRegs, 0, sizeof(SavedRegs));
1291 OffsetSize = Is64Bit ? 8 : 4;
1292 MoveInstrSize = Is64Bit ? 3 : 2;
1293 StackDivide = Is64Bit ? 8 : 4;
1294 }
1295
1296 std::unique_ptr<MCObjectTargetWriter>
1297 createObjectTargetWriter() const override {
1298 uint32_t CPUType = cantFail(MachO::getCPUType(TT));
1299 uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
1300 return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
1301 }
1302
1303 /// Implementation of algorithm to generate the compact unwind encoding
1304 /// for the CFI instructions.
1305 uint64_t generateCompactUnwindEncoding(const MCDwarfFrameInfo *FI,
1306 const MCContext *Ctxt) const override {
1307 ArrayRef<MCCFIInstruction> Instrs = FI->Instructions;
1308 if (Instrs.empty()) return 0;
1309 if (!isDarwinCanonicalPersonality(FI->Personality) &&
1310 !Ctxt->emitCompactUnwindNonCanonical())
1311 return CU::UNWIND_MODE_DWARF;
1312
1313 // Reset the saved registers.
1314 unsigned SavedRegIdx = 0;
1315 memset(SavedRegs, 0, sizeof(SavedRegs));
1316
1317 bool HasFP = false;
1318
1319 // Encode that we are using EBP/RBP as the frame pointer.
1320 uint64_t CompactUnwindEncoding = 0;
1321
1322 unsigned SubtractInstrIdx = Is64Bit ? 3 : 2;
1323 unsigned InstrOffset = 0;
1324 unsigned StackAdjust = 0;
1325 uint64_t StackSize = 0;
1326 int64_t MinAbsOffset = std::numeric_limits<int64_t>::max();
1327
1328 for (const MCCFIInstruction &Inst : Instrs) {
1329 switch (Inst.getOperation()) {
1330 default:
1331 // Any other CFI directives indicate a frame that we aren't prepared
1332 // to represent via compact unwind, so just bail out.
1333 return CU::UNWIND_MODE_DWARF;
1334 case MCCFIInstruction::OpDefCfaRegister: {
1335 // Defines a frame pointer. E.g.
1336 //
1337 // movq %rsp, %rbp
1338 // L0:
1339 // .cfi_def_cfa_register %rbp
1340 //
1341 HasFP = true;
1342
1343 // If the frame pointer is other than esp/rsp, we do not have a way to
1344 // generate a compact unwinding representation, so bail out.
1345 if (*MRI.getLLVMRegNum(Inst.getRegister(), true) !=
1346 (Is64Bit ? X86::RBP : X86::EBP))
1347 return CU::UNWIND_MODE_DWARF;
1348
1349 // Reset the counts.
1350 memset(SavedRegs, 0, sizeof(SavedRegs));
1351 StackAdjust = 0;
1352 SavedRegIdx = 0;
1353 MinAbsOffset = std::numeric_limits<int64_t>::max();
1354 InstrOffset += MoveInstrSize;
1355 break;
1356 }
1357 case MCCFIInstruction::OpDefCfaOffset: {
1358 // Defines a new offset for the CFA. E.g.
1359 //
1360 // With frame:
1361 //
1362 // pushq %rbp
1363 // L0:
1364 // .cfi_def_cfa_offset 16
1365 //
1366 // Without frame:
1367 //
1368 // subq $72, %rsp
1369 // L0:
1370 // .cfi_def_cfa_offset 80
1371 //
1372 StackSize = Inst.getOffset() / StackDivide;
1373 break;
1374 }
1375 case MCCFIInstruction::OpOffset: {
1376 // Defines a "push" of a callee-saved register. E.g.
1377 //
1378 // pushq %r15
1379 // pushq %r14
1380 // pushq %rbx
1381 // L0:
1382 // subq $120, %rsp
1383 // L1:
1384 // .cfi_offset %rbx, -40
1385 // .cfi_offset %r14, -32
1386 // .cfi_offset %r15, -24
1387 //
1388 if (SavedRegIdx == CU_NUM_SAVED_REGS)
1389 // If there are too many saved registers, we cannot use a compact
1390 // unwind encoding.
1391 return CU::UNWIND_MODE_DWARF;
1392
1393 MCRegister Reg = *MRI.getLLVMRegNum(Inst.getRegister(), true);
1394 SavedRegs[SavedRegIdx++] = Reg;
1395 StackAdjust += OffsetSize;
1396 MinAbsOffset = std::min(MinAbsOffset, std::abs(Inst.getOffset()));
1397 InstrOffset += PushInstrSize(Reg);
1398 break;
1399 }
1400 }
1401 }
1402
1403 StackAdjust /= StackDivide;
1404
1405 if (HasFP) {
1406 if ((StackAdjust & 0xFF) != StackAdjust)
1407 // Offset was too big for a compact unwind encoding.
1408 return CU::UNWIND_MODE_DWARF;
1409
1410 // We don't attempt to track a real StackAdjust, so if the saved registers
1411 // aren't adjacent to rbp we can't cope.
1412 if (SavedRegIdx != 0 && MinAbsOffset != 3 * (int)OffsetSize)
1413 return CU::UNWIND_MODE_DWARF;
1414
1415 // Get the encoding of the saved registers when we have a frame pointer.
1416 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame();
1417 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1418
1419 CompactUnwindEncoding |= CU::UNWIND_MODE_BP_FRAME;
1420 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
1421 CompactUnwindEncoding |= RegEnc & CU::UNWIND_BP_FRAME_REGISTERS;
1422 } else {
1423 SubtractInstrIdx += InstrOffset;
1424 ++StackAdjust;
1425
1426 if ((StackSize & 0xFF) == StackSize) {
1427 // Frameless stack with a small stack size.
1428 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IMMD;
1429
1430 // Encode the stack size.
1431 CompactUnwindEncoding |= (StackSize & 0xFF) << 16;
1432 } else {
1433 if ((StackAdjust & 0x7) != StackAdjust)
1434 // The extra stack adjustments are too big for us to handle.
1435 return CU::UNWIND_MODE_DWARF;
1436
1437 // Frameless stack with an offset too large for us to encode compactly.
1438 CompactUnwindEncoding |= CU::UNWIND_MODE_STACK_IND;
1439
1440 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
1441 // instruction.
1442 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
1443
1444 // Encode any extra stack adjustments (done via push instructions).
1445 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
1446 }
1447
1448 // Encode the number of registers saved. (Reverse the list first.)
1449 std::reverse(&SavedRegs[0], &SavedRegs[SavedRegIdx]);
1450 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
1451
1452 // Get the encoding of the saved registers when we don't have a frame
1453 // pointer.
1454 uint32_t RegEnc = encodeCompactUnwindRegistersWithoutFrame(SavedRegIdx);
1455 if (RegEnc == ~0U) return CU::UNWIND_MODE_DWARF;
1456
1457 // Encode the register encoding.
1458 CompactUnwindEncoding |=
1459 RegEnc & CU::UNWIND_FRAMELESS_STACK_REG_PERMUTATION;
1460 }
1461
1462 return CompactUnwindEncoding;
1463 }
1464};
1465
1466} // end anonymous namespace
1467
1468 MCAsmBackend *llvm::createX86_32AsmBackend(const Target &T,
1469 const MCSubtargetInfo &STI,
1470 const MCRegisterInfo &MRI,
1471 const MCTargetOptions &Options) {
1472 const Triple &TheTriple = STI.getTargetTriple();
1473 if (TheTriple.isOSBinFormatMachO())
1474 return new DarwinX86AsmBackend(T, MRI, STI);
1475
1476 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1477 return new WindowsX86AsmBackend(T, false, STI);
1478
1479 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1480
1481 if (TheTriple.isOSIAMCU())
1482 return new ELFX86_IAMCUAsmBackend(T, OSABI, STI);
1483
1484 return new ELFX86_32AsmBackend(T, OSABI, STI);
1485}
1486
1487 MCAsmBackend *llvm::createX86_64AsmBackend(const Target &T,
1488 const MCSubtargetInfo &STI,
1489 const MCRegisterInfo &MRI,
1490 const MCTargetOptions &Options) {
1491 const Triple &TheTriple = STI.getTargetTriple();
1492 if (TheTriple.isOSBinFormatMachO())
1493 return new DarwinX86AsmBackend(T, MRI, STI);
1494
1495 if (TheTriple.isOSWindows() && TheTriple.isOSBinFormatCOFF())
1496 return new WindowsX86AsmBackend(T, true, STI);
1497
1498 if (TheTriple.isUEFI()) {
1499 assert(TheTriple.isOSBinFormatCOFF() &&
1500 "Only COFF format is supported in UEFI environment.");
1501 return new WindowsX86AsmBackend(T, true, STI);
1502 }
1503
1504 uint8_t OSABI = MCELFObjectTargetWriter::getOSABI(TheTriple.getOS());
1505
1506 if (TheTriple.isX32())
1507 return new ELFX86_X32AsmBackend(T, OSABI, STI);
1508 return new ELFX86_64AsmBackend(T, OSABI, STI);
1509}
1510
1511namespace {
1512class X86ELFStreamer : public MCELFStreamer {
1513public:
1514 X86ELFStreamer(MCContext &Context, std::unique_ptr<MCAsmBackend> TAB,
1515 std::unique_ptr<MCObjectWriter> OW,
1516 std::unique_ptr<MCCodeEmitter> Emitter)
1517 : MCELFStreamer(Context, std::move(TAB), std::move(OW),
1518 std::move(Emitter)) {}
1519
1520 void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override;
1521};
1522} // end anonymous namespace
1523
1524void X86ELFStreamer::emitInstruction(const MCInst &Inst,
1525 const MCSubtargetInfo &STI) {
1526 X86_MC::emitInstruction(*this, Inst, STI);
1527}
1528
1529 MCStreamer *llvm::createX86ELFStreamer(MCContext &Context,
1530 std::unique_ptr<MCAsmBackend> &&MAB,
1531 std::unique_ptr<MCObjectWriter> &&MOW,
1532 std::unique_ptr<MCCodeEmitter> &&MCE) {
1533 return new X86ELFStreamer(Context, std::move(MAB), std::move(MOW),
1534 std::move(MCE));
1535}
unsigned const MachineRegisterInfo * MRI
assert(UImm &&(UImm !=~static_cast< T >(0)) &&"Invalid immediate!")
#define LLVM_LIKELY(EXPR)
Definition: Compiler.h:335
dxil DXContainer Global Emitter
Returns the sub type a function will return at a given Idx Should correspond to the result type of an ExtractValue instruction executed with just that one unsigned Idx
std::string Name
uint64_t Size
IRTranslator LLVM IR MI
static LVOptions Options
Definition: LVOptions.cpp:25
#define F(x, y, z)
Definition: MD5.cpp:55
#define I(x, y, z)
Definition: MD5.cpp:58
mir Rename Register Operands
PowerPC TLS Dynamic Call Fixup
raw_pwrite_stream & OS
This file implements the StringSwitch template, which mimics a switch() statement whose cases are str...
static MCInstrInfo * createMCInstrInfo()
static unsigned getRelaxedOpcodeBranch(unsigned Opcode, bool Is16BitMode=false)
static X86::SecondMacroFusionInstKind classifySecondInstInMacroFusion(const MCInst &MI, const MCInstrInfo &MCII)
static bool isRIPRelative(const MCInst &MI, const MCInstrInfo &MCII)
Check if the instruction uses RIP relative addressing.
static bool mayHaveInterruptDelaySlot(unsigned InstOpcode)
X86 has certain instructions which enable interrupts exactly one instruction after the instruction wh...
static bool isFirstMacroFusibleInst(const MCInst &Inst, const MCInstrInfo &MCII)
Check if the instruction is valid as the first instruction in macro fusion.
constexpr char GotSymName[]
static X86::CondCode getCondFromBranch(const MCInst &MI, const MCInstrInfo &MCII)
static unsigned getRelaxedOpcode(const MCInst &MI, bool Is16BitMode)
static unsigned getFixupKindSize(unsigned Kind)
static bool isRelaxableBranch(unsigned Opcode)
static bool isPrefix(unsigned Opcode, const MCInstrInfo &MCII)
Check if the instruction is a prefix.
static bool hasVariantSymbol(const MCInst &MI)
Check if the instruction has a variant symbol operand.
static bool is64Bit(const char *name)
ArrayRef - Represent a constant reference to an array (0 or more elements consecutively in memory),...
Definition: ArrayRef.h:41
size_t size() const
size - Get the array size.
Definition: ArrayRef.h:147
bool empty() const
empty - Check if the array is empty.
Definition: ArrayRef.h:142
Implements a dense probed hash-table based set.
Definition: DenseSet.h:263
Generic interface to target specific assembler backends.
Definition: MCAsmBackend.h:55
virtual unsigned getMaximumNopSize(const MCSubtargetInfo &STI) const
Returns the maximum size of a nop in bytes on this target.
Definition: MCAsmBackend.h:189
virtual bool finishLayout(const MCAssembler &Asm) const
Definition: MCAsmBackend.h:202
virtual bool writeNopData(raw_ostream &OS, uint64_t Count, const MCSubtargetInfo *STI) const =0
Write an (optimal) nop sequence of Count bytes to the given output.
virtual void relaxInstruction(MCInst &Inst, const MCSubtargetInfo &STI) const
Relax the instruction in the given fragment to the next wider instruction.
Definition: MCAsmBackend.h:157
virtual MCFixupKindInfo getFixupKindInfo(MCFixupKind Kind) const
Get information on a fixup kind.
virtual bool mayNeedRelaxation(unsigned Opcode, ArrayRef< MCOperand > Operands, const MCSubtargetInfo &STI) const
Check whether the given instruction (encoded as Opcode+Operands) may need relaxation.
Definition: MCAsmBackend.h:135
virtual bool fixupNeedsRelaxationAdvanced(const MCFragment &, const MCFixup &, const MCValue &, uint64_t, bool Resolved) const
Target specific predicate for whether a given fixup requires the associated instruction to be relaxed...
virtual std::optional< MCFixupKind > getFixupKind(StringRef Name) const
Map a relocation name used in .reloc to a fixup kind.
virtual void applyFixup(const MCFragment &, const MCFixup &, const MCValue &Target, uint8_t *Data, uint64_t Value, bool IsResolved)=0
virtual std::optional< bool > evaluateFixup(const MCFragment &, MCFixup &, MCValue &, uint64_t &)
Definition: MCAsmBackend.h:112
MCAsmBackend & getBackend() const
Definition: MCAssembler.h:175
Represents required padding such that a particular other set of fragments does not cross a particular...
Definition: MCSection.h:459
void setLastFragment(const MCFragment *F)
Definition: MCSection.h:482
MCCodeEmitter - Generic instruction encoding interface.
Definition: MCCodeEmitter.h:23
Context object for machine code objects.
Definition: MCContext.h:83
LLVM_ABI bool emitCompactUnwindNonCanonical() const
Definition: MCContext.cpp:985
Base class for the full range of assembler expressions which are needed for parsing.
Definition: MCExpr.h:34
@ SymbolRef
References to labels and assigned expressions.
Definition: MCExpr.h:43
ExprKind getKind() const
Definition: MCExpr.h:85
Encode information on a single operation to perform on a byte sequence (e.g., an encoded instruction)...
Definition: MCFixup.h:61
static MCFixup create(uint32_t Offset, const MCExpr *Value, MCFixupKind Kind, bool PCRel=false)
Consider bit fields if we need more flags.
Definition: MCFixup.h:86
bool getAllowAutoPadding() const
Definition: MCSection.h:186
MCInst getInst() const
Definition: MCSection.h:656
unsigned getOpcode() const
Definition: MCSection.h:222
MCSection * getParent() const
Definition: MCSection.h:158
LLVM_ABI void setVarFixups(ArrayRef< MCFixup > Fixups)
Definition: MCSection.cpp:87
MCFragment * getNext() const
Definition: MCSection.h:154
ArrayRef< MCOperand > getOperands() const
Definition: MCSection.h:651
size_t getVarSize() const
Definition: MCSection.h:201
LLVM_ABI void setVarContents(ArrayRef< char > Contents)
Definition: MCSection.cpp:61
MutableArrayRef< char > getVarContents()
Definition: MCSection.h:622
const MCSubtargetInfo * getSubtargetInfo() const
Retrieve the MCSubTargetInfo in effect when the instruction was encoded.
Definition: MCSection.h:174
MutableArrayRef< MCFixup > getVarFixups()
Definition: MCSection.h:642
void setInst(const MCInst &Inst)
Definition: MCSection.h:665
Instances of this class represent a single low-level machine instruction.
Definition: MCInst.h:188
unsigned getOpcode() const
Definition: MCInst.h:202
void setOpcode(unsigned Op)
Definition: MCInst.h:201
const MCOperand & getOperand(unsigned i) const
Definition: MCInst.h:210
Describe properties that are true of each instruction in the target description file.
Definition: MCInstrDesc.h:199
bool isConditionalBranch() const
Return true if this is a branch which may fall through to the next instruction or may transfer control flow to some other block.
Definition: MCInstrDesc.h:318
Interface to description of machine instruction set.
Definition: MCInstrInfo.h:27
const MCInstrDesc & get(unsigned Opcode) const
Return the machine instruction descriptor that corresponds to the specified instruction opcode.
Definition: MCInstrInfo.h:64
Streaming object file generation interface.
MCAssembler & getAssembler()
void emitInstruction(const MCInst &Inst, const MCSubtargetInfo &STI) override
Emit the given Instruction into the current section.
MCRegister getReg() const
Returns the register number.
Definition: MCInst.h:73
MCRegisterInfo base class - We assume that the target defines a static array of MCRegisterDesc objects that represent all of the machine registers that the target has.
Wrapper class representing physical registers. Should be passed by value.
Definition: MCRegister.h:33
Instances of this class represent a uniqued identifier for a section in the current translation unit.
Definition: MCSection.h:496
void ensureMinAlignment(Align MinAlignment)
Makes sure that Alignment is at least MinAlignment.
Definition: MCSection.h:583
Streaming machine code generation interface.
Definition: MCStreamer.h:220
bool getAllowAutoPadding() const
Definition: MCStreamer.h:329
Generic base class for all target subtargets.
bool hasFeature(unsigned Feature) const
const Triple & getTargetTriple() const
MCSymbol - Instances of this class represent a symbol name in the MC file, and MCSymbols are created and uniqued by the MCContext class.
Definition: MCSymbol.h:42
iterator end() const
Definition: ArrayRef.h:348
iterator begin() const
Definition: ArrayRef.h:347
SmallString - A SmallString is just a SmallVector with methods and accessors that make it work better as a string (e.g. operator+ etc).
Definition: SmallString.h:26
bool empty() const
Definition: SmallVector.h:82
void push_back(const T &Elt)
Definition: SmallVector.h:414
This is a 'vector' (really, a variable-sized array), optimized for the case when the array is small.
Definition: SmallVector.h:1197
StringRef - Represent a constant reference to a string, i.e. a character array and a length, which need not be null terminated.
Definition: StringRef.h:55
std::pair< StringRef, StringRef > split(char Separator) const
Split into two substrings around the first occurrence of a separator character.
Definition: StringRef.h:710
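
StringRef::split exists in two forms: the pair-returning overload shown above, and an overload that fills a SmallVector with every piece. A small illustration with a made-up input string and helper name; nothing here is specific to this file:

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
using namespace llvm;

static SmallVector<StringRef, 4> splitPlusList(StringRef Val) {
  // Pair form: split around the first '+' only.
  // For "alpha+beta+gamma" this yields "alpha" and "beta+gamma".
  auto [Head, Rest] = Val.split('+');
  (void)Head;
  (void)Rest;

  // Vector form: collect every piece, dropping empty ones.
  SmallVector<StringRef, 4> Parts;
  Val.split(Parts, '+', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
  return Parts;
}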
A switch()-like statement whose cases are string literals.
Definition: StringSwitch.h:43
StringSwitch & Case(StringLiteral S, T Value)
Definition: StringSwitch.h:68
R Default(T Value)
Definition: StringSwitch.h:177
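
StringSwitch chains Case calls and ends with Default, which makes it convenient for mapping option strings to enum values. A self-contained illustration; the enum and the strings are invented and not part of the X86 backend:

#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSwitch.h"
#include <optional>
using namespace llvm;

enum class Channel { Red, Green, Blue };

// Return std::nullopt for any name that is not recognized.
static std::optional<Channel> parseChannel(StringRef Name) {
  return StringSwitch<std::optional<Channel>>(Name)
      .Case("red", Channel::Red)
      .Case("green", Channel::Green)
      .Case("blue", Channel::Blue)
      .Default(std::nullopt);
}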
Target - Wrapper for Target specific information.
Triple - Helper class for working with autoconf configuration names.
Definition: Triple.h:47
bool isX32() const
Tests whether the target is X32.
Definition: Triple.h:1131
bool isOSBinFormatMachO() const
Tests whether the OS uses the Mach-O binary format.
Definition: Triple.h:779
OSType getOS() const
Get the parsed operating system type of this triple.
Definition: Triple.h:417
ArchType getArch() const
Get the parsed architecture type of this triple.
Definition: Triple.h:408
bool isOSBinFormatCOFF() const
Tests whether the OS uses the COFF binary format.
Definition: Triple.h:771
bool isUEFI() const
Tests whether the OS is UEFI.
Definition: Triple.h:671
bool isOSWindows() const
Tests whether the OS is Windows.
Definition: Triple.h:676
bool isOSIAMCU() const
Definition: Triple.h:649
bool isOSBinFormatELF() const
Tests whether the OS uses the ELF binary format.
Definition: Triple.h:766
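
These binary-format predicates are commonly used to pick an object writer. A rough sketch, not the actual dispatch in this backend: it assumes a made-up function name, a placeholder OSABI of 0, the Expected-consuming overload of cantFail, and the createX86*ObjectWriter helpers listed further down:

#include "MCTargetDesc/X86MCTargetDesc.h" // declares the createX86*ObjectWriter helpers
#include "llvm/BinaryFormat/ELF.h"
#include "llvm/BinaryFormat/MachO.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/Support/Error.h"
#include "llvm/TargetParser/Triple.h"
#include <memory>
using namespace llvm;

static std::unique_ptr<MCObjectTargetWriter>
makeWriterForTriple(const Triple &TT) {
  bool Is64Bit = TT.getArch() == Triple::x86_64;
  if (TT.isOSBinFormatMachO()) {
    // Mach-O needs the CPU type/subtype derived from the triple.
    uint32_t CPUType = cantFail(MachO::getCPUType(TT));
    uint32_t CPUSubType = cantFail(MachO::getCPUSubType(TT));
    return createX86MachObjectWriter(Is64Bit, CPUType, CPUSubType);
  }
  if (TT.isOSBinFormatCOFF())
    return createX86WinCOFFObjectWriter(Is64Bit);
  // Otherwise assume ELF; OSABI 0 (ELFOSABI_NONE) is a placeholder here.
  return createX86ELFObjectWriter(Is64Bit, /*OSABI=*/0,
                                  Is64Bit ? ELF::EM_X86_64 : ELF::EM_386);
}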
Twine - A lightweight data structure for efficiently representing the concatenation of temporary values as strings.
Definition: Twine.h:82
The instances of the Type class are immutable: once they are created, they are never changed.
Definition: Type.h:45
LLVM Value Representation.
Definition: Value.h:75
std::pair< iterator, bool > insert(const ValueT &V)
Definition: DenseSet.h:194
size_type count(const_arg_type_t< ValueT > V) const
Return 1 if the specified key is in the set, 0 otherwise.
Definition: DenseSet.h:174
This class implements an extremely fast bulk output stream that can only output to a stream.
Definition: raw_ostream.h:53
raw_ostream & write(unsigned char C)
#define llvm_unreachable(msg)
Marks that the current location is not supposed to be reachable.
CompactUnwindEncodings
Compact unwind encoding values.
@ EM_386
Definition: ELF.h:141
@ EM_X86_64
Definition: ELF.h:183
@ EM_IAMCU
Definition: ELF.h:144
LLVM_ABI Expected< uint32_t > getCPUSubType(const Triple &T)
Definition: MachO.cpp:95
LLVM_ABI Expected< uint32_t > getCPUType(const Triple &T)
Definition: MachO.cpp:77
@ Relaxed
Definition: NVPTX.h:158
VE::Fixups getFixupKind(uint8_t S)
Definition: VEMCAsmInfo.cpp:38
Reg
All possible values of the reg field in the ModR/M byte.
@ RawFrmDstSrc
RawFrmDstSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possible segment override, and also the destination index register DI/EDI/RDI.
Definition: X86BaseInfo.h:518
@ RawFrmSrc
RawFrmSrc - This form is for instructions that use the source index register SI/ESI/RSI with a possible segment override.
Definition: X86BaseInfo.h:511
@ RawFrmMemOffs
RawFrmMemOffs - This form is for instructions that store an absolute memory offset as an immediate with a possible segment override.
Definition: X86BaseInfo.h:508
bool isPrefix(uint64_t TSFlags)
Definition: X86BaseInfo.h:882
int getMemoryOperandNo(uint64_t TSFlags)
Definition: X86BaseInfo.h:1011
unsigned getOperandBias(const MCInstrDesc &Desc)
Compute whether all of the def operands are repeated in the uses and therefore should be skipped.
Definition: X86BaseInfo.h:968
void emitPrefix(MCCodeEmitter &MCE, const MCInst &MI, SmallVectorImpl< char > &CB, const MCSubtargetInfo &STI)
void emitInstruction(MCObjectStreamer &, const MCInst &Inst, const MCSubtargetInfo &STI)
FirstMacroFusionInstKind classifyFirstOpcodeInMacroFusion(unsigned Opcode)
Definition: X86BaseInfo.h:126
AlignBranchBoundaryKind
Defines the possible values of the branch boundary alignment mask.
Definition: X86BaseInfo.h:309
@ AlignBranchJmp
Definition: X86BaseInfo.h:313
@ AlignBranchIndirect
Definition: X86BaseInfo.h:316
@ AlignBranchJcc
Definition: X86BaseInfo.h:312
@ AlignBranchCall
Definition: X86BaseInfo.h:314
@ AlignBranchRet
Definition: X86BaseInfo.h:315
@ AlignBranchNone
Definition: X86BaseInfo.h:310
@ AlignBranchFused
Definition: X86BaseInfo.h:311
SecondMacroFusionInstKind
Definition: X86BaseInfo.h:116
EncodingOfSegmentOverridePrefix getSegmentOverridePrefixForReg(MCRegister Reg)
Given a segment register, return the encoding of the segment override prefix for it.
Definition: X86BaseInfo.h:332
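
A possible use of getSegmentOverridePrefixForReg, with an invented helper name and under the assumption that the EncodingOfSegmentOverridePrefix enumerators are the raw prefix byte values:

#include "MCTargetDesc/X86BaseInfo.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/MC/MCRegister.h"
using namespace llvm;

// Append the segment-override prefix byte for SegReg to a code buffer.
static void appendSegmentOverride(SmallVectorImpl<char> &CB,
                                  MCRegister SegReg) {
  CB.push_back(char(X86::getSegmentOverridePrefixForReg(SegReg)));
}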
FirstMacroFusionInstKind
Definition: X86BaseInfo.h:107
unsigned getOpcodeForLongImmediateForm(unsigned Opcode)
@ AddrSegmentReg
Definition: X86BaseInfo.h:34
bool isMacroFused(FirstMacroFusionInstKind FirstKind, SecondMacroFusionInstKind SecondKind)
Definition: X86BaseInfo.h:290
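
classifyFirstOpcodeInMacroFusion and isMacroFused can be combined to test whether an instruction pair is fusible. A sketch with an invented wrapper name; how the SecondMacroFusionInstKind of the following branch is computed is left out, since that helper is not part of this listing:

#include "MCTargetDesc/X86BaseInfo.h"
using namespace llvm;

// Could the instruction with FirstOpcode macro-fuse with a following
// branch whose classification is SecondKind?
static bool couldMacroFuse(unsigned FirstOpcode,
                           X86::SecondMacroFusionInstKind SecondKind) {
  X86::FirstMacroFusionInstKind FirstKind =
      X86::classifyFirstOpcodeInMacroFusion(FirstOpcode);
  return X86::isMacroFused(FirstKind, SecondKind);
}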
@ reloc_riprel_4byte_movq_load_rex2
Definition: X86FixupKinds.h:19
@ reloc_signed_4byte_relax
Definition: X86FixupKinds.h:33
@ reloc_branch_4byte_pcrel
Definition: X86FixupKinds.h:38
@ NumTargetFixupKinds
Definition: X86FixupKinds.h:41
@ reloc_riprel_4byte_relax
Definition: X86FixupKinds.h:21
@ reloc_riprel_4byte_relax_evex
Definition: X86FixupKinds.h:27
@ reloc_signed_4byte
Definition: X86FixupKinds.h:30
@ reloc_riprel_4byte_relax_rex
Definition: X86FixupKinds.h:23
@ reloc_global_offset_table
Definition: X86FixupKinds.h:35
@ reloc_riprel_4byte_movq_load
Definition: X86FixupKinds.h:18
@ reloc_riprel_4byte
Definition: X86FixupKinds.h:17
@ reloc_riprel_4byte_relax_rex2
Definition: X86FixupKinds.h:25
initializer< Ty > init(const Ty &Val)
Definition: CommandLine.h:444
LocationClass< Ty > location(Ty &L)
Definition: CommandLine.h:464
bool isRelocation(MCFixupKind FixupKind)
Definition: MCFixup.h:130
NodeAddr< CodeNode * > Code
Definition: RDFGraph.h:388
BaseReg
Stack frame base register. Bit 0 of FREInfo.Info.
Definition: SFrame.h:77
This is an optimization pass for GlobalISel generic memory operations.
Definition: AddressRanges.h:18
@ Offset
Definition: DWP.cpp:477
@ FirstTargetFixupKind
Definition: MCFixup.h:44
@ FK_SecRel_2
A two-byte section relative fixup.
Definition: MCFixup.h:40
@ FirstLiteralRelocationKind
Definition: MCFixup.h:29
@ FK_Data_8
An eight-byte fixup.
Definition: MCFixup.h:37
@ FK_Data_1
A one-byte fixup.
Definition: MCFixup.h:34
@ FK_Data_4
A four-byte fixup.
Definition: MCFixup.h:36
@ FK_SecRel_8
An eight-byte section relative fixup.
Definition: MCFixup.h:42
@ FK_NONE
A no-op fixup.
Definition: MCFixup.h:33
@ FK_SecRel_4
A four-byte section relative fixup.
Definition: MCFixup.h:41
@ FK_SecRel_1
A one-byte section relative fixup.
Definition: MCFixup.h:39
@ FK_Data_2
A two-byte fixup.
Definition: MCFixup.h:35
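
The FK_Data_* kinds are the generic fixed-size data fixups; a common pattern is to select one from a byte count. An illustrative helper, not taken from this backend:

#include "llvm/MC/MCFixup.h"
#include "llvm/Support/ErrorHandling.h"
using namespace llvm;

// Pick the generic data fixup kind for a given byte size.
static MCFixupKind dataFixupForSize(unsigned SizeInBytes) {
  switch (SizeInBytes) {
  case 1: return FK_Data_1;
  case 2: return FK_Data_2;
  case 4: return FK_Data_4;
  case 8: return FK_Data_8;
  default: llvm_unreachable("unsupported data fixup size");
  }
}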
MCAsmBackend * createX86_64AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
std::unique_ptr< MCObjectTargetWriter > createX86WinCOFFObjectWriter(bool Is64Bit)
Construct an X86 Win COFF object writer.
MCStreamer * createX86ELFStreamer(const Triple &T, MCContext &Context, std::unique_ptr< MCAsmBackend > &&MAB, std::unique_ptr< MCObjectWriter > &&MOW, std::unique_ptr< MCCodeEmitter > &&MCE)
LLVM_ABI raw_fd_ostream & errs()
This returns a reference to a raw_ostream for standard error.
void cantFail(Error Err, const char *Msg=nullptr)
Report a fatal error if Err is a failure value.
Definition: Error.h:769
std::unique_ptr< MCObjectTargetWriter > createX86MachObjectWriter(bool Is64Bit, uint32_t CPUType, uint32_t CPUSubtype)
Construct an X86 Mach-O object writer.
@ Add
Sum of integers.
OutputIt move(R &&Range, OutputIt Out)
Provide wrappers to std::move which take ranges instead of having to pass begin/end explicitly.
Definition: STLExtras.h:1886
constexpr bool isIntN(unsigned N, int64_t x)
Checks if a signed integer fits into the given (dynamic) bit width.
Definition: MathExtras.h:257
std::unique_ptr< MCObjectTargetWriter > createX86ELFObjectWriter(bool IsELF64, uint8_t OSABI, uint16_t EMachine)
Construct an X86 ELF object writer.
Align assumeAligned(uint64_t Value)
Treats the value 0 as a 1, so Align is always at least 1.
Definition: Alignment.h:111
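
assumeAligned and MCSection::ensureMinAlignment (listed earlier) combine naturally when raising a section's alignment to a requested boundary. A sketch with an invented function name; BoundaryBytes is assumed to be zero or a power of two, and assumeAligned() maps zero to Align(1), making a zero request a no-op:

#include "llvm/MC/MCSection.h"
#include "llvm/Support/Alignment.h"
using namespace llvm;

// Make sure Sec is aligned to at least BoundaryBytes.
static void ensureBoundaryAlignment(MCSection &Sec, uint64_t BoundaryBytes) {
  Sec.ensureMinAlignment(assumeAligned(BoundaryBytes));
}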
endianness
Definition: bit.h:71
MCAsmBackend * createX86_32AsmBackend(const Target &T, const MCSubtargetInfo &STI, const MCRegisterInfo &MRI, const MCTargetOptions &Options)
Implement std::hash so that hash_code can be used in STL containers.
Definition: BitVector.h:856
This struct is a compact representation of a valid (non-zero power of two) alignment.
Definition: Alignment.h:39
Description of the encoding of one expression Op.
const MCSymbol * Personality
Definition: MCDwarf.h:774
std::vector< MCCFIInstruction > Instructions
Definition: MCDwarf.h:776
Target independent information on a fixup kind.
Definition: MCAsmBackend.h:38