LLVM 22.0.0git
AArch64AsmParser.cpp
Go to the documentation of this file.
1//==- AArch64AsmParser.cpp - Parse AArch64 assembly to MCInst instructions -==//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
9#include "AArch64InstrInfo.h"
17#include "llvm/ADT/APFloat.h"
18#include "llvm/ADT/APInt.h"
19#include "llvm/ADT/ArrayRef.h"
20#include "llvm/ADT/STLExtras.h"
21#include "llvm/ADT/SmallSet.h"
24#include "llvm/ADT/StringMap.h"
25#include "llvm/ADT/StringRef.h"
27#include "llvm/ADT/Twine.h"
28#include "llvm/MC/MCAsmInfo.h"
29#include "llvm/MC/MCContext.h"
30#include "llvm/MC/MCExpr.h"
31#include "llvm/MC/MCInst.h"
40#include "llvm/MC/MCStreamer.h"
42#include "llvm/MC/MCSymbol.h"
44#include "llvm/MC/MCValue.h"
50#include "llvm/Support/SMLoc.h"
54#include <cassert>
55#include <cctype>
56#include <cstdint>
57#include <cstdio>
58#include <optional>
59#include <string>
60#include <tuple>
61#include <utility>
62#include <vector>
63
64using namespace llvm;
65
66namespace {
67
68enum class RegKind {
69 Scalar,
70 NeonVector,
71 SVEDataVector,
72 SVEPredicateAsCounter,
73 SVEPredicateVector,
74 Matrix,
75 LookupTable
76};
77
78enum class MatrixKind { Array, Tile, Row, Col };
79
80enum RegConstraintEqualityTy {
81 EqualsReg,
82 EqualsSuperReg,
83 EqualsSubReg
84};
85
86class AArch64AsmParser : public MCTargetAsmParser {
87private:
88 StringRef Mnemonic; ///< Instruction mnemonic.
89
90 // Map of register aliases registers via the .req directive.
91 StringMap<std::pair<RegKind, unsigned>> RegisterReqs;
92
93 class PrefixInfo {
94 public:
95 static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
96 PrefixInfo Prefix;
97 switch (Inst.getOpcode()) {
98 case AArch64::MOVPRFX_ZZ:
99 Prefix.Active = true;
100 Prefix.Dst = Inst.getOperand(0).getReg();
101 break;
102 case AArch64::MOVPRFX_ZPmZ_B:
103 case AArch64::MOVPRFX_ZPmZ_H:
104 case AArch64::MOVPRFX_ZPmZ_S:
105 case AArch64::MOVPRFX_ZPmZ_D:
106 Prefix.Active = true;
107 Prefix.Predicated = true;
108 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
109 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
110 "No destructive element size set for movprfx");
111 Prefix.Dst = Inst.getOperand(0).getReg();
112 Prefix.Pg = Inst.getOperand(2).getReg();
113 break;
114 case AArch64::MOVPRFX_ZPzZ_B:
115 case AArch64::MOVPRFX_ZPzZ_H:
116 case AArch64::MOVPRFX_ZPzZ_S:
117 case AArch64::MOVPRFX_ZPzZ_D:
118 Prefix.Active = true;
119 Prefix.Predicated = true;
120 Prefix.ElementSize = TSFlags & AArch64::ElementSizeMask;
121 assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
122 "No destructive element size set for movprfx");
123 Prefix.Dst = Inst.getOperand(0).getReg();
124 Prefix.Pg = Inst.getOperand(1).getReg();
125 break;
126 default:
127 break;
128 }
129
130 return Prefix;
131 }
132
133 PrefixInfo() = default;
134 bool isActive() const { return Active; }
135 bool isPredicated() const { return Predicated; }
136 unsigned getElementSize() const {
137 assert(Predicated);
138 return ElementSize;
139 }
140 MCRegister getDstReg() const { return Dst; }
141 MCRegister getPgReg() const {
142 assert(Predicated);
143 return Pg;
144 }
145
146 private:
147 bool Active = false;
148 bool Predicated = false;
149 unsigned ElementSize;
150 MCRegister Dst;
151 MCRegister Pg;
152 } NextPrefix;
153
154 AArch64TargetStreamer &getTargetStreamer() {
155 MCTargetStreamer &TS = *getParser().getStreamer().getTargetStreamer();
156 return static_cast<AArch64TargetStreamer &>(TS);
157 }
158
159 SMLoc getLoc() const { return getParser().getTok().getLoc(); }
160
161 bool parseSysAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
162 bool parseSyspAlias(StringRef Name, SMLoc NameLoc, OperandVector &Operands);
163 void createSysAlias(uint16_t Encoding, OperandVector &Operands, SMLoc S);
164 AArch64CC::CondCode parseCondCodeString(StringRef Cond,
165 std::string &Suggestion);
166 bool parseCondCode(OperandVector &Operands, bool invertCondCode);
167 unsigned matchRegisterNameAlias(StringRef Name, RegKind Kind);
168 bool parseRegister(OperandVector &Operands);
169 bool parseSymbolicImmVal(const MCExpr *&ImmVal);
170 bool parseNeonVectorList(OperandVector &Operands);
171 bool parseOptionalMulOperand(OperandVector &Operands);
172 bool parseOptionalVGOperand(OperandVector &Operands, StringRef &VecGroup);
173 bool parseKeywordOperand(OperandVector &Operands);
174 bool parseOperand(OperandVector &Operands, bool isCondCode,
175 bool invertCondCode);
176 bool parseImmExpr(int64_t &Out);
177 bool parseComma();
178 bool parseRegisterInRange(unsigned &Out, unsigned Base, unsigned First,
179 unsigned Last);
180
181 bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
183
184 bool parseDataExpr(const MCExpr *&Res) override;
185 bool parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc);
186
187 bool parseDirectiveArch(SMLoc L);
188 bool parseDirectiveArchExtension(SMLoc L);
189 bool parseDirectiveCPU(SMLoc L);
190 bool parseDirectiveInst(SMLoc L);
191
192 bool parseDirectiveTLSDescCall(SMLoc L);
193
194 bool parseDirectiveLOH(StringRef LOH, SMLoc L);
195 bool parseDirectiveLtorg(SMLoc L);
196
197 bool parseDirectiveReq(StringRef Name, SMLoc L);
198 bool parseDirectiveUnreq(SMLoc L);
199 bool parseDirectiveCFINegateRAState();
200 bool parseDirectiveCFINegateRAStateWithPC();
201 bool parseDirectiveCFIBKeyFrame();
202 bool parseDirectiveCFIMTETaggedFrame();
203
204 bool parseDirectiveVariantPCS(SMLoc L);
205
206 bool parseDirectiveSEHAllocStack(SMLoc L);
207 bool parseDirectiveSEHPrologEnd(SMLoc L);
208 bool parseDirectiveSEHSaveR19R20X(SMLoc L);
209 bool parseDirectiveSEHSaveFPLR(SMLoc L);
210 bool parseDirectiveSEHSaveFPLRX(SMLoc L);
211 bool parseDirectiveSEHSaveReg(SMLoc L);
212 bool parseDirectiveSEHSaveRegX(SMLoc L);
213 bool parseDirectiveSEHSaveRegP(SMLoc L);
214 bool parseDirectiveSEHSaveRegPX(SMLoc L);
215 bool parseDirectiveSEHSaveLRPair(SMLoc L);
216 bool parseDirectiveSEHSaveFReg(SMLoc L);
217 bool parseDirectiveSEHSaveFRegX(SMLoc L);
218 bool parseDirectiveSEHSaveFRegP(SMLoc L);
219 bool parseDirectiveSEHSaveFRegPX(SMLoc L);
220 bool parseDirectiveSEHSetFP(SMLoc L);
221 bool parseDirectiveSEHAddFP(SMLoc L);
222 bool parseDirectiveSEHNop(SMLoc L);
223 bool parseDirectiveSEHSaveNext(SMLoc L);
224 bool parseDirectiveSEHEpilogStart(SMLoc L);
225 bool parseDirectiveSEHEpilogEnd(SMLoc L);
226 bool parseDirectiveSEHTrapFrame(SMLoc L);
227 bool parseDirectiveSEHMachineFrame(SMLoc L);
228 bool parseDirectiveSEHContext(SMLoc L);
229 bool parseDirectiveSEHECContext(SMLoc L);
230 bool parseDirectiveSEHClearUnwoundToCall(SMLoc L);
231 bool parseDirectiveSEHPACSignLR(SMLoc L);
232 bool parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired, bool Writeback);
233 bool parseDirectiveSEHAllocZ(SMLoc L);
234 bool parseDirectiveSEHSaveZReg(SMLoc L);
235 bool parseDirectiveSEHSavePReg(SMLoc L);
236 bool parseDirectiveAeabiSubSectionHeader(SMLoc L);
237 bool parseDirectiveAeabiAArch64Attr(SMLoc L);
238
239 bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
240 SmallVectorImpl<SMLoc> &Loc);
241 unsigned getNumRegsForRegKind(RegKind K);
242 bool matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
243 OperandVector &Operands, MCStreamer &Out,
244 uint64_t &ErrorInfo,
245 bool MatchingInlineAsm) override;
246 /// @name Auto-generated Match Functions
247 /// {
248
249#define GET_ASSEMBLER_HEADER
250#include "AArch64GenAsmMatcher.inc"
251
252 /// }
253
254 ParseStatus tryParseScalarRegister(MCRegister &Reg);
255 ParseStatus tryParseVectorRegister(MCRegister &Reg, StringRef &Kind,
256 RegKind MatchKind);
257 ParseStatus tryParseMatrixRegister(OperandVector &Operands);
258 ParseStatus tryParseSVCR(OperandVector &Operands);
259 ParseStatus tryParseOptionalShiftExtend(OperandVector &Operands);
260 ParseStatus tryParseBarrierOperand(OperandVector &Operands);
261 ParseStatus tryParseBarriernXSOperand(OperandVector &Operands);
262 ParseStatus tryParseSysReg(OperandVector &Operands);
263 ParseStatus tryParseSysCROperand(OperandVector &Operands);
264 template <bool IsSVEPrefetch = false>
265 ParseStatus tryParsePrefetch(OperandVector &Operands);
266 ParseStatus tryParseRPRFMOperand(OperandVector &Operands);
267 ParseStatus tryParsePSBHint(OperandVector &Operands);
268 ParseStatus tryParseBTIHint(OperandVector &Operands);
269 ParseStatus tryParseAdrpLabel(OperandVector &Operands);
270 ParseStatus tryParseAdrLabel(OperandVector &Operands);
271 template <bool AddFPZeroAsLiteral>
272 ParseStatus tryParseFPImm(OperandVector &Operands);
273 ParseStatus tryParseImmWithOptionalShift(OperandVector &Operands);
274 ParseStatus tryParseGPR64sp0Operand(OperandVector &Operands);
275 bool tryParseNeonVectorRegister(OperandVector &Operands);
276 ParseStatus tryParseVectorIndex(OperandVector &Operands);
277 ParseStatus tryParseGPRSeqPair(OperandVector &Operands);
278 ParseStatus tryParseSyspXzrPair(OperandVector &Operands);
279 template <bool ParseShiftExtend,
280 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg>
281 ParseStatus tryParseGPROperand(OperandVector &Operands);
282 ParseStatus tryParseZTOperand(OperandVector &Operands);
283 template <bool ParseShiftExtend, bool ParseSuffix>
284 ParseStatus tryParseSVEDataVector(OperandVector &Operands);
285 template <RegKind RK>
286 ParseStatus tryParseSVEPredicateVector(OperandVector &Operands);
288 tryParseSVEPredicateOrPredicateAsCounterVector(OperandVector &Operands);
289 template <RegKind VectorKind>
290 ParseStatus tryParseVectorList(OperandVector &Operands,
291 bool ExpectMatch = false);
292 ParseStatus tryParseMatrixTileList(OperandVector &Operands);
293 ParseStatus tryParseSVEPattern(OperandVector &Operands);
294 ParseStatus tryParseSVEVecLenSpecifier(OperandVector &Operands);
295 ParseStatus tryParseGPR64x8(OperandVector &Operands);
296 ParseStatus tryParseImmRange(OperandVector &Operands);
297 template <int> ParseStatus tryParseAdjImm0_63(OperandVector &Operands);
298 ParseStatus tryParsePHintInstOperand(OperandVector &Operands);
299
300public:
301 enum AArch64MatchResultTy {
302 Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
303#define GET_OPERAND_DIAGNOSTIC_TYPES
304#include "AArch64GenAsmMatcher.inc"
305 };
306 bool IsILP32;
307 bool IsWindowsArm64EC;
308
309 AArch64AsmParser(const MCSubtargetInfo &STI, MCAsmParser &Parser,
310 const MCInstrInfo &MII, const MCTargetOptions &Options)
311 : MCTargetAsmParser(Options, STI, MII) {
312 IsILP32 = STI.getTargetTriple().getEnvironment() == Triple::GNUILP32;
313 IsWindowsArm64EC = STI.getTargetTriple().isWindowsArm64EC();
315 MCStreamer &S = getParser().getStreamer();
316 if (S.getTargetStreamer() == nullptr)
317 new AArch64TargetStreamer(S);
318
319 // Alias .hword/.word/.[dx]word to the target-independent
320 // .2byte/.4byte/.8byte directives as they have the same form and
321 // semantics:
322 /// ::= (.hword | .word | .dword | .xword ) [ expression (, expression)* ]
323 Parser.addAliasForDirective(".hword", ".2byte");
324 Parser.addAliasForDirective(".word", ".4byte");
325 Parser.addAliasForDirective(".dword", ".8byte");
326 Parser.addAliasForDirective(".xword", ".8byte");
327
328 // Initialize the set of available features.
329 setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
330 }
331
332 bool areEqualRegs(const MCParsedAsmOperand &Op1,
333 const MCParsedAsmOperand &Op2) const override;
334 bool parseInstruction(ParseInstructionInfo &Info, StringRef Name,
335 SMLoc NameLoc, OperandVector &Operands) override;
336 bool parseRegister(MCRegister &Reg, SMLoc &StartLoc, SMLoc &EndLoc) override;
337 ParseStatus tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
338 SMLoc &EndLoc) override;
339 bool ParseDirective(AsmToken DirectiveID) override;
340 unsigned validateTargetOperandClass(MCParsedAsmOperand &Op,
341 unsigned Kind) override;
342
343 static bool classifySymbolRef(const MCExpr *Expr, AArch64::Specifier &ELFSpec,
344 AArch64::Specifier &DarwinSpec,
345 int64_t &Addend);
346};
347
348/// AArch64Operand - Instances of this class represent a parsed AArch64 machine
349/// instruction.
350class AArch64Operand : public MCParsedAsmOperand {
351private:
352 enum KindTy {
353 k_Immediate,
354 k_ShiftedImm,
355 k_ImmRange,
356 k_CondCode,
357 k_Register,
358 k_MatrixRegister,
359 k_MatrixTileList,
360 k_SVCR,
361 k_VectorList,
362 k_VectorIndex,
363 k_Token,
364 k_SysReg,
365 k_SysCR,
366 k_Prefetch,
367 k_ShiftExtend,
368 k_FPImm,
369 k_Barrier,
370 k_PSBHint,
371 k_PHint,
372 k_BTIHint,
373 } Kind;
374
375 SMLoc StartLoc, EndLoc;
376
377 struct TokOp {
378 const char *Data;
379 unsigned Length;
380 bool IsSuffix; // Is the operand actually a suffix on the mnemonic.
381 };
382
383 // Separate shift/extend operand.
384 struct ShiftExtendOp {
386 unsigned Amount;
387 bool HasExplicitAmount;
388 };
389
390 struct RegOp {
391 unsigned RegNum;
392 RegKind Kind;
393 int ElementWidth;
394
395 // The register may be allowed as a different register class,
396 // e.g. for GPR64as32 or GPR32as64.
397 RegConstraintEqualityTy EqualityTy;
398
399 // In some cases the shift/extend needs to be explicitly parsed together
400 // with the register, rather than as a separate operand. This is needed
401 // for addressing modes where the instruction as a whole dictates the
402 // scaling/extend, rather than specific bits in the instruction.
403 // By parsing them as a single operand, we avoid the need to pass an
404 // extra operand in all CodeGen patterns (because all operands need to
405 // have an associated value), and we avoid the need to update TableGen to
406 // accept operands that have no associated bits in the instruction.
407 //
408 // An added benefit of parsing them together is that the assembler
409 // can give a sensible diagnostic if the scaling is not correct.
410 //
411 // The default is 'lsl #0' (HasExplicitAmount = false) if no
412 // ShiftExtend is specified.
413 ShiftExtendOp ShiftExtend;
414 };
415
416 struct MatrixRegOp {
417 unsigned RegNum;
418 unsigned ElementWidth;
419 MatrixKind Kind;
420 };
421
422 struct MatrixTileListOp {
423 unsigned RegMask = 0;
424 };
425
426 struct VectorListOp {
427 unsigned RegNum;
428 unsigned Count;
429 unsigned Stride;
430 unsigned NumElements;
431 unsigned ElementWidth;
432 RegKind RegisterKind;
433 };
434
435 struct VectorIndexOp {
436 int Val;
437 };
438
439 struct ImmOp {
440 const MCExpr *Val;
441 };
442
443 struct ShiftedImmOp {
444 const MCExpr *Val;
445 unsigned ShiftAmount;
446 };
447
448 struct ImmRangeOp {
449 unsigned First;
450 unsigned Last;
451 };
452
453 struct CondCodeOp {
455 };
456
457 struct FPImmOp {
458 uint64_t Val; // APFloat value bitcasted to uint64_t.
459 bool IsExact; // describes whether parsed value was exact.
460 };
461
462 struct BarrierOp {
463 const char *Data;
464 unsigned Length;
465 unsigned Val; // Not the enum since not all values have names.
466 bool HasnXSModifier;
467 };
468
469 struct SysRegOp {
470 const char *Data;
471 unsigned Length;
472 uint32_t MRSReg;
473 uint32_t MSRReg;
474 uint32_t PStateField;
475 };
476
477 struct SysCRImmOp {
478 unsigned Val;
479 };
480
481 struct PrefetchOp {
482 const char *Data;
483 unsigned Length;
484 unsigned Val;
485 };
486
487 struct PSBHintOp {
488 const char *Data;
489 unsigned Length;
490 unsigned Val;
491 };
492 struct PHintOp {
493 const char *Data;
494 unsigned Length;
495 unsigned Val;
496 };
497 struct BTIHintOp {
498 const char *Data;
499 unsigned Length;
500 unsigned Val;
501 };
502
503 struct SVCROp {
504 const char *Data;
505 unsigned Length;
506 unsigned PStateField;
507 };
508
509 union {
510 struct TokOp Tok;
511 struct RegOp Reg;
512 struct MatrixRegOp MatrixReg;
513 struct MatrixTileListOp MatrixTileList;
514 struct VectorListOp VectorList;
515 struct VectorIndexOp VectorIndex;
516 struct ImmOp Imm;
517 struct ShiftedImmOp ShiftedImm;
518 struct ImmRangeOp ImmRange;
519 struct CondCodeOp CondCode;
520 struct FPImmOp FPImm;
521 struct BarrierOp Barrier;
522 struct SysRegOp SysReg;
523 struct SysCRImmOp SysCRImm;
524 struct PrefetchOp Prefetch;
525 struct PSBHintOp PSBHint;
526 struct PHintOp PHint;
527 struct BTIHintOp BTIHint;
528 struct ShiftExtendOp ShiftExtend;
529 struct SVCROp SVCR;
530 };
531
532 // Keep the MCContext around as the MCExprs may need manipulated during
533 // the add<>Operands() calls.
534 MCContext &Ctx;
535
536public:
537 AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}
538
539 AArch64Operand(const AArch64Operand &o) : MCParsedAsmOperand(), Ctx(o.Ctx) {
540 Kind = o.Kind;
541 StartLoc = o.StartLoc;
542 EndLoc = o.EndLoc;
543 switch (Kind) {
544 case k_Token:
545 Tok = o.Tok;
546 break;
547 case k_Immediate:
548 Imm = o.Imm;
549 break;
550 case k_ShiftedImm:
551 ShiftedImm = o.ShiftedImm;
552 break;
553 case k_ImmRange:
554 ImmRange = o.ImmRange;
555 break;
556 case k_CondCode:
557 CondCode = o.CondCode;
558 break;
559 case k_FPImm:
560 FPImm = o.FPImm;
561 break;
562 case k_Barrier:
563 Barrier = o.Barrier;
564 break;
565 case k_Register:
566 Reg = o.Reg;
567 break;
568 case k_MatrixRegister:
569 MatrixReg = o.MatrixReg;
570 break;
571 case k_MatrixTileList:
572 MatrixTileList = o.MatrixTileList;
573 break;
574 case k_VectorList:
575 VectorList = o.VectorList;
576 break;
577 case k_VectorIndex:
578 VectorIndex = o.VectorIndex;
579 break;
580 case k_SysReg:
581 SysReg = o.SysReg;
582 break;
583 case k_SysCR:
584 SysCRImm = o.SysCRImm;
585 break;
586 case k_Prefetch:
587 Prefetch = o.Prefetch;
588 break;
589 case k_PSBHint:
590 PSBHint = o.PSBHint;
591 break;
592 case k_PHint:
593 PHint = o.PHint;
594 break;
595 case k_BTIHint:
596 BTIHint = o.BTIHint;
597 break;
598 case k_ShiftExtend:
599 ShiftExtend = o.ShiftExtend;
600 break;
601 case k_SVCR:
602 SVCR = o.SVCR;
603 break;
604 }
605 }
606
607 /// getStartLoc - Get the location of the first token of this operand.
608 SMLoc getStartLoc() const override { return StartLoc; }
609 /// getEndLoc - Get the location of the last token of this operand.
610 SMLoc getEndLoc() const override { return EndLoc; }
611
612 StringRef getToken() const {
613 assert(Kind == k_Token && "Invalid access!");
614 return StringRef(Tok.Data, Tok.Length);
615 }
616
617 bool isTokenSuffix() const {
618 assert(Kind == k_Token && "Invalid access!");
619 return Tok.IsSuffix;
620 }
621
622 const MCExpr *getImm() const {
623 assert(Kind == k_Immediate && "Invalid access!");
624 return Imm.Val;
625 }
626
627 const MCExpr *getShiftedImmVal() const {
628 assert(Kind == k_ShiftedImm && "Invalid access!");
629 return ShiftedImm.Val;
630 }
631
632 unsigned getShiftedImmShift() const {
633 assert(Kind == k_ShiftedImm && "Invalid access!");
634 return ShiftedImm.ShiftAmount;
635 }
636
637 unsigned getFirstImmVal() const {
638 assert(Kind == k_ImmRange && "Invalid access!");
639 return ImmRange.First;
640 }
641
642 unsigned getLastImmVal() const {
643 assert(Kind == k_ImmRange && "Invalid access!");
644 return ImmRange.Last;
645 }
646
648 assert(Kind == k_CondCode && "Invalid access!");
649 return CondCode.Code;
650 }
651
652 APFloat getFPImm() const {
653 assert (Kind == k_FPImm && "Invalid access!");
654 return APFloat(APFloat::IEEEdouble(), APInt(64, FPImm.Val, true));
655 }
656
657 bool getFPImmIsExact() const {
658 assert (Kind == k_FPImm && "Invalid access!");
659 return FPImm.IsExact;
660 }
661
662 unsigned getBarrier() const {
663 assert(Kind == k_Barrier && "Invalid access!");
664 return Barrier.Val;
665 }
666
667 StringRef getBarrierName() const {
668 assert(Kind == k_Barrier && "Invalid access!");
669 return StringRef(Barrier.Data, Barrier.Length);
670 }
671
672 bool getBarriernXSModifier() const {
673 assert(Kind == k_Barrier && "Invalid access!");
674 return Barrier.HasnXSModifier;
675 }
676
677 MCRegister getReg() const override {
678 assert(Kind == k_Register && "Invalid access!");
679 return Reg.RegNum;
680 }
681
682 unsigned getMatrixReg() const {
683 assert(Kind == k_MatrixRegister && "Invalid access!");
684 return MatrixReg.RegNum;
685 }
686
687 unsigned getMatrixElementWidth() const {
688 assert(Kind == k_MatrixRegister && "Invalid access!");
689 return MatrixReg.ElementWidth;
690 }
691
692 MatrixKind getMatrixKind() const {
693 assert(Kind == k_MatrixRegister && "Invalid access!");
694 return MatrixReg.Kind;
695 }
696
697 unsigned getMatrixTileListRegMask() const {
698 assert(isMatrixTileList() && "Invalid access!");
699 return MatrixTileList.RegMask;
700 }
701
702 RegConstraintEqualityTy getRegEqualityTy() const {
703 assert(Kind == k_Register && "Invalid access!");
704 return Reg.EqualityTy;
705 }
706
707 unsigned getVectorListStart() const {
708 assert(Kind == k_VectorList && "Invalid access!");
709 return VectorList.RegNum;
710 }
711
712 unsigned getVectorListCount() const {
713 assert(Kind == k_VectorList && "Invalid access!");
714 return VectorList.Count;
715 }
716
717 unsigned getVectorListStride() const {
718 assert(Kind == k_VectorList && "Invalid access!");
719 return VectorList.Stride;
720 }
721
722 int getVectorIndex() const {
723 assert(Kind == k_VectorIndex && "Invalid access!");
724 return VectorIndex.Val;
725 }
726
727 StringRef getSysReg() const {
728 assert(Kind == k_SysReg && "Invalid access!");
729 return StringRef(SysReg.Data, SysReg.Length);
730 }
731
732 unsigned getSysCR() const {
733 assert(Kind == k_SysCR && "Invalid access!");
734 return SysCRImm.Val;
735 }
736
737 unsigned getPrefetch() const {
738 assert(Kind == k_Prefetch && "Invalid access!");
739 return Prefetch.Val;
740 }
741
742 unsigned getPSBHint() const {
743 assert(Kind == k_PSBHint && "Invalid access!");
744 return PSBHint.Val;
745 }
746
747 unsigned getPHint() const {
748 assert(Kind == k_PHint && "Invalid access!");
749 return PHint.Val;
750 }
751
752 StringRef getPSBHintName() const {
753 assert(Kind == k_PSBHint && "Invalid access!");
754 return StringRef(PSBHint.Data, PSBHint.Length);
755 }
756
757 StringRef getPHintName() const {
758 assert(Kind == k_PHint && "Invalid access!");
759 return StringRef(PHint.Data, PHint.Length);
760 }
761
762 unsigned getBTIHint() const {
763 assert(Kind == k_BTIHint && "Invalid access!");
764 return BTIHint.Val;
765 }
766
767 StringRef getBTIHintName() const {
768 assert(Kind == k_BTIHint && "Invalid access!");
769 return StringRef(BTIHint.Data, BTIHint.Length);
770 }
771
772 StringRef getSVCR() const {
773 assert(Kind == k_SVCR && "Invalid access!");
774 return StringRef(SVCR.Data, SVCR.Length);
775 }
776
777 StringRef getPrefetchName() const {
778 assert(Kind == k_Prefetch && "Invalid access!");
779 return StringRef(Prefetch.Data, Prefetch.Length);
780 }
781
782 AArch64_AM::ShiftExtendType getShiftExtendType() const {
783 if (Kind == k_ShiftExtend)
784 return ShiftExtend.Type;
785 if (Kind == k_Register)
786 return Reg.ShiftExtend.Type;
787 llvm_unreachable("Invalid access!");
788 }
789
790 unsigned getShiftExtendAmount() const {
791 if (Kind == k_ShiftExtend)
792 return ShiftExtend.Amount;
793 if (Kind == k_Register)
794 return Reg.ShiftExtend.Amount;
795 llvm_unreachable("Invalid access!");
796 }
797
798 bool hasShiftExtendAmount() const {
799 if (Kind == k_ShiftExtend)
800 return ShiftExtend.HasExplicitAmount;
801 if (Kind == k_Register)
802 return Reg.ShiftExtend.HasExplicitAmount;
803 llvm_unreachable("Invalid access!");
804 }
805
806 bool isImm() const override { return Kind == k_Immediate; }
807 bool isMem() const override { return false; }
808
809 bool isUImm6() const {
810 if (!isImm())
811 return false;
812 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
813 if (!MCE)
814 return false;
815 int64_t Val = MCE->getValue();
816 return (Val >= 0 && Val < 64);
817 }
818
819 template <int Width> bool isSImm() const {
820 return bool(isSImmScaled<Width, 1>());
821 }
822
823 template <int Bits, int Scale> DiagnosticPredicate isSImmScaled() const {
824 return isImmScaled<Bits, Scale>(true);
825 }
826
827 template <int Bits, int Scale, int Offset = 0, bool IsRange = false>
828 DiagnosticPredicate isUImmScaled() const {
829 if (IsRange && isImmRange() &&
830 (getLastImmVal() != getFirstImmVal() + Offset))
832
833 return isImmScaled<Bits, Scale, IsRange>(false);
834 }
835
836 template <int Bits, int Scale, bool IsRange = false>
837 DiagnosticPredicate isImmScaled(bool Signed) const {
838 if ((!isImm() && !isImmRange()) || (isImm() && IsRange) ||
839 (isImmRange() && !IsRange))
841
842 int64_t Val;
843 if (isImmRange())
844 Val = getFirstImmVal();
845 else {
846 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
847 if (!MCE)
849 Val = MCE->getValue();
850 }
851
852 int64_t MinVal, MaxVal;
853 if (Signed) {
854 int64_t Shift = Bits - 1;
855 MinVal = (int64_t(1) << Shift) * -Scale;
856 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
857 } else {
858 MinVal = 0;
859 MaxVal = ((int64_t(1) << Bits) - 1) * Scale;
860 }
861
862 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
864
866 }
867
868 DiagnosticPredicate isSVEPattern() const {
869 if (!isImm())
871 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
872 if (!MCE)
874 int64_t Val = MCE->getValue();
875 if (Val >= 0 && Val < 32)
878 }
879
880 DiagnosticPredicate isSVEVecLenSpecifier() const {
881 if (!isImm())
883 auto *MCE = dyn_cast<MCConstantExpr>(getImm());
884 if (!MCE)
886 int64_t Val = MCE->getValue();
887 if (Val >= 0 && Val <= 1)
890 }
891
892 bool isSymbolicUImm12Offset(const MCExpr *Expr) const {
893 AArch64::Specifier ELFSpec;
894 AArch64::Specifier DarwinSpec;
895 int64_t Addend;
896 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
897 Addend)) {
898 // If we don't understand the expression, assume the best and
899 // let the fixup and relocation code deal with it.
900 return true;
901 }
902
903 if (DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
911 ELFSpec)) {
912 // Note that we don't range-check the addend. It's adjusted modulo page
913 // size when converted, so there is no "out of range" condition when using
914 // @pageoff.
915 return true;
916 } else if (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF ||
917 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) {
918 // @gotpageoff/@tlvppageoff can only be used directly, not with an addend.
919 return Addend == 0;
920 }
921
922 return false;
923 }
924
925 template <int Scale> bool isUImm12Offset() const {
926 if (!isImm())
927 return false;
928
929 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
930 if (!MCE)
931 return isSymbolicUImm12Offset(getImm());
932
933 int64_t Val = MCE->getValue();
934 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
935 }
936
937 template <int N, int M>
938 bool isImmInRange() const {
939 if (!isImm())
940 return false;
941 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
942 if (!MCE)
943 return false;
944 int64_t Val = MCE->getValue();
945 return (Val >= N && Val <= M);
946 }
947
948 // NOTE: Also used for isLogicalImmNot as anything that can be represented as
949 // a logical immediate can always be represented when inverted.
950 template <typename T>
951 bool isLogicalImm() const {
952 if (!isImm())
953 return false;
954 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
955 if (!MCE)
956 return false;
957
958 int64_t Val = MCE->getValue();
959 // Avoid left shift by 64 directly.
960 uint64_t Upper = UINT64_C(-1) << (sizeof(T) * 4) << (sizeof(T) * 4);
961 // Allow all-0 or all-1 in top bits to permit bitwise NOT.
962 if ((Val & Upper) && (Val & Upper) != Upper)
963 return false;
964
965 return AArch64_AM::isLogicalImmediate(Val & ~Upper, sizeof(T) * 8);
966 }
967
968 bool isShiftedImm() const { return Kind == k_ShiftedImm; }
969
970 bool isImmRange() const { return Kind == k_ImmRange; }
971
972 /// Returns the immediate value as a pair of (imm, shift) if the immediate is
973 /// a shifted immediate by value 'Shift' or '0', or if it is an unshifted
974 /// immediate that can be shifted by 'Shift'.
975 template <unsigned Width>
976 std::optional<std::pair<int64_t, unsigned>> getShiftedVal() const {
977 if (isShiftedImm() && Width == getShiftedImmShift())
978 if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
979 return std::make_pair(CE->getValue(), Width);
980
981 if (isImm())
982 if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
983 int64_t Val = CE->getValue();
984 if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
985 return std::make_pair(Val >> Width, Width);
986 else
987 return std::make_pair(Val, 0u);
988 }
989
990 return {};
991 }
992
993 bool isAddSubImm() const {
994 if (!isShiftedImm() && !isImm())
995 return false;
996
997 const MCExpr *Expr;
998
999 // An ADD/SUB shifter is either 'lsl #0' or 'lsl #12'.
1000 if (isShiftedImm()) {
1001 unsigned Shift = ShiftedImm.ShiftAmount;
1002 Expr = ShiftedImm.Val;
1003 if (Shift != 0 && Shift != 12)
1004 return false;
1005 } else {
1006 Expr = getImm();
1007 }
1008
1009 AArch64::Specifier ELFSpec;
1010 AArch64::Specifier DarwinSpec;
1011 int64_t Addend;
1012 if (AArch64AsmParser::classifySymbolRef(Expr, ELFSpec, DarwinSpec,
1013 Addend)) {
1014 return DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
1015 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF ||
1016 (DarwinSpec == AArch64::S_MACHO_GOTPAGEOFF && Addend == 0) ||
1024 ELFSpec);
1025 }
1026
1027 // If it's a constant, it should be a real immediate in range.
1028 if (auto ShiftedVal = getShiftedVal<12>())
1029 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
1030
1031 // If it's an expression, we hope for the best and let the fixup/relocation
1032 // code deal with it.
1033 return true;
1034 }
1035
1036 bool isAddSubImmNeg() const {
1037 if (!isShiftedImm() && !isImm())
1038 return false;
1039
1040 // Otherwise it should be a real negative immediate in range.
1041 if (auto ShiftedVal = getShiftedVal<12>())
1042 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
1043
1044 return false;
1045 }
1046
1047 // Signed value in the range -128 to +127. For element widths of
1048 // 16 bits or higher it may also be a signed multiple of 256 in the
1049 // range -32768 to +32512.
1050 // For element-width of 8 bits a range of -128 to 255 is accepted,
1051 // since a copy of a byte can be either signed/unsigned.
1052 template <typename T>
1053 DiagnosticPredicate isSVECpyImm() const {
1054 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1056
1057 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1058 std::is_same<int8_t, T>::value;
1059 if (auto ShiftedImm = getShiftedVal<8>())
1060 if (!(IsByte && ShiftedImm->second) &&
1061 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
1062 << ShiftedImm->second))
1064
1066 }
1067
1068 // Unsigned value in the range 0 to 255. For element widths of
1069 // 16 bits or higher it may also be a signed multiple of 256 in the
1070 // range 0 to 65280.
1071 template <typename T> DiagnosticPredicate isSVEAddSubImm() const {
1072 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
1074
1075 bool IsByte = std::is_same<int8_t, std::make_signed_t<T>>::value ||
1076 std::is_same<int8_t, T>::value;
1077 if (auto ShiftedImm = getShiftedVal<8>())
1078 if (!(IsByte && ShiftedImm->second) &&
1079 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
1080 << ShiftedImm->second))
1082
1084 }
1085
1086 template <typename T> DiagnosticPredicate isSVEPreferredLogicalImm() const {
1087 if (isLogicalImm<T>() && !isSVECpyImm<T>())
1090 }
1091
1092 bool isCondCode() const { return Kind == k_CondCode; }
1093
1094 bool isSIMDImmType10() const {
1095 if (!isImm())
1096 return false;
1097 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1098 if (!MCE)
1099 return false;
1101 }
1102
1103 template<int N>
1104 bool isBranchTarget() const {
1105 if (!isImm())
1106 return false;
1107 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1108 if (!MCE)
1109 return true;
1110 int64_t Val = MCE->getValue();
1111 if (Val & 0x3)
1112 return false;
1113 assert(N > 0 && "Branch target immediate cannot be 0 bits!");
1114 return (Val >= -((1<<(N-1)) << 2) && Val <= (((1<<(N-1))-1) << 2));
1115 }
1116
1117 bool isMovWSymbol(ArrayRef<AArch64::Specifier> AllowedModifiers) const {
1118 if (!isImm())
1119 return false;
1120
1121 AArch64::Specifier ELFSpec;
1122 AArch64::Specifier DarwinSpec;
1123 int64_t Addend;
1124 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFSpec, DarwinSpec,
1125 Addend)) {
1126 return false;
1127 }
1128 if (DarwinSpec != AArch64::S_None)
1129 return false;
1130
1131 return llvm::is_contained(AllowedModifiers, ELFSpec);
1132 }
1133
1134 bool isMovWSymbolG3() const {
1135 return isMovWSymbol({AArch64::S_ABS_G3, AArch64::S_PREL_G3});
1136 }
1137
1138 bool isMovWSymbolG2() const {
1139 return isMovWSymbol({AArch64::S_ABS_G2, AArch64::S_ABS_G2_S,
1143 }
1144
1145 bool isMovWSymbolG1() const {
1146 return isMovWSymbol({AArch64::S_ABS_G1, AArch64::S_ABS_G1_S,
1151 }
1152
1153 bool isMovWSymbolG0() const {
1154 return isMovWSymbol({AArch64::S_ABS_G0, AArch64::S_ABS_G0_S,
1159 }
1160
1161 template<int RegWidth, int Shift>
1162 bool isMOVZMovAlias() const {
1163 if (!isImm()) return false;
1164
1165 const MCExpr *E = getImm();
1166 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
1167 uint64_t Value = CE->getValue();
1168
1169 return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
1170 }
1171 // Only supports the case of Shift being 0 if an expression is used as an
1172 // operand
1173 return !Shift && E;
1174 }
1175
1176 template<int RegWidth, int Shift>
1177 bool isMOVNMovAlias() const {
1178 if (!isImm()) return false;
1179
1180 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1181 if (!CE) return false;
1182 uint64_t Value = CE->getValue();
1183
1184 return AArch64_AM::isMOVNMovAlias(Value, Shift, RegWidth);
1185 }
1186
1187 bool isFPImm() const {
1188 return Kind == k_FPImm &&
1189 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt()) != -1;
1190 }
1191
1192 bool isBarrier() const {
1193 return Kind == k_Barrier && !getBarriernXSModifier();
1194 }
1195 bool isBarriernXS() const {
1196 return Kind == k_Barrier && getBarriernXSModifier();
1197 }
1198 bool isSysReg() const { return Kind == k_SysReg; }
1199
1200 bool isMRSSystemRegister() const {
1201 if (!isSysReg()) return false;
1202
1203 return SysReg.MRSReg != -1U;
1204 }
1205
1206 bool isMSRSystemRegister() const {
1207 if (!isSysReg()) return false;
1208 return SysReg.MSRReg != -1U;
1209 }
1210
1211 bool isSystemPStateFieldWithImm0_1() const {
1212 if (!isSysReg()) return false;
1213 return AArch64PState::lookupPStateImm0_1ByEncoding(SysReg.PStateField);
1214 }
1215
1216 bool isSystemPStateFieldWithImm0_15() const {
1217 if (!isSysReg())
1218 return false;
1219 return AArch64PState::lookupPStateImm0_15ByEncoding(SysReg.PStateField);
1220 }
1221
1222 bool isSVCR() const {
1223 if (Kind != k_SVCR)
1224 return false;
1225 return SVCR.PStateField != -1U;
1226 }
1227
1228 bool isReg() const override {
1229 return Kind == k_Register;
1230 }
1231
1232 bool isVectorList() const { return Kind == k_VectorList; }
1233
1234 bool isScalarReg() const {
1235 return Kind == k_Register && Reg.Kind == RegKind::Scalar;
1236 }
1237
1238 bool isNeonVectorReg() const {
1239 return Kind == k_Register && Reg.Kind == RegKind::NeonVector;
1240 }
1241
1242 bool isNeonVectorRegLo() const {
1243 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1244 (AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1245 Reg.RegNum) ||
1246 AArch64MCRegisterClasses[AArch64::FPR64_loRegClassID].contains(
1247 Reg.RegNum));
1248 }
1249
1250 bool isNeonVectorReg0to7() const {
1251 return Kind == k_Register && Reg.Kind == RegKind::NeonVector &&
1252 (AArch64MCRegisterClasses[AArch64::FPR128_0to7RegClassID].contains(
1253 Reg.RegNum));
1254 }
1255
1256 bool isMatrix() const { return Kind == k_MatrixRegister; }
1257 bool isMatrixTileList() const { return Kind == k_MatrixTileList; }
1258
1259 template <unsigned Class> bool isSVEPredicateAsCounterReg() const {
1260 RegKind RK;
1261 switch (Class) {
1262 case AArch64::PPRRegClassID:
1263 case AArch64::PPR_3bRegClassID:
1264 case AArch64::PPR_p8to15RegClassID:
1265 case AArch64::PNRRegClassID:
1266 case AArch64::PNR_p8to15RegClassID:
1267 case AArch64::PPRorPNRRegClassID:
1268 RK = RegKind::SVEPredicateAsCounter;
1269 break;
1270 default:
1271 llvm_unreachable("Unsupported register class");
1272 }
1273
1274 return (Kind == k_Register && Reg.Kind == RK) &&
1275 AArch64MCRegisterClasses[Class].contains(getReg());
1276 }
1277
1278 template <unsigned Class> bool isSVEVectorReg() const {
1279 RegKind RK;
1280 switch (Class) {
1281 case AArch64::ZPRRegClassID:
1282 case AArch64::ZPR_3bRegClassID:
1283 case AArch64::ZPR_4bRegClassID:
1284 case AArch64::ZPRMul2_LoRegClassID:
1285 case AArch64::ZPRMul2_HiRegClassID:
1286 case AArch64::ZPR_KRegClassID:
1287 RK = RegKind::SVEDataVector;
1288 break;
1289 case AArch64::PPRRegClassID:
1290 case AArch64::PPR_3bRegClassID:
1291 case AArch64::PPR_p8to15RegClassID:
1292 case AArch64::PNRRegClassID:
1293 case AArch64::PNR_p8to15RegClassID:
1294 case AArch64::PPRorPNRRegClassID:
1295 RK = RegKind::SVEPredicateVector;
1296 break;
1297 default:
1298 llvm_unreachable("Unsupported register class");
1299 }
1300
1301 return (Kind == k_Register && Reg.Kind == RK) &&
1302 AArch64MCRegisterClasses[Class].contains(getReg());
1303 }
1304
1305 template <unsigned Class> bool isFPRasZPR() const {
1306 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1307 AArch64MCRegisterClasses[Class].contains(getReg());
1308 }
1309
1310 template <int ElementWidth, unsigned Class>
1311 DiagnosticPredicate isSVEPredicateVectorRegOfWidth() const {
1312 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateVector)
1314
1315 if (isSVEVectorReg<Class>() && (Reg.ElementWidth == ElementWidth))
1317
1319 }
1320
1321 template <int ElementWidth, unsigned Class>
1322 DiagnosticPredicate isSVEPredicateOrPredicateAsCounterRegOfWidth() const {
1323 if (Kind != k_Register || (Reg.Kind != RegKind::SVEPredicateAsCounter &&
1324 Reg.Kind != RegKind::SVEPredicateVector))
1326
1327 if ((isSVEPredicateAsCounterReg<Class>() ||
1328 isSVEPredicateVectorRegOfWidth<ElementWidth, Class>()) &&
1329 Reg.ElementWidth == ElementWidth)
1331
1333 }
1334
1335 template <int ElementWidth, unsigned Class>
1336 DiagnosticPredicate isSVEPredicateAsCounterRegOfWidth() const {
1337 if (Kind != k_Register || Reg.Kind != RegKind::SVEPredicateAsCounter)
1339
1340 if (isSVEPredicateAsCounterReg<Class>() && (Reg.ElementWidth == ElementWidth))
1342
1344 }
1345
1346 template <int ElementWidth, unsigned Class>
1347 DiagnosticPredicate isSVEDataVectorRegOfWidth() const {
1348 if (Kind != k_Register || Reg.Kind != RegKind::SVEDataVector)
1350
1351 if (isSVEVectorReg<Class>() && Reg.ElementWidth == ElementWidth)
1353
1355 }
1356
1357 template <int ElementWidth, unsigned Class,
1358 AArch64_AM::ShiftExtendType ShiftExtendTy, int ShiftWidth,
1359 bool ShiftWidthAlwaysSame>
1360 DiagnosticPredicate isSVEDataVectorRegWithShiftExtend() const {
1361 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1362 if (!VectorMatch.isMatch())
1364
1365 // Give a more specific diagnostic when the user has explicitly typed in
1366 // a shift-amount that does not match what is expected, but for which
1367 // there is also an unscaled addressing mode (e.g. sxtw/uxtw).
1368 bool MatchShift = getShiftExtendAmount() == Log2_32(ShiftWidth / 8);
1369 if (!MatchShift && (ShiftExtendTy == AArch64_AM::UXTW ||
1370 ShiftExtendTy == AArch64_AM::SXTW) &&
1371 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1373
1374 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1376
1378 }
1379
1380 bool isGPR32as64() const {
1381 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1382 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(Reg.RegNum);
1383 }
1384
1385 bool isGPR64as32() const {
1386 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1387 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(Reg.RegNum);
1388 }
1389
1390 bool isGPR64x8() const {
1391 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1392 AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID].contains(
1393 Reg.RegNum);
1394 }
1395
1396 bool isWSeqPair() const {
1397 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1398 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1399 Reg.RegNum);
1400 }
1401
1402 bool isXSeqPair() const {
1403 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1404 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1405 Reg.RegNum);
1406 }
1407
1408 bool isSyspXzrPair() const {
1409 return isGPR64<AArch64::GPR64RegClassID>() && Reg.RegNum == AArch64::XZR;
1410 }
1411
1412 template<int64_t Angle, int64_t Remainder>
1413 DiagnosticPredicate isComplexRotation() const {
1414 if (!isImm())
1416
1417 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1418 if (!CE)
1420 uint64_t Value = CE->getValue();
1421
1422 if (Value % Angle == Remainder && Value <= 270)
1425 }
1426
1427 template <unsigned RegClassID> bool isGPR64() const {
1428 return Kind == k_Register && Reg.Kind == RegKind::Scalar &&
1429 AArch64MCRegisterClasses[RegClassID].contains(getReg());
1430 }
1431
1432 template <unsigned RegClassID, int ExtWidth>
1433 DiagnosticPredicate isGPR64WithShiftExtend() const {
1434 if (Kind != k_Register || Reg.Kind != RegKind::Scalar)
1436
1437 if (isGPR64<RegClassID>() && getShiftExtendType() == AArch64_AM::LSL &&
1438 getShiftExtendAmount() == Log2_32(ExtWidth / 8))
1441 }
1442
1443 /// Is this a vector list with the type implicit (presumably attached to the
1444 /// instruction itself)?
1445 template <RegKind VectorKind, unsigned NumRegs, bool IsConsecutive = false>
1446 bool isImplicitlyTypedVectorList() const {
1447 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1448 VectorList.NumElements == 0 &&
1449 VectorList.RegisterKind == VectorKind &&
1450 (!IsConsecutive || (VectorList.Stride == 1));
1451 }
1452
1453 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1454 unsigned ElementWidth, unsigned Stride = 1>
1455 bool isTypedVectorList() const {
1456 if (Kind != k_VectorList)
1457 return false;
1458 if (VectorList.Count != NumRegs)
1459 return false;
1460 if (VectorList.RegisterKind != VectorKind)
1461 return false;
1462 if (VectorList.ElementWidth != ElementWidth)
1463 return false;
1464 if (VectorList.Stride != Stride)
1465 return false;
1466 return VectorList.NumElements == NumElements;
1467 }
1468
1469 template <RegKind VectorKind, unsigned NumRegs, unsigned NumElements,
1470 unsigned ElementWidth, unsigned RegClass>
1471 DiagnosticPredicate isTypedVectorListMultiple() const {
1472 bool Res =
1473 isTypedVectorList<VectorKind, NumRegs, NumElements, ElementWidth>();
1474 if (!Res)
1476 if (!AArch64MCRegisterClasses[RegClass].contains(VectorList.RegNum))
1479 }
1480
1481 template <RegKind VectorKind, unsigned NumRegs, unsigned Stride,
1482 unsigned ElementWidth>
1483 DiagnosticPredicate isTypedVectorListStrided() const {
1484 bool Res = isTypedVectorList<VectorKind, NumRegs, /*NumElements*/ 0,
1485 ElementWidth, Stride>();
1486 if (!Res)
1488 if ((VectorList.RegNum < (AArch64::Z0 + Stride)) ||
1489 ((VectorList.RegNum >= AArch64::Z16) &&
1490 (VectorList.RegNum < (AArch64::Z16 + Stride))))
1493 }
1494
1495 template <int Min, int Max>
1496 DiagnosticPredicate isVectorIndex() const {
1497 if (Kind != k_VectorIndex)
1499 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1502 }
1503
1504 bool isToken() const override { return Kind == k_Token; }
1505
1506 bool isTokenEqual(StringRef Str) const {
1507 return Kind == k_Token && getToken() == Str;
1508 }
1509 bool isSysCR() const { return Kind == k_SysCR; }
1510 bool isPrefetch() const { return Kind == k_Prefetch; }
1511 bool isPSBHint() const { return Kind == k_PSBHint; }
1512 bool isPHint() const { return Kind == k_PHint; }
1513 bool isBTIHint() const { return Kind == k_BTIHint; }
1514 bool isShiftExtend() const { return Kind == k_ShiftExtend; }
1515 bool isShifter() const {
1516 if (!isShiftExtend())
1517 return false;
1518
1519 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1520 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1521 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR ||
1522 ST == AArch64_AM::MSL);
1523 }
1524
1525 template <unsigned ImmEnum> DiagnosticPredicate isExactFPImm() const {
1526 if (Kind != k_FPImm)
1528
1529 if (getFPImmIsExact()) {
1530 // Lookup the immediate from table of supported immediates.
1531 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1532 assert(Desc && "Unknown enum value");
1533
1534 // Calculate its FP value.
1535 APFloat RealVal(APFloat::IEEEdouble());
1536 auto StatusOrErr =
1537 RealVal.convertFromString(Desc->Repr, APFloat::rmTowardZero);
1538 if (errorToBool(StatusOrErr.takeError()) || *StatusOrErr != APFloat::opOK)
1539 llvm_unreachable("FP immediate is not exact");
1540
1541 if (getFPImm().bitwiseIsEqual(RealVal))
1543 }
1544
1546 }
1547
1548 template <unsigned ImmA, unsigned ImmB>
1549 DiagnosticPredicate isExactFPImm() const {
1550 DiagnosticPredicate Res = DiagnosticPredicate::NoMatch;
1551 if ((Res = isExactFPImm<ImmA>()))
1553 if ((Res = isExactFPImm<ImmB>()))
1555 return Res;
1556 }
1557
1558 bool isExtend() const {
1559 if (!isShiftExtend())
1560 return false;
1561
1562 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1563 return (ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1564 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1565 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW ||
1566 ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1567 ET == AArch64_AM::LSL) &&
1568 getShiftExtendAmount() <= 4;
1569 }
1570
1571 bool isExtend64() const {
1572 if (!isExtend())
1573 return false;
1574 // Make sure the extend expects a 32-bit source register.
1575 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1576 return ET == AArch64_AM::UXTB || ET == AArch64_AM::SXTB ||
1577 ET == AArch64_AM::UXTH || ET == AArch64_AM::SXTH ||
1578 ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW;
1579 }
1580
1581 bool isExtendLSL64() const {
1582 if (!isExtend())
1583 return false;
1584 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1585 return (ET == AArch64_AM::UXTX || ET == AArch64_AM::SXTX ||
1586 ET == AArch64_AM::LSL) &&
1587 getShiftExtendAmount() <= 4;
1588 }
1589
1590 bool isLSLImm3Shift() const {
1591 if (!isShiftExtend())
1592 return false;
1593 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1594 return ET == AArch64_AM::LSL && getShiftExtendAmount() <= 7;
1595 }
1596
1597 template<int Width> bool isMemXExtend() const {
1598 if (!isExtend())
1599 return false;
1600 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1601 return (ET == AArch64_AM::LSL || ET == AArch64_AM::SXTX) &&
1602 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1603 getShiftExtendAmount() == 0);
1604 }
1605
1606 template<int Width> bool isMemWExtend() const {
1607 if (!isExtend())
1608 return false;
1609 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
1610 return (ET == AArch64_AM::UXTW || ET == AArch64_AM::SXTW) &&
1611 (getShiftExtendAmount() == Log2_32(Width / 8) ||
1612 getShiftExtendAmount() == 0);
1613 }
1614
1615 template <unsigned width>
1616 bool isArithmeticShifter() const {
1617 if (!isShifter())
1618 return false;
1619
1620 // An arithmetic shifter is LSL, LSR, or ASR.
1621 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1622 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1623 ST == AArch64_AM::ASR) && getShiftExtendAmount() < width;
1624 }
1625
1626 template <unsigned width>
1627 bool isLogicalShifter() const {
1628 if (!isShifter())
1629 return false;
1630
1631 // A logical shifter is LSL, LSR, ASR or ROR.
1632 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1633 return (ST == AArch64_AM::LSL || ST == AArch64_AM::LSR ||
1634 ST == AArch64_AM::ASR || ST == AArch64_AM::ROR) &&
1635 getShiftExtendAmount() < width;
1636 }
1637
1638 bool isMovImm32Shifter() const {
1639 if (!isShifter())
1640 return false;
1641
1642 // A MOVi shifter is LSL of 0, 16, 32, or 48.
1643 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1644 if (ST != AArch64_AM::LSL)
1645 return false;
1646 uint64_t Val = getShiftExtendAmount();
1647 return (Val == 0 || Val == 16);
1648 }
1649
1650 bool isMovImm64Shifter() const {
1651 if (!isShifter())
1652 return false;
1653
1654 // A MOVi shifter is LSL of 0 or 16.
1655 AArch64_AM::ShiftExtendType ST = getShiftExtendType();
1656 if (ST != AArch64_AM::LSL)
1657 return false;
1658 uint64_t Val = getShiftExtendAmount();
1659 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1660 }
1661
1662 bool isLogicalVecShifter() const {
1663 if (!isShifter())
1664 return false;
1665
1666 // A logical vector shifter is a left shift by 0, 8, 16, or 24.
1667 unsigned Shift = getShiftExtendAmount();
1668 return getShiftExtendType() == AArch64_AM::LSL &&
1669 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1670 }
1671
1672 bool isLogicalVecHalfWordShifter() const {
1673 if (!isLogicalVecShifter())
1674 return false;
1675
1676 // A logical vector shifter is a left shift by 0 or 8.
1677 unsigned Shift = getShiftExtendAmount();
1678 return getShiftExtendType() == AArch64_AM::LSL &&
1679 (Shift == 0 || Shift == 8);
1680 }
1681
1682 bool isMoveVecShifter() const {
1683 if (!isShiftExtend())
1684 return false;
1685
1686 // A logical vector shifter is a left shift by 8 or 16.
1687 unsigned Shift = getShiftExtendAmount();
1688 return getShiftExtendType() == AArch64_AM::MSL &&
1689 (Shift == 8 || Shift == 16);
1690 }
1691
1692 // Fallback unscaled operands are for aliases of LDR/STR that fall back
1693 // to LDUR/STUR when the offset is not legal for the former but is for
1694 // the latter. As such, in addition to checking for being a legal unscaled
1695 // address, also check that it is not a legal scaled address. This avoids
1696 // ambiguity in the matcher.
1697 template<int Width>
1698 bool isSImm9OffsetFB() const {
1699 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1700 }
1701
1702 bool isAdrpLabel() const {
1703 // Validation was handled during parsing, so we just verify that
1704 // something didn't go haywire.
1705 if (!isImm())
1706 return false;
1707
1708 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1709 int64_t Val = CE->getValue();
1710 int64_t Min = - (4096 * (1LL << (21 - 1)));
1711 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1712 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1713 }
1714
1715 return true;
1716 }
1717
1718 bool isAdrLabel() const {
1719 // Validation was handled during parsing, so we just verify that
1720 // something didn't go haywire.
1721 if (!isImm())
1722 return false;
1723
1724 if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1725 int64_t Val = CE->getValue();
1726 int64_t Min = - (1LL << (21 - 1));
1727 int64_t Max = ((1LL << (21 - 1)) - 1);
1728 return Val >= Min && Val <= Max;
1729 }
1730
1731 return true;
1732 }
1733
1734 template <MatrixKind Kind, unsigned EltSize, unsigned RegClass>
1735 DiagnosticPredicate isMatrixRegOperand() const {
1736 if (!isMatrix())
1738 if (getMatrixKind() != Kind ||
1739 !AArch64MCRegisterClasses[RegClass].contains(getMatrixReg()) ||
1740 EltSize != getMatrixElementWidth())
1743 }
1744
1745 bool isPAuthPCRelLabel16Operand() const {
1746 // PAuth PCRel16 operands are similar to regular branch targets, but only
1747 // negative values are allowed for concrete immediates as signing instr
1748 // should be in a lower address.
1749 if (!isImm())
1750 return false;
1751 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1752 if (!MCE)
1753 return true;
1754 int64_t Val = MCE->getValue();
1755 if (Val & 0b11)
1756 return false;
1757 return (Val <= 0) && (Val > -(1 << 18));
1758 }
1759
1760 void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1761 // Add as immediates when possible. Null MCExpr = 0.
1762 if (!Expr)
1764 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1765 Inst.addOperand(MCOperand::createImm(CE->getValue()));
1766 else
1768 }
1769
1770 void addRegOperands(MCInst &Inst, unsigned N) const {
1771 assert(N == 1 && "Invalid number of operands!");
1773 }
1774
1775 void addMatrixOperands(MCInst &Inst, unsigned N) const {
1776 assert(N == 1 && "Invalid number of operands!");
1777 Inst.addOperand(MCOperand::createReg(getMatrixReg()));
1778 }
1779
1780 void addGPR32as64Operands(MCInst &Inst, unsigned N) const {
1781 assert(N == 1 && "Invalid number of operands!");
1782 assert(
1783 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(getReg()));
1784
1785 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1786 MCRegister Reg = RI->getRegClass(AArch64::GPR32RegClassID)
1788
1790 }
1791
1792 void addGPR64as32Operands(MCInst &Inst, unsigned N) const {
1793 assert(N == 1 && "Invalid number of operands!");
1794 assert(
1795 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(getReg()));
1796
1797 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
1798 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
1800
1802 }
1803
1804 template <int Width>
1805 void addFPRasZPRRegOperands(MCInst &Inst, unsigned N) const {
1806 unsigned Base;
1807 switch (Width) {
1808 case 8: Base = AArch64::B0; break;
1809 case 16: Base = AArch64::H0; break;
1810 case 32: Base = AArch64::S0; break;
1811 case 64: Base = AArch64::D0; break;
1812 case 128: Base = AArch64::Q0; break;
1813 default:
1814 llvm_unreachable("Unsupported width");
1815 }
1816 Inst.addOperand(MCOperand::createReg(AArch64::Z0 + getReg() - Base));
1817 }
1818
1819 void addPPRorPNRRegOperands(MCInst &Inst, unsigned N) const {
1820 assert(N == 1 && "Invalid number of operands!");
1821 unsigned Reg = getReg();
1822 // Normalise to PPR
1823 if (Reg >= AArch64::PN0 && Reg <= AArch64::PN15)
1824 Reg = Reg - AArch64::PN0 + AArch64::P0;
1826 }
1827
1828 void addPNRasPPRRegOperands(MCInst &Inst, unsigned N) const {
1829 assert(N == 1 && "Invalid number of operands!");
1830 Inst.addOperand(
1831 MCOperand::createReg((getReg() - AArch64::PN0) + AArch64::P0));
1832 }
1833
1834 void addVectorReg64Operands(MCInst &Inst, unsigned N) const {
1835 assert(N == 1 && "Invalid number of operands!");
1836 assert(
1837 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1838 Inst.addOperand(MCOperand::createReg(AArch64::D0 + getReg() - AArch64::Q0));
1839 }
1840
1841 void addVectorReg128Operands(MCInst &Inst, unsigned N) const {
1842 assert(N == 1 && "Invalid number of operands!");
1843 assert(
1844 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(getReg()));
1846 }
1847
1848 void addVectorRegLoOperands(MCInst &Inst, unsigned N) const {
1849 assert(N == 1 && "Invalid number of operands!");
1851 }
1852
1853 void addVectorReg0to7Operands(MCInst &Inst, unsigned N) const {
1854 assert(N == 1 && "Invalid number of operands!");
1856 }
1857
1858 enum VecListIndexType {
1859 VecListIdx_DReg = 0,
1860 VecListIdx_QReg = 1,
1861 VecListIdx_ZReg = 2,
1862 VecListIdx_PReg = 3,
1863 };
1864
1865 template <VecListIndexType RegTy, unsigned NumRegs,
1866 bool IsConsecutive = false>
1867 void addVectorListOperands(MCInst &Inst, unsigned N) const {
1868 assert(N == 1 && "Invalid number of operands!");
1869 assert((!IsConsecutive || (getVectorListStride() == 1)) &&
1870 "Expected consecutive registers");
1871 static const unsigned FirstRegs[][5] = {
1872 /* DReg */ { AArch64::Q0,
1873 AArch64::D0, AArch64::D0_D1,
1874 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1875 /* QReg */ { AArch64::Q0,
1876 AArch64::Q0, AArch64::Q0_Q1,
1877 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1878 /* ZReg */ { AArch64::Z0,
1879 AArch64::Z0, AArch64::Z0_Z1,
1880 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 },
1881 /* PReg */ { AArch64::P0,
1882 AArch64::P0, AArch64::P0_P1 }
1883 };
1884
1885 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1886 " NumRegs must be <= 4 for ZRegs");
1887
1888 assert((RegTy != VecListIdx_PReg || NumRegs <= 2) &&
1889 " NumRegs must be <= 2 for PRegs");
1890
1891 unsigned FirstReg = FirstRegs[(unsigned)RegTy][NumRegs];
1892 Inst.addOperand(MCOperand::createReg(FirstReg + getVectorListStart() -
1893 FirstRegs[(unsigned)RegTy][0]));
1894 }
1895
1896 template <unsigned NumRegs>
1897 void addStridedVectorListOperands(MCInst &Inst, unsigned N) const {
1898 assert(N == 1 && "Invalid number of operands!");
1899 assert((NumRegs == 2 || NumRegs == 4) && " NumRegs must be 2 or 4");
1900
1901 switch (NumRegs) {
1902 case 2:
1903 if (getVectorListStart() < AArch64::Z16) {
1904 assert((getVectorListStart() < AArch64::Z8) &&
1905 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1906 Inst.addOperand(MCOperand::createReg(
1907 AArch64::Z0_Z8 + getVectorListStart() - AArch64::Z0));
1908 } else {
1909 assert((getVectorListStart() < AArch64::Z24) &&
1910 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1911 Inst.addOperand(MCOperand::createReg(
1912 AArch64::Z16_Z24 + getVectorListStart() - AArch64::Z16));
1913 }
1914 break;
1915 case 4:
1916 if (getVectorListStart() < AArch64::Z16) {
1917 assert((getVectorListStart() < AArch64::Z4) &&
1918 (getVectorListStart() >= AArch64::Z0) && "Invalid Register");
1919 Inst.addOperand(MCOperand::createReg(
1920 AArch64::Z0_Z4_Z8_Z12 + getVectorListStart() - AArch64::Z0));
1921 } else {
1922 assert((getVectorListStart() < AArch64::Z20) &&
1923 (getVectorListStart() >= AArch64::Z16) && "Invalid Register");
1924 Inst.addOperand(MCOperand::createReg(
1925 AArch64::Z16_Z20_Z24_Z28 + getVectorListStart() - AArch64::Z16));
1926 }
1927 break;
1928 default:
1929 llvm_unreachable("Unsupported number of registers for strided vec list");
1930 }
1931 }
1932
1933 void addMatrixTileListOperands(MCInst &Inst, unsigned N) const {
1934 assert(N == 1 && "Invalid number of operands!");
1935 unsigned RegMask = getMatrixTileListRegMask();
1936 assert(RegMask <= 0xFF && "Invalid mask!");
1937 Inst.addOperand(MCOperand::createImm(RegMask));
1938 }
1939
1940 void addVectorIndexOperands(MCInst &Inst, unsigned N) const {
1941 assert(N == 1 && "Invalid number of operands!");
1942 Inst.addOperand(MCOperand::createImm(getVectorIndex()));
1943 }
1944
1945 template <unsigned ImmIs0, unsigned ImmIs1>
1946 void addExactFPImmOperands(MCInst &Inst, unsigned N) const {
1947 assert(N == 1 && "Invalid number of operands!");
1948 assert(bool(isExactFPImm<ImmIs0, ImmIs1>()) && "Invalid operand");
1949 Inst.addOperand(MCOperand::createImm(bool(isExactFPImm<ImmIs1>())));
1950 }
1951
1952 void addImmOperands(MCInst &Inst, unsigned N) const {
1953 assert(N == 1 && "Invalid number of operands!");
1954 // If this is a pageoff symrefexpr with an addend, adjust the addend
1955 // to be only the page-offset portion. Otherwise, just add the expr
1956 // as-is.
1957 addExpr(Inst, getImm());
1958 }
1959
1960 template <int Shift>
1961 void addImmWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1962 assert(N == 2 && "Invalid number of operands!");
1963 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1964 Inst.addOperand(MCOperand::createImm(ShiftedVal->first));
1965 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1966 } else if (isShiftedImm()) {
1967 addExpr(Inst, getShiftedImmVal());
1968 Inst.addOperand(MCOperand::createImm(getShiftedImmShift()));
1969 } else {
1970 addExpr(Inst, getImm());
1971 Inst.addOperand(MCOperand::createImm(0));
1972 }
1973 }
1974
1975 template <int Shift>
1976 void addImmNegWithOptionalShiftOperands(MCInst &Inst, unsigned N) const {
1977 assert(N == 2 && "Invalid number of operands!");
1978 if (auto ShiftedVal = getShiftedVal<Shift>()) {
1979 Inst.addOperand(MCOperand::createImm(-ShiftedVal->first));
1980 Inst.addOperand(MCOperand::createImm(ShiftedVal->second));
1981 } else
1982 llvm_unreachable("Not a shifted negative immediate");
1983 }
1984
1985 void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1986 assert(N == 1 && "Invalid number of operands!");
1987 Inst.addOperand(MCOperand::createImm(getCondCode()));
1988 }
1989
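// ADRP targets are page (4 KiB) granular, so a constant target is encoded as
// its page number (value >> 12); non-constant expressions are added unchanged
// and left for the fixup/relocation to resolve.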
1990 void addAdrpLabelOperands(MCInst &Inst, unsigned N) const {
1991 assert(N == 1 && "Invalid number of operands!");
1992 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
1993 if (!MCE)
1994 addExpr(Inst, getImm());
1995 else
1996 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 12));
1997 }
1998
1999 void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
2000 addImmOperands(Inst, N);
2001 }
2002
2003 template<int Scale>
2004 void addUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
2005 assert(N == 1 && "Invalid number of operands!");
2006 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2007
2008 if (!MCE) {
2009 addExpr(Inst, getImm());
2010 return;
2011 }
2012 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2013 }
2014
2015 void addUImm6Operands(MCInst &Inst, unsigned N) const {
2016 assert(N == 1 && "Invalid number of operands!");
2017 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2018 Inst.addOperand(MCOperand::createImm(MCE->getValue()));
2019 }
2020
2021 template <int Scale>
2022 void addImmScaledOperands(MCInst &Inst, unsigned N) const {
2023 assert(N == 1 && "Invalid number of operands!");
2024 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2025 Inst.addOperand(MCOperand::createImm(MCE->getValue() / Scale));
2026 }
2027
2028 template <int Scale>
2029 void addImmScaledRangeOperands(MCInst &Inst, unsigned N) const {
2030 assert(N == 1 && "Invalid number of operands!");
2031 Inst.addOperand(MCOperand::createImm(getFirstImmVal() / Scale));
2032 }
2033
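// Logical immediates are added in their (N, immr, imms) bitmask encoding; the
// "Not" variant encodes the bitwise complement of the written value, as used
// by the inverted-immediate aliases.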
2034 template <typename T>
2035 void addLogicalImmOperands(MCInst &Inst, unsigned N) const {
2036 assert(N == 1 && "Invalid number of operands!");
2037 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2038 std::make_unsigned_t<T> Val = MCE->getValue();
2039 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2040 Inst.addOperand(MCOperand::createImm(encoding));
2041 }
2042
2043 template <typename T>
2044 void addLogicalImmNotOperands(MCInst &Inst, unsigned N) const {
2045 assert(N == 1 && "Invalid number of operands!");
2046 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2047 std::make_unsigned_t<T> Val = ~MCE->getValue();
2048 uint64_t encoding = AArch64_AM::encodeLogicalImmediate(Val, sizeof(T) * 8);
2049 Inst.addOperand(MCOperand::createImm(encoding));
2050 }
2051
2052 void addSIMDImmType10Operands(MCInst &Inst, unsigned N) const {
2053 assert(N == 1 && "Invalid number of operands!");
2054 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2055 uint64_t encoding = AArch64_AM::encodeAdvSIMDModImmType10(MCE->getValue());
2056 Inst.addOperand(MCOperand::createImm(encoding));
2057 }
2058
2059 void addBranchTarget26Operands(MCInst &Inst, unsigned N) const {
2060 // Branch operands don't encode the low bits, so shift them off
2061 // here. If it's a label, however, just put it on directly as there's
2062 // not enough information now to do anything.
2063 assert(N == 1 && "Invalid number of operands!");
2064 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2065 if (!MCE) {
2066 addExpr(Inst, getImm());
2067 return;
2068 }
2069 assert(MCE && "Invalid constant immediate operand!");
2070 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2071 }
2072
2073 void addPAuthPCRelLabel16Operands(MCInst &Inst, unsigned N) const {
2074 // PC-relative operands don't encode the low bits, so shift them off
2075 // here. If it's a label, however, just put it on directly as there's
2076 // not enough information now to do anything.
2077 assert(N == 1 && "Invalid number of operands!");
2078 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2079 if (!MCE) {
2080 addExpr(Inst, getImm());
2081 return;
2082 }
2083 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2084 }
2085
2086 void addPCRelLabel19Operands(MCInst &Inst, unsigned N) const {
2087 // Branch operands don't encode the low bits, so shift them off
2088 // here. If it's a label, however, just put it on directly as there's
2089 // not enough information now to do anything.
2090 assert(N == 1 && "Invalid number of operands!");
2091 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2092 if (!MCE) {
2093 addExpr(Inst, getImm());
2094 return;
2095 }
2096 assert(MCE && "Invalid constant immediate operand!");
2097 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2098 }
2099
2100 void addPCRelLabel9Operands(MCInst &Inst, unsigned N) const {
2101 // Branch operands don't encode the low bits, so shift them off
2102 // here. If it's a label, however, just put it on directly as there's
2103 // not enough information now to do anything.
2104 assert(N == 1 && "Invalid number of operands!");
2105 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2106 if (!MCE) {
2107 addExpr(Inst, getImm());
2108 return;
2109 }
2110 assert(MCE && "Invalid constant immediate operand!");
2111 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2112 }
2113
2114 void addBranchTarget14Operands(MCInst &Inst, unsigned N) const {
2115 // Branch operands don't encode the low bits, so shift them off
2116 // here. If it's a label, however, just put it on directly as there's
2117 // not enough information now to do anything.
2118 assert(N == 1 && "Invalid number of operands!");
2119 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(getImm());
2120 if (!MCE) {
2121 addExpr(Inst, getImm());
2122 return;
2123 }
2124 assert(MCE && "Invalid constant immediate operand!");
2125 Inst.addOperand(MCOperand::createImm(MCE->getValue() >> 2));
2126 }
2127
2128 void addFPImmOperands(MCInst &Inst, unsigned N) const {
2129 assert(N == 1 && "Invalid number of operands!");
2130 Inst.addOperand(MCOperand::createImm(
2131 AArch64_AM::getFP64Imm(getFPImm().bitcastToAPInt())));
2132 }
2133
2134 void addBarrierOperands(MCInst &Inst, unsigned N) const {
2135 assert(N == 1 && "Invalid number of operands!");
2136 Inst.addOperand(MCOperand::createImm(getBarrier()));
2137 }
2138
2139 void addBarriernXSOperands(MCInst &Inst, unsigned N) const {
2140 assert(N == 1 && "Invalid number of operands!");
2141 Inst.addOperand(MCOperand::createImm(getBarrier()));
2142 }
2143
2144 void addMRSSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2145 assert(N == 1 && "Invalid number of operands!");
2146
2147 Inst.addOperand(MCOperand::createImm(SysReg.MRSReg));
2148 }
2149
2150 void addMSRSystemRegisterOperands(MCInst &Inst, unsigned N) const {
2151 assert(N == 1 && "Invalid number of operands!");
2152
2153 Inst.addOperand(MCOperand::createImm(SysReg.MSRReg));
2154 }
2155
2156 void addSystemPStateFieldWithImm0_1Operands(MCInst &Inst, unsigned N) const {
2157 assert(N == 1 && "Invalid number of operands!");
2158
2159 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2160 }
2161
2162 void addSVCROperands(MCInst &Inst, unsigned N) const {
2163 assert(N == 1 && "Invalid number of operands!");
2164
2165 Inst.addOperand(MCOperand::createImm(SVCR.PStateField));
2166 }
2167
2168 void addSystemPStateFieldWithImm0_15Operands(MCInst &Inst, unsigned N) const {
2169 assert(N == 1 && "Invalid number of operands!");
2170
2171 Inst.addOperand(MCOperand::createImm(SysReg.PStateField));
2172 }
2173
2174 void addSysCROperands(MCInst &Inst, unsigned N) const {
2175 assert(N == 1 && "Invalid number of operands!");
2176 Inst.addOperand(MCOperand::createImm(getSysCR()));
2177 }
2178
2179 void addPrefetchOperands(MCInst &Inst, unsigned N) const {
2180 assert(N == 1 && "Invalid number of operands!");
2181 Inst.addOperand(MCOperand::createImm(getPrefetch()));
2182 }
2183
2184 void addPSBHintOperands(MCInst &Inst, unsigned N) const {
2185 assert(N == 1 && "Invalid number of operands!");
2186 Inst.addOperand(MCOperand::createImm(getPSBHint()));
2187 }
2188
2189 void addPHintOperands(MCInst &Inst, unsigned N) const {
2190 assert(N == 1 && "Invalid number of operands!");
2191 Inst.addOperand(MCOperand::createImm(getPHint()));
2192 }
2193
2194 void addBTIHintOperands(MCInst &Inst, unsigned N) const {
2195 assert(N == 1 && "Invalid number of operands!");
2196 Inst.addOperand(MCOperand::createImm(getBTIHint()));
2197 }
2198
2199 void addShifterOperands(MCInst &Inst, unsigned N) const {
2200 assert(N == 1 && "Invalid number of operands!");
2201 unsigned Imm =
2202 AArch64_AM::getShifterImm(getShiftExtendType(), getShiftExtendAmount());
2203 Inst.addOperand(MCOperand::createImm(Imm));
2204 }
2205
2206 void addLSLImm3ShifterOperands(MCInst &Inst, unsigned N) const {
2207 assert(N == 1 && "Invalid number of operands!");
2208 unsigned Imm = getShiftExtendAmount();
2209 Inst.addOperand(MCOperand::createImm(Imm));
2210 }
2211
2212 void addSyspXzrPairOperand(MCInst &Inst, unsigned N) const {
2213 assert(N == 1 && "Invalid number of operands!");
2214
2215 if (!isScalarReg())
2216 return;
2217
2218 const MCRegisterInfo *RI = Ctx.getRegisterInfo();
2219 MCRegister Reg = RI->getRegClass(AArch64::GPR64RegClassID)
2220 .getRegister(RI->getEncodingValue(getReg()));
2221 if (Reg != AArch64::XZR)
2222 llvm_unreachable("wrong register");
2223
2224 Inst.addOperand(MCOperand::createReg(AArch64::XZR));
2225 }
2226
2227 void addExtendOperands(MCInst &Inst, unsigned N) const {
2228 assert(N == 1 && "Invalid number of operands!");
2229 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2230 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTW;
2231 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2232 Inst.addOperand(MCOperand::createImm(Imm));
2233 }
2234
2235 void addExtend64Operands(MCInst &Inst, unsigned N) const {
2236 assert(N == 1 && "Invalid number of operands!");
2237 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2238 if (ET == AArch64_AM::LSL) ET = AArch64_AM::UXTX;
2239 unsigned Imm = AArch64_AM::getArithExtendImm(ET, getShiftExtendAmount());
2240 Inst.addOperand(MCOperand::createImm(Imm));
2241 }
2242
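// Memory extend operands are emitted as two immediates: whether the extend is
// a signed form (SXTW/SXTX) and whether the index is scaled (a non-zero shift
// amount).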
2243 void addMemExtendOperands(MCInst &Inst, unsigned N) const {
2244 assert(N == 2 && "Invalid number of operands!");
2245 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2246 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2247 Inst.addOperand(MCOperand::createImm(IsSigned));
2248 Inst.addOperand(MCOperand::createImm(getShiftExtendAmount() != 0));
2249 }
2250
2251 // For 8-bit load/store instructions with a register offset, both the
2252 // "DoShift" and "NoShift" variants have a shift of 0. Because of this,
2253 // they're disambiguated by whether the shift was explicit or implicit rather
2254 // than its size.
2255 void addMemExtend8Operands(MCInst &Inst, unsigned N) const {
2256 assert(N == 2 && "Invalid number of operands!");
2257 AArch64_AM::ShiftExtendType ET = getShiftExtendType();
2258 bool IsSigned = ET == AArch64_AM::SXTW || ET == AArch64_AM::SXTX;
2259 Inst.addOperand(MCOperand::createImm(IsSigned));
2260 Inst.addOperand(MCOperand::createImm(hasShiftExtendAmount()));
2261 }
2262
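// MOV wide-immediate aliases: extract the 16-bit chunk selected by Shift. The
// MOVN form inverts the value first, since MOVN materialises the bitwise NOT
// of its encoded (shifted) immediate.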
2263 template<int Shift>
2264 void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
2265 assert(N == 1 && "Invalid number of operands!");
2266
2267 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2268 if (CE) {
2269 uint64_t Value = CE->getValue();
2270 Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
2271 } else {
2272 addExpr(Inst, getImm());
2273 }
2274 }
2275
2276 template<int Shift>
2277 void addMOVNMovAliasOperands(MCInst &Inst, unsigned N) const {
2278 assert(N == 1 && "Invalid number of operands!");
2279
2280 const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
2281 uint64_t Value = CE->getValue();
2282 Inst.addOperand(MCOperand::createImm((~Value >> Shift) & 0xffff));
2283 }
2284
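// Complex rotation immediates: the "even" form accepts rotations 0/90/180/270
// and encodes rot/90; the "odd" form accepts 90/270 and encodes (rot-90)/180.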
2285 void addComplexRotationEvenOperands(MCInst &Inst, unsigned N) const {
2286 assert(N == 1 && "Invalid number of operands!");
2287 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2288 Inst.addOperand(MCOperand::createImm(MCE->getValue() / 90));
2289 }
2290
2291 void addComplexRotationOddOperands(MCInst &Inst, unsigned N) const {
2292 assert(N == 1 && "Invalid number of operands!");
2293 const MCConstantExpr *MCE = cast<MCConstantExpr>(getImm());
2294 Inst.addOperand(MCOperand::createImm((MCE->getValue() - 90) / 180));
2295 }
2296
2297 void print(raw_ostream &OS, const MCAsmInfo &MAI) const override;
2298
2299 static std::unique_ptr<AArch64Operand>
2300 CreateToken(StringRef Str, SMLoc S, MCContext &Ctx, bool IsSuffix = false) {
2301 auto Op = std::make_unique<AArch64Operand>(k_Token, Ctx);
2302 Op->Tok.Data = Str.data();
2303 Op->Tok.Length = Str.size();
2304 Op->Tok.IsSuffix = IsSuffix;
2305 Op->StartLoc = S;
2306 Op->EndLoc = S;
2307 return Op;
2308 }
2309
2310 static std::unique_ptr<AArch64Operand>
2311 CreateReg(unsigned RegNum, RegKind Kind, SMLoc S, SMLoc E, MCContext &Ctx,
2312 RegConstraintEqualityTy EqTy = RegConstraintEqualityTy::EqualsReg,
2313 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2314 unsigned ShiftAmount = 0,
2315 unsigned HasExplicitAmount = false) {
2316 auto Op = std::make_unique<AArch64Operand>(k_Register, Ctx);
2317 Op->Reg.RegNum = RegNum;
2318 Op->Reg.Kind = Kind;
2319 Op->Reg.ElementWidth = 0;
2320 Op->Reg.EqualityTy = EqTy;
2321 Op->Reg.ShiftExtend.Type = ExtTy;
2322 Op->Reg.ShiftExtend.Amount = ShiftAmount;
2323 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2324 Op->StartLoc = S;
2325 Op->EndLoc = E;
2326 return Op;
2327 }
2328
2329 static std::unique_ptr<AArch64Operand>
2330 CreateVectorReg(unsigned RegNum, RegKind Kind, unsigned ElementWidth,
2331 SMLoc S, SMLoc E, MCContext &Ctx,
2332 AArch64_AM::ShiftExtendType ExtTy = AArch64_AM::LSL,
2333 unsigned ShiftAmount = 0,
2334 unsigned HasExplicitAmount = false) {
2335 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
2336 Kind == RegKind::SVEPredicateVector ||
2337 Kind == RegKind::SVEPredicateAsCounter) &&
2338 "Invalid vector kind");
2339 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
2340 HasExplicitAmount);
2341 Op->Reg.ElementWidth = ElementWidth;
2342 return Op;
2343 }
2344
2345 static std::unique_ptr<AArch64Operand>
2346 CreateVectorList(unsigned RegNum, unsigned Count, unsigned Stride,
2347 unsigned NumElements, unsigned ElementWidth,
2348 RegKind RegisterKind, SMLoc S, SMLoc E, MCContext &Ctx) {
2349 auto Op = std::make_unique<AArch64Operand>(k_VectorList, Ctx);
2350 Op->VectorList.RegNum = RegNum;
2351 Op->VectorList.Count = Count;
2352 Op->VectorList.Stride = Stride;
2353 Op->VectorList.NumElements = NumElements;
2354 Op->VectorList.ElementWidth = ElementWidth;
2355 Op->VectorList.RegisterKind = RegisterKind;
2356 Op->StartLoc = S;
2357 Op->EndLoc = E;
2358 return Op;
2359 }
2360
2361 static std::unique_ptr<AArch64Operand>
2362 CreateVectorIndex(int Idx, SMLoc S, SMLoc E, MCContext &Ctx) {
2363 auto Op = std::make_unique<AArch64Operand>(k_VectorIndex, Ctx);
2364 Op->VectorIndex.Val = Idx;
2365 Op->StartLoc = S;
2366 Op->EndLoc = E;
2367 return Op;
2368 }
2369
2370 static std::unique_ptr<AArch64Operand>
2371 CreateMatrixTileList(unsigned RegMask, SMLoc S, SMLoc E, MCContext &Ctx) {
2372 auto Op = std::make_unique<AArch64Operand>(k_MatrixTileList, Ctx);
2373 Op->MatrixTileList.RegMask = RegMask;
2374 Op->StartLoc = S;
2375 Op->EndLoc = E;
2376 return Op;
2377 }
2378
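// Expands a ZA tile of a given element width into the set of 64-bit (ZAD)
// tiles it overlaps architecturally, so that aliasing between tiles of
// different element sizes can be detected.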
2379 static void ComputeRegsForAlias(unsigned Reg, SmallSet<unsigned, 8> &OutRegs,
2380 const unsigned ElementWidth) {
2381 static std::map<std::pair<unsigned, unsigned>, std::vector<unsigned>>
2382 RegMap = {
2383 {{0, AArch64::ZAB0},
2384 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2385 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2386 {{8, AArch64::ZAB0},
2387 {AArch64::ZAD0, AArch64::ZAD1, AArch64::ZAD2, AArch64::ZAD3,
2388 AArch64::ZAD4, AArch64::ZAD5, AArch64::ZAD6, AArch64::ZAD7}},
2389 {{16, AArch64::ZAH0},
2390 {AArch64::ZAD0, AArch64::ZAD2, AArch64::ZAD4, AArch64::ZAD6}},
2391 {{16, AArch64::ZAH1},
2392 {AArch64::ZAD1, AArch64::ZAD3, AArch64::ZAD5, AArch64::ZAD7}},
2393 {{32, AArch64::ZAS0}, {AArch64::ZAD0, AArch64::ZAD4}},
2394 {{32, AArch64::ZAS1}, {AArch64::ZAD1, AArch64::ZAD5}},
2395 {{32, AArch64::ZAS2}, {AArch64::ZAD2, AArch64::ZAD6}},
2396 {{32, AArch64::ZAS3}, {AArch64::ZAD3, AArch64::ZAD7}},
2397 };
2398
2399 if (ElementWidth == 64)
2400 OutRegs.insert(Reg);
2401 else {
2402 std::vector<unsigned> Regs = RegMap[std::make_pair(ElementWidth, Reg)];
2403 assert(!Regs.empty() && "Invalid tile or element width!");
2404 OutRegs.insert_range(Regs);
2405 }
2406 }
2407
2408 static std::unique_ptr<AArch64Operand> CreateImm(const MCExpr *Val, SMLoc S,
2409 SMLoc E, MCContext &Ctx) {
2410 auto Op = std::make_unique<AArch64Operand>(k_Immediate, Ctx);
2411 Op->Imm.Val = Val;
2412 Op->StartLoc = S;
2413 Op->EndLoc = E;
2414 return Op;
2415 }
2416
2417 static std::unique_ptr<AArch64Operand> CreateShiftedImm(const MCExpr *Val,
2418 unsigned ShiftAmount,
2419 SMLoc S, SMLoc E,
2420 MCContext &Ctx) {
2421 auto Op = std::make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
2422 Op->ShiftedImm.Val = Val;
2423 Op->ShiftedImm.ShiftAmount = ShiftAmount;
2424 Op->StartLoc = S;
2425 Op->EndLoc = E;
2426 return Op;
2427 }
2428
2429 static std::unique_ptr<AArch64Operand> CreateImmRange(unsigned First,
2430 unsigned Last, SMLoc S,
2431 SMLoc E,
2432 MCContext &Ctx) {
2433 auto Op = std::make_unique<AArch64Operand>(k_ImmRange, Ctx);
2434 Op->ImmRange.First = First;
2435 Op->ImmRange.Last = Last;
2436 Op->EndLoc = E;
2437 return Op;
2438 }
2439
2440 static std::unique_ptr<AArch64Operand>
2441 CreateCondCode(AArch64CC::CondCode Code, SMLoc S, SMLoc E, MCContext &Ctx) {
2442 auto Op = std::make_unique<AArch64Operand>(k_CondCode, Ctx);
2443 Op->CondCode.Code = Code;
2444 Op->StartLoc = S;
2445 Op->EndLoc = E;
2446 return Op;
2447 }
2448
2449 static std::unique_ptr<AArch64Operand>
2450 CreateFPImm(APFloat Val, bool IsExact, SMLoc S, MCContext &Ctx) {
2451 auto Op = std::make_unique<AArch64Operand>(k_FPImm, Ctx);
2452 Op->FPImm.Val = Val.bitcastToAPInt().getSExtValue();
2453 Op->FPImm.IsExact = IsExact;
2454 Op->StartLoc = S;
2455 Op->EndLoc = S;
2456 return Op;
2457 }
2458
2459 static std::unique_ptr<AArch64Operand> CreateBarrier(unsigned Val,
2460 StringRef Str,
2461 SMLoc S,
2462 MCContext &Ctx,
2463 bool HasnXSModifier) {
2464 auto Op = std::make_unique<AArch64Operand>(k_Barrier, Ctx);
2465 Op->Barrier.Val = Val;
2466 Op->Barrier.Data = Str.data();
2467 Op->Barrier.Length = Str.size();
2468 Op->Barrier.HasnXSModifier = HasnXSModifier;
2469 Op->StartLoc = S;
2470 Op->EndLoc = S;
2471 return Op;
2472 }
2473
2474 static std::unique_ptr<AArch64Operand> CreateSysReg(StringRef Str, SMLoc S,
2475 uint32_t MRSReg,
2476 uint32_t MSRReg,
2477 uint32_t PStateField,
2478 MCContext &Ctx) {
2479 auto Op = std::make_unique<AArch64Operand>(k_SysReg, Ctx);
2480 Op->SysReg.Data = Str.data();
2481 Op->SysReg.Length = Str.size();
2482 Op->SysReg.MRSReg = MRSReg;
2483 Op->SysReg.MSRReg = MSRReg;
2484 Op->SysReg.PStateField = PStateField;
2485 Op->StartLoc = S;
2486 Op->EndLoc = S;
2487 return Op;
2488 }
2489
2490 static std::unique_ptr<AArch64Operand>
2491 CreatePHintInst(unsigned Val, StringRef Str, SMLoc S, MCContext &Ctx) {
2492 auto Op = std::make_unique<AArch64Operand>(k_PHint, Ctx);
2493 Op->PHint.Val = Val;
2494 Op->PHint.Data = Str.data();
2495 Op->PHint.Length = Str.size();
2496 Op->StartLoc = S;
2497 Op->EndLoc = S;
2498 return Op;
2499 }
2500
2501 static std::unique_ptr<AArch64Operand> CreateSysCR(unsigned Val, SMLoc S,
2502 SMLoc E, MCContext &Ctx) {
2503 auto Op = std::make_unique<AArch64Operand>(k_SysCR, Ctx);
2504 Op->SysCRImm.Val = Val;
2505 Op->StartLoc = S;
2506 Op->EndLoc = E;
2507 return Op;
2508 }
2509
2510 static std::unique_ptr<AArch64Operand> CreatePrefetch(unsigned Val,
2511 StringRef Str,
2512 SMLoc S,
2513 MCContext &Ctx) {
2514 auto Op = std::make_unique<AArch64Operand>(k_Prefetch, Ctx);
2515 Op->Prefetch.Val = Val;
2516 Op->Barrier.Data = Str.data();
2517 Op->Barrier.Length = Str.size();
2518 Op->StartLoc = S;
2519 Op->EndLoc = S;
2520 return Op;
2521 }
2522
2523 static std::unique_ptr<AArch64Operand> CreatePSBHint(unsigned Val,
2524 StringRef Str,
2525 SMLoc S,
2526 MCContext &Ctx) {
2527 auto Op = std::make_unique<AArch64Operand>(k_PSBHint, Ctx);
2528 Op->PSBHint.Val = Val;
2529 Op->PSBHint.Data = Str.data();
2530 Op->PSBHint.Length = Str.size();
2531 Op->StartLoc = S;
2532 Op->EndLoc = S;
2533 return Op;
2534 }
2535
2536 static std::unique_ptr<AArch64Operand> CreateBTIHint(unsigned Val,
2537 StringRef Str,
2538 SMLoc S,
2539 MCContext &Ctx) {
2540 auto Op = std::make_unique<AArch64Operand>(k_BTIHint, Ctx);
2541 Op->BTIHint.Val = Val | 32;
2542 Op->BTIHint.Data = Str.data();
2543 Op->BTIHint.Length = Str.size();
2544 Op->StartLoc = S;
2545 Op->EndLoc = S;
2546 return Op;
2547 }
2548
2549 static std::unique_ptr<AArch64Operand>
2550 CreateMatrixRegister(unsigned RegNum, unsigned ElementWidth, MatrixKind Kind,
2551 SMLoc S, SMLoc E, MCContext &Ctx) {
2552 auto Op = std::make_unique<AArch64Operand>(k_MatrixRegister, Ctx);
2553 Op->MatrixReg.RegNum = RegNum;
2554 Op->MatrixReg.ElementWidth = ElementWidth;
2555 Op->MatrixReg.Kind = Kind;
2556 Op->StartLoc = S;
2557 Op->EndLoc = E;
2558 return Op;
2559 }
2560
2561 static std::unique_ptr<AArch64Operand>
2562 CreateSVCR(uint32_t PStateField, StringRef Str, SMLoc S, MCContext &Ctx) {
2563 auto Op = std::make_unique<AArch64Operand>(k_SVCR, Ctx);
2564 Op->SVCR.PStateField = PStateField;
2565 Op->SVCR.Data = Str.data();
2566 Op->SVCR.Length = Str.size();
2567 Op->StartLoc = S;
2568 Op->EndLoc = S;
2569 return Op;
2570 }
2571
2572 static std::unique_ptr<AArch64Operand>
2573 CreateShiftExtend(AArch64_AM::ShiftExtendType ShOp, unsigned Val,
2574 bool HasExplicitAmount, SMLoc S, SMLoc E, MCContext &Ctx) {
2575 auto Op = std::make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2576 Op->ShiftExtend.Type = ShOp;
2577 Op->ShiftExtend.Amount = Val;
2578 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2579 Op->StartLoc = S;
2580 Op->EndLoc = E;
2581 return Op;
2582 }
2583};
2584
2585} // end anonymous namespace.
2586
2587void AArch64Operand::print(raw_ostream &OS, const MCAsmInfo &MAI) const {
2588 switch (Kind) {
2589 case k_FPImm:
2590 OS << "<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2591 if (!getFPImmIsExact())
2592 OS << " (inexact)";
2593 OS << ">";
2594 break;
2595 case k_Barrier: {
2596 StringRef Name = getBarrierName();
2597 if (!Name.empty())
2598 OS << "<barrier " << Name << ">";
2599 else
2600 OS << "<barrier invalid #" << getBarrier() << ">";
2601 break;
2602 }
2603 case k_Immediate:
2604 MAI.printExpr(OS, *getImm());
2605 break;
2606 case k_ShiftedImm: {
2607 unsigned Shift = getShiftedImmShift();
2608 OS << "<shiftedimm ";
2609 MAI.printExpr(OS, *getShiftedImmVal());
2610 OS << ", lsl #" << AArch64_AM::getShiftValue(Shift) << ">";
2611 break;
2612 }
2613 case k_ImmRange: {
2614 OS << "<immrange ";
2615 OS << getFirstImmVal();
2616 OS << ":" << getLastImmVal() << ">";
2617 break;
2618 }
2619 case k_CondCode:
2620 OS << "<condcode " << getCondCode() << ">";
2621 break;
2622 case k_VectorList: {
2623 OS << "<vectorlist ";
2624 unsigned Reg = getVectorListStart();
2625 for (unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2626 OS << Reg + i * getVectorListStride() << " ";
2627 OS << ">";
2628 break;
2629 }
2630 case k_VectorIndex:
2631 OS << "<vectorindex " << getVectorIndex() << ">";
2632 break;
2633 case k_SysReg:
2634 OS << "<sysreg: " << getSysReg() << '>';
2635 break;
2636 case k_Token:
2637 OS << "'" << getToken() << "'";
2638 break;
2639 case k_SysCR:
2640 OS << "c" << getSysCR();
2641 break;
2642 case k_Prefetch: {
2643 StringRef Name = getPrefetchName();
2644 if (!Name.empty())
2645 OS << "<prfop " << Name << ">";
2646 else
2647 OS << "<prfop invalid #" << getPrefetch() << ">";
2648 break;
2649 }
2650 case k_PSBHint:
2651 OS << getPSBHintName();
2652 break;
2653 case k_PHint:
2654 OS << getPHintName();
2655 break;
2656 case k_BTIHint:
2657 OS << getBTIHintName();
2658 break;
2659 case k_MatrixRegister:
2660 OS << "<matrix " << getMatrixReg() << ">";
2661 break;
2662 case k_MatrixTileList: {
2663 OS << "<matrixlist ";
2664 unsigned RegMask = getMatrixTileListRegMask();
2665 unsigned MaxBits = 8;
2666 for (unsigned I = MaxBits; I > 0; --I)
2667 OS << ((RegMask & (1 << (I - 1))) >> (I - 1));
2668 OS << '>';
2669 break;
2670 }
2671 case k_SVCR: {
2672 OS << getSVCR();
2673 break;
2674 }
2675 case k_Register:
2676 OS << "<register " << getReg() << ">";
2677 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2678 break;
2679 [[fallthrough]];
2680 case k_ShiftExtend:
2681 OS << "<" << AArch64_AM::getShiftExtendName(getShiftExtendType()) << " #"
2682 << getShiftExtendAmount();
2683 if (!hasShiftExtendAmount())
2684 OS << "<imp>";
2685 OS << '>';
2686 break;
2687 }
2688}
2689
2690/// @name Auto-generated Match Functions
2691/// {
2692
2693 static MCRegister MatchRegisterName(StringRef Name);
2694
2695/// }
2696
2697static unsigned MatchNeonVectorRegName(StringRef Name) {
2698 return StringSwitch<unsigned>(Name.lower())
2699 .Case("v0", AArch64::Q0)
2700 .Case("v1", AArch64::Q1)
2701 .Case("v2", AArch64::Q2)
2702 .Case("v3", AArch64::Q3)
2703 .Case("v4", AArch64::Q4)
2704 .Case("v5", AArch64::Q5)
2705 .Case("v6", AArch64::Q6)
2706 .Case("v7", AArch64::Q7)
2707 .Case("v8", AArch64::Q8)
2708 .Case("v9", AArch64::Q9)
2709 .Case("v10", AArch64::Q10)
2710 .Case("v11", AArch64::Q11)
2711 .Case("v12", AArch64::Q12)
2712 .Case("v13", AArch64::Q13)
2713 .Case("v14", AArch64::Q14)
2714 .Case("v15", AArch64::Q15)
2715 .Case("v16", AArch64::Q16)
2716 .Case("v17", AArch64::Q17)
2717 .Case("v18", AArch64::Q18)
2718 .Case("v19", AArch64::Q19)
2719 .Case("v20", AArch64::Q20)
2720 .Case("v21", AArch64::Q21)
2721 .Case("v22", AArch64::Q22)
2722 .Case("v23", AArch64::Q23)
2723 .Case("v24", AArch64::Q24)
2724 .Case("v25", AArch64::Q25)
2725 .Case("v26", AArch64::Q26)
2726 .Case("v27", AArch64::Q27)
2727 .Case("v28", AArch64::Q28)
2728 .Case("v29", AArch64::Q29)
2729 .Case("v30", AArch64::Q30)
2730 .Case("v31", AArch64::Q31)
2731 .Default(0);
2732}
2733
2734/// Returns an optional pair of (#elements, element-width) if Suffix
2735/// is a valid vector kind. Where the number of elements in a vector
2736/// or the vector width is implicit or explicitly unknown (but still a
2737/// valid suffix kind), 0 is used.
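/// For example, ".4s" yields {4, 32}, ".b" yields {0, 8} and "" yields {0, 0}.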
2738static std::optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
2739 RegKind VectorKind) {
2740 std::pair<int, int> Res = {-1, -1};
2741
2742 switch (VectorKind) {
2743 case RegKind::NeonVector:
2744 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2745 .Case("", {0, 0})
2746 .Case(".1d", {1, 64})
2747 .Case(".1q", {1, 128})
2748 // '.2h' needed for fp16 scalar pairwise reductions
2749 .Case(".2h", {2, 16})
2750 .Case(".2b", {2, 8})
2751 .Case(".2s", {2, 32})
2752 .Case(".2d", {2, 64})
2753 // '.4b' is another special case for the ARMv8.2a dot product
2754 // operand
2755 .Case(".4b", {4, 8})
2756 .Case(".4h", {4, 16})
2757 .Case(".4s", {4, 32})
2758 .Case(".8b", {8, 8})
2759 .Case(".8h", {8, 16})
2760 .Case(".16b", {16, 8})
2761 // Accept the width neutral ones, too, for verbose syntax. If
2762 // those aren't used in the right places, the token operand won't
2763 // match so all will work out.
2764 .Case(".b", {0, 8})
2765 .Case(".h", {0, 16})
2766 .Case(".s", {0, 32})
2767 .Case(".d", {0, 64})
2768 .Default({-1, -1});
2769 break;
2770 case RegKind::SVEPredicateAsCounter:
2771 case RegKind::SVEPredicateVector:
2772 case RegKind::SVEDataVector:
2773 case RegKind::Matrix:
2774 Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
2775 .Case("", {0, 0})
2776 .Case(".b", {0, 8})
2777 .Case(".h", {0, 16})
2778 .Case(".s", {0, 32})
2779 .Case(".d", {0, 64})
2780 .Case(".q", {0, 128})
2781 .Default({-1, -1});
2782 break;
2783 default:
2784 llvm_unreachable("Unsupported RegKind");
2785 }
2786
2787 if (Res == std::make_pair(-1, -1))
2788 return std::nullopt;
2789
2790 return std::optional<std::pair<int, int>>(Res);
2791}
2792
2793static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
2794 return parseVectorKind(Suffix, VectorKind).has_value();
2795}
2796
2797 static unsigned matchSVEDataVectorRegName(StringRef Name) {
2798 return StringSwitch<unsigned>(Name.lower())
2799 .Case("z0", AArch64::Z0)
2800 .Case("z1", AArch64::Z1)
2801 .Case("z2", AArch64::Z2)
2802 .Case("z3", AArch64::Z3)
2803 .Case("z4", AArch64::Z4)
2804 .Case("z5", AArch64::Z5)
2805 .Case("z6", AArch64::Z6)
2806 .Case("z7", AArch64::Z7)
2807 .Case("z8", AArch64::Z8)
2808 .Case("z9", AArch64::Z9)
2809 .Case("z10", AArch64::Z10)
2810 .Case("z11", AArch64::Z11)
2811 .Case("z12", AArch64::Z12)
2812 .Case("z13", AArch64::Z13)
2813 .Case("z14", AArch64::Z14)
2814 .Case("z15", AArch64::Z15)
2815 .Case("z16", AArch64::Z16)
2816 .Case("z17", AArch64::Z17)
2817 .Case("z18", AArch64::Z18)
2818 .Case("z19", AArch64::Z19)
2819 .Case("z20", AArch64::Z20)
2820 .Case("z21", AArch64::Z21)
2821 .Case("z22", AArch64::Z22)
2822 .Case("z23", AArch64::Z23)
2823 .Case("z24", AArch64::Z24)
2824 .Case("z25", AArch64::Z25)
2825 .Case("z26", AArch64::Z26)
2826 .Case("z27", AArch64::Z27)
2827 .Case("z28", AArch64::Z28)
2828 .Case("z29", AArch64::Z29)
2829 .Case("z30", AArch64::Z30)
2830 .Case("z31", AArch64::Z31)
2831 .Default(0);
2832}
2833
2834 static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
2835 return StringSwitch<unsigned>(Name.lower())
2836 .Case("p0", AArch64::P0)
2837 .Case("p1", AArch64::P1)
2838 .Case("p2", AArch64::P2)
2839 .Case("p3", AArch64::P3)
2840 .Case("p4", AArch64::P4)
2841 .Case("p5", AArch64::P5)
2842 .Case("p6", AArch64::P6)
2843 .Case("p7", AArch64::P7)
2844 .Case("p8", AArch64::P8)
2845 .Case("p9", AArch64::P9)
2846 .Case("p10", AArch64::P10)
2847 .Case("p11", AArch64::P11)
2848 .Case("p12", AArch64::P12)
2849 .Case("p13", AArch64::P13)
2850 .Case("p14", AArch64::P14)
2851 .Case("p15", AArch64::P15)
2852 .Default(0);
2853}
2854
2855 static unsigned matchSVEPredicateAsCounterRegName(StringRef Name) {
2856 return StringSwitch<unsigned>(Name.lower())
2857 .Case("pn0", AArch64::PN0)
2858 .Case("pn1", AArch64::PN1)
2859 .Case("pn2", AArch64::PN2)
2860 .Case("pn3", AArch64::PN3)
2861 .Case("pn4", AArch64::PN4)
2862 .Case("pn5", AArch64::PN5)
2863 .Case("pn6", AArch64::PN6)
2864 .Case("pn7", AArch64::PN7)
2865 .Case("pn8", AArch64::PN8)
2866 .Case("pn9", AArch64::PN9)
2867 .Case("pn10", AArch64::PN10)
2868 .Case("pn11", AArch64::PN11)
2869 .Case("pn12", AArch64::PN12)
2870 .Case("pn13", AArch64::PN13)
2871 .Case("pn14", AArch64::PN14)
2872 .Case("pn15", AArch64::PN15)
2873 .Default(0);
2874}
2875
2876 static unsigned matchMatrixTileListRegName(StringRef Name) {
2877 return StringSwitch<unsigned>(Name.lower())
2878 .Case("za0.d", AArch64::ZAD0)
2879 .Case("za1.d", AArch64::ZAD1)
2880 .Case("za2.d", AArch64::ZAD2)
2881 .Case("za3.d", AArch64::ZAD3)
2882 .Case("za4.d", AArch64::ZAD4)
2883 .Case("za5.d", AArch64::ZAD5)
2884 .Case("za6.d", AArch64::ZAD6)
2885 .Case("za7.d", AArch64::ZAD7)
2886 .Case("za0.s", AArch64::ZAS0)
2887 .Case("za1.s", AArch64::ZAS1)
2888 .Case("za2.s", AArch64::ZAS2)
2889 .Case("za3.s", AArch64::ZAS3)
2890 .Case("za0.h", AArch64::ZAH0)
2891 .Case("za1.h", AArch64::ZAH1)
2892 .Case("za0.b", AArch64::ZAB0)
2893 .Default(0);
2894}
2895
2896static unsigned matchMatrixRegName(StringRef Name) {
2897 return StringSwitch<unsigned>(Name.lower())
2898 .Case("za", AArch64::ZA)
2899 .Case("za0.q", AArch64::ZAQ0)
2900 .Case("za1.q", AArch64::ZAQ1)
2901 .Case("za2.q", AArch64::ZAQ2)
2902 .Case("za3.q", AArch64::ZAQ3)
2903 .Case("za4.q", AArch64::ZAQ4)
2904 .Case("za5.q", AArch64::ZAQ5)
2905 .Case("za6.q", AArch64::ZAQ6)
2906 .Case("za7.q", AArch64::ZAQ7)
2907 .Case("za8.q", AArch64::ZAQ8)
2908 .Case("za9.q", AArch64::ZAQ9)
2909 .Case("za10.q", AArch64::ZAQ10)
2910 .Case("za11.q", AArch64::ZAQ11)
2911 .Case("za12.q", AArch64::ZAQ12)
2912 .Case("za13.q", AArch64::ZAQ13)
2913 .Case("za14.q", AArch64::ZAQ14)
2914 .Case("za15.q", AArch64::ZAQ15)
2915 .Case("za0.d", AArch64::ZAD0)
2916 .Case("za1.d", AArch64::ZAD1)
2917 .Case("za2.d", AArch64::ZAD2)
2918 .Case("za3.d", AArch64::ZAD3)
2919 .Case("za4.d", AArch64::ZAD4)
2920 .Case("za5.d", AArch64::ZAD5)
2921 .Case("za6.d", AArch64::ZAD6)
2922 .Case("za7.d", AArch64::ZAD7)
2923 .Case("za0.s", AArch64::ZAS0)
2924 .Case("za1.s", AArch64::ZAS1)
2925 .Case("za2.s", AArch64::ZAS2)
2926 .Case("za3.s", AArch64::ZAS3)
2927 .Case("za0.h", AArch64::ZAH0)
2928 .Case("za1.h", AArch64::ZAH1)
2929 .Case("za0.b", AArch64::ZAB0)
2930 .Case("za0h.q", AArch64::ZAQ0)
2931 .Case("za1h.q", AArch64::ZAQ1)
2932 .Case("za2h.q", AArch64::ZAQ2)
2933 .Case("za3h.q", AArch64::ZAQ3)
2934 .Case("za4h.q", AArch64::ZAQ4)
2935 .Case("za5h.q", AArch64::ZAQ5)
2936 .Case("za6h.q", AArch64::ZAQ6)
2937 .Case("za7h.q", AArch64::ZAQ7)
2938 .Case("za8h.q", AArch64::ZAQ8)
2939 .Case("za9h.q", AArch64::ZAQ9)
2940 .Case("za10h.q", AArch64::ZAQ10)
2941 .Case("za11h.q", AArch64::ZAQ11)
2942 .Case("za12h.q", AArch64::ZAQ12)
2943 .Case("za13h.q", AArch64::ZAQ13)
2944 .Case("za14h.q", AArch64::ZAQ14)
2945 .Case("za15h.q", AArch64::ZAQ15)
2946 .Case("za0h.d", AArch64::ZAD0)
2947 .Case("za1h.d", AArch64::ZAD1)
2948 .Case("za2h.d", AArch64::ZAD2)
2949 .Case("za3h.d", AArch64::ZAD3)
2950 .Case("za4h.d", AArch64::ZAD4)
2951 .Case("za5h.d", AArch64::ZAD5)
2952 .Case("za6h.d", AArch64::ZAD6)
2953 .Case("za7h.d", AArch64::ZAD7)
2954 .Case("za0h.s", AArch64::ZAS0)
2955 .Case("za1h.s", AArch64::ZAS1)
2956 .Case("za2h.s", AArch64::ZAS2)
2957 .Case("za3h.s", AArch64::ZAS3)
2958 .Case("za0h.h", AArch64::ZAH0)
2959 .Case("za1h.h", AArch64::ZAH1)
2960 .Case("za0h.b", AArch64::ZAB0)
2961 .Case("za0v.q", AArch64::ZAQ0)
2962 .Case("za1v.q", AArch64::ZAQ1)
2963 .Case("za2v.q", AArch64::ZAQ2)
2964 .Case("za3v.q", AArch64::ZAQ3)
2965 .Case("za4v.q", AArch64::ZAQ4)
2966 .Case("za5v.q", AArch64::ZAQ5)
2967 .Case("za6v.q", AArch64::ZAQ6)
2968 .Case("za7v.q", AArch64::ZAQ7)
2969 .Case("za8v.q", AArch64::ZAQ8)
2970 .Case("za9v.q", AArch64::ZAQ9)
2971 .Case("za10v.q", AArch64::ZAQ10)
2972 .Case("za11v.q", AArch64::ZAQ11)
2973 .Case("za12v.q", AArch64::ZAQ12)
2974 .Case("za13v.q", AArch64::ZAQ13)
2975 .Case("za14v.q", AArch64::ZAQ14)
2976 .Case("za15v.q", AArch64::ZAQ15)
2977 .Case("za0v.d", AArch64::ZAD0)
2978 .Case("za1v.d", AArch64::ZAD1)
2979 .Case("za2v.d", AArch64::ZAD2)
2980 .Case("za3v.d", AArch64::ZAD3)
2981 .Case("za4v.d", AArch64::ZAD4)
2982 .Case("za5v.d", AArch64::ZAD5)
2983 .Case("za6v.d", AArch64::ZAD6)
2984 .Case("za7v.d", AArch64::ZAD7)
2985 .Case("za0v.s", AArch64::ZAS0)
2986 .Case("za1v.s", AArch64::ZAS1)
2987 .Case("za2v.s", AArch64::ZAS2)
2988 .Case("za3v.s", AArch64::ZAS3)
2989 .Case("za0v.h", AArch64::ZAH0)
2990 .Case("za1v.h", AArch64::ZAH1)
2991 .Case("za0v.b", AArch64::ZAB0)
2992 .Default(0);
2993}
2994
2995bool AArch64AsmParser::parseRegister(MCRegister &Reg, SMLoc &StartLoc,
2996 SMLoc &EndLoc) {
2997 return !tryParseRegister(Reg, StartLoc, EndLoc).isSuccess();
2998}
2999
3000ParseStatus AArch64AsmParser::tryParseRegister(MCRegister &Reg, SMLoc &StartLoc,
3001 SMLoc &EndLoc) {
3002 StartLoc = getLoc();
3003 ParseStatus Res = tryParseScalarRegister(Reg);
3004 EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3005 return Res;
3006}
3007
3008// Matches a register name or register alias previously defined by '.req'
3009unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
3010 RegKind Kind) {
3011 unsigned RegNum = 0;
3012 if ((RegNum = matchSVEDataVectorRegName(Name)))
3013 return Kind == RegKind::SVEDataVector ? RegNum : 0;
3014
3015 if ((RegNum = matchSVEPredicateVectorRegName(Name)))
3016 return Kind == RegKind::SVEPredicateVector ? RegNum : 0;
3017
3018 if ((RegNum = matchSVEPredicateAsCounterRegName(Name)))
3019 return Kind == RegKind::SVEPredicateAsCounter ? RegNum : 0;
3020
3021 if ((RegNum = MatchNeonVectorRegName(Name)))
3022 return Kind == RegKind::NeonVector ? RegNum : 0;
3023
3024 if ((RegNum = matchMatrixRegName(Name)))
3025 return Kind == RegKind::Matrix ? RegNum : 0;
3026
3027 if (Name.equals_insensitive("zt0"))
3028 return Kind == RegKind::LookupTable ? unsigned(AArch64::ZT0) : 0;
3029
3030 // The parsed register must be of RegKind Scalar
3031 if ((RegNum = MatchRegisterName(Name)))
3032 return (Kind == RegKind::Scalar) ? RegNum : 0;
3033
3034 if (!RegNum) {
3035 // Handle a few common aliases of registers.
3036 if (auto RegNum = StringSwitch<unsigned>(Name.lower())
3037 .Case("fp", AArch64::FP)
3038 .Case("lr", AArch64::LR)
3039 .Case("x31", AArch64::XZR)
3040 .Case("w31", AArch64::WZR)
3041 .Default(0))
3042 return Kind == RegKind::Scalar ? RegNum : 0;
3043
3044 // Check for aliases registered via .req. Canonicalize to lower case.
3045 // That's more consistent since register names are case insensitive, and
3046 // it's how the original entry was passed in from MC/MCParser/AsmParser.
3047 auto Entry = RegisterReqs.find(Name.lower());
3048 if (Entry == RegisterReqs.end())
3049 return 0;
3050
3051 // set RegNum if the match is the right kind of register
3052 if (Kind == Entry->getValue().first)
3053 RegNum = Entry->getValue().second;
3054 }
3055 return RegNum;
3056}
3057
3058unsigned AArch64AsmParser::getNumRegsForRegKind(RegKind K) {
3059 switch (K) {
3060 case RegKind::Scalar:
3061 case RegKind::NeonVector:
3062 case RegKind::SVEDataVector:
3063 return 32;
3064 case RegKind::Matrix:
3065 case RegKind::SVEPredicateVector:
3066 case RegKind::SVEPredicateAsCounter:
3067 return 16;
3068 case RegKind::LookupTable:
3069 return 1;
3070 }
3071 llvm_unreachable("Unsupported RegKind");
3072}
3073
3074/// tryParseScalarRegister - Try to parse a register name. The token must be an
3075/// Identifier when called, and if it is a register name the token is eaten and
3076/// the register is added to the operand list.
3077ParseStatus AArch64AsmParser::tryParseScalarRegister(MCRegister &RegNum) {
3078 const AsmToken &Tok = getTok();
3079 if (Tok.isNot(AsmToken::Identifier))
3080 return ParseStatus::NoMatch;
3081
3082 std::string lowerCase = Tok.getString().lower();
3083 unsigned Reg = matchRegisterNameAlias(lowerCase, RegKind::Scalar);
3084 if (Reg == 0)
3085 return ParseStatus::NoMatch;
3086
3087 RegNum = Reg;
3088 Lex(); // Eat identifier token.
3089 return ParseStatus::Success;
3090}
3091
3092/// tryParseSysCROperand - Try to parse a system instruction CR operand name.
3093ParseStatus AArch64AsmParser::tryParseSysCROperand(OperandVector &Operands) {
3094 SMLoc S = getLoc();
3095
3096 if (getTok().isNot(AsmToken::Identifier))
3097 return Error(S, "Expected cN operand where 0 <= N <= 15");
3098
3099 StringRef Tok = getTok().getIdentifier();
3100 if (Tok[0] != 'c' && Tok[0] != 'C')
3101 return Error(S, "Expected cN operand where 0 <= N <= 15");
3102
3103 uint32_t CRNum;
3104 bool BadNum = Tok.drop_front().getAsInteger(10, CRNum);
3105 if (BadNum || CRNum > 15)
3106 return Error(S, "Expected cN operand where 0 <= N <= 15");
3107
3108 Lex(); // Eat identifier token.
3109 Operands.push_back(
3110 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
3111 return ParseStatus::Success;
3112}
3113
3114// Either an identifier for named values or a 6-bit immediate.
3115ParseStatus AArch64AsmParser::tryParseRPRFMOperand(OperandVector &Operands) {
3116 SMLoc S = getLoc();
3117 const AsmToken &Tok = getTok();
3118
3119 unsigned MaxVal = 63;
3120
3121 // Immediate case, with optional leading hash:
3122 if (parseOptionalToken(AsmToken::Hash) ||
3123 Tok.is(AsmToken::Integer)) {
3124 const MCExpr *ImmVal;
3125 if (getParser().parseExpression(ImmVal))
3126 return ParseStatus::Failure;
3127
3128 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3129 if (!MCE)
3130 return TokError("immediate value expected for prefetch operand");
3131 unsigned prfop = MCE->getValue();
3132 if (prfop > MaxVal)
3133 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3134 "] expected");
3135
3136 auto RPRFM = AArch64RPRFM::lookupRPRFMByEncoding(MCE->getValue());
3137 Operands.push_back(AArch64Operand::CreatePrefetch(
3138 prfop, RPRFM ? RPRFM->Name : "", S, getContext()));
3139 return ParseStatus::Success;
3140 }
3141
3142 if (Tok.isNot(AsmToken::Identifier))
3143 return TokError("prefetch hint expected");
3144
3145 auto RPRFM = AArch64RPRFM::lookupRPRFMByName(Tok.getString());
3146 if (!RPRFM)
3147 return TokError("prefetch hint expected");
3148
3149 Operands.push_back(AArch64Operand::CreatePrefetch(
3150 RPRFM->Encoding, Tok.getString(), S, getContext()));
3151 Lex(); // Eat identifier token.
3152 return ParseStatus::Success;
3153}
3154
3155/// tryParsePrefetch - Try to parse a prefetch operand.
3156template <bool IsSVEPrefetch>
3157ParseStatus AArch64AsmParser::tryParsePrefetch(OperandVector &Operands) {
3158 SMLoc S = getLoc();
3159 const AsmToken &Tok = getTok();
3160
3161 auto LookupByName = [](StringRef N) {
3162 if (IsSVEPrefetch) {
3163 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(N))
3164 return std::optional<unsigned>(Res->Encoding);
3165 } else if (auto Res = AArch64PRFM::lookupPRFMByName(N))
3166 return std::optional<unsigned>(Res->Encoding);
3167 return std::optional<unsigned>();
3168 };
3169
3170 auto LookupByEncoding = [](unsigned E) {
3171 if (IsSVEPrefetch) {
3172 if (auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(E))
3173 return std::optional<StringRef>(Res->Name);
3174 } else if (auto Res = AArch64PRFM::lookupPRFMByEncoding(E))
3175 return std::optional<StringRef>(Res->Name);
3176 return std::optional<StringRef>();
3177 };
3178 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
3179
3180 // Either an identifier for named values or a 5-bit immediate.
3181 // Eat optional hash.
3182 if (parseOptionalToken(AsmToken::Hash) ||
3183 Tok.is(AsmToken::Integer)) {
3184 const MCExpr *ImmVal;
3185 if (getParser().parseExpression(ImmVal))
3186 return ParseStatus::Failure;
3187
3188 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3189 if (!MCE)
3190 return TokError("immediate value expected for prefetch operand");
3191 unsigned prfop = MCE->getValue();
3192 if (prfop > MaxVal)
3193 return TokError("prefetch operand out of range, [0," + utostr(MaxVal) +
3194 "] expected");
3195
3196 auto PRFM = LookupByEncoding(MCE->getValue());
3197 Operands.push_back(AArch64Operand::CreatePrefetch(prfop, PRFM.value_or(""),
3198 S, getContext()));
3199 return ParseStatus::Success;
3200 }
3201
3202 if (Tok.isNot(AsmToken::Identifier))
3203 return TokError("prefetch hint expected");
3204
3205 auto PRFM = LookupByName(Tok.getString());
3206 if (!PRFM)
3207 return TokError("prefetch hint expected");
3208
3209 Operands.push_back(AArch64Operand::CreatePrefetch(
3210 *PRFM, Tok.getString(), S, getContext()));
3211 Lex(); // Eat identifier token.
3212 return ParseStatus::Success;
3213}
3214
3215/// tryParsePSBHint - Try to parse a PSB operand, mapped to Hint command
3216ParseStatus AArch64AsmParser::tryParsePSBHint(OperandVector &Operands) {
3217 SMLoc S = getLoc();
3218 const AsmToken &Tok = getTok();
3219 if (Tok.isNot(AsmToken::Identifier))
3220 return TokError("invalid operand for instruction");
3221
3222 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.getString());
3223 if (!PSB)
3224 return TokError("invalid operand for instruction");
3225
3226 Operands.push_back(AArch64Operand::CreatePSBHint(
3227 PSB->Encoding, Tok.getString(), S, getContext()));
3228 Lex(); // Eat identifier token.
3229 return ParseStatus::Success;
3230}
3231
3232ParseStatus AArch64AsmParser::tryParseSyspXzrPair(OperandVector &Operands) {
3233 SMLoc StartLoc = getLoc();
3234
3235 MCRegister RegNum;
3236
3237 // The case where xzr, xzr is not present is handled by an InstAlias.
3238
3239 auto RegTok = getTok(); // in case we need to backtrack
3240 if (!tryParseScalarRegister(RegNum).isSuccess())
3241 return ParseStatus::NoMatch;
3242
3243 if (RegNum != AArch64::XZR) {
3244 getLexer().UnLex(RegTok);
3245 return ParseStatus::NoMatch;
3246 }
3247
3248 if (parseComma())
3249 return ParseStatus::Failure;
3250
3251 if (!tryParseScalarRegister(RegNum).isSuccess())
3252 return TokError("expected register operand");
3253
3254 if (RegNum != AArch64::XZR)
3255 return TokError("xzr must be followed by xzr");
3256
3257 // We need to push something, since we claim this is an operand in .td.
3258 // See also AArch64AsmParser::parseKeywordOperand.
3259 Operands.push_back(AArch64Operand::CreateReg(
3260 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
3261
3262 return ParseStatus::Success;
3263}
3264
3265/// tryParseBTIHint - Try to parse a BTI operand, mapped to Hint command
3266ParseStatus AArch64AsmParser::tryParseBTIHint(OperandVector &Operands) {
3267 SMLoc S = getLoc();
3268 const AsmToken &Tok = getTok();
3269 if (Tok.isNot(AsmToken::Identifier))
3270 return TokError("invalid operand for instruction");
3271
3272 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.getString());
3273 if (!BTI)
3274 return TokError("invalid operand for instruction");
3275
3276 Operands.push_back(AArch64Operand::CreateBTIHint(
3277 BTI->Encoding, Tok.getString(), S, getContext()));
3278 Lex(); // Eat identifier token.
3279 return ParseStatus::Success;
3280}
3281
3282/// tryParseAdrpLabel - Parse and validate a source label for the ADRP
3283/// instruction.
3284ParseStatus AArch64AsmParser::tryParseAdrpLabel(OperandVector &Operands) {
3285 SMLoc S = getLoc();
3286 const MCExpr *Expr = nullptr;
3287
3288 if (getTok().is(AsmToken::Hash)) {
3289 Lex(); // Eat hash token.
3290 }
3291
3292 if (parseSymbolicImmVal(Expr))
3293 return ParseStatus::Failure;
3294
3295 AArch64::Specifier ELFSpec;
3296 AArch64::Specifier DarwinSpec;
3297 int64_t Addend;
3298 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3299 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3300 // No modifier was specified at all; this is the syntax for an ELF basic
3301 // ADRP relocation (unfortunately).
3302 Expr =
3303 MCSpecifierExpr::create(Expr, AArch64::S_ABS_PAGE, getContext());
3304 } else if ((DarwinSpec == AArch64::S_MACHO_GOTPAGE ||
3305 DarwinSpec == AArch64::S_MACHO_TLVPPAGE) &&
3306 Addend != 0) {
3307 return Error(S, "gotpage label reference not allowed an addend");
3308 } else if (DarwinSpec != AArch64::S_MACHO_PAGE &&
3309 DarwinSpec != AArch64::S_MACHO_GOTPAGE &&
3310 DarwinSpec != AArch64::S_MACHO_TLVPPAGE &&
3311 ELFSpec != AArch64::S_ABS_PAGE_NC &&
3312 ELFSpec != AArch64::S_GOT_PAGE &&
3313 ELFSpec != AArch64::S_GOT_AUTH_PAGE &&
3314 ELFSpec != AArch64::S_GOT_PAGE_LO15 &&
3315 ELFSpec != AArch64::S_GOTTPREL_PAGE &&
3316 ELFSpec != AArch64::S_TLSDESC_PAGE &&
3317 ELFSpec != AArch64::S_TLSDESC_AUTH_PAGE) {
3318 // The operand must be an @page or @gotpage qualified symbolref.
3319 return Error(S, "page or gotpage label reference expected");
3320 }
3321 }
3322
3323 // We have either a label reference possibly with addend or an immediate. The
3324 // addend is a raw value here. The linker will adjust it to only reference the
3325 // page.
3326 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3327 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3328
3329 return ParseStatus::Success;
3330}
3331
3332/// tryParseAdrLabel - Parse and validate a source label for the ADR
3333/// instruction.
3334ParseStatus AArch64AsmParser::tryParseAdrLabel(OperandVector &Operands) {
3335 SMLoc S = getLoc();
3336 const MCExpr *Expr = nullptr;
3337
3338 // Leave anything with a bracket to the default for SVE
3339 if (getTok().is(AsmToken::LBrac))
3340 return ParseStatus::NoMatch;
3341
3342 if (getTok().is(AsmToken::Hash))
3343 Lex(); // Eat hash token.
3344
3345 if (parseSymbolicImmVal(Expr))
3346 return ParseStatus::Failure;
3347
3348 AArch64::Specifier ELFSpec;
3349 AArch64::Specifier DarwinSpec;
3350 int64_t Addend;
3351 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
3352 if (DarwinSpec == AArch64::S_None && ELFSpec == AArch64::S_INVALID) {
3353 // No modifier was specified at all; this is the syntax for an ELF basic
3354 // ADR relocation (unfortunately).
3355 Expr = MCSpecifierExpr::create(Expr, AArch64::S_ABS, getContext());
3356 } else if (ELFSpec != AArch64::S_GOT_AUTH_PAGE) {
3357 // For tiny code model, we use :got_auth: operator to fill 21-bit imm of
3358 // adr. It's not actually GOT entry page address but the GOT address
3359 // itself - we just share the same variant kind with :got_auth: operator
3360 // applied for adrp.
3361 // TODO: can we somehow get current TargetMachine object to call
3362 // getCodeModel() on it to ensure we are using tiny code model?
3363 return Error(S, "unexpected adr label");
3364 }
3365 }
3366
3367 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3368 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3369 return ParseStatus::Success;
3370}
3371
3372/// tryParseFPImm - A floating point immediate expression operand.
3373template <bool AddFPZeroAsLiteral>
3374ParseStatus AArch64AsmParser::tryParseFPImm(OperandVector &Operands) {
3375 SMLoc S = getLoc();
3376
3377 bool Hash = parseOptionalToken(AsmToken::Hash);
3378
3379 // Handle negation, as that still comes through as a separate token.
3380 bool isNegative = parseOptionalToken(AsmToken::Minus);
3381
3382 const AsmToken &Tok = getTok();
3383 if (!Tok.is(AsmToken::Real) && !Tok.is(AsmToken::Integer)) {
3384 if (!Hash)
3385 return ParseStatus::NoMatch;
3386 return TokError("invalid floating point immediate");
3387 }
3388
3389 // Parse hexadecimal representation.
3390 if (Tok.is(AsmToken::Integer) && Tok.getString().starts_with("0x")) {
3391 if (Tok.getIntVal() > 255 || isNegative)
3392 return TokError("encoded floating point value out of range");
3393
3394 APFloat F((double)AArch64_AM::getFPImmFloat(Tok.getIntVal()));
3395 Operands.push_back(
3396 AArch64Operand::CreateFPImm(F, true, S, getContext()));
3397 } else {
3398 // Parse FP representation.
3399 APFloat RealVal(APFloat::IEEEdouble());
3400 auto StatusOrErr =
3401 RealVal.convertFromString(Tok.getString(), APFloat::rmTowardZero);
3402 if (errorToBool(StatusOrErr.takeError()))
3403 return TokError("invalid floating point representation");
3404
3405 if (isNegative)
3406 RealVal.changeSign();
3407
3408 if (AddFPZeroAsLiteral && RealVal.isPosZero()) {
3409 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
3410 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
3411 } else
3412 Operands.push_back(AArch64Operand::CreateFPImm(
3413 RealVal, *StatusOrErr == APFloat::opOK, S, getContext()));
3414 }
3415
3416 Lex(); // Eat the token.
3417
3418 return ParseStatus::Success;
3419}
3420
3421/// tryParseImmWithOptionalShift - Parse immediate operand, optionally with
3422/// a shift suffix, for example '#1, lsl #12'.
3423ParseStatus
3424AArch64AsmParser::tryParseImmWithOptionalShift(OperandVector &Operands) {
3425 SMLoc S = getLoc();
3426
3427 if (getTok().is(AsmToken::Hash))
3428 Lex(); // Eat '#'
3429 else if (getTok().isNot(AsmToken::Integer))
3430 // Operand should start from # or should be integer, emit error otherwise.
3431 return ParseStatus::NoMatch;
3432
3433 if (getTok().is(AsmToken::Integer) &&
3434 getLexer().peekTok().is(AsmToken::Colon))
3435 return tryParseImmRange(Operands);
3436
3437 const MCExpr *Imm = nullptr;
3438 if (parseSymbolicImmVal(Imm))
3439 return ParseStatus::Failure;
3440 else if (getTok().isNot(AsmToken::Comma)) {
3441 Operands.push_back(
3442 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3443 return ParseStatus::Success;
3444 }
3445
3446 // Eat ','
3447 Lex();
3448 StringRef VecGroup;
3449 if (!parseOptionalVGOperand(Operands, VecGroup)) {
3450 Operands.push_back(
3451 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3452 Operands.push_back(
3453 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
3454 return ParseStatus::Success;
3455 }
3456
3457 // The optional operand must be "lsl #N" where N is non-negative.
3458 if (!getTok().is(AsmToken::Identifier) ||
3459 !getTok().getIdentifier().equals_insensitive("lsl"))
3460 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3461
3462 // Eat 'lsl'
3463 Lex();
3464
3465 parseOptionalToken(AsmToken::Hash);
3466
3467 if (getTok().isNot(AsmToken::Integer))
3468 return Error(getLoc(), "only 'lsl #+N' valid after immediate");
3469
3470 int64_t ShiftAmount = getTok().getIntVal();
3471
3472 if (ShiftAmount < 0)
3473 return Error(getLoc(), "positive shift amount required");
3474 Lex(); // Eat the number
3475
3476 // Just in case the optional lsl #0 is used for immediates other than zero.
3477 if (ShiftAmount == 0 && Imm != nullptr) {
3478 Operands.push_back(
3479 AArch64Operand::CreateImm(Imm, S, getLoc(), getContext()));
3480 return ParseStatus::Success;
3481 }
3482
3483 Operands.push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount, S,
3484 getLoc(), getContext()));
3485 return ParseStatus::Success;
3486}
3487
3488/// parseCondCodeString - Parse a Condition Code string, optionally returning a
3489/// suggestion to help common typos.
3490 AArch64CC::CondCode
3491 AArch64AsmParser::parseCondCodeString(StringRef Cond, std::string &Suggestion) {
3492 AArch64CC::CondCode CC = StringSwitch<AArch64CC::CondCode>(Cond.lower())
3493 .Case("eq", AArch64CC::EQ)
3494 .Case("ne", AArch64CC::NE)
3495 .Case("cs", AArch64CC::HS)
3496 .Case("hs", AArch64CC::HS)
3497 .Case("cc", AArch64CC::LO)
3498 .Case("lo", AArch64CC::LO)
3499 .Case("mi", AArch64CC::MI)
3500 .Case("pl", AArch64CC::PL)
3501 .Case("vs", AArch64CC::VS)
3502 .Case("vc", AArch64CC::VC)
3503 .Case("hi", AArch64CC::HI)
3504 .Case("ls", AArch64CC::LS)
3505 .Case("ge", AArch64CC::GE)
3506 .Case("lt", AArch64CC::LT)
3507 .Case("gt", AArch64CC::GT)
3508 .Case("le", AArch64CC::LE)
3509 .Case("al", AArch64CC::AL)
3510 .Case("nv", AArch64CC::NV)
3511 // SVE condition code aliases:
3512 .Case("none", AArch64CC::EQ)
3513 .Case("any", AArch64CC::NE)
3514 .Case("nlast", AArch64CC::HS)
3515 .Case("last", AArch64CC::LO)
3516 .Case("first", AArch64CC::MI)
3517 .Case("nfrst", AArch64CC::PL)
3518 .Case("pmore", AArch64CC::HI)
3519 .Case("plast", AArch64CC::LS)
3520 .Case("tcont", AArch64CC::GE)
3521 .Case("tstop", AArch64CC::LT)
3522 .Default(AArch64CC::Invalid);
3523
3524 if (CC == AArch64CC::Invalid && Cond.lower() == "nfirst")
3525 Suggestion = "nfrst";
3526
3527 return CC;
3528}
3529
3530/// parseCondCode - Parse a Condition Code operand.
3531bool AArch64AsmParser::parseCondCode(OperandVector &Operands,
3532 bool invertCondCode) {
3533 SMLoc S = getLoc();
3534 const AsmToken &Tok = getTok();
3535 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3536
3537 StringRef Cond = Tok.getString();
3538 std::string Suggestion;
3539 AArch64CC::CondCode CC = parseCondCodeString(Cond, Suggestion);
3540 if (CC == AArch64CC::Invalid) {
3541 std::string Msg = "invalid condition code";
3542 if (!Suggestion.empty())
3543 Msg += ", did you mean " + Suggestion + "?";
3544 return TokError(Msg);
3545 }
3546 Lex(); // Eat identifier token.
3547
3548 if (invertCondCode) {
3549 if (CC == AArch64CC::AL || CC == AArch64CC::NV)
3550 return TokError("condition codes AL and NV are invalid for this instruction");
3551 CC = AArch64CC::getInvertedCondCode(CC);
3552 }
3553
3554 Operands.push_back(
3555 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
3556 return false;
3557}
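// For illustration, invertCondCode is set when parsing alias mnemonics such
// as cset, cinc and cneg, whose printed condition is the inverse of the
// condition that is actually encoded; hence the inversion applied above and
// the rejection of AL/NV for those forms.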
3558
3559ParseStatus AArch64AsmParser::tryParseSVCR(OperandVector &Operands) {
3560 const AsmToken &Tok = getTok();
3561 SMLoc S = getLoc();
3562
3563 if (Tok.isNot(AsmToken::Identifier))
3564 return TokError("invalid operand for instruction");
3565
3566 unsigned PStateImm = -1;
3567 const auto *SVCR = AArch64SVCR::lookupSVCRByName(Tok.getString());
3568 if (!SVCR)
3569 return ParseStatus::NoMatch;
3570 if (SVCR->haveFeatures(getSTI().getFeatureBits()))
3571 PStateImm = SVCR->Encoding;
3572
3573 Operands.push_back(
3574 AArch64Operand::CreateSVCR(PStateImm, Tok.getString(), S, getContext()));
3575 Lex(); // Eat identifier token.
3576 return ParseStatus::Success;
3577}
3578
3579ParseStatus AArch64AsmParser::tryParseMatrixRegister(OperandVector &Operands) {
3580 const AsmToken &Tok = getTok();
3581 SMLoc S = getLoc();
3582
3583 StringRef Name = Tok.getString();
3584
3585 if (Name.equals_insensitive("za") || Name.starts_with_insensitive("za.")) {
3586 Lex(); // eat "za[.(b|h|s|d)]"
3587 unsigned ElementWidth = 0;
3588 auto DotPosition = Name.find('.');
3589 if (DotPosition != StringRef::npos) {
3590 const auto &KindRes =
3591 parseVectorKind(Name.drop_front(DotPosition), RegKind::Matrix);
3592 if (!KindRes)
3593 return TokError(
3594 "Expected the register to be followed by element width suffix");
3595 ElementWidth = KindRes->second;
3596 }
3597 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3598 AArch64::ZA, ElementWidth, MatrixKind::Array, S, getLoc(),
3599 getContext()));
3600 if (getLexer().is(AsmToken::LBrac)) {
3601 // There's no comma after a matrix operand, so we can parse the next operand
3602 // immediately.
3603 if (parseOperand(Operands, false, false))
3604 return ParseStatus::NoMatch;
3605 }
3606 return ParseStatus::Success;
3607 }
3608
3609 // Try to parse matrix register.
3610 unsigned Reg = matchRegisterNameAlias(Name, RegKind::Matrix);
3611 if (!Reg)
3612 return ParseStatus::NoMatch;
3613
3614 size_t DotPosition = Name.find('.');
3615 assert(DotPosition != StringRef::npos && "Unexpected register");
3616
3617 StringRef Head = Name.take_front(DotPosition);
3618 StringRef Tail = Name.drop_front(DotPosition);
3619 StringRef RowOrColumn = Head.take_back();
3620
3621 MatrixKind Kind = StringSwitch<MatrixKind>(RowOrColumn.lower())
3622 .Case("h", MatrixKind::Row)
3623 .Case("v", MatrixKind::Col)
3624 .Default(MatrixKind::Tile);
3625
3626 // Next up, parsing the suffix
3627 const auto &KindRes = parseVectorKind(Tail, RegKind::Matrix);
3628 if (!KindRes)
3629 return TokError(
3630 "Expected the register to be followed by element width suffix");
3631 unsigned ElementWidth = KindRes->second;
3632
3633 Lex();
3634
3635 Operands.push_back(AArch64Operand::CreateMatrixRegister(
3636 Reg, ElementWidth, Kind, S, getLoc(), getContext()));
3637
3638 if (getLexer().is(AsmToken::LBrac)) {
3639 // There's no comma after a matrix operand, so we can parse the next operand
3640 // immediately.
3641 if (parseOperand(Operands, false, false))
3642 return ParseStatus::NoMatch;
3643 }
3644 return ParseStatus::Success;
3645}
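// For illustration, this accepts the whole-array forms "za" and "za.d" as
// well as tile slices such as "za0h.b" (a row, MatrixKind::Row) or "za1v.s"
// (a column, MatrixKind::Col), with the element width taken from the suffix.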
3646
3647/// tryParseOptionalShiftExtend - Some operands take an optional shift or
3648/// extend argument. Parse them if present.
3649ParseStatus
3650AArch64AsmParser::tryParseOptionalShiftExtend(OperandVector &Operands) {
3651 const AsmToken &Tok = getTok();
3652 std::string LowerID = Tok.getString().lower();
3653 AArch64_AM::ShiftExtendType ShOp =
3654 StringSwitch<AArch64_AM::ShiftExtendType>(LowerID)
3655 .Case("lsl", AArch64_AM::LSL)
3656 .Case("lsr", AArch64_AM::LSR)
3657 .Case("asr", AArch64_AM::ASR)
3658 .Case("ror", AArch64_AM::ROR)
3659 .Case("msl", AArch64_AM::MSL)
3660 .Case("uxtb", AArch64_AM::UXTB)
3661 .Case("uxth", AArch64_AM::UXTH)
3662 .Case("uxtw", AArch64_AM::UXTW)
3663 .Case("uxtx", AArch64_AM::UXTX)
3664 .Case("sxtb", AArch64_AM::SXTB)
3665 .Case("sxth", AArch64_AM::SXTH)
3666 .Case("sxtw", AArch64_AM::SXTW)
3667 .Case("sxtx", AArch64_AM::SXTX)
3668 .Default(AArch64_AM::InvalidShiftExtend);
3669
3670 if (ShOp == AArch64_AM::InvalidShiftExtend)
3671 return ParseStatus::NoMatch;
3672
3673 SMLoc S = Tok.getLoc();
3674 Lex();
3675
3676 bool Hash = parseOptionalToken(AsmToken::Hash);
3677
3678 if (!Hash && getLexer().isNot(AsmToken::Integer)) {
3679 if (ShOp == AArch64_AM::LSL || ShOp == AArch64_AM::LSR ||
3680 ShOp == AArch64_AM::ASR || ShOp == AArch64_AM::ROR ||
3681 ShOp == AArch64_AM::MSL) {
3682 // We expect a number here.
3683 return TokError("expected #imm after shift specifier");
3684 }
3685
3686 // "extend" type operations don't need an immediate, #0 is implicit.
3687 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3688 Operands.push_back(
3689 AArch64Operand::CreateShiftExtend(ShOp, 0, false, S, E, getContext()));
3690 return ParseStatus::Success;
3691 }
3692
3693 // Make sure we do actually have a number, identifier or a parenthesized
3694 // expression.
3695 SMLoc E = getLoc();
3696 if (!getTok().is(AsmToken::Integer) && !getTok().is(AsmToken::LParen) &&
3697 !getTok().is(AsmToken::Identifier))
3698 return Error(E, "expected integer shift amount");
3699
3700 const MCExpr *ImmVal;
3701 if (getParser().parseExpression(ImmVal))
3702 return ParseStatus::Failure;
3703
3704 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
3705 if (!MCE)
3706 return Error(E, "expected constant '#imm' after shift specifier");
3707
3708 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
3709 Operands.push_back(AArch64Operand::CreateShiftExtend(
3710 ShOp, MCE->getValue(), true, S, E, getContext()));
3711 return ParseStatus::Success;
3712}
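// For illustration, "add x0, x1, x2, lsl #3" supplies an explicit shift
// amount, while an extend such as "ldr x0, [x1, w2, sxtw]" may omit the
// immediate, in which case the implicit #0 path above is taken.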
3713
3714static const struct Extension {
3715 const char *Name;
3716 FeatureBitset Features;
3717} ExtensionMap[] = {
3718 {"crc", {AArch64::FeatureCRC}},
3719 {"sm4", {AArch64::FeatureSM4}},
3720 {"sha3", {AArch64::FeatureSHA3}},
3721 {"sha2", {AArch64::FeatureSHA2}},
3722 {"aes", {AArch64::FeatureAES}},
3723 {"crypto", {AArch64::FeatureCrypto}},
3724 {"fp", {AArch64::FeatureFPARMv8}},
3725 {"simd", {AArch64::FeatureNEON}},
3726 {"ras", {AArch64::FeatureRAS}},
3727 {"rasv2", {AArch64::FeatureRASv2}},
3728 {"lse", {AArch64::FeatureLSE}},
3729 {"predres", {AArch64::FeaturePredRes}},
3730 {"predres2", {AArch64::FeatureSPECRES2}},
3731 {"ccdp", {AArch64::FeatureCacheDeepPersist}},
3732 {"mte", {AArch64::FeatureMTE}},
3733 {"memtag", {AArch64::FeatureMTE}},
3734 {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
3735 {"pan", {AArch64::FeaturePAN}},
3736 {"pan-rwv", {AArch64::FeaturePAN_RWV}},
3737 {"ccpp", {AArch64::FeatureCCPP}},
3738 {"rcpc", {AArch64::FeatureRCPC}},
3739 {"rng", {AArch64::FeatureRandGen}},
3740 {"sve", {AArch64::FeatureSVE}},
3741 {"sve-b16b16", {AArch64::FeatureSVEB16B16}},
3742 {"sve2", {AArch64::FeatureSVE2}},
3743 {"sve-aes", {AArch64::FeatureSVEAES}},
3744 {"sve2-aes", {AArch64::FeatureAliasSVE2AES, AArch64::FeatureSVEAES}},
3745 {"sve-sm4", {AArch64::FeatureSVESM4}},
3746 {"sve2-sm4", {AArch64::FeatureAliasSVE2SM4, AArch64::FeatureSVESM4}},
3747 {"sve-sha3", {AArch64::FeatureSVESHA3}},
3748 {"sve2-sha3", {AArch64::FeatureAliasSVE2SHA3, AArch64::FeatureSVESHA3}},
3749 {"sve-bitperm", {AArch64::FeatureSVEBitPerm}},
3750 {"sve2-bitperm",
3751 {AArch64::FeatureAliasSVE2BitPerm, AArch64::FeatureSVEBitPerm,
3752 AArch64::FeatureSVE2}},
3753 {"sve2p1", {AArch64::FeatureSVE2p1}},
3754 {"ls64", {AArch64::FeatureLS64}},
3755 {"xs", {AArch64::FeatureXS}},
3756 {"pauth", {AArch64::FeaturePAuth}},
3757 {"flagm", {AArch64::FeatureFlagM}},
3758 {"rme", {AArch64::FeatureRME}},
3759 {"sme", {AArch64::FeatureSME}},
3760 {"sme-f64f64", {AArch64::FeatureSMEF64F64}},
3761 {"sme-f16f16", {AArch64::FeatureSMEF16F16}},
3762 {"sme-i16i64", {AArch64::FeatureSMEI16I64}},
3763 {"sme2", {AArch64::FeatureSME2}},
3764 {"sme2p1", {AArch64::FeatureSME2p1}},
3765 {"sme-b16b16", {AArch64::FeatureSMEB16B16}},
3766 {"hbc", {AArch64::FeatureHBC}},
3767 {"mops", {AArch64::FeatureMOPS}},
3768 {"mec", {AArch64::FeatureMEC}},
3769 {"the", {AArch64::FeatureTHE}},
3770 {"d128", {AArch64::FeatureD128}},
3771 {"lse128", {AArch64::FeatureLSE128}},
3772 {"ite", {AArch64::FeatureITE}},
3773 {"cssc", {AArch64::FeatureCSSC}},
3774 {"rcpc3", {AArch64::FeatureRCPC3}},
3775 {"gcs", {AArch64::FeatureGCS}},
3776 {"bf16", {AArch64::FeatureBF16}},
3777 {"compnum", {AArch64::FeatureComplxNum}},
3778 {"dotprod", {AArch64::FeatureDotProd}},
3779 {"f32mm", {AArch64::FeatureMatMulFP32}},
3780 {"f64mm", {AArch64::FeatureMatMulFP64}},
3781 {"fp16", {AArch64::FeatureFullFP16}},
3782 {"fp16fml", {AArch64::FeatureFP16FML}},
3783 {"i8mm", {AArch64::FeatureMatMulInt8}},
3784 {"lor", {AArch64::FeatureLOR}},
3785 {"profile", {AArch64::FeatureSPE}},
3786 // "rdma" is the name documented by binutils for the feature, but
3787 // binutils also accepts incomplete prefixes of features, so "rdm"
3788 // works too. Support both spellings here.
3789 {"rdm", {AArch64::FeatureRDM}},
3790 {"rdma", {AArch64::FeatureRDM}},
3791 {"sb", {AArch64::FeatureSB}},
3792 {"ssbs", {AArch64::FeatureSSBS}},
3793 {"tme", {AArch64::FeatureTME}},
3794 {"fp8", {AArch64::FeatureFP8}},
3795 {"faminmax", {AArch64::FeatureFAMINMAX}},
3796 {"fp8fma", {AArch64::FeatureFP8FMA}},
3797 {"ssve-fp8fma", {AArch64::FeatureSSVE_FP8FMA}},
3798 {"fp8dot2", {AArch64::FeatureFP8DOT2}},
3799 {"ssve-fp8dot2", {AArch64::FeatureSSVE_FP8DOT2}},
3800 {"fp8dot4", {AArch64::FeatureFP8DOT4}},
3801 {"ssve-fp8dot4", {AArch64::FeatureSSVE_FP8DOT4}},
3802 {"lut", {AArch64::FeatureLUT}},
3803 {"sme-lutv2", {AArch64::FeatureSME_LUTv2}},
3804 {"sme-f8f16", {AArch64::FeatureSMEF8F16}},
3805 {"sme-f8f32", {AArch64::FeatureSMEF8F32}},
3806 {"sme-fa64", {AArch64::FeatureSMEFA64}},
3807 {"cpa", {AArch64::FeatureCPA}},
3808 {"tlbiw", {AArch64::FeatureTLBIW}},
3809 {"pops", {AArch64::FeaturePoPS}},
3810 {"cmpbr", {AArch64::FeatureCMPBR}},
3811 {"f8f32mm", {AArch64::FeatureF8F32MM}},
3812 {"f8f16mm", {AArch64::FeatureF8F16MM}},
3813 {"fprcvt", {AArch64::FeatureFPRCVT}},
3814 {"lsfe", {AArch64::FeatureLSFE}},
3815 {"sme2p2", {AArch64::FeatureSME2p2}},
3816 {"ssve-aes", {AArch64::FeatureSSVE_AES}},
3817 {"sve2p2", {AArch64::FeatureSVE2p2}},
3818 {"sve-aes2", {AArch64::FeatureSVEAES2}},
3819 {"sve-bfscale", {AArch64::FeatureSVEBFSCALE}},
3820 {"sve-f16f32mm", {AArch64::FeatureSVE_F16F32MM}},
3821 {"lsui", {AArch64::FeatureLSUI}},
3822 {"occmo", {AArch64::FeatureOCCMO}},
3823 {"pcdphint", {AArch64::FeaturePCDPHINT}},
3824 {"ssve-bitperm", {AArch64::FeatureSSVE_BitPerm}},
3825 {"sme-mop4", {AArch64::FeatureSME_MOP4}},
3826 {"sme-tmop", {AArch64::FeatureSME_TMOP}},
3827};
3828
3829static void setRequiredFeatureString(FeatureBitset FBS, std::string &Str) {
3830 if (FBS[AArch64::HasV8_0aOps])
3831 Str += "ARMv8a";
3832 if (FBS[AArch64::HasV8_1aOps])
3833 Str += "ARMv8.1a";
3834 else if (FBS[AArch64::HasV8_2aOps])
3835 Str += "ARMv8.2a";
3836 else if (FBS[AArch64::HasV8_3aOps])
3837 Str += "ARMv8.3a";
3838 else if (FBS[AArch64::HasV8_4aOps])
3839 Str += "ARMv8.4a";
3840 else if (FBS[AArch64::HasV8_5aOps])
3841 Str += "ARMv8.5a";
3842 else if (FBS[AArch64::HasV8_6aOps])
3843 Str += "ARMv8.6a";
3844 else if (FBS[AArch64::HasV8_7aOps])
3845 Str += "ARMv8.7a";
3846 else if (FBS[AArch64::HasV8_8aOps])
3847 Str += "ARMv8.8a";
3848 else if (FBS[AArch64::HasV8_9aOps])
3849 Str += "ARMv8.9a";
3850 else if (FBS[AArch64::HasV9_0aOps])
3851 Str += "ARMv9-a";
3852 else if (FBS[AArch64::HasV9_1aOps])
3853 Str += "ARMv9.1a";
3854 else if (FBS[AArch64::HasV9_2aOps])
3855 Str += "ARMv9.2a";
3856 else if (FBS[AArch64::HasV9_3aOps])
3857 Str += "ARMv9.3a";
3858 else if (FBS[AArch64::HasV9_4aOps])
3859 Str += "ARMv9.4a";
3860 else if (FBS[AArch64::HasV9_5aOps])
3861 Str += "ARMv9.5a";
3862 else if (FBS[AArch64::HasV9_6aOps])
3863 Str += "ARMv9.6a";
3864 else if (FBS[AArch64::HasV8_0rOps])
3865 Str += "ARMv8r";
3866 else {
3867 SmallVector<std::string, 2> ExtMatches;
3868 for (const auto& Ext : ExtensionMap) {
3869 // Use & in case multiple features are enabled
3870 if ((FBS & Ext.Features) != FeatureBitset())
3871 ExtMatches.push_back(Ext.Name);
3872 }
3873 Str += !ExtMatches.empty() ? llvm::join(ExtMatches, ", ") : "(unknown)";
3874 }
3875}
3876
3877void AArch64AsmParser::createSysAlias(uint16_t Encoding, OperandVector &Operands,
3878 SMLoc S) {
3879 const uint16_t Op2 = Encoding & 7;
3880 const uint16_t Cm = (Encoding & 0x78) >> 3;
3881 const uint16_t Cn = (Encoding & 0x780) >> 7;
3882 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
3883
3884 const MCExpr *Expr = MCConstantExpr::create(Op1, getContext());
3885
3886 Operands.push_back(
3887 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3888 Operands.push_back(
3889 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
3890 Operands.push_back(
3891 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
3892 Expr = MCConstantExpr::create(Op2, getContext());
3893 Operands.push_back(
3894 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
3895}
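// For illustration, the encoding is split as op1[13:11], CRn[10:7], CRm[6:3]
// and op2[2:0]; e.g. "dc civac, x0" (op1=3, CRn=7, CRm=14, op2=1) expands to
// the operands of "sys #3, c7, c14, #1, x0".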
3896
3897/// parseSysAlias - The IC, DC, AT, and TLBI instructions are simple aliases for
3898/// the SYS instruction. Parse them specially so that we create a SYS MCInst.
3899bool AArch64AsmParser::parseSysAlias(StringRef Name, SMLoc NameLoc,
3900 OperandVector &Operands) {
3901 if (Name.contains('.'))
3902 return TokError("invalid operand");
3903
3904 Mnemonic = Name;
3905 Operands.push_back(AArch64Operand::CreateToken("sys", NameLoc, getContext()));
3906
3907 const AsmToken &Tok = getTok();
3908 StringRef Op = Tok.getString();
3909 SMLoc S = Tok.getLoc();
3910 bool ExpectRegister = true;
3911
3912 if (Mnemonic == "ic") {
3913 const AArch64IC::IC *IC = AArch64IC::lookupICByName(Op);
3914 if (!IC)
3915 return TokError("invalid operand for IC instruction");
3916 else if (!IC->haveFeatures(getSTI().getFeatureBits())) {
3917 std::string Str("IC " + std::string(IC->Name) + " requires: ");
3918 setRequiredFeatureString(IC->getRequiredFeatures(), Str);
3919 return TokError(Str);
3920 }
3921 ExpectRegister = IC->NeedsReg;
3922 createSysAlias(IC->Encoding, Operands, S);
3923 } else if (Mnemonic == "dc") {
3924 const AArch64DC::DC *DC = AArch64DC::lookupDCByName(Op);
3925 if (!DC)
3926 return TokError("invalid operand for DC instruction");
3927 else if (!DC->haveFeatures(getSTI().getFeatureBits())) {
3928 std::string Str("DC " + std::string(DC->Name) + " requires: ");
3929 setRequiredFeatureString(DC->getRequiredFeatures(), Str);
3930 return TokError(Str);
3931 }
3932 createSysAlias(DC->Encoding, Operands, S);
3933 } else if (Mnemonic == "at") {
3934 const AArch64AT::AT *AT = AArch64AT::lookupATByName(Op);
3935 if (!AT)
3936 return TokError("invalid operand for AT instruction");
3937 else if (!AT->haveFeatures(getSTI().getFeatureBits())) {
3938 std::string Str("AT " + std::string(AT->Name) + " requires: ");
3939 setRequiredFeatureString(AT->getRequiredFeatures(), Str);
3940 return TokError(Str);
3941 }
3942 createSysAlias(AT->Encoding, Operands, S);
3943 } else if (Mnemonic == "tlbi") {
3944 const AArch64TLBI::TLBI *TLBI = AArch64TLBI::lookupTLBIByName(Op);
3945 if (!TLBI)
3946 return TokError("invalid operand for TLBI instruction");
3947 else if (!TLBI->haveFeatures(getSTI().getFeatureBits())) {
3948 std::string Str("TLBI " + std::string(TLBI->Name) + " requires: ");
3949 setRequiredFeatureString(TLBI->getRequiredFeatures(), Str);
3950 return TokError(Str);
3951 }
3952 ExpectRegister = TLBI->NeedsReg;
3953 createSysAlias(TLBI->Encoding, Operands, S);
3954 } else if (Mnemonic == "cfp" || Mnemonic == "dvp" || Mnemonic == "cpp" || Mnemonic == "cosp") {
3955
3956 if (Op.lower() != "rctx")
3957 return TokError("invalid operand for prediction restriction instruction");
3958
3959 bool hasAll = getSTI().hasFeature(AArch64::FeatureAll);
3960 bool hasPredres = hasAll || getSTI().hasFeature(AArch64::FeaturePredRes);
3961 bool hasSpecres2 = hasAll || getSTI().hasFeature(AArch64::FeatureSPECRES2);
3962
3963 if (Mnemonic == "cosp" && !hasSpecres2)
3964 return TokError("COSP requires: predres2");
3965 if (!hasPredres)
3966 return TokError(Mnemonic.upper() + "RCTX requires: predres");
3967
3968 uint16_t PRCTX_Op2 = Mnemonic == "cfp" ? 0b100
3969 : Mnemonic == "dvp" ? 0b101
3970 : Mnemonic == "cosp" ? 0b110
3971 : Mnemonic == "cpp" ? 0b111
3972 : 0;
3973 assert(PRCTX_Op2 &&
3974 "Invalid mnemonic for prediction restriction instruction");
3975 const auto SYS_3_7_3 = 0b01101110011; // op=3, CRn=7, CRm=3
3976 const auto Encoding = SYS_3_7_3 << 3 | PRCTX_Op2;
3977
3978 createSysAlias(Encoding, Operands, S);
3979 }
3980
3981 Lex(); // Eat operand.
3982
3983 bool HasRegister = false;
3984
3985 // Check for the optional register operand.
3986 if (parseOptionalToken(AsmToken::Comma)) {
3987 if (Tok.isNot(AsmToken::Identifier) || parseRegister(Operands))
3988 return TokError("expected register operand");
3989 HasRegister = true;
3990 }
3991
3992 if (ExpectRegister && !HasRegister)
3993 return TokError("specified " + Mnemonic + " op requires a register");
3994 else if (!ExpectRegister && HasRegister)
3995 return TokError("specified " + Mnemonic + " op does not use a register");
3996
3997 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
3998 return true;
3999
4000 return false;
4001}
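// For illustration, "ic ialluis" takes no register (ExpectRegister follows
// IC->NeedsReg), whereas "dc civac, x0" requires one; both are rewritten here
// onto the generic "sys" mnemonic via createSysAlias.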
4002
4003/// parseSyspAlias - The TLBIP instructions are simple aliases for
4004/// the SYSP instruction. Parse them specially so that we create a SYSP MCInst.
4005bool AArch64AsmParser::parseSyspAlias(StringRef Name, SMLoc NameLoc,
4006 OperandVector &Operands) {
4007 if (Name.contains('.'))
4008 return TokError("invalid operand");
4009
4010 Mnemonic = Name;
4011 Operands.push_back(
4012 AArch64Operand::CreateToken("sysp", NameLoc, getContext()));
4013
4014 const AsmToken &Tok = getTok();
4015 StringRef Op = Tok.getString();
4016 SMLoc S = Tok.getLoc();
4017
4018 if (Mnemonic == "tlbip") {
4019 bool HasnXSQualifier = Op.ends_with_insensitive("nXS");
4020 if (HasnXSQualifier) {
4021 Op = Op.drop_back(3);
4022 }
4023 const AArch64TLBI::TLBI *TLBIorig = AArch64TLBI::lookupTLBIByName(Op);
4024 if (!TLBIorig)
4025 return TokError("invalid operand for TLBIP instruction");
4026 const AArch64TLBI::TLBI TLBI(
4027 TLBIorig->Name, TLBIorig->Encoding | (HasnXSQualifier ? (1 << 7) : 0),
4028 TLBIorig->NeedsReg,
4029 HasnXSQualifier
4030 ? TLBIorig->FeaturesRequired | FeatureBitset({AArch64::FeatureXS})
4031 : TLBIorig->FeaturesRequired);
4032 if (!TLBI.haveFeatures(getSTI().getFeatureBits())) {
4033 std::string Name =
4034 std::string(TLBI.Name) + (HasnXSQualifier ? "nXS" : "");
4035 std::string Str("TLBIP " + Name + " requires: ");
4036 setRequiredFeatureString(TLBI.getRequiredFeatures(), Str);
4037 return TokError(Str);
4038 }
4039 createSysAlias(TLBI.Encoding, Operands, S);
4040 }
4041
4042 Lex(); // Eat operand.
4043
4044 if (parseComma())
4045 return true;
4046
4047 if (Tok.isNot(AsmToken::Identifier))
4048 return TokError("expected register identifier");
4049 auto Result = tryParseSyspXzrPair(Operands);
4050 if (Result.isNoMatch())
4051 Result = tryParseGPRSeqPair(Operands);
4052 if (!Result.isSuccess())
4053 return TokError("specified " + Mnemonic +
4054 " op requires a pair of registers");
4055
4056 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
4057 return true;
4058
4059 return false;
4060}
4061
4062ParseStatus AArch64AsmParser::tryParseBarrierOperand(OperandVector &Operands) {
4063 MCAsmParser &Parser = getParser();
4064 const AsmToken &Tok = getTok();
4065
4066 if (Mnemonic == "tsb" && Tok.isNot(AsmToken::Identifier))
4067 return TokError("'csync' operand expected");
4068 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4069 // Immediate operand.
4070 const MCExpr *ImmVal;
4071 SMLoc ExprLoc = getLoc();
4072 AsmToken IntTok = Tok;
4073 if (getParser().parseExpression(ImmVal))
4074 return ParseStatus::Failure;
4075 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4076 if (!MCE)
4077 return Error(ExprLoc, "immediate value expected for barrier operand");
4078 int64_t Value = MCE->getValue();
4079 if (Mnemonic == "dsb" && Value > 15) {
4080 // This case is a no-match here, but it might be matched by the nXS
4081 // variant. We deliberately do not unlex the optional '#', since it is not
4082 // needed to characterize an integer immediate.
4083 Parser.getLexer().UnLex(IntTok);
4084 return ParseStatus::NoMatch;
4085 }
4086 if (Value < 0 || Value > 15)
4087 return Error(ExprLoc, "barrier operand out of range");
4088 auto DB = AArch64DB::lookupDBByEncoding(Value);
4089 Operands.push_back(AArch64Operand::CreateBarrier(Value, DB ? DB->Name : "",
4090 ExprLoc, getContext(),
4091 false /*hasnXSModifier*/));
4092 return ParseStatus::Success;
4093 }
4094
4095 if (Tok.isNot(AsmToken::Identifier))
4096 return TokError("invalid operand for instruction");
4097
4098 StringRef Operand = Tok.getString();
4099 auto TSB = AArch64TSB::lookupTSBByName(Operand);
4100 auto DB = AArch64DB::lookupDBByName(Operand);
4101 // The only valid named option for ISB is 'sy'
4102 if (Mnemonic == "isb" && (!DB || DB->Encoding != AArch64DB::sy))
4103 return TokError("'sy' or #imm operand expected");
4104 // The only valid named option for TSB is 'csync'
4105 if (Mnemonic == "tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync))
4106 return TokError("'csync' operand expected");
4107 if (!DB && !TSB) {
4108 if (Mnemonic == "dsb") {
4109 // This case is a no match here, but it might be matched by the nXS
4110 // variant.
4111 return ParseStatus::NoMatch;
4112 }
4113 return TokError("invalid barrier option name");
4114 }
4115
4116 Operands.push_back(AArch64Operand::CreateBarrier(
4117 DB ? DB->Encoding : TSB->Encoding, Tok.getString(), getLoc(),
4118 getContext(), false /*hasnXSModifier*/));
4119 Lex(); // Consume the option
4120
4121 return ParseStatus::Success;
4122}
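// For illustration, "dsb sy" and "dsb #15" name the same barrier (SY encodes
// as 15), while "dsb #16" falls through as NoMatch above so that the nXS
// variant below can claim it.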
4123
4124ParseStatus
4125AArch64AsmParser::tryParseBarriernXSOperand(OperandVector &Operands) {
4126 const AsmToken &Tok = getTok();
4127
4128 assert(Mnemonic == "dsb" && "Instruction does not accept nXS operands");
4129 if (Mnemonic != "dsb")
4130 return ParseStatus::Failure;
4131
4132 if (parseOptionalToken(AsmToken::Hash) || Tok.is(AsmToken::Integer)) {
4133 // Immediate operand.
4134 const MCExpr *ImmVal;
4135 SMLoc ExprLoc = getLoc();
4136 if (getParser().parseExpression(ImmVal))
4137 return ParseStatus::Failure;
4138 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4139 if (!MCE)
4140 return Error(ExprLoc, "immediate value expected for barrier operand");
4141 int64_t Value = MCE->getValue();
4142 // v8.7-A DSB in the nXS variant accepts only the following immediate
4143 // values: 16, 20, 24, 28.
4144 if (Value != 16 && Value != 20 && Value != 24 && Value != 28)
4145 return Error(ExprLoc, "barrier operand out of range");
4146 auto DB = AArch64DBnXS::lookupDBnXSByImmValue(Value);
4147 Operands.push_back(AArch64Operand::CreateBarrier(DB->Encoding, DB->Name,
4148 ExprLoc, getContext(),
4149 true /*hasnXSModifier*/));
4150 return ParseStatus::Success;
4151 }
4152
4153 if (Tok.isNot(AsmToken::Identifier))
4154 return TokError("invalid operand for instruction");
4155
4156 StringRef Operand = Tok.getString();
4157 auto DB = AArch64DBnXS::lookupDBnXSByName(Operand);
4158
4159 if (!DB)
4160 return TokError("invalid barrier option name");
4161
4162 Operands.push_back(
4163 AArch64Operand::CreateBarrier(DB->Encoding, Tok.getString(), getLoc(),
4164 getContext(), true /*hasnXSModifier*/));
4165 Lex(); // Consume the option
4166
4167 return ParseStatus::Success;
4168}
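// For illustration, the FEAT_XS forms are written "dsb synxs", "dsb ishnxs",
// and so on, or with the raw immediates #16/#20/#24/#28 listed above.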
4169
4170ParseStatus AArch64AsmParser::tryParseSysReg(OperandVector &Operands) {
4171 const AsmToken &Tok = getTok();
4172
4173 if (Tok.isNot(AsmToken::Identifier))
4174 return ParseStatus::NoMatch;
4175
4176 if (AArch64SVCR::lookupSVCRByName(Tok.getString()))
4177 return ParseStatus::NoMatch;
4178
4179 int MRSReg, MSRReg;
4180 auto SysReg = AArch64SysReg::lookupSysRegByName(Tok.getString());
4181 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
4182 MRSReg = SysReg->Readable ? SysReg->Encoding : -1;
4183 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
4184 } else
4185 MRSReg = MSRReg = AArch64SysReg::parseGenericRegister(Tok.getString());
4186
4187 unsigned PStateImm = -1;
4188 auto PState15 = AArch64PState::lookupPStateImm0_15ByName(Tok.getString());
4189 if (PState15 && PState15->haveFeatures(getSTI().getFeatureBits()))
4190 PStateImm = PState15->Encoding;
4191 if (!PState15) {
4192 auto PState1 = AArch64PState::lookupPStateImm0_1ByName(Tok.getString());
4193 if (PState1 && PState1->haveFeatures(getSTI().getFeatureBits()))
4194 PStateImm = PState1->Encoding;
4195 }
4196
4197 Operands.push_back(
4198 AArch64Operand::CreateSysReg(Tok.getString(), getLoc(), MRSReg, MSRReg,
4199 PStateImm, getContext()));
4200 Lex(); // Eat identifier
4201
4202 return ParseStatus::Success;
4203}
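// For illustration, "mrs x0, ttbr0_el1" resolves through the named lookup,
// while an unnamed register can still be written in the generic
// "S<op0>_<op1>_C<Cn>_C<Cm>_<op2>" form (TTBR0_EL1 would be S3_0_C2_C0_0),
// which parseGenericRegister handles.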
4204
4205ParseStatus
4206AArch64AsmParser::tryParsePHintInstOperand(OperandVector &Operands) {
4207 SMLoc S = getLoc();
4208 const AsmToken &Tok = getTok();
4209 if (Tok.isNot(AsmToken::Identifier))
4210 return TokError("invalid operand for instruction");
4211
4212 auto PH = AArch64PHint::lookupPHintByName(Tok.getString());
4213 if (!PH)
4214 return TokError("invalid operand for instruction");
4215
4216 Operands.push_back(AArch64Operand::CreatePHintInst(
4217 PH->Encoding, Tok.getString(), S, getContext()));
4218 Lex(); // Eat identifier token.
4219 return ParseStatus::Success;
4220}
4221
4222/// tryParseNeonVectorRegister - Parse a vector register operand.
4223bool AArch64AsmParser::tryParseNeonVectorRegister(OperandVector &Operands) {
4224 if (getTok().isNot(AsmToken::Identifier))
4225 return true;
4226
4227 SMLoc S = getLoc();
4228 // Check for a vector register specifier first.
4229 StringRef Kind;
4230 MCRegister Reg;
4231 ParseStatus Res = tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
4232 if (!Res.isSuccess())
4233 return true;
4234
4235 const auto &KindRes = parseVectorKind(Kind, RegKind::NeonVector);
4236 if (!KindRes)
4237 return true;
4238
4239 unsigned ElementWidth = KindRes->second;
4240 Operands.push_back(
4241 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
4242 S, getLoc(), getContext()));
4243
4244 // If there was an explicit qualifier, that goes on as a literal text
4245 // operand.
4246 if (!Kind.empty())
4247 Operands.push_back(AArch64Operand::CreateToken(Kind, S, getContext()));
4248
4249 return tryParseVectorIndex(Operands).isFailure();
4250}
4251
4252ParseStatus AArch64AsmParser::tryParseVectorIndex(OperandVector &Operands) {
4253 SMLoc SIdx = getLoc();
4254 if (parseOptionalToken(AsmToken::LBrac)) {
4255 const MCExpr *ImmVal;
4256 if (getParser().parseExpression(ImmVal))
4257 return ParseStatus::NoMatch;
4258 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4259 if (!MCE)
4260 return TokError("immediate value expected for vector index");
4261
4262 SMLoc E = getLoc();
4263
4264 if (parseToken(AsmToken::RBrac, "']' expected"))
4265 return ParseStatus::Failure;
4266
4267 Operands.push_back(AArch64Operand::CreateVectorIndex(MCE->getValue(), SIdx,
4268 E, getContext()));
4269 return ParseStatus::Success;
4270 }
4271
4272 return ParseStatus::NoMatch;
4273}
4274
4275// tryParseVectorRegister - Try to parse a vector register name with
4276// optional kind specifier. If it is a register specifier, eat the token
4277// and return it.
4278ParseStatus AArch64AsmParser::tryParseVectorRegister(MCRegister &Reg,
4279 StringRef &Kind,
4280 RegKind MatchKind) {
4281 const AsmToken &Tok = getTok();
4282
4283 if (Tok.isNot(AsmToken::Identifier))
4284 return ParseStatus::NoMatch;
4285
4286 StringRef Name = Tok.getString();
4287 // If there is a kind specifier, it's separated from the register name by
4288 // a '.'.
4289 size_t Start = 0, Next = Name.find('.');
4290 StringRef Head = Name.slice(Start, Next);
4291 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
4292
4293 if (RegNum) {
4294 if (Next != StringRef::npos) {
4295 Kind = Name.substr(Next);
4296 if (!isValidVectorKind(Kind, MatchKind))
4297 return TokError("invalid vector kind qualifier");
4298 }
4299 Lex(); // Eat the register token.
4300
4301 Reg = RegNum;
4302 return ParseStatus::Success;
4303 }
4304
4305 return ParseStatus::NoMatch;
4306}
4307
4308ParseStatus AArch64AsmParser::tryParseSVEPredicateOrPredicateAsCounterVector(
4309 OperandVector &Operands) {
4310 ParseStatus Status =
4311 tryParseSVEPredicateVector<RegKind::SVEPredicateAsCounter>(Operands);
4312 if (!Status.isSuccess())
4313 Status = tryParseSVEPredicateVector<RegKind::SVEPredicateVector>(Operands);
4314 return Status;
4315}
4316
4317/// tryParseSVEPredicateVector - Parse an SVE predicate register operand.
4318template <RegKind RK>
4319ParseStatus
4320AArch64AsmParser::tryParseSVEPredicateVector(OperandVector &Operands) {
4321 // Check for a SVE predicate register specifier first.
4322 const SMLoc S = getLoc();
4323 StringRef Kind;
4324 MCRegister RegNum;
4325 auto Res = tryParseVectorRegister(RegNum, Kind, RK);
4326 if (!Res.isSuccess())
4327 return Res;
4328
4329 const auto &KindRes = parseVectorKind(Kind, RK);
4330 if (!KindRes)
4331 return ParseStatus::NoMatch;
4332
4333 unsigned ElementWidth = KindRes->second;
4334 Operands.push_back(AArch64Operand::CreateVectorReg(
4335 RegNum, RK, ElementWidth, S,
4336 getLoc(), getContext()));
4337
4338 if (getLexer().is(AsmToken::LBrac)) {
4339 if (RK == RegKind::SVEPredicateAsCounter) {
4340 ParseStatus ResIndex = tryParseVectorIndex(Operands);
4341 if (ResIndex.isSuccess())
4342 return ParseStatus::Success;
4343 } else {
4344 // Indexed predicate; there's no comma, so try to parse the next operand
4345 // immediately.
4346 if (parseOperand(Operands, false, false))
4347 return ParseStatus::NoMatch;
4348 }
4349 }
4350
4351 // Not all predicates are followed by a '/m' or '/z'.
4352 if (getTok().isNot(AsmToken::Slash))
4353 return ParseStatus::Success;
4354
4355 // But when they do they shouldn't have an element type suffix.
4356 if (!Kind.empty())
4357 return Error(S, "not expecting size suffix");
4358
4359 // Add a literal slash as operand
4360 Operands.push_back(AArch64Operand::CreateToken("/", getLoc(), getContext()));
4361
4362 Lex(); // Eat the slash.
4363
4364 // Zeroing or merging?
4365 auto Pred = getTok().getString().lower();
4366 if (RK == RegKind::SVEPredicateAsCounter && Pred != "z")
4367 return Error(getLoc(), "expecting 'z' predication");
4368
4369 if (RK == RegKind::SVEPredicateVector && Pred != "z" && Pred != "m")
4370 return Error(getLoc(), "expecting 'm' or 'z' predication");
4371
4372 // Add zero/merge token.
4373 const char *ZM = Pred == "z" ? "z" : "m";
4374 Operands.push_back(AArch64Operand::CreateToken(ZM, getLoc(), getContext()));
4375
4376 Lex(); // Eat zero/merge token.
4377 return ParseStatus::Success;
4378}
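// For illustration, this parses governing predicates such as "p0/z" and
// "p0/m", and predicate-as-counter operands such as the "pn8/z" in
// "ld1w { z0.s - z3.s }, pn8/z, [x0]", which only allow the 'z' qualifier.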
4379
4380/// parseRegister - Parse a register operand.
4381bool AArch64AsmParser::parseRegister(OperandVector &Operands) {
4382 // Try for a Neon vector register.
4383 if (!tryParseNeonVectorRegister(Operands))
4384 return false;
4385
4386 if (tryParseZTOperand(Operands).isSuccess())
4387 return false;
4388
4389 // Otherwise try for a scalar register.
4390 if (tryParseGPROperand<false>(Operands).isSuccess())
4391 return false;
4392
4393 return true;
4394}
4395
4396bool AArch64AsmParser::parseSymbolicImmVal(const MCExpr *&ImmVal) {
4397 bool HasELFModifier = false;
4398 AArch64::Specifier RefKind;
4399 SMLoc Loc = getLexer().getLoc();
4400 if (parseOptionalToken(AsmToken::Colon)) {
4401 HasELFModifier = true;
4402
4403 if (getTok().isNot(AsmToken::Identifier))
4404 return TokError("expect relocation specifier in operand after ':'");
4405
4406 std::string LowerCase = getTok().getIdentifier().lower();
4407 RefKind = StringSwitch<AArch64::Specifier>(LowerCase)
4408 .Case("lo12", AArch64::S_LO12)
4409 .Case("abs_g3", AArch64::S_ABS_G3)
4410 .Case("abs_g2", AArch64::S_ABS_G2)
4411 .Case("abs_g2_s", AArch64::S_ABS_G2_S)
4412 .Case("abs_g2_nc", AArch64::S_ABS_G2_NC)
4413 .Case("abs_g1", AArch64::S_ABS_G1)
4414 .Case("abs_g1_s", AArch64::S_ABS_G1_S)
4415 .Case("abs_g1_nc", AArch64::S_ABS_G1_NC)
4416 .Case("abs_g0", AArch64::S_ABS_G0)
4417 .Case("abs_g0_s", AArch64::S_ABS_G0_S)
4418 .Case("abs_g0_nc", AArch64::S_ABS_G0_NC)
4419 .Case("prel_g3", AArch64::S_PREL_G3)
4420 .Case("prel_g2", AArch64::S_PREL_G2)
4421 .Case("prel_g2_nc", AArch64::S_PREL_G2_NC)
4422 .Case("prel_g1", AArch64::S_PREL_G1)
4423 .Case("prel_g1_nc", AArch64::S_PREL_G1_NC)
4424 .Case("prel_g0", AArch64::S_PREL_G0)
4425 .Case("prel_g0_nc", AArch64::S_PREL_G0_NC)
4426 .Case("dtprel_g2", AArch64::S_DTPREL_G2)
4427 .Case("dtprel_g1", AArch64::S_DTPREL_G1)
4428 .Case("dtprel_g1_nc", AArch64::S_DTPREL_G1_NC)
4429 .Case("dtprel_g0", AArch64::S_DTPREL_G0)
4430 .Case("dtprel_g0_nc", AArch64::S_DTPREL_G0_NC)
4431 .Case("dtprel_hi12", AArch64::S_DTPREL_HI12)
4432 .Case("dtprel_lo12", AArch64::S_DTPREL_LO12)
4433 .Case("dtprel_lo12_nc", AArch64::S_DTPREL_LO12_NC)
4434 .Case("pg_hi21_nc", AArch64::S_ABS_PAGE_NC)
4435 .Case("tprel_g2", AArch64::S_TPREL_G2)
4436 .Case("tprel_g1", AArch64::S_TPREL_G1)
4437 .Case("tprel_g1_nc", AArch64::S_TPREL_G1_NC)
4438 .Case("tprel_g0", AArch64::S_TPREL_G0)
4439 .Case("tprel_g0_nc", AArch64::S_TPREL_G0_NC)
4440 .Case("tprel_hi12", AArch64::S_TPREL_HI12)
4441 .Case("tprel_lo12", AArch64::S_TPREL_LO12)
4442 .Case("tprel_lo12_nc", AArch64::S_TPREL_LO12_NC)
4443 .Case("tlsdesc_lo12", AArch64::S_TLSDESC_LO12)
4444 .Case("tlsdesc_auth_lo12", AArch64::S_TLSDESC_AUTH_LO12)
4445 .Case("got", AArch64::S_GOT_PAGE)
4446 .Case("gotpage_lo15", AArch64::S_GOT_PAGE_LO15)
4447 .Case("got_lo12", AArch64::S_GOT_LO12)
4448 .Case("got_auth", AArch64::S_GOT_AUTH_PAGE)
4449 .Case("got_auth_lo12", AArch64::S_GOT_AUTH_LO12)
4450 .Case("gottprel", AArch64::S_GOTTPREL_PAGE)
4451 .Case("gottprel_lo12", AArch64::S_GOTTPREL_LO12_NC)
4452 .Case("gottprel_g1", AArch64::S_GOTTPREL_G1)
4453 .Case("gottprel_g0_nc", AArch64::S_GOTTPREL_G0_NC)
4454 .Case("tlsdesc", AArch64::S_TLSDESC_PAGE)
4455 .Case("tlsdesc_auth", AArch64::S_TLSDESC_AUTH_PAGE)
4456 .Case("secrel_lo12", AArch64::S_SECREL_LO12)
4457 .Case("secrel_hi12", AArch64::S_SECREL_HI12)
4458 .Default(AArch64::S_INVALID);
4459
4460 if (RefKind == AArch64::S_INVALID)
4461 return TokError("expect relocation specifier in operand after ':'");
4462
4463 Lex(); // Eat identifier
4464
4465 if (parseToken(AsmToken::Colon, "expect ':' after relocation specifier"))
4466 return true;
4467 }
4468
4469 if (getParser().parseExpression(ImmVal))
4470 return true;
4471
4472 if (HasELFModifier)
4473 ImmVal = MCSpecifierExpr::create(ImmVal, RefKind, getContext(), Loc);
4474
4475 SMLoc EndLoc;
4476 if (getContext().getAsmInfo()->hasSubsectionsViaSymbols()) {
4477 if (getParser().parseAtSpecifier(ImmVal, EndLoc))
4478 return true;
4479 const MCExpr *Term;
4480 MCBinaryExpr::Opcode Opcode;
4481 if (parseOptionalToken(AsmToken::Plus))
4482 Opcode = MCBinaryExpr::Add;
4483 else if (parseOptionalToken(AsmToken::Minus))
4484 Opcode = MCBinaryExpr::Sub;
4485 else
4486 return false;
4487 if (getParser().parsePrimaryExpr(Term, EndLoc))
4488 return true;
4489 ImmVal = MCBinaryExpr::create(Opcode, ImmVal, Term, getContext());
4490 }
4491
4492 return false;
4493}
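// For illustration, the ':'-prefixed specifiers cover relocations such as the
// usual ADRP/ADD pair "adrp x0, sym" followed by "add x0, x0, :lo12:sym",
// where ":lo12:" becomes an MCSpecifierExpr wrapping the symbol expression.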
4494
4495ParseStatus AArch64AsmParser::tryParseMatrixTileList(OperandVector &Operands) {
4496 if (getTok().isNot(AsmToken::LCurly))
4497 return ParseStatus::NoMatch;
4498
4499 auto ParseMatrixTile = [this](unsigned &Reg,
4500 unsigned &ElementWidth) -> ParseStatus {
4501 StringRef Name = getTok().getString();
4502 size_t DotPosition = Name.find('.');
4503 if (DotPosition == StringRef::npos)
4504 return ParseStatus::NoMatch;
4505
4506 unsigned RegNum = matchMatrixTileListRegName(Name);
4507 if (!RegNum)
4508 return ParseStatus::NoMatch;
4509
4510 StringRef Tail = Name.drop_front(DotPosition);
4511 const std::optional<std::pair<int, int>> &KindRes =
4512 parseVectorKind(Tail, RegKind::Matrix);
4513 if (!KindRes)
4514 return TokError(
4515 "Expected the register to be followed by element width suffix");
4516 ElementWidth = KindRes->second;
4517 Reg = RegNum;
4518 Lex(); // Eat the register.
4519 return ParseStatus::Success;
4520 };
4521
4522 SMLoc S = getLoc();
4523 auto LCurly = getTok();
4524 Lex(); // Eat the left curly token.
4525
4526 // Empty matrix list
4527 if (parseOptionalToken(AsmToken::RCurly)) {
4528 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4529 /*RegMask=*/0, S, getLoc(), getContext()));
4530 return ParseStatus::Success;
4531 }
4532
4533 // Try parse {za} alias early
4534 if (getTok().getString().equals_insensitive("za")) {
4535 Lex(); // Eat 'za'
4536
4537 if (parseToken(AsmToken::RCurly, "'}' expected"))
4538 return ParseStatus::Failure;
4539
4540 Operands.push_back(AArch64Operand::CreateMatrixTileList(
4541 /*RegMask=*/0xFF, S, getLoc(), getContext()));
4542 return ParseStatus::Success;
4543 }
4544
4545 SMLoc TileLoc = getLoc();
4546
4547 unsigned FirstReg, ElementWidth;
4548 auto ParseRes = ParseMatrixTile(FirstReg, ElementWidth);
4549 if (!ParseRes.isSuccess()) {
4550 getLexer().UnLex(LCurly);
4551 return ParseRes;
4552 }
4553
4554 const MCRegisterInfo *RI = getContext().getRegisterInfo();
4555
4556 unsigned PrevReg = FirstReg;
4557
4558 SmallSet<unsigned, 8> DRegs;
4559 AArch64Operand::ComputeRegsForAlias(FirstReg, DRegs, ElementWidth);
4560
4561 SmallSet<unsigned, 8> SeenRegs;
4562 SeenRegs.insert(FirstReg);
4563
4564 while (parseOptionalToken(AsmToken::Comma)) {
4565 TileLoc = getLoc();
4566 unsigned Reg, NextElementWidth;
4567 ParseRes = ParseMatrixTile(Reg, NextElementWidth);
4568 if (!ParseRes.isSuccess())
4569 return ParseRes;
4570
4571 // Element size must match on all regs in the list.
4572 if (ElementWidth != NextElementWidth)
4573 return Error(TileLoc, "mismatched register size suffix");
4574
4575 if (RI->getEncodingValue(Reg) <= (RI->getEncodingValue(PrevReg)))
4576 Warning(TileLoc, "tile list not in ascending order");
4577
4578 if (SeenRegs.contains(Reg))
4579 Warning(TileLoc, "duplicate tile in list");
4580 else {
4581 SeenRegs.insert(Reg);
4582 AArch64Operand::ComputeRegsForAlias(Reg, DRegs, ElementWidth);
4583 }
4584
4585 PrevReg = Reg;
4586 }
4587
4588 if (parseToken(AsmToken::RCurly, "'}' expected"))
4589 return ParseStatus::Failure;
4590
4591 unsigned RegMask = 0;
4592 for (auto Reg : DRegs)
4593 RegMask |= 0x1 << (RI->getEncodingValue(Reg) -
4594 RI->getEncodingValue(AArch64::ZAD0));
4595 Operands.push_back(
4596 AArch64Operand::CreateMatrixTileList(RegMask, S, getLoc(), getContext()));
4597
4598 return ParseStatus::Success;
4599}
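// For illustration, "zero {za0.d, za2.d}" yields a mask with the ZAD0 and
// ZAD2 bits set, "{za}" is the whole-array alias (mask 0xFF), and an empty
// "{}" list gives mask 0.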
4600
4601template <RegKind VectorKind>
4602ParseStatus AArch64AsmParser::tryParseVectorList(OperandVector &Operands,
4603 bool ExpectMatch) {
4604 MCAsmParser &Parser = getParser();
4605 if (!getTok().is(AsmToken::LCurly))
4606 return ParseStatus::NoMatch;
4607
4608 // Wrapper around parse function
4609 auto ParseVector = [this](MCRegister &Reg, StringRef &Kind, SMLoc Loc,
4610 bool NoMatchIsError) -> ParseStatus {
4611 auto RegTok = getTok();
4612 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
4613 if (ParseRes.isSuccess()) {
4614 if (parseVectorKind(Kind, VectorKind))
4615 return ParseRes;
4616 llvm_unreachable("Expected a valid vector kind");
4617 }
4618
4619 if (RegTok.is(AsmToken::Identifier) && ParseRes.isNoMatch() &&
4620 RegTok.getString().equals_insensitive("zt0"))
4621 return ParseStatus::NoMatch;
4622
4623 if (RegTok.isNot(AsmToken::Identifier) || ParseRes.isFailure() ||
4624 (ParseRes.isNoMatch() && NoMatchIsError &&
4625 !RegTok.getString().starts_with_insensitive("za")))
4626 return Error(Loc, "vector register expected");
4627
4628 return ParseStatus::NoMatch;
4629 };
4630
4631 unsigned NumRegs = getNumRegsForRegKind(VectorKind);
4632 SMLoc S = getLoc();
4633 auto LCurly = getTok();
4634 Lex(); // Eat the left curly token.
4635
4636 StringRef Kind;
4637 MCRegister FirstReg;
4638 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
4639
4640 // Put back the original left bracket if there was no match, so that
4641 // different types of list-operands can be matched (e.g. SVE, Neon).
4642 if (ParseRes.isNoMatch())
4643 Parser.getLexer().UnLex(LCurly);
4644
4645 if (!ParseRes.isSuccess())
4646 return ParseRes;
4647
4648 MCRegister PrevReg = FirstReg;
4649 unsigned Count = 1;
4650
4651 unsigned Stride = 1;
4652 if (parseOptionalToken(AsmToken::Minus)) {
4653 SMLoc Loc = getLoc();
4654 StringRef NextKind;
4655
4656 MCRegister Reg;
4657 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4658 if (!ParseRes.isSuccess())
4659 return ParseRes;
4660
4661 // Any kind suffixes must match on all regs in the list.
4662 if (Kind != NextKind)
4663 return Error(Loc, "mismatched register size suffix");
4664
4665 unsigned Space =
4666 (PrevReg < Reg) ? (Reg - PrevReg) : (NumRegs - (PrevReg - Reg));
4667
4668 if (Space == 0 || Space > 3)
4669 return Error(Loc, "invalid number of vectors");
4670
4671 Count += Space;
4672 }
4673 else {
4674 bool HasCalculatedStride = false;
4675 while (parseOptionalToken(AsmToken::Comma)) {
4676 SMLoc Loc = getLoc();
4677 StringRef NextKind;
4678 MCRegister Reg;
4679 ParseRes = ParseVector(Reg, NextKind, getLoc(), true);
4680 if (!ParseRes.isSuccess())
4681 return ParseRes;
4682
4683 // Any kind suffixes must match on all regs in the list.
4684 if (Kind != NextKind)
4685 return Error(Loc, "mismatched register size suffix");
4686
4687 unsigned RegVal = getContext().getRegisterInfo()->getEncodingValue(Reg);
4688 unsigned PrevRegVal =
4689 getContext().getRegisterInfo()->getEncodingValue(PrevReg);
4690 if (!HasCalculatedStride) {
4691 Stride = (PrevRegVal < RegVal) ? (RegVal - PrevRegVal)
4692 : (NumRegs - (PrevRegVal - RegVal));
4693 HasCalculatedStride = true;
4694 }
4695
4696 // Registers must be incremental (with a wraparound at the last register).
4697 if (Stride == 0 || RegVal != ((PrevRegVal + Stride) % NumRegs))
4698 return Error(Loc, "registers must have the same sequential stride");
4699
4700 PrevReg = Reg;
4701 ++Count;
4702 }
4703 }
4704
4705 if (parseToken(AsmToken::RCurly, "'}' expected"))
4706 return ParseStatus::Failure;
4707
4708 if (Count > 4)
4709 return Error(S, "invalid number of vectors");
4710
4711 unsigned NumElements = 0;
4712 unsigned ElementWidth = 0;
4713 if (!Kind.empty()) {
4714 if (const auto &VK = parseVectorKind(Kind, VectorKind))
4715 std::tie(NumElements, ElementWidth) = *VK;
4716 }
4717
4718 Operands.push_back(AArch64Operand::CreateVectorList(
4719 FirstReg, Count, Stride, NumElements, ElementWidth, VectorKind, S,
4720 getLoc(), getContext()));
4721
4722 return ParseStatus::Success;
4723}
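// For illustration, this accepts consecutive lists such as
// "{ v0.4s, v1.4s, v2.4s, v3.4s }" (or the range form "{ v0.4s - v3.4s }")
// as well as SME2 strided lists such as "{ z0.d, z8.d }", where the computed
// Stride is 8 and every subsequent register must keep that stride.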
4724
4725/// parseNeonVectorList - Parse a vector list operand for AdvSIMD instructions.
4726bool AArch64AsmParser::parseNeonVectorList(OperandVector &Operands) {
4727 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands, true);
4728 if (!ParseRes.isSuccess())
4729 return true;
4730
4731 return tryParseVectorIndex(Operands).isFailure();
4732}
4733
4734ParseStatus AArch64AsmParser::tryParseGPR64sp0Operand(OperandVector &Operands) {
4735 SMLoc StartLoc = getLoc();
4736
4737 MCRegister RegNum;
4738 ParseStatus Res = tryParseScalarRegister(RegNum);
4739 if (!Res.isSuccess())
4740 return Res;
4741
4742 if (!parseOptionalToken(AsmToken::Comma)) {
4743 Operands.push_back(AArch64Operand::CreateReg(
4744 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4745 return ParseStatus::Success;
4746 }
4747
4748 parseOptionalToken(AsmToken::Hash);
4749
4750 if (getTok().isNot(AsmToken::Integer))
4751 return Error(getLoc(), "index must be absent or #0");
4752
4753 const MCExpr *ImmVal;
4754 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
4755 cast<MCConstantExpr>(ImmVal)->getValue() != 0)
4756 return Error(getLoc(), "index must be absent or #0");
4757
4758 Operands.push_back(AArch64Operand::CreateReg(
4759 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext()));
4760 return ParseStatus::Success;
4761}
4762
4763ParseStatus AArch64AsmParser::tryParseZTOperand(OperandVector &Operands) {
4764 SMLoc StartLoc = getLoc();
4765 const AsmToken &Tok = getTok();
4766 std::string Name = Tok.getString().lower();
4767
4768 unsigned RegNum = matchRegisterNameAlias(Name, RegKind::LookupTable);
4769
4770 if (RegNum == 0)
4771 return ParseStatus::NoMatch;
4772
4773 Operands.push_back(AArch64Operand::CreateReg(
4774 RegNum, RegKind::LookupTable, StartLoc, getLoc(), getContext()));
4775 Lex(); // Eat register.
4776
4777 // Check if the register is followed by an index
4778 if (parseOptionalToken(AsmToken::LBrac)) {
4779 Operands.push_back(
4780 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4781 const MCExpr *ImmVal;
4782 if (getParser().parseExpression(ImmVal))
4783 return ParseStatus::NoMatch;
4784 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
4785 if (!MCE)
4786 return TokError("immediate value expected for vector index");
4787 Operands.push_back(AArch64Operand::CreateImm(
4788 MCConstantExpr::create(MCE->getValue(), getContext()), StartLoc,
4789 getLoc(), getContext()));
4790 if (parseOptionalToken(AsmToken::Comma))
4791 if (parseOptionalMulOperand(Operands))
4792 return ParseStatus::Failure;
4793 if (parseToken(AsmToken::RBrac, "']' expected"))
4794 return ParseStatus::Failure;
4795 Operands.push_back(
4796 AArch64Operand::CreateToken("]", getLoc(), getContext()));
4797 }
4798 return ParseStatus::Success;
4799}
4800
4801template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
4802ParseStatus AArch64AsmParser::tryParseGPROperand(OperandVector &Operands) {
4803 SMLoc StartLoc = getLoc();
4804
4805 MCRegister RegNum;
4806 ParseStatus Res = tryParseScalarRegister(RegNum);
4807 if (!Res.isSuccess())
4808 return Res;
4809
4810 // No shift/extend is the default.
4811 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
4812 Operands.push_back(AArch64Operand::CreateReg(
4813 RegNum, RegKind::Scalar, StartLoc, getLoc(), getContext(), EqTy));
4814 return ParseStatus::Success;
4815 }
4816
4817 // Eat the comma
4818 Lex();
4819
4820 // Match the shift
4821 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
4822 Res = tryParseOptionalShiftExtend(ExtOpnd);
4823 if (!Res.isSuccess())
4824 return Res;
4825
4826 auto Ext = static_cast<AArch64Operand*>(ExtOpnd.back().get());
4827 Operands.push_back(AArch64Operand::CreateReg(
4828 RegNum, RegKind::Scalar, StartLoc, Ext->getEndLoc(), getContext(), EqTy,
4829 Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
4830 Ext->hasShiftExtendAmount()));
4831
4832 return ParseStatus::Success;
4833}
4834
4835bool AArch64AsmParser::parseOptionalMulOperand(OperandVector &Operands) {
4836 MCAsmParser &Parser = getParser();
4837
4838 // Some SVE instructions have a decoration after the immediate, i.e.
4839 // "mul vl". We parse them here and add tokens, which must be present in the
4840 // asm string in the tablegen instruction.
4841 bool NextIsVL =
4842 Parser.getLexer().peekTok().getString().equals_insensitive("vl");
4843 bool NextIsHash = Parser.getLexer().peekTok().is(AsmToken::Hash);
4844 if (!getTok().getString().equals_insensitive("mul") ||
4845 !(NextIsVL || NextIsHash))
4846 return true;
4847
4848 Operands.push_back(
4849 AArch64Operand::CreateToken("mul", getLoc(), getContext()));
4850 Lex(); // Eat the "mul"
4851
4852 if (NextIsVL) {
4853 Operands.push_back(
4854 AArch64Operand::CreateToken("vl", getLoc(), getContext()));
4855 Lex(); // Eat the "vl"
4856 return false;
4857 }
4858
4859 if (NextIsHash) {
4860 Lex(); // Eat the #
4861 SMLoc S = getLoc();
4862
4863 // Parse immediate operand.
4864 const MCExpr *ImmVal;
4865 if (!Parser.parseExpression(ImmVal))
4866 if (const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
4867 Operands.push_back(AArch64Operand::CreateImm(
4868 MCConstantExpr::create(MCE->getValue(), getContext()), S, getLoc(),
4869 getContext()));
4870 return false;
4871 }
4872 }
4873
4874 return Error(getLoc(), "expected 'vl' or '#<imm>'");
4875}
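// For illustration, the decorations handled here appear in SVE forms such as
// "ldr z0, [x0, #1, mul vl]" and "cntb x0, all, mul #4"; "mul", "vl" and the
// immediate are pushed as separate operands to match the tablegen asm strings.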
4876
4877bool AArch64AsmParser::parseOptionalVGOperand(OperandVector &Operands,
4878 StringRef &VecGroup) {
4879 MCAsmParser &Parser = getParser();
4880 auto Tok = Parser.getTok();
4881 if (Tok.isNot(AsmToken::Identifier))
4882 return true;
4883
4884 StringRef VG = StringSwitch<StringRef>(Tok.getString().lower())
4885 .Case("vgx2", "vgx2")
4886 .Case("vgx4", "vgx4")
4887 .Default("");
4888
4889 if (VG.empty())
4890 return true;
4891
4892 VecGroup = VG;
4893 Parser.Lex(); // Eat vgx[2|4]
4894 return false;
4895}
4896
4897bool AArch64AsmParser::parseKeywordOperand(OperandVector &Operands) {
4898 auto Tok = getTok();
4899 if (Tok.isNot(AsmToken::Identifier))
4900 return true;
4901
4902 auto Keyword = Tok.getString();
4903 Keyword = StringSwitch<StringRef>(Keyword.lower())
4904 .Case("sm", "sm")
4905 .Case("za", "za")
4906 .Default(Keyword);
4907 Operands.push_back(
4908 AArch64Operand::CreateToken(Keyword, Tok.getLoc(), getContext()));
4909
4910 Lex();
4911 return false;
4912}
4913
4914/// parseOperand - Parse an AArch64 instruction operand. For now this parses the
4915/// operand regardless of the mnemonic.
4916bool AArch64AsmParser::parseOperand(OperandVector &Operands, bool isCondCode,
4917 bool invertCondCode) {
4918 MCAsmParser &Parser = getParser();
4919
4920 ParseStatus ResTy =
4921 MatchOperandParserImpl(Operands, Mnemonic, /*ParseForAllFeatures=*/true);
4922
4923 // Check if the current operand has a custom associated parser, if so, try to
4924 // custom parse the operand, or fallback to the general approach.
4925 if (ResTy.isSuccess())
4926 return false;
4927 // If there wasn't a custom match, try the generic matcher below. Otherwise,
4928 // there was a match, but an error occurred, in which case, just return that
4929 // the operand parsing failed.
4930 if (ResTy.isFailure())
4931 return true;
4932
4933 // Nothing custom, so do general case parsing.
4934 SMLoc S, E;
4935 auto parseOptionalShiftExtend = [&](AsmToken SavedTok) {
4936 if (parseOptionalToken(AsmToken::Comma)) {
4937 ParseStatus Res = tryParseOptionalShiftExtend(Operands);
4938 if (!Res.isNoMatch())
4939 return Res.isFailure();
4940 getLexer().UnLex(SavedTok);
4941 }
4942 return false;
4943 };
4944 switch (getLexer().getKind()) {
4945 default: {
4946 SMLoc S = getLoc();
4947 const MCExpr *Expr;
4948 if (parseSymbolicImmVal(Expr))
4949 return Error(S, "invalid operand");
4950
4951 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
4952 Operands.push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
4953 return parseOptionalShiftExtend(getTok());
4954 }
4955 case AsmToken::LBrac: {
4956 Operands.push_back(
4957 AArch64Operand::CreateToken("[", getLoc(), getContext()));
4958 Lex(); // Eat '['
4959
4960 // There's no comma after a '[', so we can parse the next operand
4961 // immediately.
4962 return parseOperand(Operands, false, false);
4963 }
4964 case AsmToken::LCurly: {
4965 if (!parseNeonVectorList(Operands))
4966 return false;
4967
4968 Operands.push_back(
4969 AArch64Operand::CreateToken("{", getLoc(), getContext()));
4970 Lex(); // Eat '{'
4971
4972 // There's no comma after a '{', so we can parse the next operand
4973 // immediately.
4974 return parseOperand(Operands, false, false);
4975 }
4976 case AsmToken::Identifier: {
4977 // See if this is a "VG" decoration used by SME instructions.
4978 StringRef VecGroup;
4979 if (!parseOptionalVGOperand(Operands, VecGroup)) {
4980 Operands.push_back(
4981 AArch64Operand::CreateToken(VecGroup, getLoc(), getContext()));
4982 return false;
4983 }
4984 // If we're expecting a Condition Code operand, then just parse that.
4985 if (isCondCode)
4986 return parseCondCode(Operands, invertCondCode);
4987
4988 // If it's a register name, parse it.
4989 if (!parseRegister(Operands)) {
4990 // Parse an optional shift/extend modifier.
4991 AsmToken SavedTok = getTok();
4992 if (parseOptionalToken(AsmToken::Comma)) {
4993 // The operand after the register may be a label (e.g. ADR/ADRP). Check
4994 // such cases and don't report an error when <label> happens to match a
4995 // shift/extend modifier.
4996 ParseStatus Res = MatchOperandParserImpl(Operands, Mnemonic,
4997 /*ParseForAllFeatures=*/true);
4998 if (!Res.isNoMatch())
4999 return Res.isFailure();
5000 Res = tryParseOptionalShiftExtend(Operands);
5001 if (!Res.isNoMatch())
5002 return Res.isFailure();
5003 getLexer().UnLex(SavedTok);
5004 }
5005 return false;
5006 }
5007
5008 // See if this is a "mul vl" decoration or "mul #<int>" operand used
5009 // by SVE instructions.
5010 if (!parseOptionalMulOperand(Operands))
5011 return false;
5012
5013 // If this is a two-word mnemonic, parse its special keyword
5014 // operand as an identifier.
5015 if (Mnemonic == "brb" || Mnemonic == "smstart" || Mnemonic == "smstop" ||
5016 Mnemonic == "gcsb")
5017 return parseKeywordOperand(Operands);
5018
5019 // This was not a register so parse other operands that start with an
5020 // identifier (like labels) as expressions and create them as immediates.
5021 const MCExpr *IdVal, *Term;
5022 S = getLoc();
5023 if (getParser().parseExpression(IdVal))
5024 return true;
5025 if (getParser().parseAtSpecifier(IdVal, E))
5026 return true;
5027 std::optional<MCBinaryExpr::Opcode> Opcode;
5028 if (parseOptionalToken(AsmToken::Plus))
5029 Opcode = MCBinaryExpr::Add;
5030 else if (parseOptionalToken(AsmToken::Minus))
5031 Opcode = MCBinaryExpr::Sub;
5032 if (Opcode) {
5033 if (getParser().parsePrimaryExpr(Term, E))
5034 return true;
5035 IdVal = MCBinaryExpr::create(*Opcode, IdVal, Term, getContext());
5036 }
5037 Operands.push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
5038
5039 // Parse an optional shift/extend modifier.
5040 return parseOptionalShiftExtend(getTok());
5041 }
5042 case AsmToken::Integer:
5043 case AsmToken::Real:
5044 case AsmToken::Hash: {
5045 // #42 -> immediate.
5046 S = getLoc();
5047
5048 parseOptionalToken(AsmToken::Hash);
5049
5050 // Parse a negative sign
5051 bool isNegative = false;
5052 if (getTok().is(AsmToken::Minus)) {
5053 isNegative = true;
5054 // We need to consume this token only when we have a Real, otherwise
5055 // we let parseSymbolicImmVal take care of it
5056 if (Parser.getLexer().peekTok().is(AsmToken::Real))
5057 Lex();
5058 }
5059
5060 // The only Real that should come through here is a literal #0.0 for
5061 // the fcmp[e] r, #0.0 instructions. They expect raw token operands,
5062 // so convert the value.
5063 const AsmToken &Tok = getTok();
5064 if (Tok.is(AsmToken::Real)) {
5065 APFloat RealVal(APFloat::IEEEdouble(), Tok.getString());
5066 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
5067 if (Mnemonic != "fcmp" && Mnemonic != "fcmpe" && Mnemonic != "fcmeq" &&
5068 Mnemonic != "fcmge" && Mnemonic != "fcmgt" && Mnemonic != "fcmle" &&
5069 Mnemonic != "fcmlt" && Mnemonic != "fcmne")
5070 return TokError("unexpected floating point literal");
5071 else if (IntVal != 0 || isNegative)
5072 return TokError("expected floating-point constant #0.0");
5073 Lex(); // Eat the token.
5074
5075 Operands.push_back(AArch64Operand::CreateToken("#0", S, getContext()));
5076 Operands.push_back(AArch64Operand::CreateToken(".0", S, getContext()));
5077 return false;
5078 }
5079
5080 const MCExpr *ImmVal;
5081 if (parseSymbolicImmVal(ImmVal))
5082 return true;
5083
5084 E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
5085 Operands.push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
5086
5087 // Parse an optional shift/extend modifier.
5088 return parseOptionalShiftExtend(Tok);
5089 }
5090 case AsmToken::Equal: {
5091 SMLoc Loc = getLoc();
5092 if (Mnemonic != "ldr") // only parse for ldr pseudo (e.g. ldr r0, =val)
5093 return TokError("unexpected token in operand");
5094 Lex(); // Eat '='
5095 const MCExpr *SubExprVal;
5096 if (getParser().parseExpression(SubExprVal))
5097 return true;
5098
5099 if (Operands.size() < 2 ||
5100 !static_cast<AArch64Operand &>(*Operands[1]).isScalarReg())
5101 return Error(Loc, "Only valid when first operand is register");
5102
5103 bool IsXReg =
5104 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
5105 Operands[1]->getReg());
5106
5107 MCContext& Ctx = getContext();
5108 E = SMLoc::getFromPointer(Loc.getPointer() - 1);
5109 // If the op is an imm and can be fit into a mov, then replace ldr with mov.
5110 if (isa<MCConstantExpr>(SubExprVal)) {
5111 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
5112 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
5113 while (Imm > 0xFFFF && llvm::countr_zero(Imm) >= 16) {
5114 ShiftAmt += 16;
5115 Imm >>= 16;
5116 }
5117 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
5118 Operands[0] = AArch64Operand::CreateToken("movz", Loc, Ctx);
5119 Operands.push_back(AArch64Operand::CreateImm(
5120 MCConstantExpr::create(Imm, Ctx), S, E, Ctx));
5121 if (ShiftAmt)
5122 Operands.push_back(AArch64Operand::CreateShiftExtend(AArch64_AM::LSL,
5123 ShiftAmt, true, S, E, Ctx));
5124 return false;
5125 }
5126 APInt Simm = APInt(64, Imm << ShiftAmt);
5127 // check if the immediate is an unsigned or signed 32-bit int for W regs
5128 if (!IsXReg && !(Simm.isIntN(32) || Simm.isSignedIntN(32)))
5129 return Error(Loc, "Immediate too large for register");
5130 }
5131 // If it is a label or an immediate that cannot fit in a movz, put it into the constant pool.
5132 const MCExpr *CPLoc =
5133 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
5134 Operands.push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
5135 return false;
5136 }
5137 }
5138}
5139
5140bool AArch64AsmParser::parseImmExpr(int64_t &Out) {
5141 const MCExpr *Expr = nullptr;
5142 SMLoc L = getLoc();
5143 if (check(getParser().parseExpression(Expr), L, "expected expression"))
5144 return true;
5145 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
5146 if (check(!Value, L, "expected constant expression"))
5147 return true;
5148 Out = Value->getValue();
5149 return false;
5150}
5151
5152bool AArch64AsmParser::parseComma() {
5153 if (check(getTok().isNot(AsmToken::Comma), getLoc(), "expected comma"))
5154 return true;
5155 // Eat the comma
5156 Lex();
5157 return false;
5158}
5159
5160bool AArch64AsmParser::parseRegisterInRange(unsigned &Out, unsigned Base,
5161 unsigned First, unsigned Last) {
5162 MCRegister Reg;
5163 SMLoc Start, End;
5164 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register"))
5165 return true;
5166
5167 // Special handling for FP and LR; they aren't linearly after x28 in
5168 // the registers enum.
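// e.g. for an X-register range, "fp" maps to index 29 and "lr" to index 30.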
5169 unsigned RangeEnd = Last;
5170 if (Base == AArch64::X0) {
5171 if (Last == AArch64::FP) {
5172 RangeEnd = AArch64::X28;
5173 if (Reg == AArch64::FP) {
5174 Out = 29;
5175 return false;
5176 }
5177 }
5178 if (Last == AArch64::LR) {
5179 RangeEnd = AArch64::X28;
5180 if (Reg == AArch64::FP) {
5181 Out = 29;
5182 return false;
5183 } else if (Reg == AArch64::LR) {
5184 Out = 30;
5185 return false;
5186 }
5187 }
5188 }
5189
5190 if (check(Reg < First || Reg > RangeEnd, Start,
5191 Twine("expected register in range ") +
5192 AArch64InstPrinter::getRegisterName(First + Base) + " to " +
5193 AArch64InstPrinter::getRegisterName(Last + Base)))
5194 return true;
5195 Out = Reg - Base;
5196 return false;
5197}
5198
5199bool AArch64AsmParser::areEqualRegs(const MCParsedAsmOperand &Op1,
5200 const MCParsedAsmOperand &Op2) const {
5201 auto &AOp1 = static_cast<const AArch64Operand&>(Op1);
5202 auto &AOp2 = static_cast<const AArch64Operand&>(Op2);
5203
5204 if (AOp1.isVectorList() && AOp2.isVectorList())
5205 return AOp1.getVectorListCount() == AOp2.getVectorListCount() &&
5206 AOp1.getVectorListStart() == AOp2.getVectorListStart() &&
5207 AOp1.getVectorListStride() == AOp2.getVectorListStride();
5208
5209 if (!AOp1.isReg() || !AOp2.isReg())
5210 return false;
5211
5212 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
5213 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
5214 return MCTargetAsmParser::areEqualRegs(Op1, Op2);
5215
5216 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
5217 "Testing equality of non-scalar registers not supported");
5218
5219 // Check whether the registers match via their sub/super-register relationship.
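// e.g. a w0 operand with an EqualsSuperReg constraint matches x0 in the other
// position, and an x0 operand with EqualsSubReg matches w0.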
5220 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
5221 return getXRegFromWReg(Op1.getReg()) == Op2.getReg();
5222 if (AOp1.getRegEqualityTy() == EqualsSubReg)
5223 return getWRegFromXReg(Op1.getReg()) == Op2.getReg();
5224 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
5225 return getXRegFromWReg(Op2.getReg()) == Op1.getReg();
5226 if (AOp2.getRegEqualityTy() == EqualsSubReg)
5227 return getWRegFromXReg(Op2.getReg()) == Op1.getReg();
5228
5229 return false;
5230}
5231
5232/// Parse an AArch64 instruction mnemonic followed by its operands.
5233bool AArch64AsmParser::parseInstruction(ParseInstructionInfo &Info,
5234 StringRef Name, SMLoc NameLoc,
5235 OperandVector &Operands) {
5236 Name = StringSwitch<StringRef>(Name.lower())
5237 .Case("beq", "b.eq")
5238 .Case("bne", "b.ne")
5239 .Case("bhs", "b.hs")
5240 .Case("bcs", "b.cs")
5241 .Case("blo", "b.lo")
5242 .Case("bcc", "b.cc")
5243 .Case("bmi", "b.mi")
5244 .Case("bpl", "b.pl")
5245 .Case("bvs", "b.vs")
5246 .Case("bvc", "b.vc")
5247 .Case("bhi", "b.hi")
5248 .Case("bls", "b.ls")
5249 .Case("bge", "b.ge")
5250 .Case("blt", "b.lt")
5251 .Case("bgt", "b.gt")
5252 .Case("ble", "b.le")
5253 .Case("bal", "b.al")
5254 .Case("bnv", "b.nv")
5255 .Default(Name);
5256
5257 // First check for the AArch64-specific .req directive.
5258 if (getTok().is(AsmToken::Identifier) &&
5259 getTok().getIdentifier().lower() == ".req") {
5260 parseDirectiveReq(Name, NameLoc);
5261 // We always return 'error' for this, as we're done with this
5262 // statement and don't need to match the instruction.
5263 return true;
5264 }
5265
5266 // Create the leading tokens for the mnemonic, split by '.' characters.
5267 size_t Start = 0, Next = Name.find('.');
5268 StringRef Head = Name.slice(Start, Next);
5269
5270 // IC, DC, AT, TLBI and Prediction invalidation instructions are aliases for
5271 // the SYS instruction.
5272 if (Head == "ic" || Head == "dc" || Head == "at" || Head == "tlbi" ||
5273 Head == "cfp" || Head == "dvp" || Head == "cpp" || Head == "cosp")
5274 return parseSysAlias(Head, NameLoc, Operands);
5275
5276 // TLBIP instructions are aliases for the SYSP instruction.
5277 if (Head == "tlbip")
5278 return parseSyspAlias(Head, NameLoc, Operands);
5279
5280 Operands.push_back(AArch64Operand::CreateToken(Head, NameLoc, getContext()));
5281 Mnemonic = Head;
5282
5283 // Handle condition codes for a branch mnemonic
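// e.g. "b.eq" is recorded as the token "b", a "." suffix token, and an EQ
// condition-code operand.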
5284 if ((Head == "b" || Head == "bc") && Next != StringRef::npos) {
5285 Start = Next;
5286 Next = Name.find('.', Start + 1);
5287 Head = Name.slice(Start + 1, Next);
5288
5289 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5290 (Head.data() - Name.data()));
5291 std::string Suggestion;
5292 AArch64CC::CondCode CC = parseCondCodeString(Head, Suggestion);
5293 if (CC == AArch64CC::Invalid) {
5294 std::string Msg = "invalid condition code";
5295 if (!Suggestion.empty())
5296 Msg += ", did you mean " + Suggestion + "?";
5297 return Error(SuffixLoc, Msg);
5298 }
5299 Operands.push_back(AArch64Operand::CreateToken(".", SuffixLoc, getContext(),
5300 /*IsSuffix=*/true));
5301 Operands.push_back(
5302 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
5303 }
5304
5305 // Add the remaining tokens in the mnemonic.
5306 while (Next != StringRef::npos) {
5307 Start = Next;
5308 Next = Name.find('.', Start + 1);
5309 Head = Name.slice(Start, Next);
5310 SMLoc SuffixLoc = SMLoc::getFromPointer(NameLoc.getPointer() +
5311 (Head.data() - Name.data()) + 1);
5312 Operands.push_back(AArch64Operand::CreateToken(
5313 Head, SuffixLoc, getContext(), /*IsSuffix=*/true));
5314 }
5315
5316 // Conditional compare instructions have a Condition Code operand, which needs
5317 // to be parsed and an immediate operand created.
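// e.g. in "ccmp w0, w1, #0, ne" the condition "ne" is the fourth operand.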
5318 bool condCodeFourthOperand =
5319 (Head == "ccmp" || Head == "ccmn" || Head == "fccmp" ||
5320 Head == "fccmpe" || Head == "fcsel" || Head == "csel" ||
5321 Head == "csinc" || Head == "csinv" || Head == "csneg");
5322
5323 // These instructions are aliases of some of the conditional select
5324 // instructions. However, the condition code is inverted in the aliased
5325 // instruction.
5326 //
5327 // FIXME: Is this the correct way to handle these? Or should the parser
5328 // generate the aliased instructions directly?
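// e.g. "cset w0, ne" is equivalent to "csinc w0, wzr, wzr, eq", with the
// condition code inverted.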
5329 bool condCodeSecondOperand = (Head == "cset" || Head == "csetm");
5330 bool condCodeThirdOperand =
5331 (Head == "cinc" || Head == "cinv" || Head == "cneg");
5332
5333 // Read the remaining operands.
5334 if (getLexer().isNot(AsmToken::EndOfStatement)) {
5335
5336 unsigned N = 1;
5337 do {
5338 // Parse and remember the operand.
5339 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
5340 (N == 3 && condCodeThirdOperand) ||
5341 (N == 2 && condCodeSecondOperand),
5342 condCodeSecondOperand || condCodeThirdOperand)) {
5343 return true;
5344 }
5345
5346 // After successfully parsing some operands there are three special cases
5347 // to consider (i.e. notional operands not separated by commas). Two are
5348 // due to memory specifiers:
5349 // + An RBrac will end an address for load/store/prefetch
5350 // + An '!' will indicate a pre-indexed operation.
5351 //
5352 // And a further case is '}', which ends a group of tokens specifying the
5353 // SME accumulator array 'ZA' or tile vector, i.e.
5354 //
5355 // '{ ZA }' or '{ <ZAt><HV>.<BHSDQ>[<Wv>, #<imm>] }'
5356 //
5357 // It's someone else's responsibility to make sure these tokens are sane
5358 // in the given context!
5359
5360 if (parseOptionalToken(AsmToken::RBrac))
5361 Operands.push_back(
5362 AArch64Operand::CreateToken("]", getLoc(), getContext()));
5363 if (parseOptionalToken(AsmToken::Exclaim))
5364 Operands.push_back(
5365 AArch64Operand::CreateToken("!", getLoc(), getContext()));
5366 if (parseOptionalToken(AsmToken::RCurly))
5367 Operands.push_back(
5368 AArch64Operand::CreateToken("}", getLoc(), getContext()));
5369
5370 ++N;
5371 } while (parseOptionalToken(AsmToken::Comma));
5372 }
5373
5374 if (parseToken(AsmToken::EndOfStatement, "unexpected token in argument list"))
5375 return true;
5376
5377 return false;
5378}
5379
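// Returns true if Reg refers to the same architectural register as ZReg, i.e.
// Reg is the B/H/S/D/Q (or Z) view of that SVE vector register.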
5380static inline bool isMatchingOrAlias(MCRegister ZReg, MCRegister Reg) {
5381 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
5382 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
5383 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
5384 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
5385 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
5386 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
5387 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
5388}
5389
5390// FIXME: This entire function is a giant hack to provide us with decent
5391// operand range validation/diagnostics until TableGen/MC can be extended
5392// to support autogeneration of this kind of validation.
5393bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
5394 SmallVectorImpl<SMLoc> &Loc) {
5395 const MCRegisterInfo *RI = getContext().getRegisterInfo();
5396 const MCInstrDesc &MCID = MII.get(Inst.getOpcode());
5397
5398 // A prefix only applies to the instruction following it. Here we extract
5399 // prefix information for the next instruction before validating the current
5400 // one so that in the case of failure we don't erroneously continue using the
5401 // current prefix.
5402 PrefixInfo Prefix = NextPrefix;
5403 NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);
5404
5405 // Before validating the instruction in isolation we run through the rules
5406 // applicable when it follows a prefix instruction.
5407 // NOTE: brk & hlt can be prefixed but require no additional validation.
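// e.g. "movprfx z0, z1" must be followed by a destructive instruction that
// writes z0, such as "add z0.d, p0/m, z0.d, z2.d"; the checks below reject
// anything else.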
5408 if (Prefix.isActive() &&
5409 (Inst.getOpcode() != AArch64::BRK) &&
5410 (Inst.getOpcode() != AArch64::HLT)) {
5411
5412 // Prefixed instructions must have a destructive operand.
5413 if ((MCID.TSFlags & AArch64::DestructiveInstTypeMask) ==
5414 AArch64::NotDestructive)
5415 return Error(IDLoc, "instruction is unpredictable when following a"
5416 " movprfx, suggest replacing movprfx with mov");
5417
5418 // Destination operands must match.
5419 if (Inst.getOperand(0).getReg() != Prefix.getDstReg())
5420 return Error(Loc[0], "instruction is unpredictable when following a"
5421 " movprfx writing to a different destination");
5422
5423 // Destination operand must not be used in any other location.
5424 for (unsigned i = 1; i < Inst.getNumOperands(); ++i) {
5425 if (Inst.getOperand(i).isReg() &&
5426 (MCID.getOperandConstraint(i, MCOI::TIED_TO) == -1) &&
5427 isMatchingOrAlias(Prefix.getDstReg(), Inst.getOperand(i).getReg()))
5428 return Error(Loc[0], "instruction is unpredictable when following a"
5429 " movprfx and destination also used as non-destructive"
5430 " source");
5431 }
5432
5433 auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
5434 if (Prefix.isPredicated()) {
5435 int PgIdx = -1;
5436
5437 // Find the instruction's general predicate.
5438 for (unsigned i = 1; i < Inst.getNumOperands(); ++i)
5439 if (Inst.getOperand(i).isReg() &&
5440 PPRRegClass.contains(Inst.getOperand(i).getReg())) {
5441 PgIdx = i;
5442 break;
5443 }
5444
5445 // Instruction must be predicated if the movprfx is predicated.
5446 if (PgIdx == -1 ||
5447 (MCID.TSFlags & AArch64::ElementSizeMask) == AArch64::ElementSizeNone)
5448 return Error(IDLoc, "instruction is unpredictable when following a"
5449 " predicated movprfx, suggest using unpredicated movprfx");
5450
5451 // Instruction must use same general predicate as the movprfx.
5452 if (Inst.getOperand(PgIdx).getReg() != Prefix.getPgReg())
5453 return Error(IDLoc, "instruction is unpredictable when following a"
5454 " predicated movprfx using a different general predicate");
5455
5456 // Instruction element type must match the movprfx.
5457 if ((MCID.TSFlags & AArch64::ElementSizeMask) != Prefix.getElementSize())
5458 return Error(IDLoc, "instruction is unpredictable when following a"
5459 " predicated movprfx with a different element size");
5460 }
5461 }
5462
5463 // On ARM64EC, only a restricted set of registers may be used. Warn when an
5464 // explicitly disallowed register is used.
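// e.g. "mov x13, #1" still assembles but emits a warning when targeting
// arm64ec, since x13 is one of the disallowed registers.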
5465 if (IsWindowsArm64EC) {
5466 for (unsigned i = 0; i < Inst.getNumOperands(); ++i) {
5467 if (Inst.getOperand(i).isReg()) {
5468 MCRegister Reg = Inst.getOperand(i).getReg();
5469 // At this point, vector registers are matched to their
5470 // appropriately sized alias.
5471 if ((Reg == AArch64::W13 || Reg == AArch64::X13) ||
5472 (Reg == AArch64::W14 || Reg == AArch64::X14) ||
5473 (Reg == AArch64::W23 || Reg == AArch64::X23) ||
5474 (Reg == AArch64::W24 || Reg == AArch64::X24) ||
5475 (Reg == AArch64::W28 || Reg == AArch64::X28) ||
5476 (Reg >= AArch64::Q16 && Reg <= AArch64::Q31) ||
5477 (Reg >= AArch64::D16 && Reg <= AArch64::D31) ||
5478 (Reg >= AArch64::S16 && Reg <= AArch64::S31) ||
5479 (Reg >= AArch64::H16 && Reg <= AArch64::H31) ||
5480 (Reg >= AArch64::B16 && Reg <= AArch64::B31)) {
5481 Warning(IDLoc, "register " + Twine(RI->getName(Reg)) +
5482 " is disallowed on ARM64EC.");
5483 }
5484 }
5485 }
5486 }
5487
5488 // Check for indexed addressing modes where the base register is the same as
5489 // a destination/source register, and for pair loads where Rt == Rt2.
5490 // All of these are unpredictable.
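// e.g. "ldp x0, x1, [x0], #16" (writeback base is also a destination) and
// "ldp x2, x2, [x1]" (Rt == Rt2) are both diagnosed below.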
5491 switch (Inst.getOpcode()) {
5492 case AArch64::LDPSWpre:
5493 case AArch64::LDPWpost:
5494 case AArch64::LDPWpre:
5495 case AArch64::LDPXpost:
5496 case AArch64::LDPXpre: {
5497 MCRegister Rt = Inst.getOperand(1).getReg();
5498 MCRegister Rt2 = Inst.getOperand(2).getReg();
5499 MCRegister Rn = Inst.getOperand(3).getReg();
5500 if (RI->isSubRegisterEq(Rn, Rt))
5501 return Error(Loc[0], "unpredictable LDP instruction, writeback base "
5502 "is also a destination");
5503 if (RI->isSubRegisterEq(Rn, Rt2))
5504 return Error(Loc[1], "unpredictable LDP instruction, writeback base "
5505 "is also a destination");
5506 [[fallthrough]];
5507 }
5508 case AArch64::LDR_ZA:
5509 case AArch64::STR_ZA: {
5510 if (Inst.getOperand(2).isImm() && Inst.getOperand(4).isImm() &&
5511 Inst.getOperand(2).getImm() != Inst.getOperand(4).getImm())
5512 return Error(Loc[1],
5513 "unpredictable instruction, immediate and offset mismatch.");
5514 break;
5515 }
5516 case AArch64::LDPDi:
5517 case AArch64::LDPQi:
5518 case AArch64::LDPSi:
5519 case AArch64::LDPSWi:
5520 case AArch64::LDPWi:
5521 case AArch64::LDPXi: {
5522 MCRegister Rt = Inst.getOperand(0).getReg();
5523 MCRegister Rt2 = Inst.getOperand(1).getReg();
5524 if (Rt == Rt2)
5525 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5526 break;
5527 }
5528 case AArch64::LDPDpost:
5529 case AArch64::LDPDpre:
5530 case AArch64::LDPQpost:
5531 case AArch64::LDPQpre:
5532 case AArch64::LDPSpost:
5533 case AArch64::LDPSpre:
5534 case AArch64::LDPSWpost: {
5535 MCRegister Rt = Inst.getOperand(1).getReg();
5536 MCRegister Rt2 = Inst.getOperand(2).getReg();
5537 if (Rt == Rt2)
5538 return Error(Loc[1], "unpredictable LDP instruction, Rt2==Rt");
5539 break;
5540 }
5541 case AArch64::STPDpost:
5542 case AArch64::STPDpre:
5543 case AArch64::STPQpost:
5544 case AArch64::STPQpre:
5545 case AArch64::STPSpost:
5546 case AArch64::STPSpre:
5547 case AArch64::STPWpost:
5548 case AArch64::STPWpre:
5549 case AArch64::STPXpost:
5550 case AArch64::STPXpre: {
5551 MCRegister Rt = Inst.getOperand(1).getReg();
5552 MCRegister Rt2 = Inst.getOperand(2).getReg();
5553 MCRegister Rn = Inst.getOperand(3).getReg();
5554 if (RI->isSubRegisterEq(Rn, Rt))
5555 return Error(Loc[0], "unpredictable STP instruction, writeback base "
5556 "is also a source");
5557 if (RI->isSubRegisterEq(Rn, Rt2))
5558 return Error(Loc[1], "unpredictable STP instruction, writeback base "
5559 "is also a source");
5560 break;
5561 }
5562 case AArch64::LDRBBpre:
5563 case AArch64::LDRBpre:
5564 case AArch64::LDRHHpre:
5565 case AArch64::LDRHpre:
5566 case AArch64::LDRSBWpre:
5567 case AArch64::LDRSBXpre:
5568 case AArch64::LDRSHWpre:
5569 case AArch64::LDRSHXpre:
5570 case AArch64::LDRSWpre:
5571 case AArch64::LDRWpre:
5572 case AArch64::LDRXpre:
5573 case AArch64::LDRBBpost:
5574 case AArch64::LDRBpost:
5575 case AArch64::LDRHHpost:
5576 case AArch64::LDRHpost:
5577 case AArch64::LDRSBWpost:
5578 case AArch64::LDRSBXpost:
5579 case AArch64::LDRSHWpost:
5580 case AArch64::LDRSHXpost:
5581 case AArch64::LDRSWpost:
5582 case AArch64::LDRWpost:
5583 case AArch64::LDRXpost: {
5584 MCRegister Rt = Inst.getOperand(1).getReg();
5585 MCRegister Rn = Inst.getOperand(2).getReg();
5586 if (RI->isSubRegisterEq(Rn, Rt))
5587 return Error(Loc[0], "unpredictable LDR instruction, writeback base "
5588 "is also a source");
5589 break;
5590 }
5591 case AArch64::STRBBpost:
5592 case AArch64::STRBpost:
5593 case AArch64::STRHHpost:
5594 case AArch64::STRHpost:
5595 case AArch64::STRWpost:
5596 case AArch64::STRXpost:
5597 case AArch64::STRBBpre:
5598 case AArch64::STRBpre:
5599 case AArch64::STRHHpre:
5600 case AArch64::STRHpre:
5601 case AArch64::STRWpre:
5602 case AArch64::STRXpre: {
5603 MCRegister Rt = Inst.getOperand(1).getReg();
5604 MCRegister Rn = Inst.getOperand(2).getReg();
5605 if (RI->isSubRegisterEq(Rn, Rt))
5606 return Error(Loc[0], "unpredictable STR instruction, writeback base "
5607 "is also a source");
5608 break;
5609 }
5610 case AArch64::STXRB:
5611 case AArch64::STXRH:
5612 case AArch64::STXRW:
5613 case AArch64::STXRX:
5614 case AArch64::STLXRB:
5615 case AArch64::STLXRH:
5616 case AArch64::STLXRW:
5617 case AArch64::STLXRX: {
5618 MCRegister Rs = Inst.getOperand(0).getReg();
5619 MCRegister Rt = Inst.getOperand(1).getReg();
5620 MCRegister Rn = Inst.getOperand(2).getReg();
5621 if (RI->isSubRegisterEq(Rt, Rs) ||
5622 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5623 return Error(Loc[0],
5624 "unpredictable STXR instruction, status is also a source");
5625 break;
5626 }
5627 case AArch64::STXPW:
5628 case AArch64::STXPX:
5629 case AArch64::STLXPW:
5630 case AArch64::STLXPX: {
5631 MCRegister Rs = Inst.getOperand(0).getReg();
5632 MCRegister Rt1 = Inst.getOperand(1).getReg();
5633 MCRegister Rt2 = Inst.getOperand(2).getReg();
5634 MCRegister Rn = Inst.getOperand(3).getReg();
5635 if (RI->isSubRegisterEq(Rt1, Rs) || RI->isSubRegisterEq(Rt2, Rs) ||
5636 (RI->isSubRegisterEq(Rn, Rs) && Rn != AArch64::SP))
5637 return Error(Loc[0],
5638 "unpredictable STXP instruction, status is also a source");
5639 break;
5640 }
5641 case AArch64::LDRABwriteback:
5642 case AArch64::LDRAAwriteback: {
5643 MCRegister Xt = Inst.getOperand(0).getReg();
5644 MCRegister Xn = Inst.getOperand(1).getReg();
5645 if (Xt == Xn)
5646 return Error(Loc[0],
5647 "unpredictable LDRA instruction, writeback base"
5648 " is also a destination");
5649 break;
5650 }
5651 }
5652
5653 // Check v8.8-A memops instructions.
5654 switch (Inst.getOpcode()) {
5655 case AArch64::CPYFP:
5656 case AArch64::CPYFPWN:
5657 case AArch64::CPYFPRN:
5658 case AArch64::CPYFPN:
5659 case AArch64::CPYFPWT:
5660 case AArch64::CPYFPWTWN:
5661 case AArch64::CPYFPWTRN:
5662 case AArch64::CPYFPWTN:
5663 case AArch64::CPYFPRT:
5664 case AArch64::CPYFPRTWN:
5665 case AArch64::CPYFPRTRN:
5666 case AArch64::CPYFPRTN:
5667 case AArch64::CPYFPT:
5668 case AArch64::CPYFPTWN:
5669 case AArch64::CPYFPTRN:
5670 case AArch64::CPYFPTN:
5671 case AArch64::CPYFM:
5672 case AArch64::CPYFMWN:
5673 case AArch64::CPYFMRN:
5674 case AArch64::CPYFMN:
5675 case AArch64::CPYFMWT:
5676 case AArch64::CPYFMWTWN:
5677 case AArch64::CPYFMWTRN:
5678 case AArch64::CPYFMWTN:
5679 case AArch64::CPYFMRT:
5680 case AArch64::CPYFMRTWN:
5681 case AArch64::CPYFMRTRN:
5682 case AArch64::CPYFMRTN:
5683 case AArch64::CPYFMT:
5684 case AArch64::CPYFMTWN:
5685 case AArch64::CPYFMTRN:
5686 case AArch64::CPYFMTN:
5687 case AArch64::CPYFE:
5688 case AArch64::CPYFEWN:
5689 case AArch64::CPYFERN:
5690 case AArch64::CPYFEN:
5691 case AArch64::CPYFEWT:
5692 case AArch64::CPYFEWTWN:
5693 case AArch64::CPYFEWTRN:
5694 case AArch64::CPYFEWTN:
5695 case AArch64::CPYFERT:
5696 case AArch64::CPYFERTWN:
5697 case AArch64::CPYFERTRN:
5698 case AArch64::CPYFERTN:
5699 case AArch64::CPYFET:
5700 case AArch64::CPYFETWN:
5701 case AArch64::CPYFETRN:
5702 case AArch64::CPYFETN:
5703 case AArch64::CPYP:
5704 case AArch64::CPYPWN:
5705 case AArch64::CPYPRN:
5706 case AArch64::CPYPN:
5707 case AArch64::CPYPWT:
5708 case AArch64::CPYPWTWN:
5709 case AArch64::CPYPWTRN:
5710 case AArch64::CPYPWTN:
5711 case AArch64::CPYPRT:
5712 case AArch64::CPYPRTWN:
5713 case AArch64::CPYPRTRN:
5714 case AArch64::CPYPRTN:
5715 case AArch64::CPYPT:
5716 case AArch64::CPYPTWN:
5717 case AArch64::CPYPTRN:
5718 case AArch64::CPYPTN:
5719 case AArch64::CPYM:
5720 case AArch64::CPYMWN:
5721 case AArch64::CPYMRN:
5722 case AArch64::CPYMN:
5723 case AArch64::CPYMWT:
5724 case AArch64::CPYMWTWN:
5725 case AArch64::CPYMWTRN:
5726 case AArch64::CPYMWTN:
5727 case AArch64::CPYMRT:
5728 case AArch64::CPYMRTWN:
5729 case AArch64::CPYMRTRN:
5730 case AArch64::CPYMRTN:
5731 case AArch64::CPYMT:
5732 case AArch64::CPYMTWN:
5733 case AArch64::CPYMTRN:
5734 case AArch64::CPYMTN:
5735 case AArch64::CPYE:
5736 case AArch64::CPYEWN:
5737 case AArch64::CPYERN:
5738 case AArch64::CPYEN:
5739 case AArch64::CPYEWT:
5740 case AArch64::CPYEWTWN:
5741 case AArch64::CPYEWTRN:
5742 case AArch64::CPYEWTN:
5743 case AArch64::CPYERT:
5744 case AArch64::CPYERTWN:
5745 case AArch64::CPYERTRN:
5746 case AArch64::CPYERTN:
5747 case AArch64::CPYET:
5748 case AArch64::CPYETWN:
5749 case AArch64::CPYETRN:
5750 case AArch64::CPYETN: {
5751 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5752 MCRegister Xs_wb = Inst.getOperand(1).getReg();
5753 MCRegister Xn_wb = Inst.getOperand(2).getReg();
5754 MCRegister Xd = Inst.getOperand(3).getReg();
5755 MCRegister Xs = Inst.getOperand(4).getReg();
5756 MCRegister Xn = Inst.getOperand(5).getReg();
5757 if (Xd_wb != Xd)
5758 return Error(Loc[0],
5759 "invalid CPY instruction, Xd_wb and Xd do not match");
5760 if (Xs_wb != Xs)
5761 return Error(Loc[0],
5762 "invalid CPY instruction, Xs_wb and Xs do not match");
5763 if (Xn_wb != Xn)
5764 return Error(Loc[0],
5765 "invalid CPY instruction, Xn_wb and Xn do not match");
5766 if (Xd == Xs)
5767 return Error(Loc[0], "invalid CPY instruction, destination and source"
5768 " registers are the same");
5769 if (Xd == Xn)
5770 return Error(Loc[0], "invalid CPY instruction, destination and size"
5771 " registers are the same");
5772 if (Xs == Xn)
5773 return Error(Loc[0], "invalid CPY instruction, source and size"
5774 " registers are the same");
5775 break;
5776 }
5777 case AArch64::SETP:
5778 case AArch64::SETPT:
5779 case AArch64::SETPN:
5780 case AArch64::SETPTN:
5781 case AArch64::SETM:
5782 case AArch64::SETMT:
5783 case AArch64::SETMN:
5784 case AArch64::SETMTN:
5785 case AArch64::SETE:
5786 case AArch64::SETET:
5787 case AArch64::SETEN:
5788 case AArch64::SETETN:
5789 case AArch64::SETGP:
5790 case AArch64::SETGPT:
5791 case AArch64::SETGPN:
5792 case AArch64::SETGPTN:
5793 case AArch64::SETGM:
5794 case AArch64::SETGMT:
5795 case AArch64::SETGMN:
5796 case AArch64::SETGMTN:
5797 case AArch64::MOPSSETGE:
5798 case AArch64::MOPSSETGET:
5799 case AArch64::MOPSSETGEN:
5800 case AArch64::MOPSSETGETN: {
5801 MCRegister Xd_wb = Inst.getOperand(0).getReg();
5802 MCRegister Xn_wb = Inst.getOperand(1).getReg();
5803 MCRegister Xd = Inst.getOperand(2).getReg();
5804 MCRegister Xn = Inst.getOperand(3).getReg();
5805 MCRegister Xm = Inst.getOperand(4).getReg();
5806 if (Xd_wb != Xd)
5807 return Error(Loc[0],
5808 "invalid SET instruction, Xd_wb and Xd do not match");
5809 if (Xn_wb != Xn)
5810 return Error(Loc[0],
5811 "invalid SET instruction, Xn_wb and Xn do not match");
5812 if (Xd == Xn)
5813 return Error(Loc[0], "invalid SET instruction, destination and size"
5814 " registers are the same");
5815 if (Xd == Xm)
5816 return Error(Loc[0], "invalid SET instruction, destination and source"
5817 " registers are the same");
5818 if (Xn == Xm)
5819 return Error(Loc[0], "invalid SET instruction, source and size"
5820 " registers are the same");
5821 break;
5822 }
5823 }
5824
5825 // Now check immediate ranges. Separate from the above as there is overlap
5826 // in the instructions being checked and this keeps the nested conditionals
5827 // to a minimum.
5828 switch (Inst.getOpcode()) {
5829 case AArch64::ADDSWri:
5830 case AArch64::ADDSXri:
5831 case AArch64::ADDWri:
5832 case AArch64::ADDXri:
5833 case AArch64::SUBSWri:
5834 case AArch64::SUBSXri:
5835 case AArch64::SUBWri:
5836 case AArch64::SUBXri: {
5837 // Annoyingly we can't do this in the isAddSubImm predicate, so there is
5838 // some slight duplication here.
5839 if (Inst.getOperand(2).isExpr()) {
5840 const MCExpr *Expr = Inst.getOperand(2).getExpr();
5841 AArch64::Specifier ELFSpec;
5842 AArch64::Specifier DarwinSpec;
5843 int64_t Addend;
5844 if (classifySymbolRef(Expr, ELFSpec, DarwinSpec, Addend)) {
5845
5846 // Only allow these with ADDXri.
5847 if ((DarwinSpec == AArch64::S_MACHO_PAGEOFF ||
5848 DarwinSpec == AArch64::S_MACHO_TLVPPAGEOFF) &&
5849 Inst.getOpcode() == AArch64::ADDXri)
5850 return false;
5851
5852 // Only allow these with ADDXri/ADDWri
5860 ELFSpec) &&
5861 (Inst.getOpcode() == AArch64::ADDXri ||
5862 Inst.getOpcode() == AArch64::ADDWri))
5863 return false;
5864
5865 // Otherwise, don't allow symbol references in the immediate field.
5866 // Note: Loc.back() may be Loc[1] or Loc[2] depending on the number of
5867 // operands of the original instruction (e.g. 'add w0, w1, borked' vs
5868 // 'cmp w0, borked').
5869 return Error(Loc.back(), "invalid immediate expression");
5870 }
5871 // We don't validate more complex expressions here
5872 }
5873 return false;
5874 }
5875 default:
5876 return false;
5877 }
5878}
5879
5880 static std::string AArch64MnemonicSpellCheck(StringRef S,
5881 const FeatureBitset &FBS,
5882 unsigned VariantID = 0);
5883
5884bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
5885 uint64_t ErrorInfo,
5886 OperandVector &Operands) {
5887 switch (ErrCode) {
5888 case Match_InvalidTiedOperand: {
5889 auto &Op = static_cast<const AArch64Operand &>(*Operands[ErrorInfo]);
5890 if (Op.isVectorList())
5891 return Error(Loc, "operand must match destination register list");
5892
5893 assert(Op.isReg() && "Unexpected operand type");
5894 switch (Op.getRegEqualityTy()) {
5895 case RegConstraintEqualityTy::EqualsSubReg:
5896 return Error(Loc, "operand must be 64-bit form of destination register");
5897 case RegConstraintEqualityTy::EqualsSuperReg:
5898 return Error(Loc, "operand must be 32-bit form of destination register");
5899 case RegConstraintEqualityTy::EqualsReg:
5900 return Error(Loc, "operand must match destination register");
5901 }
5902 llvm_unreachable("Unknown RegConstraintEqualityTy");
5903 }
5904 case Match_MissingFeature:
5905 return Error(Loc,
5906 "instruction requires a CPU feature not currently enabled");
5907 case Match_InvalidOperand:
5908 return Error(Loc, "invalid operand for instruction");
5909 case Match_InvalidSuffix:
5910 return Error(Loc, "invalid type suffix for instruction");
5911 case Match_InvalidCondCode:
5912 return Error(Loc, "expected AArch64 condition code");
5913 case Match_AddSubRegExtendSmall:
5914 return Error(Loc,
5915 "expected '[su]xt[bhw]' with optional integer in range [0, 4]");
5916 case Match_AddSubRegExtendLarge:
5917 return Error(Loc,
5918 "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
5919 case Match_AddSubSecondSource:
5920 return Error(Loc,
5921 "expected compatible register, symbol or integer in range [0, 4095]");
5922 case Match_LogicalSecondSource:
5923 return Error(Loc, "expected compatible register or logical immediate");
5924 case Match_InvalidMovImm32Shift:
5925 return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
5926 case Match_InvalidMovImm64Shift:
5927 return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
5928 case Match_AddSubRegShift32:
5929 return Error(Loc,
5930 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
5931 case Match_AddSubRegShift64:
5932 return Error(Loc,
5933 "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
5934 case Match_InvalidFPImm:
5935 return Error(Loc,
5936 "expected compatible register or floating-point constant");
5937 case Match_InvalidMemoryIndexedSImm6:
5938 return Error(Loc, "index must be an integer in range [-32, 31].");
5939 case Match_InvalidMemoryIndexedSImm5:
5940 return Error(Loc, "index must be an integer in range [-16, 15].");
5941 case Match_InvalidMemoryIndexed1SImm4:
5942 return Error(Loc, "index must be an integer in range [-8, 7].");
5943 case Match_InvalidMemoryIndexed2SImm4:
5944 return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
5945 case Match_InvalidMemoryIndexed3SImm4:
5946 return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
5947 case Match_InvalidMemoryIndexed4SImm4:
5948 return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
5949 case Match_InvalidMemoryIndexed16SImm4:
5950 return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
5951 case Match_InvalidMemoryIndexed32SImm4:
5952 return Error(Loc, "index must be a multiple of 32 in range [-256, 224].");
5953 case Match_InvalidMemoryIndexed1SImm6:
5954 return Error(Loc, "index must be an integer in range [-32, 31].");
5955 case Match_InvalidMemoryIndexedSImm8:
5956 return Error(Loc, "index must be an integer in range [-128, 127].");
5957 case Match_InvalidMemoryIndexedSImm9:
5958 return Error(Loc, "index must be an integer in range [-256, 255].");
5959 case Match_InvalidMemoryIndexed16SImm9:
5960 return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
5961 case Match_InvalidMemoryIndexed8SImm10:
5962 return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
5963 case Match_InvalidMemoryIndexed4SImm7:
5964 return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
5965 case Match_InvalidMemoryIndexed8SImm7:
5966 return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
5967 case Match_InvalidMemoryIndexed16SImm7:
5968 return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
5969 case Match_InvalidMemoryIndexed8UImm5:
5970 return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
5971 case Match_InvalidMemoryIndexed8UImm3:
5972 return Error(Loc, "index must be a multiple of 8 in range [0, 56].");
5973 case Match_InvalidMemoryIndexed4UImm5:
5974 return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
5975 case Match_InvalidMemoryIndexed2UImm5:
5976 return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
5977 case Match_InvalidMemoryIndexed8UImm6:
5978 return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
5979 case Match_InvalidMemoryIndexed16UImm6:
5980 return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
5981 case Match_InvalidMemoryIndexed4UImm6:
5982 return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
5983 case Match_InvalidMemoryIndexed2UImm6:
5984 return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
5985 case Match_InvalidMemoryIndexed1UImm6:
5986 return Error(Loc, "index must be in range [0, 63].");
5987 case Match_InvalidMemoryWExtend8:
5988 return Error(Loc,
5989 "expected 'uxtw' or 'sxtw' with optional shift of #0");
5990 case Match_InvalidMemoryWExtend16:
5991 return Error(Loc,
5992 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
5993 case Match_InvalidMemoryWExtend32:
5994 return Error(Loc,
5995 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
5996 case Match_InvalidMemoryWExtend64:
5997 return Error(Loc,
5998 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
5999 case Match_InvalidMemoryWExtend128:
6000 return Error(Loc,
6001 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
6002 case Match_InvalidMemoryXExtend8:
6003 return Error(Loc,
6004 "expected 'lsl' or 'sxtx' with optional shift of #0");
6005 case Match_InvalidMemoryXExtend16:
6006 return Error(Loc,
6007 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
6008 case Match_InvalidMemoryXExtend32:
6009 return Error(Loc,
6010 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
6011 case Match_InvalidMemoryXExtend64:
6012 return Error(Loc,
6013 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
6014 case Match_InvalidMemoryXExtend128:
6015 return Error(Loc,
6016 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
6017 case Match_InvalidMemoryIndexed1:
6018 return Error(Loc, "index must be an integer in range [0, 4095].");
6019 case Match_InvalidMemoryIndexed2:
6020 return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
6021 case Match_InvalidMemoryIndexed4:
6022 return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
6023 case Match_InvalidMemoryIndexed8:
6024 return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
6025 case Match_InvalidMemoryIndexed16:
6026 return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
6027 case Match_InvalidImm0_0:
6028 return Error(Loc, "immediate must be 0.");
6029 case Match_InvalidImm0_1:
6030 return Error(Loc, "immediate must be an integer in range [0, 1].");
6031 case Match_InvalidImm0_3:
6032 return Error(Loc, "immediate must be an integer in range [0, 3].");
6033 case Match_InvalidImm0_7:
6034 return Error(Loc, "immediate must be an integer in range [0, 7].");
6035 case Match_InvalidImm0_15:
6036 return Error(Loc, "immediate must be an integer in range [0, 15].");
6037 case Match_InvalidImm0_31:
6038 return Error(Loc, "immediate must be an integer in range [0, 31].");
6039 case Match_InvalidImm0_63:
6040 return Error(Loc, "immediate must be an integer in range [0, 63].");
6041 case Match_InvalidImm0_127:
6042 return Error(Loc, "immediate must be an integer in range [0, 127].");
6043 case Match_InvalidImm0_255:
6044 return Error(Loc, "immediate must be an integer in range [0, 255].");
6045 case Match_InvalidImm0_65535:
6046 return Error(Loc, "immediate must be an integer in range [0, 65535].");
6047 case Match_InvalidImm1_8:
6048 return Error(Loc, "immediate must be an integer in range [1, 8].");
6049 case Match_InvalidImm1_16:
6050 return Error(Loc, "immediate must be an integer in range [1, 16].");
6051 case Match_InvalidImm1_32:
6052 return Error(Loc, "immediate must be an integer in range [1, 32].");
6053 case Match_InvalidImm1_64:
6054 return Error(Loc, "immediate must be an integer in range [1, 64].");
6055 case Match_InvalidImmM1_62:
6056 return Error(Loc, "immediate must be an integer in range [-1, 62].");
6057 case Match_InvalidMemoryIndexedRange2UImm0:
6058 return Error(Loc, "vector select offset must be the immediate range 0:1.");
6059 case Match_InvalidMemoryIndexedRange2UImm1:
6060 return Error(Loc, "vector select offset must be an immediate range of the "
6061 "form <immf>:<imml>, where the first "
6062 "immediate is a multiple of 2 in the range [0, 2], and "
6063 "the second immediate is immf + 1.");
6064 case Match_InvalidMemoryIndexedRange2UImm2:
6065 case Match_InvalidMemoryIndexedRange2UImm3:
6066 return Error(
6067 Loc,
6068 "vector select offset must be an immediate range of the form "
6069 "<immf>:<imml>, "
6070 "where the first immediate is a multiple of 2 in the range [0, 6] or "
6071 "[0, 14] "
6072 "depending on the instruction, and the second immediate is immf + 1.");
6073 case Match_InvalidMemoryIndexedRange4UImm0:
6074 return Error(Loc, "vector select offset must be the immediate range 0:3.");
6075 case Match_InvalidMemoryIndexedRange4UImm1:
6076 case Match_InvalidMemoryIndexedRange4UImm2:
6077 return Error(
6078 Loc,
6079 "vector select offset must be an immediate range of the form "
6080 "<immf>:<imml>, "
6081 "where the first immediate is a multiple of 4 in the range [0, 4] or "
6082 "[0, 12] "
6083 "depending on the instruction, and the second immediate is immf + 3.");
6084 case Match_InvalidSVEAddSubImm8:
6085 return Error(Loc, "immediate must be an integer in range [0, 255]"
6086 " with a shift amount of 0");
6087 case Match_InvalidSVEAddSubImm16:
6088 case Match_InvalidSVEAddSubImm32:
6089 case Match_InvalidSVEAddSubImm64:
6090 return Error(Loc, "immediate must be an integer in range [0, 255] or a "
6091 "multiple of 256 in range [256, 65280]");
6092 case Match_InvalidSVECpyImm8:
6093 return Error(Loc, "immediate must be an integer in range [-128, 255]"
6094 " with a shift amount of 0");
6095 case Match_InvalidSVECpyImm16:
6096 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6097 "multiple of 256 in range [-32768, 65280]");
6098 case Match_InvalidSVECpyImm32:
6099 case Match_InvalidSVECpyImm64:
6100 return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
6101 "multiple of 256 in range [-32768, 32512]");
6102 case Match_InvalidIndexRange0_0:
6103 return Error(Loc, "expected lane specifier '[0]'");
6104 case Match_InvalidIndexRange1_1:
6105 return Error(Loc, "expected lane specifier '[1]'");
6106 case Match_InvalidIndexRange0_15:
6107 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6108 case Match_InvalidIndexRange0_7:
6109 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6110 case Match_InvalidIndexRange0_3:
6111 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6112 case Match_InvalidIndexRange0_1:
6113 return Error(Loc, "vector lane must be an integer in range [0, 1].");
6114 case Match_InvalidSVEIndexRange0_63:
6115 return Error(Loc, "vector lane must be an integer in range [0, 63].");
6116 case Match_InvalidSVEIndexRange0_31:
6117 return Error(Loc, "vector lane must be an integer in range [0, 31].");
6118 case Match_InvalidSVEIndexRange0_15:
6119 return Error(Loc, "vector lane must be an integer in range [0, 15].");
6120 case Match_InvalidSVEIndexRange0_7:
6121 return Error(Loc, "vector lane must be an integer in range [0, 7].");
6122 case Match_InvalidSVEIndexRange0_3:
6123 return Error(Loc, "vector lane must be an integer in range [0, 3].");
6124 case Match_InvalidLabel:
6125 return Error(Loc, "expected label or encodable integer pc offset");
6126 case Match_MRS:
6127 return Error(Loc, "expected readable system register");
6128 case Match_MSR:
6129 case Match_InvalidSVCR:
6130 return Error(Loc, "expected writable system register or pstate");
6131 case Match_InvalidComplexRotationEven:
6132 return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
6133 case Match_InvalidComplexRotationOdd:
6134 return Error(Loc, "complex rotation must be 90 or 270.");
6135 case Match_MnemonicFail: {
6136 std::string Suggestion = AArch64MnemonicSpellCheck(
6137 ((AArch64Operand &)*Operands[0]).getToken(),
6138 ComputeAvailableFeatures(STI->getFeatureBits()));
6139 return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
6140 }
6141 case Match_InvalidGPR64shifted8:
6142 return Error(Loc, "register must be x0..x30 or xzr, without shift");
6143 case Match_InvalidGPR64shifted16:
6144 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #1'");
6145 case Match_InvalidGPR64shifted32:
6146 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #2'");
6147 case Match_InvalidGPR64shifted64:
6148 return Error(Loc, "register must be x0..x30 or xzr, with required shift 'lsl #3'");
6149 case Match_InvalidGPR64shifted128:
6150 return Error(
6151 Loc, "register must be x0..x30 or xzr, with required shift 'lsl #4'");
6152 case Match_InvalidGPR64NoXZRshifted8:
6153 return Error(Loc, "register must be x0..x30 without shift");
6154 case Match_InvalidGPR64NoXZRshifted16:
6155 return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
6156 case Match_InvalidGPR64NoXZRshifted32:
6157 return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
6158 case Match_InvalidGPR64NoXZRshifted64:
6159 return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
6160 case Match_InvalidGPR64NoXZRshifted128:
6161 return Error(Loc, "register must be x0..x30 with required shift 'lsl #4'");
6162 case Match_InvalidZPR32UXTW8:
6163 case Match_InvalidZPR32SXTW8:
6164 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
6165 case Match_InvalidZPR32UXTW16:
6166 case Match_InvalidZPR32SXTW16:
6167 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
6168 case Match_InvalidZPR32UXTW32:
6169 case Match_InvalidZPR32SXTW32:
6170 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
6171 case Match_InvalidZPR32UXTW64:
6172 case Match_InvalidZPR32SXTW64:
6173 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
6174 case Match_InvalidZPR64UXTW8:
6175 case Match_InvalidZPR64SXTW8:
6176 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
6177 case Match_InvalidZPR64UXTW16:
6178 case Match_InvalidZPR64SXTW16:
6179 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
6180 case Match_InvalidZPR64UXTW32:
6181 case Match_InvalidZPR64SXTW32:
6182 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
6183 case Match_InvalidZPR64UXTW64:
6184 case Match_InvalidZPR64SXTW64:
6185 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
6186 case Match_InvalidZPR32LSL8:
6187 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
6188 case Match_InvalidZPR32LSL16:
6189 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
6190 case Match_InvalidZPR32LSL32:
6191 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
6192 case Match_InvalidZPR32LSL64:
6193 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
6194 case Match_InvalidZPR64LSL8:
6195 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
6196 case Match_InvalidZPR64LSL16:
6197 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
6198 case Match_InvalidZPR64LSL32:
6199 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
6200 case Match_InvalidZPR64LSL64:
6201 return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
6202 case Match_InvalidZPR0:
6203 return Error(Loc, "expected register without element width suffix");
6204 case Match_InvalidZPR8:
6205 case Match_InvalidZPR16:
6206 case Match_InvalidZPR32:
6207 case Match_InvalidZPR64:
6208 case Match_InvalidZPR128:
6209 return Error(Loc, "invalid element width");
6210 case Match_InvalidZPR_3b8:
6211 return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
6212 case Match_InvalidZPR_3b16:
6213 return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
6214 case Match_InvalidZPR_3b32:
6215 return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
6216 case Match_InvalidZPR_4b8:
6217 return Error(Loc,
6218 "Invalid restricted vector register, expected z0.b..z15.b");
6219 case Match_InvalidZPR_4b16:
6220 return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
6221 case Match_InvalidZPR_4b32:
6222 return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
6223 case Match_InvalidZPR_4b64:
6224 return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
6225 case Match_InvalidZPRMul2_Lo8:
6226 return Error(Loc, "Invalid restricted vector register, expected even "
6227 "register in z0.b..z14.b");
6228 case Match_InvalidZPRMul2_Hi8:
6229 return Error(Loc, "Invalid restricted vector register, expected even "
6230 "register in z16.b..z30.b");
6231 case Match_InvalidZPRMul2_Lo16:
6232 return Error(Loc, "Invalid restricted vector register, expected even "
6233 "register in z0.h..z14.h");
6234 case Match_InvalidZPRMul2_Hi16:
6235 return Error(Loc, "Invalid restricted vector register, expected even "
6236 "register in z16.h..z30.h");
6237 case Match_InvalidZPRMul2_Lo32:
6238 return Error(Loc, "Invalid restricted vector register, expected even "
6239 "register in z0.s..z14.s");
6240 case Match_InvalidZPRMul2_Hi32:
6241 return Error(Loc, "Invalid restricted vector register, expected even "
6242 "register in z16.s..z30.s");
6243 case Match_InvalidZPRMul2_Lo64:
6244 return Error(Loc, "Invalid restricted vector register, expected even "
6245 "register in z0.d..z14.d");
6246 case Match_InvalidZPRMul2_Hi64:
6247 return Error(Loc, "Invalid restricted vector register, expected even "
6248 "register in z16.d..z30.d");
6249 case Match_InvalidZPR_K0:
6250 return Error(Loc, "invalid restricted vector register, expected register "
6251 "in z20..z23 or z28..z31");
6252 case Match_InvalidSVEPattern:
6253 return Error(Loc, "invalid predicate pattern");
6254 case Match_InvalidSVEPPRorPNRAnyReg:
6255 case Match_InvalidSVEPPRorPNRBReg:
6256 case Match_InvalidSVEPredicateAnyReg:
6257 case Match_InvalidSVEPredicateBReg:
6258 case Match_InvalidSVEPredicateHReg:
6259 case Match_InvalidSVEPredicateSReg:
6260 case Match_InvalidSVEPredicateDReg:
6261 return Error(Loc, "invalid predicate register.");
6262 case Match_InvalidSVEPredicate3bAnyReg:
6263 return Error(Loc, "invalid restricted predicate register, expected p0..p7 (without element suffix)");
6264 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6265 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6266 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6267 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6268 return Error(Loc, "Invalid predicate register, expected PN in range "
6269 "pn8..pn15 with element suffix.");
6270 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6271 return Error(Loc, "invalid restricted predicate-as-counter register "
6272 "expected pn8..pn15");
6273 case Match_InvalidSVEPNPredicateBReg:
6274 case Match_InvalidSVEPNPredicateHReg:
6275 case Match_InvalidSVEPNPredicateSReg:
6276 case Match_InvalidSVEPNPredicateDReg:
6277 return Error(Loc, "Invalid predicate register, expected PN in range "
6278 "pn0..pn15 with element suffix.");
6279 case Match_InvalidSVEVecLenSpecifier:
6280 return Error(Loc, "Invalid vector length specifier, expected VLx2 or VLx4");
6281 case Match_InvalidSVEPredicateListMul2x8:
6282 case Match_InvalidSVEPredicateListMul2x16:
6283 case Match_InvalidSVEPredicateListMul2x32:
6284 case Match_InvalidSVEPredicateListMul2x64:
6285 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6286 "predicate registers, where the first vector is a multiple of 2 "
6287 "and with correct element type");
6288 case Match_InvalidSVEExactFPImmOperandHalfOne:
6289 return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
6290 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6291 return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
6292 case Match_InvalidSVEExactFPImmOperandZeroOne:
6293 return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
6294 case Match_InvalidMatrixTileVectorH8:
6295 case Match_InvalidMatrixTileVectorV8:
6296 return Error(Loc, "invalid matrix operand, expected za0h.b or za0v.b");
6297 case Match_InvalidMatrixTileVectorH16:
6298 case Match_InvalidMatrixTileVectorV16:
6299 return Error(Loc,
6300 "invalid matrix operand, expected za[0-1]h.h or za[0-1]v.h");
6301 case Match_InvalidMatrixTileVectorH32:
6302 case Match_InvalidMatrixTileVectorV32:
6303 return Error(Loc,
6304 "invalid matrix operand, expected za[0-3]h.s or za[0-3]v.s");
6305 case Match_InvalidMatrixTileVectorH64:
6306 case Match_InvalidMatrixTileVectorV64:
6307 return Error(Loc,
6308 "invalid matrix operand, expected za[0-7]h.d or za[0-7]v.d");
6309 case Match_InvalidMatrixTileVectorH128:
6310 case Match_InvalidMatrixTileVectorV128:
6311 return Error(Loc,
6312 "invalid matrix operand, expected za[0-15]h.q or za[0-15]v.q");
6313 case Match_InvalidMatrixTile16:
6314 return Error(Loc, "invalid matrix operand, expected za[0-1].h");
6315 case Match_InvalidMatrixTile32:
6316 return Error(Loc, "invalid matrix operand, expected za[0-3].s");
6317 case Match_InvalidMatrixTile64:
6318 return Error(Loc, "invalid matrix operand, expected za[0-7].d");
6319 case Match_InvalidMatrix:
6320 return Error(Loc, "invalid matrix operand, expected za");
6321 case Match_InvalidMatrix8:
6322 return Error(Loc, "invalid matrix operand, expected suffix .b");
6323 case Match_InvalidMatrix16:
6324 return Error(Loc, "invalid matrix operand, expected suffix .h");
6325 case Match_InvalidMatrix32:
6326 return Error(Loc, "invalid matrix operand, expected suffix .s");
6327 case Match_InvalidMatrix64:
6328 return Error(Loc, "invalid matrix operand, expected suffix .d");
6329 case Match_InvalidMatrixIndexGPR32_12_15:
6330 return Error(Loc, "operand must be a register in range [w12, w15]");
6331 case Match_InvalidMatrixIndexGPR32_8_11:
6332 return Error(Loc, "operand must be a register in range [w8, w11]");
6333 case Match_InvalidSVEVectorList2x8Mul2:
6334 case Match_InvalidSVEVectorList2x16Mul2:
6335 case Match_InvalidSVEVectorList2x32Mul2:
6336 case Match_InvalidSVEVectorList2x64Mul2:
6337 case Match_InvalidSVEVectorList2x128Mul2:
6338 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6339 "SVE vectors, where the first vector is a multiple of 2 "
6340 "and with matching element types");
6341 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6342 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6343 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6344 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6345 return Error(Loc, "Invalid vector list, expected list with 2 consecutive "
6346 "SVE vectors in the range z0-z14, where the first vector "
6347 "is a multiple of 2 "
6348 "and with matching element types");
6349 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6350 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6351 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6352 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6353 return Error(Loc,
6354 "Invalid vector list, expected list with 2 consecutive "
6355 "SVE vectors in the range z16-z30, where the first vector "
6356 "is a multiple of 2 "
6357 "and with matching element types");
6358 case Match_InvalidSVEVectorList4x8Mul4:
6359 case Match_InvalidSVEVectorList4x16Mul4:
6360 case Match_InvalidSVEVectorList4x32Mul4:
6361 case Match_InvalidSVEVectorList4x64Mul4:
6362 case Match_InvalidSVEVectorList4x128Mul4:
6363 return Error(Loc, "Invalid vector list, expected list with 4 consecutive "
6364 "SVE vectors, where the first vector is a multiple of 4 "
6365 "and with matching element types");
6366 case Match_InvalidLookupTable:
6367 return Error(Loc, "Invalid lookup table, expected zt0");
6368 case Match_InvalidSVEVectorListStrided2x8:
6369 case Match_InvalidSVEVectorListStrided2x16:
6370 case Match_InvalidSVEVectorListStrided2x32:
6371 case Match_InvalidSVEVectorListStrided2x64:
6372 return Error(
6373 Loc,
6374 "Invalid vector list, expected list with each SVE vector in the list "
6375 "8 registers apart, and the first register in the range [z0, z7] or "
6376 "[z16, z23] and with correct element type");
6377 case Match_InvalidSVEVectorListStrided4x8:
6378 case Match_InvalidSVEVectorListStrided4x16:
6379 case Match_InvalidSVEVectorListStrided4x32:
6380 case Match_InvalidSVEVectorListStrided4x64:
6381 return Error(
6382 Loc,
6383 "Invalid vector list, expected list with each SVE vector in the list "
6384 "4 registers apart, and the first register in the range [z0, z3] or "
6385 "[z16, z19] and with correct element type");
6386 case Match_AddSubLSLImm3ShiftLarge:
6387 return Error(Loc,
6388 "expected 'lsl' with optional integer in range [0, 7]");
6389 default:
6390 llvm_unreachable("unexpected error code!");
6391 }
6392}
6393
6394static const char *getSubtargetFeatureName(uint64_t Val);
6395
6396bool AArch64AsmParser::matchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
6397 OperandVector &Operands,
6398 MCStreamer &Out,
6399 uint64_t &ErrorInfo,
6400 bool MatchingInlineAsm) {
6401 assert(!Operands.empty() && "Unexpected empty operand list!");
6402 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
6403 assert(Op.isToken() && "Leading operand should always be a mnemonic!");
6404
6405 StringRef Tok = Op.getToken();
6406 unsigned NumOperands = Operands.size();
6407
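// "lsl Rd, Rn, #shift" with an immediate is an alias of UBFM; rewrite it here,
// e.g. "lsl x0, x1, #3" becomes "ubfm x0, x1, #61, #60".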
6408 if (NumOperands == 4 && Tok == "lsl") {
6409 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6410 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6411 if (Op2.isScalarReg() && Op3.isImm()) {
6412 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6413 if (Op3CE) {
6414 uint64_t Op3Val = Op3CE->getValue();
6415 uint64_t NewOp3Val = 0;
6416 uint64_t NewOp4Val = 0;
6417 if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
6418 Op2.getReg())) {
6419 NewOp3Val = (32 - Op3Val) & 0x1f;
6420 NewOp4Val = 31 - Op3Val;
6421 } else {
6422 NewOp3Val = (64 - Op3Val) & 0x3f;
6423 NewOp4Val = 63 - Op3Val;
6424 }
6425
6426 const MCExpr *NewOp3 = MCConstantExpr::create(NewOp3Val, getContext());
6427 const MCExpr *NewOp4 = MCConstantExpr::create(NewOp4Val, getContext());
6428
6429 Operands[0] =
6430 AArch64Operand::CreateToken("ubfm", Op.getStartLoc(), getContext());
6431 Operands.push_back(AArch64Operand::CreateImm(
6432 NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
6433 Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
6434 Op3.getEndLoc(), getContext());
6435 }
6436 }
6437 } else if (NumOperands == 4 && Tok == "bfc") {
6438 // FIXME: Horrible hack to handle BFC->BFM alias.
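// e.g. "bfc w0, #4, #8" becomes "bfm w0, wzr, #28, #7".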
6439 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6440 AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
6441 AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
6442
6443 if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
6444 const MCConstantExpr *LSBCE = dyn_cast<MCConstantExpr>(LSBOp.getImm());
6445 const MCConstantExpr *WidthCE = dyn_cast<MCConstantExpr>(WidthOp.getImm());
6446
6447 if (LSBCE && WidthCE) {
6448 uint64_t LSB = LSBCE->getValue();
6449 uint64_t Width = WidthCE->getValue();
6450
6451 uint64_t RegWidth = 0;
6452 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6453 Op1.getReg()))
6454 RegWidth = 64;
6455 else
6456 RegWidth = 32;
6457
6458 if (LSB >= RegWidth)
6459 return Error(LSBOp.getStartLoc(),
6460 "expected integer in range [0, 31]");
6461 if (Width < 1 || Width > RegWidth)
6462 return Error(WidthOp.getStartLoc(),
6463 "expected integer in range [1, 32]");
6464
6465 uint64_t ImmR = 0;
6466 if (RegWidth == 32)
6467 ImmR = (32 - LSB) & 0x1f;
6468 else
6469 ImmR = (64 - LSB) & 0x3f;
6470
6471 uint64_t ImmS = Width - 1;
6472
6473 if (ImmR != 0 && ImmS >= ImmR)
6474 return Error(WidthOp.getStartLoc(),
6475 "requested insert overflows register");
6476
6477 const MCExpr *ImmRExpr = MCConstantExpr::create(ImmR, getContext());
6478 const MCExpr *ImmSExpr = MCConstantExpr::create(ImmS, getContext());
6479 Operands[0] =
6480 AArch64Operand::CreateToken("bfm", Op.getStartLoc(), getContext());
6481 Operands[2] = AArch64Operand::CreateReg(
6482 RegWidth == 32 ? AArch64::WZR : AArch64::XZR, RegKind::Scalar,
6483 SMLoc(), SMLoc(), getContext());
6484 Operands[3] = AArch64Operand::CreateImm(
6485 ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
6486 Operands.emplace_back(
6487 AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
6488 WidthOp.getEndLoc(), getContext()));
6489 }
6490 }
6491 } else if (NumOperands == 5) {
6492 // FIXME: Horrible hack to handle the BFI -> BFM, SBFIZ->SBFM, and
6493 // UBFIZ -> UBFM aliases.
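// e.g. "ubfiz x0, x1, #8, #16" becomes "ubfm x0, x1, #56, #15".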
6494 if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
6495 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6496 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6497 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6498
6499 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6500 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6501 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6502
6503 if (Op3CE && Op4CE) {
6504 uint64_t Op3Val = Op3CE->getValue();
6505 uint64_t Op4Val = Op4CE->getValue();
6506
6507 uint64_t RegWidth = 0;
6508 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6509 Op1.getReg()))
6510 RegWidth = 64;
6511 else
6512 RegWidth = 32;
6513
6514 if (Op3Val >= RegWidth)
6515 return Error(Op3.getStartLoc(),
6516 "expected integer in range [0, 31]");
6517 if (Op4Val < 1 || Op4Val > RegWidth)
6518 return Error(Op4.getStartLoc(),
6519 "expected integer in range [1, 32]");
6520
6521 uint64_t NewOp3Val = 0;
6522 if (RegWidth == 32)
6523 NewOp3Val = (32 - Op3Val) & 0x1f;
6524 else
6525 NewOp3Val = (64 - Op3Val) & 0x3f;
6526
6527 uint64_t NewOp4Val = Op4Val - 1;
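 // Worked example (illustrative): "ubfiz x0, x1, #8, #16" gives
 // NewOp3Val == (64 - 8) & 0x3f == 56 and NewOp4Val == 16 - 1 == 15, so it
 // becomes "ubfm x0, x1, #56, #15".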
6528
6529 if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
6530 return Error(Op4.getStartLoc(),
6531 "requested insert overflows register");
6532
6533 const MCExpr *NewOp3 =
6534 MCConstantExpr::create(NewOp3Val, getContext());
6535 const MCExpr *NewOp4 =
6536 MCConstantExpr::create(NewOp4Val, getContext());
6537 Operands[3] = AArch64Operand::CreateImm(
6538 NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
6539 Operands[4] = AArch64Operand::CreateImm(
6540 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6541 if (Tok == "bfi")
6542 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6543 getContext());
6544 else if (Tok == "sbfiz")
6545 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6546 getContext());
6547 else if (Tok == "ubfiz")
6548 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6549 getContext());
6550 else
6551 llvm_unreachable("No valid mnemonic for alias?");
6552 }
6553 }
6554
6555 // FIXME: Horrible hack to handle the BFXIL->BFM, SBFX->SBFM, and
6556 // UBFX -> UBFM aliases.
6557 } else if (NumOperands == 5 &&
6558 (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
6559 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6560 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6561 AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
6562
6563 if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
6564 const MCConstantExpr *Op3CE = dyn_cast<MCConstantExpr>(Op3.getImm());
6565 const MCConstantExpr *Op4CE = dyn_cast<MCConstantExpr>(Op4.getImm());
6566
6567 if (Op3CE && Op4CE) {
6568 uint64_t Op3Val = Op3CE->getValue();
6569 uint64_t Op4Val = Op4CE->getValue();
6570
6571 uint64_t RegWidth = 0;
6572 if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6573 Op1.getReg()))
6574 RegWidth = 64;
6575 else
6576 RegWidth = 32;
6577
6578 if (Op3Val >= RegWidth)
6579 return Error(Op3.getStartLoc(),
6580 "expected integer in range [0, 31]");
6581 if (Op4Val < 1 || Op4Val > RegWidth)
6582 return Error(Op4.getStartLoc(),
6583 "expected integer in range [1, 32]");
6584
6585 uint64_t NewOp4Val = Op3Val + Op4Val - 1;
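 // Worked example (illustrative): "sbfx w0, w1, #4, #8" keeps Op3 (#4) and
 // rewrites Op4 to 4 + 8 - 1 == 11, i.e. "sbfm w0, w1, #4, #11".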
6586
6587 if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
6588 return Error(Op4.getStartLoc(),
6589 "requested extract overflows register");
6590
6591 const MCExpr *NewOp4 =
6592 MCConstantExpr::create(NewOp4Val, getContext());
6593 Operands[4] = AArch64Operand::CreateImm(
6594 NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
6595 if (Tok == "bfxil")
6596 Operands[0] = AArch64Operand::CreateToken("bfm", Op.getStartLoc(),
6597 getContext());
6598 else if (Tok == "sbfx")
6599 Operands[0] = AArch64Operand::CreateToken("sbfm", Op.getStartLoc(),
6600 getContext());
6601 else if (Tok == "ubfx")
6602 Operands[0] = AArch64Operand::CreateToken("ubfm", Op.getStartLoc(),
6603 getContext());
6604 else
6605 llvm_unreachable("No valid mnemonic for alias?");
6606 }
6607 }
6608 }
6609 }
6610
6611 // The Cyclone CPU and early successors didn't execute the zero-cycle zeroing
6612 // instruction for FP registers correctly in some rare circumstances. Convert
6613 // it to a safe instruction and warn (because silently changing someone's
6614 // assembly is rude).
6615 if (getSTI().hasFeature(AArch64::FeatureZCZeroingFPWorkaround) &&
6616 NumOperands == 4 && Tok == "movi") {
6617 AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
6618 AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
6619 AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
6620 if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
6621 (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
6622 StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
6623 if (Suffix.lower() == ".2d" &&
6624 cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
6625 Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
6626 " correctly on this CPU, converting to equivalent movi.16b");
6627 // Switch the suffix to .16b.
6628 unsigned Idx = Op1.isToken() ? 1 : 2;
6629 Operands[Idx] =
6630 AArch64Operand::CreateToken(".16b", IDLoc, getContext());
6631 }
6632 }
6633 }
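 // e.g. "movi.2d v0, #0" is rewritten here to "movi.16b v0, #0", with the
 // warning above explaining the substitution.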
6634
6635 // FIXME: Horrible hack for sxtw and uxtw with Wn src and Xd dst operands.
6636 // InstAlias can't quite handle this since the reg classes aren't
6637 // subclasses.
6638 if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
6639 // The source register can be Wn here, but the matcher expects a
6640 // GPR64. Twiddle it here if necessary.
6641 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6642 if (Op.isScalarReg()) {
6643 MCRegister Reg = getXRegFromWReg(Op.getReg());
6644 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6645 Op.getStartLoc(), Op.getEndLoc(),
6646 getContext());
6647 }
6648 }
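 // Illustrative: for "sxtw x0, w1" the W source operand is rewritten as X1
 // (same register number) so the GPR64-based matcher pattern can apply.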
6649 // FIXME: Likewise for sxt[bh] with a Xd dst operand
6650 else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
6651 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6652 if (Op.isScalarReg() &&
6653 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6654 Op.getReg())) {
6655 // The source register can be Wn here, but the matcher expects a
6656 // GPR64. Twiddle it here if necessary.
6657 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
6658 if (Op.isScalarReg()) {
6659 MCRegister Reg = getXRegFromWReg(Op.getReg());
6660 Operands[2] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6661 Op.getStartLoc(),
6662 Op.getEndLoc(), getContext());
6663 }
6664 }
6665 }
6666 // FIXME: Likewise for uxt[bh] with a Xd dst operand
6667 else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
6668 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6669 if (Op.isScalarReg() &&
6670 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
6671 Op.getReg())) {
6672 // The source register can be Wn here, but the matcher expects a
6673 // GPR32. Twiddle it here if necessary.
6674 AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
6675 if (Op.isScalarReg()) {
6676 MCRegister Reg = getWRegFromXReg(Op.getReg());
6677 Operands[1] = AArch64Operand::CreateReg(Reg, RegKind::Scalar,
6678 Op.getStartLoc(),
6679 Op.getEndLoc(), getContext());
6680 }
6681 }
6682 }
6683
6684 MCInst Inst;
6685 FeatureBitset MissingFeatures;
6686 // First try to match against the secondary set of tables containing the
6687 // short-form NEON instructions (e.g. "fadd.2s v0, v1, v2").
6688 unsigned MatchResult =
6689 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6690 MatchingInlineAsm, 1);
6691
6692 // If that fails, try against the alternate table containing long-form NEON:
6693 // "fadd v0.2s, v1.2s, v2.2s"
6694 if (MatchResult != Match_Success) {
6695 // But first, save the short-form match result: we can use it in case the
6696 // long-form match also fails.
6697 auto ShortFormNEONErrorInfo = ErrorInfo;
6698 auto ShortFormNEONMatchResult = MatchResult;
6699 auto ShortFormNEONMissingFeatures = MissingFeatures;
6700
6701 MatchResult =
6702 MatchInstructionImpl(Operands, Inst, ErrorInfo, MissingFeatures,
6703 MatchingInlineAsm, 0);
6704
6705 // Now, both matches failed, and the long-form match failed on the mnemonic
6706 // suffix token operand. The short-form match failure is probably more
6707 // relevant: use it instead.
6708 if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
6709 Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
6710 ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
6711 MatchResult = ShortFormNEONMatchResult;
6712 ErrorInfo = ShortFormNEONErrorInfo;
6713 MissingFeatures = ShortFormNEONMissingFeatures;
6714 }
6715 }
6716
6717 switch (MatchResult) {
6718 case Match_Success: {
6719 // Perform range checking and other semantic validations
6720 SmallVector<SMLoc, 8> OperandLocs;
6721 NumOperands = Operands.size();
6722 for (unsigned i = 1; i < NumOperands; ++i)
6723 OperandLocs.push_back(Operands[i]->getStartLoc());
6724 if (validateInstruction(Inst, IDLoc, OperandLocs))
6725 return true;
6726
6727 Inst.setLoc(IDLoc);
6728 Out.emitInstruction(Inst, getSTI());
6729 return false;
6730 }
6731 case Match_MissingFeature: {
6732 assert(MissingFeatures.any() && "Unknown missing feature!");
6733 // Special case the error message for the very common case where only
6734 // a single subtarget feature is missing (e.g. NEON).
6735 std::string Msg = "instruction requires:";
6736 for (unsigned i = 0, e = MissingFeatures.size(); i != e; ++i) {
6737 if (MissingFeatures[i]) {
6738 Msg += " ";
6739 Msg += getSubtargetFeatureName(i);
6740 }
6741 }
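 // e.g. assembling an SVE instruction without +sve yields a diagnostic of the
 // form "instruction requires: sve".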
6742 return Error(IDLoc, Msg);
6743 }
6744 case Match_MnemonicFail:
6745 return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
6746 case Match_InvalidOperand: {
6747 SMLoc ErrorLoc = IDLoc;
6748
6749 if (ErrorInfo != ~0ULL) {
6750 if (ErrorInfo >= Operands.size())
6751 return Error(IDLoc, "too few operands for instruction",
6752 SMRange(IDLoc, getTok().getLoc()));
6753
6754 ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6755 if (ErrorLoc == SMLoc())
6756 ErrorLoc = IDLoc;
6757 }
6758 // If the match failed on a suffix token operand, tweak the diagnostic
6759 // accordingly.
6760 if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
6761 ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
6762 MatchResult = Match_InvalidSuffix;
6763
6764 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
6765 }
6766 case Match_InvalidTiedOperand:
6767 case Match_InvalidMemoryIndexed1:
6768 case Match_InvalidMemoryIndexed2:
6769 case Match_InvalidMemoryIndexed4:
6770 case Match_InvalidMemoryIndexed8:
6771 case Match_InvalidMemoryIndexed16:
6772 case Match_InvalidCondCode:
6773 case Match_AddSubLSLImm3ShiftLarge:
6774 case Match_AddSubRegExtendSmall:
6775 case Match_AddSubRegExtendLarge:
6776 case Match_AddSubSecondSource:
6777 case Match_LogicalSecondSource:
6778 case Match_AddSubRegShift32:
6779 case Match_AddSubRegShift64:
6780 case Match_InvalidMovImm32Shift:
6781 case Match_InvalidMovImm64Shift:
6782 case Match_InvalidFPImm:
6783 case Match_InvalidMemoryWExtend8:
6784 case Match_InvalidMemoryWExtend16:
6785 case Match_InvalidMemoryWExtend32:
6786 case Match_InvalidMemoryWExtend64:
6787 case Match_InvalidMemoryWExtend128:
6788 case Match_InvalidMemoryXExtend8:
6789 case Match_InvalidMemoryXExtend16:
6790 case Match_InvalidMemoryXExtend32:
6791 case Match_InvalidMemoryXExtend64:
6792 case Match_InvalidMemoryXExtend128:
6793 case Match_InvalidMemoryIndexed1SImm4:
6794 case Match_InvalidMemoryIndexed2SImm4:
6795 case Match_InvalidMemoryIndexed3SImm4:
6796 case Match_InvalidMemoryIndexed4SImm4:
6797 case Match_InvalidMemoryIndexed1SImm6:
6798 case Match_InvalidMemoryIndexed16SImm4:
6799 case Match_InvalidMemoryIndexed32SImm4:
6800 case Match_InvalidMemoryIndexed4SImm7:
6801 case Match_InvalidMemoryIndexed8SImm7:
6802 case Match_InvalidMemoryIndexed16SImm7:
6803 case Match_InvalidMemoryIndexed8UImm5:
6804 case Match_InvalidMemoryIndexed8UImm3:
6805 case Match_InvalidMemoryIndexed4UImm5:
6806 case Match_InvalidMemoryIndexed2UImm5:
6807 case Match_InvalidMemoryIndexed1UImm6:
6808 case Match_InvalidMemoryIndexed2UImm6:
6809 case Match_InvalidMemoryIndexed4UImm6:
6810 case Match_InvalidMemoryIndexed8UImm6:
6811 case Match_InvalidMemoryIndexed16UImm6:
6812 case Match_InvalidMemoryIndexedSImm6:
6813 case Match_InvalidMemoryIndexedSImm5:
6814 case Match_InvalidMemoryIndexedSImm8:
6815 case Match_InvalidMemoryIndexedSImm9:
6816 case Match_InvalidMemoryIndexed16SImm9:
6817 case Match_InvalidMemoryIndexed8SImm10:
6818 case Match_InvalidImm0_0:
6819 case Match_InvalidImm0_1:
6820 case Match_InvalidImm0_3:
6821 case Match_InvalidImm0_7:
6822 case Match_InvalidImm0_15:
6823 case Match_InvalidImm0_31:
6824 case Match_InvalidImm0_63:
6825 case Match_InvalidImm0_127:
6826 case Match_InvalidImm0_255:
6827 case Match_InvalidImm0_65535:
6828 case Match_InvalidImm1_8:
6829 case Match_InvalidImm1_16:
6830 case Match_InvalidImm1_32:
6831 case Match_InvalidImm1_64:
6832 case Match_InvalidImmM1_62:
6833 case Match_InvalidMemoryIndexedRange2UImm0:
6834 case Match_InvalidMemoryIndexedRange2UImm1:
6835 case Match_InvalidMemoryIndexedRange2UImm2:
6836 case Match_InvalidMemoryIndexedRange2UImm3:
6837 case Match_InvalidMemoryIndexedRange4UImm0:
6838 case Match_InvalidMemoryIndexedRange4UImm1:
6839 case Match_InvalidMemoryIndexedRange4UImm2:
6840 case Match_InvalidSVEAddSubImm8:
6841 case Match_InvalidSVEAddSubImm16:
6842 case Match_InvalidSVEAddSubImm32:
6843 case Match_InvalidSVEAddSubImm64:
6844 case Match_InvalidSVECpyImm8:
6845 case Match_InvalidSVECpyImm16:
6846 case Match_InvalidSVECpyImm32:
6847 case Match_InvalidSVECpyImm64:
6848 case Match_InvalidIndexRange0_0:
6849 case Match_InvalidIndexRange1_1:
6850 case Match_InvalidIndexRange0_15:
6851 case Match_InvalidIndexRange0_7:
6852 case Match_InvalidIndexRange0_3:
6853 case Match_InvalidIndexRange0_1:
6854 case Match_InvalidSVEIndexRange0_63:
6855 case Match_InvalidSVEIndexRange0_31:
6856 case Match_InvalidSVEIndexRange0_15:
6857 case Match_InvalidSVEIndexRange0_7:
6858 case Match_InvalidSVEIndexRange0_3:
6859 case Match_InvalidLabel:
6860 case Match_InvalidComplexRotationEven:
6861 case Match_InvalidComplexRotationOdd:
6862 case Match_InvalidGPR64shifted8:
6863 case Match_InvalidGPR64shifted16:
6864 case Match_InvalidGPR64shifted32:
6865 case Match_InvalidGPR64shifted64:
6866 case Match_InvalidGPR64shifted128:
6867 case Match_InvalidGPR64NoXZRshifted8:
6868 case Match_InvalidGPR64NoXZRshifted16:
6869 case Match_InvalidGPR64NoXZRshifted32:
6870 case Match_InvalidGPR64NoXZRshifted64:
6871 case Match_InvalidGPR64NoXZRshifted128:
6872 case Match_InvalidZPR32UXTW8:
6873 case Match_InvalidZPR32UXTW16:
6874 case Match_InvalidZPR32UXTW32:
6875 case Match_InvalidZPR32UXTW64:
6876 case Match_InvalidZPR32SXTW8:
6877 case Match_InvalidZPR32SXTW16:
6878 case Match_InvalidZPR32SXTW32:
6879 case Match_InvalidZPR32SXTW64:
6880 case Match_InvalidZPR64UXTW8:
6881 case Match_InvalidZPR64SXTW8:
6882 case Match_InvalidZPR64UXTW16:
6883 case Match_InvalidZPR64SXTW16:
6884 case Match_InvalidZPR64UXTW32:
6885 case Match_InvalidZPR64SXTW32:
6886 case Match_InvalidZPR64UXTW64:
6887 case Match_InvalidZPR64SXTW64:
6888 case Match_InvalidZPR32LSL8:
6889 case Match_InvalidZPR32LSL16:
6890 case Match_InvalidZPR32LSL32:
6891 case Match_InvalidZPR32LSL64:
6892 case Match_InvalidZPR64LSL8:
6893 case Match_InvalidZPR64LSL16:
6894 case Match_InvalidZPR64LSL32:
6895 case Match_InvalidZPR64LSL64:
6896 case Match_InvalidZPR0:
6897 case Match_InvalidZPR8:
6898 case Match_InvalidZPR16:
6899 case Match_InvalidZPR32:
6900 case Match_InvalidZPR64:
6901 case Match_InvalidZPR128:
6902 case Match_InvalidZPR_3b8:
6903 case Match_InvalidZPR_3b16:
6904 case Match_InvalidZPR_3b32:
6905 case Match_InvalidZPR_4b8:
6906 case Match_InvalidZPR_4b16:
6907 case Match_InvalidZPR_4b32:
6908 case Match_InvalidZPR_4b64:
6909 case Match_InvalidSVEPPRorPNRAnyReg:
6910 case Match_InvalidSVEPPRorPNRBReg:
6911 case Match_InvalidSVEPredicateAnyReg:
6912 case Match_InvalidSVEPattern:
6913 case Match_InvalidSVEVecLenSpecifier:
6914 case Match_InvalidSVEPredicateBReg:
6915 case Match_InvalidSVEPredicateHReg:
6916 case Match_InvalidSVEPredicateSReg:
6917 case Match_InvalidSVEPredicateDReg:
6918 case Match_InvalidSVEPredicate3bAnyReg:
6919 case Match_InvalidSVEPNPredicateB_p8to15Reg:
6920 case Match_InvalidSVEPNPredicateH_p8to15Reg:
6921 case Match_InvalidSVEPNPredicateS_p8to15Reg:
6922 case Match_InvalidSVEPNPredicateD_p8to15Reg:
6923 case Match_InvalidSVEPNPredicateAny_p8to15Reg:
6924 case Match_InvalidSVEPNPredicateBReg:
6925 case Match_InvalidSVEPNPredicateHReg:
6926 case Match_InvalidSVEPNPredicateSReg:
6927 case Match_InvalidSVEPNPredicateDReg:
6928 case Match_InvalidSVEPredicateListMul2x8:
6929 case Match_InvalidSVEPredicateListMul2x16:
6930 case Match_InvalidSVEPredicateListMul2x32:
6931 case Match_InvalidSVEPredicateListMul2x64:
6932 case Match_InvalidSVEExactFPImmOperandHalfOne:
6933 case Match_InvalidSVEExactFPImmOperandHalfTwo:
6934 case Match_InvalidSVEExactFPImmOperandZeroOne:
6935 case Match_InvalidMatrixTile16:
6936 case Match_InvalidMatrixTile32:
6937 case Match_InvalidMatrixTile64:
6938 case Match_InvalidMatrix:
6939 case Match_InvalidMatrix8:
6940 case Match_InvalidMatrix16:
6941 case Match_InvalidMatrix32:
6942 case Match_InvalidMatrix64:
6943 case Match_InvalidMatrixTileVectorH8:
6944 case Match_InvalidMatrixTileVectorH16:
6945 case Match_InvalidMatrixTileVectorH32:
6946 case Match_InvalidMatrixTileVectorH64:
6947 case Match_InvalidMatrixTileVectorH128:
6948 case Match_InvalidMatrixTileVectorV8:
6949 case Match_InvalidMatrixTileVectorV16:
6950 case Match_InvalidMatrixTileVectorV32:
6951 case Match_InvalidMatrixTileVectorV64:
6952 case Match_InvalidMatrixTileVectorV128:
6953 case Match_InvalidSVCR:
6954 case Match_InvalidMatrixIndexGPR32_12_15:
6955 case Match_InvalidMatrixIndexGPR32_8_11:
6956 case Match_InvalidLookupTable:
6957 case Match_InvalidZPRMul2_Lo8:
6958 case Match_InvalidZPRMul2_Hi8:
6959 case Match_InvalidZPRMul2_Lo16:
6960 case Match_InvalidZPRMul2_Hi16:
6961 case Match_InvalidZPRMul2_Lo32:
6962 case Match_InvalidZPRMul2_Hi32:
6963 case Match_InvalidZPRMul2_Lo64:
6964 case Match_InvalidZPRMul2_Hi64:
6965 case Match_InvalidZPR_K0:
6966 case Match_InvalidSVEVectorList2x8Mul2:
6967 case Match_InvalidSVEVectorList2x16Mul2:
6968 case Match_InvalidSVEVectorList2x32Mul2:
6969 case Match_InvalidSVEVectorList2x64Mul2:
6970 case Match_InvalidSVEVectorList2x128Mul2:
6971 case Match_InvalidSVEVectorList4x8Mul4:
6972 case Match_InvalidSVEVectorList4x16Mul4:
6973 case Match_InvalidSVEVectorList4x32Mul4:
6974 case Match_InvalidSVEVectorList4x64Mul4:
6975 case Match_InvalidSVEVectorList4x128Mul4:
6976 case Match_InvalidSVEVectorList2x8Mul2_Lo:
6977 case Match_InvalidSVEVectorList2x16Mul2_Lo:
6978 case Match_InvalidSVEVectorList2x32Mul2_Lo:
6979 case Match_InvalidSVEVectorList2x64Mul2_Lo:
6980 case Match_InvalidSVEVectorList2x8Mul2_Hi:
6981 case Match_InvalidSVEVectorList2x16Mul2_Hi:
6982 case Match_InvalidSVEVectorList2x32Mul2_Hi:
6983 case Match_InvalidSVEVectorList2x64Mul2_Hi:
6984 case Match_InvalidSVEVectorListStrided2x8:
6985 case Match_InvalidSVEVectorListStrided2x16:
6986 case Match_InvalidSVEVectorListStrided2x32:
6987 case Match_InvalidSVEVectorListStrided2x64:
6988 case Match_InvalidSVEVectorListStrided4x8:
6989 case Match_InvalidSVEVectorListStrided4x16:
6990 case Match_InvalidSVEVectorListStrided4x32:
6991 case Match_InvalidSVEVectorListStrided4x64:
6992 case Match_MSR:
6993 case Match_MRS: {
6994 if (ErrorInfo >= Operands.size())
6995 return Error(IDLoc, "too few operands for instruction", SMRange(IDLoc, (*Operands.back()).getEndLoc()));
6996 // Any time we get here, there's nothing fancy to do. Just get the
6997 // operand SMLoc and display the diagnostic.
6998 SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
6999 if (ErrorLoc == SMLoc())
7000 ErrorLoc = IDLoc;
7001 return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
7002 }
7003 }
7004
7005 llvm_unreachable("Implement any new match types added!");
7006}
7007
7008/// ParseDirective parses the AArch64-specific directives
7009bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
7010 const MCContext::Environment Format = getContext().getObjectFileType();
7011 bool IsMachO = Format == MCContext::IsMachO;
7012 bool IsCOFF = Format == MCContext::IsCOFF;
7013 bool IsELF = Format == MCContext::IsELF;
7014
7015 auto IDVal = DirectiveID.getIdentifier().lower();
7016 SMLoc Loc = DirectiveID.getLoc();
7017 if (IDVal == ".arch")
7018 parseDirectiveArch(Loc);
7019 else if (IDVal == ".cpu")
7020 parseDirectiveCPU(Loc);
7021 else if (IDVal == ".tlsdesccall")
7022 parseDirectiveTLSDescCall(Loc);
7023 else if (IDVal == ".ltorg" || IDVal == ".pool")
7024 parseDirectiveLtorg(Loc);
7025 else if (IDVal == ".unreq")
7026 parseDirectiveUnreq(Loc);
7027 else if (IDVal == ".inst")
7028 parseDirectiveInst(Loc);
7029 else if (IDVal == ".cfi_negate_ra_state")
7030 parseDirectiveCFINegateRAState();
7031 else if (IDVal == ".cfi_negate_ra_state_with_pc")
7032 parseDirectiveCFINegateRAStateWithPC();
7033 else if (IDVal == ".cfi_b_key_frame")
7034 parseDirectiveCFIBKeyFrame();
7035 else if (IDVal == ".cfi_mte_tagged_frame")
7036 parseDirectiveCFIMTETaggedFrame();
7037 else if (IDVal == ".arch_extension")
7038 parseDirectiveArchExtension(Loc);
7039 else if (IDVal == ".variant_pcs")
7040 parseDirectiveVariantPCS(Loc);
7041 else if (IsMachO) {
7042 if (IDVal == MCLOHDirectiveName())
7043 parseDirectiveLOH(IDVal, Loc);
7044 else
7045 return true;
7046 } else if (IsCOFF) {
7047 if (IDVal == ".seh_stackalloc")
7048 parseDirectiveSEHAllocStack(Loc);
7049 else if (IDVal == ".seh_endprologue")
7050 parseDirectiveSEHPrologEnd(Loc);
7051 else if (IDVal == ".seh_save_r19r20_x")
7052 parseDirectiveSEHSaveR19R20X(Loc);
7053 else if (IDVal == ".seh_save_fplr")
7054 parseDirectiveSEHSaveFPLR(Loc);
7055 else if (IDVal == ".seh_save_fplr_x")
7056 parseDirectiveSEHSaveFPLRX(Loc);
7057 else if (IDVal == ".seh_save_reg")
7058 parseDirectiveSEHSaveReg(Loc);
7059 else if (IDVal == ".seh_save_reg_x")
7060 parseDirectiveSEHSaveRegX(Loc);
7061 else if (IDVal == ".seh_save_regp")
7062 parseDirectiveSEHSaveRegP(Loc);
7063 else if (IDVal == ".seh_save_regp_x")
7064 parseDirectiveSEHSaveRegPX(Loc);
7065 else if (IDVal == ".seh_save_lrpair")
7066 parseDirectiveSEHSaveLRPair(Loc);
7067 else if (IDVal == ".seh_save_freg")
7068 parseDirectiveSEHSaveFReg(Loc);
7069 else if (IDVal == ".seh_save_freg_x")
7070 parseDirectiveSEHSaveFRegX(Loc);
7071 else if (IDVal == ".seh_save_fregp")
7072 parseDirectiveSEHSaveFRegP(Loc);
7073 else if (IDVal == ".seh_save_fregp_x")
7074 parseDirectiveSEHSaveFRegPX(Loc);
7075 else if (IDVal == ".seh_set_fp")
7076 parseDirectiveSEHSetFP(Loc);
7077 else if (IDVal == ".seh_add_fp")
7078 parseDirectiveSEHAddFP(Loc);
7079 else if (IDVal == ".seh_nop")
7080 parseDirectiveSEHNop(Loc);
7081 else if (IDVal == ".seh_save_next")
7082 parseDirectiveSEHSaveNext(Loc);
7083 else if (IDVal == ".seh_startepilogue")
7084 parseDirectiveSEHEpilogStart(Loc);
7085 else if (IDVal == ".seh_endepilogue")
7086 parseDirectiveSEHEpilogEnd(Loc);
7087 else if (IDVal == ".seh_trap_frame")
7088 parseDirectiveSEHTrapFrame(Loc);
7089 else if (IDVal == ".seh_pushframe")
7090 parseDirectiveSEHMachineFrame(Loc);
7091 else if (IDVal == ".seh_context")
7092 parseDirectiveSEHContext(Loc);
7093 else if (IDVal == ".seh_ec_context")
7094 parseDirectiveSEHECContext(Loc);
7095 else if (IDVal == ".seh_clear_unwound_to_call")
7096 parseDirectiveSEHClearUnwoundToCall(Loc);
7097 else if (IDVal == ".seh_pac_sign_lr")
7098 parseDirectiveSEHPACSignLR(Loc);
7099 else if (IDVal == ".seh_save_any_reg")
7100 parseDirectiveSEHSaveAnyReg(Loc, false, false);
7101 else if (IDVal == ".seh_save_any_reg_p")
7102 parseDirectiveSEHSaveAnyReg(Loc, true, false);
7103 else if (IDVal == ".seh_save_any_reg_x")
7104 parseDirectiveSEHSaveAnyReg(Loc, false, true);
7105 else if (IDVal == ".seh_save_any_reg_px")
7106 parseDirectiveSEHSaveAnyReg(Loc, true, true);
7107 else if (IDVal == ".seh_allocz")
7108 parseDirectiveSEHAllocZ(Loc);
7109 else if (IDVal == ".seh_save_zreg")
7110 parseDirectiveSEHSaveZReg(Loc);
7111 else if (IDVal == ".seh_save_preg")
7112 parseDirectiveSEHSavePReg(Loc);
7113 else
7114 return true;
7115 } else if (IsELF) {
7116 if (IDVal == ".aeabi_subsection")
7117 parseDirectiveAeabiSubSectionHeader(Loc);
7118 else if (IDVal == ".aeabi_attribute")
7119 parseDirectiveAeabiAArch64Attr(Loc);
7120 else
7121 return true;
7122 } else
7123 return true;
7124 return false;
7125}
7126
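// Illustrative: with ".arch armv8.4-a+crypto" the "crypto" alias expands below
// to sm4, sha3, sha2 and aes, while on armv8.2-a it expands to sha2 and aes
// only; "+nocrypto" produces the corresponding "no*" features.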
7127static void ExpandCryptoAEK(const AArch64::ArchInfo &ArchInfo,
7128 SmallVector<StringRef, 4> &RequestedExtensions) {
7129 const bool NoCrypto = llvm::is_contained(RequestedExtensions, "nocrypto");
7130 const bool Crypto = llvm::is_contained(RequestedExtensions, "crypto");
7131
7132 if (!NoCrypto && Crypto) {
7133 // Map 'generic' (and others) to sha2 and aes, because
7134 // that was the traditional meaning of crypto.
7135 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7136 ArchInfo == AArch64::ARMV8_3A) {
7137 RequestedExtensions.push_back("sha2");
7138 RequestedExtensions.push_back("aes");
7139 }
7140 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7141 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7142 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7143 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7144 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7145 ArchInfo == AArch64::ARMV9_4A || ArchInfo == AArch64::ARMV8R) {
7146 RequestedExtensions.push_back("sm4");
7147 RequestedExtensions.push_back("sha3");
7148 RequestedExtensions.push_back("sha2");
7149 RequestedExtensions.push_back("aes");
7150 }
7151 } else if (NoCrypto) {
7152 // Map 'generic' (and others) to sha2 and aes, because
7153 // that was the traditional meaning of crypto.
7154 if (ArchInfo == AArch64::ARMV8_1A || ArchInfo == AArch64::ARMV8_2A ||
7155 ArchInfo == AArch64::ARMV8_3A) {
7156 RequestedExtensions.push_back("nosha2");
7157 RequestedExtensions.push_back("noaes");
7158 }
7159 if (ArchInfo == AArch64::ARMV8_4A || ArchInfo == AArch64::ARMV8_5A ||
7160 ArchInfo == AArch64::ARMV8_6A || ArchInfo == AArch64::ARMV8_7A ||
7161 ArchInfo == AArch64::ARMV8_8A || ArchInfo == AArch64::ARMV8_9A ||
7162 ArchInfo == AArch64::ARMV9A || ArchInfo == AArch64::ARMV9_1A ||
7163 ArchInfo == AArch64::ARMV9_2A || ArchInfo == AArch64::ARMV9_3A ||
7164 ArchInfo == AArch64::ARMV9_4A) {
7165 RequestedExtensions.push_back("nosm4");
7166 RequestedExtensions.push_back("nosha3");
7167 RequestedExtensions.push_back("nosha2");
7168 RequestedExtensions.push_back("noaes");
7169 }
7170 }
7171}
7172
7173static SMLoc incrementLoc(SMLoc L, int Offset) {
7174 return SMLoc::getFromPointer(L.getPointer() + Offset);
7175}
7176
7177/// parseDirectiveArch
7178/// ::= .arch token
7179bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
7180 SMLoc CurLoc = getLoc();
7181
7182 StringRef Name = getParser().parseStringToEndOfStatement().trim();
7183 StringRef Arch, ExtensionString;
7184 std::tie(Arch, ExtensionString) = Name.split('+');
7185
7186 const AArch64::ArchInfo *ArchInfo = AArch64::parseArch(Arch);
7187 if (!ArchInfo)
7188 return Error(CurLoc, "unknown arch name");
7189
7190 if (parseToken(AsmToken::EndOfStatement))
7191 return true;
7192
7193 // Get the architecture and extension features.
7194 std::vector<StringRef> AArch64Features;
7195 AArch64Features.push_back(ArchInfo->ArchFeature);
7196 AArch64::getExtensionFeatures(ArchInfo->DefaultExts, AArch64Features);
7197
7198 MCSubtargetInfo &STI = copySTI();
7199 std::vector<std::string> ArchFeatures(AArch64Features.begin(), AArch64Features.end());
7200 STI.setDefaultFeatures("generic", /*TuneCPU*/ "generic",
7201 join(ArchFeatures.begin(), ArchFeatures.end(), ","));
7202
7203 SmallVector<StringRef, 4> RequestedExtensions;
7204 if (!ExtensionString.empty())
7205 ExtensionString.split(RequestedExtensions, '+');
7206
7207 ExpandCryptoAEK(*ArchInfo, RequestedExtensions);
7208 CurLoc = incrementLoc(CurLoc, Arch.size());
7209
7210 for (auto Name : RequestedExtensions) {
7211 // Advance source location past '+'.
7212 CurLoc = incrementLoc(CurLoc, 1);
7213
7214 bool EnableFeature = !Name.consume_front_insensitive("no");
7215
7216 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7217 return Extension.Name == Name;
7218 });
7219
7220 if (It == std::end(ExtensionMap))
7221 return Error(CurLoc, "unsupported architectural extension: " + Name);
7222
7223 if (EnableFeature)
7224 STI.SetFeatureBitsTransitively(It->Features);
7225 else
7226 STI.ClearFeatureBitsTransitively(It->Features);
7227 CurLoc = incrementLoc(CurLoc, Name.size());
7228 }
7229 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7230 setAvailableFeatures(Features);
7231
7232 getTargetStreamer().emitDirectiveArch(Name);
7233 return false;
7234}
7235
7236/// parseDirectiveArchExtension
7237/// ::= .arch_extension [no]feature
7238bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
7239 SMLoc ExtLoc = getLoc();
7240
7241 StringRef FullName = getParser().parseStringToEndOfStatement().trim();
7242
7243 if (parseEOL())
7244 return true;
7245
7246 bool EnableFeature = true;
7247 StringRef Name = FullName;
7248 if (Name.starts_with_insensitive("no")) {
7249 EnableFeature = false;
7250 Name = Name.substr(2);
7251 }
7252
7253 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7254 return Extension.Name == Name;
7255 });
7256
7257 if (It == std::end(ExtensionMap))
7258 return Error(ExtLoc, "unsupported architectural extension: " + Name);
7259
7260 MCSubtargetInfo &STI = copySTI();
7261 if (EnableFeature)
7262 STI.SetFeatureBitsTransitively(It->Features);
7263 else
7264 STI.ClearFeatureBitsTransitively(It->Features);
7265 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7266 setAvailableFeatures(Features);
7267
7268 getTargetStreamer().emitDirectiveArchExtension(FullName);
7269 return false;
7270}
7271
7272/// parseDirectiveCPU
7273/// ::= .cpu id
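/// e.g. ".cpu cortex-a57+crc" (illustrative)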
7274bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
7275 SMLoc CurLoc = getLoc();
7276
7277 StringRef CPU, ExtensionString;
7278 std::tie(CPU, ExtensionString) =
7279 getParser().parseStringToEndOfStatement().trim().split('+');
7280
7281 if (parseToken(AsmToken::EndOfStatement))
7282 return true;
7283
7284 SmallVector<StringRef, 4> RequestedExtensions;
7285 if (!ExtensionString.empty())
7286 ExtensionString.split(RequestedExtensions, '+');
7287
7288 const llvm::AArch64::ArchInfo *CpuArch = llvm::AArch64::getArchForCpu(CPU);
7289 if (!CpuArch) {
7290 Error(CurLoc, "unknown CPU name");
7291 return false;
7292 }
7293 ExpandCryptoAEK(*CpuArch, RequestedExtensions);
7294
7295 MCSubtargetInfo &STI = copySTI();
7296 STI.setDefaultFeatures(CPU, /*TuneCPU*/ CPU, "");
7297 CurLoc = incrementLoc(CurLoc, CPU.size());
7298
7299 for (auto Name : RequestedExtensions) {
7300 // Advance source location past '+'.
7301 CurLoc = incrementLoc(CurLoc, 1);
7302
7303 bool EnableFeature = !Name.consume_front_insensitive("no");
7304
7305 auto It = llvm::find_if(ExtensionMap, [&Name](const auto &Extension) {
7306 return Extension.Name == Name;
7307 });
7308
7309 if (It == std::end(ExtensionMap))
7310 return Error(CurLoc, "unsupported architectural extension: " + Name);
7311
7312 if (EnableFeature)
7313 STI.SetFeatureBitsTransitively(It->Features);
7314 else
7315 STI.ClearFeatureBitsTransitively(It->Features);
7316 CurLoc = incrementLoc(CurLoc, Name.size());
7317 }
7318 FeatureBitset Features = ComputeAvailableFeatures(STI.getFeatureBits());
7319 setAvailableFeatures(Features);
7320 return false;
7321}
7322
7323/// parseDirectiveInst
7324/// ::= .inst opcode [, ...]
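/// e.g. ".inst 0xd503201f" emits the 32-bit word of a NOP (illustrative)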
7325bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
7326 if (getLexer().is(AsmToken::EndOfStatement))
7327 return Error(Loc, "expected expression following '.inst' directive");
7328
7329 auto parseOp = [&]() -> bool {
7330 SMLoc L = getLoc();
7331 const MCExpr *Expr = nullptr;
7332 if (check(getParser().parseExpression(Expr), L, "expected expression"))
7333 return true;
7334 const MCConstantExpr *Value = dyn_cast_or_null<MCConstantExpr>(Expr);
7335 if (check(!Value, L, "expected constant expression"))
7336 return true;
7337 getTargetStreamer().emitInst(Value->getValue());
7338 return false;
7339 };
7340
7341 return parseMany(parseOp);
7342}
7343
7344// parseDirectiveTLSDescCall:
7345// ::= .tlsdesccall symbol
7346bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
7347 StringRef Name;
7348 if (check(getParser().parseIdentifier(Name), L, "expected symbol") ||
7349 parseToken(AsmToken::EndOfStatement))
7350 return true;
7351
7352 MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
7353 const MCExpr *Expr = MCSymbolRefExpr::create(Sym, getContext());
7354 Expr = MCSpecifierExpr::create(Expr, AArch64::S_TLSDESC, getContext());
7355
7356 MCInst Inst;
7357 Inst.setOpcode(AArch64::TLSDESCCALL);
7358 Inst.addOperand(MCOperand::createExpr(Expr));
7359
7360 getParser().getStreamer().emitInstruction(Inst, getSTI());
7361 return false;
7362}
7363
7364/// ::= .loh <lohName | lohId> label1, ..., labelN
7365/// The number of arguments depends on the LOH identifier.
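/// e.g. ".loh AdrpAdd Lpage, Loff" takes two labels (illustrative)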
7366 bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
7367 MCLOHType Kind;
7368 if (getTok().isNot(AsmToken::Identifier)) {
7369 if (getTok().isNot(AsmToken::Integer))
7370 return TokError("expected an identifier or a number in directive");
7371 // We successfully get a numeric value for the identifier.
7372 // Check if it is valid.
7373 int64_t Id = getTok().getIntVal();
7374 if (Id <= -1U && !isValidMCLOHType(Id))
7375 return TokError("invalid numeric identifier in directive");
7376 Kind = (MCLOHType)Id;
7377 } else {
7378 StringRef Name = getTok().getIdentifier();
7379 // We successfully parse an identifier.
7380 // Check if it is a recognized one.
7381 int Id = MCLOHNameToId(Name);
7382
7383 if (Id == -1)
7384 return TokError("invalid identifier in directive");
7385 Kind = (MCLOHType)Id;
7386 }
7387 // Consume the identifier.
7388 Lex();
7389 // Get the number of arguments of this LOH.
7390 int NbArgs = MCLOHIdToNbArgs(Kind);
7391
7392 assert(NbArgs != -1 && "Invalid number of arguments");
7393
7394 SmallVector<MCSymbol *, 3> Args;
7395 for (int Idx = 0; Idx < NbArgs; ++Idx) {
7396 StringRef Name;
7397 if (getParser().parseIdentifier(Name))
7398 return TokError("expected identifier in directive");
7399 Args.push_back(getContext().getOrCreateSymbol(Name));
7400
7401 if (Idx + 1 == NbArgs)
7402 break;
7403 if (parseComma())
7404 return true;
7405 }
7406 if (parseEOL())
7407 return true;
7408
7409 getStreamer().emitLOHDirective(Kind, Args);
7410 return false;
7411}
7412
7413/// parseDirectiveLtorg
7414/// ::= .ltorg | .pool
7415bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
7416 if (parseEOL())
7417 return true;
7418 getTargetStreamer().emitCurrentConstantPool();
7419 return false;
7420}
7421
7422/// parseDirectiveReq
7423/// ::= name .req registername
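/// e.g. "fpreg .req d9", later released with ".unreq fpreg" (illustrative)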
7424bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7425 Lex(); // Eat the '.req' token.
7426 SMLoc SRegLoc = getLoc();
7427 RegKind RegisterKind = RegKind::Scalar;
7428 MCRegister RegNum;
7429 ParseStatus ParseRes = tryParseScalarRegister(RegNum);
7430
7431 if (!ParseRes.isSuccess()) {
7432 StringRef Kind;
7433 RegisterKind = RegKind::NeonVector;
7434 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
7435
7436 if (ParseRes.isFailure())
7437 return true;
7438
7439 if (ParseRes.isSuccess() && !Kind.empty())
7440 return Error(SRegLoc, "vector register without type specifier expected");
7441 }
7442
7443 if (!ParseRes.isSuccess()) {
7444 StringRef Kind;
7445 RegisterKind = RegKind::SVEDataVector;
7446 ParseRes =
7447 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7448
7449 if (ParseRes.isFailure())
7450 return true;
7451
7452 if (ParseRes.isSuccess() && !Kind.empty())
7453 return Error(SRegLoc,
7454 "sve vector register without type specifier expected");
7455 }
7456
7457 if (!ParseRes.isSuccess()) {
7458 StringRef Kind;
7459 RegisterKind = RegKind::SVEPredicateVector;
7460 ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7461
7462 if (ParseRes.isFailure())
7463 return true;
7464
7465 if (ParseRes.isSuccess() && !Kind.empty())
7466 return Error(SRegLoc,
7467 "sve predicate register without type specifier expected");
7468 }
7469
7470 if (!ParseRes.isSuccess())
7471 return Error(SRegLoc, "register name or alias expected");
7472
7473 // Shouldn't be anything else.
7474 if (parseEOL())
7475 return true;
7476
7477 auto pair = std::make_pair(RegisterKind, (unsigned) RegNum);
7478 if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
7479 Warning(L, "ignoring redefinition of register alias '" + Name + "'");
7480
7481 return false;
7482}
7483
7484/// parseDirectiveUnreq
7485/// ::= .unreq registername
7486bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
7487 if (getTok().isNot(AsmToken::Identifier))
7488 return TokError("unexpected input in .unreq directive.");
7489 RegisterReqs.erase(getTok().getIdentifier().lower());
7490 Lex(); // Eat the identifier.
7491 return parseToken(AsmToken::EndOfStatement);
7492}
7493
7494bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
7495 if (parseEOL())
7496 return true;
7497 getStreamer().emitCFINegateRAState();
7498 return false;
7499}
7500
7501bool AArch64AsmParser::parseDirectiveCFINegateRAStateWithPC() {
7502 if (parseEOL())
7503 return true;
7504 getStreamer().emitCFINegateRAStateWithPC();
7505 return false;
7506}
7507
7508/// parseDirectiveCFIBKeyFrame
7509/// ::= .cfi_b_key_frame
7510bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
7511 if (parseEOL())
7512 return true;
7513 getStreamer().emitCFIBKeyFrame();
7514 return false;
7515}
7516
7517/// parseDirectiveCFIMTETaggedFrame
7518/// ::= .cfi_mte_tagged_frame
7519bool AArch64AsmParser::parseDirectiveCFIMTETaggedFrame() {
7520 if (parseEOL())
7521 return true;
7522 getStreamer().emitCFIMTETaggedFrame();
7523 return false;
7524}
7525
7526/// parseDirectiveVariantPCS
7527/// ::= .variant_pcs symbolname
7528bool AArch64AsmParser::parseDirectiveVariantPCS(SMLoc L) {
7529 StringRef Name;
7530 if (getParser().parseIdentifier(Name))
7531 return TokError("expected symbol name");
7532 if (parseEOL())
7533 return true;
7534 getTargetStreamer().emitDirectiveVariantPCS(
7535 getContext().getOrCreateSymbol(Name));
7536 return false;
7537}
7538
7539/// parseDirectiveSEHAllocStack
7540/// ::= .seh_stackalloc
7541bool AArch64AsmParser::parseDirectiveSEHAllocStack(SMLoc L) {
7542 int64_t Size;
7543 if (parseImmExpr(Size))
7544 return true;
7545 getTargetStreamer().emitARM64WinCFIAllocStack(Size);
7546 return false;
7547}
7548
7549/// parseDirectiveSEHPrologEnd
7550/// ::= .seh_endprologue
7551bool AArch64AsmParser::parseDirectiveSEHPrologEnd(SMLoc L) {
7552 getTargetStreamer().emitARM64WinCFIPrologEnd();
7553 return false;
7554}
7555
7556/// parseDirectiveSEHSaveR19R20X
7557/// ::= .seh_save_r19r20_x
7558bool AArch64AsmParser::parseDirectiveSEHSaveR19R20X(SMLoc L) {
7559 int64_t Offset;
7560 if (parseImmExpr(Offset))
7561 return true;
7562 getTargetStreamer().emitARM64WinCFISaveR19R20X(Offset);
7563 return false;
7564}
7565
7566/// parseDirectiveSEHSaveFPLR
7567/// ::= .seh_save_fplr
7568bool AArch64AsmParser::parseDirectiveSEHSaveFPLR(SMLoc L) {
7569 int64_t Offset;
7570 if (parseImmExpr(Offset))
7571 return true;
7572 getTargetStreamer().emitARM64WinCFISaveFPLR(Offset);
7573 return false;
7574}
7575
7576/// parseDirectiveSEHSaveFPLRX
7577/// ::= .seh_save_fplr_x
7578bool AArch64AsmParser::parseDirectiveSEHSaveFPLRX(SMLoc L) {
7579 int64_t Offset;
7580 if (parseImmExpr(Offset))
7581 return true;
7582 getTargetStreamer().emitARM64WinCFISaveFPLRX(Offset);
7583 return false;
7584}
7585
7586/// parseDirectiveSEHSaveReg
7587/// ::= .seh_save_reg
7588bool AArch64AsmParser::parseDirectiveSEHSaveReg(SMLoc L) {
7589 unsigned Reg;
7590 int64_t Offset;
7591 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7592 parseComma() || parseImmExpr(Offset))
7593 return true;
7594 getTargetStreamer().emitARM64WinCFISaveReg(Reg, Offset);
7595 return false;
7596}
7597
7598/// parseDirectiveSEHSaveRegX
7599/// ::= .seh_save_reg_x
7600bool AArch64AsmParser::parseDirectiveSEHSaveRegX(SMLoc L) {
7601 unsigned Reg;
7602 int64_t Offset;
7603 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7604 parseComma() || parseImmExpr(Offset))
7605 return true;
7606 getTargetStreamer().emitARM64WinCFISaveRegX(Reg, Offset);
7607 return false;
7608}
7609
7610/// parseDirectiveSEHSaveRegP
7611/// ::= .seh_save_regp
7612bool AArch64AsmParser::parseDirectiveSEHSaveRegP(SMLoc L) {
7613 unsigned Reg;
7614 int64_t Offset;
7615 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7616 parseComma() || parseImmExpr(Offset))
7617 return true;
7618 getTargetStreamer().emitARM64WinCFISaveRegP(Reg, Offset);
7619 return false;
7620}
7621
7622/// parseDirectiveSEHSaveRegPX
7623/// ::= .seh_save_regp_x
7624bool AArch64AsmParser::parseDirectiveSEHSaveRegPX(SMLoc L) {
7625 unsigned Reg;
7626 int64_t Offset;
7627 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::FP) ||
7628 parseComma() || parseImmExpr(Offset))
7629 return true;
7630 getTargetStreamer().emitARM64WinCFISaveRegPX(Reg, Offset);
7631 return false;
7632}
7633
7634/// parseDirectiveSEHSaveLRPair
7635/// ::= .seh_save_lrpair
7636bool AArch64AsmParser::parseDirectiveSEHSaveLRPair(SMLoc L) {
7637 unsigned Reg;
7638 int64_t Offset;
7639 L = getLoc();
7640 if (parseRegisterInRange(Reg, AArch64::X0, AArch64::X19, AArch64::LR) ||
7641 parseComma() || parseImmExpr(Offset))
7642 return true;
7643 if (check(((Reg - 19) % 2 != 0), L,
7644 "expected register with even offset from x19"))
7645 return true;
7646 getTargetStreamer().emitARM64WinCFISaveLRPair(Reg, Offset);
7647 return false;
7648}
7649
7650/// parseDirectiveSEHSaveFReg
7651/// ::= .seh_save_freg
7652bool AArch64AsmParser::parseDirectiveSEHSaveFReg(SMLoc L) {
7653 unsigned Reg;
7654 int64_t Offset;
7655 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7656 parseComma() || parseImmExpr(Offset))
7657 return true;
7658 getTargetStreamer().emitARM64WinCFISaveFReg(Reg, Offset);
7659 return false;
7660}
7661
7662/// parseDirectiveSEHSaveFRegX
7663/// ::= .seh_save_freg_x
7664bool AArch64AsmParser::parseDirectiveSEHSaveFRegX(SMLoc L) {
7665 unsigned Reg;
7666 int64_t Offset;
7667 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D15) ||
7668 parseComma() || parseImmExpr(Offset))
7669 return true;
7670 getTargetStreamer().emitARM64WinCFISaveFRegX(Reg, Offset);
7671 return false;
7672}
7673
7674/// parseDirectiveSEHSaveFRegP
7675/// ::= .seh_save_fregp
7676bool AArch64AsmParser::parseDirectiveSEHSaveFRegP(SMLoc L) {
7677 unsigned Reg;
7678 int64_t Offset;
7679 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7680 parseComma() || parseImmExpr(Offset))
7681 return true;
7682 getTargetStreamer().emitARM64WinCFISaveFRegP(Reg, Offset);
7683 return false;
7684}
7685
7686/// parseDirectiveSEHSaveFRegPX
7687/// ::= .seh_save_fregp_x
7688bool AArch64AsmParser::parseDirectiveSEHSaveFRegPX(SMLoc L) {
7689 unsigned Reg;
7690 int64_t Offset;
7691 if (parseRegisterInRange(Reg, AArch64::D0, AArch64::D8, AArch64::D14) ||
7692 parseComma() || parseImmExpr(Offset))
7693 return true;
7694 getTargetStreamer().emitARM64WinCFISaveFRegPX(Reg, Offset);
7695 return false;
7696}
7697
7698/// parseDirectiveSEHSetFP
7699/// ::= .seh_set_fp
7700bool AArch64AsmParser::parseDirectiveSEHSetFP(SMLoc L) {
7701 getTargetStreamer().emitARM64WinCFISetFP();
7702 return false;
7703}
7704
7705/// parseDirectiveSEHAddFP
7706/// ::= .seh_add_fp
7707bool AArch64AsmParser::parseDirectiveSEHAddFP(SMLoc L) {
7708 int64_t Size;
7709 if (parseImmExpr(Size))
7710 return true;
7711 getTargetStreamer().emitARM64WinCFIAddFP(Size);
7712 return false;
7713}
7714
7715/// parseDirectiveSEHNop
7716/// ::= .seh_nop
7717bool AArch64AsmParser::parseDirectiveSEHNop(SMLoc L) {
7718 getTargetStreamer().emitARM64WinCFINop();
7719 return false;
7720}
7721
7722/// parseDirectiveSEHSaveNext
7723/// ::= .seh_save_next
7724bool AArch64AsmParser::parseDirectiveSEHSaveNext(SMLoc L) {
7725 getTargetStreamer().emitARM64WinCFISaveNext();
7726 return false;
7727}
7728
7729/// parseDirectiveSEHEpilogStart
7730/// ::= .seh_startepilogue
7731bool AArch64AsmParser::parseDirectiveSEHEpilogStart(SMLoc L) {
7732 getTargetStreamer().emitARM64WinCFIEpilogStart();
7733 return false;
7734}
7735
7736/// parseDirectiveSEHEpilogEnd
7737/// ::= .seh_endepilogue
7738bool AArch64AsmParser::parseDirectiveSEHEpilogEnd(SMLoc L) {
7739 getTargetStreamer().emitARM64WinCFIEpilogEnd();
7740 return false;
7741}
7742
7743/// parseDirectiveSEHTrapFrame
7744/// ::= .seh_trap_frame
7745bool AArch64AsmParser::parseDirectiveSEHTrapFrame(SMLoc L) {
7746 getTargetStreamer().emitARM64WinCFITrapFrame();
7747 return false;
7748}
7749
7750/// parseDirectiveSEHMachineFrame
7751/// ::= .seh_pushframe
7752bool AArch64AsmParser::parseDirectiveSEHMachineFrame(SMLoc L) {
7753 getTargetStreamer().emitARM64WinCFIMachineFrame();
7754 return false;
7755}
7756
7757/// parseDirectiveSEHContext
7758/// ::= .seh_context
7759bool AArch64AsmParser::parseDirectiveSEHContext(SMLoc L) {
7760 getTargetStreamer().emitARM64WinCFIContext();
7761 return false;
7762}
7763
7764/// parseDirectiveSEHECContext
7765/// ::= .seh_ec_context
7766bool AArch64AsmParser::parseDirectiveSEHECContext(SMLoc L) {
7767 getTargetStreamer().emitARM64WinCFIECContext();
7768 return false;
7769}
7770
7771/// parseDirectiveSEHClearUnwoundToCall
7772/// ::= .seh_clear_unwound_to_call
7773bool AArch64AsmParser::parseDirectiveSEHClearUnwoundToCall(SMLoc L) {
7774 getTargetStreamer().emitARM64WinCFIClearUnwoundToCall();
7775 return false;
7776}
7777
7778/// parseDirectiveSEHPACSignLR
7779/// ::= .seh_pac_sign_lr
7780bool AArch64AsmParser::parseDirectiveSEHPACSignLR(SMLoc L) {
7781 getTargetStreamer().emitARM64WinCFIPACSignLR();
7782 return false;
7783}
7784
7785/// parseDirectiveSEHSaveAnyReg
7786/// ::= .seh_save_any_reg
7787/// ::= .seh_save_any_reg_p
7788/// ::= .seh_save_any_reg_x
7789/// ::= .seh_save_any_reg_px
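/// e.g. ".seh_save_any_reg_p q8, 16" saves the q8/q9 pair at offset 16
/// (illustrative)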
7790bool AArch64AsmParser::parseDirectiveSEHSaveAnyReg(SMLoc L, bool Paired,
7791 bool Writeback) {
7792 MCRegister Reg;
7793 SMLoc Start, End;
7794 int64_t Offset;
7795 if (check(parseRegister(Reg, Start, End), getLoc(), "expected register") ||
7796 parseComma() || parseImmExpr(Offset))
7797 return true;
7798
7799 if (Reg == AArch64::FP || Reg == AArch64::LR ||
7800 (Reg >= AArch64::X0 && Reg <= AArch64::X28)) {
7801 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7802 return Error(L, "invalid save_any_reg offset");
7803 unsigned EncodedReg;
7804 if (Reg == AArch64::FP)
7805 EncodedReg = 29;
7806 else if (Reg == AArch64::LR)
7807 EncodedReg = 30;
7808 else
7809 EncodedReg = Reg - AArch64::X0;
7810 if (Paired) {
7811 if (Reg == AArch64::LR)
7812 return Error(Start, "lr cannot be paired with another register");
7813 if (Writeback)
7814 getTargetStreamer().emitARM64WinCFISaveAnyRegIPX(EncodedReg, Offset);
7815 else
7816 getTargetStreamer().emitARM64WinCFISaveAnyRegIP(EncodedReg, Offset);
7817 } else {
7818 if (Writeback)
7819 getTargetStreamer().emitARM64WinCFISaveAnyRegIX(EncodedReg, Offset);
7820 else
7821 getTargetStreamer().emitARM64WinCFISaveAnyRegI(EncodedReg, Offset);
7822 }
7823 } else if (Reg >= AArch64::D0 && Reg <= AArch64::D31) {
7824 unsigned EncodedReg = Reg - AArch64::D0;
7825 if (Offset < 0 || Offset % (Paired || Writeback ? 16 : 8))
7826 return Error(L, "invalid save_any_reg offset");
7827 if (Paired) {
7828 if (Reg == AArch64::D31)
7829 return Error(Start, "d31 cannot be paired with another register");
7830 if (Writeback)
7831 getTargetStreamer().emitARM64WinCFISaveAnyRegDPX(EncodedReg, Offset);
7832 else
7833 getTargetStreamer().emitARM64WinCFISaveAnyRegDP(EncodedReg, Offset);
7834 } else {
7835 if (Writeback)
7836 getTargetStreamer().emitARM64WinCFISaveAnyRegDX(EncodedReg, Offset);
7837 else
7838 getTargetStreamer().emitARM64WinCFISaveAnyRegD(EncodedReg, Offset);
7839 }
7840 } else if (Reg >= AArch64::Q0 && Reg <= AArch64::Q31) {
7841 unsigned EncodedReg = Reg - AArch64::Q0;
7842 if (Offset < 0 || Offset % 16)
7843 return Error(L, "invalid save_any_reg offset");
7844 if (Paired) {
7845 if (Reg == AArch64::Q31)
7846 return Error(Start, "q31 cannot be paired with another register");
7847 if (Writeback)
7848 getTargetStreamer().emitARM64WinCFISaveAnyRegQPX(EncodedReg, Offset);
7849 else
7850 getTargetStreamer().emitARM64WinCFISaveAnyRegQP(EncodedReg, Offset);
7851 } else {
7852 if (Writeback)
7853 getTargetStreamer().emitARM64WinCFISaveAnyRegQX(EncodedReg, Offset);
7854 else
7855 getTargetStreamer().emitARM64WinCFISaveAnyRegQ(EncodedReg, Offset);
7856 }
7857 } else {
7858 return Error(Start, "save_any_reg register must be x, q or d register");
7859 }
7860 return false;
7861}
7862
7863/// parseDirectiveSEHAllocZ
7864/// ::= .seh_allocz
7865bool AArch64AsmParser::parseDirectiveSEHAllocZ(SMLoc L) {
7866 int64_t Offset;
7867 if (parseImmExpr(Offset))
7868 return true;
7869 getTargetStreamer().emitARM64WinCFIAllocZ(Offset);
7870 return false;
7871}
7872
7873/// parseDirectiveSEHSaveZReg
7874/// ::= .seh_save_zreg
7875bool AArch64AsmParser::parseDirectiveSEHSaveZReg(SMLoc L) {
7876 MCRegister RegNum;
7877 StringRef Kind;
7878 int64_t Offset;
7879 ParseStatus Res =
7880 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
7881 if (!Res.isSuccess())
7882 return true;
7883 if (check(RegNum < AArch64::Z8 || RegNum > AArch64::Z23, L,
7884 "expected register in range z8 to z23"))
7885 return true;
7886 if (parseComma() || parseImmExpr(Offset))
7887 return true;
7888 getTargetStreamer().emitARM64WinCFISaveZReg(RegNum - AArch64::Z0, Offset);
7889 return false;
7890}
7891
7892/// parseDirectiveSEHSavePReg
7893/// ::= .seh_save_preg
7894bool AArch64AsmParser::parseDirectiveSEHSavePReg(SMLoc L) {
7895 MCRegister RegNum;
7896 StringRef Kind;
7897 int64_t Offset;
7898 ParseStatus Res =
7899 tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
7900 if (!Res.isSuccess())
7901 return true;
7902 if (check(RegNum < AArch64::P4 || RegNum > AArch64::P15, L,
7903 "expected register in range p4 to p15"))
7904 return true;
7905 if (parseComma() || parseImmExpr(Offset))
7906 return true;
7907 getTargetStreamer().emitARM64WinCFISavePReg(RegNum - AArch64::P0, Offset);
7908 return false;
7909}
7910
7911bool AArch64AsmParser::parseDirectiveAeabiSubSectionHeader(SMLoc L) {
7912 // Handle parsing of .aeabi_subsection directives
7913 // - On first declaration of a subsection, expect exactly three identifiers
7914 // after `.aeabi_subsection`: the subsection name and two parameters.
7915 // - When switching to an existing subsection, it is valid to provide only
7916 // the subsection name, or the name together with the two parameters.
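 // e.g. ".aeabi_subsection aeabi_pauthabi, required, uleb128" (illustrative)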
7917 MCAsmParser &Parser = getParser();
7918
7919 // Consume the name (subsection name)
7920 StringRef SubsectionName;
7921 AArch64BuildAttributes::VendorID SubsectionNameID;
7922 if (Parser.getTok().is(AsmToken::Identifier)) {
7923 SubsectionName = Parser.getTok().getIdentifier();
7924 SubsectionNameID = AArch64BuildAttributes::getVendorID(SubsectionName);
7925 } else {
7926 Error(Parser.getTok().getLoc(), "subsection name not found");
7927 return true;
7928 }
7929 Parser.Lex();
7930
7931 std::unique_ptr<MCELFStreamer::AttributeSubSection> SubsectionExists =
7932 getTargetStreamer().getAttributesSubsectionByName(SubsectionName);
7933 // Check whether only the subsection name was provided.
7934 // If so, the user is trying to switch to a subsection that should have been
7935 // declared before.
7936 if (Parser.getTok().is(AsmToken::EndOfStatement)) {
7937 if (SubsectionExists) {
7938 getTargetStreamer().emitAttributesSubsection(
7939 SubsectionName,
7940 AArch64BuildAttributes::SubsectionOptional(
7941 SubsectionExists->IsOptional),
7942 AArch64BuildAttributes::SubsectionType(
7943 SubsectionExists->ParameterType));
7944 return false;
7945 }
7946 // If the subsection does not exist, report an error.
7947 else {
7948 Error(Parser.getTok().getLoc(),
7949 "Could not switch to subsection '" + SubsectionName +
7950 "' using subsection name, subsection has not been defined");
7951 return true;
7952 }
7953 }
7954
7955 // Otherwise, expecting 2 more parameters: consume a comma
7956 // parseComma() returns *false* on success and already calls Lex(), so there
7957 // is no need to call Lex() again.
7958 if (Parser.parseComma()) {
7959 return true;
7960 }
7961
7962 // Consume the first parameter (optionality parameter)
7963 AArch64BuildAttributes::SubsectionOptional IsOptional;
7964 // options: optional/required
7965 if (Parser.getTok().is(AsmToken::Identifier)) {
7966 StringRef Optionality = Parser.getTok().getIdentifier();
7967 IsOptional = AArch64BuildAttributes::getOptionalID(Optionality);
7968 if (AArch64BuildAttributes::OPTIONAL_NOT_FOUND == IsOptional) {
7969 Error(Parser.getTok().getLoc(),
7970 AArch64BuildAttributes::getSubsectionOptionalUnknownError() + ": " + Optionality);
7971 return true;
7972 }
7973 if (SubsectionExists) {
7974 if (IsOptional != SubsectionExists->IsOptional) {
7975 Error(Parser.getTok().getLoc(),
7976 "optionality mismatch! subsection '" + SubsectionName +
7977 "' already exists with optionality defined as '" +
7979 SubsectionExists->IsOptional) +
7980 "' and not '" +
7981 AArch64BuildAttributes::getOptionalStr(IsOptional) + "'");
7982 return true;
7983 }
7984 }
7985 } else {
7986 Error(Parser.getTok().getLoc(),
7987 "optionality parameter not found, expected required|optional");
7988 return true;
7989 }
7990 // Check for possible IsOptional unaccepted values for known subsections
7991 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID) {
7992 if (AArch64BuildAttributes::REQUIRED == IsOptional) {
7993 Error(Parser.getTok().getLoc(),
7994 "aeabi_feature_and_bits must be marked as optional");
7995 return true;
7996 }
7997 }
7998 if (AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
7999 if (AArch64BuildAttributes::OPTIONAL == IsOptional) {
8000 Error(Parser.getTok().getLoc(),
8001 "aeabi_pauthabi must be marked as required");
8002 return true;
8003 }
8004 }
8005 Parser.Lex();
8006 // consume a comma
8007 if (Parser.parseComma()) {
8008 return true;
8009 }
8010
8011 // Consume the second parameter (type parameter)
8012 AArch64BuildAttributes::SubsectionType Type;
8013 if (Parser.getTok().is(AsmToken::Identifier)) {
8014 StringRef Name = Parser.getTok().getIdentifier();
8015 Type = AArch64BuildAttributes::getTypeID(Name);
8016 if (AArch64BuildAttributes::TYPE_NOT_FOUND == Type) {
8017 Error(Parser.getTok().getLoc(),
8018 AArch64BuildAttributes::getSubsectionTypeUnknownError() + ": " + Name);
8019 return true;
8020 }
8021 if (SubsectionExists) {
8022 if (Type != SubsectionExists->ParameterType) {
8023 Error(Parser.getTok().getLoc(),
8024 "type mismatch! subsection '" + SubsectionName +
8025 "' already exists with type defined as '" +
8027 SubsectionExists->ParameterType) +
8028 "' and not '" + AArch64BuildAttributes::getTypeStr(Type) +
8029 "'");
8030 return true;
8031 }
8032 }
8033 } else {
8034 Error(Parser.getTok().getLoc(),
8035 "type parameter not found, expected uleb128|ntbs");
8036 return true;
8037 }
8038 // Check for possible unaccepted 'type' values for known subsections
8039 if (AArch64BuildAttributes::AEABI_FEATURE_AND_BITS == SubsectionNameID ||
8040 AArch64BuildAttributes::AEABI_PAUTHABI == SubsectionNameID) {
8041 if (AArch64BuildAttributes::NTBS == Type) {
8042 Error(Parser.getTok().getLoc(),
8043 SubsectionName + " must be marked as ULEB128");
8044 return true;
8045 }
8046 }
8047 Parser.Lex();
8048
8049 // Parsing finished, check for trailing tokens.
8050 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8051 Error(Parser.getTok().getLoc(), "unexpected token for AArch64 build "
8052 "attributes subsection header directive");
8053 return true;
8054 }
8055
8056 getTargetStreamer().emitAttributesSubsection(SubsectionName, IsOptional, Type);
8057
8058 return false;
8059}
8060
8061bool AArch64AsmParser::parseDirectiveAeabiAArch64Attr(SMLoc L) {
8062 // Expecting 2 Tokens: after '.aeabi_attribute', e.g.:
8063 // .aeabi_attribute (1)Tag_Feature_BTI, (2)[uleb128|ntbs]
8064 // separated by a comma.
8065 MCAsmParser &Parser = getParser();
8066
8067 std::unique_ptr<MCELFStreamer::AttributeSubSection> ActiveSubsection =
8068 getTargetStreamer().getActiveAttributesSubsection();
8069 if (nullptr == ActiveSubsection) {
8070 Error(Parser.getTok().getLoc(),
8071 "no active subsection, build attribute can not be added");
8072 return true;
8073 }
8074 StringRef ActiveSubsectionName = ActiveSubsection->VendorName;
8075 unsigned ActiveSubsectionType = ActiveSubsection->ParameterType;
8076
8077 unsigned ActiveSubsectionID = AArch64BuildAttributes::VENDOR_UNKNOWN;
8078 if (AArch64BuildAttributes::getVendorName(
8079 AArch64BuildAttributes::AEABI_PAUTHABI) == ActiveSubsectionName)
8080 ActiveSubsectionID = AArch64BuildAttributes::AEABI_PAUTHABI;
8081 if (AArch64BuildAttributes::getVendorName(
8082 AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) ==
8083 ActiveSubsectionName)
8084 ActiveSubsectionID = AArch64BuildAttributes::AEABI_FEATURE_AND_BITS;
8085
8086 StringRef TagStr = "";
8087 unsigned Tag;
8088 if (Parser.getTok().is(AsmToken::Integer)) {
8089 Tag = getTok().getIntVal();
8090 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8091 TagStr = Parser.getTok().getIdentifier();
8092 switch (ActiveSubsectionID) {
8093 case AArch64BuildAttributes::VENDOR_UNKNOWN:
8094 // Tag was provided as an unrecognized string instead of an unsigned
8095 // integer
8096 Error(Parser.getTok().getLoc(), "unrecognized Tag: '" + TagStr +
8097 "' \nExcept for public subsections, "
8098 "tags have to be an unsigned int.");
8099 return true;
8100 break;
8101 case AArch64BuildAttributes::AEABI_PAUTHABI:
8102 Tag = AArch64BuildAttributes::getPauthABITagsID(TagStr);
8103 if (AArch64BuildAttributes::PAUTHABI_TAG_NOT_FOUND == Tag) {
8104 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8105 TagStr + "' for subsection '" +
8106 ActiveSubsectionName + "'");
8107 return true;
8108 }
8109 break;
8110 case AArch64BuildAttributes::AEABI_FEATURE_AND_BITS:
8111 Tag = AArch64BuildAttributes::getFeatureAndBitsTagsID(TagStr);
8112 if (AArch64BuildAttributes::FEATURE_AND_BITS_TAG_NOT_FOUND == Tag) {
8113 Error(Parser.getTok().getLoc(), "unknown AArch64 build attribute '" +
8114 TagStr + "' for subsection '" +
8115 ActiveSubsectionName + "'");
8116 return true;
8117 }
8118 break;
8119 }
8120 } else {
8121 Error(Parser.getTok().getLoc(), "AArch64 build attributes tag not found");
8122 return true;
8123 }
8124 Parser.Lex();
8125 // consume a comma
8126 // parseComma() returns *false* on success and calls Lex(), so there is no
8127 // need to call Lex() again.
8128 if (Parser.parseComma()) {
8129 return true;
8130 }
8131
8132 // Consume the second parameter (attribute value)
8133 unsigned ValueInt = unsigned(-1);
8134 std::string ValueStr = "";
8135 if (Parser.getTok().is(AsmToken::Integer)) {
8136 if (AArch64BuildAttributes::NTBS == ActiveSubsectionType) {
8137 Error(
8138 Parser.getTok().getLoc(),
8139 "active subsection type is NTBS (string), found ULEB128 (unsigned)");
8140 return true;
8141 }
8142 ValueInt = getTok().getIntVal();
8143 } else if (Parser.getTok().is(AsmToken::Identifier)) {
8144 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8145 Error(
8146 Parser.getTok().getLoc(),
8147 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8148 return true;
8149 }
8150 ValueStr = Parser.getTok().getIdentifier();
8151 } else if (Parser.getTok().is(AsmToken::String)) {
8152 if (AArch64BuildAttributes::ULEB128 == ActiveSubsectionType) {
8153 Error(
8154 Parser.getTok().getLoc(),
8155 "active subsection type is ULEB128 (unsigned), found NTBS (string)");
8156 return true;
8157 }
8158 ValueStr = Parser.getTok().getString();
8159 } else {
8160 Error(Parser.getTok().getLoc(), "AArch64 build attributes value not found");
8161 return true;
8162 }
8163 // Check for possible unaccepted values for known tags
8164 // (AEABI_FEATURE_AND_BITS)
8165 if (ActiveSubsectionID == AArch64BuildAttributes::AEABI_FEATURE_AND_BITS) {
8166 if (0 != ValueInt && 1 != ValueInt) {
8167 Error(Parser.getTok().getLoc(),
8168 "unknown AArch64 build attributes Value for Tag '" + TagStr +
8169 "' options are 0|1");
8170 return true;
8171 }
8172 }
8173 Parser.Lex();
8174
8175 // Parsing finished. Check for trailing tokens.
8176 if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
8177 Error(Parser.getTok().getLoc(),
8178 "unexpected token for AArch64 build attributes tag and value "
8179 "attribute directive");
8180 return true;
8181 }
8182
8183 if (unsigned(-1) != ValueInt) {
8184 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, ValueInt, "");
8185 }
8186 if ("" != ValueStr) {
8187 getTargetStreamer().emitAttribute(ActiveSubsectionName, Tag, unsigned(-1),
8188 ValueStr);
8189 }
8190 return false;
8191}
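An illustrative sequence using the directive parsed above. Tag_Feature_BTI comes from the function's own comment; the second tag name is assumed from the build-attributes tables, and the 0/1 restriction matches the check on AEABI_FEATURE_AND_BITS values:

    .aeabi_subsection aeabi_feature_and_bits, optional, uleb128
    .aeabi_attribute Tag_Feature_BTI, 1
    .aeabi_attribute Tag_Feature_PAC, 0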
8192
8193bool AArch64AsmParser::parseDataExpr(const MCExpr *&Res) {
8194 SMLoc EndLoc;
8195
8196 if (getParser().parseExpression(Res))
8197 return true;
8198 MCAsmParser &Parser = getParser();
8199 if (!parseOptionalToken(AsmToken::At))
8200 return false;
8201 if (getLexer().getKind() != AsmToken::Identifier)
8202 return Error(getLoc(), "expected relocation specifier");
8203
8204 std::string Identifier = Parser.getTok().getIdentifier().lower();
8205 SMLoc Loc = getLoc();
8206 Lex();
8207 if (Identifier == "auth")
8208 return parseAuthExpr(Res, EndLoc);
8209
8210 auto Spec = AArch64::S_None;
8211 if (STI->getTargetTriple().isOSBinFormatMachO()) {
8212 if (Identifier == "got")
8213 Spec = AArch64::S_MACHO_GOT;
8214 } else {
8215 // Unofficial, experimental syntax that will be changed.
8216 if (Identifier == "gotpcrel")
8217 Spec = AArch64::S_GOTPCREL;
8218 else if (Identifier == "plt")
8219 Spec = AArch64::S_PLT;
8220 }
8221 if (Spec == AArch64::S_None)
8222 return Error(Loc, "invalid relocation specifier");
8223 if (auto *SRE = dyn_cast<MCSymbolRefExpr>(Res))
8224 Res = MCSymbolRefExpr::create(&SRE->getSymbol(), Spec, getContext(),
8225 SRE->getLoc());
8226 else
8227 return Error(Loc, "@ specifier only allowed after a symbol");
8228
8229 for (;;) {
8230 std::optional<MCBinaryExpr::Opcode> Opcode;
8231 if (parseOptionalToken(AsmToken::Plus))
8232 Opcode = MCBinaryExpr::Add;
8233 else if (parseOptionalToken(AsmToken::Minus))
8234 Opcode = MCBinaryExpr::Sub;
8235 else
8236 break;
8237 const MCExpr *Term;
8238 if (getParser().parsePrimaryExpr(Term, EndLoc, nullptr))
8239 return true;
8240 Res = MCBinaryExpr::create(*Opcode, Res, Term, getContext(), Res->getLoc());
8241 }
8242 return false;
8243}
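For illustration, these specifiers are written after '@' in data directives. On ELF the forms below follow the unofficial, experimental syntax noted in the code above, so treat them as a sketch of what the parser accepts rather than stable syntax:

    .word callee@plt
    .word global@gotpcrel + 4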
8244
8245/// parseAuthExpr
8246/// ::= _sym@AUTH(ib,123[,addr])
8247/// ::= (_sym + 5)@AUTH(ib,123[,addr])
8248/// ::= (_sym - 5)@AUTH(ib,123[,addr])
8249bool AArch64AsmParser::parseAuthExpr(const MCExpr *&Res, SMLoc &EndLoc) {
8250 MCAsmParser &Parser = getParser();
8251 MCContext &Ctx = getContext();
8252 AsmToken Tok = Parser.getTok();
8253
8254 // At this point, we encountered "<id>@AUTH". There is no fallback anymore.
8255 if (parseToken(AsmToken::LParen, "expected '('"))
8256 return true;
8257
8258 if (Parser.getTok().isNot(AsmToken::Identifier))
8259 return TokError("expected key name");
8260
8261 StringRef KeyStr = Parser.getTok().getIdentifier();
8262 auto KeyIDOrNone = AArch64StringToPACKeyID(KeyStr);
8263 if (!KeyIDOrNone)
8264 return TokError("invalid key '" + KeyStr + "'");
8265 Parser.Lex();
8266
8267 if (parseToken(AsmToken::Comma, "expected ','"))
8268 return true;
8269
8270 if (Parser.getTok().isNot(AsmToken::Integer))
8271 return TokError("expected integer discriminator");
8272 int64_t Discriminator = Parser.getTok().getIntVal();
8273
8274 if (!isUInt<16>(Discriminator))
8275 return TokError("integer discriminator " + Twine(Discriminator) +
8276 " out of range [0, 0xFFFF]");
8277 Parser.Lex();
8278
8279 bool UseAddressDiversity = false;
8280 if (Parser.getTok().is(AsmToken::Comma)) {
8281 Parser.Lex();
8282 if (Parser.getTok().isNot(AsmToken::Identifier) ||
8283 Parser.getTok().getIdentifier() != "addr")
8284 return TokError("expected 'addr'");
8285 UseAddressDiversity = true;
8286 Parser.Lex();
8287 }
8288
8289 EndLoc = Parser.getTok().getEndLoc();
8290 if (parseToken(AsmToken::RParen, "expected ')'"))
8291 return true;
8292
8293 Res = AArch64AuthMCExpr::create(Res, Discriminator, *KeyIDOrNone,
8294 UseAddressDiversity, Ctx, Res->getLoc());
8295 return false;
8296}
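A concrete data-directive use of the grammar documented above; the valid key names are ia, ib, da and db, and the discriminator must fit in 16 bits:

    .quad _sym@AUTH(ib, 123)
    .quad _sym@AUTH(da, 42, addr)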
8297
8298bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
8299 AArch64::Specifier &ELFSpec,
8300 AArch64::Specifier &DarwinSpec,
8301 int64_t &Addend) {
8302 ELFSpec = AArch64::S_INVALID;
8303 DarwinSpec = AArch64::S_None;
8304 Addend = 0;
8305
8306 if (auto *AE = dyn_cast<MCSpecifierExpr>(Expr)) {
8307 ELFSpec = AE->getSpecifier();
8308 Expr = AE->getSubExpr();
8309 }
8310
8311 const MCSymbolRefExpr *SE = dyn_cast<MCSymbolRefExpr>(Expr);
8312 if (SE) {
8313 // It's a simple symbol reference with no addend.
8314 DarwinSpec = AArch64::Specifier(SE->getKind());
8315 return true;
8316 }
8317
8318 // Check that it looks like a symbol + an addend
8319 MCValue Res;
8320 bool Relocatable = Expr->evaluateAsRelocatable(Res, nullptr);
8321 if (!Relocatable || Res.getSubSym())
8322 return false;
8323
8324 // Treat expressions with an ELFSpec (like ":abs_g1:3", or
8325 // ":abs_g1:x" where x is constant) as symbolic even if there is no symbol.
8326 if (!Res.getAddSym() && ELFSpec == AArch64::S_INVALID)
8327 return false;
8328
8329 if (Res.getAddSym())
8330 DarwinSpec = AArch64::Specifier(Res.getSpecifier());
8331 Addend = Res.getConstant();
8332
8333 // It's some symbol reference + a constant addend, but really
8334 // shouldn't use both Darwin and ELF syntax.
8335 return ELFSpec == AArch64::S_INVALID || DarwinSpec == AArch64::S_None;
8336}
8337
8338/// Force static initialization.
8339 extern "C" LLVM_ABI LLVM_EXTERNAL_VISIBILITY void
8340 LLVMInitializeAArch64AsmParser() {
8341 RegisterMCAsmParser<AArch64AsmParser> X(getTheAArch64leTarget());
8342 RegisterMCAsmParser<AArch64AsmParser> Y(getTheAArch64beTarget());
8343 RegisterMCAsmParser<AArch64AsmParser> Z(getTheARM64Target());
8344 RegisterMCAsmParser<AArch64AsmParser> W(getTheARM64_32Target());
8345 RegisterMCAsmParser<AArch64AsmParser> V(getTheAArch64_32Target());
8346 }
8347
8348#define GET_REGISTER_MATCHER
8349#define GET_SUBTARGET_FEATURE_NAME
8350#define GET_MATCHER_IMPLEMENTATION
8351#define GET_MNEMONIC_SPELL_CHECKER
8352#include "AArch64GenAsmMatcher.inc"
8353
8354// Define this matcher function after the auto-generated include so we
8355// have the match class enum definitions.
8356unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
8357 unsigned Kind) {
8358 AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
8359
8360 auto MatchesOpImmediate = [&](int64_t ExpectedVal) -> MatchResultTy {
8361 if (!Op.isImm())
8362 return Match_InvalidOperand;
8363 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op.getImm());
8364 if (!CE)
8365 return Match_InvalidOperand;
8366 if (CE->getValue() == ExpectedVal)
8367 return Match_Success;
8368 return Match_InvalidOperand;
8369 };
8370
8371 switch (Kind) {
8372 default:
8373 return Match_InvalidOperand;
8374 case MCK_MPR:
8375 // If the Kind is a token for the MPR register class which has the "za"
8376 // register (SME accumulator array), check if the asm is a literal "za"
8377 // token. This is for the "smstart za" alias that defines the register
8378 // as a literal token.
8379 if (Op.isTokenEqual("za"))
8380 return Match_Success;
8381 return Match_InvalidOperand;
8382
8383 // If the kind is a token for a literal immediate, check if our asm operand
8384 // matches. This is for InstAliases which have a fixed-value immediate in
8385 // the asm string, such as hints which are parsed into a specific
8386 // instruction definition.
8387#define MATCH_HASH(N) \
8388 case MCK__HASH_##N: \
8389 return MatchesOpImmediate(N);
8390 MATCH_HASH(0)
8391 MATCH_HASH(1)
8392 MATCH_HASH(2)
8393 MATCH_HASH(3)
8394 MATCH_HASH(4)
8395 MATCH_HASH(6)
8396 MATCH_HASH(7)
8397 MATCH_HASH(8)
8398 MATCH_HASH(10)
8399 MATCH_HASH(12)
8400 MATCH_HASH(14)
8401 MATCH_HASH(16)
8402 MATCH_HASH(24)
8403 MATCH_HASH(25)
8404 MATCH_HASH(26)
8405 MATCH_HASH(27)
8406 MATCH_HASH(28)
8407 MATCH_HASH(29)
8408 MATCH_HASH(30)
8409 MATCH_HASH(31)
8410 MATCH_HASH(32)
8411 MATCH_HASH(40)
8412 MATCH_HASH(48)
8413 MATCH_HASH(64)
8414#undef MATCH_HASH
8415#define MATCH_HASH_MINUS(N) \
8416 case MCK__HASH__MINUS_##N: \
8417 return MatchesOpImmediate(-N);
8418 MATCH_HASH_MINUS(4)
8419 MATCH_HASH_MINUS(8)
8420 MATCH_HASH_MINUS(16)
8421#undef MATCH_HASH_MINUS
8422 }
8423}
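For illustration, the first special case above is what lets the SME alias below match, with "za" written as a literal token rather than a register operand:

    smstart za        // "za" parsed as a literal token, validated via MCK_MPR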
8424
8425ParseStatus AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
8426
8427 SMLoc S = getLoc();
8428
8429 if (getTok().isNot(AsmToken::Identifier))
8430 return Error(S, "expected register");
8431
8432 MCRegister FirstReg;
8433 ParseStatus Res = tryParseScalarRegister(FirstReg);
8434 if (!Res.isSuccess())
8435 return Error(S, "expected first even register of a consecutive same-size "
8436 "even/odd register pair");
8437
8438 const MCRegisterClass &WRegClass =
8439 AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
8440 const MCRegisterClass &XRegClass =
8441 AArch64MCRegisterClasses[AArch64::GPR64RegClassID];
8442
8443 bool isXReg = XRegClass.contains(FirstReg),
8444 isWReg = WRegClass.contains(FirstReg);
8445 if (!isXReg && !isWReg)
8446 return Error(S, "expected first even register of a consecutive same-size "
8447 "even/odd register pair");
8448
8449 const MCRegisterInfo *RI = getContext().getRegisterInfo();
8450 unsigned FirstEncoding = RI->getEncodingValue(FirstReg);
8451
8452 if (FirstEncoding & 0x1)
8453 return Error(S, "expected first even register of a consecutive same-size "
8454 "even/odd register pair");
8455
8456 if (getTok().isNot(AsmToken::Comma))
8457 return Error(getLoc(), "expected comma");
8458 // Eat the comma
8459 Lex();
8460
8461 SMLoc E = getLoc();
8462 MCRegister SecondReg;
8463 Res = tryParseScalarRegister(SecondReg);
8464 if (!Res.isSuccess())
8465 return Error(E, "expected second odd register of a consecutive same-size "
8466 "even/odd register pair");
8467
8468 if (RI->getEncodingValue(SecondReg) != FirstEncoding + 1 ||
8469 (isXReg && !XRegClass.contains(SecondReg)) ||
8470 (isWReg && !WRegClass.contains(SecondReg)))
8471 return Error(E, "expected second odd register of a consecutive same-size "
8472 "even/odd register pair");
8473
8474 MCRegister Pair;
8475 if (isXReg) {
8476 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube64,
8477 &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
8478 } else {
8479 Pair = RI->getMatchingSuperReg(FirstReg, AArch64::sube32,
8480 &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
8481 }
8482
8483 Operands.push_back(AArch64Operand::CreateReg(Pair, RegKind::Scalar, S,
8484 getLoc(), getContext()));
8485
8486 return ParseStatus::Success;
8487}
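For illustration, consecutive even/odd pairs of this kind are how the CASP compare-and-swap-pair instructions are written:

    casp  x0, x1, x2, x3, [x5]
    caspa w4, w5, w6, w7, [x2]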
8488
8489template <bool ParseShiftExtend, bool ParseSuffix>
8490ParseStatus AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
8491 const SMLoc S = getLoc();
8492 // Check for a SVE vector register specifier first.
8493 MCRegister RegNum;
8494 StringRef Kind;
8495
8496 ParseStatus Res =
8497 tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
8498
8499 if (!Res.isSuccess())
8500 return Res;
8501
8502 if (ParseSuffix && Kind.empty())
8503 return ParseStatus::NoMatch;
8504
8505 const auto &KindRes = parseVectorKind(Kind, RegKind::SVEDataVector);
8506 if (!KindRes)
8507 return ParseStatus::NoMatch;
8508
8509 unsigned ElementWidth = KindRes->second;
8510
8511 // No shift/extend is the default.
8512 if (!ParseShiftExtend || getTok().isNot(AsmToken::Comma)) {
8513 Operands.push_back(AArch64Operand::CreateVectorReg(
8514 RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
8515
8516 ParseStatus Res = tryParseVectorIndex(Operands);
8517 if (Res.isFailure())
8518 return ParseStatus::Failure;
8519 return ParseStatus::Success;
8520 }
8521
8522 // Eat the comma
8523 Lex();
8524
8525 // Match the shift
8526 SmallVector<std::unique_ptr<MCParsedAsmOperand>, 1> ExtOpnd;
8527 Res = tryParseOptionalShiftExtend(ExtOpnd);
8528 if (!Res.isSuccess())
8529 return Res;
8530
8531 auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
8532 Operands.push_back(AArch64Operand::CreateVectorReg(
8533 RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
8534 getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
8535 Ext->hasShiftExtendAmount()));
8536
8537 return ParseStatus::Success;
8538}
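For illustration, the optional shift/extend parsed here is the one written on SVE gather/scatter vector offsets, e.g.:

    ld1d { z0.d }, p0/z, [x1, z2.d, lsl #3]
    ld1w { z1.s }, p1/z, [x0, z3.s, uxtw #2]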
8539
8540ParseStatus AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
8541 MCAsmParser &Parser = getParser();
8542
8543 SMLoc SS = getLoc();
8544 const AsmToken &TokE = getTok();
8545 bool IsHash = TokE.is(AsmToken::Hash);
8546
8547 if (!IsHash && TokE.isNot(AsmToken::Identifier))
8548 return ParseStatus::NoMatch;
8549
8550 int64_t Pattern;
8551 if (IsHash) {
8552 Lex(); // Eat hash
8553
8554 // Parse the immediate operand.
8555 const MCExpr *ImmVal;
8556 SS = getLoc();
8557 if (Parser.parseExpression(ImmVal))
8558 return ParseStatus::Failure;
8559
8560 auto *MCE = dyn_cast<MCConstantExpr>(ImmVal);
8561 if (!MCE)
8562 return TokError("invalid operand for instruction");
8563
8564 Pattern = MCE->getValue();
8565 } else {
8566 // Parse the pattern
8567 auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
8568 if (!Pat)
8569 return ParseStatus::NoMatch;
8570
8571 Lex();
8572 Pattern = Pat->Encoding;
8573 assert(Pattern >= 0 && Pattern < 32);
8574 }
8575
8576 Operands.push_back(
8577 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8578 SS, getLoc(), getContext()));
8579
8580 return ParseStatus::Success;
8581}
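For illustration, a pattern operand can be spelled either by name or as a raw '#' immediate in [0, 31]:

    ptrue p0.s, vl64
    cntb  x0, pow2
    ptrue p1.b, #31       // raw encoding form of "all"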
8582
8583ParseStatus
8584AArch64AsmParser::tryParseSVEVecLenSpecifier(OperandVector &Operands) {
8585 int64_t Pattern;
8586 SMLoc SS = getLoc();
8587 const AsmToken &TokE = getTok();
8588 // Parse the pattern
8589 auto Pat = AArch64SVEVecLenSpecifier::lookupSVEVECLENSPECIFIERByName(
8590 TokE.getString());
8591 if (!Pat)
8592 return ParseStatus::NoMatch;
8593
8594 Lex();
8595 Pattern = Pat->Encoding;
8596 assert(Pattern >= 0 && Pattern <= 1 && "Pattern does not exist");
8597
8598 Operands.push_back(
8599 AArch64Operand::CreateImm(MCConstantExpr::create(Pattern, getContext()),
8600 SS, getLoc(), getContext()));
8601
8602 return ParseStatus::Success;
8603}
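For illustration only (instruction spelling assumed, not verified here): the two specifiers this routine accepts are the SME2 vector-length multipliers, written as a trailing operand such as:

    whilelt pn8.h, x0, x1, vlx2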
8604
8605ParseStatus AArch64AsmParser::tryParseGPR64x8(OperandVector &Operands) {
8606 SMLoc SS = getLoc();
8607
8608 MCRegister XReg;
8609 if (!tryParseScalarRegister(XReg).isSuccess())
8610 return ParseStatus::NoMatch;
8611
8612 MCContext &ctx = getContext();
8613 const MCRegisterInfo *RI = ctx.getRegisterInfo();
8614 MCRegister X8Reg = RI->getMatchingSuperReg(
8615 XReg, AArch64::x8sub_0,
8616 &AArch64MCRegisterClasses[AArch64::GPR64x8ClassRegClassID]);
8617 if (!X8Reg)
8618 return Error(SS,
8619 "expected an even-numbered x-register in the range [x0,x22]");
8620
8621 Operands.push_back(
8622 AArch64Operand::CreateReg(X8Reg, RegKind::Scalar, SS, getLoc(), ctx));
8623 return ParseStatus::Success;
8624}
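For illustration, GPR64x8 tuples are the operands of the LS64 64-byte load/store instructions, which name the tuple by its even-numbered first register:

    ld64b x0, [x13]
    st64b x8, [x13]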
8625
8626ParseStatus AArch64AsmParser::tryParseImmRange(OperandVector &Operands) {
8627 SMLoc S = getLoc();
8628
8629 if (getTok().isNot(AsmToken::Integer))
8630 return ParseStatus::NoMatch;
8631
8632 if (getLexer().peekTok().isNot(AsmToken::Colon))
8633 return ParseStatus::NoMatch;
8634
8635 const MCExpr *ImmF;
8636 if (getParser().parseExpression(ImmF))
8637 return ParseStatus::NoMatch;
8638
8639 if (getTok().isNot(AsmToken::Colon))
8640 return ParseStatus::NoMatch;
8641
8642 Lex(); // Eat ':'
8643 if (getTok().isNot(AsmToken::Integer))
8644 return ParseStatus::NoMatch;
8645
8646 SMLoc E = getTok().getLoc();
8647 const MCExpr *ImmL;
8648 if (getParser().parseExpression(ImmL))
8649 return ParseStatus::NoMatch;
8650
8651 unsigned ImmFVal = cast<MCConstantExpr>(ImmF)->getValue();
8652 unsigned ImmLVal = cast<MCConstantExpr>(ImmL)->getValue();
8653
8654 Operands.push_back(
8655 AArch64Operand::CreateImmRange(ImmFVal, ImmLVal, S, E, getContext()));
8656 return ParseStatus::Success;
8657}
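For illustration only (exact instruction syntax assumed): first:last immediate ranges of this shape appear in SME2 multi-vector ZA moves, e.g.:

    mova { z0.b, z1.b }, za0h.b[w12, 0:1]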
8658
8659template <int Adj>
8660ParseStatus AArch64AsmParser::tryParseAdjImm0_63(OperandVector &Operands) {
8661 SMLoc S = getLoc();
8662
8663 parseOptionalToken(AsmToken::Hash);
8664 bool IsNegative = parseOptionalToken(AsmToken::Minus);
8665
8666 if (getTok().isNot(AsmToken::Integer))
8667 return ParseStatus::NoMatch;
8668
8669 const MCExpr *Ex;
8670 if (getParser().parseExpression(Ex))
8671 return ParseStatus::NoMatch;
8672
8673 int64_t Imm = dyn_cast<MCConstantExpr>(Ex)->getValue();
8674 if (IsNegative)
8675 Imm = -Imm;
8676
8677 // We want an adjusted immediate in the range [0, 63]. If we don't have one,
8678 // return a value that is certain to trigger an error message about an invalid
8679 // immediate range instead of a non-descriptive invalid operand error.
8680 static_assert(Adj == 1 || Adj == -1, "Unsafe immediate adjustment");
8681 if (Imm == INT64_MIN || Imm == INT64_MAX || Imm + Adj < 0 || Imm + Adj > 63)
8682 Imm = -2;
8683 else
8684 Imm += Adj;
8685
8686 SMLoc E = SMLoc::getFromPointer(getLoc().getPointer() - 1);
8687 Operands.push_back(AArch64Operand::CreateImm(
8688 MCConstantExpr::create(Imm, getContext()), S, E, getContext()));
8689
8690 return ParseStatus::Success;
8691}