static PrefixInfo CreateFromInst(const MCInst &Inst, uint64_t TSFlags) {
    case AArch64::MOVPRFX_ZZ:
    case AArch64::MOVPRFX_ZPmZ_B:
    case AArch64::MOVPRFX_ZPmZ_H:
    case AArch64::MOVPRFX_ZPmZ_S:
    case AArch64::MOVPRFX_ZPmZ_D:
      Prefix.Predicated = true;
      assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
             "No destructive element size set for movprfx");
    case AArch64::MOVPRFX_ZPzZ_B:
    case AArch64::MOVPRFX_ZPzZ_H:
    case AArch64::MOVPRFX_ZPzZ_S:
    case AArch64::MOVPRFX_ZPzZ_D:
      Prefix.Active = true;
      Prefix.Predicated = true;
      assert(Prefix.ElementSize != AArch64::ElementSizeNone &&
             "No destructive element size set for movprfx");

  PrefixInfo() : Active(false), Predicated(false) {}
  bool isActive() const { return Active; }
  unsigned getElementSize() const {
  unsigned getDstReg() const { return Dst; }
  unsigned getPgReg() const {
  unsigned ElementSize;

  SMLoc getLoc() const { return getParser().getTok().getLoc(); }
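  // Added illustration (not part of the original listing): validateInstruction()
  // further down keeps a one-instruction history of MOVPRFX prefixes in a
  // `NextPrefix` member.  A minimal sketch of that flow, assuming the member and
  // the accessors above:
  //
  //   PrefixInfo Prefix = NextPrefix;                          // prefix seen before this inst
  //   NextPrefix = PrefixInfo::CreateFromInst(Inst, TSFlags);  // remember if *this* is a movprfx
  //   if (Prefix.isActive() && Dest != Prefix.getDstReg())
  //     return Error(IDLoc, "instruction is unpredictable when following a"
  //                         " movprfx writing to a different destination");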
  bool parseCondCode(OperandVector &Operands, bool invertCondCode);
  bool parseSymbolicImmVal(const MCExpr *&ImmVal);
                    bool invertCondCode);
  bool showMatchError(SMLoc Loc, unsigned ErrCode, uint64_t ErrorInfo,
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveArchExtension(SMLoc L);
  bool parseDirectiveCPU(SMLoc L);
  bool parseDirectiveInst(SMLoc L);
  bool parseDirectiveTLSDescCall(SMLoc L);
  bool parseDirectiveLtorg(SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveCFINegateRAState();
  bool parseDirectiveCFIBKeyFrame();
  bool validateInstruction(MCInst &Inst, SMLoc &IDLoc,
  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                               bool MatchingInlineAsm) override;
#define GET_ASSEMBLER_HEADER
#include "AArch64GenAsmMatcher.inc"

  template <bool IsSVEPrefetch = false>
  template<bool AddFPZeroAsLiteral>
  template <bool ParseShiftExtend,
  template <bool ParseShiftExtend, bool ParseSuffix>
  template <RegKind VectorKind>
                               bool ExpectMatch = false);

  enum AArch64MatchResultTy {
    Match_InvalidSuffix = FIRST_TARGET_MATCH_RESULT_TY,
#define GET_OPERAND_DIAGNOSTIC_TYPES
#include "AArch64GenAsmMatcher.inc"

    setAvailableFeatures(ComputeAvailableFeatures(getSTI().getFeatureBits()));
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc) override;
  bool ParseDirective(AsmToken DirectiveID) override;
                                      unsigned Kind) override;
  static bool classifySymbolRef(const MCExpr *Expr,

  SMLoc StartLoc, EndLoc;
  struct ShiftExtendOp {
    bool HasExplicitAmount;

    ShiftExtendOp ShiftExtend;

  struct VectorListOp {
    unsigned NumElements;
    unsigned ElementWidth;

  struct VectorIndexOp {

  struct ShiftedImmOp {
    unsigned ShiftAmount;

    struct VectorListOp VectorList;
    struct VectorIndexOp VectorIndex;
    struct ShiftedImmOp ShiftedImm;
    struct CondCodeOp CondCode;
    struct FPImmOp FPImm;
    struct BarrierOp Barrier;
    struct SysRegOp SysReg;
    struct SysCRImmOp SysCRImm;
    struct PrefetchOp Prefetch;
    struct PSBHintOp PSBHint;
    struct BTIHintOp BTIHint;
    struct ShiftExtendOp ShiftExtend;

  AArch64Operand(KindTy K, MCContext &Ctx) : Kind(K), Ctx(Ctx) {}

    StartLoc = o.StartLoc;
    ShiftedImm = o.ShiftedImm;
    VectorList = o.VectorList;
    VectorIndex = o.VectorIndex;
    SysCRImm = o.SysCRImm;
    ShiftExtend = o.ShiftExtend;
483 SMLoc getStartLoc()
const override {
return StartLoc; }
485 SMLoc getEndLoc()
const override {
return EndLoc; }
488 assert(Kind == k_Token &&
"Invalid access!");
492 bool isTokenSuffix()
const {
493 assert(Kind == k_Token &&
"Invalid access!");
497 const MCExpr *getImm()
const {
498 assert(Kind == k_Immediate &&
"Invalid access!");
502 const MCExpr *getShiftedImmVal()
const {
503 assert(Kind == k_ShiftedImm &&
"Invalid access!");
504 return ShiftedImm.Val;
507 unsigned getShiftedImmShift()
const {
508 assert(Kind == k_ShiftedImm &&
"Invalid access!");
509 return ShiftedImm.ShiftAmount;
513 assert(Kind == k_CondCode &&
"Invalid access!");
518 assert (Kind == k_FPImm &&
"Invalid access!");
522 bool getFPImmIsExact()
const {
523 assert (Kind == k_FPImm &&
"Invalid access!");
524 return FPImm.IsExact;
527 unsigned getBarrier()
const {
528 assert(Kind == k_Barrier &&
"Invalid access!");
533 assert(Kind == k_Barrier &&
"Invalid access!");
537 unsigned getReg()
const override {
538 assert(Kind == k_Register &&
"Invalid access!");
543 assert(Kind == k_Register &&
"Invalid access!");
544 return Reg.EqualityTy;
547 unsigned getVectorListStart()
const {
548 assert(Kind == k_VectorList &&
"Invalid access!");
549 return VectorList.RegNum;
552 unsigned getVectorListCount()
const {
553 assert(Kind == k_VectorList &&
"Invalid access!");
554 return VectorList.Count;
557 unsigned getVectorIndex()
const {
558 assert(Kind == k_VectorIndex &&
"Invalid access!");
559 return VectorIndex.Val;
563 assert(Kind == k_SysReg &&
"Invalid access!");
564 return StringRef(SysReg.Data, SysReg.Length);
567 unsigned getSysCR()
const {
568 assert(Kind == k_SysCR &&
"Invalid access!");
572 unsigned getPrefetch()
const {
573 assert(Kind == k_Prefetch &&
"Invalid access!");
577 unsigned getPSBHint()
const {
578 assert(Kind == k_PSBHint &&
"Invalid access!");
583 assert(Kind == k_PSBHint &&
"Invalid access!");
584 return StringRef(PSBHint.Data, PSBHint.Length);
587 unsigned getBTIHint()
const {
588 assert(Kind == k_BTIHint &&
"Invalid access!");
593 assert(Kind == k_BTIHint &&
"Invalid access!");
594 return StringRef(BTIHint.Data, BTIHint.Length);
598 assert(Kind == k_Prefetch &&
"Invalid access!");
603 if (Kind == k_ShiftExtend)
604 return ShiftExtend.Type;
605 if (Kind == k_Register)
606 return Reg.ShiftExtend.Type;
610 unsigned getShiftExtendAmount()
const {
611 if (Kind == k_ShiftExtend)
612 return ShiftExtend.Amount;
613 if (Kind == k_Register)
614 return Reg.ShiftExtend.Amount;
618 bool hasShiftExtendAmount()
const {
619 if (Kind == k_ShiftExtend)
620 return ShiftExtend.HasExplicitAmount;
621 if (Kind == k_Register)
622 return Reg.ShiftExtend.HasExplicitAmount;
626 bool isImm()
const override {
return Kind == k_Immediate; }
627 bool isMem()
const override {
return false; }
629 bool isUImm6()
const {
636 return (Val >= 0 && Val < 64);
  template <int Width> bool isSImm() const { return isSImmScaled<Width, 1>(); }
642 return isImmScaled<Bits, Scale>(
true);
646 return isImmScaled<Bits, Scale>(
false);
649 template <
int Bits,
int Scale>
658 int64_t MinVal, MaxVal;
660 int64_t Shift =
Bits - 1;
661 MinVal = (int64_t(1) << Shift) * -Scale;
662 MaxVal = ((int64_t(1) << Shift) - 1) * Scale;
665 MaxVal = ((int64_t(1) <<
Bits) - 1) * Scale;
669 if (Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0)
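  // Added illustration (not part of the original listing): the same bounds check as
  // a standalone sketch with a hypothetical name, assuming the unsigned case starts
  // at zero.  For a signed field with Bits=4 and Scale=16 it accepts multiples of 16
  // in [-128, 112]; the unsigned variant accepts [0, 240].
  static bool fitsScaledImm(int64_t Val, unsigned Bits, int64_t Scale, bool Signed) {
    int64_t MinVal, MaxVal;
    if (Signed) {
      int64_t Shift = Bits - 1;
      MinVal = (int64_t(1) << Shift) * -Scale;       // e.g. Bits=4, Scale=16 -> -128
      MaxVal = ((int64_t(1) << Shift) - 1) * Scale;  // e.g. Bits=4, Scale=16 -> 112
    } else {
      MinVal = 0;
      MaxVal = ((int64_t(1) << Bits) - 1) * Scale;   // e.g. Bits=4, Scale=16 -> 240
    }
    return Val >= MinVal && Val <= MaxVal && (Val % Scale) == 0;
  }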
681 int64_t Val = MCE->getValue();
682 if (Val >= 0 && Val < 32)
687 bool isSymbolicUImm12Offset(
const MCExpr *Expr)
const {
691 if (!AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind, DarwinRefKind,
722 template <
int Scale>
bool isUImm12Offset()
const {
728 return isSymbolicUImm12Offset(getImm());
731 return (Val % Scale) == 0 && Val >= 0 && (Val / Scale) < 0x1000;
734 template <
int N,
int M>
735 bool isImmInRange()
const {
742 return (Val >=
N && Val <= M);
747 template <
typename T>
748 bool isLogicalImm()
const {
756 int64_t SVal =
typename std::make_signed<T>::type(Val);
757 int64_t UVal =
typename std::make_unsigned<T>::type(Val);
758 if (Val != SVal && Val != UVal)
764 bool isShiftedImm()
const {
return Kind == k_ShiftedImm; }
  template <unsigned Width>
    if (isShiftedImm() && Width == getShiftedImmShift())
      if (auto *CE = dyn_cast<MCConstantExpr>(getShiftedImmVal()))
        return std::make_pair(CE->getValue(), Width);

    if (auto *CE = dyn_cast<MCConstantExpr>(getImm())) {
      int64_t Val = CE->getValue();
      if ((Val != 0) && (uint64_t(Val >> Width) << Width) == uint64_t(Val))
        return std::make_pair(Val >> Width, Width);
        return std::make_pair(Val, 0u);
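  // Added illustration (not part of the original listing): what the plain-immediate
  // branch of getShiftedVal<12>() computes, as a standalone sketch with a
  // hypothetical name.  A non-zero value whose low 12 bits are clear is reported as
  // {value >> 12, 12}; anything else is reported unshifted.
  static std::pair<int64_t, unsigned> decomposeShiftedImm12(int64_t Val) {
    if (Val != 0 && (uint64_t(Val >> 12) << 12) == uint64_t(Val))
      return {Val >> 12, 12};   // e.g. 0x45000 -> {0x45, 12}
    return {Val, 0};            // e.g. 0x123   -> {0x123, 0}
  }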
787 bool isAddSubImm()
const {
788 if (!isShiftedImm() && !isImm())
794 if (isShiftedImm()) {
795 unsigned Shift = ShiftedImm.ShiftAmount;
796 Expr = ShiftedImm.Val;
797 if (Shift != 0 && Shift != 12)
806 if (AArch64AsmParser::classifySymbolRef(Expr, ELFRefKind,
807 DarwinRefKind, Addend)) {
824 if (
auto ShiftedVal = getShiftedVal<12>())
825 return ShiftedVal->first >= 0 && ShiftedVal->first <= 0xfff;
832 bool isAddSubImmNeg()
const {
833 if (!isShiftedImm() && !isImm())
837 if (
auto ShiftedVal = getShiftedVal<12>())
838 return ShiftedVal->first < 0 && -ShiftedVal->first <= 0xfff;
848 template <
typename T>
850 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
854 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
855 if (
auto ShiftedImm = getShiftedVal<8>())
856 if (!(IsByte && ShiftedImm->second) &&
857 AArch64_AM::isSVECpyImm<T>(uint64_t(ShiftedImm->first)
858 << ShiftedImm->second))
868 if (!isShiftedImm() && (!isImm() || !isa<MCConstantExpr>(getImm())))
872 std::is_same<int8_t, typename std::make_signed<T>::type>::value;
873 if (
auto ShiftedImm = getShiftedVal<8>())
874 if (!(IsByte && ShiftedImm->second) &&
875 AArch64_AM::isSVEAddSubImm<T>(ShiftedImm->first
876 << ShiftedImm->second))
883 if (isLogicalImm<T>() && !isSVECpyImm<T>())
888 bool isCondCode()
const {
return Kind == k_CondCode; }
890 bool isSIMDImmType10()
const {
900 bool isBranchTarget()
const {
    assert(N > 0 && "Branch target immediate cannot be 0 bits!");
    return (Val >= -((1 << (N - 1)) << 2) && Val <= (((1 << (N - 1)) - 1) << 2));
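  // Added illustration (not part of the original listing): the same range test as a
  // standalone sketch with a hypothetical name.  For N=26 (B/BL) it accepts byte
  // offsets in [-134217728, 134217724]; for N=19 (conditional branches) it accepts
  // [-1048576, 1048572].
  static bool fitsBranchTarget(int64_t Val, unsigned N) {
    int64_t Min = -((int64_t(1) << (N - 1)) << 2);
    int64_t Max = ((int64_t(1) << (N - 1)) - 1) << 2;
    return Val >= Min && Val <= Max;
  }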
921 if (!AArch64AsmParser::classifySymbolRef(getImm(), ELFRefKind,
922 DarwinRefKind, Addend)) {
928 for (
unsigned i = 0; i != AllowedModifiers.
size(); ++i) {
929 if (ELFRefKind == AllowedModifiers[i])
936 bool isMovZSymbolG3()
const {
940 bool isMovZSymbolG2()
const {
946 bool isMovZSymbolG1()
const {
947 return isMovWSymbol({
954 bool isMovZSymbolG0()
const {
960 bool isMovKSymbolG3()
const {
964 bool isMovKSymbolG2()
const {
968 bool isMovKSymbolG1()
const {
974 bool isMovKSymbolG0()
const {
  template<int RegWidth, int Shift>
982 if (!isImm())
return false;
985 if (!CE)
return false;
  template<int RegWidth, int Shift>
993 if (!isImm())
return false;
996 if (!CE)
return false;
1002 bool isFPImm()
const {
1003 return Kind == k_FPImm &&
1007 bool isBarrier()
const {
return Kind == k_Barrier; }
1008 bool isSysReg()
const {
return Kind == k_SysReg; }
1010 bool isMRSSystemRegister()
const {
1011 if (!isSysReg())
return false;
1013 return SysReg.MRSReg != -1U;
1016 bool isMSRSystemRegister()
const {
1017 if (!isSysReg())
return false;
1018 return SysReg.MSRReg != -1U;
1021 bool isSystemPStateFieldWithImm0_1()
const {
1022 if (!isSysReg())
return false;
1023 return (SysReg.PStateField == AArch64PState::PAN ||
1024 SysReg.PStateField == AArch64PState::DIT ||
1025 SysReg.PStateField == AArch64PState::UAO ||
1026 SysReg.PStateField == AArch64PState::SSBS);
1029 bool isSystemPStateFieldWithImm0_15()
const {
1030 if (!isSysReg() || isSystemPStateFieldWithImm0_1())
return false;
1031 return SysReg.PStateField != -1U;
1034 bool isReg()
const override {
1035 return Kind == k_Register;
1038 bool isScalarReg()
const {
1042 bool isNeonVectorReg()
const {
1043 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector;
1046 bool isNeonVectorRegLo()
const {
1047 return Kind == k_Register &&
Reg.Kind == RegKind::NeonVector &&
1048 AArch64MCRegisterClasses[AArch64::FPR128_loRegClassID].contains(
1052 template <
unsigned Class>
bool isSVEVectorReg()
const {
1055 case AArch64::ZPRRegClassID:
1056 case AArch64::ZPR_3bRegClassID:
1057 case AArch64::ZPR_4bRegClassID:
1058 RK = RegKind::SVEDataVector;
1060 case AArch64::PPRRegClassID:
1061 case AArch64::PPR_3bRegClassID:
1062 RK = RegKind::SVEPredicateVector;
1068 return (Kind == k_Register &&
Reg.Kind == RK) &&
1069 AArch64MCRegisterClasses[
Class].contains(
getReg());
1072 template <
unsigned Class>
bool isFPRasZPR()
const {
1074 AArch64MCRegisterClasses[
Class].contains(
getReg());
  template <int ElementWidth, unsigned Class>
1079 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEPredicateVector)
1082 if (isSVEVectorReg<Class>() &&
1083 (ElementWidth == 0 ||
Reg.ElementWidth == ElementWidth))
  template <int ElementWidth, unsigned Class>
1091 if (Kind != k_Register ||
Reg.Kind != RegKind::SVEDataVector)
1094 if (isSVEVectorReg<Class>() &&
1095 (ElementWidth == 0 ||
Reg.ElementWidth == ElementWidth))
1101 template <
int ElementWidth,
unsigned Class,
1103 bool ShiftWidthAlwaysSame>
1105 auto VectorMatch = isSVEDataVectorRegOfWidth<ElementWidth, Class>();
1106 if (!VectorMatch.isMatch())
1112 bool MatchShift = getShiftExtendAmount() ==
Log2_32(ShiftWidth / 8);
1115 !ShiftWidthAlwaysSame && hasShiftExtendAmount() && ShiftWidth == 8)
1118 if (MatchShift && ShiftExtendTy == getShiftExtendType())
1124 bool isGPR32as64()
const {
1126 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].contains(
Reg.RegNum);
1129 bool isGPR64as32()
const {
1131 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].contains(
Reg.RegNum);
1134 bool isWSeqPair()
const {
1136 AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID].contains(
1140 bool isXSeqPair()
const {
1142 AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID].contains(
1146 template<
int64_t Angle,
int64_t Remainder>
1154 if (Value % Angle == Remainder && Value <= 270)
1159 template <
unsigned RegClassID>
bool isGPR64()
const {
1161 AArch64MCRegisterClasses[RegClassID].contains(
getReg());
  template <unsigned RegClassID, int ExtWidth>
1169 if (isGPR64<RegClassID>() && getShiftExtendType() ==
AArch64_AM::LSL &&
1170 getShiftExtendAmount() ==
Log2_32(ExtWidth / 8))
1177 template <RegKind VectorKind,
unsigned NumRegs>
1178 bool isImplicitlyTypedVectorList()
const {
1179 return Kind == k_VectorList && VectorList.Count == NumRegs &&
1180 VectorList.NumElements == 0 &&
1181 VectorList.RegisterKind == VectorKind;
1184 template <
RegKind VectorKind,
unsigned NumRegs,
unsigned NumElements,
1185 unsigned ElementWidth>
1186 bool isTypedVectorList()
const {
1187 if (Kind != k_VectorList)
1189 if (VectorList.Count != NumRegs)
1191 if (VectorList.RegisterKind != VectorKind)
1193 if (VectorList.ElementWidth != ElementWidth)
1195 return VectorList.NumElements == NumElements;
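  // Added illustration (not part of the original listing): a list written as
  // "{ v0.4s, v1.4s }" is built with Count=2, NumElements=4, ElementWidth=32 and
  // RegisterKind=NeonVector, so it satisfies
  // isTypedVectorList<RegKind::NeonVector, 2, 4, 32>() and no other instantiation.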
1198 template <
int Min,
int Max>
1200 if (Kind != k_VectorIndex)
1202 if (VectorIndex.Val >= Min && VectorIndex.Val <= Max)
1207 bool isToken()
const override {
return Kind == k_Token; }
1209 bool isTokenEqual(
StringRef Str)
const {
1210 return Kind == k_Token &&
getToken() == Str;
1212 bool isSysCR()
const {
return Kind == k_SysCR; }
1213 bool isPrefetch()
const {
return Kind == k_Prefetch; }
1214 bool isPSBHint()
const {
return Kind == k_PSBHint; }
1215 bool isBTIHint()
const {
return Kind == k_BTIHint; }
1216 bool isShiftExtend()
const {
return Kind == k_ShiftExtend; }
1217 bool isShifter()
const {
1218 if (!isShiftExtend())
1228 if (Kind != k_FPImm)
1231 if (getFPImmIsExact()) {
1233 auto *Desc = AArch64ExactFPImm::lookupExactFPImmByEnum(ImmEnum);
1234 assert(Desc &&
"Unknown enum value");
1242 if (getFPImm().bitwiseIsEqual(RealVal))
1249 template <
unsigned ImmA,
unsigned ImmB>
1252 if ((Res = isExactFPImm<ImmA>()))
1254 if ((Res = isExactFPImm<ImmB>()))
1259 bool isExtend()
const {
1260 if (!isShiftExtend())
1269 getShiftExtendAmount() <= 4;
1272 bool isExtend64()
const {
1280 bool isExtendLSL64()
const {
1286 getShiftExtendAmount() <= 4;
  template<int Width> bool isMemXExtend() const {
1294 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1295 getShiftExtendAmount() == 0);
  template<int Width> bool isMemWExtend() const {
1303 (getShiftExtendAmount() ==
Log2_32(Width / 8) ||
1304 getShiftExtendAmount() == 0);
  template <unsigned width>
1308 bool isArithmeticShifter()
const {
  template <unsigned width>
1319 bool isLogicalShifter()
const {
1327 getShiftExtendAmount() < width;
1330 bool isMovImm32Shifter()
const {
1338 uint64_t Val = getShiftExtendAmount();
1339 return (Val == 0 || Val == 16);
1342 bool isMovImm64Shifter()
const {
1350 uint64_t Val = getShiftExtendAmount();
1351 return (Val == 0 || Val == 16 || Val == 32 || Val == 48);
1354 bool isLogicalVecShifter()
const {
1359 unsigned Shift = getShiftExtendAmount();
1361 (Shift == 0 || Shift == 8 || Shift == 16 || Shift == 24);
1364 bool isLogicalVecHalfWordShifter()
const {
1365 if (!isLogicalVecShifter())
1369 unsigned Shift = getShiftExtendAmount();
1371 (Shift == 0 || Shift == 8);
1374 bool isMoveVecShifter()
const {
1375 if (!isShiftExtend())
1379 unsigned Shift = getShiftExtendAmount();
1381 (Shift == 8 || Shift == 16);
1390 bool isSImm9OffsetFB()
const {
1391 return isSImm<9>() && !isUImm12Offset<Width / 8>();
1394 bool isAdrpLabel()
const {
1400 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1401 int64_t Val = CE->getValue();
1402 int64_t Min = - (4096 * (1LL << (21 - 1)));
1403 int64_t Max = 4096 * ((1LL << (21 - 1)) - 1);
1404 return (Val % 4096) == 0 && Val >= Min && Val <= Max;
1410 bool isAdrLabel()
const {
1416 if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Imm.Val)) {
1417 int64_t Val = CE->getValue();
1418 int64_t Min = - (1LL << (21 - 1));
1419 int64_t Max = ((1LL << (21 - 1)) - 1);
1420 return Val >= Min && Val <= Max;
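  // Added illustration (not part of the original listing): with the constants above,
  // a constant adrp operand must be a multiple of 4096 in [-4294967296, 4294963200]
  // (a signed 21-bit page offset), while a constant adr operand must lie in
  // [-1048576, 1048575] (a signed 21-bit byte offset).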
1430 else if (
const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1436 void addRegOperands(
MCInst &Inst,
unsigned N)
const {
1437 assert(N == 1 &&
"Invalid number of operands!");
1441 void addGPR32as64Operands(
MCInst &Inst,
unsigned N)
const {
1442 assert(N == 1 &&
"Invalid number of operands!");
1444 AArch64MCRegisterClasses[AArch64::GPR64RegClassID].
contains(
getReg()));
1453 void addGPR64as32Operands(
MCInst &Inst,
unsigned N)
const {
1454 assert(N == 1 &&
"Invalid number of operands!");
1456 AArch64MCRegisterClasses[AArch64::GPR32RegClassID].
contains(
getReg()));
  template <int Width>
1466 void addFPRasZPRRegOperands(
MCInst &Inst,
unsigned N)
const {
1469 case 8: Base = AArch64::B0;
break;
1470 case 16: Base = AArch64::H0;
break;
1471 case 32: Base = AArch64::S0;
break;
1472 case 64: Base = AArch64::D0;
break;
1473 case 128: Base = AArch64::Q0;
break;
1480 void addVectorReg64Operands(
MCInst &Inst,
unsigned N)
const {
1481 assert(N == 1 &&
"Invalid number of operands!");
1483 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1487 void addVectorReg128Operands(
MCInst &Inst,
unsigned N)
const {
1488 assert(N == 1 &&
"Invalid number of operands!");
1490 AArch64MCRegisterClasses[AArch64::FPR128RegClassID].
contains(
getReg()));
1494 void addVectorRegLoOperands(
MCInst &Inst,
unsigned N)
const {
1495 assert(N == 1 &&
"Invalid number of operands!");
1499 enum VecListIndexType {
1500 VecListIdx_DReg = 0,
1501 VecListIdx_QReg = 1,
1502 VecListIdx_ZReg = 2,
1505 template <VecListIndexType RegTy,
unsigned NumRegs>
1506 void addVectorListOperands(
MCInst &Inst,
unsigned N)
const {
1507 assert(N == 1 &&
"Invalid number of operands!");
1508 static const unsigned FirstRegs[][5] = {
1510 AArch64::D0, AArch64::D0_D1,
1511 AArch64::D0_D1_D2, AArch64::D0_D1_D2_D3 },
1513 AArch64::Q0, AArch64::Q0_Q1,
1514 AArch64::Q0_Q1_Q2, AArch64::Q0_Q1_Q2_Q3 },
1516 AArch64::Z0, AArch64::Z0_Z1,
1517 AArch64::Z0_Z1_Z2, AArch64::Z0_Z1_Z2_Z3 }
1520 assert((RegTy != VecListIdx_ZReg || NumRegs <= 4) &&
1521 " NumRegs must be <= 4 for ZRegs");
1523 unsigned FirstReg = FirstRegs[(
unsigned)RegTy][NumRegs];
1525 FirstRegs[(
unsigned)RegTy][0]));
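  // Added illustration (not part of the original listing): the table is indexed by
  // the list length, and the start of the parsed list is then added on top.  For a
  // two-register Q list beginning at q2, FirstRegs[VecListIdx_QReg][2] is Q0_Q1, so
  // the emitted operand is Q0_Q1 + (Q2 - Q0) == Q2_Q3; this relies on the generated
  // tuple register enums being laid out sequentially.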
1528 void addVectorIndexOperands(
MCInst &Inst,
unsigned N)
const {
1529 assert(N == 1 &&
"Invalid number of operands!");
1533 template <
unsigned ImmIs0,
unsigned ImmIs1>
1534 void addExactFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1535 assert(N == 1 &&
"Invalid number of operands!");
1536 assert(
bool(isExactFPImm<ImmIs0, ImmIs1>()) &&
"Invalid operand");
1540 void addImmOperands(
MCInst &Inst,
unsigned N)
const {
1541 assert(N == 1 &&
"Invalid number of operands!");
1545 addExpr(Inst, getImm());
1548 template <
int Shift>
1549 void addImmWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1550 assert(N == 2 &&
"Invalid number of operands!");
1551 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1554 }
else if (isShiftedImm()) {
1555 addExpr(Inst, getShiftedImmVal());
1558 addExpr(Inst, getImm());
1563 template <
int Shift>
1564 void addImmNegWithOptionalShiftOperands(
MCInst &Inst,
unsigned N)
const {
1565 assert(N == 2 &&
"Invalid number of operands!");
1566 if (
auto ShiftedVal = getShiftedVal<Shift>()) {
1573 void addCondCodeOperands(
MCInst &Inst,
unsigned N)
const {
1574 assert(N == 1 &&
"Invalid number of operands!");
1578 void addAdrpLabelOperands(
MCInst &Inst,
unsigned N)
const {
1579 assert(N == 1 &&
"Invalid number of operands!");
1582 addExpr(Inst, getImm());
1587 void addAdrLabelOperands(
MCInst &Inst,
unsigned N)
const {
1588 addImmOperands(Inst, N);
1592 void addUImm12OffsetOperands(
MCInst &Inst,
unsigned N)
const {
1593 assert(N == 1 &&
"Invalid number of operands!");
1603 void addUImm6Operands(
MCInst &Inst,
unsigned N)
const {
1604 assert(N == 1 &&
"Invalid number of operands!");
1609 template <
int Scale>
1610 void addImmScaledOperands(
MCInst &Inst,
unsigned N)
const {
1611 assert(N == 1 &&
"Invalid number of operands!");
1616 template <
typename T>
1617 void addLogicalImmOperands(
MCInst &Inst,
unsigned N)
const {
1618 assert(N == 1 &&
"Invalid number of operands!");
1620 typename std::make_unsigned<T>::type Val = MCE->
getValue();
1625 template <
typename T>
1626 void addLogicalImmNotOperands(
MCInst &Inst,
unsigned N)
const {
1627 assert(N == 1 &&
"Invalid number of operands!");
1629 typename std::make_unsigned<T>::type Val = ~MCE->
getValue();
1634 void addSIMDImmType10Operands(
MCInst &Inst,
unsigned N)
const {
1635 assert(N == 1 &&
"Invalid number of operands!");
1641 void addBranchTarget26Operands(
MCInst &Inst,
unsigned N)
const {
1645 assert(N == 1 &&
"Invalid number of operands!");
1648 addExpr(Inst, getImm());
1651 assert(MCE &&
"Invalid constant immediate operand!");
1655 void addPCRelLabel19Operands(
MCInst &Inst,
unsigned N)
const {
1659 assert(N == 1 &&
"Invalid number of operands!");
1662 addExpr(Inst, getImm());
1665 assert(MCE &&
"Invalid constant immediate operand!");
1669 void addBranchTarget14Operands(
MCInst &Inst,
unsigned N)
const {
1673 assert(N == 1 &&
"Invalid number of operands!");
1676 addExpr(Inst, getImm());
1679 assert(MCE &&
"Invalid constant immediate operand!");
1683 void addFPImmOperands(
MCInst &Inst,
unsigned N)
const {
1684 assert(N == 1 &&
"Invalid number of operands!");
1689 void addBarrierOperands(
MCInst &Inst,
unsigned N)
const {
1690 assert(N == 1 &&
"Invalid number of operands!");
1694 void addMRSSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
1695 assert(N == 1 &&
"Invalid number of operands!");
1700 void addMSRSystemRegisterOperands(
MCInst &Inst,
unsigned N)
const {
1701 assert(N == 1 &&
"Invalid number of operands!");
1706 void addSystemPStateFieldWithImm0_1Operands(
MCInst &Inst,
unsigned N)
const {
1707 assert(N == 1 &&
"Invalid number of operands!");
1712 void addSystemPStateFieldWithImm0_15Operands(
MCInst &Inst,
unsigned N)
const {
1713 assert(N == 1 &&
"Invalid number of operands!");
1718 void addSysCROperands(
MCInst &Inst,
unsigned N)
const {
1719 assert(N == 1 &&
"Invalid number of operands!");
1723 void addPrefetchOperands(
MCInst &Inst,
unsigned N)
const {
1724 assert(N == 1 &&
"Invalid number of operands!");
1728 void addPSBHintOperands(
MCInst &Inst,
unsigned N)
const {
1729 assert(N == 1 &&
"Invalid number of operands!");
1733 void addBTIHintOperands(
MCInst &Inst,
unsigned N)
const {
1734 assert(N == 1 &&
"Invalid number of operands!");
1738 void addShifterOperands(
MCInst &Inst,
unsigned N)
const {
1739 assert(N == 1 &&
"Invalid number of operands!");
1745 void addExtendOperands(
MCInst &Inst,
unsigned N)
const {
1746 assert(N == 1 &&
"Invalid number of operands!");
1753 void addExtend64Operands(
MCInst &Inst,
unsigned N)
const {
1754 assert(N == 1 &&
"Invalid number of operands!");
1761 void addMemExtendOperands(
MCInst &Inst,
unsigned N)
const {
1762 assert(N == 2 &&
"Invalid number of operands!");
1773 void addMemExtend8Operands(
MCInst &Inst,
unsigned N)
const {
1774 assert(N == 2 &&
"Invalid number of operands!");
1782 void addMOVZMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
1783 assert(N == 1 &&
"Invalid number of operands!");
1791 void addMOVNMovAliasOperands(
MCInst &Inst,
unsigned N)
const {
1792 assert(N == 1 &&
"Invalid number of operands!");
1799 void addComplexRotationEvenOperands(
MCInst &Inst,
unsigned N)
const {
1800 assert(N == 1 &&
"Invalid number of operands!");
1805 void addComplexRotationOddOperands(
MCInst &Inst,
unsigned N)
const {
1806 assert(N == 1 &&
"Invalid number of operands!");
1813 static std::unique_ptr<AArch64Operand>
1815 auto Op = make_unique<AArch64Operand>(k_Token, Ctx);
1816 Op->Tok.Data = Str.
data();
1817 Op->Tok.Length = Str.
size();
1818 Op->Tok.IsSuffix = IsSuffix;
1824 static std::unique_ptr<AArch64Operand>
1828 unsigned ShiftAmount = 0,
1829 unsigned HasExplicitAmount =
false) {
1830 auto Op = make_unique<AArch64Operand>(k_Register, Ctx);
1831 Op->Reg.RegNum = RegNum;
1833 Op->Reg.ElementWidth = 0;
1834 Op->Reg.EqualityTy = EqTy;
1835 Op->Reg.ShiftExtend.Type = ExtTy;
1836 Op->Reg.ShiftExtend.Amount = ShiftAmount;
1837 Op->Reg.ShiftExtend.HasExplicitAmount = HasExplicitAmount;
1843 static std::unique_ptr<AArch64Operand>
1844 CreateVectorReg(
unsigned RegNum,
RegKind Kind,
unsigned ElementWidth,
1847 unsigned ShiftAmount = 0,
1848 unsigned HasExplicitAmount =
false) {
1849 assert((Kind == RegKind::NeonVector || Kind == RegKind::SVEDataVector ||
1850 Kind == RegKind::SVEPredicateVector) &&
1851 "Invalid vector kind");
1852 auto Op = CreateReg(RegNum, Kind, S, E, Ctx, EqualsReg, ExtTy, ShiftAmount,
1854 Op->Reg.ElementWidth = ElementWidth;
1858 static std::unique_ptr<AArch64Operand>
1859 CreateVectorList(
unsigned RegNum,
unsigned Count,
unsigned NumElements,
1862 auto Op = make_unique<AArch64Operand>(k_VectorList, Ctx);
1863 Op->VectorList.RegNum = RegNum;
1864 Op->VectorList.Count = Count;
1865 Op->VectorList.NumElements = NumElements;
1866 Op->VectorList.ElementWidth = ElementWidth;
1873 static std::unique_ptr<AArch64Operand>
1875 auto Op = make_unique<AArch64Operand>(k_VectorIndex, Ctx);
1876 Op->VectorIndex.Val = Idx;
1882 static std::unique_ptr<AArch64Operand> CreateImm(
const MCExpr *Val,
SMLoc S,
1884 auto Op = make_unique<AArch64Operand>(k_Immediate, Ctx);
1891 static std::unique_ptr<AArch64Operand> CreateShiftedImm(
const MCExpr *Val,
1892 unsigned ShiftAmount,
1895 auto Op = make_unique<AArch64Operand>(k_ShiftedImm, Ctx);
1896 Op->ShiftedImm .Val = Val;
1897 Op->ShiftedImm.ShiftAmount = ShiftAmount;
1903 static std::unique_ptr<AArch64Operand>
1905 auto Op = make_unique<AArch64Operand>(k_CondCode, Ctx);
1906 Op->CondCode.Code =
Code;
1912 static std::unique_ptr<AArch64Operand>
1914 auto Op = make_unique<AArch64Operand>(k_FPImm, Ctx);
1916 Op->FPImm.IsExact = IsExact;
1922 static std::unique_ptr<AArch64Operand> CreateBarrier(
unsigned Val,
1926 auto Op = make_unique<AArch64Operand>(k_Barrier, Ctx);
1927 Op->Barrier.Val = Val;
1928 Op->Barrier.Data = Str.
data();
1929 Op->Barrier.Length = Str.
size();
1935 static std::unique_ptr<AArch64Operand> CreateSysReg(
StringRef Str,
SMLoc S,
1940 auto Op = make_unique<AArch64Operand>(k_SysReg, Ctx);
1941 Op->SysReg.Data = Str.
data();
1942 Op->SysReg.Length = Str.
size();
1943 Op->SysReg.MRSReg = MRSReg;
1944 Op->SysReg.MSRReg = MSRReg;
1945 Op->SysReg.PStateField = PStateField;
1951 static std::unique_ptr<AArch64Operand> CreateSysCR(
unsigned Val,
SMLoc S,
1953 auto Op = make_unique<AArch64Operand>(k_SysCR, Ctx);
1954 Op->SysCRImm.Val = Val;
1960 static std::unique_ptr<AArch64Operand> CreatePrefetch(
unsigned Val,
1964 auto Op = make_unique<AArch64Operand>(k_Prefetch, Ctx);
1965 Op->Prefetch.Val = Val;
1966 Op->Barrier.Data = Str.
data();
1967 Op->Barrier.Length = Str.
size();
1973 static std::unique_ptr<AArch64Operand> CreatePSBHint(
unsigned Val,
1977 auto Op = make_unique<AArch64Operand>(k_PSBHint, Ctx);
1978 Op->PSBHint.Val = Val;
1979 Op->PSBHint.Data = Str.
data();
1980 Op->PSBHint.Length = Str.
size();
1986 static std::unique_ptr<AArch64Operand> CreateBTIHint(
unsigned Val,
1990 auto Op = make_unique<AArch64Operand>(k_BTIHint, Ctx);
1991 Op->BTIHint.Val = Val << 1 | 32;
1992 Op->BTIHint.Data = Str.
data();
1993 Op->BTIHint.Length = Str.
size();
1999 static std::unique_ptr<AArch64Operand>
2002 auto Op = make_unique<AArch64Operand>(k_ShiftExtend, Ctx);
2003 Op->ShiftExtend.Type = ShOp;
2004 Op->ShiftExtend.Amount = Val;
2005 Op->ShiftExtend.HasExplicitAmount = HasExplicitAmount;
2017 OS <<
"<fpimm " << getFPImm().bitcastToAPInt().getZExtValue();
2018 if (!getFPImmIsExact())
2025 OS <<
"<barrier " << Name <<
">";
2027 OS <<
"<barrier invalid #" << getBarrier() <<
">";
2033 case k_ShiftedImm: {
2034 unsigned Shift = getShiftedImmShift();
2035 OS <<
"<shiftedimm ";
2036 OS << *getShiftedImmVal();
2043 case k_VectorList: {
2044 OS <<
"<vectorlist ";
2045 unsigned Reg = getVectorListStart();
2046 for (
unsigned i = 0, e = getVectorListCount(); i != e; ++i)
2047 OS << Reg + i <<
" ";
2052 OS <<
"<vectorindex " << getVectorIndex() <<
">";
2055 OS <<
"<sysreg: " << getSysReg() <<
'>';
2061 OS <<
"c" << getSysCR();
2066 OS <<
"<prfop " << Name <<
">";
2068 OS <<
"<prfop invalid #" << getPrefetch() <<
">";
2072 OS << getPSBHintName();
2075 OS <<
"<register " <<
getReg() <<
">";
2076 if (!getShiftExtendAmount() && !hasShiftExtendAmount())
2080 OS << getBTIHintName();
2084 << getShiftExtendAmount();
2085 if (!hasShiftExtendAmount())
2095 static unsigned MatchRegisterName(StringRef Name);
2099 static unsigned MatchNeonVectorRegName(StringRef Name) {
2100 return StringSwitch<unsigned>(Name.lower())
2101 .Case("v0", AArch64::Q0)
2102 .Case("v1", AArch64::Q1)
2103 .Case("v2", AArch64::Q2)
2104 .Case("v3", AArch64::Q3)
2105 .Case("v4", AArch64::Q4)
2106 .Case("v5", AArch64::Q5)
2107 .Case("v6", AArch64::Q6)
2108 .Case("v7", AArch64::Q7)
2109 .Case("v8", AArch64::Q8)
2110 .Case("v9", AArch64::Q9)
2111 .Case("v10", AArch64::Q10)
2112 .Case("v11", AArch64::Q11)
2113 .Case("v12", AArch64::Q12)
2114 .Case("v13", AArch64::Q13)
2115 .Case("v14", AArch64::Q14)
2116 .Case("v15", AArch64::Q15)
2117 .Case("v16", AArch64::Q16)
2118 .Case("v17", AArch64::Q17)
2119 .Case("v18", AArch64::Q18)
2120 .Case("v19", AArch64::Q19)
2121 .Case("v20", AArch64::Q20)
2122 .Case("v21", AArch64::Q21)
2123 .Case("v22", AArch64::Q22)
2124 .Case("v23", AArch64::Q23)
2125 .Case("v24", AArch64::Q24)
2126 .Case("v25", AArch64::Q25)
2127 .Case("v26", AArch64::Q26)
2128 .Case("v27", AArch64::Q27)
2129 .Case("v28", AArch64::Q28)
2130 .Case("v29", AArch64::Q29)
2131 .Case("v30", AArch64::Q30)
2132 .Case("v31", AArch64::Q31)
static Optional<std::pair<int, int>> parseVectorKind(StringRef Suffix,
                                                     RegKind VectorKind) {
  std::pair<int, int> Res = {-1, -1};

  switch (VectorKind) {
  case RegKind::NeonVector:
        StringSwitch<std::pair<int, int>>(Suffix.lower())
            .Case(".1d", {1, 64})
            .Case(".1q", {1, 128})
            // '.2h' needed for fp16 scalar pairwise reductions
            .Case(".2h", {2, 16})
            .Case(".2s", {2, 32})
            .Case(".2d", {2, 64})
            // '.4b' is another special case for the ARMv8.2a dot product
            .Case(".4b", {4, 8})
            .Case(".4h", {4, 16})
            .Case(".4s", {4, 32})
            .Case(".8b", {8, 8})
            .Case(".8h", {8, 16})
            .Case(".16b", {16, 8})
            // Accept the width neutral ones, too, for verbose syntax. If those
            // aren't used in the right places, the token operand won't match so
            // all will work out.
            .Case(".h", {0, 16})
            .Case(".s", {0, 32})
            .Case(".d", {0, 64})
  case RegKind::SVEPredicateVector:
  case RegKind::SVEDataVector:
    Res = StringSwitch<std::pair<int, int>>(Suffix.lower())
              .Case(".h", {0, 16})
              .Case(".s", {0, 32})
              .Case(".d", {0, 64})
              .Case(".q", {0, 128})
    llvm_unreachable("Unsupported RegKind");

  if (Res == std::make_pair(-1, -1))
    return Optional<std::pair<int, int>>();

  return Optional<std::pair<int, int>>(Res);

static bool isValidVectorKind(StringRef Suffix, RegKind VectorKind) {
  return parseVectorKind(Suffix, VectorKind).hasValue();

static unsigned matchSVEDataVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("z0", AArch64::Z0)
      .Case("z1", AArch64::Z1)
      .Case("z2", AArch64::Z2)
      .Case("z3", AArch64::Z3)
      .Case("z4", AArch64::Z4)
      .Case("z5", AArch64::Z5)
      .Case("z6", AArch64::Z6)
      .Case("z7", AArch64::Z7)
      .Case("z8", AArch64::Z8)
      .Case("z9", AArch64::Z9)
      .Case("z10", AArch64::Z10)
      .Case("z11", AArch64::Z11)
      .Case("z12", AArch64::Z12)
      .Case("z13", AArch64::Z13)
      .Case("z14", AArch64::Z14)
      .Case("z15", AArch64::Z15)
      .Case("z16", AArch64::Z16)
      .Case("z17", AArch64::Z17)
      .Case("z18", AArch64::Z18)
      .Case("z19", AArch64::Z19)
      .Case("z20", AArch64::Z20)
      .Case("z21", AArch64::Z21)
      .Case("z22", AArch64::Z22)
      .Case("z23", AArch64::Z23)
      .Case("z24", AArch64::Z24)
      .Case("z25", AArch64::Z25)
      .Case("z26", AArch64::Z26)
      .Case("z27", AArch64::Z27)
      .Case("z28", AArch64::Z28)
      .Case("z29", AArch64::Z29)
      .Case("z30", AArch64::Z30)
      .Case("z31", AArch64::Z31)

static unsigned matchSVEPredicateVectorRegName(StringRef Name) {
  return StringSwitch<unsigned>(Name.lower())
      .Case("p0", AArch64::P0)
      .Case("p1", AArch64::P1)
      .Case("p2", AArch64::P2)
      .Case("p3", AArch64::P3)
      .Case("p4", AArch64::P4)
      .Case("p5", AArch64::P5)
      .Case("p6", AArch64::P6)
      .Case("p7", AArch64::P7)
      .Case("p8", AArch64::P8)
      .Case("p9", AArch64::P9)
      .Case("p10", AArch64::P10)
      .Case("p11", AArch64::P11)
      .Case("p12", AArch64::P12)
      .Case("p13", AArch64::P13)
      .Case("p14", AArch64::P14)
      .Case("p15", AArch64::P15)

bool AArch64AsmParser::ParseRegister(unsigned &RegNo, SMLoc &StartLoc,
  StartLoc = getLoc();
  auto Res = tryParseScalarRegister(RegNo);
  EndLoc = SMLoc::getFromPointer(getLoc().getPointer() - 1);
  return Res != MatchOperand_Success;

// Matches a register name or register alias previously defined by '.req'
unsigned AArch64AsmParser::matchRegisterNameAlias(StringRef Name,
  unsigned RegNum = 0;
  if ((RegNum = matchSVEDataVectorRegName(Name)))
    return Kind == RegKind::SVEDataVector ? RegNum : 0;

  if ((RegNum = matchSVEPredicateVectorRegName(Name)))
    return Kind == RegKind::SVEPredicateVector ? RegNum : 0;

  if ((RegNum = MatchNeonVectorRegName(Name)))
    return Kind == RegKind::NeonVector ? RegNum : 0;

  // The parsed register must be of RegKind Scalar
  if ((RegNum = MatchRegisterName(Name)))
    return Kind == RegKind::Scalar ? RegNum : 0;

  // Handle a few common aliases of registers.
  if (auto RegNum = StringSwitch<unsigned>(Name.lower())
                        .Case("fp", AArch64::FP)
                        .Case("lr", AArch64::LR)
                        .Case("x31", AArch64::XZR)
                        .Case("w31", AArch64::WZR)
    return Kind == RegKind::Scalar ? RegNum : 0;

  // Check for aliases registered via .req. Canonicalize to lower case.
  // That's more consistent since register names are case insensitive, and
  auto Entry = RegisterReqs.find(Name.lower());
  if (Entry == RegisterReqs.end())
  if (Kind == Entry->getValue().first)
    RegNum = Entry->getValue().second;
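// Added illustration (not part of the original listing): a few concrete lookups as
// the functions above behave.  Names only resolve when the requested RegKind
// matches, and the suffix table yields {number of elements, element width in bits}:
//
//   parseVectorKind(".4s", RegKind::NeonVector)        --> {4, 32}
//   parseVectorKind(".s",  RegKind::SVEDataVector)     --> {0, 32}
//   matchRegisterNameAlias("fp",  RegKind::Scalar)     --> AArch64::FP
//   matchRegisterNameAlias("z7",  RegKind::Scalar)     --> 0 (only valid as an SVE data vector)
//   matchRegisterNameAlias("v31", RegKind::NeonVector) --> AArch64::Q31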
2308 AArch64AsmParser::tryParseScalarRegister(
unsigned &RegNum) {
2326 AArch64AsmParser::tryParseSysCROperand(
OperandVector &Operands) {
2331 Error(S,
"Expected cN operand where 0 <= N <= 15");
2336 if (Tok[0] !=
'c' && Tok[0] !=
'C') {
2337 Error(S,
"Expected cN operand where 0 <= N <= 15");
2343 if (BadNum || CRNum > 15) {
2344 Error(S,
"Expected cN operand where 0 <= N <= 15");
2350 AArch64Operand::CreateSysCR(CRNum, S, getLoc(), getContext()));
2355 template <
bool IsSVEPrefetch>
2357 AArch64AsmParser::tryParsePrefetch(
OperandVector &Operands) {
2363 if (IsSVEPrefetch) {
2364 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByName(
N))
2366 }
else if (
auto Res = AArch64PRFM::lookupPRFMByName(
N))
2371 auto LookupByEncoding = [](
unsigned E) {
2372 if (IsSVEPrefetch) {
2373 if (
auto Res = AArch64SVEPRFM::lookupSVEPRFMByEncoding(
E))
2375 }
else if (
auto Res = AArch64PRFM::lookupPRFMByEncoding(
E))
2379 unsigned MaxVal = IsSVEPrefetch ? 15 : 31;
2386 if (getParser().parseExpression(ImmVal))
2391 TokError(
"immediate value expected for prefetch operand");
2395 if (prfop > MaxVal) {
2396 TokError(
"prefetch operand out of range, [0," +
utostr(MaxVal) +
2401 auto PRFM = LookupByEncoding(MCE->
getValue());
2402 Operands.
push_back(AArch64Operand::CreatePrefetch(
2403 prfop, PRFM.getValueOr(
""), S, getContext()));
2408 TokError(
"prefetch hint expected");
2412 auto PRFM = LookupByName(Tok.
getString());
2414 TokError(
"prefetch hint expected");
2419 Operands.
push_back(AArch64Operand::CreatePrefetch(
2420 *PRFM, Tok.
getString(), S, getContext()));
2426 AArch64AsmParser::tryParsePSBHint(
OperandVector &Operands) {
2431 TokError(
"invalid operand for instruction");
2435 auto PSB = AArch64PSBHint::lookupPSBByName(Tok.
getString());
2437 TokError(
"invalid operand for instruction");
2442 Operands.
push_back(AArch64Operand::CreatePSBHint(
2443 PSB->Encoding, Tok.
getString(), S, getContext()));
2449 AArch64AsmParser::tryParseBTIHint(
OperandVector &Operands) {
2454 TokError(
"invalid operand for instruction");
2458 auto BTI = AArch64BTIHint::lookupBTIByName(Tok.
getString());
2460 TokError(
"invalid operand for instruction");
2465 Operands.
push_back(AArch64Operand::CreateBTIHint(
2466 BTI->Encoding, Tok.
getString(), S, getContext()));
2473 AArch64AsmParser::tryParseAdrpLabel(
OperandVector &Operands) {
2482 if (parseSymbolicImmVal(Expr))
2488 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2498 Error(S,
"gotpage label reference not allowed an addend");
2507 Error(S,
"page or gotpage label reference expected");
2516 Operands.
push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2524 AArch64AsmParser::tryParseAdrLabel(
OperandVector &Operands) {
2535 if (parseSymbolicImmVal(Expr))
2541 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
2548 Error(S,
"unexpected adr label");
2554 Operands.
push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
2559 template<
bool AddFPZeroAsLiteral>
2574 TokError(
"invalid floating point immediate");
2580 if (Tok.
getIntVal() > 255 || isNegative) {
2581 TokError(
"encoded floating point value out of range");
2587 AArch64Operand::CreateFPImm(F,
true, S, getContext()));
2596 if (AddFPZeroAsLiteral && RealVal.
isPosZero()) {
2598 AArch64Operand::CreateToken(
"#0",
false, S, getContext()));
2600 AArch64Operand::CreateToken(
".0",
false, S, getContext()));
2602 Operands.
push_back(AArch64Operand::CreateFPImm(
2614 AArch64AsmParser::tryParseImmWithOptionalShift(
OperandVector &Operands) {
2625 if (parseSymbolicImmVal(Imm))
2630 AArch64Operand::CreateImm(Imm, S, E, getContext()));
2656 if (ShiftAmount < 0) {
2663 if (ShiftAmount == 0 && Imm != 0) {
2665 Operands.
push_back(AArch64Operand::CreateImm(Imm, S, E, getContext()));
2670 Operands.
push_back(AArch64Operand::CreateShiftedImm(Imm, ShiftAmount,
2671 S, E, getContext()));
2699 getSTI().getFeatureBits()[AArch64::FeatureSVE])
2717 bool AArch64AsmParser::parseCondCode(
OperandVector &Operands,
2718 bool invertCondCode) {
2727 return TokError(
"invalid condition code");
2730 if (invertCondCode) {
2732 return TokError(
"condition codes AL and NV are invalid for this instruction");
2737 AArch64Operand::CreateCondCode(CC, S, getLoc(), getContext()));
2744 AArch64AsmParser::tryParseOptionalShiftExtend(
OperandVector &Operands) {
2778 TokError(
"expected #imm after shift specifier");
2785 AArch64Operand::CreateShiftExtend(ShOp, 0,
false, S, E, getContext()));
2795 Error(E,
"expected integer shift amount");
2800 if (getParser().parseExpression(ImmVal))
2805 Error(E,
"expected constant '#imm' after shift specifier");
2810 Operands.
push_back(AArch64Operand::CreateShiftExtend(
2811 ShOp, MCE->
getValue(),
true, S,
E, getContext()));
    {"crc", {AArch64::FeatureCRC}},
    {"sm4", {AArch64::FeatureSM4}},
    {"sha3", {AArch64::FeatureSHA3}},
    {"sha2", {AArch64::FeatureSHA2}},
    {"aes", {AArch64::FeatureAES}},
    {"crypto", {AArch64::FeatureCrypto}},
    {"fp", {AArch64::FeatureFPARMv8}},
    {"simd", {AArch64::FeatureNEON}},
    {"ras", {AArch64::FeatureRAS}},
    {"lse", {AArch64::FeatureLSE}},
    {"predres", {AArch64::FeaturePredRes}},
    {"ccdp", {AArch64::FeatureCacheDeepPersist}},
    {"mte", {AArch64::FeatureMTE}},
    {"tlb-rmi", {AArch64::FeatureTLB_RMI}},
    {"pan-rwv", {AArch64::FeaturePAN_RWV}},
    {"ccpp", {AArch64::FeatureCCPP}},
    {"sve", {AArch64::FeatureSVE}},
2844 if (FBS[AArch64::HasV8_1aOps])
2846 else if (FBS[AArch64::HasV8_2aOps])
2848 else if (FBS[AArch64::HasV8_3aOps])
2850 else if (FBS[AArch64::HasV8_4aOps])
2852 else if (FBS[AArch64::HasV8_5aOps])
2866 void AArch64AsmParser::createSysAlias(uint16_t Encoding,
OperandVector &Operands,
2868 const uint16_t Op2 = Encoding & 7;
2869 const uint16_t Cm = (Encoding & 0x78) >> 3;
2870 const uint16_t Cn = (Encoding & 0x780) >> 7;
2871 const uint16_t Op1 = (Encoding & 0x3800) >> 11;
2876 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2878 AArch64Operand::CreateSysCR(Cn, S, getLoc(), getContext()));
2880 AArch64Operand::CreateSysCR(Cm, S, getLoc(), getContext()));
2883 AArch64Operand::CreateImm(Expr, S, getLoc(), getContext()));
2891 return TokError(
"invalid operand");
2895 AArch64Operand::CreateToken(
"sys",
false, NameLoc, getContext()));
2902 if (Mnemonic ==
"ic") {
2905 return TokError(
"invalid operand for IC instruction");
2906 else if (!IC->
haveFeatures(getSTI().getFeatureBits())) {
2907 std::string Str(
"IC " + std::string(IC->
Name) +
" requires ");
2909 return TokError(Str.c_str());
2911 createSysAlias(IC->
Encoding, Operands, S);
2912 }
else if (Mnemonic ==
"dc") {
2915 return TokError(
"invalid operand for DC instruction");
2916 else if (!DC->
haveFeatures(getSTI().getFeatureBits())) {
2917 std::string Str(
"DC " + std::string(DC->
Name) +
" requires ");
2919 return TokError(Str.c_str());
2921 createSysAlias(DC->
Encoding, Operands, S);
2922 }
else if (Mnemonic ==
"at") {
2925 return TokError(
"invalid operand for AT instruction");
2926 else if (!AT->
haveFeatures(getSTI().getFeatureBits())) {
2927 std::string Str(
"AT " + std::string(AT->
Name) +
" requires ");
2929 return TokError(Str.c_str());
2931 createSysAlias(AT->
Encoding, Operands, S);
2932 }
else if (Mnemonic ==
"tlbi") {
2935 return TokError(
"invalid operand for TLBI instruction");
2936 else if (!TLBI->
haveFeatures(getSTI().getFeatureBits())) {
2937 std::string Str(
"TLBI " + std::string(TLBI->
Name) +
" requires ");
2939 return TokError(Str.c_str());
2941 createSysAlias(TLBI->
Encoding, Operands, S);
2942 }
else if (Mnemonic ==
"cfp" || Mnemonic ==
"dvp" || Mnemonic ==
"cpp") {
2945 return TokError(
"invalid operand for prediction restriction instruction");
2946 else if (!PRCTX->
haveFeatures(getSTI().getFeatureBits())) {
2948 Mnemonic.upper() + std::string(PRCTX->
Name) +
" requires ");
2950 return TokError(Str.c_str());
2952 uint16_t PRCTX_Op2 =
2953 Mnemonic ==
"cfp" ? 4 :
2954 Mnemonic ==
"dvp" ? 5 :
2955 Mnemonic ==
"cpp" ? 7 :
2957 assert(PRCTX_Op2 &&
"Invalid mnemonic for prediction restriction instruction");
2958 createSysAlias(PRCTX->
Encoding << 3 | PRCTX_Op2 , Operands, S);
2964 bool HasRegister =
false;
2969 return TokError(
"expected register operand");
2973 if (ExpectRegister && !HasRegister)
2974 return TokError(
"specified " + Mnemonic +
" op requires a register");
2975 else if (!ExpectRegister && HasRegister)
2976 return TokError(
"specified " + Mnemonic +
" op does not use a register");
2985 AArch64AsmParser::tryParseBarrierOperand(
OperandVector &Operands) {
2990 TokError(
"'csync' operand expected");
2996 SMLoc ExprLoc = getLoc();
2997 if (getParser().parseExpression(ImmVal))
3001 Error(ExprLoc,
"immediate value expected for barrier operand");
3005 Error(ExprLoc,
"barrier operand out of range");
3008 auto DB = AArch64DB::lookupDBByEncoding(MCE->
getValue());
3009 Operands.
push_back(AArch64Operand::CreateBarrier(
3010 MCE->
getValue(), DB ? DB->Name :
"", ExprLoc, getContext()));
3015 TokError(
"invalid operand for instruction");
3019 auto TSB = AArch64TSB::lookupTSBByName(Tok.
getString());
3021 auto DB = AArch64DB::lookupDBByName(Tok.
getString());
3022 if (Mnemonic ==
"isb" && (!DB || DB->Encoding != AArch64DB::sy)) {
3023 TokError(
"'sy' or #imm operand expected");
3026 }
else if (Mnemonic ==
"tsb" && (!TSB || TSB->Encoding != AArch64TSB::csync)) {
3027 TokError(
"'csync' operand expected");
3029 }
else if (!DB && !TSB) {
3030 TokError(
"invalid barrier option name");
3034 Operands.
push_back(AArch64Operand::CreateBarrier(
3035 DB ? DB->Encoding : TSB->Encoding, Tok.
getString(), getLoc(), getContext()));
3051 if (SysReg && SysReg->haveFeatures(getSTI().getFeatureBits())) {
3052 MRSReg = SysReg->
Readable ? SysReg->Encoding : -1;
3053 MSRReg = SysReg->Writeable ? SysReg->Encoding : -1;
3057 auto PState = AArch64PState::lookupPStateByName(Tok.
getString());
3058 unsigned PStateImm = -1;
3059 if (PState && PState->haveFeatures(getSTI().getFeatureBits()))
3060 PStateImm = PState->Encoding;
3063 AArch64Operand::CreateSysReg(Tok.
getString(), getLoc(), MRSReg, MSRReg,
3064 PStateImm, getContext()));
3071 bool AArch64AsmParser::tryParseNeonVectorRegister(
OperandVector &Operands) {
3081 tryParseVectorRegister(Reg, Kind, RegKind::NeonVector);
3089 unsigned ElementWidth = KindRes->second;
3091 AArch64Operand::CreateVectorReg(Reg, RegKind::NeonVector, ElementWidth,
3092 S, getLoc(), getContext()));
3098 AArch64Operand::CreateToken(Kind,
false, S, getContext()));
3104 AArch64AsmParser::tryParseVectorIndex(
OperandVector &Operands) {
3105 SMLoc SIdx = getLoc();
3108 if (getParser().parseExpression(ImmVal))
3112 TokError(
"immediate value expected for vector index");
3144 size_t Start = 0, Next = Name.
find(
'.');
3146 unsigned RegNum = matchRegisterNameAlias(Head, MatchKind);
3152 TokError(
"invalid vector kind qualifier");
3167 AArch64AsmParser::tryParseSVEPredicateVector(
OperandVector &Operands) {
3169 const SMLoc S = getLoc();
3172 auto Res = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
3176 const auto &KindRes =
parseVectorKind(Kind, RegKind::SVEPredicateVector);
3180 unsigned ElementWidth = KindRes->second;
3181 Operands.
push_back(AArch64Operand::CreateVectorReg(
3182 RegNum, RegKind::SVEPredicateVector, ElementWidth, S,
3183 getLoc(), getContext()));
3191 if (!Kind.
empty()) {
3192 Error(S,
"not expecting size suffix");
3198 AArch64Operand::CreateToken(
"/" ,
false, getLoc(), getContext()));
3204 if (Pred !=
"z" && Pred !=
"m") {
3205 Error(getLoc(),
"expecting 'm' or 'z' predication");
3210 const char *ZM = Pred ==
"z" ?
"z" :
"m";
3212 AArch64Operand::CreateToken(ZM,
false, getLoc(), getContext()));
3219 bool AArch64AsmParser::parseRegister(
OperandVector &Operands) {
3221 if (!tryParseNeonVectorRegister(Operands))
3231 bool AArch64AsmParser::parseSymbolicImmVal(
const MCExpr *&ImmVal) {
3233 bool HasELFModifier =
false;
3237 HasELFModifier =
true;
3240 return TokError(
"expect relocation specifier in operand after ':'");
3284 return TokError(
"expect relocation specifier in operand after ':'");
3288 if (parseToken(
AsmToken::Colon,
"expect ':' after relocation specifier"))
3292 if (getParser().parseExpression(ImmVal))
3301 template <RegKind VectorKind>
3303 AArch64AsmParser::tryParseVectorList(
OperandVector &Operands,
3311 bool NoMatchIsError) {
3312 auto RegTok = Parser.
getTok();
3313 auto ParseRes = tryParseVectorRegister(Reg, Kind, VectorKind);
3323 Error(Loc,
"vector register expected");
3331 auto LCurly = Parser.
getTok();
3336 auto ParseRes = ParseVector(FirstReg, Kind, getLoc(), ExpectMatch);
3346 int64_t PrevReg = FirstReg;
3350 SMLoc Loc = getLoc();
3354 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
3359 if (Kind != NextKind) {
3360 Error(Loc,
"mismatched register size suffix");
3364 unsigned Space = (PrevReg <
Reg) ? (Reg - PrevReg) : (Reg + 32 - PrevReg);
3366 if (Space == 0 || Space > 3) {
3367 Error(Loc,
"invalid number of vectors");
3375 SMLoc Loc = getLoc();
3378 ParseRes = ParseVector(Reg, NextKind, getLoc(),
true);
3383 if (Kind != NextKind) {
3384 Error(Loc,
"mismatched register size suffix");
3389 if (getContext().getRegisterInfo()->getEncodingValue(Reg) !=
3390 (getContext().getRegisterInfo()->getEncodingValue(PrevReg) + 1) % 32) {
3391 Error(Loc,
"registers must be sequential");
3404 Error(S,
"invalid number of vectors");
3408 unsigned NumElements = 0;
3409 unsigned ElementWidth = 0;
3410 if (!Kind.
empty()) {
3412 std::tie(NumElements, ElementWidth) = *VK;
3415 Operands.
push_back(AArch64Operand::CreateVectorList(
3416 FirstReg, Count, NumElements, ElementWidth, VectorKind, S, getLoc(),
3423 bool AArch64AsmParser::parseNeonVectorList(
OperandVector &Operands) {
3424 auto ParseRes = tryParseVectorList<RegKind::NeonVector>(Operands,
true);
3432 AArch64AsmParser::tryParseGPR64sp0Operand(
OperandVector &Operands) {
3433 SMLoc StartLoc = getLoc();
3441 Operands.
push_back(AArch64Operand::CreateReg(
3449 Error(getLoc(),
"index must be absent or #0");
3454 if (getParser().parseExpression(ImmVal) || !isa<MCConstantExpr>(ImmVal) ||
3455 cast<MCConstantExpr>(ImmVal)->getValue() != 0) {
3456 Error(getLoc(),
"index must be absent or #0");
3460 Operands.
push_back(AArch64Operand::CreateReg(
template <bool ParseShiftExtend, RegConstraintEqualityTy EqTy>
3467 AArch64AsmParser::tryParseGPROperand(
OperandVector &Operands) {
3468 SMLoc StartLoc = getLoc();
3476 if (!ParseShiftExtend || getParser().getTok().isNot(
AsmToken::Comma)) {
3477 Operands.
push_back(AArch64Operand::CreateReg(
3487 Res = tryParseOptionalShiftExtend(ExtOpnd);
3491 auto Ext =
static_cast<AArch64Operand*
>(ExtOpnd.
back().get());
3492 Operands.
push_back(AArch64Operand::CreateReg(
3494 Ext->getShiftExtendType(),
Ext->getShiftExtendAmount(),
3495 Ext->hasShiftExtendAmount()));
3500 bool AArch64AsmParser::parseOptionalMulOperand(
OperandVector &Operands) {
3509 !(NextIsVL || NextIsHash))
3513 AArch64Operand::CreateToken(
"mul",
false, getLoc(), getContext()));
3518 AArch64Operand::CreateToken(
"vl",
false, getLoc(), getContext()));
3530 if (
const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal)) {
3531 Operands.
push_back(AArch64Operand::CreateImm(
3538 return Error(getLoc(),
"expected 'vl' or '#<imm>'");
3543 bool AArch64AsmParser::parseOperand(
OperandVector &Operands,
bool isCondCode,
3544 bool invertCondCode) {
3548 MatchOperandParserImpl(Operands, Mnemonic,
true);
3562 switch (getLexer().getKind()) {
3566 if (parseSymbolicImmVal(Expr))
3567 return Error(S,
"invalid operand");
3570 Operands.
push_back(AArch64Operand::CreateImm(Expr, S, E, getContext()));
3575 Operands.
push_back(AArch64Operand::CreateToken(
"[",
false, Loc,
3581 return parseOperand(Operands,
false,
false);
3584 return parseNeonVectorList(Operands);
3588 return parseCondCode(Operands, invertCondCode);
3591 if (!parseRegister(Operands))
3596 if (!parseOptionalMulOperand(Operands))
3609 if (getParser().parseExpression(IdVal))
3612 Operands.
push_back(AArch64Operand::CreateImm(IdVal, S, E, getContext()));
3624 bool isNegative =
false;
3639 uint64_t
IntVal = RealVal.bitcastToAPInt().getZExtValue();
3640 if (Mnemonic !=
"fcmp" && Mnemonic !=
"fcmpe" && Mnemonic !=
"fcmeq" &&
3641 Mnemonic !=
"fcmge" && Mnemonic !=
"fcmgt" && Mnemonic !=
"fcmle" &&
3642 Mnemonic !=
"fcmlt" && Mnemonic !=
"fcmne")
3643 return TokError(
"unexpected floating point literal");
3644 else if (IntVal != 0 || isNegative)
3645 return TokError(
"expected floating-point constant #0.0");
3649 AArch64Operand::CreateToken(
"#0",
false, S, getContext()));
3651 AArch64Operand::CreateToken(
".0",
false, S, getContext()));
3656 if (parseSymbolicImmVal(ImmVal))
3660 Operands.
push_back(AArch64Operand::CreateImm(ImmVal, S, E, getContext()));
3664 SMLoc Loc = getLoc();
3665 if (Mnemonic !=
"ldr")
3666 return TokError(
"unexpected token in operand");
3668 const MCExpr *SubExprVal;
3669 if (getParser().parseExpression(SubExprVal))
3672 if (Operands.
size() < 2 ||
3673 !
static_cast<AArch64Operand &
>(*Operands[1]).isScalarReg())
3674 return Error(Loc,
"Only valid when first operand is register");
3677 AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
3683 if (isa<MCConstantExpr>(SubExprVal)) {
3684 uint64_t Imm = (cast<MCConstantExpr>(SubExprVal))->getValue();
3685 uint32_t ShiftAmt = 0, MaxShiftAmt = IsXReg ? 48 : 16;
3690 if (ShiftAmt <= MaxShiftAmt && Imm <= 0xFFFF) {
3691 Operands[0] = AArch64Operand::CreateToken(
"movz",
false, Loc, Ctx);
3692 Operands.
push_back(AArch64Operand::CreateImm(
3696 ShiftAmt,
true, S, E, Ctx));
3702 return Error(Loc,
"Immediate too large for register");
3706 getTargetStreamer().addConstantPoolEntry(SubExprVal, IsXReg ? 8 : 4, Loc);
3707 Operands.
push_back(AArch64Operand::CreateImm(CPLoc, S, E, Ctx));
3715 auto &AOp1 =
static_cast<const AArch64Operand&
>(Op1);
3716 auto &AOp2 =
static_cast<const AArch64Operand&
>(Op2);
3717 if (AOp1.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg &&
3718 AOp2.getRegEqualityTy() == RegConstraintEqualityTy::EqualsReg)
3721 assert(AOp1.isScalarReg() && AOp2.isScalarReg() &&
3722 "Testing equality of non-scalar registers not supported");
3725 if (AOp1.getRegEqualityTy() == EqualsSuperReg)
3727 if (AOp1.getRegEqualityTy() == EqualsSubReg)
3729 if (AOp2.getRegEqualityTy() == EqualsSuperReg)
3731 if (AOp2.getRegEqualityTy() == EqualsSubReg)
      .Case("beq", "b.eq")
      .Case("bne", "b.ne")
      .Case("bhs", "b.hs")
      .Case("bcs", "b.cs")
      .Case("blo", "b.lo")
      .Case("bcc", "b.cc")
      .Case("bmi", "b.mi")
      .Case("bpl", "b.pl")
      .Case("bvs", "b.vs")
      .Case("bvc", "b.vc")
      .Case("bhi", "b.hi")
      .Case("bls", "b.ls")
      .Case("bge", "b.ge")
      .Case("blt", "b.lt")
      .Case("bgt", "b.gt")
      .Case("ble", "b.le")
      .Case("bal", "b.al")
      .Case("bnv", "b.nv")
3767 parseDirectiveReq(Name, NameLoc);
3774 size_t Start = 0, Next = Name.
find(
'.');
3779 if (Head ==
"ic" || Head ==
"dc" || Head ==
"at" || Head ==
"tlbi" ||
3780 Head ==
"cfp" || Head ==
"dvp" || Head ==
"cpp")
3781 return parseSysAlias(Head, NameLoc, Operands);
3784 AArch64Operand::CreateToken(Head,
false, NameLoc, getContext()));
3790 Next = Name.
find(
'.', Start + 1);
3791 Head = Name.
slice(Start + 1, Next);
3797 return Error(SuffixLoc,
"invalid condition code");
3799 AArch64Operand::CreateToken(
".",
true, SuffixLoc, getContext()));
3801 AArch64Operand::CreateCondCode(CC, NameLoc, NameLoc, getContext()));
3807 Next = Name.
find(
'.', Start + 1);
3808 Head = Name.
slice(Start, Next);
3812 AArch64Operand::CreateToken(Head,
true, SuffixLoc, getContext()));
3817 bool condCodeFourthOperand =
3818 (Head ==
"ccmp" || Head ==
"ccmn" || Head ==
"fccmp" ||
3819 Head ==
"fccmpe" || Head ==
"fcsel" || Head ==
"csel" ||
3820 Head ==
"csinc" || Head ==
"csinv" || Head ==
"csneg");
3828 bool condCodeSecondOperand = (Head ==
"cset" || Head ==
"csetm");
3829 bool condCodeThirdOperand =
3830 (Head ==
"cinc" || Head ==
"cinv" || Head ==
"cneg");
3838 if (parseOperand(Operands, (N == 4 && condCodeFourthOperand) ||
3839 (N == 3 && condCodeThirdOperand) ||
3840 (N == 2 && condCodeSecondOperand),
3841 condCodeSecondOperand || condCodeThirdOperand)) {
3857 AArch64Operand::CreateToken(
"]",
false, RLoc, getContext()));
3861 AArch64Operand::CreateToken(
"!",
false, ELoc, getContext()));
3874 assert((ZReg >= AArch64::Z0) && (ZReg <= AArch64::Z31));
3875 return (ZReg == ((Reg - AArch64::B0) + AArch64::Z0)) ||
3876 (ZReg == ((Reg - AArch64::H0) + AArch64::Z0)) ||
3877 (ZReg == ((Reg - AArch64::S0) + AArch64::Z0)) ||
3878 (ZReg == ((Reg - AArch64::D0) + AArch64::Z0)) ||
3879 (ZReg == ((Reg - AArch64::Q0) + AArch64::Z0)) ||
3880 (ZReg == ((Reg - AArch64::Z0) + AArch64::Z0));
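// Added illustration (not part of the original listing): this helper treats every
// width-view of the same vector register as a match, e.g. with ZReg == AArch64::Z3
// any of B3, H3, S3, D3, Q3 or Z3 passes; it appears to let the movprfx validation
// below compare destinations regardless of which register view was written.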
bool AArch64AsmParser::validateInstruction(MCInst &Inst, SMLoc &IDLoc,
  PrefixInfo Prefix = NextPrefix;
  NextPrefix = PrefixInfo::CreateFromInst(Inst, MCID.TSFlags);

  if (Prefix.isActive() &&
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " movprfx, suggest replacing movprfx with mov");

      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx writing to a different destination");

      return Error(Loc[0], "instruction is unpredictable when following a"
                           " movprfx and destination also used as non-destructive"
                           " source");

    auto PPRRegClass = AArch64MCRegisterClasses[AArch64::PPRRegClassID];
    if (Prefix.isPredicated()) {
      return Error(IDLoc, "instruction is unpredictable when following a"
                          " predicated movprfx, suggest using unpredicated movprfx");

      return Error(IDLoc, "instruction is unpredictable when following a"
                          " predicated movprfx using a different general predicate");

      return Error(IDLoc, "instruction is unpredictable when following a"
                          " predicated movprfx with a different element size");
3960 case AArch64::LDPSWpre:
3961 case AArch64::LDPWpost:
3962 case AArch64::LDPWpre:
3963 case AArch64::LDPXpost:
3964 case AArch64::LDPXpre: {
3969 return Error(Loc[0],
"unpredictable LDP instruction, writeback base " 3970 "is also a destination");
3972 return Error(Loc[1],
"unpredictable LDP instruction, writeback base " 3973 "is also a destination");
3976 case AArch64::LDPDi:
3977 case AArch64::LDPQi:
3978 case AArch64::LDPSi:
3979 case AArch64::LDPSWi:
3980 case AArch64::LDPWi:
3981 case AArch64::LDPXi: {
3985 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
3988 case AArch64::LDPDpost:
3989 case AArch64::LDPDpre:
3990 case AArch64::LDPQpost:
3991 case AArch64::LDPQpre:
3992 case AArch64::LDPSpost:
3993 case AArch64::LDPSpre:
3994 case AArch64::LDPSWpost: {
3998 return Error(Loc[1],
"unpredictable LDP instruction, Rt2==Rt");
4001 case AArch64::STPDpost:
4002 case AArch64::STPDpre:
4003 case AArch64::STPQpost:
4004 case AArch64::STPQpre:
4005 case AArch64::STPSpost:
4006 case AArch64::STPSpre:
4007 case AArch64::STPWpost:
4008 case AArch64::STPWpre:
4009 case AArch64::STPXpost:
4010 case AArch64::STPXpre: {
4015 return Error(Loc[0],
"unpredictable STP instruction, writeback base " 4016 "is also a source");
4018 return Error(Loc[1],
"unpredictable STP instruction, writeback base " 4019 "is also a source");
4022 case AArch64::LDRBBpre:
4023 case AArch64::LDRBpre:
4024 case AArch64::LDRHHpre:
4025 case AArch64::LDRHpre:
4026 case AArch64::LDRSBWpre:
4027 case AArch64::LDRSBXpre:
4028 case AArch64::LDRSHWpre:
4029 case AArch64::LDRSHXpre:
4030 case AArch64::LDRSWpre:
4031 case AArch64::LDRWpre:
4032 case AArch64::LDRXpre:
4033 case AArch64::LDRBBpost:
4034 case AArch64::LDRBpost:
4035 case AArch64::LDRHHpost:
4036 case AArch64::LDRHpost:
4037 case AArch64::LDRSBWpost:
4038 case AArch64::LDRSBXpost:
4039 case AArch64::LDRSHWpost:
4040 case AArch64::LDRSHXpost:
4041 case AArch64::LDRSWpost:
4042 case AArch64::LDRWpost:
4043 case AArch64::LDRXpost: {
4047 return Error(Loc[0],
"unpredictable LDR instruction, writeback base " 4048 "is also a source");
4051 case AArch64::STRBBpost:
4052 case AArch64::STRBpost:
4053 case AArch64::STRHHpost:
4054 case AArch64::STRHpost:
4055 case AArch64::STRWpost:
4056 case AArch64::STRXpost:
4057 case AArch64::STRBBpre:
4058 case AArch64::STRBpre:
4059 case AArch64::STRHHpre:
4060 case AArch64::STRHpre:
4061 case AArch64::STRWpre:
4062 case AArch64::STRXpre: {
4066 return Error(Loc[0],
"unpredictable STR instruction, writeback base " 4067 "is also a source");
4070 case AArch64::STXRB:
4071 case AArch64::STXRH:
4072 case AArch64::STXRW:
4073 case AArch64::STXRX:
4074 case AArch64::STLXRB:
4075 case AArch64::STLXRH:
4076 case AArch64::STLXRW:
4077 case AArch64::STLXRX: {
4083 return Error(Loc[0],
4084 "unpredictable STXR instruction, status is also a source");
4087 case AArch64::STXPW:
4088 case AArch64::STXPX:
4089 case AArch64::STLXPW:
4090 case AArch64::STLXPX: {
4097 return Error(Loc[0],
4098 "unpredictable STXP instruction, status is also a source");
4101 case AArch64::LDGV: {
4105 return Error(Loc[0],
4106 "unpredictable LDGV instruction, writeback register is also " 4107 "the target register");
4117 case AArch64::ADDSWri:
4118 case AArch64::ADDSXri:
4119 case AArch64::ADDWri:
4120 case AArch64::ADDXri:
4121 case AArch64::SUBSWri:
4122 case AArch64::SUBSXri:
4123 case AArch64::SUBWri:
4124 case AArch64::SUBXri: {
4132 if (classifySymbolRef(Expr, ELFRefKind, DarwinRefKind, Addend)) {
4159 return Error(Loc.
back(),
"invalid immediate expression");
static std::string AArch64MnemonicSpellCheck(StringRef S, uint64_t FBS,
                                             unsigned VariantID = 0);

bool AArch64AsmParser::showMatchError(SMLoc Loc, unsigned ErrCode,
                                      uint64_t ErrorInfo,
                                      /* ... */) {
  switch (ErrCode) {
  case Match_InvalidTiedOperand: {
    auto EqTy = static_cast<const AArch64Operand &>(*Operands[ErrorInfo])
                    .getRegEqualityTy();
    switch (EqTy) {
    case RegConstraintEqualityTy::EqualsSubReg:
      return Error(Loc, "operand must be 64-bit form of destination register");
    case RegConstraintEqualityTy::EqualsSuperReg:
      return Error(Loc, "operand must be 32-bit form of destination register");
    case RegConstraintEqualityTy::EqualsReg:
      return Error(Loc, "operand must match destination register");
    }
    // ...
  }
  case Match_MissingFeature:
    return Error(Loc,
                 "instruction requires a CPU feature not currently enabled");
  case Match_InvalidOperand:
    return Error(Loc, "invalid operand for instruction");
  case Match_InvalidSuffix:
    return Error(Loc, "invalid type suffix for instruction");
  case Match_InvalidCondCode:
    return Error(Loc, "expected AArch64 condition code");
  case Match_AddSubRegExtendSmall:
    return Error(Loc,
      "expected '[su]xt[bhw]' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubRegExtendLarge:
    return Error(Loc,
      "expected 'sxtx' 'uxtx' or 'lsl' with optional integer in range [0, 4]");
  case Match_AddSubSecondSource:
    return Error(Loc,
      "expected compatible register, symbol or integer in range [0, 4095]");
  case Match_LogicalSecondSource:
    return Error(Loc, "expected compatible register or logical immediate");
  case Match_InvalidMovImm32Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0 or 16");
  case Match_InvalidMovImm64Shift:
    return Error(Loc, "expected 'lsl' with optional integer 0, 16, 32 or 48");
  case Match_AddSubRegShift32:
    return Error(Loc,
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 31]");
  case Match_AddSubRegShift64:
    return Error(Loc,
      "expected 'lsl', 'lsr' or 'asr' with optional integer in range [0, 63]");
  case Match_InvalidFPImm:
    return Error(Loc,
                 "expected compatible register or floating-point constant");
  case Match_InvalidMemoryIndexedSImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm5:
    return Error(Loc, "index must be an integer in range [-16, 15].");
  case Match_InvalidMemoryIndexed1SImm4:
    return Error(Loc, "index must be an integer in range [-8, 7].");
  case Match_InvalidMemoryIndexed2SImm4:
    return Error(Loc, "index must be a multiple of 2 in range [-16, 14].");
  case Match_InvalidMemoryIndexed3SImm4:
    return Error(Loc, "index must be a multiple of 3 in range [-24, 21].");
  case Match_InvalidMemoryIndexed4SImm4:
    return Error(Loc, "index must be a multiple of 4 in range [-32, 28].");
  case Match_InvalidMemoryIndexed16SImm4:
    return Error(Loc, "index must be a multiple of 16 in range [-128, 112].");
  case Match_InvalidMemoryIndexed1SImm6:
    return Error(Loc, "index must be an integer in range [-32, 31].");
  case Match_InvalidMemoryIndexedSImm8:
    return Error(Loc, "index must be an integer in range [-128, 127].");
  case Match_InvalidMemoryIndexedSImm9:
    return Error(Loc, "index must be an integer in range [-256, 255].");
  case Match_InvalidMemoryIndexed16SImm9:
    return Error(Loc, "index must be a multiple of 16 in range [-4096, 4080].");
  case Match_InvalidMemoryIndexed8SImm10:
    return Error(Loc, "index must be a multiple of 8 in range [-4096, 4088].");
  case Match_InvalidMemoryIndexed4SImm7:
    return Error(Loc, "index must be a multiple of 4 in range [-256, 252].");
  case Match_InvalidMemoryIndexed8SImm7:
    return Error(Loc, "index must be a multiple of 8 in range [-512, 504].");
  case Match_InvalidMemoryIndexed16SImm7:
    return Error(Loc, "index must be a multiple of 16 in range [-1024, 1008].");
  case Match_InvalidMemoryIndexed8UImm5:
    return Error(Loc, "index must be a multiple of 8 in range [0, 248].");
  case Match_InvalidMemoryIndexed4UImm5:
    return Error(Loc, "index must be a multiple of 4 in range [0, 124].");
  case Match_InvalidMemoryIndexed2UImm5:
    return Error(Loc, "index must be a multiple of 2 in range [0, 62].");
  case Match_InvalidMemoryIndexed8UImm6:
    return Error(Loc, "index must be a multiple of 8 in range [0, 504].");
  case Match_InvalidMemoryIndexed16UImm6:
    return Error(Loc, "index must be a multiple of 16 in range [0, 1008].");
  case Match_InvalidMemoryIndexed4UImm6:
    return Error(Loc, "index must be a multiple of 4 in range [0, 252].");
  case Match_InvalidMemoryIndexed2UImm6:
    return Error(Loc, "index must be a multiple of 2 in range [0, 126].");
  case Match_InvalidMemoryIndexed1UImm6:
    return Error(Loc, "index must be in range [0, 63].");
  case Match_InvalidMemoryWExtend8:
    return Error(Loc, "expected 'uxtw' or 'sxtw' with optional shift of #0");
  case Match_InvalidMemoryWExtend16:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #1");
  case Match_InvalidMemoryWExtend32:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #2");
  case Match_InvalidMemoryWExtend64:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #3");
  case Match_InvalidMemoryWExtend128:
    return Error(Loc,
                 "expected 'uxtw' or 'sxtw' with optional shift of #0 or #4");
  case Match_InvalidMemoryXExtend8:
    return Error(Loc, "expected 'lsl' or 'sxtx' with optional shift of #0");
  case Match_InvalidMemoryXExtend16:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #1");
  case Match_InvalidMemoryXExtend32:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #2");
  case Match_InvalidMemoryXExtend64:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #3");
  case Match_InvalidMemoryXExtend128:
    return Error(Loc,
                 "expected 'lsl' or 'sxtx' with optional shift of #0 or #4");
  case Match_InvalidMemoryIndexed1:
    return Error(Loc, "index must be an integer in range [0, 4095].");
  case Match_InvalidMemoryIndexed2:
    return Error(Loc, "index must be a multiple of 2 in range [0, 8190].");
  case Match_InvalidMemoryIndexed4:
    return Error(Loc, "index must be a multiple of 4 in range [0, 16380].");
  case Match_InvalidMemoryIndexed8:
    return Error(Loc, "index must be a multiple of 8 in range [0, 32760].");
  case Match_InvalidMemoryIndexed16:
    return Error(Loc, "index must be a multiple of 16 in range [0, 65520].");
  case Match_InvalidImm0_1:
    return Error(Loc, "immediate must be an integer in range [0, 1].");
  case Match_InvalidImm0_7:
    return Error(Loc, "immediate must be an integer in range [0, 7].");
  case Match_InvalidImm0_15:
    return Error(Loc, "immediate must be an integer in range [0, 15].");
  case Match_InvalidImm0_31:
    return Error(Loc, "immediate must be an integer in range [0, 31].");
  case Match_InvalidImm0_63:
    return Error(Loc, "immediate must be an integer in range [0, 63].");
  case Match_InvalidImm0_127:
    return Error(Loc, "immediate must be an integer in range [0, 127].");
  case Match_InvalidImm0_255:
    return Error(Loc, "immediate must be an integer in range [0, 255].");
  case Match_InvalidImm0_65535:
    return Error(Loc, "immediate must be an integer in range [0, 65535].");
  case Match_InvalidImm1_8:
    return Error(Loc, "immediate must be an integer in range [1, 8].");
  case Match_InvalidImm1_16:
    return Error(Loc, "immediate must be an integer in range [1, 16].");
  case Match_InvalidImm1_32:
    return Error(Loc, "immediate must be an integer in range [1, 32].");
  case Match_InvalidImm1_64:
    return Error(Loc, "immediate must be an integer in range [1, 64].");
  case Match_InvalidSVEAddSubImm8:
    return Error(Loc, "immediate must be an integer in range [0, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
    return Error(Loc, "immediate must be an integer in range [0, 255] or a "
                      "multiple of 256 in range [256, 65280]");
  case Match_InvalidSVECpyImm8:
    return Error(Loc, "immediate must be an integer in range [-128, 255]"
                      " with a shift amount of 0");
  case Match_InvalidSVECpyImm16:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 65280]");
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
    return Error(Loc, "immediate must be an integer in range [-128, 127] or a "
                      "multiple of 256 in range [-32768, 32512]");
  case Match_InvalidIndexRange1_1:
    return Error(Loc, "expected lane specifier '[1]'");
  case Match_InvalidIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidIndexRange0_1:
    return Error(Loc, "vector lane must be an integer in range [0, 1].");
  case Match_InvalidSVEIndexRange0_63:
    return Error(Loc, "vector lane must be an integer in range [0, 63].");
  case Match_InvalidSVEIndexRange0_31:
    return Error(Loc, "vector lane must be an integer in range [0, 31].");
  case Match_InvalidSVEIndexRange0_15:
    return Error(Loc, "vector lane must be an integer in range [0, 15].");
  case Match_InvalidSVEIndexRange0_7:
    return Error(Loc, "vector lane must be an integer in range [0, 7].");
  case Match_InvalidSVEIndexRange0_3:
    return Error(Loc, "vector lane must be an integer in range [0, 3].");
  case Match_InvalidLabel:
    return Error(Loc, "expected label or encodable integer pc offset");
  // ...
    return Error(Loc, "expected readable system register");
  // ...
    return Error(Loc, "expected writable system register or pstate");
  case Match_InvalidComplexRotationEven:
    return Error(Loc, "complex rotation must be 0, 90, 180 or 270.");
  case Match_InvalidComplexRotationOdd:
    return Error(Loc, "complex rotation must be 90 or 270.");
  case Match_MnemonicFail: {
    std::string Suggestion = AArch64MnemonicSpellCheck(
        ((AArch64Operand &)*Operands[0]).getToken(),
        ComputeAvailableFeatures(STI->getFeatureBits()));
    return Error(Loc, "unrecognized instruction mnemonic" + Suggestion);
  }
  case Match_InvalidGPR64shifted8:
    return Error(Loc, "register must be x0..x30 or xzr, without shift");
  case Match_InvalidGPR64shifted16:
    return Error(Loc,
      "register must be x0..x30 or xzr, with required shift 'lsl #1'");
  case Match_InvalidGPR64shifted32:
    return Error(Loc,
      "register must be x0..x30 or xzr, with required shift 'lsl #2'");
  case Match_InvalidGPR64shifted64:
    return Error(Loc,
      "register must be x0..x30 or xzr, with required shift 'lsl #3'");
  case Match_InvalidGPR64NoXZRshifted8:
    return Error(Loc, "register must be x0..x30 without shift");
  case Match_InvalidGPR64NoXZRshifted16:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #1'");
  case Match_InvalidGPR64NoXZRshifted32:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #2'");
  case Match_InvalidGPR64NoXZRshifted64:
    return Error(Loc, "register must be x0..x30 with required shift 'lsl #3'");
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32SXTW8:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw)'");
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32SXTW16:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #1'");
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32SXTW32:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #2'");
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW64:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, (uxtw|sxtw) #3'");
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, (uxtw|sxtw)'");
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #1'");
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #2'");
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, (lsl|uxtw|sxtw) #3'");
  case Match_InvalidZPR32LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].s'");
  case Match_InvalidZPR32LSL16:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, lsl #1'");
  case Match_InvalidZPR32LSL32:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, lsl #2'");
  case Match_InvalidZPR32LSL64:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].s, lsl #3'");
  case Match_InvalidZPR64LSL8:
    return Error(Loc, "invalid shift/extend specified, expected 'z[0..31].d'");
  case Match_InvalidZPR64LSL16:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, lsl #1'");
  case Match_InvalidZPR64LSL32:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, lsl #2'");
  case Match_InvalidZPR64LSL64:
    return Error(Loc,
      "invalid shift/extend specified, expected 'z[0..31].d, lsl #3'");
  case Match_InvalidZPR0:
    return Error(Loc, "expected register without element width suffix");
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
    return Error(Loc, "invalid element width");
  case Match_InvalidZPR_3b8:
    return Error(Loc, "Invalid restricted vector register, expected z0.b..z7.b");
  case Match_InvalidZPR_3b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z7.h");
  case Match_InvalidZPR_3b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z7.s");
  case Match_InvalidZPR_4b16:
    return Error(Loc, "Invalid restricted vector register, expected z0.h..z15.h");
  case Match_InvalidZPR_4b32:
    return Error(Loc, "Invalid restricted vector register, expected z0.s..z15.s");
  case Match_InvalidZPR_4b64:
    return Error(Loc, "Invalid restricted vector register, expected z0.d..z15.d");
  case Match_InvalidSVEPattern:
    return Error(Loc, "invalid predicate pattern");
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
    return Error(Loc, "invalid predicate register.");
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPredicate3bBReg:
  case Match_InvalidSVEPredicate3bHReg:
  case Match_InvalidSVEPredicate3bSReg:
  case Match_InvalidSVEPredicate3bDReg:
    return Error(Loc, "restricted predicate has range [0, 7].");
  case Match_InvalidSVEExactFPImmOperandHalfOne:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 1.0.");
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
    return Error(Loc, "Invalid floating point constant, expected 0.5 or 2.0.");
  case Match_InvalidSVEExactFPImmOperandZeroOne:
    return Error(Loc, "Invalid floating point constant, expected 0.0 or 1.0.");
  // ...
  }
}
bool AArch64AsmParser::MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
                                               /* ... */
                                               uint64_t &ErrorInfo,
                                               bool MatchingInlineAsm) {
  assert(!Operands.empty() && "Unexpect empty operand list!");
  AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[0]);
  assert(Op.isToken() && "Leading operand should always be a mnemonic!");
  // ...
  unsigned NumOperands = Operands.size();
  // ...
  if (NumOperands == 4 && Tok == "lsl") {
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op2.isScalarReg() && Op3.isImm()) {
      // ...
      uint64_t Op3Val = Op3CE->getValue();
      uint64_t NewOp3Val = 0;
      uint64_t NewOp4Val = 0;
      if (AArch64MCRegisterClasses[AArch64::GPR32allRegClassID].contains(
              /* ... */)) {
        NewOp3Val = (32 - Op3Val) & 0x1f;
        NewOp4Val = 31 - Op3Val;
      } else {
        NewOp3Val = (64 - Op3Val) & 0x3f;
        NewOp4Val = 63 - Op3Val;
      }
      // ...
      Operands[0] = AArch64Operand::CreateToken("ubfm", false, Op.getStartLoc(),
                                                getContext());
      Operands.push_back(AArch64Operand::CreateImm(
          NewOp4, Op3.getStartLoc(), Op3.getEndLoc(), getContext()));
      Operands[3] = AArch64Operand::CreateImm(NewOp3, Op3.getStartLoc(),
                                              Op3.getEndLoc(), getContext());
    }
  }
  else if (NumOperands == 4 && Tok == "bfc") {
    // Handle the "bfc" alias in terms of "bfm".
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand LSBOp = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand WidthOp = static_cast<AArch64Operand &>(*Operands[3]);
    if (Op1.isScalarReg() && LSBOp.isImm() && WidthOp.isImm()) {
      // ...
      if (LSBCE && WidthCE) {
        // ...
        uint64_t Width = WidthCE->getValue();
        // ...
        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                /* ... */))
          // ...
        if (LSB >= RegWidth)
          return Error(LSBOp.getStartLoc(),
                       "expected integer in range [0, 31]");
        if (Width < 1 || Width > RegWidth)
          return Error(WidthOp.getStartLoc(),
                       "expected integer in range [1, 32]");
        // ...
          ImmR = (32 - LSB) & 0x1f;
        // ...
          ImmR = (64 - LSB) & 0x3f;
        // ...
        uint64_t ImmS = Width - 1;
        if (ImmR != 0 && ImmS >= ImmR)
          return Error(WidthOp.getStartLoc(),
                       "requested insert overflows register");
        // ...
        Operands[0] = AArch64Operand::CreateToken("bfm", false,
                                                  Op.getStartLoc(), getContext());
        Operands[2] = AArch64Operand::CreateReg(/* ... */);
        Operands[3] = AArch64Operand::CreateImm(
            ImmRExpr, LSBOp.getStartLoc(), LSBOp.getEndLoc(), getContext());
        Operands.emplace_back(
            AArch64Operand::CreateImm(ImmSExpr, WidthOp.getStartLoc(),
                                      WidthOp.getEndLoc(), getContext()));
      }
    }
  }
  else if (NumOperands == 5) {
    // Handle the "bfi", "sbfiz" and "ubfiz" aliases in terms of the
    // equivalent *bfm instructions.
    if (Tok == "bfi" || Tok == "sbfiz" || Tok == "ubfiz") {
      AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
      AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
      AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
      if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
        // ...
        if (Op3CE && Op4CE) {
          uint64_t Op3Val = Op3CE->getValue();
          uint64_t Op4Val = Op4CE->getValue();
          // ...
          uint64_t RegWidth = 0;
          if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                  /* ... */))
            // ...
          if (Op3Val >= RegWidth)
            return Error(Op3.getStartLoc(),
                         "expected integer in range [0, 31]");
          if (Op4Val < 1 || Op4Val > RegWidth)
            return Error(Op4.getStartLoc(),
                         "expected integer in range [1, 32]");
          // ...
          uint64_t NewOp3Val = 0;
          // ...
            NewOp3Val = (32 - Op3Val) & 0x1f;
          // ...
            NewOp3Val = (64 - Op3Val) & 0x3f;
          // ...
          uint64_t NewOp4Val = Op4Val - 1;
          if (NewOp3Val != 0 && NewOp4Val >= NewOp3Val)
            return Error(Op4.getStartLoc(),
                         "requested insert overflows register");
          // ...
          Operands[3] = AArch64Operand::CreateImm(
              NewOp3, Op3.getStartLoc(), Op3.getEndLoc(), getContext());
          Operands[4] = AArch64Operand::CreateImm(
              NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
          if (Tok == "bfi")
            Operands[0] = AArch64Operand::CreateToken(
                "bfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "sbfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "sbfm", false, Op.getStartLoc(), getContext());
          else if (Tok == "ubfiz")
            Operands[0] = AArch64Operand::CreateToken(
                "ubfm", false, Op.getStartLoc(), getContext());
          // ...
        }
      }
    }
  }
  else if (NumOperands == 5 &&
           (Tok == "bfxil" || Tok == "sbfx" || Tok == "ubfx")) {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    AArch64Operand &Op4 = static_cast<AArch64Operand &>(*Operands[4]);
    if (Op1.isScalarReg() && Op3.isImm() && Op4.isImm()) {
      // ...
      if (Op3CE && Op4CE) {
        uint64_t Op3Val = Op3CE->getValue();
        uint64_t Op4Val = Op4CE->getValue();
        // ...
        uint64_t RegWidth = 0;
        if (AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
                /* ... */))
          // ...
        if (Op3Val >= RegWidth)
          return Error(Op3.getStartLoc(), "expected integer in range [0, 31]");
        if (Op4Val < 1 || Op4Val > RegWidth)
          return Error(Op4.getStartLoc(), "expected integer in range [1, 32]");
        // ...
        uint64_t NewOp4Val = Op3Val + Op4Val - 1;
        if (NewOp4Val >= RegWidth || NewOp4Val < Op3Val)
          return Error(Op4.getStartLoc(),
                       "requested extract overflows register");
        // ...
        Operands[4] = AArch64Operand::CreateImm(
            NewOp4, Op4.getStartLoc(), Op4.getEndLoc(), getContext());
        if (Tok == "bfxil")
          Operands[0] = AArch64Operand::CreateToken(
              "bfm", false, Op.getStartLoc(), getContext());
        else if (Tok == "sbfx")
          Operands[0] = AArch64Operand::CreateToken(
              "sbfm", false, Op.getStartLoc(), getContext());
        else if (Tok == "ubfx")
          Operands[0] = AArch64Operand::CreateToken(
              "ubfm", false, Op.getStartLoc(), getContext());
        // ...
      }
    }
  }
  // Some CPUs mishandle "movi Vd.2d, #0"; when the workaround feature is
  // enabled, rewrite it to the equivalent movi.16b form.
  if (getSTI().getFeatureBits()[AArch64::FeatureZCZeroingFPWorkaround] &&
      NumOperands == 4 && Tok == "movi") {
    AArch64Operand &Op1 = static_cast<AArch64Operand &>(*Operands[1]);
    AArch64Operand &Op2 = static_cast<AArch64Operand &>(*Operands[2]);
    AArch64Operand &Op3 = static_cast<AArch64Operand &>(*Operands[3]);
    if ((Op1.isToken() && Op2.isNeonVectorReg() && Op3.isImm()) ||
        (Op1.isNeonVectorReg() && Op2.isToken() && Op3.isImm())) {
      StringRef Suffix = Op1.isToken() ? Op1.getToken() : Op2.getToken();
      if (Suffix.lower() == ".2d" &&
          cast<MCConstantExpr>(Op3.getImm())->getValue() == 0) {
        Warning(IDLoc, "instruction movi.2d with immediate #0 may not function"
                       " correctly on this CPU, converting to equivalent movi.16b");
        // ...
        unsigned Idx = Op1.isToken() ? 1 : 2;
        Operands[Idx] = AArch64Operand::CreateToken(".16b", false, IDLoc,
                                                    /* ... */);
        // ...
      }
    }
  }
  if (NumOperands == 3 && (Tok == "sxtw" || Tok == "uxtw")) {
    // The matcher expects a GPR64 here, so replace the operand with the
    // appropriately-sized register if necessary.
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
    if (Op.isScalarReg()) {
      // ..., Op.getStartLoc(), Op.getEndLoc(), ...
    }
  }
  else if (NumOperands == 3 && (Tok == "sxtb" || Tok == "sxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            /* ... */)) {
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[2]);
      if (Op.isScalarReg()) {
        // ..., Op.getEndLoc(), getContext());
      }
    }
  }
  else if (NumOperands == 3 && (Tok == "uxtb" || Tok == "uxth")) {
    AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
    if (Op.isScalarReg() &&
        AArch64MCRegisterClasses[AArch64::GPR64allRegClassID].contains(
            /* ... */)) {
      AArch64Operand &Op = static_cast<AArch64Operand &>(*Operands[1]);
      if (Op.isScalarReg()) {
        // ..., Op.getEndLoc(), getContext());
      }
    }
  }
  // ...
  unsigned MatchResult =
      MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 1);
  // ...
  if (MatchResult != Match_Success) {
    // ...
    auto ShortFormNEONErrorInfo = ErrorInfo;
    auto ShortFormNEONMatchResult = MatchResult;
    // ...
    MatchResult =
        MatchInstructionImpl(Operands, Inst, ErrorInfo, MatchingInlineAsm, 0);
    // ...
    // Prefer the short-form diagnostic when the failure is on the suffix token.
    if (MatchResult == Match_InvalidOperand && ErrorInfo == 1 &&
        Operands.size() > 1 && ((AArch64Operand &)*Operands[1]).isToken() &&
        ((AArch64Operand &)*Operands[1]).isTokenSuffix()) {
      MatchResult = ShortFormNEONMatchResult;
      ErrorInfo = ShortFormNEONErrorInfo;
    }
  }
  // ...
  switch (MatchResult) {
  case Match_Success: {
    // ...
    NumOperands = Operands.size();
    for (unsigned i = 1; i < NumOperands; ++i)
      OperandLocs.push_back(Operands[i]->getStartLoc());
    if (validateInstruction(Inst, IDLoc, OperandLocs))
      // ...
    // ...
  }
  case Match_MissingFeature: {
    assert(ErrorInfo && "Unknown missing feature!");
    std::string Msg = "instruction requires:";
    // ...
    for (unsigned i = 0; i < (sizeof(ErrorInfo) * 8 - 1); ++i) {
      if (ErrorInfo & Mask) {
        // ...
      }
      // ...
    }
    return Error(IDLoc, Msg);
  }
  case Match_MnemonicFail:
    return showMatchError(IDLoc, MatchResult, ErrorInfo, Operands);
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // ...
    if (ErrorInfo != ~0ULL) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction",
                     SMRange(IDLoc, getTok().getLoc()));
      // ...
      ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
      if (ErrorLoc == SMLoc())
        // ...
    }
    // If the failure is on a suffix token, report it as an invalid suffix.
    if (((AArch64Operand &)*Operands[ErrorInfo]).isToken() &&
        ((AArch64Operand &)*Operands[ErrorInfo]).isTokenSuffix())
      MatchResult = Match_InvalidSuffix;
    // ...
    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  case Match_InvalidTiedOperand:
  case Match_InvalidMemoryIndexed1:
  case Match_InvalidMemoryIndexed2:
  case Match_InvalidMemoryIndexed4:
  case Match_InvalidMemoryIndexed8:
  case Match_InvalidMemoryIndexed16:
  case Match_InvalidCondCode:
  case Match_AddSubRegExtendSmall:
  case Match_AddSubRegExtendLarge:
  case Match_AddSubSecondSource:
  case Match_LogicalSecondSource:
  case Match_AddSubRegShift32:
  case Match_AddSubRegShift64:
  case Match_InvalidMovImm32Shift:
  case Match_InvalidMovImm64Shift:
  case Match_InvalidFPImm:
  case Match_InvalidMemoryWExtend8:
  case Match_InvalidMemoryWExtend16:
  case Match_InvalidMemoryWExtend32:
  case Match_InvalidMemoryWExtend64:
  case Match_InvalidMemoryWExtend128:
  case Match_InvalidMemoryXExtend8:
  case Match_InvalidMemoryXExtend16:
  case Match_InvalidMemoryXExtend32:
  case Match_InvalidMemoryXExtend64:
  case Match_InvalidMemoryXExtend128:
  case Match_InvalidMemoryIndexed1SImm4:
  case Match_InvalidMemoryIndexed2SImm4:
  case Match_InvalidMemoryIndexed3SImm4:
  case Match_InvalidMemoryIndexed4SImm4:
  case Match_InvalidMemoryIndexed1SImm6:
  case Match_InvalidMemoryIndexed16SImm4:
  case Match_InvalidMemoryIndexed4SImm7:
  case Match_InvalidMemoryIndexed8SImm7:
  case Match_InvalidMemoryIndexed16SImm7:
  case Match_InvalidMemoryIndexed8UImm5:
  case Match_InvalidMemoryIndexed4UImm5:
  case Match_InvalidMemoryIndexed2UImm5:
  case Match_InvalidMemoryIndexed1UImm6:
  case Match_InvalidMemoryIndexed2UImm6:
  case Match_InvalidMemoryIndexed4UImm6:
  case Match_InvalidMemoryIndexed8UImm6:
  case Match_InvalidMemoryIndexed16UImm6:
  case Match_InvalidMemoryIndexedSImm6:
  case Match_InvalidMemoryIndexedSImm5:
  case Match_InvalidMemoryIndexedSImm8:
  case Match_InvalidMemoryIndexedSImm9:
  case Match_InvalidMemoryIndexed16SImm9:
  case Match_InvalidMemoryIndexed8SImm10:
  case Match_InvalidImm0_1:
  case Match_InvalidImm0_7:
  case Match_InvalidImm0_15:
  case Match_InvalidImm0_31:
  case Match_InvalidImm0_63:
  case Match_InvalidImm0_127:
  case Match_InvalidImm0_255:
  case Match_InvalidImm0_65535:
  case Match_InvalidImm1_8:
  case Match_InvalidImm1_16:
  case Match_InvalidImm1_32:
  case Match_InvalidImm1_64:
  case Match_InvalidSVEAddSubImm8:
  case Match_InvalidSVEAddSubImm16:
  case Match_InvalidSVEAddSubImm32:
  case Match_InvalidSVEAddSubImm64:
  case Match_InvalidSVECpyImm8:
  case Match_InvalidSVECpyImm16:
  case Match_InvalidSVECpyImm32:
  case Match_InvalidSVECpyImm64:
  case Match_InvalidIndexRange1_1:
  case Match_InvalidIndexRange0_15:
  case Match_InvalidIndexRange0_7:
  case Match_InvalidIndexRange0_3:
  case Match_InvalidIndexRange0_1:
  case Match_InvalidSVEIndexRange0_63:
  case Match_InvalidSVEIndexRange0_31:
  case Match_InvalidSVEIndexRange0_15:
  case Match_InvalidSVEIndexRange0_7:
  case Match_InvalidSVEIndexRange0_3:
  case Match_InvalidLabel:
  case Match_InvalidComplexRotationEven:
  case Match_InvalidComplexRotationOdd:
  case Match_InvalidGPR64shifted8:
  case Match_InvalidGPR64shifted16:
  case Match_InvalidGPR64shifted32:
  case Match_InvalidGPR64shifted64:
  case Match_InvalidGPR64NoXZRshifted8:
  case Match_InvalidGPR64NoXZRshifted16:
  case Match_InvalidGPR64NoXZRshifted32:
  case Match_InvalidGPR64NoXZRshifted64:
  case Match_InvalidZPR32UXTW8:
  case Match_InvalidZPR32UXTW16:
  case Match_InvalidZPR32UXTW32:
  case Match_InvalidZPR32UXTW64:
  case Match_InvalidZPR32SXTW8:
  case Match_InvalidZPR32SXTW16:
  case Match_InvalidZPR32SXTW32:
  case Match_InvalidZPR32SXTW64:
  case Match_InvalidZPR64UXTW8:
  case Match_InvalidZPR64SXTW8:
  case Match_InvalidZPR64UXTW16:
  case Match_InvalidZPR64SXTW16:
  case Match_InvalidZPR64UXTW32:
  case Match_InvalidZPR64SXTW32:
  case Match_InvalidZPR64UXTW64:
  case Match_InvalidZPR64SXTW64:
  case Match_InvalidZPR32LSL8:
  case Match_InvalidZPR32LSL16:
  case Match_InvalidZPR32LSL32:
  case Match_InvalidZPR32LSL64:
  case Match_InvalidZPR64LSL8:
  case Match_InvalidZPR64LSL16:
  case Match_InvalidZPR64LSL32:
  case Match_InvalidZPR64LSL64:
  case Match_InvalidZPR0:
  case Match_InvalidZPR8:
  case Match_InvalidZPR16:
  case Match_InvalidZPR32:
  case Match_InvalidZPR64:
  case Match_InvalidZPR128:
  case Match_InvalidZPR_3b8:
  case Match_InvalidZPR_3b16:
  case Match_InvalidZPR_3b32:
  case Match_InvalidZPR_4b16:
  case Match_InvalidZPR_4b32:
  case Match_InvalidZPR_4b64:
  case Match_InvalidSVEPredicateAnyReg:
  case Match_InvalidSVEPattern:
  case Match_InvalidSVEPredicateBReg:
  case Match_InvalidSVEPredicateHReg:
  case Match_InvalidSVEPredicateSReg:
  case Match_InvalidSVEPredicateDReg:
  case Match_InvalidSVEPredicate3bAnyReg:
  case Match_InvalidSVEPredicate3bBReg:
  case Match_InvalidSVEPredicate3bHReg:
  case Match_InvalidSVEPredicate3bSReg:
  case Match_InvalidSVEPredicate3bDReg:
  case Match_InvalidSVEExactFPImmOperandHalfOne:
  case Match_InvalidSVEExactFPImmOperandHalfTwo:
  case Match_InvalidSVEExactFPImmOperandZeroOne:
  // ...
  {
    if (ErrorInfo >= Operands.size())
      return Error(IDLoc, "too few operands for instruction",
                   SMRange(IDLoc, (*Operands.back()).getEndLoc()));
    // ...
    SMLoc ErrorLoc = ((AArch64Operand &)*Operands[ErrorInfo]).getStartLoc();
    if (ErrorLoc == SMLoc())
      // ...
    return showMatchError(ErrorLoc, MatchResult, ErrorInfo, Operands);
  }
  // ...
  }
  // ...
}
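
// The lsl, bfc, bfi/sbfiz/ubfiz and bfxil/sbfx/ubfx rewrites above all reduce
// to computing the (immr, imms) pair of the underlying *BFM instruction.
// Below is a standalone sketch of the two conversions used for lsl and
// bfi/bfc, assuming a 32- or 64-bit register width; the function names are
// illustrative only.
#include <cassert>
#include <cstdint>

struct BfmImms { uint64_t ImmR, ImmS; };

// "lsl Rd, Rn, #shift" is "ubfm Rd, Rn, #((regwidth - shift) % regwidth),
//                          #(regwidth - 1 - shift)".
BfmImms lslToUbfm(uint64_t Shift, unsigned RegWidth) {
  uint64_t Mask = RegWidth - 1; // 0x1f for W registers, 0x3f for X registers
  return {(RegWidth - Shift) & Mask, RegWidth - 1 - Shift};
}

// "bfi Rd, Rn, #lsb, #width" is "bfm Rd, Rn, #((regwidth - lsb) % regwidth),
//                                #(width - 1)".
BfmImms bfiToBfm(uint64_t Lsb, uint64_t Width, unsigned RegWidth) {
  uint64_t Mask = RegWidth - 1;
  return {(RegWidth - Lsb) & Mask, Width - 1};
}

int main() {
  // "lsl w0, w1, #4" becomes "ubfm w0, w1, #28, #27".
  BfmImms L = lslToUbfm(4, 32);
  assert(L.ImmR == 28 && L.ImmS == 27);
  // "bfi x0, x1, #8, #16" becomes "bfm x0, x1, #56, #15".
  BfmImms B = bfiToBfm(8, 16, 64);
  assert(B.ImmR == 56 && B.ImmS == 15);
  return 0;
}
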
bool AArch64AsmParser::ParseDirective(AsmToken DirectiveID) {
  // ...
      getContext().getObjectFileInfo()->getObjectFileType();
  // ...
  if (IDVal == ".arch")
    parseDirectiveArch(Loc);
  else if (IDVal == ".cpu")
    parseDirectiveCPU(Loc);
  else if (IDVal == ".tlsdesccall")
    parseDirectiveTLSDescCall(Loc);
  else if (IDVal == ".ltorg" || IDVal == ".pool")
    parseDirectiveLtorg(Loc);
  else if (IDVal == ".unreq")
    parseDirectiveUnreq(Loc);
  else if (IDVal == ".inst")
    parseDirectiveInst(Loc);
  else if (IDVal == ".cfi_negate_ra_state")
    parseDirectiveCFINegateRAState();
  else if (IDVal == ".cfi_b_key_frame")
    parseDirectiveCFIBKeyFrame();
  else if (IDVal == ".arch_extension")
    parseDirectiveArchExtension(Loc);
  // ...
    parseDirectiveLOH(IDVal, Loc);
  // ...
}
static void ExpandCryptoAEK(AArch64::ArchKind ArchKind,
                            SmallVector<StringRef, 4> &RequestedExtensions) {
  const bool NoCrypto =
      (find(RequestedExtensions, "nocrypto") != std::end(RequestedExtensions));
  const bool Crypto =
      (find(RequestedExtensions, "crypto") != std::end(RequestedExtensions));

  if (!NoCrypto && Crypto) {
    switch (ArchKind) {
    // ...
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      // ...
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
      // ...
    }
  } else if (NoCrypto) {
    switch (ArchKind) {
    // ...
    case AArch64::ArchKind::ARMV8_1A:
    case AArch64::ArchKind::ARMV8_2A:
    case AArch64::ArchKind::ARMV8_3A:
      RequestedExtensions.push_back("nosha2");
      // ...
    case AArch64::ArchKind::ARMV8_4A:
    case AArch64::ArchKind::ARMV8_5A:
      // ...
      RequestedExtensions.push_back("nosha3");
      RequestedExtensions.push_back("nosha2");
      // ...
    }
  }
}
bool AArch64AsmParser::parseDirectiveArch(SMLoc L) {
  SMLoc ArchLoc = getLoc();
  // ...
  std::tie(Arch, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');
  // ...
  if (ID == AArch64::ArchKind::INVALID)
    return Error(ArchLoc, "unknown arch name");
  // ...
  std::vector<StringRef> AArch64Features;
  // ...
  std::vector<std::string> ArchFeatures(AArch64Features.begin(),
                                        AArch64Features.end());
  // ...
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');
  // ...
  for (auto Name : RequestedExtensions) {
    bool EnableFeature = true;
    // A leading "no" disables the extension rather than enabling it.
    // ...
      EnableFeature = false;
    // ...
    FeatureBitset Features =
        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
    // ...
  }
  // ...
}
bool AArch64AsmParser::parseDirectiveArchExtension(SMLoc L) {
  // ...
    return Error(getLexer().getLoc(), "expected architecture extension name");
  // ...
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.arch_extension' directive"))
    return true;
  bool EnableFeature = true;
  // ...
    EnableFeature = false;
  // ...
      return Error(ExtLoc, "unsupported architectural extension: " + Name);
  // ...
    FeatureBitset Features =
        ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
    setAvailableFeatures(Features);
  // ...
  return Error(ExtLoc, "unknown architectural extension: " + Name);
}
bool AArch64AsmParser::parseDirectiveCPU(SMLoc L) {
  SMLoc CurLoc = getLoc();
  // ...
  std::tie(CPU, ExtensionString) =
      getParser().parseStringToEndOfStatement().trim().split('+');
  // ...
  if (!ExtensionString.empty())
    ExtensionString.split(RequestedExtensions, '+');
  // ...
  if (!getSTI().isCPUStringValid(CPU)) {
    Error(CurLoc, "unknown CPU name");
    // ...
  }
  // ...
  for (auto Name : RequestedExtensions) {
    // ...
    bool EnableFeature = true;
    // ...
      EnableFeature = false;
    // ...
    bool FoundExtension = false;
    // ...
      FeatureBitset Features =
          ComputeAvailableFeatures(STI.ToggleFeature(ToggleFeatures));
      setAvailableFeatures(Features);
      FoundExtension = true;
    // ...
    if (!FoundExtension)
      Error(CurLoc, "unsupported architectural extension");
    // ...
  }
  // ...
}
bool AArch64AsmParser::parseDirectiveInst(SMLoc Loc) {
  // ...
    return Error(Loc, "expected expression following '.inst' directive");
  auto parseOp = [&]() -> bool {
    // ...
    if (check(getParser().parseExpression(Expr), L, "expected expression"))
      return true;
    // ...
    if (check(!Value, L, "expected constant expression"))
      return true;
    getTargetStreamer().emitInst(Value->getValue());
    // ...
  };
  if (parseMany(parseOp))
    return addErrorSuffix(" in '.inst' directive");
  // ...
}
bool AArch64AsmParser::parseDirectiveTLSDescCall(SMLoc L) {
  StringRef Name;
  if (check(getParser().parseIdentifier(Name), L,
            "expected symbol after directive") ||
      /* ... */)
    return true;
  MCSymbol *Sym = getContext().getOrCreateSymbol(Name);
  // ...
  getParser().getStreamer().EmitInstruction(Inst, getSTI());
  // ...
}
bool AArch64AsmParser::parseDirectiveLOH(StringRef IDVal, SMLoc Loc) {
  // ...
    return TokError("expected an identifier or a number in directive");
  // ...
  int64_t Id = getParser().getTok().getIntVal();
  // ...
    return TokError("invalid numeric identifier in directive");
  // ...
  StringRef Name = getTok().getIdentifier();
  // ...
    return TokError("invalid identifier in directive");
  // ...
  assert(NbArgs != -1 && "Invalid number of arguments");
  // ...
  for (int Idx = 0; Idx < NbArgs; ++Idx) {
    // ...
    if (getParser().parseIdentifier(Name))
      return TokError("expected identifier in directive");
    Args.push_back(getContext().getOrCreateSymbol(Name));
    // ...
    if (Idx + 1 == NbArgs)
      break;
    if (parseToken(AsmToken::Comma,
                   "unexpected token in '" + Twine(IDVal) + "' directive"))
      return true;
  }
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '" + Twine(IDVal) + "' directive"))
    return true;
  getStreamer().EmitLOHDirective((MCLOHType)Kind, Args);
  // ...
}
bool AArch64AsmParser::parseDirectiveLtorg(SMLoc L) {
  // ...
  getTargetStreamer().emitCurrentConstantPool();
  // ...
}
bool AArch64AsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
  // ...
  SMLoc SRegLoc = getLoc();
  // ...
  // Try a scalar register first, then Neon, SVE data and SVE predicate
  // vector registers.
  // ...
    RegisterKind = RegKind::NeonVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::NeonVector);
    // ...
      return Error(SRegLoc, "vector register without type specifier expected");
  // ...
    RegisterKind = RegKind::SVEDataVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
    // ...
      return Error(SRegLoc,
                   "sve vector register without type specifier expected");
  // ...
    RegisterKind = RegKind::SVEPredicateVector;
    ParseRes = tryParseVectorRegister(RegNum, Kind, RegKind::SVEPredicateVector);
    // ...
      return Error(SRegLoc,
                   "sve predicate register without type specifier expected");
  // ...
    return Error(SRegLoc, "register name or alias expected");
  // ...
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected input in .req directive"))
    return true;
  auto pair = std::make_pair(RegisterKind, (unsigned)RegNum);
  if (RegisterReqs.insert(std::make_pair(Name, pair)).first->second != pair)
    Warning(L, "ignoring redefinition of register alias '" + Name + "'");
  // ...
}
bool AArch64AsmParser::parseDirectiveUnreq(SMLoc L) {
  // ...
    return TokError("unexpected input in .unreq directive.");
  // ...
    return addErrorSuffix("in '.unreq' directive");
  // ...
}
bool AArch64AsmParser::parseDirectiveCFINegateRAState() {
  // ...
  getStreamer().EmitCFINegateRAState();
  // ...
}

bool AArch64AsmParser::parseDirectiveCFIBKeyFrame() {
  if (parseToken(AsmToken::EndOfStatement,
                 "unexpected token in '.cfi_b_key_frame'"))
    return true;
  getStreamer().EmitCFIBKeyFrame();
  // ...
}
bool AArch64AsmParser::classifySymbolRef(const MCExpr *Expr,
                                         /* ... */) {
  // ...
  if (const AArch64MCExpr *AE = dyn_cast<AArch64MCExpr>(Expr)) {
    ELFRefKind = AE->getKind();
    Expr = AE->getSubExpr();
  }
  // ...
    DarwinRefKind = SE->getKind();
  // ...
  if (!Relocatable || Res.getSymB())
    // ...
  // ...
}
#define GET_REGISTER_MATCHER
#define GET_SUBTARGET_FEATURE_NAME
#define GET_MATCHER_IMPLEMENTATION
#define GET_MNEMONIC_SPELL_CHECKER
#include "AArch64GenAsmMatcher.inc"

// ...
unsigned AArch64AsmParser::validateTargetOperandClass(MCParsedAsmOperand &AsmOp,
                                                      unsigned Kind) {
  AArch64Operand &Op = static_cast<AArch64Operand &>(AsmOp);
  // ...
  int64_t ExpectedVal;
  // ...
    return Match_InvalidOperand;
  // ...
    return Match_InvalidOperand;
  // ...
    return Match_InvalidOperand;
  // ...
    return Match_Success;
  return Match_InvalidOperand;
}
OperandMatchResultTy
AArch64AsmParser::tryParseGPRSeqPair(OperandVector &Operands) {
  // ...
    Error(S, "expected register");
  // ...
  const MCRegisterClass &WRegClass =
      AArch64MCRegisterClasses[AArch64::GPR32RegClassID];
  const MCRegisterClass &XRegClass =
      AArch64MCRegisterClasses[AArch64::GPR64RegClassID];

  bool isXReg = XRegClass.contains(FirstReg),
       isWReg = WRegClass.contains(FirstReg);
  if (!isXReg && !isWReg) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    // ...
  }
  // ...
  if (FirstEncoding & 0x1) {
    Error(S, "expected first even register of a "
             "consecutive same-size even/odd register pair");
    // ...
  }
  // ...
    Error(getLoc(), "expected comma");
  // ...
  Res = tryParseScalarRegister(SecondReg);
  // ...
  if (/* ... */ ||
      (isXReg && !XRegClass.contains(SecondReg)) ||
      (isWReg && !WRegClass.contains(SecondReg))) {
    Error(E, "expected second odd register of a "
             "consecutive same-size even/odd register pair");
    // ...
  }
  // ...
      &AArch64MCRegisterClasses[AArch64::XSeqPairsClassRegClassID]);
  // ...
      &AArch64MCRegisterClasses[AArch64::WSeqPairsClassRegClassID]);
  // ...
      getLoc(), getContext()));
  // ...
}
template <bool ParseShiftExtend, bool ParseSuffix>
OperandMatchResultTy
AArch64AsmParser::tryParseSVEDataVector(OperandVector &Operands) {
  const SMLoc S = getLoc();
  // ...
      tryParseVectorRegister(RegNum, Kind, RegKind::SVEDataVector);
  // ...
  if (ParseSuffix && Kind.empty())
    // ...
  // ...
  unsigned ElementWidth = KindRes->second;
  // ...
  if (!ParseShiftExtend || getParser().getTok().isNot(AsmToken::Comma)) {
    Operands.push_back(AArch64Operand::CreateVectorReg(
        RegNum, RegKind::SVEDataVector, ElementWidth, S, S, getContext()));
    // ...
  }
  // ...
  Res = tryParseOptionalShiftExtend(ExtOpnd);
  // ...
  auto Ext = static_cast<AArch64Operand *>(ExtOpnd.back().get());
  Operands.push_back(AArch64Operand::CreateVectorReg(
      RegNum, RegKind::SVEDataVector, ElementWidth, S, Ext->getEndLoc(),
      getContext(), Ext->getShiftExtendType(), Ext->getShiftExtendAmount(),
      Ext->hasShiftExtendAmount()));
  // ...
}
OperandMatchResultTy
AArch64AsmParser::tryParseSVEPattern(OperandVector &Operands) {
  // ...
  SMLoc SS = getLoc();
  // ...
    Pattern = MCE->getValue();
  // ...
  auto Pat = AArch64SVEPredPattern::lookupSVEPREDPATByName(TokE.getString());
  // ...
  Pattern = Pat->Encoding;
  assert(Pattern >= 0 && Pattern < 32);
  // ...
      SS, getLoc(), getContext()));
  // ...
}