#define PASS_KEY "x86-slh"
#define DEBUG_TYPE PASS_KEY

STATISTIC(NumCondBranchesTraced, "Number of conditional branches traced");
STATISTIC(NumBranchesUntraced, "Number of branches unable to trace");
STATISTIC(NumAddrRegsHardened,
          "Number of address mode used registers hardened");
STATISTIC(NumPostLoadRegsHardened,
          "Number of post-load register values hardened");
STATISTIC(NumCallsOrJumpsHardened,
          "Number of calls or jumps requiring extra hardening");
STATISTIC(NumInstsInserted, "Number of instructions inserted");
STATISTIC(NumLFENCEsInserted, "Number of lfence instructions inserted");
79 "x86-speculative-load-hardening",
86 "Use LFENCE along each conditional edge to harden against speculative " 87 "loads rather than conditional movs and poisoned pointers."),
92 cl::desc(
"Harden the value loaded *after* it is loaded by " 93 "flushing the loaded bits to 1. This is hard to do " 94 "in general but can be done easily for GPRs."),
99 cl::desc(
"Use a full speculation fence to harden both call and ret edges " 100 "rather than a lighter weight mitigation."),
105 cl::desc(
"Harden interprocedurally by passing our state in and out of " 106 "functions in the high bits of the stack pointer."),
111 cl::desc(
"Sanitize loads from memory. When disable, no " 112 "significant security is provided."),
117 cl::desc(
"Harden indirect calls and jumps against using speculatively " 118 "stored attacker controlled addresses. This is designed to " 119 "mitigate Spectre v1.2 style attacks."),
  return "X86 speculative load hardening";

struct BlockCondInfo {

                            unsigned PredStateReg);

  bool canHardenRegister(unsigned Reg);

  void hardenIndirectCallOrJumpInstr(
void X86SpeculativeLoadHardeningPass::getAnalysisUsage(

  assert(!Succ.isEHPad() && "Shouldn't get edges to EH pads!");

         "Didn't start with the right target!");

         "Without an unconditional branch, the old layout successor should "
         "be an actual successor!");

    UncondBr = &*BrBuilder;

           "Cannot have a branchless successor and an unconditional branch!");

           "A non-branch successor must have been a layout successor before "
           "and now is a layout successor of the new block.");
  if (SuccCount == 1) {

    for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;

      assert(OpMBB.isMBB() && "Block operand to a PHI is not a block!");
      if (OpMBB.getMBB() != &MBB)

      if (SuccCount == 1) {

        MI.addOperand(MF, OpV);

  for (auto &LI : Succ.liveins())

                    << Succ.getName() << "'.\n");
  for (auto &MI : MBB) {

    for (int OpIdx = 1, NumOps = MI.getNumOperands(); OpIdx < NumOps;

    while (!DupIndices.empty()) {

      MI.RemoveOperand(OpIdx + 1);
      MI.RemoveOperand(OpIdx);

      if (MI.getOpcode() == X86::LFENCE)
bool X86SpeculativeLoadHardeningPass::runOnMachineFunction(

  TII = Subtarget->getInstrInfo();
  TRI = Subtarget->getRegisterInfo();

  PS.emplace(MF, &X86::GR64_NOSPRegClass);

    hardenEdgesWithLFENCE(MF);

  if (!HasVulnerableLoad && Infos.empty())

  const int PoisonVal = -1;
  PS->PoisonReg = MRI->createVirtualRegister(PS->RC);
  BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV64ri32), PS->PoisonReg)
      .addImm(PoisonVal);

    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::LFENCE));
    ++NumLFENCEsInserted;

    PS->InitialReg = extractPredStateFromSP(Entry, EntryInsertPt, Loc);

    PS->InitialReg = MRI->createVirtualRegister(PS->RC);
    unsigned PredStateSubReg = MRI->createVirtualRegister(&X86::GR32RegClass);
    auto ZeroI = BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::MOV32r0),
                         PredStateSubReg);

        ZeroI->findRegisterDefOperand(X86::EFLAGS);
           "Must have an implicit def of EFLAGS!");

    BuildMI(Entry, EntryInsertPt, Loc, TII->get(X86::SUBREG_TO_REG),
            PS->InitialReg)

  PS->SSA.Initialize(PS->InitialReg);
  PS->SSA.AddAvailableValue(&Entry, PS->InitialReg);
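  // From here on, the predicate state is carried as an ordinary virtual
  // register in SSA form: the MachineSSAUpdater in PS->SSA is seeded with the
  // value live out of each block (AddAvailableValue), and later queries such
  // as GetValueAtEndOfBlock and RewriteUse materialize PHIs wherever control
  // flow merges paths that carry different hardened state values.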
  auto CMovs = tracePredStateThroughCFG(MF, Infos);

      assert(!MBB.isEHScopeEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isEHFuncletEntry() && "Only Itanium ABI EH supported!");
      assert(!MBB.isCleanupFuncletEntry() && "Only Itanium ABI EH supported!");

      PS->SSA.AddAvailableValue(
          &MBB,
          extractPredStateFromSP(MBB, MBB.SkipPHIsAndLabels(MBB.begin()), Loc));

    unfoldCallAndJumpLoads(MF);

    auto IndirectBrCMovs = tracePredStateThroughIndirectBranches(MF);
    CMovs.append(IndirectBrCMovs.begin(), IndirectBrCMovs.end());

  tracePredStateThroughBlocksAndHarden(MF);

      if (!Op.isReg() || Op.getReg() != PS->InitialReg)

      PS->SSA.RewriteUse(Op);
void X86SpeculativeLoadHardeningPass::hardenEdgesWithLFENCE(

    if (MBB.succ_size() <= 1)

    auto TermIt = MBB.getFirstTerminator();
    if (TermIt == MBB.end() || !TermIt->isBranch())

      if (!SuccMBB->isEHPad())

    auto InsertPt = MBB->SkipPHIsAndLabels(MBB->begin());

    ++NumLFENCEsInserted;
X86SpeculativeLoadHardeningPass::collectBlockCondInfo(MachineFunction &MF) {

    if (MBB.succ_size() <= 1)

    BlockCondInfo Info = {&MBB, {}, nullptr};

      if (!MI.isTerminator())

      if (!MI.isBranch()) {
        Info.CondBrs.clear();

      if (MI.getOpcode() == X86::JMP_1) {
        Info.CondBrs.clear();

        Info.CondBrs.clear();

      Info.CondBrs.push_back(&MI);

    if (Info.CondBrs.empty()) {
      ++NumBranchesUntraced;
      LLVM_DEBUG(dbgs() << "WARNING: unable to secure successors of block:\n";
X86SpeculativeLoadHardeningPass::tracePredStateThroughCFG(

  for (const BlockCondInfo &Info : Infos) {

    ++NumCondBranchesTraced;

        UncondBr ? (UncondBr->getOpcode() == X86::JMP_1

    ++SuccCounts[UncondSucc];
    for (auto *CondBr : CondBrs)
      ++SuccCounts[CondBr->getOperand(0).getMBB()];

    auto BuildCheckingBlockForSuccAndConds =

              (SuccCount == 1 && Succ.pred_size() == 1)

          bool LiveEFLAGS = Succ.isLiveIn(X86::EFLAGS);
            CheckingMBB.addLiveIn(X86::EFLAGS);

          auto InsertPt = CheckingMBB.begin();
          assert((InsertPt == CheckingMBB.end() || !InsertPt->isPHI()) &&
                 "Should never have a PHI in the initial checking block as it "
                 "always has a single predecessor!");

          unsigned CurStateReg = PS->InitialReg;

            int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;

            unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);

                        TII->get(CMovOp), UpdatedStateReg)

            if (!LiveEFLAGS && Cond == Conds.back())

            if (CurStateReg == PS->InitialReg)

            CurStateReg = UpdatedStateReg;

          PS->SSA.AddAvailableValue(&CheckingMBB, CurStateReg);
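          // Conceptually, each conditional edge gets a checking block that
          // executes something like:
          //   cmov<CC> %poison, %state
          // where CC is the condition under which this edge should *not* have
          // been taken. If the branch was mispredicted onto this edge, the
          // predicate state becomes all-ones and poisons every hardened
          // address computed afterwards.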
    std::vector<X86::CondCode> UncondCodeSeq;
    for (auto *CondBr : CondBrs) {

      int &SuccCount = SuccCounts[&Succ];

      UncondCodeSeq.push_back(Cond);

      BuildCheckingBlockForSuccAndConds(MBB, Succ, SuccCount, CondBr, UncondBr,

    assert(SuccCounts[UncondSucc] == 1 &&
           "We should never have more than one edge to the unconditional "
           "successor at this point because every other edge must have been "

    UncondCodeSeq.erase(std::unique(UncondCodeSeq.begin(), UncondCodeSeq.end()),
                        UncondCodeSeq.end());

    BuildCheckingBlockForSuccAndConds(MBB, *UncondSucc, /*SuccCount*/ 1,
                                      UncondBr, UncondBr, UncondCodeSeq);
      Opcode, /*UnfoldLoad*/ true, /*UnfoldStore*/ false, &Index);

void X86SpeculativeLoadHardeningPass::unfoldCallAndJumpLoads(

    for (auto MII = MBB.instr_begin(), MIE = MBB.instr_end(); MII != MIE;) {

          dbgs() << "ERROR: Found an unexpected loading branch or call "

      case X86::FARCALL16m:
      case X86::FARCALL32m:

      case X86::CALL16m_NT:
      case X86::CALL32m_NT:
      case X86::CALL64m_NT:

      case X86::TAILJMPm64:
      case X86::TAILJMPm64_REX:
      case X86::TCRETURNmi64:
      case X86::TCRETURNmi: {

              << "ERROR: Unable to unfold load from instruction:\n";

        unsigned Reg = MRI->createVirtualRegister(UnfoldedRC);

        TII->unfoldMemoryOperand(MF, MI, Reg, /*UnfoldLoad*/ true,
               "Computed unfolded register class but failed to unfold");

        for (auto *NewMI : NewMIs)

          dbgs() << "Unfolded load successfully into:\n";
          for (auto *NewMI : NewMIs) {
X86SpeculativeLoadHardeningPass::tracePredStateThroughIndirectBranches(

  TargetAddrSSA.Initialize(MRI->createVirtualRegister(&X86::GR64RegClass));

    auto MII = MBB.instr_rbegin();
    while (MII != MBB.instr_rend() && MII->isDebugInstr())
    if (MII == MBB.instr_rend())

    case X86::JMP16m_NT:
    case X86::JMP32m_NT:
    case X86::JMP64m_NT:

          "Support for 16-bit indirect branches is not implemented.");
          "Support for 32-bit indirect branches is not implemented.");

          return !OtherTI.isDebugInstr() && &OtherTI != &TI;

        dbgs() << "ERROR: Found other terminators in a block with an indirect "
                  "branch! This is not yet supported! Terminator sequence:\n";

    IndirectTerminatedMBBs.insert(&MBB);

      IndirectTargetMBBs.insert(Succ);

  if (IndirectTargetMBBs.empty())

    if (!IndirectTargetMBBs.count(&MBB))

           "Unexpected EH pad as target of an indirect branch!");

    assert(!MBB.isLiveIn(X86::EFLAGS) &&
           "Cannot check within a block that already has live-in EFLAGS!");

      if (IndirectTerminatedMBBs.count(Pred))

            return Succ->isEHPad() || Succ == &MBB;

          dbgs() << "ERROR: Found conditional entry to target of indirect "

               "an indirect branch!");

      auto InsertPt = Pred->getFirstTerminator();
      unsigned TargetReg = MRI->createVirtualRegister(&X86::GR64RegClass);

          !Subtarget->isPositionIndependent()) {
                TII->get(X86::MOV64ri32), TargetReg)

    MBB.setHasAddressTaken();
    auto InsertPt = MBB.SkipPHIsLabelsAndDebug(MBB.begin());

        !Subtarget->isPositionIndependent()) {

      unsigned AddrReg = MRI->createVirtualRegister(&X86::GR64RegClass);

    int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;

    unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);

        .addReg(PS->InitialReg)

    PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
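    // The net effect for indirect-branch targets: every predecessor records
    // the address it intends to jump to in a shared SSA value (TargetAddrSSA),
    // and each target block re-derives its own address (a MOV64ri32 of its
    // symbol, or an address computation in position-independent code) and
    // compares it against that value. A CMOV then folds the comparison into
    // the predicate state, so landing on the wrong target during speculation
    // poisons the state just as a mispredicted conditional branch would.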
  case TargetOpcode::COPY:
  case TargetOpcode::INSERT_SUBREG:
  case TargetOpcode::SUBREG_TO_REG:

  case X86::IMUL16rri8:
  case X86::IMUL16rri:
  case X86::IMUL32rri8:
  case X86::IMUL32rri:
  case X86::IMUL64rri32:
  case X86::IMUL64rri8:

  case X86::LZCNT16rr:
  case X86::LZCNT32rr:
  case X86::LZCNT64rr:
  case X86::POPCNT16rr:
  case X86::POPCNT32rr:
  case X86::POPCNT64rr:
  case X86::TZCNT16rr:
  case X86::TZCNT32rr:
  case X86::TZCNT64rr:

  case X86::BLCFILL32rr:
  case X86::BLCFILL64rr:
  case X86::BLCIC32rr:
  case X86::BLCIC64rr:
  case X86::BLCMSK32rr:
  case X86::BLCMSK64rr:
  case X86::BLSFILL32rr:
  case X86::BLSFILL64rr:
  case X86::BLSIC32rr:
  case X86::BLSIC64rr:
  case X86::BLSMSK32rr:
  case X86::BLSMSK64rr:
  case X86::TZMSK32rr:
  case X86::TZMSK64rr:

  case X86::BEXTR32rr:
  case X86::BEXTR64rr:
  case X86::BEXTRI32ri:
  case X86::BEXTRI64ri:
  case X86::ROL8r1:  case X86::ROL16r1:  case X86::ROL32r1:  case X86::ROL64r1:
  case X86::ROL8rCL: case X86::ROL16rCL: case X86::ROL32rCL: case X86::ROL64rCL:
  case X86::ROL8ri:  case X86::ROL16ri:  case X86::ROL32ri:  case X86::ROL64ri:
  case X86::ROR8r1:  case X86::ROR16r1:  case X86::ROR32r1:  case X86::ROR64r1:
  case X86::ROR8rCL: case X86::ROR16rCL: case X86::ROR32rCL: case X86::ROR64rCL:
  case X86::ROR8ri:  case X86::ROR16ri:  case X86::ROR32ri:  case X86::ROR64ri:
  case X86::SAR8r1:  case X86::SAR16r1:  case X86::SAR32r1:  case X86::SAR64r1:
  case X86::SAR8rCL: case X86::SAR16rCL: case X86::SAR32rCL: case X86::SAR64rCL:
  case X86::SAR8ri:  case X86::SAR16ri:  case X86::SAR32ri:  case X86::SAR64ri:
  case X86::SHL8r1:  case X86::SHL16r1:  case X86::SHL32r1:  case X86::SHL64r1:
  case X86::SHL8rCL: case X86::SHL16rCL: case X86::SHL32rCL: case X86::SHL64rCL:
  case X86::SHL8ri:  case X86::SHL16ri:  case X86::SHL32ri:  case X86::SHL64ri:
  case X86::SHR8r1:  case X86::SHR16r1:  case X86::SHR32r1:  case X86::SHR64r1:
  case X86::SHR8rCL: case X86::SHR16rCL: case X86::SHR32rCL: case X86::SHR64rCL:
  case X86::SHR8ri:  case X86::SHR16ri:  case X86::SHR32ri:  case X86::SHR64ri:
  case X86::SHLD16rrCL: case X86::SHLD32rrCL: case X86::SHLD64rrCL:
  case X86::SHLD16rri8: case X86::SHLD32rri8: case X86::SHLD64rri8:
  case X86::SHRD16rrCL: case X86::SHRD32rrCL: case X86::SHRD64rrCL:
  case X86::SHRD16rri8: case X86::SHRD32rri8: case X86::SHRD64rri8:
  case X86::ADC8rr:  case X86::ADC8ri:
  case X86::ADC16rr: case X86::ADC16ri: case X86::ADC16ri8:
  case X86::ADC32rr: case X86::ADC32ri: case X86::ADC32ri8:
  case X86::ADC64rr: case X86::ADC64ri8: case X86::ADC64ri32:
  case X86::ADD8rr:  case X86::ADD8ri:
  case X86::ADD16rr: case X86::ADD16ri: case X86::ADD16ri8:
  case X86::ADD32rr: case X86::ADD32ri: case X86::ADD32ri8:
  case X86::ADD64rr: case X86::ADD64ri8: case X86::ADD64ri32:
  case X86::AND8rr:  case X86::AND8ri:
  case X86::AND16rr: case X86::AND16ri: case X86::AND16ri8:
  case X86::AND32rr: case X86::AND32ri: case X86::AND32ri8:
  case X86::AND64rr: case X86::AND64ri8: case X86::AND64ri32:
  case X86::OR8rr:   case X86::OR8ri:
  case X86::OR16rr:  case X86::OR16ri:  case X86::OR16ri8:
  case X86::OR32rr:  case X86::OR32ri:  case X86::OR32ri8:
  case X86::OR64rr:  case X86::OR64ri8: case X86::OR64ri32:
  case X86::SBB8rr:  case X86::SBB8ri:
  case X86::SBB16rr: case X86::SBB16ri: case X86::SBB16ri8:
  case X86::SBB32rr: case X86::SBB32ri: case X86::SBB32ri8:
  case X86::SBB64rr: case X86::SBB64ri8: case X86::SBB64ri32:
  case X86::SUB8rr:  case X86::SUB8ri:
  case X86::SUB16rr: case X86::SUB16ri: case X86::SUB16ri8:
  case X86::SUB32rr: case X86::SUB32ri: case X86::SUB32ri8:
  case X86::SUB64rr: case X86::SUB64ri8: case X86::SUB64ri32:
  case X86::XOR8rr:  case X86::XOR8ri:
  case X86::XOR16rr: case X86::XOR16ri: case X86::XOR16ri8:
  case X86::XOR32rr: case X86::XOR32ri: case X86::XOR32ri8:
  case X86::XOR64rr: case X86::XOR64ri8: case X86::XOR64ri32:

  case X86::ADCX32rr: case X86::ADCX64rr:
  case X86::ADOX32rr: case X86::ADOX64rr:
  case X86::ANDN32rr: case X86::ANDN64rr:

  case X86::DEC8r: case X86::DEC16r: case X86::DEC32r: case X86::DEC64r:
  case X86::INC8r: case X86::INC16r: case X86::INC32r: case X86::INC64r:
  case X86::NEG8r: case X86::NEG16r: case X86::NEG32r: case X86::NEG64r:

      LLVM_DEBUG(dbgs() << "  Unable to harden post-load due to EFLAGS: ";
  case X86::NOT8r: case X86::NOT16r: case X86::NOT32r: case X86::NOT64r:

  case X86::MOVSX16rr8:
  case X86::MOVSX32rr8: case X86::MOVSX32rr16:
  case X86::MOVSX64rr8: case X86::MOVSX64rr16: case X86::MOVSX64rr32:
  case X86::MOVZX16rr8:
  case X86::MOVZX32rr8: case X86::MOVZX32rr16:
  case X86::MOVZX64rr8: case X86::MOVZX64rr16:

  case X86::LEA64_32r:
  case X86::IMUL16rmi8:
  case X86::IMUL16rmi:
  case X86::IMUL32rmi8:
  case X86::IMUL32rmi:
  case X86::IMUL64rmi32:
  case X86::IMUL64rmi8:

  case X86::LZCNT16rm:
  case X86::LZCNT32rm:
  case X86::LZCNT64rm:
  case X86::POPCNT16rm:
  case X86::POPCNT32rm:
  case X86::POPCNT64rm:
  case X86::TZCNT16rm:
  case X86::TZCNT32rm:
  case X86::TZCNT64rm:

  case X86::BLCFILL32rm:
  case X86::BLCFILL64rm:
  case X86::BLCIC32rm:
  case X86::BLCIC64rm:
  case X86::BLCMSK32rm:
  case X86::BLCMSK64rm:
  case X86::BLSFILL32rm:
  case X86::BLSFILL64rm:
  case X86::BLSIC32rm:
  case X86::BLSIC64rm:
  case X86::BLSMSK32rm:
  case X86::BLSMSK64rm:
  case X86::TZMSK32rm:
  case X86::TZMSK64rm:

  case X86::BEXTR32rm:
  case X86::BEXTR64rm:
  case X86::BEXTRI32mi:
  case X86::BEXTRI64mi:

      LLVM_DEBUG(dbgs() << "  Unable to harden post-load due to EFLAGS: ";
  case X86::CVTTSD2SI64rm: case X86::VCVTTSD2SI64rm: case X86::VCVTTSD2SI64Zrm:
  case X86::CVTTSD2SIrm:   case X86::VCVTTSD2SIrm:   case X86::VCVTTSD2SIZrm:
  case X86::CVTTSS2SI64rm: case X86::VCVTTSS2SI64rm: case X86::VCVTTSS2SI64Zrm:
  case X86::CVTTSS2SIrm:   case X86::VCVTTSS2SIrm:   case X86::VCVTTSS2SIZrm:
  case X86::CVTSI2SDrm:    case X86::VCVTSI2SDrm:    case X86::VCVTSI2SDZrm:
  case X86::CVTSI2SSrm:    case X86::VCVTSI2SSrm:    case X86::VCVTSI2SSZrm:
  case X86::CVTSI642SDrm:  case X86::VCVTSI642SDrm:  case X86::VCVTSI642SDZrm:
  case X86::CVTSI642SSrm:  case X86::VCVTSI642SSrm:  case X86::VCVTSI642SSZrm:
  case X86::CVTSS2SDrm:    case X86::VCVTSS2SDrm:    case X86::VCVTSS2SDZrm:
  case X86::CVTSD2SSrm:    case X86::VCVTSD2SSrm:    case X86::VCVTSD2SSZrm:

  case X86::VCVTTSD2USI64Zrm:
  case X86::VCVTTSD2USIZrm:
  case X86::VCVTTSS2USI64Zrm:
  case X86::VCVTTSS2USIZrm:
  case X86::VCVTUSI2SDZrm:
  case X86::VCVTUSI642SDZrm:
  case X86::VCVTUSI2SSZrm:
  case X86::VCVTUSI642SSZrm:

  case X86::MOV8rm_NOREX:

  case X86::MOVSX16rm8:
  case X86::MOVSX32rm16:
  case X86::MOVSX32rm8:
  case X86::MOVSX32rm8_NOREX:
  case X86::MOVSX64rm16:
  case X86::MOVSX64rm32:
  case X86::MOVSX64rm8:
  case X86::MOVZX16rm8:
  case X86::MOVZX32rm16:
  case X86::MOVZX32rm8:
  case X86::MOVZX32rm8_NOREX:
  case X86::MOVZX64rm16:
  case X86::MOVZX64rm8:

    if (DefOp->isDead())

  if (MI.killsRegister(X86::EFLAGS, &TRI))
void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(

            return Op.isReg() && LoadDepRegs.test(Op.getReg());

          LoadDepRegs.set(Def.getReg());

      if (MI.getOpcode() == X86::LFENCE)

        if (MemRefBeginIdx < 0) {
                     << "WARNING: unable to harden loading instruction: ";

        unsigned BaseReg = 0, IndexReg = 0;
        if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
            BaseMO.getReg() != X86::NoRegister)
          BaseReg = BaseMO.getReg();
        if (IndexMO.getReg() != X86::NoRegister)
          IndexReg = IndexMO.getReg();

        if (!BaseReg && !IndexReg)

        if ((BaseReg && LoadDepRegs.test(BaseReg)) ||
            (IndexReg && LoadDepRegs.test(IndexReg)))

            MI.getDesc().getNumDefs() == 1 && MI.getOperand(0).isReg() &&
            canHardenRegister(MI.getOperand(0).getReg()) &&
            !HardenedAddrRegs.count(BaseReg) &&
            !HardenedAddrRegs.count(IndexReg)) {

          HardenedAddrRegs.insert(MI.getOperand(0).getReg());

            HardenedAddrRegs.insert(BaseReg);
            HardenedAddrRegs.insert(IndexReg);

          LoadDepRegs.set(Def.getReg());
1777 "Requested to harden both the address and def of a load!");
1780 if (HardenLoadAddr.
erase(&
MI)) {
1783 assert(MemRefBeginIdx >= 0 &&
"Cannot have an invalid index here!");
1791 hardenLoadAddr(
MI, BaseMO, IndexMO, AddrRegToHardenedReg);
1797 if (HardenPostLoad.
erase(&
MI)) {
1798 assert(!
MI.isCall() &&
"Must not try to post-load harden a call!");
1805 MachineInstr *SunkMI = sinkPostLoadHardenedInst(
MI, HardenPostLoad);
1810 if (SunkMI != &
MI) {
1817 HardenPostLoad.
insert(SunkMI);
1822 unsigned HardenedReg = hardenPostLoad(
MI);
1825 AddrRegToHardenedReg[HardenedReg] = HardenedReg;
1836 hardenIndirectCallOrJumpInstr(
MI, AddrRegToHardenedReg);
1843 if (!
MI.isCall() && !
MI.isReturn())
1848 if (
MI.isReturn() && !
MI.isCall()) {
1849 hardenReturnInstr(
MI);
1856 assert(
MI.isCall() &&
"Should only reach here for calls!");
1857 tracePredStateThroughCall(
MI);
1860 HardenPostLoad.
clear();
1861 HardenLoadAddr.
clear();
1862 HardenedAddrRegs.
clear();
1863 AddrRegToHardenedReg.
clear();
1868 LoadDepRegs.
clear();
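  // In short, this walk chooses between two strategies for each vulnerable
  // load: harden the address operands before the load executes
  // (hardenLoadAddr), or let the load execute and flush the loaded value
  // afterwards (hardenPostLoad), preferring the latter when the loaded
  // register itself feeds later address computations. Calls, returns, and
  // indirect jumps get their own handling so the predicate state survives
  // across them.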
unsigned X86SpeculativeLoadHardeningPass::saveEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt,
    DebugLoc Loc) {

  unsigned Reg = MRI->createVirtualRegister(&X86::GR32RegClass);

  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), Reg).addReg(X86::EFLAGS);

void X86SpeculativeLoadHardeningPass::restoreEFLAGS(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
    unsigned Reg) {
  BuildMI(MBB, InsertPt, Loc, TII->get(X86::COPY), X86::EFLAGS).addReg(Reg);

void X86SpeculativeLoadHardeningPass::mergePredStateIntoSP(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator InsertPt, DebugLoc Loc,
    unsigned PredStateReg) {
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);

  auto ShiftI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHL64ri), TmpReg)

  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), X86::RSP)
unsigned X86SpeculativeLoadHardeningPass::extractPredStateFromSP(

  unsigned PredStateReg = MRI->createVirtualRegister(PS->RC);
  unsigned TmpReg = MRI->createVirtualRegister(PS->RC);

  BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), TmpReg)

  BuildMI(MBB, InsertPt, Loc, TII->get(X86::SAR64ri), PredStateReg)
      .addReg(TmpReg)
      .addImm(TRI->getRegSizeInBits(*PS->RC) - 1);

  return PredStateReg;
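// Taken together, mergePredStateIntoSP and extractPredStateFromSP pass the
// predicate state across calls and returns in the otherwise-unused high bit of
// RSP: the state (0 or -1) is shifted up so that only its top bit is OR-ed
// into RSP, and on the other side an arithmetic shift right by the register
// width minus one smears that single bit back out into a full 64-bit
// all-zeros or all-ones value.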
void X86SpeculativeLoadHardeningPass::hardenLoadAddr(

  if (BaseMO.isFI()) {

        dbgs() << "  Skipping hardening base of explicit stack frame load: ";

  } else if (BaseMO.getReg() == X86::RIP ||
             BaseMO.getReg() == X86::NoRegister) {

        dbgs() << "  Cannot harden base of "
               << (BaseMO.getReg() == X86::RIP ? "RIP-relative" : "no-base")
               << " address in a load!");

           "Only allowed to have a frame index or register base.");
    HardenOpRegs.push_back(&BaseMO);

  if (IndexMO.getReg() != X86::NoRegister &&
      (HardenOpRegs.empty() ||
       HardenOpRegs.front()->getReg() != IndexMO.getReg()))
    HardenOpRegs.push_back(&IndexMO);

  assert((HardenOpRegs.size() == 1 || HardenOpRegs.size() == 2) &&
         "Should have exactly one or two registers to harden!");
  assert((HardenOpRegs.size() == 1 ||
          HardenOpRegs[0]->getReg() != HardenOpRegs[1]->getReg()) &&
         "Should not have two of the same registers!");

    auto It = AddrRegToHardenedReg.find(Op->getReg());
    if (It == AddrRegToHardenedReg.end())

  if (HardenOpRegs.empty())

  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  unsigned FlagsReg = 0;
  if (EFLAGSLive && !Subtarget->hasBMI2()) {

    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

    unsigned OpReg = Op->getReg();
    auto *OpRC = MRI->getRegClass(OpReg);
    unsigned TmpReg = MRI->createVirtualRegister(OpRC);

    if (!Subtarget->hasVLX() && (OpRC->hasSuperClassEq(&X86::VR128RegClass) ||
                                 OpRC->hasSuperClassEq(&X86::VR256RegClass))) {
      assert(Subtarget->hasAVX2() && "AVX2-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128RegClass);

      unsigned VStateReg = MRI->createVirtualRegister(&X86::VR128RegClass);
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::VMOV64toPQIrr), VStateReg)

      unsigned VBStateReg = MRI->createVirtualRegister(OpRC);
      auto BroadcastI = BuildMI(MBB, InsertPt, Loc,
                                TII->get(Is128Bit ? X86::VPBROADCASTQrr
                                                  : X86::VPBROADCASTQYrr),
                                VBStateReg)

      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();

                  TII->get(Is128Bit ? X86::VPORrr : X86::VPORYrr), TmpReg)

    } else if (OpRC->hasSuperClassEq(&X86::VR128XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR256XRegClass) ||
               OpRC->hasSuperClassEq(&X86::VR512RegClass)) {
      assert(Subtarget->hasAVX512() && "AVX512-specific register classes!");
      bool Is128Bit = OpRC->hasSuperClassEq(&X86::VR128XRegClass);
      bool Is256Bit = OpRC->hasSuperClassEq(&X86::VR256XRegClass);
      if (Is128Bit || Is256Bit)
        assert(Subtarget->hasVLX() && "AVX512VL-specific register classes!");

      unsigned VStateReg = MRI->createVirtualRegister(OpRC);
      unsigned BroadcastOp =
          Is128Bit ? X86::VPBROADCASTQrZ128r
                   : Is256Bit ? X86::VPBROADCASTQrZ256r : X86::VPBROADCASTQrZr;
          BuildMI(MBB, InsertPt, Loc, TII->get(BroadcastOp), VStateReg)

      LLVM_DEBUG(dbgs() << "  Inserting broadcast: "; BroadcastI->dump();

      unsigned OrOp = Is128Bit ? X86::VPORQZ128rr
                               : Is256Bit ? X86::VPORQZ256rr : X86::VPORQZrr;
      auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOp), TmpReg)

      assert(OpRC->hasSuperClassEq(&X86::GR64RegClass) &&
             "Not a supported register class for address hardening!");

        auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(X86::OR64rr), TmpReg)

        BuildMI(MBB, InsertPt, Loc, TII->get(X86::SHRX64rr), TmpReg)

           "Should not have checked this register yet!");
    AddrRegToHardenedReg[Op->getReg()] = TmpReg;

    ++NumAddrRegsHardened;

  restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
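  // The common GPR case above is the heart of the mitigation: the 64-bit
  // predicate state (0 on the architectural path, -1 under misspeculation) is
  // OR-ed into every base and index register feeding a vulnerable load, so a
  // misspeculated access collapses to an all-ones address instead of
  // attacker-chosen memory. The vector paths only exist to broadcast that same
  // 64-bit state across wider registers; SHRX is used in place of OR when
  // EFLAGS must be preserved and BMI2 is available, since BMI2 shifts do not
  // clobber the flags.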
MachineInstr *X86SpeculativeLoadHardeningPass::sinkPostLoadHardenedInst(

         "Cannot get here with a non-invariant load!");

  auto SinkCheckToSingleUse =

               "Data variant instruction being hardened!");

        assert(MemRefBeginIdx >= 0 &&
               "Should always have mem references here!");

        if ((BaseMO.isReg() && BaseMO.getReg() == DefReg) ||

        if (UseMI.getDesc().getNumDefs() > 1)

        unsigned UseDefReg = UseMI.getOperand(0).getReg();
        if (!TRI->isVirtualRegister(UseDefReg) ||
            !canHardenRegister(UseDefReg))

        SingleUseMI = &UseMI;

    return {SingleUseMI};
bool X86SpeculativeLoadHardeningPass::canHardenRegister(unsigned Reg) {
  auto *RC = MRI->getRegClass(Reg);
  int RegBytes = TRI->getRegSizeInBits(*RC) / 8;

      &X86::GR8_NOREXRegClass, &X86::GR16_NOREXRegClass,
      &X86::GR32_NOREXRegClass, &X86::GR64_NOREXRegClass};
  if (RC == NOREXRegClasses[Log2_32(RegBytes)])

      &X86::GR8RegClass, &X86::GR16RegClass, &X86::GR32RegClass,
      &X86::GR64RegClass};
unsigned X86SpeculativeLoadHardeningPass::hardenValueInRegister(

  assert(canHardenRegister(Reg) && "Cannot harden this register!");
  assert(TRI->isVirtualRegister(Reg) && "Cannot harden a physical register!");

  auto *RC = MRI->getRegClass(Reg);
  int Bytes = TRI->getRegSizeInBits(*RC) / 8;

  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);

    unsigned SubRegImms[] = {X86::sub_8bit, X86::sub_16bit, X86::sub_32bit};
    unsigned SubRegImm = SubRegImms[Log2_32(Bytes)];
    unsigned NarrowStateReg = MRI->createVirtualRegister(RC);
    BuildMI(MBB, InsertPt, Loc, TII->get(TargetOpcode::COPY), NarrowStateReg)
        .addReg(StateReg, 0, SubRegImm);
    StateReg = NarrowStateReg;

  unsigned FlagsReg = 0;
    FlagsReg = saveEFLAGS(MBB, InsertPt, Loc);

  unsigned NewReg = MRI->createVirtualRegister(RC);
  unsigned OrOpCodes[] = {X86::OR8rr, X86::OR16rr, X86::OR32rr, X86::OR64rr};
  unsigned OrOpCode = OrOpCodes[Log2_32(Bytes)];
  auto OrI = BuildMI(MBB, InsertPt, Loc, TII->get(OrOpCode), NewReg)

    restoreEFLAGS(MBB, InsertPt, Loc, FlagsReg);
unsigned X86SpeculativeLoadHardeningPass::hardenPostLoad(MachineInstr &MI) {

  unsigned OldDefReg = DefOp.getReg();
  auto *DefRC = MRI->getRegClass(OldDefReg);

  unsigned UnhardenedReg = MRI->createVirtualRegister(DefRC);
  DefOp.setReg(UnhardenedReg);

  unsigned HardenedReg = hardenValueInRegister(
      UnhardenedReg, MBB, std::next(MI.getIterator()), Loc);

  MRI->replaceRegWith(OldDefReg, HardenedReg);

  ++NumPostLoadRegsHardened;
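// Post-load hardening leaves the load itself untouched: the load now defines a
// fresh "unhardened" virtual register, hardenValueInRegister ORs the predicate
// state into it immediately after the load, and every original use is
// rewritten to the hardened copy. Under misspeculation a consumer therefore
// only ever sees an all-ones value, never the secret data that was loaded.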
void X86SpeculativeLoadHardeningPass::hardenReturnInstr(MachineInstr &MI) {

  mergePredStateIntoSP(MBB, InsertPt, Loc, PS->SSA.GetValueAtEndOfBlock(&MBB));
void X86SpeculativeLoadHardeningPass::tracePredStateThroughCall(

    BuildMI(MBB, std::next(InsertPt), Loc, TII->get(X86::LFENCE));
    ++NumLFENCEsInserted;

  unsigned StateReg = PS->SSA.GetValueAtEndOfBlock(&MBB);
  mergePredStateIntoSP(MBB, InsertPt, Loc, StateReg);

  unsigned ExpectedRetAddrReg = 0;

    ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);

        !Subtarget->isPositionIndependent()) {
      BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64ri32), ExpectedRetAddrReg)

      BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ExpectedRetAddrReg)

  if (!ExpectedRetAddrReg) {
    ExpectedRetAddrReg = MRI->createVirtualRegister(AddrRC);
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::MOV64rm), ExpectedRetAddrReg)

  unsigned NewStateReg = extractPredStateFromSP(MBB, InsertPt, Loc);

      !Subtarget->isPositionIndependent()) {

    BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64ri32))

    unsigned ActualRetAddrReg = MRI->createVirtualRegister(AddrRC);
    BuildMI(MBB, InsertPt, Loc, TII->get(X86::LEA64r), ActualRetAddrReg)

    BuildMI(MBB, InsertPt, Loc, TII->get(X86::CMP64rr))

  int PredStateSizeInBytes = TRI->getRegSizeInBits(*PS->RC) / 8;

  unsigned UpdatedStateReg = MRI->createVirtualRegister(PS->RC);
  auto CMovI = BuildMI(MBB, InsertPt, Loc, TII->get(CMovOp), UpdatedStateReg)

  PS->SSA.AddAvailableValue(&MBB, UpdatedStateReg);
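  // Around each call the state is stashed in the high bit of RSP before the
  // call and re-extracted afterwards. In addition, the expected return address
  // (materialized before the call via MOV64ri32, LEA, or a reload) is compared
  // against the actual return address after the call; a CMOV poisons the
  // freshly extracted state if they differ, which defeats speculative
  // execution past a mispredicted or attacker-controlled return.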
void X86SpeculativeLoadHardeningPass::hardenIndirectCallOrJumpInstr(

  case X86::FARCALL16m:
  case X86::FARCALL32m:
  case X86::FARCALL64:
  case X86::FARJMP16m:
  case X86::FARJMP32m:

  assert(!MI.mayLoad() && "Found a lingering loading instruction!");

  unsigned OldTargetReg = TargetOp.getReg();

  unsigned &HardenedTargetReg = AddrRegToHardenedReg[OldTargetReg];

  if (!HardenedTargetReg)
    HardenedTargetReg = hardenValueInRegister(

  TargetOp.setReg(HardenedTargetReg);

  ++NumCallsOrJumpsHardened;
2632 "X86 speculative load hardener",
false,
false)
2637 return new X86SpeculativeLoadHardeningPass();