using namespace llvm;

#define DEBUG_TYPE "aarch64-speculation-hardening"
#define AARCH64_SPECULATION_HARDENING_NAME "AArch64 speculation hardening pass"

static cl::opt<bool> HardenLoads("aarch64-slh-loads", cl::Hidden,
                                 cl::desc("Sanitize loads from memory."),
                                 cl::init(true));
class AArch64SpeculationHardening : public MachineFunctionPass {
  // ...
  unsigned MisspeculatingTaintReg;
  unsigned MisspeculatingTaintReg32Bit;
  bool UseControlFlowSpeculationBarrier;
  // ... (declarations of the helpers defined below, among them
  //      instrumentControlFlow(MachineBasicBlock &MBB,
  //                            bool &UsesFullSpeculationBarrier),
  //      insertRegToSPTaintPropagation(..., unsigned TmpReg) const,
  //      expandSpeculationSafeValue(..., bool UsesFullSpeculationBarrier) and
  //      lowerSpeculationSafeValuePseudos(..., bool UsesFullSpeculationBarrier))
};

INITIALIZE_PASS(AArch64SpeculationHardening, "aarch64-speculation-hardening",
                AARCH64_SPECULATION_HARDENING_NAME, false, false)
bool AArch64SpeculationHardening::endsWithCondControlFlow(
    MachineBasicBlock &MBB, MachineBasicBlock *&TBB, MachineBasicBlock *&FBB,
    AArch64CC::CondCode &CondCode) const {
  // ...
  if (analyzeBranchCondCode.empty())
    return false;
  // ...
  FBB = MBB.getFallThrough();
  // ...
  assert(MBB.succ_size() == 2);
  assert(analyzeBranchCondCode.size() == 1 && "unknown Cond array format");
  // ...
}
void AArch64SpeculationHardening::insertFullSpeculationBarrier(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    DebugLoc DL) const {
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::DSB)).addImm(0xf);
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::ISB)).addImm(0xf);
}
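For reference, a sketch of the assembly this helper emits; the 0xf immediates select the full-system (SY) barrier option:

// Emitted sequence (sketch):
//   dsb sy   // data synchronization barrier: completes outstanding accesses
//   isb      // instruction synchronization barrier: flushes the pipeline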
void AArch64SpeculationHardening::insertTrackingCode(
    MachineBasicBlock &SplitEdgeBB, AArch64CC::CondCode CondCode,
    DebugLoc DL) const {
  if (UseControlFlowSpeculationBarrier) {
    insertFullSpeculationBarrier(SplitEdgeBB, SplitEdgeBB.begin(), DL);
  } else {
    BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::CSELXr))
        .addDef(MisspeculatingTaintReg)
        .addUse(MisspeculatingTaintReg)
        /* ... */;
  }
}
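A sketch of how the elided CSEL operands plausibly complete (an assumption based on the surrounding fragments, not verbatim source): on the split edge whose branch condition is CondCode, the taint register keeps its value when the condition actually held, and is zeroed when execution only got here by mis-speculating the branch.

// Sketch only, in the context of insertTrackingCode above; the emitted
// assembly is roughly "csel x16, x16, xzr, <cond>".
BuildMI(SplitEdgeBB, SplitEdgeBB.begin(), DL, TII->get(AArch64::CSELXr))
    .addDef(MisspeculatingTaintReg)  // x16: all-ones on the correct path
    .addUse(MisspeculatingTaintReg)  // keep the taint value if <cond> holds
    .addUse(AArch64::XZR)            // become zero if it does not
    .addImm(CondCode);               // the condition guarding this edge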
bool AArch64SpeculationHardening::instrumentControlFlow(
    MachineBasicBlock &MBB, bool &UsesFullSpeculationBarrier) {
  LLVM_DEBUG(dbgs() << "Instrument control flow tracking on MBB: " << MBB);
  // ...
  if (!endsWithCondControlFlow(MBB, TBB, FBB, CondCode)) {
    // ... (nothing to track for this block)
  } else {
    // ... (split both outgoing edges, then add tracking code on each side)
    assert(SplitEdgeTBB != nullptr);
    assert(SplitEdgeFBB != nullptr);
    // ...
    insertTrackingCode(*SplitEdgeTBB, CondCode, DL);
    insertTrackingCode(*SplitEdgeFBB, InvCondCode, DL);
  }
  // ... (record return and call instructions, together with a scratch
  //      register that is free just before each of them)
  bool TmpRegisterNotAvailableEverywhere = false;
  // ...
    unsigned TmpReg = RS.FindUnusedReg(&AArch64::GPR64commonRegClass);
    LLVM_DEBUG(dbgs() /* ... */
                      << ((TmpReg == 0) ? "no register " : "register ");
               /* ... */
               dbgs() << "to be available at MI " << MI);
    if (TmpReg == 0)
      TmpRegisterNotAvailableEverywhere = true;
    if (MI.isReturn())
      ReturnInstructions.push_back({&MI, TmpReg});
    else if (MI.isCall())
      CallInstructions.push_back({&MI, TmpReg});
  // ...
  if (TmpRegisterNotAvailableEverywhere) {
    // ... (no scratch register available everywhere it is needed: fall back
    //      to a full speculation barrier at the start of the block)
    insertFullSpeculationBarrier(MBB, MBB.begin(), /* ... */);
    UsesFullSpeculationBarrier = true;
  } else {
    for (auto MI_Reg : ReturnInstructions) {
      assert(MI_Reg.second != 0);
      LLVM_DEBUG(
          dbgs()
          << " About to insert Reg to SP taint propagation with temp register "
          /* ... */
          << " on instruction: " << *MI_Reg.first);
      insertRegToSPTaintPropagation(MBB, MI_Reg.first, MI_Reg.second);
      // ...
    }

    for (auto MI_Reg : CallInstructions) {
      assert(MI_Reg.second != 0);
      LLVM_DEBUG(dbgs() << /* ... */ "propagation with temp register "
                        /* ... */
                        << " around instruction: " << *MI_Reg.first);
      insertSPToRegTaintPropagation(
          /* ... just after the call ... */);
      insertRegToSPTaintPropagation(MBB, MI_Reg.first, MI_Reg.second);
      // ...
    }
  }
  // ...
}
void AArch64SpeculationHardening::insertSPToRegTaintPropagation(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
  if (UseControlFlowSpeculationBarrier) {
    insertFullSpeculationBarrier(MBB, MBBI, DebugLoc());
    return;
  }
  // ... (two BuildMI calls: one defining AArch64::XZR for a compare of SP
  //      against zero, one defining MisspeculatingTaintReg; see the sketch
  //      below)
}
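A sketch of how those two elided BuildMI calls plausibly read (an assumption reconstructed around the two visible .addDef operands, in the context of insertSPToRegTaintPropagation): compare SP against zero, then materialize all-ones in the taint register when SP was non-zero, roughly "cmp sp, #0" followed by "csetm x16, ne".

// Sketch only; opcode and operand choices are assumptions.
BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::SUBSXri))
    .addDef(AArch64::XZR)            // CMP SP, #0 === SUBS xzr, sp, #0
    .addUse(AArch64::SP)
    .addImm(0)
    .addImm(0);                      // no shift
BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::CSINVXr))
    .addDef(MisspeculatingTaintReg)  // CSETM x16, NE === CSINV x16, xzr, xzr, eq
    .addUse(AArch64::XZR)
    .addUse(AArch64::XZR)
    .addImm(AArch64CC::EQ);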
void AArch64SpeculationHardening::insertRegToSPTaintPropagation(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    unsigned TmpReg) const {
  if (UseControlFlowSpeculationBarrier)
    return;
  // ... (mask SP with the taint register through TmpReg; see the sketch below)
}
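A sketch of the masking sequence the elided body plausibly emits (an assumption; opcodes and operand order are reconstructed, in the context of insertRegToSPTaintPropagation): SP cannot be an operand of a register-register AND, so the taint is folded into SP through the scratch register, roughly "mov xN, sp; and xN, xN, x16; mov sp, xN".

// Sketch only; register/operand details are assumptions.
BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
    .addDef(TmpReg)                  // mov TmpReg, sp
    .addUse(AArch64::SP)
    .addImm(0)
    .addImm(0);
BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ANDXrs))
    .addDef(TmpReg)                  // and TmpReg, TmpReg, x16
    .addUse(TmpReg)
    .addUse(MisspeculatingTaintReg)
    .addImm(0);
BuildMI(MBB, MBBI, DebugLoc(), TII->get(AArch64::ADDXri))
    .addDef(AArch64::SP)             // mov sp, TmpReg
    .addUse(TmpReg)
    .addImm(0)
    .addImm(0);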
bool AArch64SpeculationHardening::functionUsesHardeningRegister(
    MachineFunction &MF) const {
  // ...
      if (MI.readsRegister(MisspeculatingTaintReg, TRI) ||
          MI.modifiesRegister(MisspeculatingTaintReg, TRI))
        return true;
  // ...
  return false;
}
bool AArch64SpeculationHardening::makeGPRSpeculationSafe(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, MachineInstr &MI,
    unsigned Reg) {
  assert(AArch64::GPR32allRegClass.contains(Reg) ||
         AArch64::GPR64allRegClass.contains(Reg));
  // ...
  if (Reg == AArch64::SP || Reg == AArch64::WSP)
    return false;
  // ...
  if (RegsAlreadyMasked[Reg])
    return false;

  const bool Is64Bit = AArch64::GPR64allRegClass.contains(Reg);
  LLVM_DEBUG(dbgs() << "About to harden register : " << Reg << "\n");
  BuildMI(MBB, MBBI, MI.getDebugLoc(),
          TII->get(Is64Bit ? AArch64::SpeculationSafeValueX
                           : AArch64::SpeculationSafeValueW))
      /* ... */;
  RegsAlreadyMasked.set(Reg);
  // ...
}
bool AArch64SpeculationHardening::slhLoads(MachineBasicBlock &MBB) {
  // ...
  RegsAlreadyMasked.reset();
  // ...
  for (; MBBI != E; MBBI = NextMBBI) {
    MachineInstr &MI = *MBBI;
    NextMBBI = std::next(MBBI);
    // ... (only loads are of interest; decide whether to harden the loaded
    //      values or the address registers used by the load)
    bool AllDefsAreGPR = llvm::all_of(MI.defs(), [&](MachineOperand &Op) {
      return Op.isReg() && (AArch64::GPR32allRegClass.contains(Op.getReg()) ||
                            AArch64::GPR64allRegClass.contains(Op.getReg()));
    });
    // ...
    bool HardenLoadedData = AllDefsAreGPR;
    bool HardenAddressLoadedFrom = !HardenLoadedData;
    // ... (registers redefined by this instruction are no longer masked)
        RegsAlreadyMasked.reset(*AI);
    // ...
    if (HardenLoadedData)
      for (auto Def : MI.defs()) {
        // ...
        Modified |= makeGPRSpeculationSafe(MBB, NextMBBI, MI, Def.getReg());
      }

    if (HardenAddressLoadedFrom)
      for (auto Use : MI.uses()) {
        // ...
        unsigned Reg = Use.getReg();
        // ...
        if (!(AArch64::GPR32allRegClass.contains(Reg) ||
              AArch64::GPR64allRegClass.contains(Reg)))
          continue;
        Modified |= makeGPRSpeculationSafe(MBB, MBBI, MI, Reg);
      }
  }
  // ...
}
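As an illustration of the decision above (hypothetical instructions, not taken from the source): when every definition of the load is a general-purpose register, the loaded data itself is hardened; otherwise the address registers feeding the load are hardened instead.

//   ldr x1, [x0, x2]   // defs are GPRs -> harden x1, the loaded value
//   ldr q0, [x0, x2]   // def is an FPR -> harden x0 and x2, the address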
bool AArch64SpeculationHardening::expandSpeculationSafeValue(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    bool UsesFullSpeculationBarrier) {
  // ... (switch on the opcode of *MBBI; only the two pseudos are expanded)
  case AArch64::SpeculationSafeValueW:
    // ...
  case AArch64::SpeculationSafeValueX:
    if (!UseControlFlowSpeculationBarrier && !UsesFullSpeculationBarrier) {
      // ... (mark the defined register and all its aliases as needing a CSDB
      //      before their next use)
          RegsNeedingCSDBBeforeUse.set(*AI);
      // ... (mask off with the taint state)
              Is64Bit ? TII->get(AArch64::ANDXrs) : TII->get(AArch64::ANDWrs))
          // ...
          .addUse(Is64Bit ? MisspeculatingTaintReg
                          : MisspeculatingTaintReg32Bit)
          // ...
    }
    // ...
}

bool AArch64SpeculationHardening::insertCSDB(MachineBasicBlock &MBB,
                                             MachineBasicBlock::iterator MBBI,
                                             DebugLoc DL) {
  assert(!UseControlFlowSpeculationBarrier && "No need to insert CSDBs when "
                                              "control flow miss-speculation "
                                              "is already blocked");
  BuildMI(MBB, MBBI, DL, TII->get(AArch64::HINT)).addImm(0x14);
  RegsNeedingCSDBBeforeUse.reset();
  // ...
}
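Putting the two fragments above together, a SpeculationSafeValueX pseudo on a loaded register lowers to an AND with the taint register plus a single CSDB (HINT #0x14) before the first use of any value masked this way. An illustration with assumed register names:

//   ldr  x1, [x0]      // the load that slhLoads decided to harden
//   and  x1, x1, x16   // expansion of SpeculationSafeValueX: x16 is zero
//                      // exactly when mis-speculating, squashing x1 to 0
//   csdb               // emitted once before the first use of x1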
bool AArch64SpeculationHardening::lowerSpeculationSafeValuePseudos(
    MachineBasicBlock &MBB, bool UsesFullSpeculationBarrier) {
  // ...
  RegsNeedingCSDBBeforeUse.reset();
  // ... (walk every instruction MI in the block)
    bool NeedToEmitBarrier = false;
    if (RegsNeedingCSDBBeforeUse.any() && (MI.isCall() || MI.isTerminator()))
      NeedToEmitBarrier = true;
    if (!NeedToEmitBarrier)
      for (MachineOperand Op : MI.uses())
        if (Op.isReg() && RegsNeedingCSDBBeforeUse[Op.getReg()]) {
          NeedToEmitBarrier = true;
          break;
        }

    if (NeedToEmitBarrier && !UsesFullSpeculationBarrier)
      Modified |= insertCSDB(MBB, MBBI, DL);

    Modified |=
        expandSpeculationSafeValue(MBB, MBBI, UsesFullSpeculationBarrier);
  // ...
  if (RegsNeedingCSDBBeforeUse.any() && !UsesFullSpeculationBarrier)
    Modified |= insertCSDB(MBB, MBBI, DL);
  // ...
}
bool AArch64SpeculationHardening::runOnMachineFunction(MachineFunction &MF) {
  // ...
  MisspeculatingTaintReg = AArch64::X16;
  MisspeculatingTaintReg32Bit = AArch64::W16;
  // ...
  RegsNeedingCSDBBeforeUse.resize(TRI->getNumRegs());
  RegsAlreadyMasked.resize(TRI->getNumRegs());
  UseControlFlowSpeculationBarrier = functionUsesHardeningRegister(MF);
  // ...
  LLVM_DEBUG(
      dbgs() << "***** AArch64SpeculationHardening - automatic insertion of "
                "SpeculationSafeValue intrinsics *****\n");
  // ... (for every basic block)
    Modified |= slhLoads(MBB);
  // ...
  LLVM_DEBUG(
      dbgs()
      << "***** AArch64SpeculationHardening - track control flow *****\n");
  // ... (the entry block and every landing pad act as function entry points)
    EntryBlocks.push_back(LPI.LandingPadBlock);
  for (auto Entry : EntryBlocks)
    insertSPToRegTaintPropagation(
        *Entry, Entry->SkipPHIsLabelsAndDebug(Entry->begin()));
  // ...
  for (auto &MBB : MF) {
    bool UsesFullSpeculationBarrier = false;
    Modified |= instrumentControlFlow(MBB, UsesFullSpeculationBarrier);
    Modified |=
        lowerSpeculationSafeValuePseudos(MBB, UsesFullSpeculationBarrier);
  }
  // ...
}

FunctionPass *llvm::createAArch64SpeculationHardeningPass() {
  return new AArch64SpeculationHardening();
}
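As a usage note, and as an assumption about context not shown in this excerpt: the elided start of runOnMachineFunction is expected to bail out for functions that do not carry clang's speculative_load_hardening attribute, which is set per function as below or for a whole compilation with -mspeculative-load-hardening. A hypothetical function this pass would then harden:

// Hypothetical example, not from the pass source.
__attribute__((speculative_load_hardening))
int load_if_in_bounds(const int *array, unsigned idx, unsigned limit) {
  // The bounds check can be bypassed under branch mis-speculation; with this
  // pass, the loaded value (or the address used by the load) is masked with
  // the x16 taint register, so a mis-speculated load cannot forward secret
  // data to later, observable instructions.
  if (idx < limit)
    return array[idx];
  return 0;
}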