LLVM 8.0.1
RISCVExpandPseudoInsts.cpp
//===-- RISCVExpandPseudoInsts.cpp - Expand pseudo instructions -----------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that expands pseudo instructions into target
// instructions. This pass should be run after register allocation but before
// the post-regalloc scheduling pass.
//
//===----------------------------------------------------------------------===//
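// Currently every expansion implemented here lowers an atomic pseudo
// (PseudoAtomicLoadNand32, the PseudoMaskedAtomic* family, and the
// compare-and-swap pseudos) into an LR.W/SC.W retry loop.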

#include "RISCV.h"
#include "RISCVInstrInfo.h"
#include "RISCVTargetMachine.h"

#include "llvm/CodeGen/LivePhysRegs.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"

using namespace llvm;

#define RISCV_EXPAND_PSEUDO_NAME "RISCV pseudo instruction expansion pass"

namespace {

class RISCVExpandPseudo : public MachineFunctionPass {
public:
  const RISCVInstrInfo *TII;
  static char ID;

  RISCVExpandPseudo() : MachineFunctionPass(ID) {
    initializeRISCVExpandPseudoPass(*PassRegistry::getPassRegistry());
  }

  bool runOnMachineFunction(MachineFunction &MF) override;

  StringRef getPassName() const override { return RISCV_EXPAND_PSEUDO_NAME; }

private:
  bool expandMBB(MachineBasicBlock &MBB);
  bool expandMI(MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
                MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicBinOp(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator MBBI, AtomicRMWInst::BinOp,
                         bool IsMasked, int Width,
                         MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicMinMaxOp(MachineBasicBlock &MBB,
                            MachineBasicBlock::iterator MBBI,
                            AtomicRMWInst::BinOp, bool IsMasked, int Width,
                            MachineBasicBlock::iterator &NextMBBI);
  bool expandAtomicCmpXchg(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator MBBI, bool IsMasked,
                           int Width, MachineBasicBlock::iterator &NextMBBI);
};

char RISCVExpandPseudo::ID = 0;

bool RISCVExpandPseudo::runOnMachineFunction(MachineFunction &MF) {
  TII = static_cast<const RISCVInstrInfo *>(MF.getSubtarget().getInstrInfo());
  bool Modified = false;
  for (auto &MBB : MF)
    Modified |= expandMBB(MBB);
  return Modified;
}

bool RISCVExpandPseudo::expandMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  MachineBasicBlock::iterator MBBI = MBB.begin(), E = MBB.end();
  while (MBBI != E) {
    MachineBasicBlock::iterator NMBBI = std::next(MBBI);
    Modified |= expandMI(MBB, MBBI, NMBBI);
    MBBI = NMBBI;
  }

  return Modified;
}

bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB,
                                 MachineBasicBlock::iterator MBBI,
                                 MachineBasicBlock::iterator &NextMBBI) {
  switch (MBBI->getOpcode()) {
  case RISCV::PseudoAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, false, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicSwap32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Xchg, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadAdd32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Add, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadSub32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Sub, true, 32, NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadNand32:
    return expandAtomicBinOp(MBB, MBBI, AtomicRMWInst::Nand, true, 32,
                             NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Max, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::Min, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMax32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMax, true, 32,
                                NextMBBI);
  case RISCV::PseudoMaskedAtomicLoadUMin32:
    return expandAtomicMinMaxOp(MBB, MBBI, AtomicRMWInst::UMin, true, 32,
                                NextMBBI);
  case RISCV::PseudoCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, false, 32, NextMBBI);
  case RISCV::PseudoMaskedCmpXchg32:
    return expandAtomicCmpXchg(MBB, MBBI, true, 32, NextMBBI);
  }

  return false;
}

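// Map an atomic ordering to the matching LR.W opcode: acquire orderings set
// the aq bit on the load-reserved, while release semantics are carried by the
// rl bit on the paired SC.W (see getSCForRMW32 below). SequentiallyConsistent
// conservatively sets both bits on both instructions.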
static unsigned getLRForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::LR_W;
  case AtomicOrdering::Acquire:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::Release:
    return RISCV::LR_W;
  case AtomicOrdering::AcquireRelease:
    return RISCV::LR_W_AQ;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::LR_W_AQ_RL;
  }
}

static unsigned getSCForRMW32(AtomicOrdering Ordering) {
  switch (Ordering) {
  default:
    llvm_unreachable("Unexpected AtomicOrdering");
  case AtomicOrdering::Monotonic:
    return RISCV::SC_W;
  case AtomicOrdering::Acquire:
    return RISCV::SC_W;
  case AtomicOrdering::Release:
    return RISCV::SC_W_RL;
  case AtomicOrdering::AcquireRelease:
    return RISCV::SC_W_RL;
  case AtomicOrdering::SequentiallyConsistent:
    return RISCV::SC_W_AQ_RL;
  }
}

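// Expand an unmasked, word-sized atomic RMW into an LR.W/SC.W retry loop.
// The pseudo's operands are, in order: dest, scratch, addr, incr, and the
// atomic ordering encoded as an immediate.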
static void doAtomicBinOpExpansion(const RISCVInstrInfo *TII, MachineInstr &MI,
                                   DebugLoc DL, MachineBasicBlock *ThisMBB,
                                   MachineBasicBlock *LoopMBB,
                                   MachineBasicBlock *DoneMBB,
                                   AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned ScratchReg = MI.getOperand(1).getReg();
  unsigned AddrReg = MI.getOperand(2).getReg();
  unsigned IncrReg = MI.getOperand(3).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(4).getImm());

  // .loop:
  //   lr.w dest, (addr)
  //   binop scratch, dest, val
  //   sc.w scratch, scratch, (addr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }
  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

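// Write into DestReg the merge of OldValReg and NewValReg under MaskReg:
// bits inside the mask come from NewValReg, bits outside it from OldValReg
// (e.g. with mask 0x0000ff00, only that byte lane of NewValReg is taken).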
static void insertMaskedMerge(const RISCVInstrInfo *TII, DebugLoc DL,
                              MachineBasicBlock *MBB, unsigned DestReg,
                              unsigned OldValReg, unsigned NewValReg,
                              unsigned MaskReg, unsigned ScratchReg) {
  assert(OldValReg != ScratchReg && "OldValReg and ScratchReg must be unique");
  assert(OldValReg != MaskReg && "OldValReg and MaskReg must be unique");
  assert(ScratchReg != MaskReg && "ScratchReg and MaskReg must be unique");

  // We select bits from newval and oldval using:
  // https://graphics.stanford.edu/~seander/bithacks.html#MaskedMerge
  // r = oldval ^ ((oldval ^ newval) & masktargetdata);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), ScratchReg)
      .addReg(OldValReg)
      .addReg(NewValReg);
  BuildMI(MBB, DL, TII->get(RISCV::AND), ScratchReg)
      .addReg(ScratchReg)
      .addReg(MaskReg);
  BuildMI(MBB, DL, TII->get(RISCV::XOR), DestReg)
      .addReg(OldValReg)
      .addReg(ScratchReg);
}

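// Expand a masked (part-word, e.g. i8/i16) atomic RMW. The pseudo operates on
// the aligned 32-bit word containing the target field; its operands are dest,
// scratch, aligned address, incr, mask, and then the ordering immediate.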
static void doMaskedAtomicBinOpExpansion(
    const RISCVInstrInfo *TII, MachineInstr &MI, DebugLoc DL,
    MachineBasicBlock *ThisMBB, MachineBasicBlock *LoopMBB,
    MachineBasicBlock *DoneMBB, AtomicRMWInst::BinOp BinOp, int Width) {
  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned ScratchReg = MI.getOperand(1).getReg();
  unsigned AddrReg = MI.getOperand(2).getReg();
  unsigned IncrReg = MI.getOperand(3).getReg();
  unsigned MaskReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(5).getImm());

  // .loop:
  //   lr.w destreg, (alignedaddr)
  //   binop scratch, destreg, incr
  //   xor scratch, destreg, scratch
  //   and scratch, scratch, masktargetdata
  //   xor scratch, destreg, scratch
  //   sc.w scratch, scratch, (alignedaddr)
  //   bnez scratch, loop
  BuildMI(LoopMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Xchg:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(RISCV::X0)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Add:
    BuildMI(LoopMBB, DL, TII->get(RISCV::ADD), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Sub:
    BuildMI(LoopMBB, DL, TII->get(RISCV::SUB), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    break;
  case AtomicRMWInst::Nand:
    BuildMI(LoopMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(IncrReg);
    BuildMI(LoopMBB, DL, TII->get(RISCV::XORI), ScratchReg)
        .addReg(ScratchReg)
        .addImm(-1);
    break;
  }

  insertMaskedMerge(TII, DL, LoopMBB, ScratchReg, DestReg, ScratchReg, MaskReg,
                    ScratchReg);

  BuildMI(LoopMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
      .addReg(AddrReg)
      .addReg(ScratchReg);
  BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
      .addReg(ScratchReg)
      .addReg(RISCV::X0)
      .addMBB(LoopMBB);
}

bool RISCVExpandPseudo::expandAtomicBinOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();

  MachineFunction *MF = MBB.getParent();
  auto LoopMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopMBB);
  MF->insert(++LoopMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopMBB->addSuccessor(LoopMBB);
  LoopMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopMBB);

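  // MBB now ends where MI used to be: the spliced-off tail and MBB's former
  // successors belong to DoneMBB, and control flows MBB -> LoopMBB (which
  // loops back on itself while the SC fails) -> DoneMBB.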
  if (!IsMasked)
    doAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp, Width);
  else
    doMaskedAtomicBinOpExpansion(TII, MI, DL, &MBB, LoopMBB, DoneMBB, BinOp,
                                 Width);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

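  // The new blocks were created after register allocation, so their live-in
  // lists must be recomputed explicitly.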
  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

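// Sign-extend the field held in ValReg in place: shifting left by ShamtReg
// brings the field's sign bit to the MSB, and the arithmetic right shift
// moves it back while replicating that sign bit.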
static void insertSext(const RISCVInstrInfo *TII, DebugLoc DL,
                       MachineBasicBlock *MBB, unsigned ValReg,
                       unsigned ShamtReg) {
  BuildMI(MBB, DL, TII->get(RISCV::SLL), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
  BuildMI(MBB, DL, TII->get(RISCV::SRA), ValReg)
      .addReg(ValReg)
      .addReg(ShamtReg);
}

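// Min/max cannot be expressed as a single ALU operation, so this expansion
// needs a conditional branch: .loophead skips the masked merge in .loopifbody
// whenever the value in memory is already the min/max, in which case the
// original word is stored back unchanged.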
bool RISCVExpandPseudo::expandAtomicMinMaxOp(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
    AtomicRMWInst::BinOp BinOp, bool IsMasked, int Width,
    MachineBasicBlock::iterator &NextMBBI) {
  assert(IsMasked == true &&
         "Should only need to expand masked atomic max/min");
  assert(Width == 32 && "RV64 atomic expansion currently unsupported");

  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopIfBodyMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopIfBodyMBB);
  MF->insert(++LoopIfBodyMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopIfBodyMBB);
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopIfBodyMBB->addSuccessor(LoopTailMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned Scratch1Reg = MI.getOperand(1).getReg();
  unsigned Scratch2Reg = MI.getOperand(2).getReg();
  unsigned AddrReg = MI.getOperand(3).getReg();
  unsigned IncrReg = MI.getOperand(4).getReg();
  unsigned MaskReg = MI.getOperand(5).getReg();
  bool IsSigned = BinOp == AtomicRMWInst::Min || BinOp == AtomicRMWInst::Max;
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsSigned ? 7 : 6).getImm());

  //
  // .loophead:
  //   lr.w destreg, (alignedaddr)
  //   and scratch2, destreg, mask
  //   mv scratch1, destreg
  //   [sext scratch2 if signed min/max]
  //   ifnochangeneeded scratch2, incr, .looptail
  BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
      .addReg(AddrReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), Scratch2Reg)
      .addReg(DestReg)
      .addReg(MaskReg);
  BuildMI(LoopHeadMBB, DL, TII->get(RISCV::ADDI), Scratch1Reg)
      .addReg(DestReg)
      .addImm(0);

  switch (BinOp) {
  default:
    llvm_unreachable("Unexpected AtomicRMW BinOp");
  case AtomicRMWInst::Max: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::Min: {
    insertSext(TII, DL, LoopHeadMBB, Scratch2Reg, MI.getOperand(6).getReg());
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGE))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }
  case AtomicRMWInst::UMax:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(Scratch2Reg)
        .addReg(IncrReg)
        .addMBB(LoopTailMBB);
    break;
  case AtomicRMWInst::UMin:
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BGEU))
        .addReg(IncrReg)
        .addReg(Scratch2Reg)
        .addMBB(LoopTailMBB);
    break;
  }

  // .loopifbody:
  //   xor scratch1, destreg, incr
  //   and scratch1, scratch1, mask
  //   xor scratch1, destreg, scratch1
  insertMaskedMerge(TII, DL, LoopIfBodyMBB, Scratch1Reg, DestReg, IncrReg,
                    MaskReg, Scratch1Reg);

  // .looptail:
  //   sc.w scratch1, scratch1, (addr)
  //   bnez scratch1, loop
  BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), Scratch1Reg)
      .addReg(AddrReg)
      .addReg(Scratch1Reg);
  BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
      .addReg(Scratch1Reg)
      .addReg(RISCV::X0)
      .addMBB(LoopHeadMBB);

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopIfBodyMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

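// Expand PseudoCmpXchg32 / PseudoMaskedCmpXchg32. Unlike the RMW loops above,
// a failed comparison exits early: when the loaded value (masked, in the
// part-word form) does not equal cmpval, control branches straight to DoneMBB
// without attempting an SC.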
bool RISCVExpandPseudo::expandAtomicCmpXchg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, bool IsMasked,
    int Width, MachineBasicBlock::iterator &NextMBBI) {
  assert(Width == 32 && "RV64 atomic expansion currently unsupported");
  MachineInstr &MI = *MBBI;
  DebugLoc DL = MI.getDebugLoc();
  MachineFunction *MF = MBB.getParent();
  auto LoopHeadMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto LoopTailMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());
  auto DoneMBB = MF->CreateMachineBasicBlock(MBB.getBasicBlock());

  // Insert new MBBs.
  MF->insert(++MBB.getIterator(), LoopHeadMBB);
  MF->insert(++LoopHeadMBB->getIterator(), LoopTailMBB);
  MF->insert(++LoopTailMBB->getIterator(), DoneMBB);

  // Set up successors and transfer remaining instructions to DoneMBB.
  LoopHeadMBB->addSuccessor(LoopTailMBB);
  LoopHeadMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(DoneMBB);
  LoopTailMBB->addSuccessor(LoopHeadMBB);
  DoneMBB->splice(DoneMBB->end(), &MBB, MI, MBB.end());
  DoneMBB->transferSuccessors(&MBB);
  MBB.addSuccessor(LoopHeadMBB);

  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned ScratchReg = MI.getOperand(1).getReg();
  unsigned AddrReg = MI.getOperand(2).getReg();
  unsigned CmpValReg = MI.getOperand(3).getReg();
  unsigned NewValReg = MI.getOperand(4).getReg();
  AtomicOrdering Ordering =
      static_cast<AtomicOrdering>(MI.getOperand(IsMasked ? 6 : 5).getImm());

  if (!IsMasked) {
    // .loophead:
    //   lr.w dest, (addr)
    //   bne dest, cmpval, done
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(DestReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);
    // .looptail:
    //   sc.w scratch, newval, (addr)
    //   bnez scratch, loophead
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
        .addReg(AddrReg)
        .addReg(NewValReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  } else {
    // .loophead:
    //   lr.w dest, (addr)
    //   and scratch, dest, mask
    //   bne scratch, cmpval, done
    unsigned MaskReg = MI.getOperand(5).getReg();
    BuildMI(LoopHeadMBB, DL, TII->get(getLRForRMW32(Ordering)), DestReg)
        .addReg(AddrReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::AND), ScratchReg)
        .addReg(DestReg)
        .addReg(MaskReg);
    BuildMI(LoopHeadMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(CmpValReg)
        .addMBB(DoneMBB);

    // .looptail:
    //   xor scratch, dest, newval
    //   and scratch, scratch, mask
    //   xor scratch, dest, scratch
    //   sc.w scratch, scratch, (addr)
    //   bnez scratch, loophead
    insertMaskedMerge(TII, DL, LoopTailMBB, ScratchReg, DestReg, NewValReg,
                      MaskReg, ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(getSCForRMW32(Ordering)), ScratchReg)
        .addReg(AddrReg)
        .addReg(ScratchReg);
    BuildMI(LoopTailMBB, DL, TII->get(RISCV::BNE))
        .addReg(ScratchReg)
        .addReg(RISCV::X0)
        .addMBB(LoopHeadMBB);
  }

  NextMBBI = MBB.end();
  MI.eraseFromParent();

  LivePhysRegs LiveRegs;
  computeAndAddLiveIns(LiveRegs, *LoopHeadMBB);
  computeAndAddLiveIns(LiveRegs, *LoopTailMBB);
  computeAndAddLiveIns(LiveRegs, *DoneMBB);

  return true;
}

} // end of anonymous namespace

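// Register the pass under the command-line name "riscv-expand-pseudo" and
// expose a factory function; createRISCVExpandPseudoPass() is the hook the
// RISCV target is expected to use to add the pass to its codegen pipeline,
// after register allocation per the note at the top of this file.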
INITIALIZE_PASS(RISCVExpandPseudo, "riscv-expand-pseudo",
                RISCV_EXPAND_PSEUDO_NAME, false, false)
namespace llvm {

FunctionPass *createRISCVExpandPseudoPass() { return new RISCVExpandPseudo(); }

} // end of namespace llvm