LLVM 8.0.1
ExpandMemCmp.cpp
1 //===--- ExpandMemCmp.cpp - Expand memcmp() to load/stores ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This pass tries to expand memcmp() calls into optimally-sized loads and
11 // compares for the target.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #include "llvm/ADT/Statistic.h"
16 #include "llvm/Analysis/ConstantFolding.h"
17 #include "llvm/Analysis/TargetLibraryInfo.h"
18 #include "llvm/Analysis/TargetTransformInfo.h"
19 #include "llvm/Analysis/ValueTracking.h"
20 #include "llvm/CodeGen/TargetLowering.h"
21 #include "llvm/CodeGen/TargetPassConfig.h"
22 #include "llvm/CodeGen/TargetSubtargetInfo.h"
23 #include "llvm/IR/IRBuilder.h"
24 
25 using namespace llvm;
26 
27 #define DEBUG_TYPE "expandmemcmp"
28 
29 STATISTIC(NumMemCmpCalls, "Number of memcmp calls");
30 STATISTIC(NumMemCmpNotConstant, "Number of memcmp calls without constant size");
31 STATISTIC(NumMemCmpGreaterThanMax,
32  "Number of memcmp calls with size greater than max size");
33 STATISTIC(NumMemCmpInlined, "Number of inlined memcmp calls");
34 
35 static cl::opt<unsigned> MemCmpEqZeroNumLoadsPerBlock(
36  "memcmp-num-loads-per-block", cl::Hidden, cl::init(1),
37  cl::desc("The number of loads per basic block for inline expansion of "
38  "memcmp that is only being compared against zero."));
39 
40 namespace {
41 
42 
43 // This class provides helper functions to expand a memcmp library call into an
44 // inline expansion.
45 class MemCmpExpansion {
46  struct ResultBlock {
47  BasicBlock *BB = nullptr;
48  PHINode *PhiSrc1 = nullptr;
49  PHINode *PhiSrc2 = nullptr;
50 
51  ResultBlock() = default;
52  };
53 
54  CallInst *const CI;
55  ResultBlock ResBlock;
56  const uint64_t Size;
57  unsigned MaxLoadSize;
58  uint64_t NumLoadsNonOneByte;
59  const uint64_t NumLoadsPerBlockForZeroCmp;
60  std::vector<BasicBlock *> LoadCmpBlocks;
61  BasicBlock *EndBlock;
62  PHINode *PhiRes;
63  const bool IsUsedForZeroCmp;
64  const DataLayout &DL;
65  IRBuilder<> Builder;
66  // Represents the decomposition in blocks of the expansion. For example,
67  // comparing 33 bytes on X86+sse can be done with 2x16-byte loads and
68  // 1x1-byte load, which would be represented as [{16, 0}, {16, 16}, {1, 32}].
69  struct LoadEntry {
70  LoadEntry(unsigned LoadSize, uint64_t Offset)
71  : LoadSize(LoadSize), Offset(Offset) {
72  }
73 
74  // The size of the load for this block, in bytes.
75  unsigned LoadSize;
76  // The offset of this load from the base pointer, in bytes.
77  uint64_t Offset;
78  };
79  using LoadEntryVector = SmallVector<LoadEntry, 8>;
80  LoadEntryVector LoadSequence;
81 
82  void createLoadCmpBlocks();
83  void createResultBlock();
84  void setupResultBlockPHINodes();
85  void setupEndBlockPHINodes();
86  Value *getCompareLoadPairs(unsigned BlockIndex, unsigned &LoadIndex);
87  void emitLoadCompareBlock(unsigned BlockIndex);
88  void emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
89  unsigned &LoadIndex);
90  void emitLoadCompareByteBlock(unsigned BlockIndex, unsigned OffsetBytes);
91  void emitMemCmpResultBlock();
92  Value *getMemCmpExpansionZeroCase();
93  Value *getMemCmpEqZeroOneBlock();
94  Value *getMemCmpOneBlock();
95  Value *getPtrToElementAtOffset(Value *Source, Type *LoadSizeType,
96  uint64_t OffsetBytes);
97 
98  static LoadEntryVector
99  computeGreedyLoadSequence(uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
100  unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte);
101  static LoadEntryVector
102  computeOverlappingLoadSequence(uint64_t Size, unsigned MaxLoadSize,
103  unsigned MaxNumLoads,
104  unsigned &NumLoadsNonOneByte);
105 
106 public:
107  MemCmpExpansion(CallInst *CI, uint64_t Size,
108  const TargetTransformInfo::MemCmpExpansionOptions &Options,
109  unsigned MaxNumLoads, const bool IsUsedForZeroCmp,
110  unsigned MaxLoadsPerBlockForZeroCmp, const DataLayout &TheDataLayout);
111 
112  unsigned getNumBlocks();
113  uint64_t getNumLoads() const { return LoadSequence.size(); }
114 
115  Value *getMemCmpExpansion();
116 };
117 
118 MemCmpExpansion::LoadEntryVector MemCmpExpansion::computeGreedyLoadSequence(
119  uint64_t Size, llvm::ArrayRef<unsigned> LoadSizes,
120  const unsigned MaxNumLoads, unsigned &NumLoadsNonOneByte) {
121  NumLoadsNonOneByte = 0;
122  LoadEntryVector LoadSequence;
123  uint64_t Offset = 0;
124  while (Size && !LoadSizes.empty()) {
125  const unsigned LoadSize = LoadSizes.front();
126  const uint64_t NumLoadsForThisSize = Size / LoadSize;
127  if (LoadSequence.size() + NumLoadsForThisSize > MaxNumLoads) {
128  // Do not expand if the total number of loads is larger than what the
129  // target allows. Note that it's important that we exit before completing
130  // the expansion to avoid using a ton of memory to store the expansion for
131  // large sizes.
132  return {};
133  }
134  if (NumLoadsForThisSize > 0) {
135  for (uint64_t I = 0; I < NumLoadsForThisSize; ++I) {
136  LoadSequence.push_back({LoadSize, Offset});
137  Offset += LoadSize;
138  }
139  if (LoadSize > 1)
140  ++NumLoadsNonOneByte;
141  Size = Size % LoadSize;
142  }
143  LoadSizes = LoadSizes.drop_front();
144  }
145  return LoadSequence;
146 }
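// Illustrative example (assuming LoadSizes = {8, 4, 2, 1}, e.g. a 64-bit
// target without wider vector loads): for Size = 15 the greedy decomposition
// is [{8, 0}, {4, 8}, {2, 12}, {1, 14}], i.e. one load of each size, with
// NumLoadsNonOneByte == 3. If MaxNumLoads were smaller than 4, the function
// would instead return an empty sequence and no expansion would take place.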
147 
148 MemCmpExpansion::LoadEntryVector
149 MemCmpExpansion::computeOverlappingLoadSequence(uint64_t Size,
150  const unsigned MaxLoadSize,
151  const unsigned MaxNumLoads,
152  unsigned &NumLoadsNonOneByte) {
153  // These are already handled by the greedy approach.
154  if (Size < 2 || MaxLoadSize < 2)
155  return {};
156 
157  // We try to do as many non-overlapping loads as possible starting from the
158  // beginning.
159  const uint64_t NumNonOverlappingLoads = Size / MaxLoadSize;
160  assert(NumNonOverlappingLoads && "there must be at least one load");
161  // There remain 0 to (MaxLoadSize - 1) bytes to load; this will be done with
162  // an overlapping load.
163  Size = Size - NumNonOverlappingLoads * MaxLoadSize;
164  // Bail if we do not need an overlapping load; this case is already handled
165  // by the greedy approach.
166  if (Size == 0)
167  return {};
168  // Bail if the number of loads (non-overlapping + potential overlapping one)
169  // is larger than the max allowed.
170  if ((NumNonOverlappingLoads + 1) > MaxNumLoads)
171  return {};
172 
173  // Add non-overlapping loads.
174  LoadEntryVector LoadSequence;
175  uint64_t Offset = 0;
176  for (uint64_t I = 0; I < NumNonOverlappingLoads; ++I) {
177  LoadSequence.push_back({MaxLoadSize, Offset});
178  Offset += MaxLoadSize;
179  }
180 
181  // Add the last overlapping load.
182  assert(Size > 0 && Size < MaxLoadSize && "broken invariant");
183  LoadSequence.push_back({MaxLoadSize, Offset - (MaxLoadSize - Size)});
184  NumLoadsNonOneByte = 1;
185  return LoadSequence;
186 }
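// Illustrative example: for Size = 15, MaxLoadSize = 8 and MaxNumLoads >= 2,
// the result is one non-overlapping 8-byte load at offset 0 plus one
// overlapping 8-byte load at offset 15 - 8 = 7, i.e. [{8, 0}, {8, 7}]: the
// same 15 bytes are covered with 2 loads instead of the 4 loads the greedy
// decomposition above would use.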
187 
188 // Initialize the basic block structure required for expansion of memcmp call
189 // with given maximum load size and memcmp size parameter.
190 // This structure includes:
191 // 1. A list of load compare blocks - LoadCmpBlocks.
192 // 2. An EndBlock, split from original instruction point, which is the block to
193 // return from.
194 // 3. ResultBlock, block to branch to for early exit when a
195 // LoadCmpBlock finds a difference.
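// Rough sketch of the resulting control flow for the general (non-zero-
// equality) multi-block case:
//
//   loadbb -> loadbb1 -> ... -> loadbbN -> endblock
//      \         \                 \          ^
//       `---------`-----------------`--> res_block
//
// Each multi-byte load/compare block branches to res_block on the first
// difference; res_block turns the differing values into -1/+1 and branches
// to endblock, where phi.res selects the final memcmp result. A trailing
// 1-byte block (if any) feeds its subtraction result directly into phi.res.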
196 MemCmpExpansion::MemCmpExpansion(
197  CallInst *const CI, uint64_t Size,
198  const TargetTransformInfo::MemCmpExpansionOptions &Options,
199  const unsigned MaxNumLoads, const bool IsUsedForZeroCmp,
200  const unsigned MaxLoadsPerBlockForZeroCmp, const DataLayout &TheDataLayout)
201  : CI(CI),
202  Size(Size),
203  MaxLoadSize(0),
204  NumLoadsNonOneByte(0),
205  NumLoadsPerBlockForZeroCmp(MaxLoadsPerBlockForZeroCmp),
206  IsUsedForZeroCmp(IsUsedForZeroCmp),
207  DL(TheDataLayout),
208  Builder(CI) {
209  assert(Size > 0 && "zero blocks");
210  // Scale the max size down if the target can load more bytes than we need.
211  llvm::ArrayRef<unsigned> LoadSizes(Options.LoadSizes);
212  while (!LoadSizes.empty() && LoadSizes.front() > Size) {
213  LoadSizes = LoadSizes.drop_front();
214  }
215  assert(!LoadSizes.empty() && "cannot load Size bytes");
216  MaxLoadSize = LoadSizes.front();
217  // Compute the decomposition.
218  unsigned GreedyNumLoadsNonOneByte = 0;
219  LoadSequence = computeGreedyLoadSequence(Size, LoadSizes, MaxNumLoads,
220  GreedyNumLoadsNonOneByte);
221  NumLoadsNonOneByte = GreedyNumLoadsNonOneByte;
222  assert(LoadSequence.size() <= MaxNumLoads && "broken invariant");
223  // If we allow overlapping loads and the load sequence is not already optimal,
224  // use overlapping loads.
225  if (Options.AllowOverlappingLoads &&
226  (LoadSequence.empty() || LoadSequence.size() > 2)) {
227  unsigned OverlappingNumLoadsNonOneByte = 0;
228  auto OverlappingLoads = computeOverlappingLoadSequence(
229  Size, MaxLoadSize, MaxNumLoads, OverlappingNumLoadsNonOneByte);
230  if (!OverlappingLoads.empty() &&
231  (LoadSequence.empty() ||
232  OverlappingLoads.size() < LoadSequence.size())) {
233  LoadSequence = OverlappingLoads;
234  NumLoadsNonOneByte = OverlappingNumLoadsNonOneByte;
235  }
236  }
237  assert(LoadSequence.size() <= MaxNumLoads && "broken invariant");
238 }
239 
240 unsigned MemCmpExpansion::getNumBlocks() {
241  if (IsUsedForZeroCmp)
242  return getNumLoads() / NumLoadsPerBlockForZeroCmp +
243  (getNumLoads() % NumLoadsPerBlockForZeroCmp != 0 ? 1 : 0);
244  return getNumLoads();
245 }
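// Illustrative example: with 5 loads and NumLoadsPerBlockForZeroCmp == 2, a
// zero-equality expansion uses ceil(5 / 2) == 3 blocks, while a three-way
// (relational) expansion uses one block per load, i.e. 5 blocks.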
246 
247 void MemCmpExpansion::createLoadCmpBlocks() {
248  for (unsigned i = 0; i < getNumBlocks(); i++) {
249  BasicBlock *BB = BasicBlock::Create(CI->getContext(), "loadbb",
250  EndBlock->getParent(), EndBlock);
251  LoadCmpBlocks.push_back(BB);
252  }
253 }
254 
255 void MemCmpExpansion::createResultBlock() {
256  ResBlock.BB = BasicBlock::Create(CI->getContext(), "res_block",
257  EndBlock->getParent(), EndBlock);
258 }
259 
260 /// Return a pointer to an element of type `LoadSizeType` at offset
261 /// `OffsetBytes`.
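/// For example, with OffsetBytes == 8 and LoadSizeType == i64 this emits
/// (roughly) a bitcast of Source to i8*, a getelementptr of 8 over that i8*,
/// and a bitcast of the result to i64*. With OffsetBytes == 0 only the final
/// bitcast is emitted.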
262 Value *MemCmpExpansion::getPtrToElementAtOffset(Value *Source,
263  Type *LoadSizeType,
264  uint64_t OffsetBytes) {
265  if (OffsetBytes > 0) {
266  auto *ByteType = Type::getInt8Ty(CI->getContext());
267  Source = Builder.CreateGEP(
268  ByteType, Builder.CreateBitCast(Source, ByteType->getPointerTo()),
269  ConstantInt::get(ByteType, OffsetBytes));
270  }
271  return Builder.CreateBitCast(Source, LoadSizeType->getPointerTo());
272 }
273 
274 // This function creates the IR instructions for loading and comparing 1 byte.
275 // It loads 1 byte from each source of the memcmp parameters at the given
276 // offset (in bytes). It then subtracts the two loaded values and adds the
277 // result to the final phi node for selecting the memcmp result.
278 void MemCmpExpansion::emitLoadCompareByteBlock(unsigned BlockIndex,
279  unsigned OffsetBytes) {
280  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
281  Type *LoadSizeType = Type::getInt8Ty(CI->getContext());
282  Value *Source1 =
283  getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType, OffsetBytes);
284  Value *Source2 =
285  getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType, OffsetBytes);
286 
287  Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
288  Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
289 
290  LoadSrc1 = Builder.CreateZExt(LoadSrc1, Type::getInt32Ty(CI->getContext()));
291  LoadSrc2 = Builder.CreateZExt(LoadSrc2, Type::getInt32Ty(CI->getContext()));
292  Value *Diff = Builder.CreateSub(LoadSrc1, LoadSrc2);
293 
294  PhiRes->addIncoming(Diff, LoadCmpBlocks[BlockIndex]);
295 
296  if (BlockIndex < (LoadCmpBlocks.size() - 1)) {
297  // Early exit branch to EndBlock if a difference was found. Otherwise,
298  // continue to the next LoadCmpBlock.
299  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_NE, Diff,
300  ConstantInt::get(Diff->getType(), 0));
301  BranchInst *CmpBr =
302  BranchInst::Create(EndBlock, LoadCmpBlocks[BlockIndex + 1], Cmp);
303  Builder.Insert(CmpBr);
304  } else {
305  // The last block has an unconditional branch to EndBlock.
306  BranchInst *CmpBr = BranchInst::Create(EndBlock);
307  Builder.Insert(CmpBr);
308  }
309 }
310 
311 /// Generate an equality comparison for one or more pairs of loaded values.
312 /// This is used in the case where the memcmp() call is compared equal or not
313 /// equal to zero.
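/// Illustrative shape of the emitted IR for a block with 4 loads, where ai/bi
/// stand for the i-th loads from the two sources (zero-extended to the max
/// load type when needed):
///   d0 = xor(a0, b0)  d1 = xor(a1, b1)  d2 = xor(a2, b2)  d3 = xor(a3, b3)
///   or01 = or(d0, d1)                   or23 = or(d2, d3)
///   cmp  = icmp ne (or(or01, or23)), 0
/// With a single load per block the xor/or tree is skipped and a direct
/// icmp ne of the two loaded values is emitted instead.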
314 Value *MemCmpExpansion::getCompareLoadPairs(unsigned BlockIndex,
315  unsigned &LoadIndex) {
316  assert(LoadIndex < getNumLoads() &&
317  "getCompareLoadPairs() called with no remaining loads");
318  std::vector<Value *> XorList, OrList;
319  Value *Diff;
320 
321  const unsigned NumLoads =
322  std::min(getNumLoads() - LoadIndex, NumLoadsPerBlockForZeroCmp);
323 
324  // For a single-block expansion, start inserting before the memcmp call.
325  if (LoadCmpBlocks.empty())
326  Builder.SetInsertPoint(CI);
327  else
328  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
329 
330  Value *Cmp = nullptr;
331  // If we have multiple loads per block, we need to generate a composite
332  // comparison using xor+or. The type for the combinations is the largest load
333  // type.
334  IntegerType *const MaxLoadType =
335  NumLoads == 1 ? nullptr
336  : IntegerType::get(CI->getContext(), MaxLoadSize * 8);
337  for (unsigned i = 0; i < NumLoads; ++i, ++LoadIndex) {
338  const LoadEntry &CurLoadEntry = LoadSequence[LoadIndex];
339 
340  IntegerType *LoadSizeType =
341  IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
342 
343  Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
344  CurLoadEntry.Offset);
345  Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
346  CurLoadEntry.Offset);
347 
348  // Get a constant or load a value for each source address.
349  Value *LoadSrc1 = nullptr;
350  if (auto *Source1C = dyn_cast<Constant>(Source1))
351  LoadSrc1 = ConstantFoldLoadFromConstPtr(Source1C, LoadSizeType, DL);
352  if (!LoadSrc1)
353  LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
354 
355  Value *LoadSrc2 = nullptr;
356  if (auto *Source2C = dyn_cast<Constant>(Source2))
357  LoadSrc2 = ConstantFoldLoadFromConstPtr(Source2C, LoadSizeType, DL);
358  if (!LoadSrc2)
359  LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
360 
361  if (NumLoads != 1) {
362  if (LoadSizeType != MaxLoadType) {
363  LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
364  LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
365  }
366  // If we have multiple loads per block, we need to generate a composite
367  // comparison using xor+or.
368  Diff = Builder.CreateXor(LoadSrc1, LoadSrc2);
369  Diff = Builder.CreateZExt(Diff, MaxLoadType);
370  XorList.push_back(Diff);
371  } else {
372  // If there's only one load per block, we just compare the loaded values.
373  Cmp = Builder.CreateICmpNE(LoadSrc1, LoadSrc2);
374  }
375  }
376 
377  auto pairWiseOr = [&](std::vector<Value *> &InList) -> std::vector<Value *> {
378  std::vector<Value *> OutList;
379  for (unsigned i = 0; i < InList.size() - 1; i = i + 2) {
380  Value *Or = Builder.CreateOr(InList[i], InList[i + 1]);
381  OutList.push_back(Or);
382  }
383  if (InList.size() % 2 != 0)
384  OutList.push_back(InList.back());
385  return OutList;
386  };
387 
388  if (!Cmp) {
389  // Pairwise OR the XOR results.
390  OrList = pairWiseOr(XorList);
391 
392  // Pairwise OR the OR results until one result left.
393  while (OrList.size() != 1) {
394  OrList = pairWiseOr(OrList);
395  }
396  Cmp = Builder.CreateICmpNE(OrList[0], ConstantInt::get(Diff->getType(), 0));
397  }
398 
399  return Cmp;
400 }
401 
402 void MemCmpExpansion::emitLoadCompareBlockMultipleLoads(unsigned BlockIndex,
403  unsigned &LoadIndex) {
404  Value *Cmp = getCompareLoadPairs(BlockIndex, LoadIndex);
405 
406  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
407  ? EndBlock
408  : LoadCmpBlocks[BlockIndex + 1];
409  // Early exit branch to ResultBlock if a difference was found. Otherwise,
410  // continue to the next LoadCmpBlock or to EndBlock.
411  BranchInst *CmpBr = BranchInst::Create(ResBlock.BB, NextBB, Cmp);
412  Builder.Insert(CmpBr);
413 
414  // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
415  // since early exit to ResultBlock was not taken (no difference was found in
416  // any of the bytes).
417  if (BlockIndex == LoadCmpBlocks.size() - 1) {
418  Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
419  PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
420  }
421 }
422 
423 // This function creates the IR instructions for loading and comparing using the
424 // given LoadSize. It loads the number of bytes specified by LoadSize from each
425 // source of the memcmp parameters. It then subtracts the loaded values to see
426 // if there was a difference. If a difference is found, it branches with an
427 // early exit to the ResultBlock for calculating which source was larger.
428 // Otherwise, it falls through to either the next LoadCmpBlock or, if this is
429 // the last LoadCmpBlock, to the EndBlock. Loading 1 byte is handled as a
430 // special case by emitLoadCompareByteBlock, which simply subtracts the loaded
431 // values and adds the result to the result phi node.
432 void MemCmpExpansion::emitLoadCompareBlock(unsigned BlockIndex) {
433  // There is one load per block in this case, BlockIndex == LoadIndex.
434  const LoadEntry &CurLoadEntry = LoadSequence[BlockIndex];
435 
436  if (CurLoadEntry.LoadSize == 1) {
437  MemCmpExpansion::emitLoadCompareByteBlock(BlockIndex, CurLoadEntry.Offset);
438  return;
439  }
440 
441  Type *LoadSizeType =
442  IntegerType::get(CI->getContext(), CurLoadEntry.LoadSize * 8);
443  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
444  assert(CurLoadEntry.LoadSize <= MaxLoadSize && "Unexpected load type");
445 
446  Builder.SetInsertPoint(LoadCmpBlocks[BlockIndex]);
447 
448  Value *Source1 = getPtrToElementAtOffset(CI->getArgOperand(0), LoadSizeType,
449  CurLoadEntry.Offset);
450  Value *Source2 = getPtrToElementAtOffset(CI->getArgOperand(1), LoadSizeType,
451  CurLoadEntry.Offset);
452 
453  // Load LoadSizeType from the base address.
454  Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
455  Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
456 
457  if (DL.isLittleEndian()) {
458  Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
459  Intrinsic::bswap, LoadSizeType);
460  LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
461  LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
462  }
463 
464  if (LoadSizeType != MaxLoadType) {
465  LoadSrc1 = Builder.CreateZExt(LoadSrc1, MaxLoadType);
466  LoadSrc2 = Builder.CreateZExt(LoadSrc2, MaxLoadType);
467  }
468 
469  // Add the loaded values to the phi nodes for calculating the memcmp result,
470  // but only if the result is not used in a zero-equality comparison.
471  if (!IsUsedForZeroCmp) {
472  ResBlock.PhiSrc1->addIncoming(LoadSrc1, LoadCmpBlocks[BlockIndex]);
473  ResBlock.PhiSrc2->addIncoming(LoadSrc2, LoadCmpBlocks[BlockIndex]);
474  }
475 
476  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_EQ, LoadSrc1, LoadSrc2);
477  BasicBlock *NextBB = (BlockIndex == (LoadCmpBlocks.size() - 1))
478  ? EndBlock
479  : LoadCmpBlocks[BlockIndex + 1];
480  // Early exit branch to ResultBlock if a difference was found. Otherwise,
481  // continue to the next LoadCmpBlock or to EndBlock.
482  BranchInst *CmpBr = BranchInst::Create(NextBB, ResBlock.BB, Cmp);
483  Builder.Insert(CmpBr);
484 
485  // Add a phi edge for the last LoadCmpBlock to Endblock with a value of 0
486  // since early exit to ResultBlock was not taken (no difference was found in
487  // any of the bytes).
488  if (BlockIndex == LoadCmpBlocks.size() - 1) {
489  Value *Zero = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 0);
490  PhiRes->addIncoming(Zero, LoadCmpBlocks[BlockIndex]);
491  }
492 }
493 
494 // This function populates the ResultBlock with a sequence to calculate the
495 // memcmp result. It compares the two loaded source values and returns -1 if
496 // src1 < src2 and 1 if src1 > src2.
497 void MemCmpExpansion::emitMemCmpResultBlock() {
498  // Special case: if the memcmp result is only used in a zero-equality
499  // comparison, it does not need to be calculated; simply feed 1 into the phi.
500  if (IsUsedForZeroCmp) {
501  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
502  Builder.SetInsertPoint(ResBlock.BB, InsertPt);
503  Value *Res = ConstantInt::get(Type::getInt32Ty(CI->getContext()), 1);
504  PhiRes->addIncoming(Res, ResBlock.BB);
505  BranchInst *NewBr = BranchInst::Create(EndBlock);
506  Builder.Insert(NewBr);
507  return;
508  }
509  BasicBlock::iterator InsertPt = ResBlock.BB->getFirstInsertionPt();
510  Builder.SetInsertPoint(ResBlock.BB, InsertPt);
511 
512  Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, ResBlock.PhiSrc1,
513  ResBlock.PhiSrc2);
514 
515  Value *Res =
516  Builder.CreateSelect(Cmp, ConstantInt::get(Builder.getInt32Ty(), -1),
517  ConstantInt::get(Builder.getInt32Ty(), 1));
518 
519  BranchInst *NewBr = BranchInst::Create(EndBlock);
520  Builder.Insert(NewBr);
521  PhiRes->addIncoming(Res, ResBlock.BB);
522 }
523 
524 void MemCmpExpansion::setupResultBlockPHINodes() {
525  Type *MaxLoadType = IntegerType::get(CI->getContext(), MaxLoadSize * 8);
526  Builder.SetInsertPoint(ResBlock.BB);
527  // Note: this assumes one load per block.
528  ResBlock.PhiSrc1 =
529  Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src1");
530  ResBlock.PhiSrc2 =
531  Builder.CreatePHI(MaxLoadType, NumLoadsNonOneByte, "phi.src2");
532 }
533 
534 void MemCmpExpansion::setupEndBlockPHINodes() {
535  Builder.SetInsertPoint(&EndBlock->front());
536  PhiRes = Builder.CreatePHI(Type::getInt32Ty(CI->getContext()), 2, "phi.res");
537 }
538 
539 Value *MemCmpExpansion::getMemCmpExpansionZeroCase() {
540  unsigned LoadIndex = 0;
541  // This loop populates each of the LoadCmpBlocks with the IR sequence to
542  // handle multiple loads per block.
543  for (unsigned I = 0; I < getNumBlocks(); ++I) {
544  emitLoadCompareBlockMultipleLoads(I, LoadIndex);
545  }
546 
547  emitMemCmpResultBlock();
548  return PhiRes;
549 }
550 
551 /// A memcmp expansion that compares equality with 0 and only has one block of
552 /// load and compare can bypass the compare, branch, and phi IR that is required
553 /// in the general case.
554 Value *MemCmpExpansion::getMemCmpEqZeroOneBlock() {
555  unsigned LoadIndex = 0;
556  Value *Cmp = getCompareLoadPairs(0, LoadIndex);
557  assert(LoadIndex == getNumLoads() && "some entries were not consumed");
558  return Builder.CreateZExt(Cmp, Type::getInt32Ty(CI->getContext()));
559 }
560 
561 /// A memcmp expansion that only has one block of load and compare can bypass
562 /// the compare, branch, and phi IR that is required in the general case.
563 Value *MemCmpExpansion::getMemCmpOneBlock() {
564  Type *LoadSizeType = IntegerType::get(CI->getContext(), Size * 8);
565  Value *Source1 = CI->getArgOperand(0);
566  Value *Source2 = CI->getArgOperand(1);
567 
568  // Cast source to LoadSizeType*.
569  if (Source1->getType() != LoadSizeType)
570  Source1 = Builder.CreateBitCast(Source1, LoadSizeType->getPointerTo());
571  if (Source2->getType() != LoadSizeType)
572  Source2 = Builder.CreateBitCast(Source2, LoadSizeType->getPointerTo());
573 
574  // Load LoadSizeType from the base address.
575  Value *LoadSrc1 = Builder.CreateLoad(LoadSizeType, Source1);
576  Value *LoadSrc2 = Builder.CreateLoad(LoadSizeType, Source2);
577 
578  if (DL.isLittleEndian() && Size != 1) {
579  Function *Bswap = Intrinsic::getDeclaration(CI->getModule(),
580  Intrinsic::bswap, LoadSizeType);
581  LoadSrc1 = Builder.CreateCall(Bswap, LoadSrc1);
582  LoadSrc2 = Builder.CreateCall(Bswap, LoadSrc2);
583  }
584 
585  if (Size < 4) {
586  // The i8 and i16 cases don't need compares. We zext the loaded values and
587  // subtract them to get the suitable negative, zero, or positive i32 result.
588  LoadSrc1 = Builder.CreateZExt(LoadSrc1, Builder.getInt32Ty());
589  LoadSrc2 = Builder.CreateZExt(LoadSrc2, Builder.getInt32Ty());
590  return Builder.CreateSub(LoadSrc1, LoadSrc2);
591  }
592 
593  // The result of memcmp is negative, zero, or positive, so produce that by
594  // subtracting 2 extended compare bits: sub (ugt, ult).
595  // If a target prefers to use selects to get -1/0/1, they should be able
596  // to transform this later. The inverse transform (going from selects to math)
597  // may not be possible in the DAG because the selects got converted into
598  // branches before we got there.
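  // Illustrative values: if (after the optional bswap) LoadSrc1 == 0x10 and
  // LoadSrc2 == 0x20, then ugt == 0 and ult == 1, so the result is
  // sub(0, 1) == -1; equal inputs give 0, and a greater first source gives +1.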
599  Value *CmpUGT = Builder.CreateICmpUGT(LoadSrc1, LoadSrc2);
600  Value *CmpULT = Builder.CreateICmpULT(LoadSrc1, LoadSrc2);
601  Value *ZextUGT = Builder.CreateZExt(CmpUGT, Builder.getInt32Ty());
602  Value *ZextULT = Builder.CreateZExt(CmpULT, Builder.getInt32Ty());
603  return Builder.CreateSub(ZextUGT, ZextULT);
604 }
605 
606 // This function expands the memcmp call into an inline expansion and returns
607 // the memcmp result.
608 Value *MemCmpExpansion::getMemCmpExpansion() {
609  // Create the basic block framework for a multi-block expansion.
610  if (getNumBlocks() != 1) {
611  BasicBlock *StartBlock = CI->getParent();
612  EndBlock = StartBlock->splitBasicBlock(CI, "endblock");
613  setupEndBlockPHINodes();
614  createResultBlock();
615 
616  // If return value of memcmp is not used in a zero equality, we need to
617  // calculate which source was larger. The calculation requires the
618  // two loaded source values of each load compare block.
619  // These will be saved in the phi nodes created by setupResultBlockPHINodes.
620  if (!IsUsedForZeroCmp) setupResultBlockPHINodes();
621 
622  // Create the number of required load compare basic blocks.
623  createLoadCmpBlocks();
624 
625  // Update the terminator added by splitBasicBlock to branch to the first
626  // LoadCmpBlock.
627  StartBlock->getTerminator()->setSuccessor(0, LoadCmpBlocks[0]);
628  }
629 
630  Builder.SetCurrentDebugLocation(CI->getDebugLoc());
631 
632  if (IsUsedForZeroCmp)
633  return getNumBlocks() == 1 ? getMemCmpEqZeroOneBlock()
634  : getMemCmpExpansionZeroCase();
635 
636  if (getNumBlocks() == 1)
637  return getMemCmpOneBlock();
638 
639  for (unsigned I = 0; I < getNumBlocks(); ++I) {
640  emitLoadCompareBlock(I);
641  }
642 
643  emitMemCmpResultBlock();
644  return PhiRes;
645 }
646 
647 // This function checks to see if an expansion of memcmp can be generated.
648 // It checks for a constant compare size that is less than the max inline size.
649 // If an expansion cannot occur, it returns false to leave the call as a library
650 // call. Otherwise, the library call is replaced with a new IR instruction sequence.
651 /// We want to transform:
652 /// %call = call signext i32 @memcmp(i8* %0, i8* %1, i64 15)
653 /// To:
654 /// loadbb:
655 /// %0 = bitcast i32* %buffer2 to i8*
656 /// %1 = bitcast i32* %buffer1 to i8*
657 /// %2 = bitcast i8* %1 to i64*
658 /// %3 = bitcast i8* %0 to i64*
659 /// %4 = load i64, i64* %2
660 /// %5 = load i64, i64* %3
661 /// %6 = call i64 @llvm.bswap.i64(i64 %4)
662 /// %7 = call i64 @llvm.bswap.i64(i64 %5)
663 /// %8 = sub i64 %6, %7
664 /// %9 = icmp ne i64 %8, 0
665 /// br i1 %9, label %res_block, label %loadbb1
666 /// res_block: ; preds = %loadbb2,
667 /// %loadbb1, %loadbb
668 /// %phi.src1 = phi i64 [ %6, %loadbb ], [ %22, %loadbb1 ], [ %36, %loadbb2 ]
669 /// %phi.src2 = phi i64 [ %7, %loadbb ], [ %23, %loadbb1 ], [ %37, %loadbb2 ]
670 /// %10 = icmp ult i64 %phi.src1, %phi.src2
671 /// %11 = select i1 %10, i32 -1, i32 1
672 /// br label %endblock
673 /// loadbb1: ; preds = %loadbb
674 /// %12 = bitcast i32* %buffer2 to i8*
675 /// %13 = bitcast i32* %buffer1 to i8*
676 /// %14 = bitcast i8* %13 to i32*
677 /// %15 = bitcast i8* %12 to i32*
678 /// %16 = getelementptr i32, i32* %14, i32 2
679 /// %17 = getelementptr i32, i32* %15, i32 2
680 /// %18 = load i32, i32* %16
681 /// %19 = load i32, i32* %17
682 /// %20 = call i32 @llvm.bswap.i32(i32 %18)
683 /// %21 = call i32 @llvm.bswap.i32(i32 %19)
684 /// %22 = zext i32 %20 to i64
685 /// %23 = zext i32 %21 to i64
686 /// %24 = sub i64 %22, %23
687 /// %25 = icmp ne i64 %24, 0
688 /// br i1 %25, label %res_block, label %loadbb2
689 /// loadbb2: ; preds = %loadbb1
690 /// %26 = bitcast i32* %buffer2 to i8*
691 /// %27 = bitcast i32* %buffer1 to i8*
692 /// %28 = bitcast i8* %27 to i16*
693 /// %29 = bitcast i8* %26 to i16*
694 /// %30 = getelementptr i16, i16* %28, i16 6
695 /// %31 = getelementptr i16, i16* %29, i16 6
696 /// %32 = load i16, i16* %30
697 /// %33 = load i16, i16* %31
698 /// %34 = call i16 @llvm.bswap.i16(i16 %32)
699 /// %35 = call i16 @llvm.bswap.i16(i16 %33)
700 /// %36 = zext i16 %34 to i64
701 /// %37 = zext i16 %35 to i64
702 /// %38 = sub i64 %36, %37
703 /// %39 = icmp ne i64 %38, 0
704 /// br i1 %39, label %res_block, label %loadbb3
705 /// loadbb3: ; preds = %loadbb2
706 /// %40 = bitcast i32* %buffer2 to i8*
707 /// %41 = bitcast i32* %buffer1 to i8*
708 /// %42 = getelementptr i8, i8* %41, i8 14
709 /// %43 = getelementptr i8, i8* %40, i8 14
710 /// %44 = load i8, i8* %42
711 /// %45 = load i8, i8* %43
712 /// %46 = zext i8 %44 to i32
713 /// %47 = zext i8 %45 to i32
714 /// %48 = sub i32 %46, %47
715 /// br label %endblock
716 /// endblock: ; preds = %res_block,
717 /// %loadbb3
718 /// %phi.res = phi i32 [ %48, %loadbb3 ], [ %11, %res_block ]
719 /// ret i32 %phi.res
720 static bool expandMemCmp(CallInst *CI, const TargetTransformInfo *TTI,
721  const TargetLowering *TLI, const DataLayout *DL) {
722  NumMemCmpCalls++;
723 
724  // Early exit from expansion if -Oz.
725  if (CI->getFunction()->optForMinSize())
726  return false;
727 
728  // Early exit from expansion if size is not a constant.
729  ConstantInt *SizeCast = dyn_cast<ConstantInt>(CI->getArgOperand(2));
730  if (!SizeCast) {
731  NumMemCmpNotConstant++;
732  return false;
733  }
734  const uint64_t SizeVal = SizeCast->getZExtValue();
735 
736  if (SizeVal == 0) {
737  return false;
738  }
739  // TTI call to check if target would like to expand memcmp. Also, get the
740  // available load sizes.
741  const bool IsUsedForZeroCmp = isOnlyUsedInZeroEqualityComparison(CI);
742  const auto *const Options = TTI->enableMemCmpExpansion(IsUsedForZeroCmp);
743  if (!Options) return false;
744 
745  const unsigned MaxNumLoads =
746  TLI->getMaxExpandSizeMemcmp(CI->getFunction()->optForSize());
747 
748  unsigned NumLoadsPerBlock = MemCmpEqZeroNumLoadsPerBlock.getNumOccurrences()
749  ? MemCmpEqZeroNumLoadsPerBlock
750  : TLI->getMemcmpEqZeroLoadsPerBlock();
751 
752  MemCmpExpansion Expansion(CI, SizeVal, *Options, MaxNumLoads,
753  IsUsedForZeroCmp, NumLoadsPerBlock, *DL);
754 
755  // Don't expand if this will require more loads than desired by the target.
756  if (Expansion.getNumLoads() == 0) {
757  NumMemCmpGreaterThanMax++;
758  return false;
759  }
760 
761  NumMemCmpInlined++;
762 
763  Value *Res = Expansion.getMemCmpExpansion();
764 
765  // Replace call with result of expansion and erase call.
766  CI->replaceAllUsesWith(Res);
767  CI->eraseFromParent();
768 
769  return true;
770 }
771 
772 
773 
774 class ExpandMemCmpPass : public FunctionPass {
775 public:
776  static char ID;
777 
778  ExpandMemCmpPass() : FunctionPass(ID) {
779  initializeExpandMemCmpPassPass(*PassRegistry::getPassRegistry());
780  }
781 
782  bool runOnFunction(Function &F) override {
783  if (skipFunction(F)) return false;
784 
785  auto *TPC = getAnalysisIfAvailable<TargetPassConfig>();
786  if (!TPC) {
787  return false;
788  }
789  const TargetLowering* TL =
790  TPC->getTM<TargetMachine>().getSubtargetImpl(F)->getTargetLowering();
791 
792  const TargetLibraryInfo *TLI =
793  &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
794  const TargetTransformInfo *TTI =
795  &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
796  auto PA = runImpl(F, TLI, TTI, TL);
797  return !PA.areAllPreserved();
798  }
799 
800 private:
801  void getAnalysisUsage(AnalysisUsage &AU) const override {
802  AU.addRequired<TargetLibraryInfoWrapperPass>();
803  AU.addRequired<TargetTransformInfoWrapperPass>();
804  FunctionPass::getAnalysisUsage(AU);
805  }
806 
807  PreservedAnalyses runImpl(Function &F, const TargetLibraryInfo *TLI,
808  const TargetTransformInfo *TTI,
809  const TargetLowering* TL);
810  // Returns true if a change was made.
811  bool runOnBlock(BasicBlock &BB, const TargetLibraryInfo *TLI,
812  const TargetTransformInfo *TTI, const TargetLowering* TL,
813  const DataLayout& DL);
814 };
815 
816 bool ExpandMemCmpPass::runOnBlock(
817  BasicBlock &BB, const TargetLibraryInfo *TLI,
818  const TargetTransformInfo *TTI, const TargetLowering* TL,
819  const DataLayout& DL) {
820  for (Instruction& I : BB) {
821  CallInst *CI = dyn_cast<CallInst>(&I);
822  if (!CI) {
823  continue;
824  }
825  LibFunc Func;
826  if (TLI->getLibFunc(ImmutableCallSite(CI), Func) &&
827  Func == LibFunc_memcmp && expandMemCmp(CI, TTI, TL, &DL)) {
828  return true;
829  }
830  }
831  return false;
832 }
833 
834 
835 PreservedAnalyses ExpandMemCmpPass::runImpl(
836  Function &F, const TargetLibraryInfo *TLI, const TargetTransformInfo *TTI,
837  const TargetLowering* TL) {
838  const DataLayout& DL = F.getParent()->getDataLayout();
839  bool MadeChanges = false;
840  for (auto BBIt = F.begin(); BBIt != F.end();) {
841  if (runOnBlock(*BBIt, TLI, TTI, TL, DL)) {
842  MadeChanges = true;
843  // If changes were made, restart the function from the beginning, since
844  // the structure of the function was changed.
845  BBIt = F.begin();
846  } else {
847  ++BBIt;
848  }
849  }
850  return MadeChanges ? PreservedAnalyses::none() : PreservedAnalyses::all();
851 }
852 
853 } // namespace
854 
855 char ExpandMemCmpPass::ID = 0;
856 INITIALIZE_PASS_BEGIN(ExpandMemCmpPass, "expandmemcmp",
857  "Expand memcmp() to load/stores", false, false)
858 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
859 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
860 INITIALIZE_PASS_END(ExpandMemCmpPass, "expandmemcmp",
861  "Expand memcmp() to load/stores", false, false)
862 
863 FunctionPass *llvm::createExpandMemCmpPass() {
864  return new ExpandMemCmpPass();
865 }