hwasan: Enable -hwasan-allow-ifunc by default.
[lldb.git] / llvm / lib / Transforms / Instrumentation / HWAddressSanitizer.cpp
1 //===- HWAddressSanitizer.cpp - detector of uninitialized reads -------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file is a part of HWAddressSanitizer, an address sanity checker
11 /// based on tagged addressing.
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/ADT/SmallVector.h"
15 #include "llvm/ADT/StringExtras.h"
16 #include "llvm/ADT/StringRef.h"
17 #include "llvm/ADT/Triple.h"
18 #include "llvm/IR/Attributes.h"
19 #include "llvm/IR/BasicBlock.h"
20 #include "llvm/IR/Constant.h"
21 #include "llvm/IR/Constants.h"
22 #include "llvm/IR/DataLayout.h"
23 #include "llvm/IR/DerivedTypes.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/IRBuilder.h"
26 #include "llvm/IR/InlineAsm.h"
27 #include "llvm/IR/InstVisitor.h"
28 #include "llvm/IR/Instruction.h"
29 #include "llvm/IR/Instructions.h"
30 #include "llvm/IR/IntrinsicInst.h"
31 #include "llvm/IR/Intrinsics.h"
32 #include "llvm/IR/LLVMContext.h"
33 #include "llvm/IR/MDBuilder.h"
34 #include "llvm/IR/Module.h"
35 #include "llvm/IR/Type.h"
36 #include "llvm/IR/Value.h"
37 #include "llvm/Pass.h"
38 #include "llvm/Support/Casting.h"
39 #include "llvm/Support/CommandLine.h"
40 #include "llvm/Support/Debug.h"
41 #include "llvm/Support/raw_ostream.h"
42 #include "llvm/Transforms/Instrumentation.h"
43 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
44 #include "llvm/Transforms/Utils/ModuleUtils.h"
45 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
46 #include <sstream>
47
using namespace llvm;

#define DEBUG_TYPE "hwasan"

// Names of the runtime entry points this pass emits references to.
static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";

// Runtime-provided global holding the dynamic shadow base, loaded when the
// mapping offset is not known at compile time (see getDynamicShadowNonTls).
static const char *const kHwasanShadowMemoryDynamicAddress =
    "__hwasan_shadow_memory_dynamic_address";

// Accesses sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const size_t kDefaultShadowScale = 4;
// Sentinel for Mapping.Offset meaning "resolve the shadow base at run time".
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
// The tag lives in the top byte of a 64-bit pointer.
static const unsigned kPointerTagShift = 56;

static const unsigned kShadowBaseAlignment = 32;

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                cl::desc("instrument reads and writes with callbacks"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "non-instrumented and instrumented function calls mix. When set "
             "to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow to change the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset

static cl::opt<unsigned long long> ClMappingOffset(
    "hwasan-mapping-offset",
    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

static cl::opt<bool>
    ClWithIfunc("hwasan-with-ifunc",
                cl::desc("Access dynamic shadow through an ifunc global on "
                         "platforms that support this"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClWithTls(
    "hwasan-with-tls",
    cl::desc("Access dynamic shadow through an thread-local pointer on "
             "platforms that support this"),
    cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClRecordStackHistory("hwasan-record-stack-history",
                         cl::desc("Record stack frames with tagged allocations "
                                  "in a thread-local ring buffer"),
                         cl::Hidden, cl::init(true));
static cl::opt<bool>
    ClCreateFrameDescriptions("hwasan-create-frame-descriptions",
                              cl::desc("create static frame descriptions"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool>
    ClInstrumentMemIntrinsics("hwasan-instrument-mem-intrinsics",
                              cl::desc("instrument memory intrinsics"),
                              cl::Hidden, cl::init(true));

static cl::opt<bool> ClInlineAllChecks("hwasan-inline-all-checks",
                                       cl::desc("inline all checks"),
                                       cl::Hidden, cl::init(false));
162
namespace {

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  // Command-line flags, when explicitly given, override the constructor
  // arguments (which come from the frontend).
  explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
      : FunctionPass(ID) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;

  void initializeCallbacks(Module &M);

  // Helpers that materialize the dynamic shadow base in a function.
  Value *getDynamicShadowIfunc(IRBuilder<> &IRB);
  Value *getDynamicShadowNonTls(IRBuilder<> &IRB);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *shadowBase();
  Value *memToShadow(Value *Shadow, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  void instrumentMemIntrinsic(MemIntrinsic *MI);
  bool instrumentMemAccess(Instruction *I);
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask);

  // Stack instrumentation: tagging/retagging of allocas.
  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
                       SmallVectorImpl<Instruction *> &RetVec, Value *StackTag);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                     unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

  Value *getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty);
  Value *emitPrologue(IRBuilder<> &IRB, bool WithFrameRecord);

private:
  LLVMContext *C;
  std::string CurModuleUniqueId;
  Triple TargetTriple;
  FunctionCallee HWAsanMemmove, HWAsanMemcpy, HWAsanMemset;

  // Frame description is a way to pass names/sizes of local variables
  // to the run-time w/o adding extra executable code in every function.
  // We do this by creating a separate section with {PC,Descr} pairs and passing
  // the section beg/end to __hwasan_init_frames() at module init time.
  std::string createFrameString(ArrayRef<AllocaInst*> Allocas);
  void createFrameGlobal(Function &F, const std::string &FrameString);
  // Get the section name for frame descriptions. Currently ELF-only.
  const char *getFrameSection() { return "__hwasan_frames"; }
  // Linker-synthesized start/stop symbols bounding the frame section.
  const char *getFrameSectionBeg() { return  "__start___hwasan_frames"; }
  const char *getFrameSectionEnd() { return  "__stop___hwasan_frames"; }
  // Declare an external hidden global referring to one of the section bounds.
  GlobalVariable *createFrameSectionBound(Module &M, Type *Ty,
                                          const char *Name) {
    auto GV = new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
                                 nullptr, Name);
    GV->setVisibility(GlobalValue::HiddenVisibility);
    return GV;
  }

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
  /// If InTls is true, then
  ///   extern char *__hwasan_tls;
  ///   shadow = (mem>>Scale) + align_up(__hwasan_shadow, kShadowBaseAlignment)
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;
    bool InTls;

    void init(Triple &TargetTriple);
    unsigned getAllocaAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  // Cached frequently-used types for the current module.
  Type *IntptrTy;
  Type *Int8PtrTy;
  Type *Int8Ty;
  Type *Int32Ty;

  bool CompileKernel;
  bool Recover;

  Function *HwasanCtorFunction;

  // Checking callbacks, indexed by [IsWrite][AccessSizeIndex].
  FunctionCallee HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  FunctionCallee HwasanMemoryAccessCallbackSized[2];

  FunctionCallee HwasanTagMemoryFunc;
  FunctionCallee HwasanGenerateTagFunc;
  FunctionCallee HwasanThreadEnterFunc;

  Constant *ShadowGlobal;

  // Per-function dynamic shadow base, if materialized in the prologue.
  Value *LocalDynamicShadow = nullptr;
  GlobalValue *ThreadPtrGlobal = nullptr;
};

} // end anonymous namespace
283
char HWAddressSanitizer::ID = 0;

// Register the legacy pass-manager pass under the "hwasan" name.
INITIALIZE_PASS_BEGIN(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
294
/// Factory for the legacy-PM pass. Kernel HWASan always runs in recovery
/// mode, hence the assertion that CompileKernel implies Recover.
FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
                                                 bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizer(CompileKernel, Recover);
}
300
/// Module-level initialization.
///
/// inserts a call to __hwasan_init to the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  Mapping.init(TargetTriple);

  // Cache the context and commonly used types for this module.
  C = &(M.getContext());
  CurModuleUniqueId = getUniqueModuleId(&M);
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8PtrTy = IRB.getInt8PtrTy();
  Int8Ty = IRB.getInt8Ty();
  Int32Ty = IRB.getInt32Ty();

  HwasanCtorFunction = nullptr;
  // The kernel has its own runtime bring-up, so only userspace builds get a
  // module constructor that calls __hwasan_init.
  if (!CompileKernel) {
    std::tie(HwasanCtorFunction, std::ignore) =
        createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
                                            kHwasanInitName,
                                            /*InitArgTypes=*/{},
                                            /*InitArgs=*/{});
    Comdat *CtorComdat = M.getOrInsertComdat(kHwasanModuleCtorName);
    HwasanCtorFunction->setComdat(CtorComdat);
    appendToGlobalCtors(M, HwasanCtorFunction, 0, HwasanCtorFunction);

    // Create a zero-length global in __hwasan_frame so that the linker will
    // always create start and stop symbols.
    //
    // N.B. If we ever start creating associated metadata in this pass this
    // global will need to be associated with the ctor.
    Type *Int8Arr0Ty = ArrayType::get(Int8Ty, 0);
    auto GV =
        new GlobalVariable(M, Int8Arr0Ty, /*isConstantGlobal*/ true,
                           GlobalVariable::PrivateLinkage,
                           Constant::getNullValue(Int8Arr0Ty), "__hwasan");
    GV->setSection(getFrameSection());
    GV->setComdat(CtorComdat);
    appendToCompilerUsed(M, GV);

    // Have the ctor register this module's frame descriptions with the
    // runtime by passing the section's start/stop bounds.
    IRBuilder<> IRBCtor(HwasanCtorFunction->getEntryBlock().getTerminator());
    IRBCtor.CreateCall(
        declareSanitizerInitFunction(M, "__hwasan_init_frames",
                                     {Int8PtrTy, Int8PtrTy}),
        {createFrameSectionBound(M, Int8Ty, getFrameSectionBeg()),
         createFrameSectionBound(M, Int8Ty, getFrameSectionEnd())});
  }

  // On Android the thread state is reached through a fixed TLS slot (see
  // getHwasanThreadSlotPtr); elsewhere declare the runtime's __hwasan_tls
  // thread-local and keep it alive via llvm.compiler.used.
  if (!TargetTriple.isAndroid())
    appendToCompilerUsed(
        M, ThreadPtrGlobal = new GlobalVariable(
               M, IntptrTy, false, GlobalVariable::ExternalLinkage, nullptr,
               "__hwasan_tls", nullptr, GlobalVariable::InitialExecTLSModel));

  return true;
}
361
/// Declare (or look up) all runtime callbacks this pass may emit calls to.
/// Callback names are built from ClMemoryAccessCallbackPrefix, the access
/// kind ("load"/"store"), the access size (or "N" for the sized variant),
/// and a "_noabort" suffix in recovery mode.
void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    // Sized variant: (address, size in bytes).
    HwasanMemoryAccessCallbackSized[AccessIsWrite] = M.getOrInsertFunction(
        ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
        FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false));

    // Fixed-size variants (1, 2, 4, 8, 16 bytes): (address) only.
    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false));
    }
  }

  HwasanTagMemoryFunc = M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), Int8PtrTy, Int8Ty, IntptrTy);
  HwasanGenerateTagFunc =
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty);

  ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                     ArrayType::get(IRB.getInt8Ty(), 0));

  // In kernel mode the plain libc mem* functions are intercepted, so no
  // prefix is applied there.
  const std::string MemIntrinCallbackPrefix =
      CompileKernel ? std::string("") : ClMemoryAccessCallbackPrefix;
  HWAsanMemmove = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memmove",
                                        IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                        IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemcpy = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memcpy",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt8PtrTy(), IntptrTy);
  HWAsanMemset = M.getOrInsertFunction(MemIntrinCallbackPrefix + "memset",
                                       IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
                                       IRB.getInt32Ty(), IntptrTy);

  HwasanThreadEnterFunc =
      M.getOrInsertFunction("__hwasan_thread_enter", IRB.getVoidTy());
}
405
406 Value *HWAddressSanitizer::getDynamicShadowIfunc(IRBuilder<> &IRB) {
407   // An empty inline asm with input reg == output reg.
408   // An opaque no-op cast, basically.
409   InlineAsm *Asm = InlineAsm::get(
410       FunctionType::get(Int8PtrTy, {ShadowGlobal->getType()}, false),
411       StringRef(""), StringRef("=r,0"),
412       /*hasSideEffects=*/false);
413   return IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
414 }
415
416 Value *HWAddressSanitizer::getDynamicShadowNonTls(IRBuilder<> &IRB) {
417   // Generate code only when dynamic addressing is needed.
418   if (Mapping.Offset != kDynamicShadowSentinel)
419     return nullptr;
420
421   if (Mapping.InGlobal) {
422     return getDynamicShadowIfunc(IRB);
423   } else {
424     Value *GlobalDynamicAddress =
425         IRB.GetInsertBlock()->getParent()->getParent()->getOrInsertGlobal(
426             kHwasanShadowMemoryDynamicAddress, Int8PtrTy);
427     return IRB.CreateLoad(Int8PtrTy, GlobalDynamicAddress);
428   }
429 }
430
/// Decide whether \p I is a memory access this pass should instrument.
/// Returns the pointer operand if so (filling in the out-parameters), or
/// null otherwise. MaybeMask is declared for parity with ASan's interface
/// but is never set here — masked vector accesses are not recognized.
Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                     bool *IsWrite,
                                                     uint64_t *TypeSize,
                                                     unsigned *Alignment,
                                                     Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    // Atomics report alignment 0; the size-based check in
    // instrumentMemAccess treats 0 as "sufficiently aligned".
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  return PtrOperand;
}
488
489 static unsigned getPointerOperandIndex(Instruction *I) {
490   if (LoadInst *LI = dyn_cast<LoadInst>(I))
491     return LI->getPointerOperandIndex();
492   if (StoreInst *SI = dyn_cast<StoreInst>(I))
493     return SI->getPointerOperandIndex();
494   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
495     return RMW->getPointerOperandIndex();
496   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
497     return XCHG->getPointerOperandIndex();
498   report_fatal_error("Unexpected instruction");
499   return -1;
500 }
501
502 static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
503   size_t Res = countTrailingZeros(TypeSize / 8);
504   assert(Res < kNumberOfAccessSizes);
505   return Res;
506 }
507
508 void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
509   if (TargetTriple.isAArch64())
510     return;
511
512   IRBuilder<> IRB(I);
513   Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
514   Value *UntaggedPtr =
515       IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
516   I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
517 }
518
519 Value *HWAddressSanitizer::shadowBase() {
520   if (LocalDynamicShadow)
521     return LocalDynamicShadow;
522   return ConstantExpr::getIntToPtr(ConstantInt::get(IntptrTy, Mapping.Offset),
523                                    Int8PtrTy);
524 }
525
526 Value *HWAddressSanitizer::memToShadow(Value *Mem, IRBuilder<> &IRB) {
527   // Mem >> Scale
528   Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
529   if (Mapping.Offset == 0)
530     return IRB.CreateIntToPtr(Shadow, Int8PtrTy);
531   // (Mem >> Scale) + Offset
532   return IRB.CreateGEP(Int8Ty, shadowBase(), Shadow);
533 }
534
/// Emit the inline tag check before \p InsertBefore: load the shadow byte
/// for \p Ptr, compare it with the pointer's tag, and trap (or continue, in
/// recovery mode) on mismatch.
void HWAddressSanitizer::instrumentMemAccessInline(Value *Ptr, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  // AccessInfo encoding: bit 5 = Recover, bit 4 = IsWrite, bits 0-3 = size
  // index. It is baked into the trap instruction's immediate below so the
  // runtime can decode what kind of access failed.
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
  IRBuilder<> IRB(InsertBefore);

  // On AArch64/ELF (non-recovering), use the outlined check intrinsic
  // instead of expanding the comparison inline.
  if (!ClInlineAllChecks && TargetTriple.isAArch64() &&
      TargetTriple.isOSBinFormatELF() && !Recover) {
    Module *M = IRB.GetInsertBlock()->getParent()->getParent();
    Ptr = IRB.CreateBitCast(Ptr, Int8PtrTy);
    IRB.CreateCall(
        Intrinsic::getDeclaration(M, Intrinsic::hwasan_check_memaccess),
        {shadowBase(), Ptr, ConstantInt::get(Int32Ty, AccessInfo)});
    return;
  }

  // Extract the pointer's tag (top byte) and load the shadow byte for the
  // untagged address.
  Value *PtrLong = IRB.CreatePointerCast(Ptr, IntptrTy);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *Shadow = memToShadow(AddrLong, IRB);
  Value *MemTag = IRB.CreateLoad(Int8Ty, Shadow);
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  // Pointers carrying the match-all tag are never reported. The kernel
  // defaults to 0xFF; userspace has no match-all tag unless the flag is set.
  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
  if (matchAllTag != -1) {
    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
        ConstantInt::get(PtrTag->getType(), matchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  // Split off a (statically cold) failure block; it terminates the function
  // unless we are in recovery mode.
  Instruction *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  // The failure block raises a trap whose immediate encodes AccessInfo, with
  // the faulting address pinned to a fixed register for the signal handler.
  IRB.SetInsertPoint(CheckTerm);
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      // The signal handler will find the data address in rdi.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
          "{rdi}",
          /*hasSideEffects=*/true);
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      // The signal handler will find the data address in x0.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "brk #" + itostr(0x900 + AccessInfo),
          "{x0}",
          /*hasSideEffects=*/true);
      break;
    default:
      report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
}
596
/// Replace a mem intrinsic (memcpy/memmove/memset) with a call to the
/// corresponding instrumented runtime function, then erase the intrinsic.
void HWAddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
  IRBuilder<> IRB(MI);
  if (isa<MemTransferInst>(MI)) {
    IRB.CreateCall(
        isa<MemMoveInst>(MI) ? HWAsanMemmove : HWAsanMemcpy,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreatePointerCast(MI->getOperand(1), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  } else if (isa<MemSetInst>(MI)) {
    // memset takes its fill value as i32, unlike the pointer args above.
    IRB.CreateCall(
        HWAsanMemset,
        {IRB.CreatePointerCast(MI->getOperand(0), IRB.getInt8PtrTy()),
         IRB.CreateIntCast(MI->getOperand(1), IRB.getInt32Ty(), false),
         IRB.CreateIntCast(MI->getOperand(2), IntptrTy, false)});
  }
  MI->eraseFromParent();
}
614
/// Instrument a single instruction if it is an interesting memory access.
/// Returns true iff any instrumentation was emitted.
bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;

  // Mem intrinsics are rewritten into runtime calls rather than checked.
  if (ClInstrumentMemIntrinsics && isa<MemIntrinsic>(I)) {
    instrumentMemIntrinsic(cast<MemIntrinsic>(I));
    return true;
  }

  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);

  if (!Addr)
    return false;

  // Masked vector accesses are not supported yet.
  if (MaybeMask)
    return false; //FIXME

  // Accesses of a supported power-of-two size that cannot straddle a shadow
  // granule (sufficiently aligned, or alignment unknown/0) take the fast
  // fixed-size path; everything else goes through the sized callback.
  IRBuilder<> IRB(I);
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
       Alignment >= TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     IRB.CreatePointerCast(Addr, IntptrTy));
    } else {
      instrumentMemAccessInline(Addr, IsWrite, AccessSizeIndex, I);
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {IRB.CreatePointerCast(Addr, IntptrTy),
                    ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  // On targets without top-byte-ignore, strip the tag before the real access.
  untagPointerOperand(I, Addr);

  return true;
}
657
658 static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
659   uint64_t ArraySize = 1;
660   if (AI.isArrayAllocation()) {
661     const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
662     assert(CI && "non-constant array size");
663     ArraySize = CI->getZExtValue();
664   }
665   Type *Ty = AI.getAllocatedType();
666   uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
667   return SizeInBytes * ArraySize;
668 }
669
/// Write \p Tag into the shadow for the whole of alloca \p AI (its size
/// rounded up to the alloca alignment, i.e. one shadow granule).
bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag) {
  // Round the size up to a multiple of the granule so every covered shadow
  // byte is written.
  size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
                ~(Mapping.getAllocaAlignment() - 1);

  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, Int8PtrTy), JustTag,
                    ConstantInt::get(IntptrTy, Size)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = memToShadow(IRB.CreatePointerCast(AI, IntptrTy), IRB);
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
  }
  return true;
}
693
/// Pick the XOR mask used to derive the tag of the AllocaNo-th alloca from
/// the function's base stack tag.
static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single armv8 instruction for
  // these masks. The list deliberately omits 255, which is reserved for
  // use-after-return detection.
  static const unsigned FastMasks[] = {
      0,   1,   2,   3,   4,   6,   7,   8,   12,  14,  15, 16,  24,
      28,  30,  31,  32,  48,  56,  60,  62,  63,  64,  96, 112, 120,
      124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
  // Wrap around so every alloca index maps onto some mask.
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}
705
706 Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
707   return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
708 }
709
/// Compute the function's base stack tag, either via a runtime call or
/// cheaply from frame-address entropy.
Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  // FIXME: use addressofreturnaddress (but implement it in aarch64 backend
  // first).
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn =
      Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
  // frameaddress(0) = this function's frame.
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..28 (ASLR entropy) and xor with bits 0..8 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}
730
731 Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
732                                         AllocaInst *AI, unsigned AllocaNo) {
733   if (ClGenerateTagsWithCalls)
734     return getNextTagWithCall(IRB);
735   return IRB.CreateXor(StackTag,
736                        ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
737 }
738
739 Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
740   if (ClUARRetagToZero)
741     return ConstantInt::get(IntptrTy, 0);
742   if (ClGenerateTagsWithCalls)
743     return getNextTagWithCall(IRB);
744   return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
745 }
746
// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    // AND-ing with (tag<<56 | low-56-bits-set) overwrites the top byte with
    // the tag while leaving the address bits intact.
    Value *ShiftedTag = IRB.CreateOr(
        IRB.CreateShl(Tag, kPointerTagShift),
        ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace can simply do OR (tag << 56);
    Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}
764
765 // Remove tag from an address.
766 Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
767   Value *UntaggedPtrLong;
768   if (CompileKernel) {
769     // Kernel addresses have 0xFF in the most significant byte.
770     UntaggedPtrLong = IRB.CreateOr(PtrLong,
771         ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
772   } else {
773     // Userspace addresses have 0x00.
774     UntaggedPtrLong = IRB.CreateAnd(PtrLong,
775         ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
776   }
777   return UntaggedPtrLong;
778 }
779
780 Value *HWAddressSanitizer::getHwasanThreadSlotPtr(IRBuilder<> &IRB, Type *Ty) {
781   Module *M = IRB.GetInsertBlock()->getParent()->getParent();
782   if (TargetTriple.isAArch64() && TargetTriple.isAndroid()) {
783     // Android provides a fixed TLS slot for sanitizers. See TLS_SLOT_SANITIZER
784     // in Bionic's libc/private/bionic_tls.h.
785     Function *ThreadPointerFunc =
786         Intrinsic::getDeclaration(M, Intrinsic::thread_pointer);
787     Value *SlotPtr = IRB.CreatePointerCast(
788         IRB.CreateConstGEP1_32(IRB.getInt8Ty(),
789                                IRB.CreateCall(ThreadPointerFunc), 0x30),
790         Ty->getPointerTo(0));
791     return SlotPtr;
792   }
793   if (ThreadPtrGlobal)
794     return ThreadPtrGlobal;
795
796
797   return nullptr;
798 }
799
800 // Creates a string with a description of the stack frame (set of Allocas).
801 // The string is intended to be human readable.
802 // The current form is: Size1 Name1; Size2 Name2; ...
803 std::string
804 HWAddressSanitizer::createFrameString(ArrayRef<AllocaInst *> Allocas) {
805   std::ostringstream Descr;
806   for (auto AI : Allocas)
807     Descr << getAllocaSizeInBytes(*AI) << " " <<  AI->getName().str() << "; ";
808   return Descr.str();
809 }
810
811 // Creates a global in the frame section which consists of two pointers:
812 // the function PC and the frame string constant.
813 void HWAddressSanitizer::createFrameGlobal(Function &F,
814                                            const std::string &FrameString) {
815   Module &M = *F.getParent();
816   auto DescrGV = createPrivateGlobalForString(M, FrameString, true);
817   auto PtrPairTy = StructType::get(F.getType(), DescrGV->getType());
818   auto GV = new GlobalVariable(
819       M, PtrPairTy, /*isConstantGlobal*/ true, GlobalVariable::PrivateLinkage,
820       ConstantStruct::get(PtrPairTy, (Constant *)&F, (Constant *)DescrGV),
821       "__hwasan");
822   GV->setSection(getFrameSection());
823   appendToCompilerUsed(M, GV);
824   // Put GV into the F's Comadat so that if F is deleted GV can be deleted too.
825   if (auto Comdat =
826           GetOrCreateFunctionComdat(F, TargetTriple, CurModuleUniqueId))
827     GV->setComdat(Comdat);
828 }
829
// Emits the function prologue: computes the dynamic shadow base and, when
// WithFrameRecord is set, pushes a {PC, SP-bits} record into the per-thread
// stack-history ring buffer. Returns the shadow base as an i8*.
Value *HWAddressSanitizer::emitPrologue(IRBuilder<> &IRB,
                                        bool WithFrameRecord) {
  // Non-TLS mappings resolve the shadow without touching thread state.
  if (!Mapping.InTls)
    return getDynamicShadowNonTls(IRB);

  // On Android, when no frame record is required, the ifunc-based shadow
  // base suffices and avoids reading the sanitizer TLS slot.
  if (!WithFrameRecord && TargetTriple.isAndroid())
    return getDynamicShadowIfunc(IRB);

  // Load the per-thread hwasan word (ring buffer position + size) from the
  // sanitizer TLS slot.
  Value *SlotPtr = getHwasanThreadSlotPtr(IRB, IntptrTy);
  assert(SlotPtr);

  Instruction *ThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

  Function *F = IRB.GetInsertBlock()->getParent();
  if (F->getFnAttribute("hwasan-abi").getValueAsString() == "interceptor") {
    // Under the interceptor ABI the thread may not be initialized yet: if
    // the slot reads zero, call the thread-enter hook and reload. The branch
    // weights mark initialization as very unlikely.
    Value *ThreadLongEqZero =
        IRB.CreateICmpEQ(ThreadLong, ConstantInt::get(IntptrTy, 0));
    auto *Br = cast<BranchInst>(SplitBlockAndInsertIfThen(
        ThreadLongEqZero, cast<Instruction>(ThreadLongEqZero)->getNextNode(),
        false, MDBuilder(*C).createBranchWeights(1, 100000)));

    IRB.SetInsertPoint(Br);
    // FIXME: This should call a new runtime function with a custom calling
    // convention to avoid needing to spill all arguments here.
    IRB.CreateCall(HwasanThreadEnterFunc);
    LoadInst *ReloadThreadLong = IRB.CreateLoad(IntptrTy, SlotPtr);

    // Join the original and reloaded values with a PHI in the merge block.
    IRB.SetInsertPoint(&*Br->getSuccessor(0)->begin());
    PHINode *ThreadLongPhi = IRB.CreatePHI(IntptrTy, 2);
    ThreadLongPhi->addIncoming(ThreadLong, ThreadLong->getParent());
    ThreadLongPhi->addIncoming(ReloadThreadLong, ReloadThreadLong->getParent());
    ThreadLong = ThreadLongPhi;
  }

  // Extract the address field from ThreadLong. Unnecessary on AArch64 with TBI.
  Value *ThreadLongMaybeUntagged =
      TargetTriple.isAArch64() ? ThreadLong : untagPointer(IRB, ThreadLong);

  if (WithFrameRecord) {
    // Prepare ring buffer data.
    auto PC = IRB.CreatePtrToInt(F, IntptrTy);
    auto GetStackPointerFn =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::frameaddress);
    Value *SP = IRB.CreatePtrToInt(
        IRB.CreateCall(GetStackPointerFn,
                       {Constant::getNullValue(IRB.getInt32Ty())}),
        IntptrTy);
    // Mix SP and PC. TODO: also add the tag to the mix.
    // Assumptions:
    // PC is 0x0000PPPPPPPPPPPP  (48 bits are meaningful, others are zero)
    // SP is 0xsssssssssssSSSS0  (4 lower bits are zero)
    // We only really need ~20 lower non-zero bits (SSSS), so we mix like this:
    //       0xSSSSPPPPPPPPPPPP
    SP = IRB.CreateShl(SP, 44);

    // Store data to ring buffer.
    Value *RecordPtr =
        IRB.CreateIntToPtr(ThreadLongMaybeUntagged, IntptrTy->getPointerTo(0));
    IRB.CreateStore(IRB.CreateOr(PC, SP), RecordPtr);

    // Update the ring buffer. Top byte of ThreadLong defines the size of the
    // buffer in pages, it must be a power of two, and the start of the buffer
    // must be aligned by twice that much. Therefore wrap around of the ring
    // buffer is simply Addr &= ~((ThreadLong >> 56) << 12).
    // The use of AShr instead of LShr is due to
    //   https://bugs.llvm.org/show_bug.cgi?id=39030
    // Runtime library makes sure not to use the highest bit.
    Value *WrapMask = IRB.CreateXor(
        IRB.CreateShl(IRB.CreateAShr(ThreadLong, 56), 12, "", true, true),
        ConstantInt::get(IntptrTy, (uint64_t)-1));
    Value *ThreadLongNew = IRB.CreateAnd(
        IRB.CreateAdd(ThreadLong, ConstantInt::get(IntptrTy, 8)), WrapMask);
    IRB.CreateStore(ThreadLongNew, SlotPtr);
  }

  // Get shadow base address by aligning RecordPtr up.
  // Note: this is not correct if the pointer is already aligned.
  // Runtime library will make sure this never happens.
  Value *ShadowBase = IRB.CreateAdd(
      IRB.CreateOr(
          ThreadLongMaybeUntagged,
          ConstantInt::get(IntptrTy, (1ULL << kShadowBaseAlignment) - 1)),
      ConstantInt::get(IntptrTy, 1), "hwasan.shadow");
  ShadowBase = IRB.CreateIntToPtr(ShadowBase, Int8PtrTy);
  return ShadowBase;
}
916
917 bool HWAddressSanitizer::instrumentStack(
918     SmallVectorImpl<AllocaInst *> &Allocas,
919     SmallVectorImpl<Instruction *> &RetVec, Value *StackTag) {
920   // Ideally, we want to calculate tagged stack base pointer, and rewrite all
921   // alloca addresses using that. Unfortunately, offsets are not known yet
922   // (unless we use ASan-style mega-alloca). Instead we keep the base tag in a
923   // temp, shift-OR it into each alloca address and xor with the retag mask.
924   // This generates one extra instruction per alloca use.
925   for (unsigned N = 0; N < Allocas.size(); ++N) {
926     auto *AI = Allocas[N];
927     IRBuilder<> IRB(AI->getNextNode());
928
929     // Replace uses of the alloca with tagged address.
930     Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
931     Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
932     Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
933     std::string Name =
934         AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
935     Replacement->setName(Name + ".hwasan");
936
937     for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
938       Use &U = *UI++;
939       if (U.getUser() != AILong)
940         U.set(Replacement);
941     }
942
943     tagAlloca(IRB, AI, Tag);
944
945     for (auto RI : RetVec) {
946       IRB.SetInsertPoint(RI);
947
948       // Re-tag alloca memory with the special UAR tag.
949       Value *Tag = getUARTag(IRB, StackTag);
950       tagAlloca(IRB, AI, Tag);
951     }
952   }
953
954   return true;
955 }
956
957 bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
958   return (AI.getAllocatedType()->isSized() &&
959           // FIXME: instrument dynamic allocas, too
960           AI.isStaticAlloca() &&
961           // alloca() may be called with 0 size, ignore it.
962           getAllocaSizeInBytes(AI) > 0 &&
963           // We are only interested in allocas not promotable to registers.
964           // Promotable allocas are common under -O0.
965           !isAllocaPromotable(&AI) &&
966           // inalloca allocas are not treated as static, and we don't want
967           // dynamic alloca instrumentation for them as well.
968           !AI.isUsedWithInAlloca() &&
969           // swifterror allocas are register promoted by ISel
970           !AI.isSwiftError());
971 }
972
// Per-function driver: collects instrumentation targets, emits the prologue,
// instruments stack allocas and memory accesses. Returns true if the
// function was changed.
bool HWAddressSanitizer::runOnFunction(Function &F) {
  // Never instrument the module ctor this pass created itself.
  if (&F == HwasanCtorFunction)
    return false;

  // Only functions explicitly marked sanitize_hwaddress are instrumented.
  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  // One scan over the function gathers: interesting memory accesses /
  // mem intrinsics, interesting static allocas, and all exit instructions.
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<AllocaInst*, 8> AllocasToInstrument;
  SmallVector<Instruction*, 8> RetVec;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          // Realign all allocas. We don't want small uninteresting allocas to
          // hide in instrumented alloca's padding.
          if (AI->getAlignment() < Mapping.getAllocaAlignment())
            AI->setAlignment(Mapping.getAllocaAlignment());
          // Instrument some of them.
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      // Exit points — allocas get re-tagged (UAR) before each of these.
      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      Value *MaybeMask = nullptr;
      bool IsWrite;
      unsigned Alignment;
      uint64_t TypeSize;
      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                              &Alignment, &MaybeMask);
      if (Addr || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  // Nothing to do — leave the function untouched.
  if (AllocasToInstrument.empty() && ToInstrument.empty())
    return false;

  if (ClCreateFrameDescriptions && !AllocasToInstrument.empty())
    createFrameGlobal(F, createFrameString(AllocasToInstrument));

  initializeCallbacks(*F.getParent());

  // Per-function state must have been cleared by the previous run.
  assert(!LocalDynamicShadow);

  // Emit the prologue at the top of the entry block; it computes the shadow
  // base and, when recording stack history for allocas, a frame record.
  Instruction *InsertPt = &*F.getEntryBlock().begin();
  IRBuilder<> EntryIRB(InsertPt);
  LocalDynamicShadow = emitPrologue(EntryIRB,
                                    /*WithFrameRecord*/ ClRecordStackHistory &&
                                        !AllocasToInstrument.empty());

  bool Changed = false;
  if (!AllocasToInstrument.empty()) {
    // With runtime-generated tags there is no base stack tag to compute.
    Value *StackTag =
        ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
    Changed |= instrumentStack(AllocasToInstrument, RetVec, StackTag);
  }

  // If we split the entry block, move any allocas that were originally in the
  // entry block back into the entry block so that they aren't treated as
  // dynamic allocas.
  if (EntryIRB.GetInsertBlock() != &F.getEntryBlock()) {
    InsertPt = &*F.getEntryBlock().begin();
    for (auto II = EntryIRB.GetInsertBlock()->begin(),
              IE = EntryIRB.GetInsertBlock()->end();
         II != IE;) {
      Instruction *I = &*II++;
      if (auto *AI = dyn_cast<AllocaInst>(I))
        if (isa<ConstantInt>(AI->getArraySize()))
          I->moveBefore(InsertPt);
    }
  }

  for (auto Inst : ToInstrument)
    Changed |= instrumentMemAccess(Inst);

  // Reset per-function state so the next function starts clean.
  LocalDynamicShadow = nullptr;

  return Changed;
}
1059
1060 void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
1061   Scale = kDefaultShadowScale;
1062   if (ClMappingOffset.getNumOccurrences() > 0) {
1063     InGlobal = false;
1064     InTls = false;
1065     Offset = ClMappingOffset;
1066   } else if (ClEnableKhwasan || ClInstrumentWithCalls) {
1067     InGlobal = false;
1068     InTls = false;
1069     Offset = 0;
1070   } else if (ClWithIfunc) {
1071     InGlobal = true;
1072     InTls = false;
1073     Offset = kDynamicShadowSentinel;
1074   } else if (ClWithTls) {
1075     InGlobal = false;
1076     InTls = true;
1077     Offset = kDynamicShadowSentinel;
1078   } else {
1079     InGlobal = false;
1080     InTls = false;
1081     Offset = kDynamicShadowSentinel;
1082   }
1083 }