#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/type_traits.h"
-#include <algorithm>
+#include <algorithm>
#include <cstddef>
#include <initializer_list>
#include <iterator>
SmallVectorImpl<unsigned> &SortedIndices);
/// Returns true if the memory operations \p A and \p B are consecutive.
-/// This is a simple API that does not depend on the analysis pass.
+/// This is a simple API that does not depend on the analysis pass.
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
ScalarEvolution &SE, bool CheckType = true);
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand. This can be initiated by
-/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
+/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult returns a LoopAccessInfo object. See this class for the
/// specifics of what information is provided.
class LoopAccessAnalysis
/// Contains a collection of routines for determining if a given instruction is
/// guaranteed to execute if a given point in control flow is reached. The most
/// common example is an instruction within a loop being provably executed if we
-/// branch to the header of it's containing loop.
+/// branch to the header of its containing loop.
///
//===----------------------------------------------------------------------===//
bool isGuaranteedToExecute(const Instruction &Inst, const DominatorTree *DT,
const Loop *CurLoop,
const LoopSafetyInfo *SafetyInfo);
-
+
}
#endif
bool haveFastSqrt(Type *Ty) { return false; }
bool isFCmpOrdCheaperThanFCmpZero(Type *Ty) { return true; }
-
+
unsigned getFPOpCost(Type *Ty) { return TargetTransformInfo::TCC_Basic; }
int getIntImmCodeSizeCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
/// This is equivalent to saying that all instructions within the basic block
/// are guaranteed to transfer execution to their successor within the basic
/// block. This has the same assumptions w.r.t. undefined behavior as the
- /// instruction variant of this function.
+ /// instruction variant of this function.
bool isGuaranteedToTransferExecutionToSuccessor(const BasicBlock *BB);
/// Return true if this function can prove that the instruction I
const std::string &getName() const { return Name; }
/// By default, write barriers are replaced with simple store
- /// instructions. If true, you must provide a custom pass to lower
+ /// instructions. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcwrite.
bool customWriteBarrier() const { return CustomWriteBarriers; }
/// By default, read barriers are replaced with simple load
- /// instructions. If true, you must provide a custom pass to lower
+ /// instructions. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcread.
bool customReadBarrier() const { return CustomReadBarriers; }
}
/// By default, roots are left for the code generator so it can generate a
- /// stack map. If true, you must provide a custom pass to lower
+ /// stack map. If true, you must provide a custom pass to lower
/// calls to \@llvm.gcroot.
bool customRoots() const { return CustomRoots; }
/// setAction ({G_ADD, 0, LLT::scalar(32)}, Legal);
/// setLegalizeScalarToDifferentSizeStrategy(
/// G_ADD, 0, widenToLargerTypesAndNarrowToLargest);
- /// will end up defining getAction({G_ADD, 0, T}) to return the following
+ /// will end up defining getAction({G_ADD, 0, T}) to return the following
/// actions for different scalar types T:
/// LLT::scalar(1)..LLT::scalar(31): {WidenScalar, 0, LLT::scalar(32)}
/// LLT::scalar(32): {Legal, 0, LLT::scalar(32)}
VectorElementSizeChangeStrategies[OpcodeIdx][TypeIdx] = S;
}
- /// A SizeChangeStrategy for the common case where legalization for a
+ /// A SizeChangeStrategy for the common case where legalization for a
/// particular operation consists of only supporting a specific set of type
/// sizes. E.g.
/// setAction ({G_DIV, 0, LLT::scalar(32)}, Legal);
uint32_t r_symbolnum; // symbol index if r_extern == 1 else section index
bool r_pcrel; // was relocated pc-relative already
uint8_t r_length; // length = 2 ^ r_length
- bool r_extern; //
+ bool r_extern; //
uint8_t r_type; // if not 0, machine-specific relocation type.
bool r_scattered; // 1 = scattered, 0 = non-scattered
int32_t r_value; // the value the item to be relocated is referring
// to.
- public:
+ public:
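  // Bit-layout sketch (read off the packing code below; not an authoritative
  // statement of the Mach-O relocation format): for scattered relocations the
  // packed word is [31]=1 (scattered), [30]=r_pcrel, [29:28]=r_length,
  // [27:24]=r_type, [23:0]=r_address; for non-scattered relocations
  // r_symbolnum occupies the high 24 bits with the pc-rel/length/type bits
  // packed beneath it.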
uint32_t getPackedFields() const {
if (r_scattered)
- return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
+ return (1 << 31) | (r_pcrel << 30) | ((r_length & 3) << 28) |
((r_type & 15) << 24) | (r_address & 0x00FFFFFF);
else
return (r_symbolnum << 8) | (r_pcrel << 7) | ((r_length & 3) << 5) |
uint32_t getRawAddress() const { return r_address; }
MachORelocation(uint32_t addr, uint32_t index, bool pcrel, uint8_t len,
- bool ext, uint8_t type, bool scattered = false,
- int32_t value = 0) :
+ bool ext, uint8_t type, bool scattered = false,
+ int32_t value = 0) :
r_address(addr), r_symbolnum(index), r_pcrel(pcrel), r_length(len),
r_extern(ext), r_type(type), r_scattered(scattered), r_value(value) {}
};
/// basic block's address of label.
MMIAddrLabelMap *AddrLabelSymbols;
- // TODO: Ideally, what we'd like is to have a switch that allows emitting
+ // TODO: Ideally, what we'd like is to have a switch that allows emitting
// synchronous (precise at call-sites only) CFA into .eh_frame. However,
// even under this switch, we'd like .debug_frame to be precise when using
// -g. At this moment, there's no way to specify that some CFI directives
MachineInstr *Instr = nullptr; ///< Alternatively, a MachineInstr.
public:
- SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
+ SUnit *OrigNode = nullptr; ///< If not this, the node from which this node
/// was cloned. (SD scheduling only)
const MCSchedClassDesc *SchedClass =
// TODO: we should change the STATEPOINT representation so that CC and
// Flags should be part of meta operands, with args and deopt operands, and
// gc operands all prefixed by their length and a type code. This would be
- // much more consistent.
+ // much more consistent.
public:
// These values are absolute offsets into the operands of the statepoint
// instruction.
#include "llvm/Pass.h"
#include "llvm/Support/CodeGen.h"
-#include <cassert>
+#include <cassert>
#include <string>
namespace llvm {
/// stack frame offset. The first register is closest to the incoming stack
/// pointer if stack grows down, and vice versa.
/// Notice: This function does not take into account disabled CSRs.
- /// In most cases you will want to use instead the function
+ /// In most cases you will want to use instead the function
/// getCalleeSavedRegs that is implemented in MachineRegisterInfo.
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF) const = 0;
/// guaranteed to be restored before any uses. This is useful for targets that
/// have call sequences where a GOT register may be updated by the caller
/// prior to a call and is guaranteed to be restored (also by the caller)
- /// after the call.
+ /// after the call.
virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
const MachineFunction &MF) const {
return false;
/// may have side effects cannot be removed without semantically changing the
/// generated program.
bool isSafeToRemove() const;
-
+
/// Return true if the instruction is a variety of EH-block.
bool isEHPad() const {
switch (getOpcode()) {
void setDoesNotThrow() {
addAttribute(AttributeList::FunctionIndex, Attribute::NoUnwind);
}
-
+
/// Return the function called, or null if this is an
/// indirect function invocation.
///
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
- def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
+ def int_experimental_constrained_exp : Intrinsic<[ llvm_anyfloat_ty ],
[ LLVMMatchType<0>,
llvm_metadata_ty,
llvm_metadata_ty ]>;
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
def int_arm_stcl : GCCBuiltin<"__builtin_arm_stcl">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
-def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
+def int_arm_stc2 : GCCBuiltin<"__builtin_arm_stc2">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
def int_arm_stc2l : GCCBuiltin<"__builtin_arm_stc2l">,
Intrinsic<[], [llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty], []>;
//===- IntrinsicsPowerPC.td - Defines PowerPC intrinsics ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines all of the PowerPC-specific intrinsics.
/// PowerPC_Vec_BBB_Intrinsic - A PowerPC intrinsic that takes two v16i8
/// vectors and returns one. These intrinsics have no side effects.
-class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
+class PowerPC_Vec_BBB_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
/// PowerPC_Vec_HHH_Intrinsic - A PowerPC intrinsic that takes two v8i16
/// vectors and returns one. These intrinsics have no side effects.
-class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
+class PowerPC_Vec_HHH_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
/// PowerPC_Vec_WWW_Intrinsic - A PowerPC intrinsic that takes two v4i32
/// vectors and returns one. These intrinsics have no side effects.
-class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
+class PowerPC_Vec_WWW_Intrinsic<string GCCIntSuffix>
: PowerPC_Vec_Intrinsic<GCCIntSuffix,
[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpgtud : GCCBuiltin<"__builtin_altivec_vcmpgtud">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty],
[IntrNoMem]>;
-
+
def int_ppc_altivec_vcmpequw : GCCBuiltin<"__builtin_altivec_vcmpequw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpnezw : GCCBuiltin<"__builtin_altivec_vcmpnezw">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty],
[IntrNoMem]>;
-
+
def int_ppc_altivec_vcmpequh : GCCBuiltin<"__builtin_altivec_vcmpequh">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem]>;
def int_ppc_altivec_vcmpnezw_p : GCCBuiltin<"__builtin_altivec_vcmpnezw_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v4i32_ty,llvm_v4i32_ty],
[IntrNoMem]>;
-
+
def int_ppc_altivec_vcmpequh_p : GCCBuiltin<"__builtin_altivec_vcmpequh_p">,
Intrinsic<[llvm_i32_ty],[llvm_i32_ty,llvm_v8i16_ty,llvm_v8i16_ty],
[IntrNoMem]>;
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumshs : GCCBuiltin<"__builtin_altivec_vmsumshs">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumubm : GCCBuiltin<"__builtin_altivec_vmsumubm">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vmsumuhm : GCCBuiltin<"__builtin_altivec_vmsumuhm">,
Intrinsic<[llvm_v4i32_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
// Other multiplies.
def int_ppc_altivec_vmladduhm : GCCBuiltin<"__builtin_altivec_vmladduhm">,
- Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
+ Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty], [IntrNoMem]>;
// Packs.
// Add Extended Quadword
def int_ppc_altivec_vaddeuqm : GCCBuiltin<"__builtin_altivec_vaddeuqm">,
- Intrinsic<[llvm_v1i128_ty],
+ Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
def int_ppc_altivec_vaddecuq : GCCBuiltin<"__builtin_altivec_vaddecuq">,
- Intrinsic<[llvm_v1i128_ty],
+ Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
// Sub Extended Quadword
def int_ppc_altivec_vsubeuqm : GCCBuiltin<"__builtin_altivec_vsubeuqm">,
- Intrinsic<[llvm_v1i128_ty],
+ Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
def int_ppc_altivec_vsubecuq : GCCBuiltin<"__builtin_altivec_vsubecuq">,
- Intrinsic<[llvm_v1i128_ty],
+ Intrinsic<[llvm_v1i128_ty],
[llvm_v1i128_ty, llvm_v1i128_ty, llvm_v1i128_ty],
[IntrNoMem]>;
}
// Right Shifts.
def int_ppc_altivec_vsr : PowerPC_Vec_WWW_Intrinsic<"vsr">;
def int_ppc_altivec_vsro : PowerPC_Vec_WWW_Intrinsic<"vsro">;
-
+
def int_ppc_altivec_vsrb : PowerPC_Vec_BBB_Intrinsic<"vsrb">;
def int_ppc_altivec_vsrh : PowerPC_Vec_HHH_Intrinsic<"vsrh">;
def int_ppc_altivec_vsrw : PowerPC_Vec_WWW_Intrinsic<"vsrw">;
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty], [IntrNoMem]>;
def int_ppc_altivec_vperm : GCCBuiltin<"__builtin_altivec_vperm_4si">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty, llvm_v16i8_ty], [IntrNoMem]>;
def int_ppc_altivec_vsel : GCCBuiltin<"__builtin_altivec_vsel_4si">,
- Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
+ Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty,
llvm_v4i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
def int_ppc_altivec_vgbbd : GCCBuiltin<"__builtin_altivec_vgbbd">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty], [IntrNoMem]>;
SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;
// Maps from a pass to its associated entry in UniqueAnalysisUsages. Does
- // not own the storage associated with either key or value..
+  // not own the storage associated with either key or value.
DenseMap<Pass *, AnalysisUsage*> AnUsageMap;
/// Collection of PassInfo objects found via analysis IDs and in this top
explicit Statepoint(CallSite CS) : Base(CS) {}
};
-/// Common base class for representing values projected from a statepoint.
+/// Common base class for representing values projected from a statepoint.
/// Currently, the only projections available are gc.result and gc.relocate.
class GCProjectionInst : public IntrinsicInst {
public:
void operator delete(void *Usr);
/// Placement delete - required by std, called if the ctor throws.
void operator delete(void *Usr, unsigned) {
- // Note: If a subclass manipulates the information which is required to calculate the
- // Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
+ // Note: If a subclass manipulates the information which is required to calculate the
+ // Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// to restore the changed information to the original value, since the dtor of that class
- // is not called if the ctor fails.
+ // is not called if the ctor fails.
User::operator delete(Usr);
#ifndef LLVM_ENABLE_EXCEPTIONS
}
/// Placement delete - required by std, called if the ctor throws.
void operator delete(void *Usr, unsigned, bool) {
- // Note: If a subclass manipulates the information which is required to calculate the
- // Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
+ // Note: If a subclass manipulates the information which is required to calculate the
+ // Usr memory pointer, e.g. NumUserOperands, the operator delete of that subclass has
// to restore the changed information to the original value, since the dtor of that class
- // is not called if the ctor fails.
+ // is not called if the ctor fails.
User::operator delete(Usr);
#ifndef LLVM_ENABLE_EXCEPTIONS
llvm::LLVMContext Context;
(void)new llvm::Module("", Context);
(void)new llvm::UnreachableInst(Context);
- (void) llvm::createVerifierPass();
+ (void) llvm::createVerifierPass();
}
} ForceVMCoreLinking;
}
/// Returns true if at least one of the register writes performed by
/// \param Inst implicitly clears the upper portion of all super-registers.
- ///
+ ///
/// Example: on X86-64, a write to EAX implicitly clears the upper half of
/// RAX. Also (still on x86) an XMM write performed by an AVX 128-bit
/// instruction implicitly clears the upper portion of the corresponding
/// AsmCond - Class to support conditional assembly
///
/// The conditional assembly feature (.if, .else, .elseif and .endif) is
-/// implemented with AsmCond that tells us what we are in the middle of
+/// implemented with AsmCond that tells us what we are in the middle of
/// processing. Ignore can be either true or false. When true we are ignoring
/// the block of code in the middle of a conditional.
/// If the comment includes embedded \n's, they will each get the comment
/// prefix as appropriate. The added comment should not end with a \n.
/// By default, each comment is terminated with an end of line, i.e. the
- /// EOL param is set to true by default. If one prefers not to end the
- /// comment with a new line then the EOL param should be passed
+ /// EOL param is set to true by default. If one prefers not to end the
+ /// comment with a new line then the EOL param should be passed
/// with a false value.
virtual void AddComment(const Twine &T, bool EOL = true) {}
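  // Usage sketch (illustrative, not part of this change): a streamer client
  // typically calls something like
  //   OutStreamer->AddComment("Type Signature");
  // immediately before emitting the corresponding value, as the DwarfTypeUnit
  // code later in this patch does; passing /*EOL=*/false leaves the comment
  // unterminated so further text can be appended to the same output line.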
relocation_iterator locrel_begin() const;
relocation_iterator locrel_end() const;
-
+
void moveRelocationNext(DataRefImpl &Rel) const override;
uint64_t getRelocationOffset(DataRefImpl Rel) const override;
symbol_iterator getRelocationSymbol(DataRefImpl Rel) const override;
// should be a small number, we just do a linear search over a (dense)
// vector.
Pass *ResultPass = Resolver->findImplPass(PI);
- assert(ResultPass &&
+ assert(ResultPass &&
"getAnalysis*() called on an analysis that was not "
"'required' by pass!");
//
// This file defines PassRegistry, a class that is used in the initialization
// and registration of passes. At application startup, passes are registered
-// with the PassRegistry, which is later provided to the PassManager for
+// with the PassRegistry, which is later provided to the PassManager for
// dependency resolution and similar tasks.
//
//===----------------------------------------------------------------------===//
/// A CodeRegion associates some code with a counter
CodeRegion,
- /// An ExpansionRegion represents a file expansion region that associates
+ /// An ExpansionRegion represents a file expansion region that associates
/// a source range with the expansion of a virtual source file, such as
/// for a macro instantiation or #include file.
ExpansionRegion,
namespace llvm {
-/// An auxiliary type to facilitate extraction of 3-byte entities.
+/// An auxiliary type to facilitate extraction of 3-byte entities.
struct Uint24 {
uint8_t Bytes[3];
Uint24(uint8_t U) {
//===- TargetCallingConv.td - Target Calling Conventions ---*- tablegen -*-===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines the target-independent interfaces with which targets
// an instruction. Each MCInstPredicate class has a well-known semantic, and it
// is used by a PredicateExpander to generate code for MachineInstr and/or
// MCInst.
-//
+//
// MCInstPredicate definitions can be used to construct MCSchedPredicate
// definitions. An MCSchedPredicate can be used in place of a SchedPredicate
// when defining SchedReadVariant and SchedWriteVariant used by a processor
//
// New MCInstPredicate classes must be added to this file. For each new class
// XYZ, an "expandXYZ" method must be added to the PredicateExpander.
-//
+//
//===----------------------------------------------------------------------===//
// Forward declarations.
bool considerHoistingFromTo(BasicBlock &FromBlock, BasicBlock &ToBlock);
// If true, this pass is a nop unless the target architecture has branch
- // divergence.
+ // divergence.
const bool OnlyIfDivergentTarget = false;
TargetTransformInfo *TTI = nullptr;
/// vararg functions can be extracted. This is safe, if all vararg handling
/// code is extracted, including vastart. If AllowAlloca is true, then
/// extraction of blocks containing alloca instructions would be possible,
- /// however code extractor won't validate whether extraction is legal.
+  /// however, the code extractor won't validate whether extraction is legal.
CodeExtractor(ArrayRef<BasicBlock *> BBs, DominatorTree *DT = nullptr,
bool AggregateArgs = false, BlockFrequencyInfo *BFI = nullptr,
BranchProbabilityInfo *BPI = nullptr,
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Attributes.h"
-#include "llvm/IR/Instructions.h"
+#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/Support/AtomicOrdering.h"
private:
void loadAndParseMapFiles();
- SymbolRewriter::RewriteDescriptorList Descriptors;
+ SymbolRewriter::RewriteDescriptorList Descriptors;
};
} // end namespace llvm
Alias = SetMayAlias;
AST.TotalMayAliasSetSize += size();
} else {
- // First entry of must alias must have maximum size!
+ // First entry of must alias must have maximum size!
P->updateSizeAndAAInfo(Size, AAInfo);
}
assert(Result != NoAlias && "Cannot be part of must set!");
for (PointerMapType::iterator I = PointerMap.begin(), E = PointerMap.end();
I != E; ++I)
I->second->eraseFromList();
-
+
PointerMap.clear();
-
+
// The alias sets should all be clear now.
AliasSets.clear();
}
for (iterator I = begin(), E = end(); I != E;) {
iterator Cur = I++;
if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
-
+
if (!FoundSet) { // If this is the first alias set ptr can go into.
FoundSet = &*Cur; // Remember it.
} else { // Otherwise, we must merge the sets.
// Return the set!
return *Entry.getAliasSet(*this)->getForwardedTarget(*this);
}
-
+
if (AliasSet *AS = mergeAliasSetsForPointer(Pointer, Size, AAInfo)) {
// Add it to the alias set it aliases.
AS->addPointer(*this, Entry, Size, AAInfo);
return *AS;
}
-
+
// Otherwise create a new alias set to hold the loaded pointer.
AliasSets.push_back(new AliasSet());
AliasSets.back().addPointer(*this, Entry, Size, AAInfo);
AS->SetSize--;
TotalMayAliasSetSize--;
}
-
+
// Stop using the alias set.
AS->dropRef(*this);
-
+
PointerMap.erase(I);
}
}
char CFGPrinterLegacyPass::ID = 0;
-INITIALIZE_PASS(CFGPrinterLegacyPass, "dot-cfg", "Print CFG of function to 'dot' file",
+INITIALIZE_PASS(CFGPrinterLegacyPass, "dot-cfg", "Print CFG of function to 'dot' file",
false, true)
PreservedAnalyses CFGPrinterPass::run(Function &F,
OS << "Call graph node for function: '" << F->getName() << "'";
else
OS << "Call graph node <<null function>>";
-
+
OS << "<<" << this << ">> #uses=" << getNumReferences() << '\n';
for (const auto &I : *this) {
#define DEBUG_TYPE "cgscc-passmgr"
-static cl::opt<unsigned>
+static cl::opt<unsigned>
MaxIterations("max-cg-scc-iterations", cl::ReallyHidden, cl::init(4));
STATISTIC(MaxSCCIterations, "Maximum CGSCCPassMgr iterations on one SCC");
}
PassManagerType getPassManagerType() const override {
- return PMT_CallGraphPassManager;
+ return PMT_CallGraphPassManager;
}
-
+
private:
bool RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
bool &DevirtualizedCall);
-
+
bool RunPassOnSCC(Pass *P, CallGraphSCC &CurSCC,
CallGraph &CG, bool &CallGraphUpToDate,
bool &DevirtualizedCall);
if (EmitICRemark)
emitInstrCountChangedRemark(P, M, InstrCount);
}
-
+
// After the CGSCCPass is done, when assertions are enabled, use
// RefreshCallGraph to verify that the callgraph was correctly updated.
#ifndef NDEBUG
if (Changed)
RefreshCallGraph(CurSCC, CG, true);
#endif
-
+
return Changed;
}
-
+
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
FPPassManager *FPP = (FPPassManager*)P;
-
+
// Run pass P on all functions in the current SCC.
for (CallGraphNode *CGN : CurSCC) {
if (Function *F = CGN->getFunction()) {
F->getContext().yield();
}
}
-
+
// The function pass(es) modified the IR, they may have clobbered the
// callgraph.
if (Changed && CallGraphUpToDate) {
bool MadeChange = false;
bool DevirtualizedCall = false;
-
+
// Scan all functions in the SCC.
unsigned FunctionNo = 0;
for (CallGraphSCC::iterator SCCIdx = CurSCC.begin(), E = CurSCC.end();
CallGraphNode *CGN = *SCCIdx;
Function *F = CGN->getFunction();
if (!F || F->isDeclaration()) continue;
-
+
// Walk the function body looking for call sites. Sync up the call sites in
// CGN with those actually in the function.
// Keep track of the number of direct and indirect calls that were
// invalidated and removed.
unsigned NumDirectRemoved = 0, NumIndirectRemoved = 0;
-
+
// Get the set of call sites currently in the function.
for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
// If this call site is null, then the function pass deleted the call
CallSites.count(I->first) ||
// If the call edge is not from a call or invoke, or it is an
- // instrinsic call, then the function pass RAUW'd a call with
+            // intrinsic call, then the function pass RAUW'd a call with
// another value. This can happen, for example, when constant folding
// of well-known functions occurs.
!CallSite(I->first) ||
CallSite(I->first).getCalledFunction()->getIntrinsicID()))) {
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
-
+
// If this was an indirect call site, count it.
if (!I->second->getFunction())
++NumIndirectRemoved;
- else
+ else
++NumDirectRemoved;
-
+
// Just remove the edge from the set of callees, keep track of whether
// I points to the last element of the vector.
bool WasLast = I + 1 == E;
CGN->removeCallEdge(I);
-
+
// If I pointed to the last element of the vector, we have to bail out:
// iterator checking rejects comparisons of the resultant pointer with
// end.
E = CGN->end();
continue;
}
-
+
assert(!CallSites.count(I->first) &&
"Call site occurs in node multiple times");
-
+
CallSite CS(I->first);
if (CS) {
Function *Callee = CS.getCalledFunction();
}
++I;
}
-
+
// Loop over all of the instructions in the function, getting the callsites.
// Keep track of the number of direct/indirect calls added.
unsigned NumDirectAdded = 0, NumIndirectAdded = 0;
if (!CS) continue;
Function *Callee = CS.getCalledFunction();
if (Callee && Callee->isIntrinsic()) continue;
-
+
// If this call site already existed in the callgraph, just verify it
// matches up to expectations and remove it from CallSites.
DenseMap<Value*, CallGraphNode*>::iterator ExistingIt =
// Remove from CallSites since we have now seen it.
CallSites.erase(ExistingIt);
-
+
// Verify that the callee is right.
if (ExistingNode->getFunction() == CS.getCalledFunction())
continue;
-
+
// If we are in checking mode, we are not allowed to actually mutate
// the callgraph. If this is a case where we can infer that the
// callgraph is less precise than it could be (e.g. an indirect call
if (CheckingMode && CS.getCalledFunction() &&
ExistingNode->getFunction() == nullptr)
continue;
-
+
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
-
+
// If not, we either went from a direct call to indirect, indirect to
// direct, or direct to different direct.
CallGraphNode *CalleeNode;
MadeChange = true;
continue;
}
-
+
assert(!CheckingMode &&
"CallGraphSCCPass did not update the CallGraph correctly!");
CalleeNode = CG.getCallsExternalNode();
++NumIndirectAdded;
}
-
+
CGN->addCalledFunction(CS, CalleeNode);
MadeChange = true;
}
-
+
// We scanned the old callgraph node, removing invalidated call sites and
// then added back newly found call sites. One thing that can happen is
// that an old indirect call site was deleted and replaced with a new direct
if (NumIndirectRemoved > NumIndirectAdded &&
NumDirectRemoved < NumDirectAdded)
DevirtualizedCall = true;
-
+
// After scanning this function, if we still have entries in callsites, then
// they are dangling pointers. WeakTrackingVH should save us from this, so
// abort if this happens.
assert(CallSites.empty() && "Dangling pointers found in call sites map");
-
+
// Periodically do an explicit clear to remove tombstones when processing
// large scc's.
if ((FunctionNo & 15) == 15)
bool CGPassManager::RunAllPassesOnSCC(CallGraphSCC &CurSCC, CallGraph &CG,
bool &DevirtualizedCall) {
bool Changed = false;
-
+
// Keep track of whether the callgraph is known to be up-to-date or not.
// The CGSCC pass manager runs two types of passes:
// CallGraphSCC Passes and other random function passes. Because other
for (unsigned PassNo = 0, e = getNumContainedPasses();
PassNo != e; ++PassNo) {
Pass *P = getContainedPass(PassNo);
-
+
// If we're in -debug-pass=Executions mode, construct the SCC node list,
// otherwise avoid constructing this string as it is expensive.
if (isPassDebuggingExecutionsOrMore()) {
dumpPassInfo(P, EXECUTION_MSG, ON_CG_MSG, Functions);
}
dumpRequiredSet(P);
-
+
initializeAnalysisImpl(P);
-
+
// Actually run this pass on the current SCC.
Changed |= RunPassOnSCC(P, CurSCC, CG,
CallGraphUpToDate, DevirtualizedCall);
-
+
if (Changed)
dumpPassInfo(P, MODIFICATION_MSG, ON_CG_MSG, "");
dumpPreservedSet(P);
-
- verifyPreservedAnalysis(P);
+
+ verifyPreservedAnalysis(P);
removeNotPreservedAnalysis(P);
recordAvailableAnalysis(P);
removeDeadPasses(P, "", ON_CG_MSG);
}
-
+
// If the callgraph was left out of date (because the last pass run was a
// functionpass), refresh it before we move on to the next SCC.
if (!CallGraphUpToDate)
bool CGPassManager::runOnModule(Module &M) {
CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
bool Changed = doInitialization(CG);
-
+
// Walk the callgraph in bottom-up SCC order.
scc_iterator<CallGraph*> CGI = scc_begin(&CG);
DevirtualizedCall = false;
Changed |= RunAllPassesOnSCC(CurSCC, CG, DevirtualizedCall);
} while (Iteration++ < MaxIterations && DevirtualizedCall);
-
+
if (DevirtualizedCall)
LLVM_DEBUG(dbgs() << " CGSCCPASSMGR: Stopped iteration after "
<< Iteration
/// Initialize CG
bool CGPassManager::doInitialization(CallGraph &CG) {
bool Changed = false;
- for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
+ for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) {
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
/// Finalize CG
bool CGPassManager::doFinalization(CallGraph &CG) {
bool Changed = false;
- for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
+ for (unsigned i = 0, e = getNumContainedPasses(); i != e; ++i) {
if (PMDataManager *PM = getContainedPass(i)->getAsPMDataManager()) {
assert(PM->getPassManagerType() == PMT_FunctionPassManager &&
"Invalid CGPassManager member");
Nodes[i] = New;
break;
}
-
+
// Update the active scc_iterator so that it doesn't contain dangling
// pointers to the old CallGraphNode.
scc_iterator<CallGraph*> *CGI = (scc_iterator<CallGraph*>*)Context;
/// Assign pass manager to manage this pass.
void CallGraphSCCPass::assignPassManager(PMStack &PMS,
PassManagerType PreferredType) {
- // Find CGPassManager
+ // Find CGPassManager
while (!PMS.empty() &&
PMS.top()->getPassManagerType() > PMT_CallGraphPassManager)
PMS.pop();
assert(!PMS.empty() && "Unable to handle Call Graph Pass");
CGPassManager *CGP;
-
+
if (PMS.top()->getPassManagerType() == PMT_CallGraphPassManager)
CGP = (CGPassManager*)PMS.top();
else {
- // Create new Call Graph SCC Pass Manager if it does not exist.
+ // Create new Call Graph SCC Pass Manager if it does not exist.
assert(!PMS.empty() && "Unable to create Call Graph Pass Manager");
PMDataManager *PMD = PMS.top();
class PrintCallGraphPass : public CallGraphSCCPass {
std::string Banner;
raw_ostream &OS; // raw_ostream to print on.
-
+
public:
static char ID;
}
return false;
}
-
+
StringRef getPassName() const override { return "Print CallGraph IR"; }
};
-
+
} // end anonymous namespace.
char PrintCallGraphPass::ID = 0;
// Analysis already completed for this function.
return;
Analyzed = true;
-
+
Visited.clear();
AliveBits.clear();
APInt DemandedBits::getDemandedBits(Instruction *I) {
performAnalysis();
-
+
const DataLayout &DL = I->getModule()->getDataLayout();
auto Found = AliveBits.find(I);
if (Found != AliveBits.end())
if (Constant *C = GV->getInitializer())
if (!C->isNullValue())
return false;
-
+
// Walk the user list of the global. If we find anything other than a direct
// load or store, bail out.
for (User *U : GV->users()) {
return true;
}
-void GlobalsAAResult::CollectSCCMembership(CallGraph &CG) {
+void GlobalsAAResult::CollectSCCMembership(CallGraph &CG) {
// We do a bottom-up SCC traversal of the call graph. In other words, we
// visit all callees before callers (leaf-first).
unsigned SCCID = 0;
Inputs.push_back(V);
do {
const Value *Input = Inputs.pop_back_val();
-
+
if (isa<GlobalValue>(Input) || isa<Argument>(Input) || isa<CallInst>(Input) ||
isa<InvokeInst>(Input))
// Arguments to functions or returns from functions are inherently
if (auto *LI = dyn_cast<LoadInst>(Input)) {
Inputs.push_back(GetUnderlyingObject(LI->getPointerOperand(), DL));
continue;
- }
+ }
if (auto *SI = dyn_cast<SelectInst>(Input)) {
const Value *LHS = GetUnderlyingObject(SI->getTrueValue(), DL);
const Value *RHS = GetUnderlyingObject(SI->getFalseValue(), DL);
}
continue;
}
-
+
return false;
} while (!Inputs.empty());
// non-addr-taken globals.
continue;
}
-
+
// Recurse through a limited number of selects, loads and PHIs. This is an
// arbitrary depth of 4, lower numbers could be used to fix compile time
// issues if needed, but this is generally expected to only be important
// frequently arranged such that dominating ones come first and we quickly
// find a path to function entry. TODO: We should consider explicitly
// canonicalizing to make this true rather than relying on this happy
- // accident.
+ // accident.
for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
ValueLatticeElement EdgeResult;
if (!getEdgeValue(Val, *PI, BB, EdgeResult))
/// Calculate Start and End points of memory access.
/// Let's assume A is the first access and B is a memory access on N-th loop
-/// iteration. Then B is calculated as:
-/// B = A + Step*N .
+/// iteration. Then B is calculated as:
+/// B = A + Step*N .
/// Step value may be positive or negative.
/// N is a calculated back-edge taken count:
///   N = (TripCount > 0) ? RoundDown(TripCount - 1, VF) : 0
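/// Worked example (illustrative numbers only): with TripCount = 10 and
/// VF = 4, N = RoundDown(9, 4) = 8, so for A = 0x100 and Step = 4 (bytes)
/// the last vectorized access is B = 0x100 + 4*8 = 0x120.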
return false;
}
-/// Given a non-constant (unknown) dependence-distance \p Dist between two
+/// Given a non-constant (unknown) dependence-distance \p Dist between two
/// memory accesses, that have the same stride whose absolute value is given
/// in \p Stride, and that have the same type size \p TypeByteSize,
/// in a loop whose takenCount is \p BackedgeTakenCount, check if it is
// If we can prove that
// (**) |Dist| > BackedgeTakenCount * Step
- // where Step is the absolute stride of the memory accesses in bytes,
+ // where Step is the absolute stride of the memory accesses in bytes,
// then there is no dependence.
//
- // Ratioanle:
- // We basically want to check if the absolute distance (|Dist/Step|)
- // is >= the loop iteration count (or > BackedgeTakenCount).
- // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
- // Section 4.2.1); Note, that for vectorization it is sufficient to prove
+  // Rationale:
+  // We basically want to check if the absolute distance (|Dist/Step|)
+  // is >= the loop iteration count (or > BackedgeTakenCount).
+  // This is equivalent to the Strong SIV Test (Practical Dependence Testing,
+  // Section 4.2.1); note that for vectorization it is sufficient to prove
// that the dependence distance is >= VF; This is checked elsewhere.
- // But in some cases we can prune unknown dependence distances early, and
- // even before selecting the VF, and without a runtime test, by comparing
- // the distance against the loop iteration count. Since the vectorized code
- // will be executed only if LoopCount >= VF, proving distance >= LoopCount
+ // But in some cases we can prune unknown dependence distances early, and
+ // even before selecting the VF, and without a runtime test, by comparing
+ // the distance against the loop iteration count. Since the vectorized code
+ // will be executed only if LoopCount >= VF, proving distance >= LoopCount
// also guarantees that distance >= VF.
//
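  // Illustrative numbers (not taken from any particular loop): with
  // Stride = 2, TypeByteSize = 4 and BackedgeTakenCount = 100, the Step above
  // is 2 * 4 = 8 bytes and (**) becomes |Dist| > 800; if that bound can be
  // proven at compile time, this unknown distance can be discarded as a
  // dependence.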
const uint64_t ByteStride = Stride * TypeByteSize;
uint64_t DistTypeSize = DL.getTypeAllocSize(Dist.getType());
uint64_t ProductTypeSize = DL.getTypeAllocSize(Product->getType());
- // The dependence distance can be positive/negative, so we sign extend Dist;
- // The multiplication of the absolute stride in bytes and the
+ // The dependence distance can be positive/negative, so we sign extend Dist;
+ // The multiplication of the absolute stride in bytes and the
// backedgeTakenCount is non-negative, so we zero extend Product.
if (DistTypeSize > ProductTypeSize)
CastedProduct = SE.getZeroExtendExpr(Product, Dist.getType());
"versioning:");
LLVM_DEBUG(dbgs() << " Ptr: " << *Ptr << " Stride: " << *Stride << "\n");
- // Avoid adding the "Stride == 1" predicate when we know that
+ // Avoid adding the "Stride == 1" predicate when we know that
// Stride >= Trip-Count. Such a predicate will effectively optimize a single
// or zero iteration loop, as Trip-Count <= Stride == 1.
- //
+ //
// TODO: We are currently not making a very informed decision on when it is
// beneficial to apply stride versioning. It might make more sense that the
- // users of this analysis (such as the vectorizer) will trigger it, based on
- // their specific cost considerations; For example, in cases where stride
+ // users of this analysis (such as the vectorizer) will trigger it, based on
+ // their specific cost considerations; For example, in cases where stride
// versioning does not help resolving memory accesses/dependences, the
- // vectorizer should evaluate the cost of the runtime test, and the benefit
- // of various possible stride specializations, considering the alternatives
- // of using gather/scatters (if available).
-
+ // vectorizer should evaluate the cost of the runtime test, and the benefit
+ // of various possible stride specializations, considering the alternatives
+ // of using gather/scatters (if available).
+
const SCEV *StrideExpr = PSE->getSCEV(Stride);
- const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
+ const SCEV *BETakenCount = PSE->getBackedgeTakenCount();
// Match the types so we can compare the stride and the BETakenCount.
- // The Stride can be positive/negative, so we sign extend Stride;
+ // The Stride can be positive/negative, so we sign extend Stride;
// The backedgeTakenCount is non-negative, so we zero extend BETakenCount.
const DataLayout &DL = TheLoop->getHeader()->getModule()->getDataLayout();
uint64_t StrideTypeSize = DL.getTypeAllocSize(StrideExpr->getType());
CastedBECount = SE->getZeroExtendExpr(BETakenCount, StrideExpr->getType());
const SCEV *StrideMinusBETaken = SE->getMinusSCEV(CastedStride, CastedBECount);
// Since TripCount == BackEdgeTakenCount + 1, checking:
- // "Stride >= TripCount" is equivalent to checking:
+ // "Stride >= TripCount" is equivalent to checking:
// Stride - BETakenCount > 0
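  // For example (hypothetical values): a loop with TripCount = 1 has
  // BETakenCount = 0, so for Stride = 1 the difference is 1 > 0; the
  // "Stride == 1" predicate would then only cover a loop body that runs at
  // most once, which is not worth versioning for.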
if (SE->isKnownPositive(StrideMinusBETaken)) {
LLVM_DEBUG(
} else {
SmallVector<NonLocalDepResult, 4> NLDI;
assert( (isa<LoadInst>(Inst) || isa<StoreInst>(Inst) ||
- isa<VAArgInst>(Inst)) && "Unknown memory instruction!");
+ isa<VAArgInst>(Inst)) && "Unknown memory instruction!");
MDA.getNonLocalPointerDependency(Inst, NLDI);
DepSet &InstDeps = Deps[Inst];
}
- void printInfoComment(const Value &V, formatted_raw_ostream &OS) override {
+ void printInfoComment(const Value &V, formatted_raw_ostream &OS) override {
if (!MustExec.count(&V))
return;
OS << " ; (mustexec in " << NumLoops << " loops: ";
else
OS << " ; (mustexec in: ";
-
+
bool first = true;
for (const Loop *L : Loops) {
if (!first)
MustExecuteAnnotatedWriter Writer(F, DT, LI);
F.print(dbgs(), &Writer);
-
+
return false;
}
// Construct the extended SCEV: (Ext ix (Trunc iy (Expr) to ix) to iy)
// for each of StartVal and Accum
- auto getExtendedExpr = [&](const SCEV *Expr,
+ auto getExtendedExpr = [&](const SCEV *Expr,
bool CreateSignExtend) -> const SCEV * {
assert(isLoopInvariant(Expr, L) && "Expr is expected to be invariant");
const SCEV *TruncatedExpr = getTruncateExpr(Expr, TruncTy);
return Rewrite;
}
-// FIXME: This utility is currently required because the Rewriter currently
-// does not rewrite this expression:
-// {0, +, (sext ix (trunc iy to ix) to iy)}
+// FIXME: This utility is currently required because the Rewriter currently
+// does not rewrite this expression:
+// {0, +, (sext ix (trunc iy to ix) to iy)}
// into {0, +, %step},
-// even when the following Equal predicate exists:
+// even when the following Equal predicate exists:
// "%step == (sext ix (trunc iy to ix) to iy)".
bool PredicatedScalarEvolution::areAddRecsEqualWithPreds(
const SCEVAddRecExpr *AR1, const SCEVAddRecExpr *AR2) const {
static Optional<ReductionData> getReductionData(Instruction *I) {
Value *L, *R;
if (m_BinOp(m_Value(L), m_Value(R)).match(I))
- return ReductionData(RK_Arithmetic, I->getOpcode(), L, R);
+ return ReductionData(RK_Arithmetic, I->getOpcode(), L, R);
if (auto *SI = dyn_cast<SelectInst>(I)) {
if (m_SMin(m_Value(L), m_Value(R)).match(SI) ||
m_SMax(m_Value(L), m_Value(R)).match(SI) ||
m_UnordFMin(m_Value(L), m_Value(R)).match(SI) ||
m_UnordFMax(m_Value(L), m_Value(R)).match(SI)) {
auto *CI = cast<CmpInst>(SI->getCondition());
- return ReductionData(RK_MinMax, CI->getOpcode(), L, R);
- }
+ return ReductionData(RK_MinMax, CI->getOpcode(), L, R);
+ }
if (m_UMin(m_Value(L), m_Value(R)).match(SI) ||
m_UMax(m_Value(L), m_Value(R)).match(SI)) {
auto *CI = cast<CmpInst>(SI->getCondition());
// We look for a sequence of shuffle,shuffle,add triples like the following
// that builds a pairwise reduction tree.
- //
+ //
// (X0, X1, X2, X3)
// (X0 + X1, X2 + X3, undef, undef)
// ((X0 + X1) + (X2 + X3), undef, undef, undef)
- //
+ //
// %rdx.shuf.0.0 = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 0, i32 2 , i32 undef, i32 undef>
// %rdx.shuf.0.1 = shufflevector <4 x float> %rdx, <4 x float> undef,
// We look for a sequence of shuffles and adds like the following matching one
// fadd, shuffle vector pair at a time.
- //
+ //
// %rdx.shuf = shufflevector <4 x float> %rdx, <4 x float> undef,
// <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
// %bin.rdx = fadd <4 x float> %rdx, %rdx.shuf
unsigned MaskStart = 1;
Instruction *RdxOp = RdxStart;
- SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
+ SmallVector<int, 32> ShuffleMask(NumVecElems, 0);
unsigned NumVecElemsRemain = NumVecElems;
while (NumVecElemsRemain - 1) {
// Check for the right reduction operation.
case Instruction::InsertElement: {
const InsertElementInst * IE = cast<InsertElementInst>(I);
ConstantInt *CI = dyn_cast<ConstantInt>(IE->getOperand(2));
- unsigned Idx = -1;
+ unsigned Idx = -1;
if (CI)
Idx = CI->getZExtValue();
return getVectorInstrCost(I->getOpcode(),
// TODO: Identify and add costs for insert/extract subvector, etc.
if (Shuffle->changesLength())
return -1;
-
+
if (Shuffle->isIdentity())
return 0;
#include <cassert>
#include <cstdint>
#include <iterator>
-#include <utility>
+#include <utility>
using namespace llvm;
using namespace llvm::PatternMatch;
// If either of the values is known to be non-negative, adding them can only
// overflow if the second is also non-negative, so we can assume that.
- // Two non-negative numbers will only overflow if there is a carry to the
+ // Two non-negative numbers will only overflow if there is a carry to the
// sign bit, so we can check if even when the values are as big as possible
// there is no overflow to the sign bit.
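  // Worked example (illustrative only): for 8-bit operands that are both
  // known non-negative and can have at most their low 6 bits set, the largest
  // possible sum is 0x3F + 0x3F = 0x7E, which does not carry into bit 7, so
  // the addition provably cannot overflow.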
if (LHSKnown.isNonNegative() || RHSKnown.isNonNegative()) {
}
// If we reached here it means that we know nothing about the sign bits.
- // In this case we can't know if there will be an overflow, since by
+ // In this case we can't know if there will be an overflow, since by
// changing the sign bits any two values can be made to overflow.
return false;
}
// operands.
bool LHSOrRHSKnownNonNegative =
(LHSKnown.isNonNegative() || RHSKnown.isNonNegative());
- bool LHSOrRHSKnownNegative =
+ bool LHSOrRHSKnownNegative =
(LHSKnown.isNegative() || RHSKnown.isNegative());
if (LHSOrRHSKnownNonNegative || LHSOrRHSKnownNegative) {
KnownBits AddKnown = computeKnownBits(Add, DL, /*Depth=*/0, AC, CxtI, DT);
SPR = matchMinMaxOfMinMax(Pred, CmpLHS, CmpRHS, TrueVal, FalseVal, Depth);
if (SPR.Flavor != SelectPatternFlavor::SPF_UNKNOWN)
return SPR;
-
+
if (Pred != CmpInst::ICMP_SGT && Pred != CmpInst::ICMP_SLT)
return {SPF_UNKNOWN, SPNB_NA, false};
case FCmpInst::FCMP_OLE: return {SPF_FMINNUM, NaNBehavior, Ordered};
}
}
-
+
if (isKnownNegation(TrueVal, FalseVal)) {
// Sign-extending LHS does not change its sign, so TrueVal/FalseVal can
// match against either LHS or sext(LHS).
}
/// parseIndirectSymbol:
-/// ::= GlobalVar '=' OptionalLinkage OptionalPreemptionSpecifier
+/// ::= GlobalVar '=' OptionalLinkage OptionalPreemptionSpecifier
/// OptionalVisibility OptionalDLLStorageClass
/// OptionalThreadLocal OptionalUnnamedAddr
/// 'alias|ifunc' IndirectSymbol
Lex.Lex();
return false;
}
-
+
template <>
bool LLParser::ParseMDField(LocTy Loc, StringRef Name,
DwarfAttEncodingField &Result) {
continue;
// The mapping from OriginalId to GUID may return a GUID
// that corresponds to a static variable. Filter it out here.
- // This can happen when
+ // This can happen when
// 1) There is a call to a library function which does not have
// a CallValidId;
// 2) There is a static variable with the OriginalGUID identical
MachineBasicBlock::iterator End,
unsigned InsertPosIndex,
DbgValueVector &DbgValues) = 0;
-
+
/// Update liveness information to account for the current
/// instruction, which will not be scheduled.
virtual void Observe(MachineInstr &MI, unsigned Count,
uint64_t OffsetInBits = 0;
unsigned DwarfVersion;
- /// Sometimes we need to add a DW_OP_bit_piece to describe a subregister.
+ /// Sometimes we need to add a DW_OP_bit_piece to describe a subregister.
unsigned SubRegisterSizeInBits = 0;
unsigned SubRegisterOffsetInBits = 0;
}
} else {
ScopeVars.Locals.push_back(Var);
- }
+ }
return true;
}
addString(MDie, dwarf::DW_AT_LLVM_include_path, M->getIncludePath());
if (!M->getISysRoot().empty())
addString(MDie, dwarf::DW_AT_LLVM_isysroot, M->getISysRoot());
-
+
return &MDie;
}
}
void DwarfTypeUnit::emitHeader(bool UseOffsets) {
- DwarfUnit::emitCommonHeader(UseOffsets,
+ DwarfUnit::emitCommonHeader(UseOffsets,
DD->useSplitDwarf() ? dwarf::DW_UT_split_type
: dwarf::DW_UT_type);
Asm->OutStreamer->AddComment("Type Signature");
/// Convert an atomic load of a non-integral type to an integer load of the
/// equivalent bitwidth. See the function comment on
-/// convertAtomicStoreToIntegerType for background.
+/// convertAtomicStoreToIntegerType for background.
LoadInst *AtomicExpand::convertAtomicLoadToIntegerType(LoadInst *LI) {
auto *M = LI->getModule();
Type *NewTy = getCorrespondingIntegerType(LI->getType(),
M->getDataLayout());
IRBuilder<> Builder(LI);
-
+
Value *Addr = LI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
Value *NewAddr = Builder.CreateBitCast(Addr, PT);
-
+
auto *NewLI = Builder.CreateLoad(NewAddr);
NewLI->setAlignment(LI->getAlignment());
NewLI->setVolatile(LI->isVolatile());
Type *NewTy = getCorrespondingIntegerType(SI->getValueOperand()->getType(),
M->getDataLayout());
Value *NewVal = Builder.CreateBitCast(SI->getValueOperand(), NewTy);
-
+
Value *Addr = SI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
/// the equivalent bitwidth. We used to not support pointer cmpxchg in the
/// IR. As a migration step, we convert back to what used to be the standard
/// way to represent a pointer cmpxchg so that we can update backends one by
-/// one.
+/// one.
AtomicCmpXchgInst *AtomicExpand::convertCmpXchgToIntegerType(AtomicCmpXchgInst *CI) {
auto *M = CI->getModule();
Type *NewTy = getCorrespondingIntegerType(CI->getCompareOperand()->getType(),
M->getDataLayout());
IRBuilder<> Builder(CI);
-
+
Value *Addr = CI->getPointerOperand();
Type *PT = PointerType::get(NewTy,
Addr->getType()->getPointerAddressSpace());
Value *NewCmp = Builder.CreatePtrToInt(CI->getCompareOperand(), NewTy);
Value *NewNewVal = Builder.CreatePtrToInt(CI->getNewValOperand(), NewTy);
-
-
+
+
auto *NewCI = Builder.CreateAtomicCmpXchg(NewAddr, NewCmp, NewNewVal,
CI->getSuccessOrdering(),
CI->getFailureOrdering(),
//===----------------------------------------------------------------------===//
//
// This file contains the boilerplate required to define our various built in
-// gc lowering strategies.
+// gc lowering strategies.
//
//===----------------------------------------------------------------------===//
// Kill instructions can define registers but are really nops, and there
// might be a real definition earlier that needs to be paired with uses
// dominated by this kill.
-
+
// FIXME: It may be possible to remove the isKill() restriction once PR18663
// has been properly fixed. There can be value in processing kills as seen
// in the AggressiveAntiDepBreaker class.
auto NMI = GCStrategyMap.find(Name);
if (NMI != GCStrategyMap.end())
return NMI->getValue();
-
+
for (auto& Entry : GCRegistry::entries()) {
if (Name == Entry.getName()) {
std::unique_ptr<GCStrategy> S = Entry.instantiate();
}
if (GCRegistry::begin() == GCRegistry::end()) {
- // In normal operation, the registry should not be empty. There should
+ // In normal operation, the registry should not be empty. There should
// be the builtin GCs if nothing else. The most likely scenario here is
- // that we got here without running the initializers used by the Registry
+ // that we got here without running the initializers used by the Registry
// itself and its registration mechanism.
- const std::string error = ("unsupported GC: " + Name).str() +
+ const std::string error = ("unsupported GC: " + Name).str() +
" (did you remember to link and initialize the CodeGen library?)";
report_fatal_error(error);
} else
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
-//
+//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
// ===---------------------------------------------------------------------===//
case Intrinsic::memcpy:
M.getOrInsertFunction("memcpy",
Type::getInt8PtrTy(Context),
- Type::getInt8PtrTy(Context),
- Type::getInt8PtrTy(Context),
+ Type::getInt8PtrTy(Context),
+ Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context));
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
- Type::getInt8PtrTy(Context),
- Type::getInt8PtrTy(Context),
+ Type::getInt8PtrTy(Context),
+ Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context));
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
- Type::getInt8PtrTy(Context),
- Type::getInt32Ty(M.getContext()),
+ Type::getInt8PtrTy(Context),
+ Type::getInt32Ty(M.getContext()),
DL.getIntPtrType(Context));
break;
case Intrinsic::sqrt:
"bswap.5");
Value* Tmp4 = Builder.CreateLShr(V, ConstantInt::get(V->getType(), 8),
"bswap.4");
- Value* Tmp3 = Builder.CreateLShr(V,
+ Value* Tmp3 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 24),
"bswap.3");
- Value* Tmp2 = Builder.CreateLShr(V,
+ Value* Tmp2 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 40),
"bswap.2");
- Value* Tmp1 = Builder.CreateLShr(V,
+ Value* Tmp1 = Builder.CreateLShr(V,
ConstantInt::get(V->getType(), 56),
"bswap.1");
Tmp7 = Builder.CreateAnd(Tmp7,
for (unsigned n = 0; n < WordSize; ++n) {
Value *PartValue = V;
- for (unsigned i = 1, ct = 0; i < (BitSize>64 ? 64 : BitSize);
+ for (unsigned i = 1, ct = 0; i < (BitSize>64 ? 64 : BitSize);
i <<= 1, ++ct) {
Value *MaskCst = ConstantInt::get(V->getType(), MaskValues[ct]);
Value *LHS = Builder.CreateAnd(PartValue, MaskCst, "cppop.and1");
case Intrinsic::siglongjmp: {
// Insert the call to abort
- ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
+ ReplaceCallWith("abort", CI, CS.arg_end(), CS.arg_end(),
Type::getVoidTy(Context));
break;
}
case Intrinsic::bswap:
CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getArgOperand(0), CI));
break;
-
+
case Intrinsic::ctlz:
CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getArgOperand(0), CI));
break;
CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
break;
}
-
+
case Intrinsic::get_dynamic_area_offset:
errs() << "WARNING: this target does not support the custom llvm.get."
"dynamic.area.offset. It is being lowered to a constant 0\n";
case Intrinsic::assume:
case Intrinsic::var_annotation:
break; // Strip out these intrinsics
-
+
case Intrinsic::memcpy: {
Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/// address the spill location in a target independent way.
int LiveDebugValues::extractSpillBaseRegAndOffset(const MachineInstr &MI,
unsigned &Reg) {
- assert(MI.hasOneMemOperand() &&
+ assert(MI.hasOneMemOperand() &&
"Spill instruction does not have exactly one memory operand?");
auto MMOI = MI.memoperands_begin();
const PseudoSourceValue *PVal = (*MMOI)->getPseudoValue();
int FI;
const MachineMemOperand *MMO;
- // TODO: Handle multiple stores folded into one.
+ // TODO: Handle multiple stores folded into one.
if (!MI.hasOneMemOperand())
return false;
MMI.deleteMachineFunctionFor(F);
return true;
}
-
+
StringRef getPassName() const override {
return "Free MachineFunction";
- }
+ }
};
} // end anonymous namespace
// AA (where each "A" is an instruction).
//
// We might have some portion of the module that looks like this:
- // AAAAAA (6 A's)
+ // AAAAAA (6 A's)
//
// In this case, there are 5 different copies of "AA" in this range, but
// at most 3 can be outlined. If only outlining 3 of these is going to
assert(FromReg != ToReg && "Cannot replace a reg with itself");
const TargetRegisterInfo *TRI = getTargetRegisterInfo();
-
+
// TODO: This could be more efficient by bulk changing the operands.
for (reg_iterator I = reg_begin(FromReg), E = reg_end(); I != E; ) {
MachineOperand &O = *I;
private:
MachineInstr *PHI;
unsigned idx;
-
+
public:
explicit PHI_iterator(MachineInstr *P) // begin iterator
: PHI(P), idx(1) {}
PHI_iterator(MachineInstr *P, bool) // end iterator
: PHI(P), idx(PHI->getNumOperands()) {}
- PHI_iterator &operator++() { idx += 2; return *this; }
+ PHI_iterator &operator++() { idx += 2; return *this; }
bool operator==(const PHI_iterator& x) const { return idx == x.idx; }
bool operator!=(const PHI_iterator& x) const { return !operator==(x); }
}
ToSplit.insert(std::make_pair(FromBB, ToBB));
-
+
return true;
}
// Debug values should not be included in any calculations.
if (UseMI.isDebugInstr())
return false;
-
+
bool HasPhysRegs = false;
for (MachineInstr::const_mop_iterator I = UseMI.operands_begin(),
E = UseMI.operands_end(); I != E; ++I) {
computeInstrDepths(MBB);
if (!TBI.HasValidInstrHeights)
computeInstrHeights(MBB);
-
+
return Trace(*this, TBI);
}
auto VerifyStackMapConstant = [&](unsigned Offset) {
if (!MI->getOperand(Offset).isImm() ||
- MI->getOperand(Offset).getImm() != StackMaps::ConstantOp ||
- !MI->getOperand(Offset + 1).isImm())
+ MI->getOperand(Offset).getImm() != StackMaps::ConstantOp ||
+ !MI->getOperand(Offset + 1).isImm())
report("stack map constant to STATEPOINT not well formed!", MI);
};
const unsigned VarStart = StatepointOpers(MI).getVarIdx();
// Allow one node which will masked along with any loads found.
if (NodeToMask)
return false;
-
- // Also ensure that the node to be masked only produces one data result.
+
+ // Also ensure that the node to be masked only produces one data result.
NodeToMask = Op.getNode();
if (NodeToMask->getNumValues() > 1) {
bool HasValue = false;
return nullptr;
// At this point we've matched or extracted a shift op on each side.
-
+
if (LHSShift.getOperand(0) != RHSShift.getOperand(0))
return nullptr; // Not shifting the same value.
N10.getOperand(0))),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N10.getOperand(1)),
- N0, Flags);
+ N0, Flags);
}
}
N0.getOperand(2).getOperand(0),
N0.getOperand(2).getOperand(1),
DAG.getNode(ISD::FNEG, SL, VT,
- N1), Flags), Flags);
+ N1), Flags), Flags);
}
// fold (fsub x, (fma y, z, (fmul u, v)))
N1.getOperand(1),
DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, N20),
- N21, N0, Flags), Flags);
+ N21, N0, Flags), Flags);
}
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N020.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
- N1), Flags), Flags);
+ N1), Flags), Flags);
}
}
}
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N002.getOperand(1)),
DAG.getNode(ISD::FNEG, SL, VT,
- N1), Flags), Flags);
+ N1), Flags), Flags);
}
}
}
VT, N1200)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1201),
- N0, Flags), Flags);
+ N0, Flags), Flags);
}
}
VT, N1020)),
DAG.getNode(ISD::FP_EXTEND, SL, VT,
N1021),
- N0, Flags), Flags);
+ N0, Flags), Flags);
}
}
}
Y, Flags);
if (XC1 && XC1->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT, X.getOperand(0), Y,
- DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
+ DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
}
return SDValue();
};
if (XC0 && XC0->isExactlyValue(-1.0))
return DAG.getNode(PreferredFusedOpcode, SL, VT,
DAG.getNode(ISD::FNEG, SL, VT, X.getOperand(1)), Y,
- DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
+ DAG.getNode(ISD::FNEG, SL, VT, Y), Flags);
auto XC1 = isConstOrConstSplatFP(X.getOperand(1));
if (XC1 && XC1->isExactlyValue(+1.0))
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
- if (Options.UnsafeFPMath ||
+ if (Options.UnsafeFPMath ||
(Flags.hasNoNaNs() && Flags.hasNoSignedZeros())) {
// fold (fmul A, 0) -> 0
if (N1CFP && N1CFP->isZero())
return N1;
- }
+ }
if (Options.UnsafeFPMath || Flags.hasAllowReassociation()) {
// fmul (fmul X, C1), C2 -> fmul X, C1 * C2
SDValue DAGCombiner::visitFSQRT(SDNode *N) {
SDNodeFlags Flags = N->getFlags();
- if (!DAG.getTarget().Options.UnsafeFPMath &&
+ if (!DAG.getTarget().Options.UnsafeFPMath &&
!Flags.hasApproximateFuncs())
return SDValue();
{
MachineInstr *CurLastLocalValue = getLastLocalValue();
if (CurLastLocalValue != SavedLastLocalValue) {
- // Find the first local value instruction to be deleted.
+ // Find the first local value instruction to be deleted.
// This is the instruction after SavedLastLocalValue if it is non-NULL.
// Otherwise it's the first instruction in the block.
MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
// PHI node handling may have generated local value instructions,
// even though it failed to handle all PHI nodes.
- // We remove these instructions because SelectionDAGISel will generate
+ // We remove these instructions because SelectionDAGISel will generate
// them again.
removeDeadLocalValueCode(SavedLastLocalValue);
return false;
DbgLoc = DebugLoc();
// Undo phi node updates, because they will be added again by SelectionDAG.
if (isa<TerminatorInst>(I)) {
- // PHI node handling may have generated local value instructions.
+ // PHI node handling may have generated local value instructions.
// We remove them because SelectionDAGISel will generate them again.
removeDeadLocalValueCode(SavedLastLocalValue);
FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
// of Endianness. LLVM's APFloat representation is not Endian sensitive,
// and so always converts into a 128-bit APInt in a non-Endian-sensitive
// way. However, APInts are serialized in an Endian-sensitive fashion,
- // so on big-Endian targets, the two doubles are output in the wrong
+ // so on big-Endian targets, the two doubles are output in the wrong
// order. Fix this by manually flipping the order of the high 64 bits
// and the low 64 bits here.
if (DAG.getDataLayout().isBigEndian() &&
switch (N->getOpcode()) {
case ISD::ConstantFP: // Leaf node.
- case ISD::CopyFromReg: // Operand is a register that we know to be left
+ case ISD::CopyFromReg: // Operand is a register that we know to be left
// unchanged by SoftenFloatResult().
case ISD::Register: // Leaf node.
return true;
if (N->getNumOperands() == 3)
return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2), 0);
- return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2,
+ return SDValue(DAG.UpdateNodeOperands(N, N->getOperand(0), Op1, Op2,
N->getOperand(3)),
0);
}
SDValue SoftenFloatRes_XINT_TO_FP(SDNode *N);
// Return true if we can skip softening the given operand or SDNode because
- // either it was soften before by SoftenFloatResult and references to the
+ // either it was softened before by SoftenFloatResult and references to the
// operand were replaced by ReplaceValueWith or its value type is legal in HW
// registers and the operand can be left unchanged.
bool CanSkipSoftenFloatOperand(SDNode *N, unsigned OpNo);
SDValue ExpandCTLZ(SDValue Op);
SDValue ExpandCTTZ_ZERO_UNDEF(SDValue Op);
SDValue ExpandStrictFPOp(SDValue Op);
-
+
/// Implements vector promotion.
///
/// This is essentially just bitcasting the operands to a different type and
// equivalent. For instance, if ISD::FSQRT is legal then ISD::STRICT_FSQRT
// is also legal, but if ISD::FSQRT requires expansion then so does
// ISD::STRICT_FSQRT.
- Action = TLI.getStrictFPOperationAction(Node->getOpcode(),
+ Action = TLI.getStrictFPOperationAction(Node->getOpcode(),
Node->getValueType(0));
break;
case ISD::ADD:
Action = TLI.getOperationAction(Node->getOpcode(), Node->getValueType(0));
break;
case ISD::FP_ROUND_INREG:
- Action = TLI.getOperationAction(Node->getOpcode(),
+ Action = TLI.getOperationAction(Node->getOpcode(),
cast<VTSDNode>(Node->getOperand(1))->getVT());
break;
case ISD::SINT_TO_FP:
case ISD::UINT_TO_FP:
- Action = TLI.getOperationAction(Node->getOpcode(),
+ Action = TLI.getOperationAction(Node->getOpcode(),
Node->getOperand(0).getValueType());
break;
case ISD::MSCATTER:
case ISD::CTTZ_ZERO_UNDEF:
return ExpandCTTZ_ZERO_UNDEF(Op);
case ISD::STRICT_FADD:
- case ISD::STRICT_FSUB:
+ case ISD::STRICT_FSUB:
case ISD::STRICT_FMUL:
case ISD::STRICT_FDIV:
case ISD::STRICT_FSQRT:
SmallVector<SDValue, 32> OpChains;
for (unsigned i = 0; i < NumElems; ++i) {
SmallVector<SDValue, 4> Opers;
- SDValue Idx = DAG.getConstant(i, dl,
+ SDValue Idx = DAG.getConstant(i, dl,
TLI.getVectorIdxTy(DAG.getDataLayout()));
// The Chain is the first operand.
Opers.push_back(Chain);
- // Now process the remaining operands.
+ // Now process the remaining operands.
for (unsigned j = 1; j < NumOpers; ++j) {
SDValue Oper = Op.getOperand(j);
EVT OperVT = Oper.getValueType();
if (OperVT.isVector())
- Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
+ Oper = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, dl,
EltVT, Oper, Idx);
Opers.push_back(Oper);
}
-
+
SDValue ScalarOp = DAG.getNode(Op->getOpcode(), dl, ValueVTs, Opers);
OpValues.push_back(ScalarOp.getValue(0));
OpsLo.push_back(Chain);
OpsHi.push_back(Chain);
- // Now process the remaining operands.
+ // Now process the remaining operands.
for (unsigned i = 1; i < NumOps; ++i) {
- SDValue Op = N->getOperand(i);
- SDValue OpLo = Op;
- SDValue OpHi = Op;
+ SDValue Op = N->getOperand(i);
+ SDValue OpLo = Op;
+ SDValue OpHi = Op;
EVT InVT = Op.getValueType();
- if (InVT.isVector()) {
+ if (InVT.isVector()) {
// If the input also splits, handle it directly for a
// compile time speedup. Otherwise split it by hand.
if (getTypeAction(InVT) == TargetLowering::TypeSplitVector)
EVT HiValueVTs[] = {HiVT, MVT::Other};
Lo = DAG.getNode(N->getOpcode(), dl, LoValueVTs, OpsLo);
Hi = DAG.getNode(N->getOpcode(), dl, HiValueVTs, OpsHi);
-
+
// Build a factor node to remember that this Op is independent of the
// other one.
- Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
+ Chain = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
Lo.getValue(1), Hi.getValue(1));
// Legalize the chain result - switch anything that used the old chain to
unsigned DbgSDNodeOrder) {
if (auto *FISDN = dyn_cast<FrameIndexSDNode>(N.getNode())) {
// Construct a FrameIndexDbgValue for FrameIndexSDNodes so we can describe
- // stack slot locations.
+ // stack slot locations.
//
// Consider "int x = 0; int *px = &x;". There are two kinds of interesting
// debug values here after optimization:
Builder.getFrameIndexTy()));
} else if (LiveInOnly) {
// If this value is live in (not live-on-return, or live-through), we can
- // treat it the same way patchpoint treats it's "live in" values. We'll
- // end up folding some of these into stack references, but they'll be
+ // treat it the same way patchpoint treats its "live in" values. We'll
+ // end up folding some of these into stack references, but they'll be
// handled by the register allocator. Note that we do not have the notion
- // of a late use so these values might be placed in registers which are
+ // of a late use so these values might be placed in registers which are
// clobbered by the call. This is fine for live-in.
Ops.push_back(Incoming);
} else {
auto isGCValue =[&](const Value *V) {
return is_contained(SI.Ptrs, V) || is_contained(SI.Bases, V);
};
-
+
// Before we actually start lowering (and allocating spill slots for values),
// reserve any stack slots which we judge to be profitable to reuse for a
// particular value. This is purely an optimization over the code below and
}
if (!Active)
return false;
-
+
// struct FrameMap {
// int32_t NumRoots; // Number of roots in stack frame.
// int32_t NumMeta; // Number of metadata descriptors. May be < NumRoots.
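For context, here is a sketch of the full runtime-side layout this comment is describing, following the shadow-stack design documented for LLVM's garbage-collection support; the StackEntry and llvm_gc_root_chain names come from that documentation, and the visitor below is purely illustrative:

#include <cstdint>

// Shadow-stack frame layout as documented for the shadow-stack collector.
struct FrameMap {
  int32_t NumRoots;     // Number of roots in stack frame.
  int32_t NumMeta;      // Number of metadata descriptors. May be < NumRoots.
  const void *Meta[1];  // Conceptually variable-length; sized [1] so this
                        // sketch compiles as standard C++.
};

struct StackEntry {
  StackEntry *Next;     // Caller's stack entry, forming a linked list.
  const FrameMap *Map;  // Pointer to the constant FrameMap for this frame.
  void *Roots[1];       // In-place array of stack roots (variable-length).
};

extern "C" StackEntry *llvm_gc_root_chain; // Head of the frame list.

// Illustrative collector-side walk over every live root.
void visitGCRoots(void (*Visitor)(void **Root, const void *Meta)) {
  for (StackEntry *E = llvm_gc_root_chain; E; E = E->Next) {
    int32_t I = 0;
    for (; I < E->Map->NumMeta; ++I)
      Visitor(&E->Roots[I], E->Map->Meta[I]); // roots with metadata
    for (; I < E->Map->NumRoots; ++I)
      Visitor(&E->Roots[I], nullptr);         // remaining roots, no metadata
  }
}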
if (!F.hasGC() ||
F.getGC() != std::string("shadow-stack"))
return false;
-
+
LLVMContext &Context = F.getContext();
// Find calls to llvm.gcroot.
/// - Create a SplitEditor from a SplitAnalysis.
/// - Start a new live interval with openIntv.
/// - Mark the places where the new interval is entered using enterIntv*
-/// - Mark the ranges where the new interval is used with useIntv*
+/// - Mark the ranges where the new interval is used with useIntv*
/// - Mark the places where the interval is exited with exitIntv*.
/// - Finish the current interval with closeIntv and repeat from 2.
/// - Rewrite instructions with finish().
setOperationAction(ISD::CTTZ_ZERO_UNDEF, VT, Expand);
setOperationAction(ISD::BITREVERSE, VT, Expand);
-
+
// These library functions default to expand.
setOperationAction(ISD::FROUND, VT, Expand);
setOperationAction(ISD::FPOWI, VT, Expand);
// STATEPOINT Deopt Spill - live-through, read only, indirect
// STATEPOINT Deopt Alloca - live-through, read only, direct
// (We're currently conservative and mark the deopt slots read/write in
- // practice.)
+ // practice.)
// STATEPOINT GC Spill - live-through, read/write, indirect
// STATEPOINT GC Alloca - live-through, read/write, direct
// The live-in vs live-through is handled already (the live through ones are
*Fast = true;
return true;
}
-
+
// This is a misaligned access.
return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Fast);
}
"Enable unification-based CFL-AA"),
clEnumValN(CFLAAType::Andersen, "anders",
"Enable inclusion-based CFL-AA"),
- clEnumValN(CFLAAType::Both, "both",
+ clEnumValN(CFLAAType::Both, "both",
"Enable both variants of CFL-AA")));
/// Option names for limiting the codegen pipeline.
cl::desc("Demote catchswitch BBs only (for wasm EH)"), cl::init(false));
namespace {
-
+
class WinEHPrepare : public FunctionPass {
public:
static char ID; // Pass identification, replacement for typeid.
}
bool
-DWARFAbbreviationDeclaration::extract(DataExtractor Data,
+DWARFAbbreviationDeclaration::extract(DataExtractor Data,
uint32_t* OffsetPtr) {
clear();
const uint32_t Offset = *OffsetPtr;
while (rangesData.isValidOffset(offset)) {
if (Error E = rangeList.extract(rangesData, &offset)) {
WithColor::error() << toString(std::move(E)) << '\n';
- break;
+ break;
}
rangeList.dump(OS);
}
// of the function's code, not the descriptor.
uint64_t OpdOffset = SymbolAddress - OpdAddress;
uint32_t OpdOffset32 = OpdOffset;
- if (OpdOffset == OpdOffset32 &&
+ if (OpdOffset == OpdOffset32 &&
OpdExtractor->isValidOffsetForAddress(OpdOffset32))
SymbolAddress = OpdExtractor->getAddress(&OpdOffset32);
}
LLVMMCJITCompilerOptions options;
memset(&options, 0, sizeof(options)); // Most fields are zero by default.
options.CodeModel = LLVMCodeModelJITDefault;
-
+
memcpy(PassedOptions, &options,
std::min(sizeof(options), SizeOfPassedOptions));
}
"LLVM library mismatch.");
return 1;
}
-
+
// Defend against the user having an old version of the API by ensuring that
// any fields they didn't see are cleared. We must defend against fields being
// set to the bitwise equivalent of zero, and assume that this means "do the
// default" as if that option hadn't been available.
LLVMInitializeMCJITCompilerOptions(&options, sizeof(options));
memcpy(&options, PassedOptions, SizeOfPassedOptions);
-
+
TargetOptions targetOptions;
targetOptions.EnableFastISel = options.EnableFastISel;
std::unique_ptr<Module> Mod(unwrap(M));
unsigned NumArgs,
LLVMGenericValueRef *Args) {
unwrap(EE)->finalizeObject();
-
+
std::vector<GenericValue> ArgVec;
ArgVec.reserve(NumArgs);
for (unsigned I = 0; I != NumArgs; ++I)
ArgVec.push_back(*unwrap(Args[I]));
-
+
GenericValue *Result = new GenericValue();
*Result = unwrap(EE)->runFunction(unwrap<Function>(F), ArgVec);
return wrap(Result);
void *LLVMGetPointerToGlobal(LLVMExecutionEngineRef EE, LLVMValueRef Global) {
unwrap(EE)->finalizeObject();
-
+
return unwrap(EE)->getPointerToGlobal(unwrap<GlobalValue>(Global));
}
LLVMMemoryManagerAllocateDataSectionCallback AllocateDataSection,
LLVMMemoryManagerFinalizeMemoryCallback FinalizeMemory,
LLVMMemoryManagerDestroyCallback Destroy) {
-
+
if (!AllocateCodeSection || !AllocateDataSection || !FinalizeMemory ||
!Destroy)
return nullptr;
-
+
SimpleBindingMMFunctions functions;
functions.AllocateCodeSection = AllocateCodeSection;
functions.AllocateDataSection = AllocateDataSection;
*
*===----------------------------------------------------------------------===*
*
- * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API internal config.
*
* NOTE: This file comes in a style different from the rest of LLVM
#define __itt_thread_id() GetCurrentThreadId()
#define __itt_thread_yield() SwitchToThread()
#ifndef ITT_SIMPLE_INIT
-ITT_INLINE long
+ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{
}
#endif /* ITT_ARCH==ITT_ARCH_IA64 */
#ifndef ITT_SIMPLE_INIT
-ITT_INLINE long
+ITT_INLINE long
__itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
ITT_INLINE long __itt_interlocked_increment(volatile long* ptr)
{
*
*===----------------------------------------------------------------------===*
*
- * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
+ * This file provides Intel(R) Performance Analyzer JIT (Just-In-Time)
* Profiling API declaration.
*
* NOTE: This file comes in a style different from the rest of LLVM
{
/* shutdown */
-
- /*
+
+ /*
* Program exiting EventSpecificData NA
*/
- iJVM_EVENT_TYPE_SHUTDOWN = 2,
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
/* JIT profiling */
-
- /*
+
+ /*
* issued after method code is jitted into memory but before code is executed
* EventSpecificData is an iJIT_Method_Load
*/
- iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
- /* issued before unload. Method code will no longer be executed, but code
- * and info are still in memory. The VTune profiler may capture method
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
* code only at this point. EventSpecificData is iJIT_Method_Id
*/
- iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
/* Method Profiling */
- /* method name, Id and stack is supplied
- * issued when a method is about to be entered EventSpecificData is
+ /* method name, Id and stack are supplied
+ * issued when a method is about to be entered. EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
- /* method name, Id and stack is supplied
- * issued when a method is about to be left EventSpecificData is
+ /* method name, Id and stack are supplied
+ * issued when a method is about to be left. EventSpecificData is
* iJIT_Method_NIDS
*/
- iJVM_EVENT_TYPE_LEAVE_NIDS
+ iJVM_EVENT_TYPE_LEAVE_NIDS
} iJIT_JVM_EVENT;
typedef enum _iJIT_ModeFlags
{
/* No need to Notify VTune, since VTune is not running */
- iJIT_NO_NOTIFICATIONS = 0x0000,
+ iJIT_NO_NOTIFICATIONS = 0x0000,
- /* when turned on the jit must call
+ /* when turned on the jit must call
* iJIT_NotifyEvent
* (
* iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
* )
* for all the methods already jitted
*/
- iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
/* when turned on the jit must call
* iJIT_NotifyEvent
* iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
* ) for all the methods that are unloaded
*/
- iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
/* when turned on the jit must instrument all
* the currently jitted code with calls on
* method entries
*/
- iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
/* when turned on the jit must instrument all
* the currently jitted code with calls
* on method exit
*/
- iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
} iJIT_ModeFlags;
typedef enum _iJIT_IsProfilingActiveFlags
{
/* No profiler is running. Currently not used */
- iJIT_NOTHING_RUNNING = 0x0000,
+ iJIT_NOTHING_RUNNING = 0x0000,
/* Sampling is running. This is the default value
* returned by iJIT_IsProfilingActive()
*/
- iJIT_SAMPLING_ON = 0x0001,
-
+ iJIT_SAMPLING_ON = 0x0001,
+
/* Call Graph is running */
iJIT_CALLGRAPH_ON = 0x0002
/* Id of the method (same as the one passed in
* the iJIT_Method_Load struct)
*/
- unsigned int method_id;
+ unsigned int method_id;
} *piJIT_Method_Id, iJIT_Method_Id;
typedef struct _iJIT_Method_NIDS
{
/* unique method ID */
- unsigned int method_id;
+ unsigned int method_id;
/* NOTE: no need to fill this field, it's filled by VTune */
- unsigned int stack_id;
+ unsigned int stack_id;
/* method name (just the method, without the class) */
- char* method_name;
+ char* method_name;
} *piJIT_Method_NIDS, iJIT_Method_NIDS;
/* structures for the events:
unsigned int Offset;
/* source line number from the beginning of the source file */
- unsigned int LineNumber;
+ unsigned int LineNumber;
} *pLineNumberInfo, LineNumberInfo;
typedef struct _iJIT_Method_Load
{
/* unique method ID - can be any unique value, (except 0 - 999) */
- unsigned int method_id;
+ unsigned int method_id;
/* method name (can be with or without the class and signature, in any case
* the class name will be added to it)
*/
- char* method_name;
+ char* method_name;
/* virtual address of that method - This determines the method range for the
* iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
*/
- void* method_load_address;
+ void* method_load_address;
/* Size in memory - Must be exact */
- unsigned int method_size;
+ unsigned int method_size;
/* Line Table size in number of entries - Zero if none */
unsigned int line_number_size;
/* Pointer to the beginning of the line numbers info array */
- pLineNumberInfo line_number_table;
+ pLineNumberInfo line_number_table;
/* unique class ID */
- unsigned int class_id;
-
+ unsigned int class_id;
+
/* class file name */
- char* class_file_name;
+ char* class_file_name;
/* source file name */
- char* source_file_name;
+ char* source_file_name;
/* bits supplied by the user for saving in the JIT file */
- void* user_data;
+ void* user_data;
/* the size of the user data buffer */
- unsigned int user_data_size;
+ unsigned int user_data_size;
/* NOTE: no need to fill this field, it's filled by VTune */
- iJDEnvironmentType env;
+ iJDEnvironmentType env;
} *piJIT_Method_Load, iJIT_Method_Load;
int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
/* The new mode call back routine */
-void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
iJIT_ModeChangedEx NewModeCallBackFuncEx);
iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
}
}
-static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
+static void executeFDivInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
IMPLEMENT_BINARY_OPERATOR(/, Float);
}
}
-static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
+static void executeFRemInst(GenericValue &Dest, GenericValue Src1,
GenericValue Src2, Type *Ty) {
switch (Ty->getTypeID()) {
case Type::FloatTyID:
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
-
+
switch (I.getPredicate()) {
case ICmpInst::ICMP_EQ: R = executeICMP_EQ(Src1, Src2, Ty); break;
case ICmpInst::ICMP_NE: R = executeICMP_NE(Src1, Src2, Ty); break;
dbgs() << "Don't know how to handle this ICmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
}
-
+
SetValue(&I, R, SF);
}
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
- Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
+ Dest.IntVal = APInt(1,(Src1.FloatVal == Src1.FloatVal &&
Src2.FloatVal == Src2.FloatVal));
else {
- Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
+ Dest.IntVal = APInt(1,(Src1.DoubleVal == Src1.DoubleVal &&
Src2.DoubleVal == Src2.DoubleVal));
}
return Dest;
Src2.AggregateVal[_i].DoubleVal)));
}
} else if (Ty->isFloatTy())
- Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
+ Dest.IntVal = APInt(1,(Src1.FloatVal != Src1.FloatVal ||
Src2.FloatVal != Src2.FloatVal));
else {
- Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
+ Dest.IntVal = APInt(1,(Src1.DoubleVal != Src1.DoubleVal ||
Src2.DoubleVal != Src2.DoubleVal));
}
return Dest;
GenericValue Src1 = getOperandValue(I.getOperand(0), SF);
GenericValue Src2 = getOperandValue(I.getOperand(1), SF);
GenericValue R; // Result
-
+
switch (I.getPredicate()) {
default:
dbgs() << "Don't know how to handle this FCmp predicate!\n-->" << I;
llvm_unreachable(nullptr);
break;
- case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
+ case FCmpInst::FCMP_FALSE: R = executeFCMP_BOOL(Src1, Src2, Ty, false);
break;
- case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
+ case FCmpInst::FCMP_TRUE: R = executeFCMP_BOOL(Src1, Src2, Ty, true);
break;
case FCmpInst::FCMP_ORD: R = executeFCMP_ORD(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UNO: R = executeFCMP_UNO(Src1, Src2, Ty); break;
case FCmpInst::FCMP_UGE: R = executeFCMP_UGE(Src1, Src2, Ty); break;
case FCmpInst::FCMP_OGE: R = executeFCMP_OGE(Src1, Src2, Ty); break;
}
-
+
SetValue(&I, R, SF);
}
-static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
+static GenericValue executeCmpInst(unsigned predicate, GenericValue Src1,
GenericValue Src2, Type *Ty) {
GenericValue Result;
switch (predicate) {
case Instruction::FRem:
if (cast<VectorType>(Ty)->getElementType()->isFloatTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
- R.AggregateVal[i].FloatVal =
+ R.AggregateVal[i].FloatVal =
fmod(Src1.AggregateVal[i].FloatVal, Src2.AggregateVal[i].FloatVal);
else {
if (cast<VectorType>(Ty)->getElementType()->isDoubleTy())
for (unsigned i = 0; i < R.AggregateVal.size(); ++i)
- R.AggregateVal[i].DoubleVal =
+ R.AggregateVal[i].DoubleVal =
fmod(Src1.AggregateVal[i].DoubleVal, Src2.AggregateVal[i].DoubleVal);
else {
dbgs() << "Unhandled type for Rem instruction: " << *Ty << "\n";
Type *Ty = I.getType()->getElementType(); // Type to be allocated
// Get the number of elements being allocated by the array...
- unsigned NumElements =
+ unsigned NumElements =
getOperandValue(I.getOperand(0), SF).IntVal.getZExtValue();
unsigned TypeSize = (size_t)getDataLayout().getTypeAllocSize(Ty);
GenericValue IdxGV = getOperandValue(I.getOperand(), SF);
int64_t Idx;
- unsigned BitWidth =
+ unsigned BitWidth =
cast<IntegerType>(I.getOperand()->getType())->getBitWidth();
if (BitWidth == 32)
Idx = (int64_t)(int32_t)IdxGV.IntVal.getZExtValue();
case Instruction::And: Dest.IntVal = Op0.IntVal & Op1.IntVal; break;
case Instruction::Or: Dest.IntVal = Op0.IntVal | Op1.IntVal; break;
case Instruction::Xor: Dest.IntVal = Op0.IntVal ^ Op1.IntVal; break;
- case Instruction::Shl:
+ case Instruction::Shl:
Dest.IntVal = Op0.IntVal.shl(Op1.IntVal.getZExtValue());
break;
- case Instruction::LShr:
+ case Instruction::LShr:
Dest.IntVal = Op0.IntVal.lshr(Op1.IntVal.getZExtValue());
break;
- case Instruction::AShr:
+ case Instruction::AShr:
Dest.IntVal = Op0.IntVal.ashr(Op1.IntVal.getZExtValue());
break;
default:
// Handle non-varargs arguments...
unsigned i = 0;
- for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
+ for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
AI != E; ++AI, ++i)
SetValue(&*AI, ArgVals[i], StackFrame);
void visitLoadInst(LoadInst &I);
void visitStoreInst(StoreInst &I);
void visitGetElementPtrInst(GetElementPtrInst &I);
- void visitPHINode(PHINode &PN) {
- llvm_unreachable("PHI nodes already handled!");
+ void visitPHINode(PHINode &PN) {
+ llvm_unreachable("PHI nodes already handled!");
}
void visitTruncInst(TruncInst &I);
void visitZExtInst(ZExtInst &I);
ExecutionContext &SF);
GenericValue executeBitCastInst(Value *SrcVal, Type *DstTy,
ExecutionContext &SF);
- GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
+ GenericValue executeCastOperation(Instruction::CastOps opcode, Value *SrcVal,
Type *Ty, ExecutionContext &SF);
void popStackAndReturnValueToCaller(Type *RetTy, GenericValue Result);
void RTDyldMemoryManager::registerEHFramesInProcess(uint8_t *Addr,
size_t Size) {
- // On Linux __register_frame takes a single argument:
+ // On Linux __register_frame takes a single argument:
// a pointer to the start of the .eh_frame section.
- // How can it find the end? Because crtendS.o is linked
+ // How can it find the end? Because crtendS.o is linked
// in and it has an .eh_frame section with four zero chars.
__register_frame(Addr);
}
return (uint64_t)&__morestack;
#endif
#endif // __linux__ && __GLIBC__
-
+
// See ARM_MATH_IMPORTS definition for explanation
#if defined(__BIONIC__) && defined(__arm__)
if (Name.compare(0, 8, "__aeabi_") == 0) {
} else {
processSimpleRelocation(SectionID, Offset, RelType, Value);
}
-
+
} else if (Arch == Triple::ppc64 || Arch == Triple::ppc64le) {
if (RelType == ELF::R_PPC64_REL24) {
// Determine ABI variant in use for this object.
Args.push_back("-passes=gvn");
} else if (Opt == "sccp") {
Args.push_back("-passes=sccp");
-
+
} else if (Opt == "loop_predication") {
Args.push_back("-passes=loop-predication");
} else if (Opt == "guard_widening") {
Args.push_back("-passes=strength-reduce");
} else if (Opt == "irce") {
Args.push_back("-passes=irce");
-
+
} else if (Triple(Opt).getArch()) {
Args.push_back("-mtriple=" + Opt.str());
} else {
auto M = parseModule(Data, Size, Context);
if (!M || verifyModule(*M, &errs()))
return nullptr;
-
+
return M;
}
Name.startswith("avx512.mask3.vfmsubadd.") || // Added in 7.0
Name.startswith("avx512.mask.shuf.i") || // Added in 6.0
Name.startswith("avx512.mask.shuf.f") || // Added in 6.0
- Name.startswith("avx512.kunpck") || //added in 6.0
+ Name.startswith("avx512.kunpck") || //added in 6.0
Name.startswith("avx2.pabs.") || // Added in 6.0
Name.startswith("avx512.mask.pabs.") || // Added in 6.0
Name.startswith("avx512.broadcastm") || // Added in 6.0
if (FT->isVarArg())
Result += "vararg";
// Ensure nested function types are distinguishable.
- Result += "f";
+ Result += "f";
} else if (isa<VectorType>(Ty)) {
Result += "v" + utostr(Ty->getVectorNumElements()) +
getMangledTypeStr(Ty->getVectorElementType());
FunctionType *InlineAsm::getFunctionType() const {
return FTy;
}
-
+
/// Parse - Analyze the specified string (e.g. "==&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
isCommutative = false;
isIndirect = false;
currentAlternativeIndex = 0;
-
+
// Parse prefixes.
if (*I == '~') {
Type = isClobber;
}
if (I == E) return true; // Just a prefix, like "==" or "~".
-
+
// Parse the modifiers.
bool DoneWithModifiers = false;
while (!DoneWithModifiers) {
case '*': // Register preferencing.
return true; // Not supported.
}
-
+
if (!DoneWithModifiers) {
++I;
if (I == E) return true; // Just prefixes and modifiers!
}
}
-
+
// Parse the various constraints.
while (I != E) {
if (*I == '{') { // Physical register reference.
if (N >= ConstraintsSoFar.size() || ConstraintsSoFar[N].Type != isOutput||
Type != isInput)
return true; // Invalid constraint number.
-
+
// If Operand N already has a matching input, reject this. An output
// can't be constrained to the same value as multiple inputs.
if (isMultipleAlternative) {
InlineAsm::ConstraintInfoVector
InlineAsm::ParseConstraints(StringRef Constraints) {
ConstraintInfoVector Result;
-
+
// Scan the constraints string.
for (StringRef::iterator I = Constraints.begin(),
E = Constraints.end(); I != E; ) {
}
Result.push_back(Info);
-
+
// ConstraintEnd may be either the next comma or the end of the string. In
// the former case, we skip the comma.
I = ConstraintEnd;
} // don't allow "xyz,"
}
}
-
+
return Result;
}
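A small usage sketch of the parser defined above; the constraint string, counters, and function name are illustrative:

#include "llvm/IR/InlineAsm.h"
using namespace llvm;

// Classify each comma-separated constraint of an illustrative string.
void countConstraints() {
  InlineAsm::ConstraintInfoVector CV =
      InlineAsm::ParseConstraints("=r,r,~{memory}");
  unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0;
  for (const InlineAsm::ConstraintInfo &CI : CV) {
    if (CI.Type == InlineAsm::isOutput)
      ++NumOutputs;   // "=r"
    else if (CI.Type == InlineAsm::isInput)
      ++NumInputs;    // "r"
    else
      ++NumClobbers;  // "~{memory}"
  }
  // For this string: NumOutputs == 1, NumInputs == 1, NumClobbers == 1.
  (void)NumOutputs; (void)NumInputs; (void)NumClobbers;
}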
/// specified function type, and otherwise validate the constraint string.
bool InlineAsm::Verify(FunctionType *Ty, StringRef ConstStr) {
if (Ty->isVarArg()) return false;
-
+
ConstraintInfoVector Constraints = ParseConstraints(ConstStr);
-
+
// Error parsing constraints.
if (Constraints.empty() && !ConstStr.empty()) return false;
-
+
unsigned NumOutputs = 0, NumInputs = 0, NumClobbers = 0;
unsigned NumIndirect = 0;
-
+
for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
switch (Constraints[i].Type) {
case InlineAsm::isOutput:
break;
}
}
-
+
switch (NumOutputs) {
case 0:
if (!Ty->getReturnType()->isVoidTy()) return false;
if (!STy || STy->getNumElements() != NumOutputs)
return false;
break;
- }
-
+ }
+
if (Ty->getNumParams() != NumInputs) return false;
return true;
}
"Calling a function with bad signature!");
for (unsigned i = 0; i != Args.size(); ++i)
- assert((i >= FTy->getNumParams() ||
+ assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Args[i]->getType()) &&
"Calling a function with a bad signature!");
#endif
assert(((!InsertBefore && InsertAtEnd) || (InsertBefore && !InsertAtEnd)) &&
"createMalloc needs either InsertBefore or InsertAtEnd");
- // malloc(type) becomes:
+ // malloc(type) becomes:
// bitcast (i8* malloc(typeSize)) to type*
// malloc(type, arraySize) becomes:
// bitcast (i8* malloc(typeSize*arraySize)) to type*
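A usage sketch of the public CreateMalloc entry point declared below; DL, Ctx, StructTy and BB stand in for whatever the caller already has, and, as the documentation says, the returned instruction still has to be inserted by the caller:

#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
using namespace llvm;

// Emit the IR equivalent of "p = (StructTy*) malloc(sizeof(StructTy))".
Instruction *emitMallocFor(StructType *StructTy, BasicBlock *BB,
                           const DataLayout &DL, LLVMContext &Ctx) {
  Type *IntPtrTy = DL.getIntPtrType(Ctx);
  Constant *AllocSize = ConstantExpr::getSizeOf(StructTy);
  AllocSize = ConstantExpr::getTruncOrBitCast(AllocSize, IntPtrTy);
  Instruction *Malloc =
      CallInst::CreateMalloc(BB, IntPtrTy, StructTy, AllocSize,
                             /*ArraySize=*/nullptr, /*MallocF=*/nullptr, "p");
  // Per the surrounding documentation, the returned instruction is not yet
  // inserted into BB; inserting it is the caller's responsibility.
  return Malloc;
}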
/// responsibility of the caller.
Instruction *CallInst::CreateMalloc(BasicBlock *InsertAtEnd,
Type *IntPtrTy, Type *AllocTy,
- Value *AllocSize, Value *ArraySize,
+ Value *AllocSize, Value *ArraySize,
Function *MallocF, const Twine &Name) {
return createMalloc(nullptr, InsertAtEnd, IntPtrTy, AllocTy, AllocSize,
ArraySize, None, MallocF, Name);
"Invoking a function with bad signature");
for (unsigned i = 0, e = Args.size(); i != e; i++)
- assert((i >= FTy->getNumParams() ||
+ assert((i >= FTy->getNumParams() ||
FTy->getParamType(i) == Args[i]->getType()) &&
"Invoking a function with a bad signature!");
#endif
// UnreachableInst Implementation
//===----------------------------------------------------------------------===//
-UnreachableInst::UnreachableInst(LLVMContext &Context,
+UnreachableInst::UnreachableInst(LLVMContext &Context,
Instruction *InsertBefore)
: TerminatorInst(Type::getVoidTy(Context), Instruction::Unreachable,
nullptr, 0, InsertBefore) {
bool AllocaInst::isStaticAlloca() const {
// Must be constant size.
if (!isa<ConstantInt>(getArraySize())) return false;
-
+
// Must be in the entry block.
const BasicBlock *Parent = getParent();
return Parent == &Parent->getParent()->front() && !isUsedWithInAlloca();
setName(Name);
}
-LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
+LoadInst::LoadInst(Value *Ptr, const Twine &Name, bool isVolatile,
unsigned Align, AtomicOrdering Order,
SyncScope::ID SSID,
BasicBlock *InsertAE)
// FenceInst Implementation
//===----------------------------------------------------------------------===//
-FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SyncScope::ID SSID,
Instruction *InsertBefore)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertBefore) {
setSyncScopeID(SSID);
}
-FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
+FenceInst::FenceInst(LLVMContext &C, AtomicOrdering Ordering,
SyncScope::ID SSID,
BasicBlock *InsertAtEnd)
: Instruction(Type::getVoidTy(C), Fence, nullptr, 0, InsertAtEnd) {
setName(Name);
}
-bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
+bool InsertElementInst::isValidOperands(const Value *Vec, const Value *Elt,
const Value *Index) {
if (!Vec->getType()->isVectorTy())
return false; // First operand of insertelement must be vector type.
-
+
if (Elt->getType() != cast<VectorType>(Vec->getType())->getElementType())
return false; // Second operand of insertelement must be vector element type.
-
+
if (!Index->getType()->isIntegerTy())
return false; // Third operand of insertelement must be an integer type.
return true;
// V1 and V2 must be vectors of the same type.
if (!V1->getType()->isVectorTy() || V1->getType() != V2->getType())
return false;
-
+
// Mask must be vector of i32.
auto *MaskTy = dyn_cast<VectorType>(Mask->getType());
if (!MaskTy || !MaskTy->getElementType()->isIntegerTy(32))
}
return true;
}
-
+
if (const auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
unsigned V1Size = cast<VectorType>(V1->getType())->getNumElements();
for (unsigned i = 0, e = MaskTy->getNumElements(); i != e; ++i)
return false;
return true;
}
-
+
// The bitcode reader can create a place holder for a forward reference
// used as the shuffle mask. When this occurs, the shuffle mask will
// fall into this case and fail. To avoid this error, do this bit of
void ShuffleVectorInst::getShuffleMask(const Constant *Mask,
SmallVectorImpl<int> &Result) {
unsigned NumElts = Mask->getType()->getVectorNumElements();
-
+
if (auto *CDS = dyn_cast<ConstantDataSequential>(Mask)) {
for (unsigned i = 0; i != NumElts; ++i)
Result.push_back(CDS->getElementAsInteger(i));
return;
- }
+ }
for (unsigned i = 0; i != NumElts; ++i) {
Constant *C = Mask->getAggregateElement(i);
Result.push_back(isa<UndefValue>(C) ? -1 :
// InsertValueInst Class
//===----------------------------------------------------------------------===//
-void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
+void InsertValueInst::init(Value *Agg, Value *Val, ArrayRef<unsigned> Idxs,
const Twine &Name) {
assert(getNumOperands() == 2 && "NumOperands not initialized?");
AssertOK();
}
-BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
+BinaryOperator::BinaryOperator(BinaryOps iType, Value *S1, Value *S2,
Type *Ty, const Twine &Name,
BasicBlock *InsertAtEnd)
: Instruction(Ty, iType,
"Tried to create a floating-point operation on a "
"non-floating-point type!");
break;
- case UDiv:
- case SDiv:
+ case UDiv:
+ case SDiv:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
assert(getType()->isIntOrIntVectorTy() &&
assert(getType()->isFPOrFPVectorTy() &&
"Incorrect operand type (not floating point) for FDIV");
break;
- case URem:
- case SRem:
+ case URem:
+ case SRem:
assert(getType() == LHS->getType() &&
"Arithmetic operation should return same type as operands!");
assert(getType()->isIntOrIntVectorTy() &&
Type *DstTy = getType();
if (SrcTy == DstTy)
return true;
-
+
// Pointer to pointer is always lossless.
if (SrcTy->isPointerTy())
return DstTy->isPointerTy();
/// This function determines if the CastInst does not require any bits to be
/// changed in order to effect the cast. Essentially, it identifies cases where
-/// no code gen is necessary for the cast, hence the name no-op cast. For
+/// no code gen is necessary for the cast, hence the name no-op cast. For
/// example, the following are all no-op casts:
/// # bitcast i32* %x to i8*
-/// # bitcast <2 x i32> %x to <4 x i16>
+/// # bitcast <2 x i32> %x to <4 x i16>
/// # ptrtoint i32* %x to i32 ; on 32-bit platforms only
/// Determine if the described cast is a no-op.
bool CastInst::isNoopCast(Instruction::CastOps Opcode,
default: llvm_unreachable("Invalid CastOp");
case Instruction::Trunc:
case Instruction::ZExt:
- case Instruction::SExt:
+ case Instruction::SExt:
case Instruction::FPTrunc:
case Instruction::FPExt:
case Instruction::UIToFP:
Type *DstIntPtrTy) {
// Define the 144 possibilities for these two cast instructions. The values
// in this matrix determine what to do in a given situation and select the
- // case in the switch below. The rows correspond to firstOp, the columns
+ // case in the switch below. The rows correspond to firstOp, the columns
// correspond to secondOp. In looking at the table below, keep in mind
// the following cast properties:
//
int ElimCase = CastResults[firstOp-Instruction::CastOpsBegin]
[secondOp-Instruction::CastOpsBegin];
switch (ElimCase) {
- case 0:
+ case 0:
// Categorically disallowed.
return 0;
- case 1:
+ case 1:
// Allowed, use first cast's opcode.
return firstOp;
- case 2:
+ case 2:
// Allowed, use second cast's opcode.
return secondOp;
- case 3:
+ case 3:
// No-op cast in second op implies firstOp as long as the DestTy
// is integer and we are not converting between a vector and a
// non-vector type.
if (DstTy->isFloatingPointTy())
return firstOp;
return 0;
- case 5:
+ case 5:
// No-op cast in first op implies secondOp as long as the SrcTy
// is an integer.
if (SrcTy->isIntegerTy())
case 17:
// (sitofp (zext x)) -> (uitofp x)
return Instruction::UIToFP;
- case 99:
+ case 99:
// Cast combination can't happen (error in input). This is for all cases
// where the MidTy is not the same for the two cast instructions.
llvm_unreachable("Invalid Cast Combination");
}
}
-CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
+CastInst *CastInst::Create(Instruction::CastOps op, Value *S, Type *Ty,
const Twine &Name, Instruction *InsertBefore) {
assert(castIsValid(op, S, Ty) && "Invalid cast!");
// Construct and return the appropriate CastInst subclass
}
}
-CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return Create(Instruction::ZExt, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
+CastInst *CastInst::CreateZExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return Create(Instruction::ZExt, S, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
Instruction *InsertBefore) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return Create(Instruction::SExt, S, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
+CastInst *CastInst::CreateSExtOrBitCast(Value *S, Type *Ty,
const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
}
CastInst *CastInst::CreateTruncOrBitCast(Value *S, Type *Ty,
- const Twine &Name,
+ const Twine &Name,
BasicBlock *InsertAtEnd) {
if (S->getType()->getScalarSizeInBits() == Ty->getScalarSizeInBits())
return Create(Instruction::BitCast, S, Ty, Name, InsertAtEnd);
return Create(opcode, C, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
+CastInst *CastInst::CreateIntegerCast(Value *C, Type *Ty,
bool isSigned, const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isIntOrIntVectorTy() && Ty->isIntOrIntVectorTy() &&
return Create(opcode, C, Ty, Name, InsertAtEnd);
}
-CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
- const Twine &Name,
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
Instruction *InsertBefore) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
"Invalid cast");
return Create(opcode, C, Ty, Name, InsertBefore);
}
-CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
- const Twine &Name,
+CastInst *CastInst::CreateFPCast(Value *C, Type *Ty,
+ const Twine &Name,
BasicBlock *InsertAtEnd) {
assert(C->getType()->isFPOrFPVectorTy() && Ty->isFPOrFPVectorTy() &&
"Invalid cast");
return DestBits == SrcBits;
// Casting from something else
return SrcTy->isPointerTy();
- }
+ }
if (DestTy->isFloatingPointTy()) { // Casting to floating pt
if (SrcTy->isIntegerTy()) // Casting from integral
return true;
if (SrcTy->isPointerTy()) // Casting from pointer
return true;
return SrcTy->isIntegerTy(); // Casting from integral
- }
+ }
if (DestTy->isX86_MMXTy()) {
if (SrcTy->isVectorTy())
return DestBits == SrcBits; // 64-bit vector to MMX
return BitCast; // Same size, No-op cast
}
} else if (SrcTy->isFloatingPointTy()) { // Casting from floating pt
- if (DestIsSigned)
+ if (DestIsSigned)
return FPToSI; // FP -> sint
else
- return FPToUI; // FP -> uint
+ return FPToUI; // FP -> uint
} else if (SrcTy->isVectorTy()) {
assert(DestBits == SrcBits &&
"Casting vector to integer of different width");
/// could be broken out into the separate constructors but it is useful to have
/// it in one place and to eliminate the redundant code for getting the sizes
/// of the types involved.
-bool
+bool
CastInst::castIsValid(Instruction::CastOps op, Value *S, Type *DstTy) {
// Check for type sanity on the arguments
Type *SrcTy = S->getType();
case Instruction::ZExt:
return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcLength == DstLength && SrcBitSize < DstBitSize;
- case Instruction::SExt:
+ case Instruction::SExt:
return SrcTy->isIntOrIntVectorTy() && DstTy->isIntOrIntVectorTy() &&
SrcLength == DstLength && SrcBitSize < DstBitSize;
case Instruction::FPTrunc:
TruncInst::TruncInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
+) : CastInst(Ty, Trunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal Trunc");
}
ZExtInst::ZExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
+) : CastInst(Ty, ZExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
ZExtInst::ZExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
+) : CastInst(Ty, ZExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal ZExt");
}
SExtInst::SExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, SExt, S, Name, InsertBefore) {
+) : CastInst(Ty, SExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
SExtInst::SExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
+) : CastInst(Ty, SExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SExt");
}
FPTruncInst::FPTruncInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
+) : CastInst(Ty, FPTrunc, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPTruncInst::FPTruncInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
+) : CastInst(Ty, FPTrunc, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPTrunc");
}
FPExtInst::FPExtInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
+) : CastInst(Ty, FPExt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
FPExtInst::FPExtInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
+) : CastInst(Ty, FPExt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPExt");
}
UIToFPInst::UIToFPInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
+) : CastInst(Ty, UIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
UIToFPInst::UIToFPInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
+) : CastInst(Ty, UIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal UIToFP");
}
SIToFPInst::SIToFPInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
+) : CastInst(Ty, SIToFP, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
SIToFPInst::SIToFPInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
+) : CastInst(Ty, SIToFP, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal SIToFP");
}
FPToUIInst::FPToUIInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
+) : CastInst(Ty, FPToUI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToUIInst::FPToUIInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
+) : CastInst(Ty, FPToUI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToUI");
}
FPToSIInst::FPToSIInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
+) : CastInst(Ty, FPToSI, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
FPToSIInst::FPToSIInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
+) : CastInst(Ty, FPToSI, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal FPToSI");
}
PtrToIntInst::PtrToIntInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
+) : CastInst(Ty, PtrToInt, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
PtrToIntInst::PtrToIntInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
+) : CastInst(Ty, PtrToInt, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal PtrToInt");
}
IntToPtrInst::IntToPtrInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
+) : CastInst(Ty, IntToPtr, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
IntToPtrInst::IntToPtrInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
+) : CastInst(Ty, IntToPtr, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal IntToPtr");
}
BitCastInst::BitCastInst(
Value *S, Type *Ty, const Twine &Name, Instruction *InsertBefore
-) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
+) : CastInst(Ty, BitCast, S, Name, InsertBefore) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
BitCastInst::BitCastInst(
Value *S, Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd
-) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
+) : CastInst(Ty, BitCast, S, Name, InsertAtEnd) {
assert(castIsValid(getOpcode(), S, Ty) && "Illegal BitCast");
}
return new ICmpInst(CmpInst::Predicate(predicate),
S1, S2, Name);
}
-
+
if (InsertBefore)
return new FCmpInst(InsertBefore, CmpInst::Predicate(predicate),
S1, S2, Name);
ICmpInst::Predicate ICmpInst::getSignedPredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown icmp predicate!");
- case ICMP_EQ: case ICMP_NE:
- case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_SGT: case ICMP_SLT: case ICMP_SGE: case ICMP_SLE:
return pred;
case ICMP_UGT: return ICMP_SGT;
case ICMP_ULT: return ICMP_SLT;
ICmpInst::Predicate ICmpInst::getUnsignedPredicate(Predicate pred) {
switch (pred) {
default: llvm_unreachable("Unknown icmp predicate!");
- case ICMP_EQ: case ICMP_NE:
- case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
+ case ICMP_EQ: case ICMP_NE:
+ case ICMP_UGT: case ICMP_ULT: case ICMP_UGE: case ICMP_ULE:
return pred;
case ICMP_SGT: return ICMP_UGT;
case ICMP_SLT: return ICMP_ULT;
case ICMP_ULT: return ICMP_UGT;
case ICMP_UGE: return ICMP_ULE;
case ICMP_ULE: return ICMP_UGE;
-
+
case FCMP_FALSE: case FCMP_TRUE:
case FCMP_OEQ: case FCMP_ONE:
case FCMP_UEQ: case FCMP_UNE:
bool CmpInst::isUnsigned(Predicate predicate) {
switch (predicate) {
default: return false;
- case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
+ case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_ULE: case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE: return true;
}
}
bool CmpInst::isSigned(Predicate predicate) {
switch (predicate) {
default: return false;
- case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
+ case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_SLE: case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE: return true;
}
}
bool CmpInst::isOrdered(Predicate predicate) {
switch (predicate) {
default: return false;
- case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
- case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
+ case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_OGT:
+ case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_OLE:
case FCmpInst::FCMP_ORD: return true;
}
}
-
+
bool CmpInst::isUnordered(Predicate predicate) {
switch (predicate) {
default: return false;
- case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
- case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
+ case FCmpInst::FCMP_UEQ: case FCmpInst::FCMP_UNE: case FCmpInst::FCMP_UGT:
+ case FCmpInst::FCMP_ULT: case FCmpInst::FCMP_UGE: case FCmpInst::FCMP_ULE:
case FCmpInst::FCMP_UNO: return true;
}
}
void IndirectBrInst::growOperands() {
unsigned e = getNumOperands();
unsigned NumOps = e*2;
-
+
ReservedSpace = NumOps;
growHungoffUses(ReservedSpace);
}
/// indirectbr instruction.
void IndirectBrInst::removeDestination(unsigned idx) {
assert(idx < getNumOperands()-1 && "Successor index out of range!");
-
+
unsigned NumOps = getNumOperands();
Use *OL = getOperandList();
// Replace this value with the last one.
OL[idx+1] = OL[NumOps-1];
-
+
// Nuke the last value.
OL[NumOps-1].set(nullptr);
setNumHungOffUseOperands(NumOps-1);
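removeDestination above uses the usual swap-with-last trick so the operand list stays dense; the same idiom in isolation, as a generic sketch rather than LLVM API:

#include <cstddef>
#include <utility>
#include <vector>

// Remove element Idx in O(1) by overwriting it with the last element;
// element order is not preserved, exactly as in removeDestination.
template <typename T>
void swapRemove(std::vector<T> &V, std::size_t Idx) {
  V[Idx] = std::move(V.back());
  V.pop_back();
}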
StoreInst *StoreInst::cloneImpl() const {
return new StoreInst(getOperand(0), getOperand(1), isVolatile(),
getAlignment(), getOrdering(), getSyncScopeID());
-
+
}
AtomicCmpXchgInst *AtomicCmpXchgInst::cloneImpl() const {
//
//===----------------------------------------------------------------------===//
//
-// This file declares LLVMContextImpl, the opaque implementation
+// This file declares LLVMContextImpl, the opaque implementation
// of LLVMContext.
//
//===----------------------------------------------------------------------===//
/// OwnedModules - The set of modules instantiated in this context, and which
/// will be automatically deleted if this context is deleted.
SmallPtrSet<Module*, 4> OwnedModules;
-
+
LLVMContext::InlineAsmDiagHandlerTy InlineAsmDiagHandler = nullptr;
void *InlineAsmDiagContext = nullptr;
using ArrayConstantsTy = ConstantUniqueMap<ConstantArray>;
ArrayConstantsTy ArrayConstants;
-
+
using StructConstantsTy = ConstantUniqueMap<ConstantStruct>;
StructConstantsTy StructConstants;
-
+
using VectorConstantsTy = ConstantUniqueMap<ConstantVector>;
VectorConstantsTy VectorConstants;
Type VoidTy, LabelTy, HalfTy, FloatTy, DoubleTy, MetadataTy, TokenTy;
Type X86_FP80Ty, FP128Ty, PPC_FP128Ty, X86_MMXTy;
IntegerType Int1Ty, Int8Ty, Int16Ty, Int32Ty, Int64Ty, Int128Ty;
-
+
/// TypeAllocator - All dynamically allocated types are allocated from this.
/// They live forever until the context is torn down.
BumpPtrAllocator TypeAllocator;
-
+
DenseMap<unsigned, IntegerType*> IntegerTypes;
using FunctionTypeSet = DenseSet<FunctionType *, FunctionTypeKeyInfo>;
StructTypeSet AnonStructTypes;
StringMap<StructType*> NamedStructTypes;
unsigned NamedStructTypesUniqueID = 0;
-
+
DenseMap<std::pair<Type *, uint64_t>, ArrayType*> ArrayTypes;
DenseMap<std::pair<Type *, unsigned>, VectorType*> VectorTypes;
DenseMap<Type*, PointerType*> PointerTypes; // Pointers in AddrSpace = 0
/// whether or not a value has an entry in this map.
using ValueHandlesTy = DenseMap<Value *, ValueHandleBase *>;
ValueHandlesTy ValueHandles;
-
+
/// CustomMDKindNames - Map to hold the metadata string to ID mapping.
StringMap<unsigned> CustomMDKindNames;
// Do it.
*Dest = Src;
-
+
// Get the new SymTab object.
ValueSymbolTable *NewST = getSymTab(getListOwner());
-
+
// If there is nothing to do, quick exit.
if (OldST == NewST) return;
-
+
// Move all the elements from the old symtab to the new one.
ListTy &ItemList = getList(getListOwner());
if (ItemList.empty()) return;
-
+
if (OldST) {
// Remove all entries from the previous symtab.
for (auto I = ItemList.begin(); I != ItemList.end(); ++I)
if (I->hasName())
NewST->reinsertValue(&*I);
}
-
+
}
template <typename ValueSubClass>
// *V << "\n");
return;
}
-
+
// Otherwise, there is a naming conflict. Rename this value.
SmallString<256> UniqueName(V->getName().begin(), V->getName().end());
// << *V << "\n");
return &*IterBool.first;
}
-
+
// Otherwise, there is a naming conflict. Rename this value.
SmallString<256> UniqueName(Name.begin(), Name.end());
return makeUniqueName(V, UniqueName);
int TempFD;
llvm::sys::path::remove_filename(CachePath);
sys::path::append(TempFilename, CachePath, "Thin-%%%%%%.tmp.o");
- std::error_code EC =
+ std::error_code EC =
sys::fs::createUniqueFile(TempFilename, TempFD, TempFilename);
if (EC) {
errs() << "Error: " << EC.message() << "\n";
raw_fd_ostream OS(TempFD, /* ShouldClose */ true);
OS << OutputBuffer.getBuffer();
}
- // Rename temp file to final destination; rename is atomic
+ // Rename temp file to final destination; rename is atomic
EC = sys::fs::rename(TempFilename, EntryPath);
if (EC)
sys::fs::remove(TempFilename);
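The cache writer above relies on the write-to-a-unique-temp-file-then-rename idiom; a self-contained sketch of the same pattern, with illustrative names and minimal error handling:

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// Write everything to a unique temp file, then publish it under its final
// name; the rename step is what keeps concurrent writers safe.
static void writeAtomically(StringRef Payload, StringRef FinalPath) {
  SmallString<128> Tmp;
  int FD;
  if (sys::fs::createUniqueFile("cache-%%%%%%.tmp", FD, Tmp))
    return;                                 // could not create the temp file
  {
    raw_fd_ostream OS(FD, /*shouldClose=*/true);
    OS << Payload;                          // contents land in the temp file
  }                                         // stream closed and flushed here
  if (sys::fs::rename(Tmp, FinalPath))      // atomic on the same filesystem
    sys::fs::remove(Tmp);                   // roll back if the rename failed
}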
if (SavedObjectsDirectoryPath.empty()) {
// We need to generate a memory buffer for the linker.
if (!CacheEntryPath.empty()) {
- // When cache is enabled, reload from the cache if possible.
+ // When cache is enabled, reload from the cache if possible.
// Releasing the buffer from the heap and reloading it from the
- // cache file with mmap helps us to lower memory pressure.
- // The freed memory can be used for the next input file.
+ // cache file with mmap helps us to lower memory pressure.
+ // The freed memory can be used for the next input file.
// The final binary link will read from the VFS cache (hopefully!)
// or from disk (if the memory pressure was too high).
auto ReloadedBufferOrErr = CacheEntry.tryLoadingBuffer();
if (!IsVerboseAsm) return;
T.toVector(CommentToEmit);
-
+
if (EOL)
CommentToEmit.push_back('\n'); // Place comment in a new line.
}
EmitEOL();
}
// FIXME: Currently emit unprefix'ed registers.
- // The intel_syntax directive has one optional argument
+ // The intel_syntax directive has one optional argument
// which may have a value of prefix or noprefix.
}
break;
}
- case MCFragment::FT_Data:
+ case MCFragment::FT_Data:
++stats::EmittedDataFragments;
OS << cast<MCDataFragment>(F).getContents();
break;
// LLVMCreateDisasm() creates a disassembler for the TripleName. Symbolic
// disassembly is supported by passing a block of information in the DisInfo
// parameter and specifying the TagType and callback functions as described in
-// the header llvm-c/Disassembler.h . The pointer to the block and the
+// the header llvm-c/Disassembler.h . The pointer to the block and the
// functions can all be passed as NULL. If successful, this returns a
// disassembler context. If not, it returns NULL.
//
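A minimal client-side sketch of that C API; the triple and byte buffer are illustrative, and the targets have to be initialized before creating the context:

#include "llvm-c/Disassembler.h"
#include "llvm-c/Target.h"
#include <stdint.h>

// Disassemble one instruction with the C API described above.
int disasmOneInstruction(void) {
  LLVMInitializeAllTargetInfos();
  LLVMInitializeAllTargetMCs();
  LLVMInitializeAllDisassemblers();

  LLVMDisasmContextRef DC =
      LLVMCreateDisasm("x86_64-unknown-linux-gnu", /*DisInfo=*/NULL,
                       /*TagType=*/0, /*GetOpInfo=*/NULL, /*SymbolLookUp=*/NULL);
  if (!DC)
    return 1;                        // unknown triple or target not built in

  uint8_t Bytes[] = {0xc3};          // x86-64 "retq"
  char Text[128];
  size_t Size = LLVMDisasmInstruction(DC, Bytes, sizeof(Bytes), /*PC=*/0,
                                      Text, sizeof(Text));
  // Size is the byte length of the decoded instruction (0 on failure) and
  // Text holds its printed form.
  LLVMDisasmDispose(DC);
  return Size == 0;
}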
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
-// This file defines the interface for the Disassembly library's disassembler
+// This file defines the interface for the Disassembly library's disassembler
// context. The disassembler is responsible for producing strings for
// individual instructions according to a given architecture and disassembly
// syntax.
// Parameters of the state machine are next.
MCOS->EmitIntValue(context.getAsmInfo()->getMinInstAlignment(), 1);
- // maximum_operations_per_instruction
+ // maximum_operations_per_instruction
// For non-VLIW architectures this field is always 1.
// FIXME: VLIW architectures need to update this field accordingly.
if (LineTableVersion >= 4)
if (getLexer().is(AsmToken::Comma) ||
getLexer().is(AsmToken::EndOfStatement))
break;
-
+
unsigned CurSize;
if (getLexer().is(AsmToken::String)) {
CurSize = getTok().getIdentifier().size() + 2;
void MCStreamer::EmitCFIGnuArgsSize(int64_t Size) {
MCSymbol *Label = EmitCFILabel();
- MCCFIInstruction Instruction =
+ MCCFIInstruction Instruction =
MCCFIInstruction::createGnuArgsSize(Label, Size);
MCDwarfFrameInfo *CurFrame = getCurrentDwarfFrameInfo();
if (!CurFrame)
const DataRegionData *Data = &(*it);
uint64_t Start = getSymbolAddress(*Data->Start, Layout);
uint64_t End;
- if (Data->End)
+ if (Data->End)
End = getSymbolAddress(*Data->End, Layout);
else
report_fatal_error("Data region not terminated");
bool COFFObjectFile::isSectionVirtual(DataRefImpl Ref) const {
const coff_section *Sec = toSec(Ref);
- // In COFF, a virtual section won't have any in-file
+ // In COFF, a virtual section won't have any in-file
// content, so the file pointer to the content will be zero.
return Sec->PointerToRawData == 0;
}
if (compareAbsoluteValue(V) == cmpLessThan)
V = scalbn(V, -1, rmNearestTiesToEven);
V.sign = sign;
-
+
fs = subtract(V, rmNearestTiesToEven);
assert(fs==opOK);
}
*===------------------------------------------------------------------------=*/
/*
* Copyright 2001-2004 Unicode, Inc.
- *
+ *
* Disclaimer
- *
+ *
* This source code is provided as is by Unicode, Inc. No claims are
* made as to fitness for any particular purpose. No warranties of any
* kind are expressed or implied. The recipient agrees to determine
* purchased on magnetic or optical media from Unicode, Inc., the
* sole remedy for any claim will be exchange of defective media
* within 90 days of receipt.
- *
+ *
* Limitations on Rights to Redistribute This Code
- *
+ *
* Unicode, Inc. hereby grants the right to freely use the information
* supplied in this file in the creation of products supporting the
* Unicode Standard, and to make copies of this file in any form
* This table contains as many values as there might be trailing bytes
* in a UTF-8 sequence.
*/
-static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
+static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
0x03C82080UL, 0xFA082080UL, 0x82082080UL };
/*
/* --------------------------------------------------------------------- */
ConversionResult ConvertUTF32toUTF16 (
- const UTF32** sourceStart, const UTF32* sourceEnd,
+ const UTF32** sourceStart, const UTF32* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF32* source = *sourceStart;
/* --------------------------------------------------------------------- */
ConversionResult ConvertUTF16toUTF32 (
- const UTF16** sourceStart, const UTF16* sourceEnd,
+ const UTF16** sourceStart, const UTF16* sourceEnd,
UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF16* source = *sourceStart;
return result;
}
ConversionResult ConvertUTF16toUTF8 (
- const UTF16** sourceStart, const UTF16* sourceEnd,
+ const UTF16** sourceStart, const UTF16* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF16* source = *sourceStart;
UTF32 ch;
unsigned short bytesToWrite = 0;
const UTF32 byteMask = 0xBF;
- const UTF32 byteMark = 0x80;
+ const UTF32 byteMark = 0x80;
const UTF16* oldSource = source; /* In case we have to back up because of target overflow. */
ch = *source++;
/* If we have a surrogate pair, convert to UTF32 first. */
/* --------------------------------------------------------------------- */
ConversionResult ConvertUTF32toUTF8 (
- const UTF32** sourceStart, const UTF32* sourceEnd,
+ const UTF32** sourceStart, const UTF32* sourceEnd,
UTF8** targetStart, UTF8* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF32* source = *sourceStart;
UTF32 ch;
unsigned short bytesToWrite = 0;
const UTF32 byteMask = 0xBF;
- const UTF32 byteMark = 0x80;
+ const UTF32 byteMark = 0x80;
ch = *source++;
if (flags == strictConversion ) {
/* UTF-16 surrogate values are illegal in UTF-32 */
ch = UNI_REPLACEMENT_CHAR;
result = sourceIllegal;
}
-
+
target += bytesToWrite;
if (target > targetEnd) {
--source; /* Back up source pointer! */
/* --------------------------------------------------------------------- */
ConversionResult ConvertUTF8toUTF16 (
- const UTF8** sourceStart, const UTF8* sourceEnd,
+ const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, ConversionFlags flags) {
ConversionResult result = conversionOK;
const UTF8* source = *sourceStart;
/* --------------------------------------------------------------------- */
static ConversionResult ConvertUTF8toUTF32Impl(
- const UTF8** sourceStart, const UTF8* sourceEnd,
+ const UTF8** sourceStart, const UTF8* sourceEnd,
UTF32** targetStart, UTF32* targetEnd, ConversionFlags flags,
Boolean InputIsPartial) {
ConversionResult result = conversionOK;
/// Called when the separate crash-recovery thread was finished, to
/// indicate that we don't need to clear the thread-local CurrentContext.
- void setSwitchedThread() {
+ void setSwitchedThread() {
#if defined(LLVM_ENABLE_THREADS) && LLVM_ENABLE_THREADS != 0
SwitchedThread = true;
#endif
delete tmp;
}
tlIsRecoveringFromCrash->set(PC);
-
+
CrashRecoveryContextImpl *CRCI = (CrashRecoveryContextImpl *) Impl;
delete CRCI;
}
assert(PredClosure.count(Node) && "Invalid node!");
return PredClosure[Node].end();
}
-
+
succ_iterator_ty succ_begin(change_ty Node) {
assert(Successors.count(Node) && "Invalid node!");
return Successors[Node].begin();
Worklist.pop_back();
std::set<change_ty> &ChangeSuccs = SuccClosure[Change];
- for (pred_iterator_ty it = pred_begin(Change),
+ for (pred_iterator_ty it = pred_begin(Change),
ie = pred_end(Change); it != ie; ++it) {
SuccClosure[*it].insert(Change);
SuccClosure[*it].insert(ChangeSuccs.begin(), ChangeSuccs.end());
for (succ_closure_iterator_ty it2 = succ_closure_begin(*it),
ie2 = succ_closure_end(*it); it2 != ie2; ++it2)
PredClosure[*it2].insert(*it);
-
+
// Dump useful debug info.
LLVM_DEBUG({
llvm::errs() << "-- DAGDeltaAlgorithmImpl --\n";
const int MaxErrStrLen = 2000;
char buffer[MaxErrStrLen];
buffer[0] = '\0';
-#endif
+#endif
#ifdef HAVE_STRERROR_R
// strerror_r is thread-safe.
unsigned Units = Size / 4;
unsigned Pos = 0;
const unsigned *Base = (const unsigned*) String.data();
-
+
// If the string is aligned do a bulk transfer.
if (!((intptr_t)Base & 3)) {
Bits.append(Base, Base + Units);
}
}
}
-
+
// With the leftover bits.
unsigned V = 0;
// Pos will have overshot size by 4 - #bytes left over.
Bits.append(ID.Bits.begin(), ID.Bits.end());
}
-/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to
+/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used to
/// look up the node in the FoldingSetBase.
unsigned FoldingSetNodeID::ComputeHash() const {
return FoldingSetNodeIDRef(Bits.data(), Bits.size()).ComputeHash();
// The low bit is set if this is the pointer back to the bucket.
if (reinterpret_cast<intptr_t>(NextInBucketPtr) & 1)
return nullptr;
-
+
return static_cast<FoldingSetBase::Node*>(NextInBucketPtr);
}
assert(isPowerOf2_32(NewBucketCount) && "Bad bucket count!");
void **OldBuckets = Buckets;
unsigned OldNumBuckets = NumBuckets;
-
+
// Clear out new buckets.
Buckets = AllocateBuckets(NewBucketCount);
  // Set NumBuckets only if allocation of new buckets was successful
- NumBuckets = NewBucketCount;
+ NumBuckets = NewBucketCount;
NumNodes = 0;
// Walk the old buckets, rehashing nodes into their new place.
TempID.clear();
}
}
-
+
free(OldBuckets);
}
unsigned IDHash = ID.ComputeHash();
void **Bucket = GetBucketFor(IDHash, Buckets, NumBuckets);
void *Probe = *Bucket;
-
+
InsertPos = nullptr;
-
+
FoldingSetNodeID TempID;
while (Node *NodeInBucket = GetNextPtr(Probe)) {
if (NodeEquals(NodeInBucket, ID, IDHash, TempID))
Probe = NodeInBucket->getNextInBucket();
}
-
+
// Didn't find the node, return null with the bucket as the InsertPos.
InsertPos = Bucket;
return nullptr;
}
/// InsertNode - Insert the specified node into the folding set, knowing that it
-/// is not already in the map. InsertPos must be obtained from
+/// is not already in the map. InsertPos must be obtained from
/// FindNodeOrInsertPos.
void FoldingSetBase::InsertNode(Node *N, void *InsertPos) {
assert(!N->getNextInBucket());
}
++NumNodes;
-
+
/// The insert position is actually a bucket pointer.
void **Bucket = static_cast<void**>(InsertPos);
-
+
void *Next = *Bucket;
-
+
// If this is the first insertion into this bucket, its next pointer will be
// null. Pretend as if it pointed to itself, setting the low bit to indicate
// that it is a pointer to the bucket.
// Remember what N originally pointed to, either a bucket or another node.
void *NodeNextPtr = Ptr;
-
+
// Chase around the list until we find the node (or bucket) which points to N.
while (true) {
if (Node *NodeInBucket = GetNextPtr(Ptr)) {
// Advance pointer.
Ptr = NodeInBucket->getNextInBucket();
-
+
// We found a node that points to N, change it to point to N's next node,
// removing N from the list.
if (Ptr == N) {
} else {
void **Bucket = GetBucketPtr(Ptr);
Ptr = *Bucket;
-
+
// If we found that the bucket points to N, update the bucket to point to
// whatever is next.
if (Ptr == N) {
while (*Bucket != reinterpret_cast<void*>(-1) &&
(!*Bucket || !GetNextPtr(*Bucket)))
++Bucket;
-
+
NodePtr = static_cast<FoldingSetNode*>(*Bucket);
}
if (FoldingSetNode *NextNodeInBucket = GetNextPtr(Probe))
NodePtr = NextNodeInBucket;
else {
- // Otherwise, this is the last link in this bucket.
+ // Otherwise, this is the last link in this bucket.
void **Bucket = GetBucketPtr(Probe);
// Skip to the next non-null non-self-cycle bucket.
++Bucket;
} while (*Bucket != reinterpret_cast<void*>(-1) &&
(!*Bucket || !GetNextPtr(*Bucket)));
-
+
NodePtr = static_cast<FoldingSetNode*>(*Bucket);
}
}
///
/// \param NewCol - The column to move to.
///
-formatted_raw_ostream &formatted_raw_ostream::PadToColumn(unsigned NewCol) {
+formatted_raw_ostream &formatted_raw_ostream::PadToColumn(unsigned NewCol) {
// Figure out what's in the buffer and add it to the column count.
ComputePosition(getBufferStart(), GetNumBytesInBuffer());
Ptr.store(Tmp, std::memory_order_release);
DeleterFn = Deleter;
-
+
// Add to list of managed statics.
Next = StaticList;
StaticList = this;
"Partially initialized ManagedStatic!?");
Ptr = Creator();
DeleterFn = Deleter;
-
+
// Add to list of managed statics.
Next = StaticList;
StaticList = this;
// Destroy memory.
DeleterFn(Ptr);
-
+
// Cleanup.
Ptr = nullptr;
DeleterFn = nullptr;
}
ErrorOr<std::unique_ptr<MemoryBuffer>>
-MemoryBuffer::getFileSlice(const Twine &FilePath, uint64_t MapSize,
+MemoryBuffer::getFileSlice(const Twine &FilePath, uint64_t MapSize,
uint64_t Offset, bool IsVolatile) {
return getFileAux<MemoryBuffer>(FilePath, -1, MapSize, Offset, false,
IsVolatile);
//===- PrettyStackTrace.cpp - Pretty Crash Handling -----------------------===//
-//
+//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
-//
+//
//===----------------------------------------------------------------------===//
//
// This file defines some helpful functions for dealing with the possibility of
static void PrintCurStackTrace(raw_ostream &OS) {
// Don't print an empty trace.
if (!PrettyStackTraceHead) return;
-
+
// If there are pretty stack frames registered, walk and emit them.
OS << "Stack dump:\n";
-
+
PrintStack(OS);
OS.flush();
}
// If any clients of llvm try to link to libCrashReporterClient.a themselves,
// only one crash info struct will be used.
extern "C" {
-CRASH_REPORTER_CLIENT_HIDDEN
-struct crashreporter_annotations_t gCRAnnotations
- __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION)))
+CRASH_REPORTER_CLIENT_HIDDEN
+struct crashreporter_annotations_t gCRAnnotations
+ __attribute__((section("__DATA," CRASHREPORTER_ANNOTATIONS_SECTION)))
#if CRASHREPORTER_ANNOTATIONS_VERSION < 5
= { CRASHREPORTER_ANNOTATIONS_VERSION, 0, 0, 0, 0, 0, 0 };
#else
raw_svector_ostream Stream(TmpStr);
PrintCurStackTrace(Stream);
}
-
+
if (!TmpStr.empty()) {
#ifdef HAVE_CRASHREPORTERCLIENT_H
// Cast to void to avoid warning.
(void)CRSetCrashLogMessage(TmpStr.c_str());
-#elif HAVE_CRASHREPORTER_INFO
+#elif HAVE_CRASHREPORTER_INFO
__crashreporter_info__ = strdup(TmpStr.c_str());
#endif
errs() << TmpStr.str();
}
-
+
#endif
}
std::pair<unsigned, unsigned> LineAndCol;
StringRef BufferID = "<unknown>";
std::string LineStr;
-
+
if (Loc.isValid()) {
unsigned CurBuf = FindBufferContainingLoc(Loc);
assert(CurBuf && "Invalid or unspecified location!");
const MemoryBuffer *CurMB = getMemoryBuffer(CurBuf);
BufferID = CurMB->getBufferIdentifier();
-
+
// Scan backward to find the start of the line.
const char *LineStart = Loc.getPointer();
const char *BufStart = CurMB->getBufferStart();
for (unsigned i = 0, e = Ranges.size(); i != e; ++i) {
SMRange R = Ranges[i];
if (!R.isValid()) continue;
-
+
// If the line doesn't contain any part of the range, then ignore it.
if (R.Start.getPointer() > LineEnd || R.End.getPointer() < LineStart)
continue;
-
+
// Ignore pieces of the range that go onto other lines.
if (R.Start.getPointer() < LineStart)
R.Start = SMLoc::getFromPointer(LineStart);
if (R.End.getPointer() > LineEnd)
R.End = SMLoc::getFromPointer(LineEnd);
-
+
// Translate from SMLoc ranges to column ranges.
// FIXME: Handle multibyte characters.
ColRanges.push_back(std::make_pair(R.Start.getPointer()-LineStart,
LineAndCol = getLineAndColumn(Loc, CurBuf);
}
-
+
return SMDiagnostic(*this, Loc, BufferID, LineAndCol.first,
LineAndCol.second-1, Kind, Msg.str(),
LineStr, ColRanges, FixIts);
// Build the line with the caret and ranges.
std::string CaretLine(NumColumns+1, ' ');
-
+
// Expand any ranges.
for (unsigned r = 0, e = Ranges.size(); r != e; ++r) {
std::pair<unsigned, unsigned> R = Ranges[r];
// Finally, plop on the caret.
if (unsigned(ColumnNo) <= NumColumns)
CaretLine[ColumnNo] = '^';
- else
+ else
CaretLine[NumColumns] = '^';
-
+
// ... and remove trailing whitespace so the output doesn't wrap for it. We
// know that the line isn't completely empty because it has the caret in it at
// least.
CaretLine.erase(CaretLine.find_last_not_of(' ')+1);
-
+
printSourceLine(S, LineContents);
if (ShowColors)
++OutCol;
continue;
}
-
+
// Okay, we have a tab. Insert the appropriate number of characters.
do {
S << CaretLine[i];
// Print out the replacement line, matching tabs in the source line.
if (FixItInsertionLine.empty())
return;
-
+
for (size_t i = 0, e = FixItInsertionLine.size(), OutCol = 0; i < e; ++i) {
if (i >= LineContents.size() || LineContents[i] != '\t') {
S << FixItInsertionLine[i];
table_t::iterator I = InternTable.find(Key);
if (I != InternTable.end())
return PooledStringPtr(&*I);
-
+
entry_t *S = entry_t::Create(Key);
S->getValue().Pool = this;
InternTable.insert(S);
-
+
return PooledStringPtr(S);
}
Str = Str.substr(2);
return 16;
}
-
+
if (Str.startswith("0b") || Str.startswith("0B")) {
Str = Str.substr(2);
return 2;
// convenience to some clients.
if (T.Name)
return;
-
+
// Add to the list of targets.
T.Next = FirstTarget;
FirstTarget = &T;
Current = skip_while(&Scanner::skip_ns_char, Current);
StringRef Name(NameStart, Current - NameStart);
Current = skip_while(&Scanner::skip_s_white, Current);
-
+
Token T;
if (Name == "YAML") {
Current = skip_while(&Scanner::skip_ns_char, Current);
int llvm_regcomp(llvm_regex_t *, const char *, int);
size_t llvm_regerror(int, const llvm_regex_t *, char *, size_t);
-int llvm_regexec(const llvm_regex_t *, const char *, size_t,
+int llvm_regexec(const llvm_regex_t *, const char *, size_t,
llvm_regmatch_t [], int);
void llvm_regfree(llvm_regex_t *);
size_t llvm_strlcpy(char *dst, const char *src, size_t siz);
/// FindFirstNonCommonLetter - Find the first character in the keys of the
/// string pairs that is not shared across the whole set of strings. All
/// strings are assumed to have the same length.
-static unsigned
+static unsigned
FindFirstNonCommonLetter(const std::vector<const
StringMatcher::StringPair*> &Matches) {
assert(!Matches.empty());
for (unsigned i = 0, e = Matches[0]->first.size(); i != e; ++i) {
// Check to see if letter i is the same across the set.
char Letter = Matches[0]->first[i];
-
+
for (unsigned str = 0, e = Matches.size(); str != e; ++str)
if (Matches[str]->first[i] != Letter)
return i;
}
-
+
return Matches[0]->first.size();
}
unsigned IndentCount, bool IgnoreDuplicates) const {
assert(!Matches.empty() && "Must have at least one string to match!");
std::string Indent(IndentCount * 2 + 4, ' ');
-
+
// If we have verified that the entire string matches, we're done: output the
// matching code.
if (CharNo == Matches[0]->first.size()) {
// If the to-execute code has \n's in it, indent each subsequent line.
StringRef Code = Matches[0]->second;
-
+
std::pair<StringRef, StringRef> Split = Code.split('\n');
OS << Indent << Split.first << "\t // \"" << Matches[0]->first << "\"\n";
}
return false;
}
-
+
// Bucket the matches by the character we are comparing.
std::map<char, std::vector<const StringPair*>> MatchesByLetter;
-
+
for (unsigned i = 0, e = Matches.size(); i != e; ++i)
MatchesByLetter[Matches[i]->first[CharNo]].push_back(Matches[i]);
-
-
+
+
// If we have exactly one bucket to match, see how many characters are common
// across the whole set and match all of them at once.
if (MatchesByLetter.size() == 1) {
unsigned FirstNonCommonLetter = FindFirstNonCommonLetter(Matches);
unsigned NumChars = FirstNonCommonLetter-CharNo;
-
+
// Emit code to break out if the prefix doesn't match.
if (NumChars == 1) {
// Do the comparison with if (Str[1] != 'f')
return EmitStringMatcherForChar(Matches, FirstNonCommonLetter, IndentCount,
IgnoreDuplicates);
}
-
+
// Otherwise, we have multiple possible things, emit a switch on the
// character.
OS << Indent << "switch (" << StrVariableName << "[" << CharNo << "]) {\n";
OS << Indent << "default: break;\n";
-
- for (std::map<char, std::vector<const StringPair*>>::iterator LI =
+
+ for (std::map<char, std::vector<const StringPair*>>::iterator LI =
MatchesByLetter.begin(), E = MatchesByLetter.end(); LI != E; ++LI) {
// TODO: escape hard stuff (like \n) if we ever care about it.
OS << Indent << "case '" << LI->first << "':\t // "
IgnoreDuplicates))
OS << Indent << " break;\n";
}
-
+
OS << Indent << "}\n";
return true;
}
void StringMatcher::Emit(unsigned Indent, bool IgnoreDuplicates) const {
// If nothing to match, just fall through.
if (Matches.empty()) return;
-
+
// First level categorization: group strings by length.
std::map<unsigned, std::vector<const StringPair*>> MatchesByLength;
-
+
for (unsigned i = 0, e = Matches.size(); i != e; ++i)
MatchesByLength[Matches[i].first.size()].push_back(&Matches[i]);
-
+
// Output a switch statement on length and categorize the elements within each
// bin.
OS.indent(Indent*2+2) << "switch (" << StrVariableName << ".size()) {\n";
OS.indent(Indent*2+2) << "default: break;\n";
-
+
for (std::map<unsigned, std::vector<const StringPair*>>::iterator LI =
MatchesByLength.begin(), E = MatchesByLength.end(); LI != E; ++LI) {
OS.indent(Indent*2+2) << "case " << LI->first << ":\t // "
if (EmitStringMatcherForChar(LI->second, 0, Indent, IgnoreDuplicates))
OS.indent(Indent*2+4) << "break;\n";
}
-
+
OS.indent(Indent*2+2) << "}\n";
}
MBB.rend(),
[&LRU](MachineInstr &MI) { LRU.accumulate(MI); });
- if (!LRU.available(AArch64::LR))
+ if (!LRU.available(AArch64::LR))
Flags |= MachineOutlinerMBBFlags::LRUnavailableSomewhere;
return Flags;
// ahead and skip over them.
if (MI.isKill())
return outliner::InstrType::Invisible;
-
+
// Is this a terminator for a basic block?
if (MI.isTerminator()) {
// Is this the end of a function?
if (MI.getParent()->succ_empty())
return outliner::InstrType::Legal;
-
+
// It's not, so don't outline it.
return outliner::InstrType::Illegal;
}
Optional<bool> hasRedZone() const { return HasRedZone; }
void setHasRedZone(bool s) { HasRedZone = s; }
-
+
int getVarArgsStackIndex() const { return VarArgsStackIndex; }
void setVarArgsStackIndex(int Index) { VarArgsStackIndex = Index; }
unsigned NumVectorInstToHideOverhead = 10;
int MaxMergeDistance = 64;
- if (Ty->isVectorTy() && SE &&
+ if (Ty->isVectorTy() && SE &&
!BaseT::isConstantStridedAccessLessThan(SE, Ptr, MaxMergeDistance + 1))
return NumVectorInstToHideOverhead;
// globals from all functions in PromotedGlobals.
for (auto *GV : AFI->getGlobalsPromotedToConstantPool())
PromotedGlobals.insert(GV);
-
+
// Calculate this function's optimization goal.
unsigned OptimizationGoal;
if (F.hasFnAttribute(Attribute::OptimizeNone))
if (Subtarget->isThumb1Only())
EmitAlignment(2);
-
+
MCSymbol *JTISymbol = GetARMJTIPICJumpTableLabel(JTI);
OutStreamer->EmitLabel(JTISymbol);
// don't know for sure yet whether we'll need that, so we guess based
// on whether there are any local variables that would trigger it.
unsigned StackAlign = TFI->getStackAlignment();
- if (TFI->hasFP(MF) &&
+ if (TFI->hasFP(MF) &&
!((MFI.getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
if (isFrameOffsetLegal(MI, getFrameRegister(MF), FPOffset))
return false;
unsigned DeadSize = 0;
bool CanDeleteLEA = false;
bool BaseRegKill = false;
-
+
unsigned IdxReg = ~0U;
bool IdxRegKill = true;
if (isThumb2) {
bool isLSDA() const { return Kind == ARMCP::CPLSDA; }
bool isMachineBasicBlock() const{ return Kind == ARMCP::CPMachineBasicBlock; }
bool isPromotedGlobal() const{ return Kind == ARMCP::CPPromotedGlobal; }
-
+
int getExistingMachineCPValue(MachineConstantPool *CP,
unsigned Alignment) override;
// Debug location must be unknown since the first debug location is used
// to determine the end of the prologue.
DebugLoc dl;
-
+
unsigned FramePtr = RegInfo->getFrameRegister(MF);
// Determine the sizes of each callee-save spill areas and record which frame
return CurDAG->getMachineNode(Opc, dl, MVT::i32, Ops);
}
};
-
+
if (Range->second == 0) {
// 1. Mask includes the LSB -> Simply shift the top N bits off
NewN = EmitShift(ARM::tLSLri, X, 31 - Range->first);
MachineMemOperand::MOLoad, 4, 4);
cast<MachineSDNode>(ResNode)->setMemRefs(MemOp, MemOp+1);
-
+
ReplaceNode(N, ResNode);
return;
}
assert(N3.getOpcode() == ISD::Register);
unsigned CC = (unsigned) cast<ConstantSDNode>(N2)->getZExtValue();
-
+
if (InFlag.getOpcode() == ARMISD::CMPZ) {
bool SwitchEQNEToPLMI;
SelectCMPZ(InFlag.getNode(), SwitchEQNEToPLMI);
// Other cases are autogenerated.
break;
}
-
+
case ARMISD::VZIP: {
unsigned Opc = 0;
EVT VT = N->getValueType(0);
// need to be duplicated) or duplicating the constant wouldn't increase code
// size (implying the constant is no larger than 4 bytes).
const Function &F = DAG.getMachineFunction().getFunction();
-
+
  // We rely on this decision to inline being idempotent and unrelated to the
// use-site. We know that if we inline a variable at one use site, we'll
// inline it elsewhere too (and reuse the constant pool entry). Fast-isel
return SDValue();
// SoftFP: read half-precision arguments:
//
- // t2: i32,ch = ...
+ // t2: i32,ch = ...
// t7: i16 = truncate t2 <~~~~ Op
// t8: f16 = bitcast t7 <~~~~ N
//
return SDValue();
}
- // Half-precision return values
+ // Half-precision return values
if (SrcVT == MVT::f16 && DstVT == MVT::i16) {
if (!HasFullFP16)
return SDValue();
auto *RHS = dyn_cast<ConstantSDNode>(Op->getOperand(1));
if (!RHS || RHS->getZExtValue() != 4)
return false;
-
+
Offset = Op->getOperand(1);
Base = Op->getOperand(0);
AM = ISD::POST_INC;
return true;
}
-
+
bool isInc;
bool isLegal = false;
if (Subtarget->isThumb2())