author     Dimitry Andric <dim@FreeBSD.org>  2013-12-22 00:07:40 +0000
committer  Dimitry Andric <dim@FreeBSD.org>  2013-12-22 00:07:40 +0000
commit     bfef399519ca9b8a4b4c6b563253bad7e0eeffe0 (patch)
tree       df8df0b0067b381eab470a3b8f28d14a552a6340 /lib/CodeGen
parent     6a0372513edbc473b538d2f724efac50405d6fef (diff)
download   src-bfef399519ca9b8a4b4c6b563253bad7e0eeffe0.tar.gz
           src-bfef399519ca9b8a4b4c6b563253bad7e0eeffe0.zip

Vendor import of clang release_34 branch r197841 (effectively, 3.4 RC3)
(tag: vendor/clang/clang-release_34-r197841)

Notes:
    svn path=/vendor/clang/dist/; revision=259701
    svn path=/vendor/clang/clang-release_34-r197841/; revision=259703; tag=vendor/clang/clang-release_34-r197841
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h                 |  147
-rw-r--r--  lib/CodeGen/BackendUtil.cpp           |   40
-rw-r--r--  lib/CodeGen/CGAtomic.cpp              |  231
-rw-r--r--  lib/CodeGen/CGBlocks.cpp              |   40
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp             | 2556
-rw-r--r--  lib/CodeGen/CGCUDARuntime.cpp         |    4
-rw-r--r--  lib/CodeGen/CGCXX.cpp                 |  249
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp              |   42
-rw-r--r--  lib/CodeGen/CGCXXABI.h                |  157
-rw-r--r--  lib/CodeGen/CGCall.cpp                |  237
-rw-r--r--  lib/CodeGen/CGCall.h                  |  200
-rw-r--r--  lib/CodeGen/CGClass.cpp               |  393
-rw-r--r--  lib/CodeGen/CGCleanup.cpp             |   39
-rw-r--r--  lib/CodeGen/CGCleanup.h               |   12
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp           | 1355
-rw-r--r--  lib/CodeGen/CGDebugInfo.h             |  176
-rw-r--r--  lib/CodeGen/CGDecl.cpp                |  119
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp             |   80
-rw-r--r--  lib/CodeGen/CGException.cpp           |   33
-rw-r--r--  lib/CodeGen/CGExpr.cpp                |  939
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp             |  285
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp             |  235
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp         |  167
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp        |   95
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp          |  402
-rw-r--r--  lib/CodeGen/CGObjC.cpp                |   30
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp             |   59
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp             |   29
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp         |    1
-rw-r--r--  lib/CodeGen/CGRTTI.cpp                |   62
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp |   54
-rw-r--r--  lib/CodeGen/CGStmt.cpp                |  184
-rw-r--r--  lib/CodeGen/CGVTT.cpp                 |   34
-rw-r--r--  lib/CodeGen/CGVTables.cpp             |  363
-rw-r--r--  lib/CodeGen/CGVTables.h               |   40
-rw-r--r--  lib/CodeGen/CGValue.h                 |   30
-rw-r--r--  lib/CodeGen/CMakeLists.txt            |    2
-rw-r--r--  lib/CodeGen/CodeGenABITypes.cpp       |   69
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp         |   17
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp       |  152
-rw-r--r--  lib/CodeGen/CodeGenFunction.h         |  714
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp         |  723
-rw-r--r--  lib/CodeGen/CodeGenModule.h           |  120
-rw-r--r--  lib/CodeGen/CodeGenTBAA.cpp           |   73
-rw-r--r--  lib/CodeGen/CodeGenTBAA.h             |    2
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp          |    7
-rw-r--r--  lib/CodeGen/CodeGenTypes.h            |    7
-rw-r--r--  lib/CodeGen/EHScopeStack.h            |  489
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp         |  514
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp       | 1289
-rw-r--r--  lib/CodeGen/MicrosoftVBTables.cpp     |  233
-rw-r--r--  lib/CodeGen/MicrosoftVBTables.h       |  129
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp         |   36
-rw-r--r--  lib/CodeGen/TargetInfo.cpp            |  842
-rw-r--r--  lib/CodeGen/TargetInfo.h              |   34
55 files changed, 9873 insertions, 4698 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index df6dc7216a47..468fe0439967 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -25,6 +25,7 @@ namespace clang {
class TargetInfo;
namespace CodeGen {
+ class CGCXXABI;
class CGFunctionInfo;
class CodeGenFunction;
class CodeGenTypes;
@@ -35,151 +36,6 @@ namespace clang {
// the targets to support this, since the Targets currently live in a
// layer below types n'stuff.
- /// ABIArgInfo - Helper class to encapsulate information about how a
- /// specific C type should be passed to or returned from a function.
- class ABIArgInfo {
- public:
- enum Kind {
- /// Direct - Pass the argument directly using the normal converted LLVM
- /// type, or by coercing to another specified type stored in
- /// 'CoerceToType'). If an offset is specified (in UIntData), then the
- /// argument passed is offset by some number of bytes in the memory
- /// representation. A dummy argument is emitted before the real argument
- /// if the specified type stored in "PaddingType" is not zero.
- Direct,
-
- /// Extend - Valid only for integer argument types. Same as 'direct'
- /// but also emit a zero/sign extension attribute.
- Extend,
-
- /// Indirect - Pass the argument indirectly via a hidden pointer
- /// with the specified alignment (0 indicates default alignment).
- Indirect,
-
- /// Ignore - Ignore the argument (treat as void). Useful for void and
- /// empty structs.
- Ignore,
-
- /// Expand - Only valid for aggregate argument types. The structure should
- /// be expanded into consecutive arguments for its constituent fields.
- /// Currently expand is only allowed on structures whose fields
- /// are all scalar types or are themselves expandable types.
- Expand,
-
- KindFirst=Direct, KindLast=Expand
- };
-
- private:
- Kind TheKind;
- llvm::Type *TypeData;
- llvm::Type *PaddingType;
- unsigned UIntData;
- bool BoolData0;
- bool BoolData1;
- bool InReg;
- bool PaddingInReg;
-
- ABIArgInfo(Kind K, llvm::Type *TD, unsigned UI, bool B0, bool B1, bool IR,
- bool PIR, llvm::Type* P)
- : TheKind(K), TypeData(TD), PaddingType(P), UIntData(UI), BoolData0(B0),
- BoolData1(B1), InReg(IR), PaddingInReg(PIR) {}
-
- public:
- ABIArgInfo() : TheKind(Direct), TypeData(0), UIntData(0) {}
-
- static ABIArgInfo getDirect(llvm::Type *T = 0, unsigned Offset = 0,
- llvm::Type *Padding = 0) {
- return ABIArgInfo(Direct, T, Offset, false, false, false, false, Padding);
- }
- static ABIArgInfo getDirectInReg(llvm::Type *T = 0) {
- return ABIArgInfo(Direct, T, 0, false, false, true, false, 0);
- }
- static ABIArgInfo getExtend(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, false, false, 0);
- }
- static ABIArgInfo getExtendInReg(llvm::Type *T = 0) {
- return ABIArgInfo(Extend, T, 0, false, false, true, false, 0);
- }
- static ABIArgInfo getIgnore() {
- return ABIArgInfo(Ignore, 0, 0, false, false, false, false, 0);
- }
- static ABIArgInfo getIndirect(unsigned Alignment, bool ByVal = true
- , bool Realign = false
- , llvm::Type *Padding = 0) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, false, false,
- Padding);
- }
- static ABIArgInfo getIndirectInReg(unsigned Alignment, bool ByVal = true
- , bool Realign = false) {
- return ABIArgInfo(Indirect, 0, Alignment, ByVal, Realign, true, false, 0);
- }
- static ABIArgInfo getExpand() {
- return ABIArgInfo(Expand, 0, 0, false, false, false, false, 0);
- }
- static ABIArgInfo getExpandWithPadding(bool PaddingInReg,
- llvm::Type *Padding) {
- return ABIArgInfo(Expand, 0, 0, false, false, false, PaddingInReg,
- Padding);
- }
-
- Kind getKind() const { return TheKind; }
- bool isDirect() const { return TheKind == Direct; }
- bool isExtend() const { return TheKind == Extend; }
- bool isIgnore() const { return TheKind == Ignore; }
- bool isIndirect() const { return TheKind == Indirect; }
- bool isExpand() const { return TheKind == Expand; }
-
- bool canHaveCoerceToType() const {
- return TheKind == Direct || TheKind == Extend;
- }
-
- // Direct/Extend accessors
- unsigned getDirectOffset() const {
- assert((isDirect() || isExtend()) && "Not a direct or extend kind");
- return UIntData;
- }
-
- llvm::Type *getPaddingType() const {
- return PaddingType;
- }
-
- bool getPaddingInReg() const {
- return PaddingInReg;
- }
-
- llvm::Type *getCoerceToType() const {
- assert(canHaveCoerceToType() && "Invalid kind!");
- return TypeData;
- }
-
- void setCoerceToType(llvm::Type *T) {
- assert(canHaveCoerceToType() && "Invalid kind!");
- TypeData = T;
- }
-
- bool getInReg() const {
- assert((isDirect() || isExtend() || isIndirect()) && "Invalid kind!");
- return InReg;
- }
-
- // Indirect accessors
- unsigned getIndirectAlign() const {
- assert(TheKind == Indirect && "Invalid kind!");
- return UIntData;
- }
-
- bool getIndirectByVal() const {
- assert(TheKind == Indirect && "Invalid kind!");
- return BoolData0;
- }
-
- bool getIndirectRealign() const {
- assert(TheKind == Indirect && "Invalid kind!");
- return BoolData1;
- }
-
- void dump() const;
- };
/// ABIInfo - Target specific hooks for defining how a type should be
/// passed or returned from functions.
@@ -194,6 +50,7 @@ namespace clang {
virtual ~ABIInfo();
+ CodeGen::CGCXXABI &getCXXABI() const;
ASTContext &getContext() const;
llvm::LLVMContext &getVMContext() const;
const llvm::DataLayout &getDataLayout() const;
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 45079c098984..90b0f68bd693 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -154,6 +154,14 @@ static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase
PM.add(createObjCARCOptPass());
}
+static void addSampleProfileLoaderPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper &>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ PM.add(createSampleProfileLoaderPass(CGOpts.SampleProfileFile));
+}
+
static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
PM.add(createBoundsCheckingPass());
@@ -206,6 +214,14 @@ static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
PM.add(createThreadSanitizerPass(CGOpts.SanitizerBlacklistFile));
}
+static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ PM.add(createDataFlowSanitizerPass(CGOpts.SanitizerBlacklistFile));
+}
+
void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
unsigned OptLevel = CodeGenOpts.OptimizationLevel;
CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
@@ -220,10 +236,17 @@ void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
PMBuilder.OptLevel = OptLevel;
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
+ PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB;
+ PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
+ PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
- PMBuilder.DisableSimplifyLibCalls = !CodeGenOpts.SimplifyLibCalls;
PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+ PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
+
+ if (!CodeGenOpts.SampleProfileFile.empty())
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addSampleProfileLoaderPass);
// In ObjC ARC mode, add the main ARC optimization passes.
if (LangOpts.ObjCAutoRefCount) {
@@ -235,7 +258,7 @@ void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
addObjCARCOptPass);
}
- if (LangOpts.Sanitize.Bounds) {
+ if (LangOpts.Sanitize.LocalBounds) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addBoundsCheckingPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -263,6 +286,13 @@ void EmitAssemblyHelper::CreatePasses(TargetMachine *TM) {
addThreadSanitizerPass);
}
+ if (LangOpts.Sanitize.DataFlow) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addDataFlowSanitizerPass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addDataFlowSanitizerPass);
+ }
+
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
@@ -410,13 +440,10 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
// Set frame pointer elimination mode.
if (!CodeGenOpts.DisableFPElim) {
Options.NoFramePointerElim = false;
- Options.NoFramePointerElimNonLeaf = false;
} else if (CodeGenOpts.OmitLeafFramePointer) {
Options.NoFramePointerElim = false;
- Options.NoFramePointerElimNonLeaf = true;
} else {
Options.NoFramePointerElim = true;
- Options.NoFramePointerElimNonLeaf = true;
}
if (CodeGenOpts.UseInitArray)
@@ -452,11 +479,9 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
Options.UseSoftFloat = CodeGenOpts.SoftFloat;
Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
- Options.RealignStack = CodeGenOpts.StackRealignment;
Options.DisableTailCalls = CodeGenOpts.DisableTailCalls;
Options.TrapFuncName = CodeGenOpts.TrapFuncName;
Options.PositionIndependentExecutable = LangOpts.PIELevel != 0;
- Options.SSPBufferSize = CodeGenOpts.SSPBufferSize;
Options.EnableSegmentedStacks = CodeGenOpts.EnableSegmentedStacks;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
@@ -529,6 +554,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
Action != Backend_EmitLL);
TargetMachine *TM = CreateTargetMachine(UsesCodeGen);
if (UsesCodeGen && !TM) return;
+ llvm::OwningPtr<TargetMachine> TMOwner(CodeGenOpts.DisableFree ? 0 : TM);
CreatePasses(TM);
switch (Action) {
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 0b48a5c66435..0df2a4000e28 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -15,6 +15,8 @@
#include "CGCall.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
@@ -92,7 +94,7 @@ namespace {
return (ValueSizeInBits != AtomicSizeInBits);
}
- void emitMemSetZeroIfNecessary(LValue dest) const;
+ bool emitMemSetZeroIfNecessary(LValue dest) const;
llvm::Value *getAtomicSizeValue() const {
CharUnits size = CGF.getContext().toCharUnitsFromBits(AtomicSizeInBits);
@@ -105,7 +107,8 @@ namespace {
/// Turn an atomic-layout object into an r-value.
RValue convertTempToRValue(llvm::Value *addr,
- AggValueSlot resultSlot) const;
+ AggValueSlot resultSlot,
+ SourceLocation loc) const;
/// Copy an atomic r-value into atomic-layout memory.
void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
@@ -163,21 +166,22 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
return !isFullSizeType(CGF.CGM, type->getStructElementType(0),
AtomicSizeInBits / 2);
- // Just be pessimistic about aggregates.
+ // Padding in structs has an undefined bit pattern. User beware.
case TEK_Aggregate:
- return true;
+ return false;
}
llvm_unreachable("bad evaluation kind");
}
-void AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
+bool AtomicInfo::emitMemSetZeroIfNecessary(LValue dest) const {
llvm::Value *addr = dest.getAddress();
if (!requiresMemSetZero(addr->getType()->getPointerElementType()))
- return;
+ return false;
CGF.Builder.CreateMemSet(addr, llvm::ConstantInt::get(CGF.Int8Ty, 0),
AtomicSizeInBits / 8,
dest.getAlignment().getQuantity());
+ return true;
}
static void
@@ -317,6 +321,22 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
return DeclPtr;
}
+static void
+AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
+ bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
+ SourceLocation Loc) {
+ if (UseOptimizedLibcall) {
+ // Load value and pass it to the function directly.
+ unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
+ Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
+ Args.add(RValue::get(Val), ValTy);
+ } else {
+ // Non-optimized functions always take a reference.
+ Args.add(RValue::get(CGF.EmitCastToVoidPtr(Val)),
+ CGF.getContext().VoidPtrTy);
+ }
+}
+
RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
QualType AtomicTy = E->getPtr()->getType()->getPointeeType();
QualType MemTy = AtomicTy;
@@ -424,67 +444,142 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
+ bool UseOptimizedLibcall = false;
+ switch (E->getOp()) {
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ // For these, only library calls for certain sizes exist.
+ UseOptimizedLibcall = true;
+ break;
+ default:
+ // Only use optimized library calls for sizes for which they exist.
+ if (Size == 1 || Size == 2 || Size == 4 || Size == 8)
+ UseOptimizedLibcall = true;
+ break;
+ }
- SmallVector<QualType, 5> Params;
CallArgList Args;
- // Size is always the first parameter
- Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
- getContext().getSizeType());
- // Atomic address is always the second parameter
- Args.add(RValue::get(EmitCastToVoidPtr(Ptr)),
- getContext().VoidPtrTy);
+ if (!UseOptimizedLibcall) {
+ // For non-optimized library calls, the size is the first parameter
+ Args.add(RValue::get(llvm::ConstantInt::get(SizeTy, Size)),
+ getContext().getSizeType());
+ }
+ // Atomic address is the first or second parameter
+ Args.add(RValue::get(EmitCastToVoidPtr(Ptr)), getContext().VoidPtrTy);
- const char* LibCallName;
- QualType RetTy = getContext().VoidTy;
+ std::string LibCallName;
+ QualType RetTy;
+ bool HaveRetTy = false;
switch (E->getOp()) {
// There is only one libcall for compare and exchange, because there is no
// optimisation benefit possible from a libcall version of a weak compare
// and exchange.
- // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // bool __atomic_compare_exchange(size_t size, void *mem, void *expected,
// void *desired, int success, int failure)
+ // bool __atomic_compare_exchange_N(T *mem, T *expected, T desired,
+ // int success, int failure)
case AtomicExpr::AO__c11_atomic_compare_exchange_weak:
case AtomicExpr::AO__c11_atomic_compare_exchange_strong:
case AtomicExpr::AO__atomic_compare_exchange:
case AtomicExpr::AO__atomic_compare_exchange_n:
LibCallName = "__atomic_compare_exchange";
RetTy = getContext().BoolTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
- getContext().VoidPtrTy);
- Args.add(RValue::get(EmitCastToVoidPtr(Val2)),
- getContext().VoidPtrTy);
- Args.add(RValue::get(Order),
- getContext().IntTy);
+ HaveRetTy = true;
+ Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
+ E->getExprLoc());
+ Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
// void __atomic_exchange(size_t size, void *mem, void *val, void *return,
// int order)
+ // T __atomic_exchange_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_exchange:
case AtomicExpr::AO__atomic_exchange_n:
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
- Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
- getContext().VoidPtrTy);
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
+ // void __atomic_store_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_store:
case AtomicExpr::AO__atomic_store:
case AtomicExpr::AO__atomic_store_n:
LibCallName = "__atomic_store";
- Args.add(RValue::get(EmitCastToVoidPtr(Val1)),
- getContext().VoidPtrTy);
+ RetTy = getContext().VoidTy;
+ HaveRetTy = true;
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
+ // T __atomic_load_N(T *mem, int order)
case AtomicExpr::AO__c11_atomic_load:
case AtomicExpr::AO__atomic_load:
case AtomicExpr::AO__atomic_load_n:
LibCallName = "__atomic_load";
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ break;
+ // T __atomic_fetch_add_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_fetch_add:
+ case AtomicExpr::AO__atomic_fetch_add:
+ LibCallName = "__atomic_fetch_add";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
+ break;
+ // T __atomic_fetch_and_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_fetch_and:
+ case AtomicExpr::AO__atomic_fetch_and:
+ LibCallName = "__atomic_fetch_and";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
+ break;
+ // T __atomic_fetch_or_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_fetch_or:
+ case AtomicExpr::AO__atomic_fetch_or:
+ LibCallName = "__atomic_fetch_or";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
+ break;
+ // T __atomic_fetch_sub_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_fetch_sub:
+ case AtomicExpr::AO__atomic_fetch_sub:
+ LibCallName = "__atomic_fetch_sub";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
+ break;
+ // T __atomic_fetch_xor_N(T *mem, T val, int order)
+ case AtomicExpr::AO__c11_atomic_fetch_xor:
+ case AtomicExpr::AO__atomic_fetch_xor:
+ LibCallName = "__atomic_fetch_xor";
+ AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
+ E->getExprLoc());
break;
default: return EmitUnsupportedRValue(E, "atomic library call");
}
+
+ // Optimized functions have the size in their name.
+ if (UseOptimizedLibcall)
+ LibCallName += "_" + llvm::utostr(Size);
+ // By default, assume we return a value of the atomic type.
+ if (!HaveRetTy) {
+ if (UseOptimizedLibcall) {
+ // Value is returned directly.
+ RetTy = MemTy;
+ } else {
+ // Value is returned through parameter before the order.
+ RetTy = getContext().VoidTy;
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
+ getContext().VoidPtrTy);
+ }
+ }
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
@@ -495,11 +590,11 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
- if (E->isCmpXChg())
+ if (!RetTy->isVoidType())
return Res;
if (E->getType()->isVoidType())
return RValue::get(0);
- return convertTempToRValue(Dest, E->getType());
+ return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -554,7 +649,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
}
if (E->getType()->isVoidType())
return RValue::get(0);
- return convertTempToRValue(OrigDest, E->getType());
+ return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -616,7 +711,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
Builder.SetInsertPoint(ContBB);
if (E->getType()->isVoidType())
return RValue::get(0);
- return convertTempToRValue(OrigDest, E->getType());
+ return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
}
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
@@ -628,48 +723,30 @@ llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
}
RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
- AggValueSlot resultSlot) const {
- if (EvaluationKind == TEK_Aggregate) {
- // Nothing to do if the result is ignored.
- if (resultSlot.isIgnored()) return resultSlot.asRValue();
-
- assert(resultSlot.getAddr() == addr || hasPadding());
-
- // In these cases, we should have emitted directly into the result slot.
- if (!hasPadding() || resultSlot.isValueOfAtomic())
- return resultSlot.asRValue();
-
- // Otherwise, fall into the common path.
- }
+ AggValueSlot resultSlot,
+ SourceLocation loc) const {
+ if (EvaluationKind == TEK_Aggregate)
+ return resultSlot.asRValue();
// Drill into the padding structure if we have one.
if (hasPadding())
addr = CGF.Builder.CreateStructGEP(addr, 0);
- // If we're emitting to an aggregate, copy into the result slot.
- if (EvaluationKind == TEK_Aggregate) {
- CGF.EmitAggregateCopy(resultSlot.getAddr(), addr, getValueType(),
- resultSlot.isVolatile());
- return resultSlot.asRValue();
- }
-
// Otherwise, just convert the temporary to an r-value using the
// normal conversion routine.
- return CGF.convertTempToRValue(addr, getValueType());
+ return CGF.convertTempToRValue(addr, getValueType(), loc);
}
/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
-RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
+RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
+ AggValueSlot resultSlot) {
AtomicInfo atomics(*this, src);
// Check whether we should use a library call.
if (atomics.shouldUseLibcall()) {
llvm::Value *tempAddr;
- if (resultSlot.isValueOfAtomic()) {
- assert(atomics.getEvaluationKind() == TEK_Aggregate);
- tempAddr = resultSlot.getPaddedAtomicAddr();
- } else if (!resultSlot.isIgnored() && !atomics.hasPadding()) {
+ if (!resultSlot.isIgnored()) {
assert(atomics.getEvaluationKind() == TEK_Aggregate);
tempAddr = resultSlot.getAddr();
} else {
@@ -690,7 +767,7 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
emitAtomicLibcall(*this, "__atomic_load", getContext().VoidTy, args);
// Produce the r-value.
- return atomics.convertTempToRValue(tempAddr, resultSlot);
+ return atomics.convertTempToRValue(tempAddr, resultSlot, loc);
}
// Okay, we're doing this natively.
@@ -733,16 +810,10 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
llvm::Value *temp;
bool tempIsVolatile = false;
CharUnits tempAlignment;
- if (atomics.getEvaluationKind() == TEK_Aggregate &&
- (!atomics.hasPadding() || resultSlot.isValueOfAtomic())) {
+ if (atomics.getEvaluationKind() == TEK_Aggregate) {
assert(!resultSlot.isIgnored());
- if (resultSlot.isValueOfAtomic()) {
- temp = resultSlot.getPaddedAtomicAddr();
- tempAlignment = atomics.getAtomicAlignment();
- } else {
- temp = resultSlot.getAddr();
- tempAlignment = atomics.getValueAlignment();
- }
+ temp = resultSlot.getAddr();
+ tempAlignment = atomics.getValueAlignment();
tempIsVolatile = resultSlot.isVolatile();
} else {
temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
@@ -754,7 +825,7 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, AggValueSlot resultSlot) {
Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
->setVolatile(tempIsVolatile);
- return atomics.convertTempToRValue(temp, resultSlot);
+ return atomics.convertTempToRValue(temp, resultSlot, loc);
}
@@ -812,8 +883,7 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
/// Note that the r-value is expected to be an r-value *of the atomic
/// type*; this means that for aggregate r-values, it should include
/// storage for any padding that was necessary.
-void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
- bool isInit) {
+void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
// If this is an aggregate r-value, it should agree in type except
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
@@ -910,13 +980,11 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
}
case TEK_Aggregate: {
- // Memset the buffer first if there's any possibility of
- // uninitialized internal bits.
- atomics.emitMemSetZeroIfNecessary(dest);
-
- // HACK: whether the initializer actually has an atomic type
- // doesn't really seem reliable right now.
+ // Fix up the destination if the initializer isn't an expression
+ // of atomic type.
+ bool Zeroed = false;
if (!init->getType()->isAtomicType()) {
+ Zeroed = atomics.emitMemSetZeroIfNecessary(dest);
dest = atomics.projectValue(dest);
}
@@ -924,7 +992,10 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AggValueSlot slot = AggValueSlot::forLValue(dest,
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
+ AggValueSlot::IsNotAliased,
+ Zeroed ? AggValueSlot::IsZeroed :
+ AggValueSlot::IsNotZeroed);
+
EmitAggExpr(init, slot);
return;
}
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index ded019e64ae8..692f9a0dd63f 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -63,14 +63,16 @@ static llvm::Constant *buildDisposeHelper(CodeGenModule &CGM,
/// buildBlockDescriptor is accessed from 5th field of the Block_literal
/// meta-data and contains stationary information about the block literal.
/// Its definition will have 4 (or optionally 6) words.
+/// \code
/// struct Block_descriptor {
/// unsigned long reserved;
/// unsigned long size; // size of Block_literal metadata in bytes.
/// void *copy_func_helper_decl; // optional copy helper.
/// void *destroy_func_decl; // optional destructor helper.
-/// void *block_method_encoding_address;//@encode for block literal signature.
+/// void *block_method_encoding_address; // @encode for block literal signature.
/// void *block_layout_info; // encoding of captured block variables.
/// };
+/// \endcode
static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
@@ -353,14 +355,9 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// First, 'this'.
if (block->capturesCXXThis()) {
- const DeclContext *DC = block->getDeclContext();
- for (; isa<BlockDecl>(DC); DC = cast<BlockDecl>(DC)->getDeclContext())
- ;
- QualType thisType;
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(DC))
- thisType = C.getPointerType(C.getRecordType(RD));
- else
- thisType = cast<CXXMethodDecl>(DC)->getThisType(C);
+ assert(CGF && CGF->CurFuncDecl && isa<CXXMethodDecl>(CGF->CurFuncDecl) &&
+ "Can't capture 'this' outside a method");
+ QualType thisType = cast<CXXMethodDecl>(CGF->CurFuncDecl)->getThisType(C);
llvm::Type *llvmType = CGM.getTypes().ConvertType(thisType);
std::pair<CharUnits,CharUnits> tinfo
@@ -837,7 +834,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
type->isBlockPointerType()) {
// Load the block and do a simple retain.
LValue srcLV = MakeAddrLValue(src, type, align);
- llvm::Value *value = EmitLoadOfScalar(srcLV);
+ llvm::Value *value = EmitLoadOfScalar(srcLV, SourceLocation());
value = EmitARCRetainNonBlock(value);
// Do a primitive store to the block field.
@@ -934,7 +931,7 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
}
-RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
+RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
const BlockPointerType *BPT =
E->getCallee()->getType()->getAs<BlockPointerType>();
@@ -1092,8 +1089,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
bool IsLambdaConversionToBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
- // Check if we should generate debug info for this block function.
- maybeInitializeDebugInfo();
CurGD = GD;
BlockInfo = &blockInfo;
@@ -1167,9 +1162,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
Alloca->setAlignment(Align);
// Set the DebugLocation to empty, so the store is recognized as a
// frame setup instruction by llvm::DwarfDebug::beginFunction().
- Builder.DisableDebugLocations();
+ NoLocation NL(*this, Builder);
Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
- Builder.EnableDebugLocations();
BlockPointerDbgLoc = Alloca;
}
@@ -1307,9 +1301,6 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
IdentifierInfo *II
= &CGM.getContext().Idents.get("__copy_helper_block_");
- // Check if we should generate debug info for this block helper function.
- maybeInitializeDebugInfo();
-
FunctionDecl *FD = FunctionDecl::Create(C,
C.getTranslationUnitDecl(),
SourceLocation(),
@@ -1317,7 +1308,10 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
SC_Static,
false,
false);
+ // Create a scope with an artificial location for the body of this function.
+ ArtificialLocation AL(*this, Builder);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+ AL.Emit();
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1479,9 +1473,6 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__destroy_helper_block_", &CGM.getModule());
- // Check if we should generate debug info for this block destroy function.
- maybeInitializeDebugInfo();
-
IdentifierInfo *II
= &CGM.getContext().Idents.get("__destroy_helper_block_");
@@ -1490,7 +1481,10 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
SourceLocation(), II, C.VoidTy, 0,
SC_Static,
false, false);
+ // Create a scope with an artificial location for the body of this function.
+ ArtificialLocation AL(*this, Builder);
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
+ AL.Emit();
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1781,8 +1775,6 @@ generateByrefCopyHelper(CodeGenFunction &CGF,
SC_Static,
false, false);
- // Initialize debug info if necessary.
- CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsCopy()) {
@@ -1854,8 +1846,6 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
SourceLocation(), II, R, 0,
SC_Static,
false, false);
- // Initialize debug info if necessary.
- CGF.maybeInitializeDebugInfo();
CGF.StartFunction(FD, R, Fn, FI, args, SourceLocation());
if (byrefInfo.needsDispose()) {
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index d18767897f39..7726ad309d8a 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -19,6 +19,7 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
@@ -165,7 +166,7 @@ static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
const CallExpr *E, llvm::Value *calleeValue) {
- return CGF.EmitCall(E->getCallee()->getType(), calleeValue,
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
}
@@ -408,8 +409,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
assert(CI);
uint64_t val = CI->getZExtValue();
CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
-
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, ResType);
+ // FIXME: Get right address space.
+ llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
}
case Builtin::BI__builtin_prefetch: {
@@ -602,6 +604,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BIalloca:
+ case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
@@ -1282,18 +1285,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BIsqrt:
case Builtin::BIsqrtf:
case Builtin::BIsqrtl: {
- // TODO: there is currently no set of optimizer flags
- // sufficient for us to rewrite sqrt to @llvm.sqrt.
- // -fmath-errno=0 is not good enough; we need finiteness.
- // We could probably precondition the call with an ult
- // against 0, but is that worth the complexity?
- break;
+ // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
+ // in finite- or unsafe-math mode (the intrinsic has different semantics
+ // for handling negative numbers compared to the library function, so
+ // -fmath-errno=0 is not enough).
+ if (!FD->hasAttr<ConstAttr>())
+ break;
+ if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
+ CGM.getCodeGenOpts().NoNaNsFPMath))
+ break;
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = Arg0->getType();
+ Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
+ return RValue::get(Builder.CreateCall(F, Arg0));
}
case Builtin::BIpow:
case Builtin::BIpowf:
case Builtin::BIpowl: {
- // Rewrite sqrt to intrinsic if allowed.
+ // Transform a call to pow* into a @llvm.pow.* intrinsic call.
if (!FD->hasAttr<ConstAttr>())
break;
Value *Base = EmitScalarExpr(E->getArg(0));
@@ -1301,6 +1311,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgType = Base->getType();
Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
return RValue::get(Builder.CreateCall2(F, Base, Exponent));
+ break;
}
case Builtin::BIfma:
@@ -1345,10 +1356,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
}
+ case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
+ case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
@@ -1382,12 +1395,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Intrinsic::ID IntrinsicId;
switch (BuiltinID) {
default: llvm_unreachable("Unknown multiprecision builtin id.");
+ case Builtin::BI__builtin_addcb:
case Builtin::BI__builtin_addcs:
case Builtin::BI__builtin_addc:
case Builtin::BI__builtin_addcl:
case Builtin::BI__builtin_addcll:
IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
break;
+ case Builtin::BI__builtin_subcb:
case Builtin::BI__builtin_subcs:
case Builtin::BI__builtin_subc:
case Builtin::BI__builtin_subcl:
@@ -1410,6 +1425,79 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CarryOutStore->setAlignment(CarryOutPtr.second);
return RValue::get(Sum2);
}
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow: {
+
+ // We translate all of these builtins directly to the relevant llvm IR node.
+
+ // Scalarize our inputs.
+ llvm::Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Y = EmitScalarExpr(E->getArg(1));
+ std::pair<llvm::Value *, unsigned> SumOutPtr =
+ EmitPointerWithAlignment(E->getArg(2));
+
+ // Decide which of the overflow intrinsics we are lowering to:
+ llvm::Intrinsic::ID IntrinsicId;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unknown security overflow builtin id.");
+ case Builtin::BI__builtin_uadd_overflow:
+ case Builtin::BI__builtin_uaddl_overflow:
+ case Builtin::BI__builtin_uaddll_overflow:
+ IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_usub_overflow:
+ case Builtin::BI__builtin_usubl_overflow:
+ case Builtin::BI__builtin_usubll_overflow:
+ IntrinsicId = llvm::Intrinsic::usub_with_overflow;
+ break;
+ case Builtin::BI__builtin_umul_overflow:
+ case Builtin::BI__builtin_umull_overflow:
+ case Builtin::BI__builtin_umulll_overflow:
+ IntrinsicId = llvm::Intrinsic::umul_with_overflow;
+ break;
+ case Builtin::BI__builtin_sadd_overflow:
+ case Builtin::BI__builtin_saddl_overflow:
+ case Builtin::BI__builtin_saddll_overflow:
+ IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
+ break;
+ case Builtin::BI__builtin_ssub_overflow:
+ case Builtin::BI__builtin_ssubl_overflow:
+ case Builtin::BI__builtin_ssubll_overflow:
+ IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
+ break;
+ case Builtin::BI__builtin_smul_overflow:
+ case Builtin::BI__builtin_smull_overflow:
+ case Builtin::BI__builtin_smulll_overflow:
+ IntrinsicId = llvm::Intrinsic::smul_with_overflow;
+ break;
+ }
+
+
+ llvm::Value *Carry;
+ llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
+ llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
+ SumOutStore->setAlignment(SumOutPtr.second);
+
+ return RValue::get(Carry);
+ }
+ case Builtin::BI__builtin_addressof:
+ return RValue::get(EmitLValue(E->getArg(0)).getAddress());
case Builtin::BI__noop:
return RValue::get(0);
}
@@ -1512,6 +1600,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
return EmitX86BuiltinExpr(BuiltinID, E);
case llvm::Triple::ppc:
case llvm::Triple::ppc64:
+ case llvm::Triple::ppc64le:
return EmitPPCBuiltinExpr(BuiltinID, E);
default:
return 0;
@@ -1519,24 +1608,28 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
}
static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
- NeonTypeFlags TypeFlags) {
+ NeonTypeFlags TypeFlags,
+ bool V1Ty=false) {
int IsQuad = TypeFlags.isQuad();
switch (TypeFlags.getEltType()) {
case NeonTypeFlags::Int8:
case NeonTypeFlags::Poly8:
- return llvm::VectorType::get(CGF->Int8Ty, 8 << IsQuad);
+ return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
case NeonTypeFlags::Int16:
case NeonTypeFlags::Poly16:
case NeonTypeFlags::Float16:
- return llvm::VectorType::get(CGF->Int16Ty, 4 << IsQuad);
+ return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
case NeonTypeFlags::Int32:
- return llvm::VectorType::get(CGF->Int32Ty, 2 << IsQuad);
+ return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
case NeonTypeFlags::Int64:
- return llvm::VectorType::get(CGF->Int64Ty, 1 << IsQuad);
+ case NeonTypeFlags::Poly64:
+ return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
case NeonTypeFlags::Float32:
- return llvm::VectorType::get(CGF->FloatTy, 2 << IsQuad);
+ return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
+ case NeonTypeFlags::Float64:
+ return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
}
- llvm_unreachable("Invalid NeonTypeFlags element type!");
+ llvm_unreachable("Unknown vector element type!");
}
Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
@@ -1568,6 +1661,39 @@ Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}
+// \brief Right-shift a vector by a constant.
+Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
+ llvm::Type *Ty, bool usgn,
+ const char *name) {
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+
+ int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
+ int EltSize = VTy->getScalarSizeInBits();
+
+ Vec = Builder.CreateBitCast(Vec, Ty);
+
+ // lshr/ashr are undefined when the shift amount is equal to the vector
+ // element size.
+ if (ShiftAmt == EltSize) {
+ if (usgn) {
+ // Right-shifting an unsigned value by its size yields 0.
+ llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
+ return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
+ } else {
+ // Right-shifting a signed value by its size is equivalent
+ // to a shift of size-1.
+ --ShiftAmt;
+ Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
+ }
+ }
+
+ Shift = EmitNeonShiftVector(Shift, Ty, false);
+ if (usgn)
+ return Builder.CreateLShr(Vec, Shift, name);
+ else
+ return Builder.CreateAShr(Vec, Shift, name);
+}
+
/// GetPointeeAlignment - Given an expression with a pointer type, find the
/// alignment of the type referenced by the pointer. Skip over implicit
/// casts.
@@ -1623,8 +1749,1140 @@ CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
return std::make_pair(EmitScalarExpr(Addr), Align);
}
+static Value *EmitAArch64ScalarBuiltinExpr(CodeGenFunction &CGF,
+ unsigned BuiltinID,
+ const CallExpr *E) {
+ unsigned int Int = 0;
+ // Scalar result generated across vectors
+ bool AcrossVec = false;
+ // Extend element of one-element vector
+ bool ExtendEle = false;
+ bool OverloadInt = false;
+ bool OverloadCmpInt = false;
+ bool IsFpCmpZInt = false;
+ bool OverloadCvtInt = false;
+ bool OverloadWideInt = false;
+ bool OverloadNarrowInt = false;
+ const char *s = NULL;
+
+ SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
+ }
+
+ // AArch64 scalar builtins are not overloaded, they do not have an extra
+ // argument that specifies the vector type, need to handle each case.
+ switch (BuiltinID) {
+ default: break;
+ case AArch64::BI__builtin_neon_vdups_lane_f32:
+ case AArch64::BI__builtin_neon_vdupd_lane_f64:
+ case AArch64::BI__builtin_neon_vdups_laneq_f32:
+ case AArch64::BI__builtin_neon_vdupd_laneq_f64: {
+ return CGF.Builder.CreateExtractElement(Ops[0], Ops[1], "vdup_lane");
+ }
+ case AArch64::BI__builtin_neon_vdupb_lane_i8:
+ case AArch64::BI__builtin_neon_vduph_lane_i16:
+ case AArch64::BI__builtin_neon_vdups_lane_i32:
+ case AArch64::BI__builtin_neon_vdupd_lane_i64:
+ case AArch64::BI__builtin_neon_vdupb_laneq_i8:
+ case AArch64::BI__builtin_neon_vduph_laneq_i16:
+ case AArch64::BI__builtin_neon_vdups_laneq_i32:
+ case AArch64::BI__builtin_neon_vdupd_laneq_i64: {
+ // The backend treats Neon scalar types as v1ix types
+ // So we want to dup lane from any vector to v1ix vector
+ // with shufflevector
+ s = "vdup_lane";
+ Value* SV = llvm::ConstantVector::getSplat(1, cast<ConstantInt>(Ops[1]));
+ Value *Result = CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], SV, s);
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ // AArch64 intrinsic one-element vector type cast to
+ // scalar type expected by the builtin
+ return CGF.Builder.CreateBitCast(Result, Ty, s);
+ }
+ case AArch64::BI__builtin_neon_vqdmlalh_lane_s16 :
+ case AArch64::BI__builtin_neon_vqdmlalh_laneq_s16 :
+ case AArch64::BI__builtin_neon_vqdmlals_lane_s32 :
+ case AArch64::BI__builtin_neon_vqdmlals_laneq_s32 :
+ case AArch64::BI__builtin_neon_vqdmlslh_lane_s16 :
+ case AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 :
+ case AArch64::BI__builtin_neon_vqdmlsls_lane_s32 :
+ case AArch64::BI__builtin_neon_vqdmlsls_laneq_s32 : {
+ Int = Intrinsic::arm_neon_vqadds;
+ if (BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_lane_s16 ||
+ BuiltinID == AArch64::BI__builtin_neon_vqdmlslh_laneq_s16 ||
+ BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_lane_s32 ||
+ BuiltinID == AArch64::BI__builtin_neon_vqdmlsls_laneq_s32) {
+ Int = Intrinsic::arm_neon_vqsubs;
+ }
+ // create vqdmull call with b * c[i]
+ llvm::Type *Ty = CGF.ConvertType(E->getArg(1)->getType());
+ llvm::VectorType *OpVTy = llvm::VectorType::get(Ty, 1);
+ Ty = CGF.ConvertType(E->getArg(0)->getType());
+ llvm::VectorType *ResVTy = llvm::VectorType::get(Ty, 1);
+ Value *F = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, ResVTy);
+ Value *V = UndefValue::get(OpVTy);
+ llvm::Constant *CI = ConstantInt::get(CGF.Int32Ty, 0);
+ SmallVector<Value *, 2> MulOps;
+ MulOps.push_back(Ops[1]);
+ MulOps.push_back(Ops[2]);
+ MulOps[0] = CGF.Builder.CreateInsertElement(V, MulOps[0], CI);
+ MulOps[1] = CGF.Builder.CreateExtractElement(MulOps[1], Ops[3], "extract");
+ MulOps[1] = CGF.Builder.CreateInsertElement(V, MulOps[1], CI);
+ Value *MulRes = CGF.Builder.CreateCall2(F, MulOps[0], MulOps[1]);
+ // create vqadds call with a +/- vqdmull result
+ F = CGF.CGM.getIntrinsic(Int, ResVTy);
+ SmallVector<Value *, 2> AddOps;
+ AddOps.push_back(Ops[0]);
+ AddOps.push_back(MulRes);
+ V = UndefValue::get(ResVTy);
+ AddOps[0] = CGF.Builder.CreateInsertElement(V, AddOps[0], CI);
+ Value *AddRes = CGF.Builder.CreateCall2(F, AddOps[0], AddOps[1]);
+ return CGF.Builder.CreateBitCast(AddRes, Ty);
+ }
+ case AArch64::BI__builtin_neon_vfmas_lane_f32:
+ case AArch64::BI__builtin_neon_vfmas_laneq_f32:
+ case AArch64::BI__builtin_neon_vfmad_lane_f64:
+ case AArch64::BI__builtin_neon_vfmad_laneq_f64: {
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ Value *F = CGF.CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[2] = CGF.Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+ return CGF.Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+ }
+ // Scalar Floating-point Multiply Extended
+ case AArch64::BI__builtin_neon_vmulxs_f32:
+ case AArch64::BI__builtin_neon_vmulxd_f64: {
+ Int = Intrinsic::aarch64_neon_vmulx;
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ return CGF.EmitNeonCall(CGF.CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
+ }
+ case AArch64::BI__builtin_neon_vmul_n_f64: {
+ // v1f64 vmul_n_f64 should be mapped to Neon scalar mul lane
+ llvm::Type *VTy = GetNeonType(&CGF,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, false));
+ Ops[0] = CGF.Builder.CreateBitCast(Ops[0], VTy);
+ llvm::Value *Idx = llvm::ConstantInt::get(CGF.Int32Ty, 0);
+ Ops[0] = CGF.Builder.CreateExtractElement(Ops[0], Idx, "extract");
+ Value *Result = CGF.Builder.CreateFMul(Ops[0], Ops[1]);
+ return CGF.Builder.CreateBitCast(Result, VTy);
+ }
+ case AArch64::BI__builtin_neon_vget_lane_i8:
+ case AArch64::BI__builtin_neon_vget_lane_i16:
+ case AArch64::BI__builtin_neon_vget_lane_i32:
+ case AArch64::BI__builtin_neon_vget_lane_i64:
+ case AArch64::BI__builtin_neon_vget_lane_f32:
+ case AArch64::BI__builtin_neon_vget_lane_f64:
+ case AArch64::BI__builtin_neon_vgetq_lane_i8:
+ case AArch64::BI__builtin_neon_vgetq_lane_i16:
+ case AArch64::BI__builtin_neon_vgetq_lane_i32:
+ case AArch64::BI__builtin_neon_vgetq_lane_i64:
+ case AArch64::BI__builtin_neon_vgetq_lane_f32:
+ case AArch64::BI__builtin_neon_vgetq_lane_f64:
+ return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vget_lane_i8, E);
+ case AArch64::BI__builtin_neon_vset_lane_i8:
+ case AArch64::BI__builtin_neon_vset_lane_i16:
+ case AArch64::BI__builtin_neon_vset_lane_i32:
+ case AArch64::BI__builtin_neon_vset_lane_i64:
+ case AArch64::BI__builtin_neon_vset_lane_f32:
+ case AArch64::BI__builtin_neon_vset_lane_f64:
+ case AArch64::BI__builtin_neon_vsetq_lane_i8:
+ case AArch64::BI__builtin_neon_vsetq_lane_i16:
+ case AArch64::BI__builtin_neon_vsetq_lane_i32:
+ case AArch64::BI__builtin_neon_vsetq_lane_i64:
+ case AArch64::BI__builtin_neon_vsetq_lane_f32:
+ case AArch64::BI__builtin_neon_vsetq_lane_f64:
+ return CGF.EmitARMBuiltinExpr(ARM::BI__builtin_neon_vset_lane_i8, E);
+ // Crypto
+ case AArch64::BI__builtin_neon_vsha1h_u32:
+ Int = Intrinsic::arm_neon_sha1h;
+ s = "sha1h"; OverloadInt = true; break;
+ case AArch64::BI__builtin_neon_vsha1cq_u32:
+ Int = Intrinsic::aarch64_neon_sha1c;
+ s = "sha1c"; break;
+ case AArch64::BI__builtin_neon_vsha1pq_u32:
+ Int = Intrinsic::aarch64_neon_sha1p;
+ s = "sha1p"; break;
+ case AArch64::BI__builtin_neon_vsha1mq_u32:
+ Int = Intrinsic::aarch64_neon_sha1m;
+ s = "sha1m"; break;
+ // Scalar Add
+ case AArch64::BI__builtin_neon_vaddd_s64:
+ Int = Intrinsic::aarch64_neon_vaddds;
+ s = "vaddds"; break;
+ case AArch64::BI__builtin_neon_vaddd_u64:
+ Int = Intrinsic::aarch64_neon_vadddu;
+ s = "vadddu"; break;
+ // Scalar Sub
+ case AArch64::BI__builtin_neon_vsubd_s64:
+ Int = Intrinsic::aarch64_neon_vsubds;
+ s = "vsubds"; break;
+ case AArch64::BI__builtin_neon_vsubd_u64:
+ Int = Intrinsic::aarch64_neon_vsubdu;
+ s = "vsubdu"; break;
+ // Scalar Saturating Add
+ case AArch64::BI__builtin_neon_vqaddb_s8:
+ case AArch64::BI__builtin_neon_vqaddh_s16:
+ case AArch64::BI__builtin_neon_vqadds_s32:
+ case AArch64::BI__builtin_neon_vqaddd_s64:
+ Int = Intrinsic::arm_neon_vqadds;
+ s = "vqadds"; OverloadInt = true; break;
+ case AArch64::BI__builtin_neon_vqaddb_u8:
+ case AArch64::BI__builtin_neon_vqaddh_u16:
+ case AArch64::BI__builtin_neon_vqadds_u32:
+ case AArch64::BI__builtin_neon_vqaddd_u64:
+ Int = Intrinsic::arm_neon_vqaddu;
+ s = "vqaddu"; OverloadInt = true; break;
+ // Scalar Saturating Sub
+ case AArch64::BI__builtin_neon_vqsubb_s8:
+ case AArch64::BI__builtin_neon_vqsubh_s16:
+ case AArch64::BI__builtin_neon_vqsubs_s32:
+ case AArch64::BI__builtin_neon_vqsubd_s64:
+ Int = Intrinsic::arm_neon_vqsubs;
+ s = "vqsubs"; OverloadInt = true; break;
+ case AArch64::BI__builtin_neon_vqsubb_u8:
+ case AArch64::BI__builtin_neon_vqsubh_u16:
+ case AArch64::BI__builtin_neon_vqsubs_u32:
+ case AArch64::BI__builtin_neon_vqsubd_u64:
+ Int = Intrinsic::arm_neon_vqsubu;
+ s = "vqsubu"; OverloadInt = true; break;
+ // Scalar Shift Left
+ case AArch64::BI__builtin_neon_vshld_s64:
+ Int = Intrinsic::aarch64_neon_vshlds;
+ s = "vshlds"; break;
+ case AArch64::BI__builtin_neon_vshld_u64:
+ Int = Intrinsic::aarch64_neon_vshldu;
+ s = "vshldu"; break;
+ // Scalar Saturating Shift Left
+ case AArch64::BI__builtin_neon_vqshlb_s8:
+ case AArch64::BI__builtin_neon_vqshlh_s16:
+ case AArch64::BI__builtin_neon_vqshls_s32:
+ case AArch64::BI__builtin_neon_vqshld_s64:
+ Int = Intrinsic::aarch64_neon_vqshls;
+ s = "vqshls"; OverloadInt = true; break;
+ case AArch64::BI__builtin_neon_vqshlb_u8:
+ case AArch64::BI__builtin_neon_vqshlh_u16:
+ case AArch64::BI__builtin_neon_vqshls_u32:
+ case AArch64::BI__builtin_neon_vqshld_u64:
+ Int = Intrinsic::aarch64_neon_vqshlu;
+ s = "vqshlu"; OverloadInt = true; break;
+ // Scalar Rounding Shift Left
+ case AArch64::BI__builtin_neon_vrshld_s64:
+ Int = Intrinsic::aarch64_neon_vrshlds;
+ s = "vrshlds"; break;
+ case AArch64::BI__builtin_neon_vrshld_u64:
+ Int = Intrinsic::aarch64_neon_vrshldu;
+ s = "vrshldu"; break;
+ // Scalar Saturating Rounding Shift Left
+ case AArch64::BI__builtin_neon_vqrshlb_s8:
+ case AArch64::BI__builtin_neon_vqrshlh_s16:
+ case AArch64::BI__builtin_neon_vqrshls_s32:
+ case AArch64::BI__builtin_neon_vqrshld_s64:
+ Int = Intrinsic::aarch64_neon_vqrshls;
+ s = "vqrshls"; OverloadInt = true; break;
+ case AArch64::BI__builtin_neon_vqrshlb_u8:
+ case AArch64::BI__builtin_neon_vqrshlh_u16:
+ case AArch64::BI__builtin_neon_vqrshls_u32:
+ case AArch64::BI__builtin_neon_vqrshld_u64:
+ Int = Intrinsic::aarch64_neon_vqrshlu;
+ s = "vqrshlu"; OverloadInt = true; break;
+ // Scalar Reduce Pairwise Add
+ case AArch64::BI__builtin_neon_vpaddd_s64:
+ case AArch64::BI__builtin_neon_vpaddd_u64:
+ Int = Intrinsic::aarch64_neon_vpadd; s = "vpadd";
+ break;
+ case AArch64::BI__builtin_neon_vpadds_f32:
+ Int = Intrinsic::aarch64_neon_vpfadd; s = "vpfadd";
+ break;
+ case AArch64::BI__builtin_neon_vpaddd_f64:
+ Int = Intrinsic::aarch64_neon_vpfaddq; s = "vpfaddq";
+ break;
+ // Scalar Reduce Pairwise Floating Point Max
+ case AArch64::BI__builtin_neon_vpmaxs_f32:
+ Int = Intrinsic::aarch64_neon_vpmax; s = "vpmax";
+ break;
+ case AArch64::BI__builtin_neon_vpmaxqd_f64:
+ Int = Intrinsic::aarch64_neon_vpmaxq; s = "vpmaxq";
+ break;
+ // Scalar Reduce Pairwise Floating Point Min
+ case AArch64::BI__builtin_neon_vpmins_f32:
+ Int = Intrinsic::aarch64_neon_vpmin; s = "vpmin";
+ break;
+ case AArch64::BI__builtin_neon_vpminqd_f64:
+ Int = Intrinsic::aarch64_neon_vpminq; s = "vpminq";
+ break;
+ // Scalar Reduce Pairwise Floating Point Maxnm
+ case AArch64::BI__builtin_neon_vpmaxnms_f32:
+ Int = Intrinsic::aarch64_neon_vpfmaxnm; s = "vpfmaxnm";
+ break;
+ case AArch64::BI__builtin_neon_vpmaxnmqd_f64:
+ Int = Intrinsic::aarch64_neon_vpfmaxnmq; s = "vpfmaxnmq";
+ break;
+ // Scalar Reduce Pairwise Floating Point Minnm
+ case AArch64::BI__builtin_neon_vpminnms_f32:
+ Int = Intrinsic::aarch64_neon_vpfminnm; s = "vpfminnm";
+ break;
+ case AArch64::BI__builtin_neon_vpminnmqd_f64:
+ Int = Intrinsic::aarch64_neon_vpfminnmq; s = "vpfminnmq";
+ break;
+ // The following intrinsics generate scalar results across vectors (AcrossVec)
+ case AArch64::BI__builtin_neon_vaddlv_s8:
+ case AArch64::BI__builtin_neon_vaddlv_s16:
+ case AArch64::BI__builtin_neon_vaddlvq_s8:
+ case AArch64::BI__builtin_neon_vaddlvq_s16:
+ case AArch64::BI__builtin_neon_vaddlvq_s32:
+ Int = Intrinsic::aarch64_neon_saddlv;
+ AcrossVec = true; ExtendEle = true; s = "saddlv"; break;
+ case AArch64::BI__builtin_neon_vaddlv_u8:
+ case AArch64::BI__builtin_neon_vaddlv_u16:
+ case AArch64::BI__builtin_neon_vaddlvq_u8:
+ case AArch64::BI__builtin_neon_vaddlvq_u16:
+ case AArch64::BI__builtin_neon_vaddlvq_u32:
+ Int = Intrinsic::aarch64_neon_uaddlv;
+ AcrossVec = true; ExtendEle = true; s = "uaddlv"; break;
+ case AArch64::BI__builtin_neon_vmaxv_s8:
+ case AArch64::BI__builtin_neon_vmaxv_s16:
+ case AArch64::BI__builtin_neon_vmaxvq_s8:
+ case AArch64::BI__builtin_neon_vmaxvq_s16:
+ case AArch64::BI__builtin_neon_vmaxvq_s32:
+ Int = Intrinsic::aarch64_neon_smaxv;
+ AcrossVec = true; ExtendEle = false; s = "smaxv"; break;
+ case AArch64::BI__builtin_neon_vmaxv_u8:
+ case AArch64::BI__builtin_neon_vmaxv_u16:
+ case AArch64::BI__builtin_neon_vmaxvq_u8:
+ case AArch64::BI__builtin_neon_vmaxvq_u16:
+ case AArch64::BI__builtin_neon_vmaxvq_u32:
+ Int = Intrinsic::aarch64_neon_umaxv;
+ AcrossVec = true; ExtendEle = false; s = "umaxv"; break;
+ case AArch64::BI__builtin_neon_vminv_s8:
+ case AArch64::BI__builtin_neon_vminv_s16:
+ case AArch64::BI__builtin_neon_vminvq_s8:
+ case AArch64::BI__builtin_neon_vminvq_s16:
+ case AArch64::BI__builtin_neon_vminvq_s32:
+ Int = Intrinsic::aarch64_neon_sminv;
+ AcrossVec = true; ExtendEle = false; s = "sminv"; break;
+ case AArch64::BI__builtin_neon_vminv_u8:
+ case AArch64::BI__builtin_neon_vminv_u16:
+ case AArch64::BI__builtin_neon_vminvq_u8:
+ case AArch64::BI__builtin_neon_vminvq_u16:
+ case AArch64::BI__builtin_neon_vminvq_u32:
+ Int = Intrinsic::aarch64_neon_uminv;
+ AcrossVec = true; ExtendEle = false; s = "uminv"; break;
+ case AArch64::BI__builtin_neon_vaddv_s8:
+ case AArch64::BI__builtin_neon_vaddv_s16:
+ case AArch64::BI__builtin_neon_vaddvq_s8:
+ case AArch64::BI__builtin_neon_vaddvq_s16:
+ case AArch64::BI__builtin_neon_vaddvq_s32:
+ case AArch64::BI__builtin_neon_vaddvq_s64:
+ case AArch64::BI__builtin_neon_vaddv_u8:
+ case AArch64::BI__builtin_neon_vaddv_u16:
+ case AArch64::BI__builtin_neon_vaddvq_u8:
+ case AArch64::BI__builtin_neon_vaddvq_u16:
+ case AArch64::BI__builtin_neon_vaddvq_u32:
+ case AArch64::BI__builtin_neon_vaddvq_u64:
+ case AArch64::BI__builtin_neon_vaddv_f32:
+ case AArch64::BI__builtin_neon_vaddvq_f32:
+ case AArch64::BI__builtin_neon_vaddvq_f64:
+ Int = Intrinsic::aarch64_neon_vaddv;
+ AcrossVec = true; ExtendEle = false; s = "vaddv"; break;
+ case AArch64::BI__builtin_neon_vmaxv_f32:
+ case AArch64::BI__builtin_neon_vmaxvq_f32:
+ case AArch64::BI__builtin_neon_vmaxvq_f64:
+ Int = Intrinsic::aarch64_neon_vmaxv;
+ AcrossVec = true; ExtendEle = false; s = "vmaxv"; break;
+ case AArch64::BI__builtin_neon_vminv_f32:
+ case AArch64::BI__builtin_neon_vminvq_f32:
+ case AArch64::BI__builtin_neon_vminvq_f64:
+ Int = Intrinsic::aarch64_neon_vminv;
+ AcrossVec = true; ExtendEle = false; s = "vminv"; break;
+ case AArch64::BI__builtin_neon_vmaxnmv_f32:
+ case AArch64::BI__builtin_neon_vmaxnmvq_f32:
+ case AArch64::BI__builtin_neon_vmaxnmvq_f64:
+ Int = Intrinsic::aarch64_neon_vmaxnmv;
+ AcrossVec = true; ExtendEle = false; s = "vmaxnmv"; break;
+ case AArch64::BI__builtin_neon_vminnmv_f32:
+ case AArch64::BI__builtin_neon_vminnmvq_f32:
+ case AArch64::BI__builtin_neon_vminnmvq_f64:
+ Int = Intrinsic::aarch64_neon_vminnmv;
+ AcrossVec = true; ExtendEle = false; s = "vminnmv"; break;
+ // Scalar Integer Saturating Doubling Multiply Half High
+ case AArch64::BI__builtin_neon_vqdmulhh_s16:
+ case AArch64::BI__builtin_neon_vqdmulhs_s32:
+ Int = Intrinsic::arm_neon_vqdmulh;
+ s = "vqdmulh"; OverloadInt = true; break;
+ // Scalar Integer Saturating Rounding Doubling Multiply Half High
+ case AArch64::BI__builtin_neon_vqrdmulhh_s16:
+ case AArch64::BI__builtin_neon_vqrdmulhs_s32:
+ Int = Intrinsic::arm_neon_vqrdmulh;
+ s = "vqrdmulh"; OverloadInt = true; break;
+  // Scalar Floating-point Reciprocal Step
+ case AArch64::BI__builtin_neon_vrecpss_f32:
+ case AArch64::BI__builtin_neon_vrecpsd_f64:
+ Int = Intrinsic::arm_neon_vrecps;
+ s = "vrecps"; OverloadInt = true; break;
+ // Scalar Floating-point Reciprocal Square Root Step
+ case AArch64::BI__builtin_neon_vrsqrtss_f32:
+ case AArch64::BI__builtin_neon_vrsqrtsd_f64:
+ Int = Intrinsic::arm_neon_vrsqrts;
+ s = "vrsqrts"; OverloadInt = true; break;
+ // Scalar Signed Integer Convert To Floating-point
+ case AArch64::BI__builtin_neon_vcvts_f32_s32:
+ Int = Intrinsic::aarch64_neon_vcvtf32_s32,
+ s = "vcvtf"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_f64_s64:
+ Int = Intrinsic::aarch64_neon_vcvtf64_s64,
+ s = "vcvtf"; OverloadInt = false; break;
+ // Scalar Unsigned Integer Convert To Floating-point
+ case AArch64::BI__builtin_neon_vcvts_f32_u32:
+ Int = Intrinsic::aarch64_neon_vcvtf32_u32,
+ s = "vcvtf"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_f64_u64:
+ Int = Intrinsic::aarch64_neon_vcvtf64_u64,
+ s = "vcvtf"; OverloadInt = false; break;
+ // Scalar Floating-point Converts
+ case AArch64::BI__builtin_neon_vcvtxd_f32_f64:
+ Int = Intrinsic::aarch64_neon_fcvtxn;
+ s = "vcvtxn"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtas_s32_f32:
+ case AArch64::BI__builtin_neon_vcvtad_s64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtas;
+ s = "vcvtas"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtas_u32_f32:
+ case AArch64::BI__builtin_neon_vcvtad_u64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtau;
+ s = "vcvtau"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtms_s32_f32:
+ case AArch64::BI__builtin_neon_vcvtmd_s64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtms;
+ s = "vcvtms"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtms_u32_f32:
+ case AArch64::BI__builtin_neon_vcvtmd_u64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtmu;
+ s = "vcvtmu"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtns_s32_f32:
+ case AArch64::BI__builtin_neon_vcvtnd_s64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtns;
+ s = "vcvtns"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtns_u32_f32:
+ case AArch64::BI__builtin_neon_vcvtnd_u64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtnu;
+ s = "vcvtnu"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtps_s32_f32:
+ case AArch64::BI__builtin_neon_vcvtpd_s64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtps;
+ s = "vcvtps"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvtps_u32_f32:
+ case AArch64::BI__builtin_neon_vcvtpd_u64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtpu;
+ s = "vcvtpu"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvts_s32_f32:
+ case AArch64::BI__builtin_neon_vcvtd_s64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtzs;
+ s = "vcvtzs"; OverloadCvtInt = true; break;
+ case AArch64::BI__builtin_neon_vcvts_u32_f32:
+ case AArch64::BI__builtin_neon_vcvtd_u64_f64:
+ Int = Intrinsic::aarch64_neon_fcvtzu;
+ s = "vcvtzu"; OverloadCvtInt = true; break;
+ // Scalar Floating-point Reciprocal Estimate
+ case AArch64::BI__builtin_neon_vrecpes_f32:
+ case AArch64::BI__builtin_neon_vrecped_f64:
+ Int = Intrinsic::arm_neon_vrecpe;
+ s = "vrecpe"; OverloadInt = true; break;
+ // Scalar Floating-point Reciprocal Exponent
+ case AArch64::BI__builtin_neon_vrecpxs_f32:
+ case AArch64::BI__builtin_neon_vrecpxd_f64:
+ Int = Intrinsic::aarch64_neon_vrecpx;
+ s = "vrecpx"; OverloadInt = true; break;
+ // Scalar Floating-point Reciprocal Square Root Estimate
+ case AArch64::BI__builtin_neon_vrsqrtes_f32:
+ case AArch64::BI__builtin_neon_vrsqrted_f64:
+ Int = Intrinsic::arm_neon_vrsqrte;
+ s = "vrsqrte"; OverloadInt = true; break;
+ // Scalar Compare Equal
+ case AArch64::BI__builtin_neon_vceqd_s64:
+ case AArch64::BI__builtin_neon_vceqd_u64:
+ Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+ OverloadCmpInt = true; break;
+ // Scalar Compare Equal To Zero
+ case AArch64::BI__builtin_neon_vceqzd_s64:
+ case AArch64::BI__builtin_neon_vceqzd_u64:
+ Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
+ OverloadCmpInt = true; break;
+ // Scalar Compare Greater Than or Equal
+ case AArch64::BI__builtin_neon_vcged_s64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ OverloadCmpInt = true; break;
+ case AArch64::BI__builtin_neon_vcged_u64:
+ Int = Intrinsic::aarch64_neon_vchs; s = "vcge";
+ OverloadCmpInt = true; break;
+ // Scalar Compare Greater Than or Equal To Zero
+ case AArch64::BI__builtin_neon_vcgezd_s64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
+ OverloadCmpInt = true; break;
+ // Scalar Compare Greater Than
+ case AArch64::BI__builtin_neon_vcgtd_s64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ OverloadCmpInt = true; break;
+ case AArch64::BI__builtin_neon_vcgtd_u64:
+ Int = Intrinsic::aarch64_neon_vchi; s = "vcgt";
+ OverloadCmpInt = true; break;
+ // Scalar Compare Greater Than Zero
+ case AArch64::BI__builtin_neon_vcgtzd_s64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
+ OverloadCmpInt = true; break;
+ // Scalar Compare Less Than or Equal
+ case AArch64::BI__builtin_neon_vcled_s64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ case AArch64::BI__builtin_neon_vcled_u64:
+ Int = Intrinsic::aarch64_neon_vchs; s = "vchs";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ // Scalar Compare Less Than or Equal To Zero
+ case AArch64::BI__builtin_neon_vclezd_s64:
+ Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
+ OverloadCmpInt = true; break;
+ // Scalar Compare Less Than
+ case AArch64::BI__builtin_neon_vcltd_s64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ case AArch64::BI__builtin_neon_vcltd_u64:
+ Int = Intrinsic::aarch64_neon_vchi; s = "vchi";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ // Scalar Compare Less Than Zero
+ case AArch64::BI__builtin_neon_vcltzd_s64:
+ Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(Ops[0]->getType()));
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Equal
+ case AArch64::BI__builtin_neon_vceqs_f32:
+ case AArch64::BI__builtin_neon_vceqd_f64:
+ Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Equal To Zero
+ case AArch64::BI__builtin_neon_vceqzs_f32:
+ case AArch64::BI__builtin_neon_vceqzd_f64:
+ Int = Intrinsic::aarch64_neon_vceq; s = "vceq";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
+ IsFpCmpZInt = true;
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Greater Than Or Equal
+ case AArch64::BI__builtin_neon_vcges_f32:
+ case AArch64::BI__builtin_neon_vcged_f64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Greater Than Or Equal To Zero
+ case AArch64::BI__builtin_neon_vcgezs_f32:
+ case AArch64::BI__builtin_neon_vcgezd_f64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
+ IsFpCmpZInt = true;
+ OverloadCmpInt = true; break;
+  // Scalar Floating-point Compare Greater Than
+ case AArch64::BI__builtin_neon_vcgts_f32:
+ case AArch64::BI__builtin_neon_vcgtd_f64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ OverloadCmpInt = true; break;
+  // Scalar Floating-point Compare Greater Than Zero
+ case AArch64::BI__builtin_neon_vcgtzs_f32:
+ case AArch64::BI__builtin_neon_vcgtzd_f64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
+ IsFpCmpZInt = true;
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Less Than or Equal
+ case AArch64::BI__builtin_neon_vcles_f32:
+ case AArch64::BI__builtin_neon_vcled_f64:
+ Int = Intrinsic::aarch64_neon_vcge; s = "vcge";
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Compare Less Than Or Equal To Zero
+ case AArch64::BI__builtin_neon_vclezs_f32:
+ case AArch64::BI__builtin_neon_vclezd_f64:
+ Int = Intrinsic::aarch64_neon_vclez; s = "vcle";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
+ IsFpCmpZInt = true;
+ OverloadCmpInt = true; break;
+  // Scalar Floating-point Compare Less Than
+ case AArch64::BI__builtin_neon_vclts_f32:
+ case AArch64::BI__builtin_neon_vcltd_f64:
+ Int = Intrinsic::aarch64_neon_vcgt; s = "vcgt";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ // Scalar Floating-point Compare Less Than Zero
+ case AArch64::BI__builtin_neon_vcltzs_f32:
+ case AArch64::BI__builtin_neon_vcltzd_f64:
+ Int = Intrinsic::aarch64_neon_vcltz; s = "vclt";
+ // Add implicit zero operand.
+ Ops.push_back(llvm::Constant::getNullValue(CGF.FloatTy));
+ IsFpCmpZInt = true;
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Absolute Compare Greater Than Or Equal
+ case AArch64::BI__builtin_neon_vcages_f32:
+ case AArch64::BI__builtin_neon_vcaged_f64:
+ Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Absolute Compare Greater Than
+ case AArch64::BI__builtin_neon_vcagts_f32:
+ case AArch64::BI__builtin_neon_vcagtd_f64:
+ Int = Intrinsic::aarch64_neon_vcagt; s = "vcagt";
+ OverloadCmpInt = true; break;
+ // Scalar Floating-point Absolute Compare Less Than Or Equal
+ case AArch64::BI__builtin_neon_vcales_f32:
+ case AArch64::BI__builtin_neon_vcaled_f64:
+ Int = Intrinsic::aarch64_neon_vcage; s = "vcage";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ // Scalar Floating-point Absolute Compare Less Than
+ case AArch64::BI__builtin_neon_vcalts_f32:
+ case AArch64::BI__builtin_neon_vcaltd_f64:
+ Int = Intrinsic::aarch64_neon_vcagt; s = "vcalt";
+ OverloadCmpInt = true; std::swap(Ops[0], Ops[1]); break;
+ // Scalar Compare Bitwise Test Bits
+ case AArch64::BI__builtin_neon_vtstd_s64:
+ case AArch64::BI__builtin_neon_vtstd_u64:
+ Int = Intrinsic::aarch64_neon_vtstd; s = "vtst";
+ OverloadCmpInt = true; break;
+ // Scalar Absolute Value
+ case AArch64::BI__builtin_neon_vabsd_s64:
+ Int = Intrinsic::aarch64_neon_vabs;
+ s = "vabs"; OverloadInt = false; break;
+ // Scalar Absolute Difference
+ case AArch64::BI__builtin_neon_vabds_f32:
+ case AArch64::BI__builtin_neon_vabdd_f64:
+ Int = Intrinsic::aarch64_neon_vabd;
+ s = "vabd"; OverloadInt = true; break;
+ // Scalar Signed Saturating Absolute Value
+ case AArch64::BI__builtin_neon_vqabsb_s8:
+ case AArch64::BI__builtin_neon_vqabsh_s16:
+ case AArch64::BI__builtin_neon_vqabss_s32:
+ case AArch64::BI__builtin_neon_vqabsd_s64:
+ Int = Intrinsic::arm_neon_vqabs;
+ s = "vqabs"; OverloadInt = true; break;
+ // Scalar Negate
+ case AArch64::BI__builtin_neon_vnegd_s64:
+ Int = Intrinsic::aarch64_neon_vneg;
+ s = "vneg"; OverloadInt = false; break;
+ // Scalar Signed Saturating Negate
+ case AArch64::BI__builtin_neon_vqnegb_s8:
+ case AArch64::BI__builtin_neon_vqnegh_s16:
+ case AArch64::BI__builtin_neon_vqnegs_s32:
+ case AArch64::BI__builtin_neon_vqnegd_s64:
+ Int = Intrinsic::arm_neon_vqneg;
+ s = "vqneg"; OverloadInt = true; break;
+ // Scalar Signed Saturating Accumulated of Unsigned Value
+ case AArch64::BI__builtin_neon_vuqaddb_s8:
+ case AArch64::BI__builtin_neon_vuqaddh_s16:
+ case AArch64::BI__builtin_neon_vuqadds_s32:
+ case AArch64::BI__builtin_neon_vuqaddd_s64:
+ Int = Intrinsic::aarch64_neon_vuqadd;
+ s = "vuqadd"; OverloadInt = true; break;
+ // Scalar Unsigned Saturating Accumulated of Signed Value
+ case AArch64::BI__builtin_neon_vsqaddb_u8:
+ case AArch64::BI__builtin_neon_vsqaddh_u16:
+ case AArch64::BI__builtin_neon_vsqadds_u32:
+ case AArch64::BI__builtin_neon_vsqaddd_u64:
+ Int = Intrinsic::aarch64_neon_vsqadd;
+ s = "vsqadd"; OverloadInt = true; break;
+ // Signed Saturating Doubling Multiply-Add Long
+ case AArch64::BI__builtin_neon_vqdmlalh_s16:
+ case AArch64::BI__builtin_neon_vqdmlals_s32:
+ Int = Intrinsic::aarch64_neon_vqdmlal;
+ s = "vqdmlal"; OverloadWideInt = true; break;
+ // Signed Saturating Doubling Multiply-Subtract Long
+ case AArch64::BI__builtin_neon_vqdmlslh_s16:
+ case AArch64::BI__builtin_neon_vqdmlsls_s32:
+ Int = Intrinsic::aarch64_neon_vqdmlsl;
+ s = "vqdmlsl"; OverloadWideInt = true; break;
+ // Signed Saturating Doubling Multiply Long
+ case AArch64::BI__builtin_neon_vqdmullh_s16:
+ case AArch64::BI__builtin_neon_vqdmulls_s32:
+ Int = Intrinsic::arm_neon_vqdmull;
+ s = "vqdmull"; OverloadWideInt = true; break;
+ // Scalar Signed Saturating Extract Unsigned Narrow
+ case AArch64::BI__builtin_neon_vqmovunh_s16:
+ case AArch64::BI__builtin_neon_vqmovuns_s32:
+ case AArch64::BI__builtin_neon_vqmovund_s64:
+ Int = Intrinsic::arm_neon_vqmovnsu;
+ s = "vqmovun"; OverloadNarrowInt = true; break;
+ // Scalar Signed Saturating Extract Narrow
+ case AArch64::BI__builtin_neon_vqmovnh_s16:
+ case AArch64::BI__builtin_neon_vqmovns_s32:
+ case AArch64::BI__builtin_neon_vqmovnd_s64:
+ Int = Intrinsic::arm_neon_vqmovns;
+ s = "vqmovn"; OverloadNarrowInt = true; break;
+ // Scalar Unsigned Saturating Extract Narrow
+ case AArch64::BI__builtin_neon_vqmovnh_u16:
+ case AArch64::BI__builtin_neon_vqmovns_u32:
+ case AArch64::BI__builtin_neon_vqmovnd_u64:
+ Int = Intrinsic::arm_neon_vqmovnu;
+ s = "vqmovn"; OverloadNarrowInt = true; break;
+ // Scalar Signed Shift Right (Immediate)
+ case AArch64::BI__builtin_neon_vshrd_n_s64:
+ Int = Intrinsic::aarch64_neon_vshrds_n;
+ s = "vsshr"; OverloadInt = false; break;
+ // Scalar Unsigned Shift Right (Immediate)
+ case AArch64::BI__builtin_neon_vshrd_n_u64:
+ Int = Intrinsic::aarch64_neon_vshrdu_n;
+ s = "vushr"; OverloadInt = false; break;
+ // Scalar Signed Rounding Shift Right (Immediate)
+ case AArch64::BI__builtin_neon_vrshrd_n_s64:
+ Int = Intrinsic::aarch64_neon_vsrshr;
+ s = "vsrshr"; OverloadInt = true; break;
+ // Scalar Unsigned Rounding Shift Right (Immediate)
+ case AArch64::BI__builtin_neon_vrshrd_n_u64:
+ Int = Intrinsic::aarch64_neon_vurshr;
+ s = "vurshr"; OverloadInt = true; break;
+ // Scalar Signed Shift Right and Accumulate (Immediate)
+ case AArch64::BI__builtin_neon_vsrad_n_s64:
+ Int = Intrinsic::aarch64_neon_vsrads_n;
+ s = "vssra"; OverloadInt = false; break;
+ // Scalar Unsigned Shift Right and Accumulate (Immediate)
+ case AArch64::BI__builtin_neon_vsrad_n_u64:
+ Int = Intrinsic::aarch64_neon_vsradu_n;
+ s = "vusra"; OverloadInt = false; break;
+ // Scalar Signed Rounding Shift Right and Accumulate (Immediate)
+ case AArch64::BI__builtin_neon_vrsrad_n_s64:
+ Int = Intrinsic::aarch64_neon_vrsrads_n;
+ s = "vsrsra"; OverloadInt = false; break;
+ // Scalar Unsigned Rounding Shift Right and Accumulate (Immediate)
+ case AArch64::BI__builtin_neon_vrsrad_n_u64:
+ Int = Intrinsic::aarch64_neon_vrsradu_n;
+ s = "vursra"; OverloadInt = false; break;
+ // Scalar Signed/Unsigned Shift Left (Immediate)
+ case AArch64::BI__builtin_neon_vshld_n_s64:
+ case AArch64::BI__builtin_neon_vshld_n_u64:
+ Int = Intrinsic::aarch64_neon_vshld_n;
+ s = "vshl"; OverloadInt = false; break;
+ // Signed Saturating Shift Left (Immediate)
+ case AArch64::BI__builtin_neon_vqshlb_n_s8:
+ case AArch64::BI__builtin_neon_vqshlh_n_s16:
+ case AArch64::BI__builtin_neon_vqshls_n_s32:
+ case AArch64::BI__builtin_neon_vqshld_n_s64:
+ Int = Intrinsic::aarch64_neon_vqshls_n;
+ s = "vsqshl"; OverloadInt = true; break;
+ // Unsigned Saturating Shift Left (Immediate)
+ case AArch64::BI__builtin_neon_vqshlb_n_u8:
+ case AArch64::BI__builtin_neon_vqshlh_n_u16:
+ case AArch64::BI__builtin_neon_vqshls_n_u32:
+ case AArch64::BI__builtin_neon_vqshld_n_u64:
+ Int = Intrinsic::aarch64_neon_vqshlu_n;
+ s = "vuqshl"; OverloadInt = true; break;
+ // Signed Saturating Shift Left Unsigned (Immediate)
+ case AArch64::BI__builtin_neon_vqshlub_n_s8:
+ case AArch64::BI__builtin_neon_vqshluh_n_s16:
+ case AArch64::BI__builtin_neon_vqshlus_n_s32:
+ case AArch64::BI__builtin_neon_vqshlud_n_s64:
+ Int = Intrinsic::aarch64_neon_vsqshlu;
+ s = "vsqshlu"; OverloadInt = true; break;
+ // Shift Right And Insert (Immediate)
+ case AArch64::BI__builtin_neon_vsrid_n_s64:
+ case AArch64::BI__builtin_neon_vsrid_n_u64:
+ Int = Intrinsic::aarch64_neon_vsri;
+ s = "vsri"; OverloadInt = true; break;
+ // Shift Left And Insert (Immediate)
+ case AArch64::BI__builtin_neon_vslid_n_s64:
+ case AArch64::BI__builtin_neon_vslid_n_u64:
+ Int = Intrinsic::aarch64_neon_vsli;
+ s = "vsli"; OverloadInt = true; break;
+ // Signed Saturating Shift Right Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqshrnh_n_s16:
+ case AArch64::BI__builtin_neon_vqshrns_n_s32:
+ case AArch64::BI__builtin_neon_vqshrnd_n_s64:
+ Int = Intrinsic::aarch64_neon_vsqshrn;
+ s = "vsqshrn"; OverloadInt = true; break;
+ // Unsigned Saturating Shift Right Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqshrnh_n_u16:
+ case AArch64::BI__builtin_neon_vqshrns_n_u32:
+ case AArch64::BI__builtin_neon_vqshrnd_n_u64:
+ Int = Intrinsic::aarch64_neon_vuqshrn;
+ s = "vuqshrn"; OverloadInt = true; break;
+ // Signed Saturating Rounded Shift Right Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqrshrnh_n_s16:
+ case AArch64::BI__builtin_neon_vqrshrns_n_s32:
+ case AArch64::BI__builtin_neon_vqrshrnd_n_s64:
+ Int = Intrinsic::aarch64_neon_vsqrshrn;
+ s = "vsqrshrn"; OverloadInt = true; break;
+ // Unsigned Saturating Rounded Shift Right Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqrshrnh_n_u16:
+ case AArch64::BI__builtin_neon_vqrshrns_n_u32:
+ case AArch64::BI__builtin_neon_vqrshrnd_n_u64:
+ Int = Intrinsic::aarch64_neon_vuqrshrn;
+ s = "vuqrshrn"; OverloadInt = true; break;
+ // Signed Saturating Shift Right Unsigned Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqshrunh_n_s16:
+ case AArch64::BI__builtin_neon_vqshruns_n_s32:
+ case AArch64::BI__builtin_neon_vqshrund_n_s64:
+ Int = Intrinsic::aarch64_neon_vsqshrun;
+ s = "vsqshrun"; OverloadInt = true; break;
+ // Signed Saturating Rounded Shift Right Unsigned Narrow (Immediate)
+ case AArch64::BI__builtin_neon_vqrshrunh_n_s16:
+ case AArch64::BI__builtin_neon_vqrshruns_n_s32:
+ case AArch64::BI__builtin_neon_vqrshrund_n_s64:
+ Int = Intrinsic::aarch64_neon_vsqrshrun;
+ s = "vsqrshrun"; OverloadInt = true; break;
+ // Scalar Signed Fixed-point Convert To Floating-Point (Immediate)
+ case AArch64::BI__builtin_neon_vcvts_n_f32_s32:
+ Int = Intrinsic::aarch64_neon_vcvtf32_n_s32;
+ s = "vcvtf"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_n_f64_s64:
+ Int = Intrinsic::aarch64_neon_vcvtf64_n_s64;
+ s = "vcvtf"; OverloadInt = false; break;
+ // Scalar Unsigned Fixed-point Convert To Floating-Point (Immediate)
+ case AArch64::BI__builtin_neon_vcvts_n_f32_u32:
+ Int = Intrinsic::aarch64_neon_vcvtf32_n_u32;
+ s = "vcvtf"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_n_f64_u64:
+ Int = Intrinsic::aarch64_neon_vcvtf64_n_u64;
+ s = "vcvtf"; OverloadInt = false; break;
+ // Scalar Floating-point Convert To Signed Fixed-point (Immediate)
+ case AArch64::BI__builtin_neon_vcvts_n_s32_f32:
+ Int = Intrinsic::aarch64_neon_vcvts_n_s32_f32;
+ s = "fcvtzs"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_n_s64_f64:
+ Int = Intrinsic::aarch64_neon_vcvtd_n_s64_f64;
+ s = "fcvtzs"; OverloadInt = false; break;
+ // Scalar Floating-point Convert To Unsigned Fixed-point (Immediate)
+ case AArch64::BI__builtin_neon_vcvts_n_u32_f32:
+ Int = Intrinsic::aarch64_neon_vcvts_n_u32_f32;
+ s = "fcvtzu"; OverloadInt = false; break;
+ case AArch64::BI__builtin_neon_vcvtd_n_u64_f64:
+ Int = Intrinsic::aarch64_neon_vcvtd_n_u64_f64;
+ s = "fcvtzu"; OverloadInt = false; break;
+ }
+
+ if (!Int)
+ return 0;
+
+  // AArch64 scalar builtins return a scalar type but are mapped to AArch64
+  // intrinsics that return a one-element vector type, so build the overloaded
+  // intrinsic declaration accordingly.
+ Function *F = 0;
+ if (AcrossVec) {
+ // Gen arg type
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ llvm::Type *Ty = CGF.ConvertType(Arg->getType());
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Type *ETy = VTy->getElementType();
+ llvm::VectorType *RTy = llvm::VectorType::get(ETy, 1);
+
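+    // Long reductions (saddlv/uaddlv) produce a result element twice as wide
+    // as the source elements, so widen the one-element result vector.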
+ if (ExtendEle) {
+ assert(!ETy->isFloatingPointTy());
+ RTy = llvm::VectorType::getExtendedElementVectorType(RTy);
+ }
+
+ llvm::Type *Tys[2] = {RTy, VTy};
+ F = CGF.CGM.getIntrinsic(Int, Tys);
+ assert(E->getNumArgs() == 1);
+ } else if (OverloadInt) {
+ // Determine the type of this overloaded AArch64 intrinsic
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
+ assert(VTy);
+
+ F = CGF.CGM.getIntrinsic(Int, VTy);
+ } else if (OverloadWideInt || OverloadNarrowInt) {
+ // Determine the type of this overloaded AArch64 intrinsic
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ llvm::Type *Ty = CGF.ConvertType(Arg->getType());
+ llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
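+    // Widening builtins are overloaded on a one-element vector with the
+    // element width doubled; narrowing builtins on one with it halved.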
+ llvm::VectorType *RTy = OverloadWideInt ?
+ llvm::VectorType::getExtendedElementVectorType(VTy) :
+ llvm::VectorType::getTruncatedElementVectorType(VTy);
+ F = CGF.CGM.getIntrinsic(Int, RTy);
+ } else if (OverloadCmpInt) {
+ // Determine the types of this overloaded AArch64 intrinsic
+ SmallVector<llvm::Type *, 3> Tys;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
+ Tys.push_back(VTy);
+ Ty = CGF.ConvertType(Arg->getType());
+ VTy = llvm::VectorType::get(Ty, 1);
+ Tys.push_back(VTy);
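+    // For floating-point compare-against-zero builtins the implicit zero
+    // operand added earlier is a scalar float, so the third overloaded type
+    // becomes a one-element float vector.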
+ if(IsFpCmpZInt)
+ VTy = llvm::VectorType::get(CGF.FloatTy, 1);
+ Tys.push_back(VTy);
+
+ F = CGF.CGM.getIntrinsic(Int, Tys);
+ } else if (OverloadCvtInt) {
+ // Determine the types of this overloaded AArch64 intrinsic
+ SmallVector<llvm::Type *, 2> Tys;
+ const Expr *Arg = E->getArg(E->getNumArgs()-1);
+ llvm::Type *Ty = CGF.ConvertType(E->getCallReturnType());
+ llvm::VectorType *VTy = llvm::VectorType::get(Ty, 1);
+ Tys.push_back(VTy);
+ Ty = CGF.ConvertType(Arg->getType());
+ VTy = llvm::VectorType::get(Ty, 1);
+ Tys.push_back(VTy);
+
+ F = CGF.CGM.getIntrinsic(Int, Tys);
+ } else
+ F = CGF.CGM.getIntrinsic(Int);
+
+ Value *Result = CGF.EmitNeonCall(F, Ops, s);
+ llvm::Type *ResultType = CGF.ConvertType(E->getType());
+  // Cast the one-element vector result of the AArch64 intrinsic back to the
+  // scalar type expected by the builtin.
+ return CGF.Builder.CreateBitCast(Result, ResultType, s);
+}
+
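+// Emit a comparison of Op against an all-zero vector, using the predicate Fp
+// for floating-point element types and Ip for integer ones, then extend the
+// i1 result to Ty.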
+Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
+ Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
+ const CmpInst::Predicate Ip, const Twine &Name) {
+ llvm::Type *OTy = ((llvm::User *)Op)->getOperand(0)->getType();
+ if (OTy->isPointerTy())
+ OTy = Ty;
+ Op = Builder.CreateBitCast(Op, OTy);
+ if (((llvm::VectorType *)OTy)->getElementType()->isFloatingPointTy()) {
+ Op = Builder.CreateFCmp(Fp, Op, ConstantAggregateZero::get(OTy));
+ } else {
+ Op = Builder.CreateICmp(Ip, Op, ConstantAggregateZero::get(OTy));
+ }
+ return Builder.CreateZExt(Op, Ty, Name);
+}
+
+static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
+ Value *ExtOp, Value *IndexOp,
+ llvm::Type *ResTy, unsigned IntID,
+ const char *Name) {
+ SmallVector<Value *, 2> TblOps;
+ if (ExtOp)
+ TblOps.push_back(ExtOp);
+
+  // Build a vector containing sequential numbers like (0, 1, 2, ..., 15)
+ SmallVector<Constant*, 16> Indices;
+ llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
+ for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
+ Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
+ Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
+ }
+ Value *SV = llvm::ConstantVector::get(Indices);
+
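+  // Concatenate adjacent 64-bit table vectors pairwise into 128-bit vectors,
+  // since the AArch64 TBL/TBX instructions operate on 128-bit table registers.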
+ int PairPos = 0, End = Ops.size() - 1;
+ while (PairPos < End) {
+ TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
+ Ops[PairPos+1], SV, Name));
+ PairPos += 2;
+ }
+
+  // If there is an odd number of 64-bit lookup tables, fill the high 64 bits
+  // of the last 128-bit lookup table with zero.
+ if (PairPos == End) {
+ Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
+ TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
+ ZeroTbl, SV, Name));
+ }
+
+ TblTy = llvm::VectorType::get(TblTy->getElementType(),
+ 2*TblTy->getNumElements());
+ llvm::Type *Tys[2] = { ResTy, TblTy };
+
+ Function *TblF;
+ TblOps.push_back(IndexOp);
+ TblF = CGF.CGM.getIntrinsic(IntID, Tys);
+
+ return CGF.EmitNeonCall(TblF, TblOps, Name);
+}
+
+static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF,
+ unsigned BuiltinID,
+ const CallExpr *E) {
+ unsigned int Int = 0;
+ const char *s = NULL;
+
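+  // TblPos is the index of the first table operand: vtbl builtins take the
+  // table(s) first, while vtbx builtins take the fallback vector first.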
+ unsigned TblPos;
+ switch (BuiltinID) {
+ default:
+ return 0;
+ case AArch64::BI__builtin_neon_vtbl1_v:
+ case AArch64::BI__builtin_neon_vqtbl1_v:
+ case AArch64::BI__builtin_neon_vqtbl1q_v:
+ case AArch64::BI__builtin_neon_vtbl2_v:
+ case AArch64::BI__builtin_neon_vqtbl2_v:
+ case AArch64::BI__builtin_neon_vqtbl2q_v:
+ case AArch64::BI__builtin_neon_vtbl3_v:
+ case AArch64::BI__builtin_neon_vqtbl3_v:
+ case AArch64::BI__builtin_neon_vqtbl3q_v:
+ case AArch64::BI__builtin_neon_vtbl4_v:
+ case AArch64::BI__builtin_neon_vqtbl4_v:
+ case AArch64::BI__builtin_neon_vqtbl4q_v:
+ TblPos = 0;
+ break;
+ case AArch64::BI__builtin_neon_vtbx1_v:
+ case AArch64::BI__builtin_neon_vqtbx1_v:
+ case AArch64::BI__builtin_neon_vqtbx1q_v:
+ case AArch64::BI__builtin_neon_vtbx2_v:
+ case AArch64::BI__builtin_neon_vqtbx2_v:
+ case AArch64::BI__builtin_neon_vqtbx2q_v:
+ case AArch64::BI__builtin_neon_vtbx3_v:
+ case AArch64::BI__builtin_neon_vqtbx3_v:
+ case AArch64::BI__builtin_neon_vqtbx3q_v:
+ case AArch64::BI__builtin_neon_vtbx4_v:
+ case AArch64::BI__builtin_neon_vqtbx4_v:
+ case AArch64::BI__builtin_neon_vqtbx4q_v:
+ TblPos = 1;
+ break;
+ }
+
+ assert(E->getNumArgs() >= 3);
+
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+ if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
+ return 0;
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ llvm::VectorType *VTy = GetNeonType(&CGF, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ Ops.push_back(CGF.EmitScalarExpr(E->getArg(i)));
+ }
+
+ Arg = E->getArg(TblPos);
+ llvm::Type *TblTy = CGF.ConvertType(Arg->getType());
+ llvm::VectorType *VTblTy = cast<llvm::VectorType>(TblTy);
+ llvm::Type *Tys[2] = { Ty, VTblTy };
+ unsigned nElts = VTy->getNumElements();
+
+  // The vtbl/vtbx builtins operate on 64-bit table registers that must first
+  // be packed into 128-bit tables, so handle each of those cases individually;
+  // the vqtbl/vqtbx builtins map directly onto the corresponding intrinsics.
+ SmallVector<Value *, 2> TblOps;
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vtbl1_v: {
+ TblOps.push_back(Ops[0]);
+ return packTBLDVectorList(CGF, TblOps, 0, Ops[1], Ty,
+ Intrinsic::aarch64_neon_vtbl1, "vtbl1");
+ }
+ case AArch64::BI__builtin_neon_vtbl2_v: {
+ TblOps.push_back(Ops[0]);
+ TblOps.push_back(Ops[1]);
+ return packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+ Intrinsic::aarch64_neon_vtbl1, "vtbl1");
+ }
+ case AArch64::BI__builtin_neon_vtbl3_v: {
+ TblOps.push_back(Ops[0]);
+ TblOps.push_back(Ops[1]);
+ TblOps.push_back(Ops[2]);
+ return packTBLDVectorList(CGF, TblOps, 0, Ops[3], Ty,
+ Intrinsic::aarch64_neon_vtbl2, "vtbl2");
+ }
+ case AArch64::BI__builtin_neon_vtbl4_v: {
+ TblOps.push_back(Ops[0]);
+ TblOps.push_back(Ops[1]);
+ TblOps.push_back(Ops[2]);
+ TblOps.push_back(Ops[3]);
+ return packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ Intrinsic::aarch64_neon_vtbl2, "vtbl2");
+ }
+ case AArch64::BI__builtin_neon_vtbx1_v: {
+ TblOps.push_back(Ops[1]);
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[2], Ty,
+ Intrinsic::aarch64_neon_vtbl1, "vtbl1");
+
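+    // A single 64-bit table only covers indices 0-7; where the index is >= 8,
+    // use a bitwise select to take the lane from the fallback vector Ops[0]
+    // instead of the table lookup result.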
+ llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
+ Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
+ Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
+ CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);
+
+ SmallVector<Value *, 4> BslOps;
+ BslOps.push_back(CmpRes);
+ BslOps.push_back(Ops[0]);
+ BslOps.push_back(TblRes);
+ Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
+ return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
+ }
+ case AArch64::BI__builtin_neon_vtbx2_v: {
+ TblOps.push_back(Ops[1]);
+ TblOps.push_back(Ops[2]);
+ return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
+ Intrinsic::aarch64_neon_vtbx1, "vtbx1");
+ }
+ case AArch64::BI__builtin_neon_vtbx3_v: {
+ TblOps.push_back(Ops[1]);
+ TblOps.push_back(Ops[2]);
+ TblOps.push_back(Ops[3]);
+ Value *TblRes = packTBLDVectorList(CGF, TblOps, 0, Ops[4], Ty,
+ Intrinsic::aarch64_neon_vtbl2, "vtbl2");
+
+ llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
+ Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
+ Value *CmpRes = CGF.Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
+ TwentyFourV);
+ CmpRes = CGF.Builder.CreateSExt(CmpRes, Ty);
+
+ SmallVector<Value *, 4> BslOps;
+ BslOps.push_back(CmpRes);
+ BslOps.push_back(Ops[0]);
+ BslOps.push_back(TblRes);
+ Function *BslF = CGF.CGM.getIntrinsic(Intrinsic::arm_neon_vbsl, Ty);
+ return CGF.EmitNeonCall(BslF, BslOps, "vbsl");
+ }
+ case AArch64::BI__builtin_neon_vtbx4_v: {
+ TblOps.push_back(Ops[1]);
+ TblOps.push_back(Ops[2]);
+ TblOps.push_back(Ops[3]);
+ TblOps.push_back(Ops[4]);
+ return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
+ Intrinsic::aarch64_neon_vtbx2, "vtbx2");
+ }
+ case AArch64::BI__builtin_neon_vqtbl1_v:
+ case AArch64::BI__builtin_neon_vqtbl1q_v:
+ Int = Intrinsic::aarch64_neon_vtbl1; s = "vtbl1"; break;
+ case AArch64::BI__builtin_neon_vqtbl2_v:
+ case AArch64::BI__builtin_neon_vqtbl2q_v: {
+ Int = Intrinsic::aarch64_neon_vtbl2; s = "vtbl2"; break;
+ case AArch64::BI__builtin_neon_vqtbl3_v:
+ case AArch64::BI__builtin_neon_vqtbl3q_v:
+ Int = Intrinsic::aarch64_neon_vtbl3; s = "vtbl3"; break;
+ case AArch64::BI__builtin_neon_vqtbl4_v:
+ case AArch64::BI__builtin_neon_vqtbl4q_v:
+ Int = Intrinsic::aarch64_neon_vtbl4; s = "vtbl4"; break;
+ case AArch64::BI__builtin_neon_vqtbx1_v:
+ case AArch64::BI__builtin_neon_vqtbx1q_v:
+ Int = Intrinsic::aarch64_neon_vtbx1; s = "vtbx1"; break;
+ case AArch64::BI__builtin_neon_vqtbx2_v:
+ case AArch64::BI__builtin_neon_vqtbx2q_v:
+ Int = Intrinsic::aarch64_neon_vtbx2; s = "vtbx2"; break;
+ case AArch64::BI__builtin_neon_vqtbx3_v:
+ case AArch64::BI__builtin_neon_vqtbx3q_v:
+ Int = Intrinsic::aarch64_neon_vtbx3; s = "vtbx3"; break;
+ case AArch64::BI__builtin_neon_vqtbx4_v:
+ case AArch64::BI__builtin_neon_vqtbx4q_v:
+ Int = Intrinsic::aarch64_neon_vtbx4; s = "vtbx4"; break;
+ }
+ }
+
+ if (!Int)
+ return 0;
+
+ Function *F = CGF.CGM.getIntrinsic(Int, Tys);
+ return CGF.EmitNeonCall(F, Ops, s);
+}
+
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
+ // Process AArch64 scalar builtins
+ if (Value *Result = EmitAArch64ScalarBuiltinExpr(*this, BuiltinID, E))
+ return Result;
+
+ // Process AArch64 table lookup builtins
+ if (Value *Result = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E))
+ return Result;
+
if (BuiltinID == AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 &&
"Variadic __clear_cache slipped through on AArch64");
@@ -1639,17 +2897,1039 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- return 0;
+ SmallVector<Value *, 4> Ops;
+ llvm::Value *Align = 0; // Alignment for load/store
+ for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
+ if (i == 0) {
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vst1_x2_v:
+ case AArch64::BI__builtin_neon_vst1q_x2_v:
+ case AArch64::BI__builtin_neon_vst1_x3_v:
+ case AArch64::BI__builtin_neon_vst1q_x3_v:
+ case AArch64::BI__builtin_neon_vst1_x4_v:
+ case AArch64::BI__builtin_neon_vst1q_x4_v:
+      // Handle ld1/st1 lane in this function a little differently from ARM.
+ case AArch64::BI__builtin_neon_vld1_lane_v:
+ case AArch64::BI__builtin_neon_vld1q_lane_v:
+ case AArch64::BI__builtin_neon_vst1_lane_v:
+ case AArch64::BI__builtin_neon_vst1q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value *, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(0));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
+ if (i == 1) {
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vld1_x2_v:
+ case AArch64::BI__builtin_neon_vld1q_x2_v:
+ case AArch64::BI__builtin_neon_vld1_x3_v:
+ case AArch64::BI__builtin_neon_vld1q_x3_v:
+ case AArch64::BI__builtin_neon_vld1_x4_v:
+ case AArch64::BI__builtin_neon_vld1q_x4_v:
+      // Handle ld1/st1 dup lane in this function a little differently from ARM.
+ case AArch64::BI__builtin_neon_vld2_dup_v:
+ case AArch64::BI__builtin_neon_vld2q_dup_v:
+ case AArch64::BI__builtin_neon_vld3_dup_v:
+ case AArch64::BI__builtin_neon_vld3q_dup_v:
+ case AArch64::BI__builtin_neon_vld4_dup_v:
+ case AArch64::BI__builtin_neon_vld4q_dup_v:
+ case AArch64::BI__builtin_neon_vld2_lane_v:
+ case AArch64::BI__builtin_neon_vld2q_lane_v:
+ // Get the alignment for the argument in addition to the value;
+ // we'll use it later.
+ std::pair<llvm::Value *, unsigned> Src =
+ EmitPointerWithAlignment(E->getArg(1));
+ Ops.push_back(Src.first);
+ Align = Builder.getInt32(Src.second);
+ continue;
+ }
+ }
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ }
+
+ // Get the last argument, which specifies the vector type.
+ llvm::APSInt Result;
+ const Expr *Arg = E->getArg(E->getNumArgs() - 1);
+ if (!Arg->isIntegerConstantExpr(Result, getContext()))
+ return 0;
+
+ // Determine the type of this overloaded NEON intrinsic.
+ NeonTypeFlags Type(Result.getZExtValue());
+ bool usgn = Type.isUnsigned();
+ bool quad = Type.isQuad();
+
+ llvm::VectorType *VTy = GetNeonType(this, Type);
+ llvm::Type *Ty = VTy;
+ if (!Ty)
+ return 0;
+
+ unsigned Int;
+ switch (BuiltinID) {
+ default:
+ return 0;
+
+ // AArch64 builtins mapping to legacy ARM v7 builtins.
+ // FIXME: the mapped builtins listed correspond to what has been tested
+ // in aarch64-neon-intrinsics.c so far.
+ case AArch64::BI__builtin_neon_vuzp_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzp_v, E);
+ case AArch64::BI__builtin_neon_vuzpq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vuzpq_v, E);
+ case AArch64::BI__builtin_neon_vzip_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzip_v, E);
+ case AArch64::BI__builtin_neon_vzipq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vzipq_v, E);
+ case AArch64::BI__builtin_neon_vtrn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrn_v, E);
+ case AArch64::BI__builtin_neon_vtrnq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtrnq_v, E);
+ case AArch64::BI__builtin_neon_vext_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vext_v, E);
+ case AArch64::BI__builtin_neon_vextq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vextq_v, E);
+ case AArch64::BI__builtin_neon_vmul_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmul_v, E);
+ case AArch64::BI__builtin_neon_vmulq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmulq_v, E);
+ case AArch64::BI__builtin_neon_vabd_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabd_v, E);
+ case AArch64::BI__builtin_neon_vabdq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabdq_v, E);
+ case AArch64::BI__builtin_neon_vfma_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfma_v, E);
+ case AArch64::BI__builtin_neon_vfmaq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vfmaq_v, E);
+ case AArch64::BI__builtin_neon_vbsl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbsl_v, E);
+ case AArch64::BI__builtin_neon_vbslq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vbslq_v, E);
+ case AArch64::BI__builtin_neon_vrsqrts_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrts_v, E);
+ case AArch64::BI__builtin_neon_vrsqrtsq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrtsq_v, E);
+ case AArch64::BI__builtin_neon_vrecps_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecps_v, E);
+ case AArch64::BI__builtin_neon_vrecpsq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpsq_v, E);
+ case AArch64::BI__builtin_neon_vcale_v:
+ if (VTy->getVectorNumElements() == 1) {
+ std::swap(Ops[0], Ops[1]);
+ } else {
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcale_v, E);
+ }
+ case AArch64::BI__builtin_neon_vcage_v:
+ if (VTy->getVectorNumElements() == 1) {
+ // Determine the types of this overloaded AArch64 intrinsic
+ SmallVector<llvm::Type *, 3> Tys;
+ Tys.push_back(VTy);
+ VTy = llvm::VectorType::get(DoubleTy, 1);
+ Tys.push_back(VTy);
+ Tys.push_back(VTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcage, Tys);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcage_v, E);
+ case AArch64::BI__builtin_neon_vcaleq_v:
+ std::swap(Ops[0], Ops[1]);
+ case AArch64::BI__builtin_neon_vcageq_v: {
+ Function *F;
+ if (VTy->getElementType()->isIntegerTy(64))
+ F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgeq);
+ else
+ F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgeq);
+ return EmitNeonCall(F, Ops, "vcage");
+ }
+ case AArch64::BI__builtin_neon_vcalt_v:
+ if (VTy->getVectorNumElements() == 1) {
+ std::swap(Ops[0], Ops[1]);
+ } else {
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcalt_v, E);
+ }
+ case AArch64::BI__builtin_neon_vcagt_v:
+ if (VTy->getVectorNumElements() == 1) {
+ // Determine the types of this overloaded AArch64 intrinsic
+ SmallVector<llvm::Type *, 3> Tys;
+ Tys.push_back(VTy);
+ VTy = llvm::VectorType::get(DoubleTy, 1);
+ Tys.push_back(VTy);
+ Tys.push_back(VTy);
+ Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vcagt, Tys);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcagt_v, E);
+ case AArch64::BI__builtin_neon_vcaltq_v:
+ std::swap(Ops[0], Ops[1]);
+ case AArch64::BI__builtin_neon_vcagtq_v: {
+ Function *F;
+ if (VTy->getElementType()->isIntegerTy(64))
+ F = CGM.getIntrinsic(Intrinsic::aarch64_neon_vacgtq);
+ else
+ F = CGM.getIntrinsic(Intrinsic::arm_neon_vacgtq);
+ return EmitNeonCall(F, Ops, "vcagt");
+ }
+ case AArch64::BI__builtin_neon_vtst_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtst_v, E);
+ case AArch64::BI__builtin_neon_vtstq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vtstq_v, E);
+ case AArch64::BI__builtin_neon_vhadd_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhadd_v, E);
+ case AArch64::BI__builtin_neon_vhaddq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhaddq_v, E);
+ case AArch64::BI__builtin_neon_vhsub_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsub_v, E);
+ case AArch64::BI__builtin_neon_vhsubq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vhsubq_v, E);
+ case AArch64::BI__builtin_neon_vrhadd_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhadd_v, E);
+ case AArch64::BI__builtin_neon_vrhaddq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrhaddq_v, E);
+ case AArch64::BI__builtin_neon_vqadd_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqadd_v, E);
+ case AArch64::BI__builtin_neon_vqaddq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqaddq_v, E);
+ case AArch64::BI__builtin_neon_vqsub_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsub_v, E);
+ case AArch64::BI__builtin_neon_vqsubq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqsubq_v, E);
+ case AArch64::BI__builtin_neon_vshl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_v, E);
+ case AArch64::BI__builtin_neon_vshlq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_v, E);
+ case AArch64::BI__builtin_neon_vqshl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_v, E);
+ case AArch64::BI__builtin_neon_vqshlq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_v, E);
+ case AArch64::BI__builtin_neon_vrshl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshl_v, E);
+ case AArch64::BI__builtin_neon_vrshlq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrshlq_v, E);
+ case AArch64::BI__builtin_neon_vqrshl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshl_v, E);
+ case AArch64::BI__builtin_neon_vqrshlq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrshlq_v, E);
+ case AArch64::BI__builtin_neon_vaddhn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vaddhn_v, E);
+ case AArch64::BI__builtin_neon_vraddhn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vraddhn_v, E);
+ case AArch64::BI__builtin_neon_vsubhn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsubhn_v, E);
+ case AArch64::BI__builtin_neon_vrsubhn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsubhn_v, E);
+ case AArch64::BI__builtin_neon_vmull_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmull_v, E);
+ case AArch64::BI__builtin_neon_vqdmull_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmull_v, E);
+ case AArch64::BI__builtin_neon_vqdmlal_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlal_v, E);
+ case AArch64::BI__builtin_neon_vqdmlsl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmlsl_v, E);
+ case AArch64::BI__builtin_neon_vmax_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmax_v, E);
+ case AArch64::BI__builtin_neon_vmaxq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmaxq_v, E);
+ case AArch64::BI__builtin_neon_vmin_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmin_v, E);
+ case AArch64::BI__builtin_neon_vminq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vminq_v, E);
+ case AArch64::BI__builtin_neon_vpmax_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmax_v, E);
+ case AArch64::BI__builtin_neon_vpmin_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpmin_v, E);
+ case AArch64::BI__builtin_neon_vpadd_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadd_v, E);
+ case AArch64::BI__builtin_neon_vqdmulh_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulh_v, E);
+ case AArch64::BI__builtin_neon_vqdmulhq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqdmulhq_v, E);
+ case AArch64::BI__builtin_neon_vqrdmulh_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulh_v, E);
+ case AArch64::BI__builtin_neon_vqrdmulhq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqrdmulhq_v, E);
+
+ // Shift by immediate
+ case AArch64::BI__builtin_neon_vshr_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshr_n_v, E);
+ case AArch64::BI__builtin_neon_vshrq_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshrq_n_v, E);
+ case AArch64::BI__builtin_neon_vrshr_n_v:
+ case AArch64::BI__builtin_neon_vrshrq_n_v:
+ Int = usgn ? Intrinsic::aarch64_neon_vurshr
+ : Intrinsic::aarch64_neon_vsrshr;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n");
+ case AArch64::BI__builtin_neon_vsra_n_v:
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ Int = usgn ? Intrinsic::aarch64_neon_vsradu_n
+ : Intrinsic::aarch64_neon_vsrads_n;
+ return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vsra_n");
+ }
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsra_n_v, E);
+ case AArch64::BI__builtin_neon_vsraq_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vsraq_n_v, E);
+ case AArch64::BI__builtin_neon_vrsra_n_v:
+ if (VTy->getElementType()->isIntegerTy(64)) {
+ Int = usgn ? Intrinsic::aarch64_neon_vrsradu_n
+ : Intrinsic::aarch64_neon_vrsrads_n;
+ return EmitNeonCall(CGM.getIntrinsic(Int), Ops, "vrsra_n");
+ }
+ // fall through
+ case AArch64::BI__builtin_neon_vrsraq_n_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Int = usgn ? Intrinsic::aarch64_neon_vurshr
+ : Intrinsic::aarch64_neon_vsrshr;
+ Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
+ return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
+ }
+ case AArch64::BI__builtin_neon_vshl_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshl_n_v, E);
+ case AArch64::BI__builtin_neon_vshlq_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vshlq_n_v, E);
+ case AArch64::BI__builtin_neon_vqshl_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshl_n_v, E);
+ case AArch64::BI__builtin_neon_vqshlq_n_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqshlq_n_v, E);
+ case AArch64::BI__builtin_neon_vqshlu_n_v:
+ case AArch64::BI__builtin_neon_vqshluq_n_v:
+ Int = Intrinsic::aarch64_neon_vsqshlu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n");
+ case AArch64::BI__builtin_neon_vsri_n_v:
+ case AArch64::BI__builtin_neon_vsriq_n_v:
+ Int = Intrinsic::aarch64_neon_vsri;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsri_n");
+ case AArch64::BI__builtin_neon_vsli_n_v:
+ case AArch64::BI__builtin_neon_vsliq_n_v:
+ Int = Intrinsic::aarch64_neon_vsli;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsli_n");
+ case AArch64::BI__builtin_neon_vshll_n_v: {
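+    // Lower vshll_n directly as a sign/zero extension of the narrow source
+    // followed by a vector shift left, rather than calling an intrinsic.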
+ llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ if (usgn)
+ Ops[0] = Builder.CreateZExt(Ops[0], VTy);
+ else
+ Ops[0] = Builder.CreateSExt(Ops[0], VTy);
+ Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
+ return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
+ }
+ case AArch64::BI__builtin_neon_vshrn_n_v: {
+ llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
+ if (usgn)
+ Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
+ return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
+ }
+ case AArch64::BI__builtin_neon_vqshrun_n_v:
+ Int = Intrinsic::aarch64_neon_vsqshrun;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
+ case AArch64::BI__builtin_neon_vrshrn_n_v:
+ Int = Intrinsic::aarch64_neon_vrshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
+ case AArch64::BI__builtin_neon_vqrshrun_n_v:
+ Int = Intrinsic::aarch64_neon_vsqrshrun;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
+ case AArch64::BI__builtin_neon_vqshrn_n_v:
+ Int = usgn ? Intrinsic::aarch64_neon_vuqshrn
+ : Intrinsic::aarch64_neon_vsqshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
+ case AArch64::BI__builtin_neon_vqrshrn_n_v:
+ Int = usgn ? Intrinsic::aarch64_neon_vuqrshrn
+ : Intrinsic::aarch64_neon_vsqrshrn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
+
+ // Convert
+ case AArch64::BI__builtin_neon_vmovl_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovl_v, E);
+ case AArch64::BI__builtin_neon_vcvt_n_f32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_f32_v, E);
+ case AArch64::BI__builtin_neon_vcvtq_n_f32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_f32_v, E);
+ case AArch64::BI__builtin_neon_vcvt_n_f64_v:
+ case AArch64::BI__builtin_neon_vcvtq_n_f64_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ llvm::Type *Tys[2] = { FloatTy, Ty };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfxu2fp
+ : Intrinsic::arm_neon_vcvtfxs2fp;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+ case AArch64::BI__builtin_neon_vcvt_n_s32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_s32_v, E);
+ case AArch64::BI__builtin_neon_vcvtq_n_s32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_s32_v, E);
+ case AArch64::BI__builtin_neon_vcvt_n_u32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_n_u32_v, E);
+ case AArch64::BI__builtin_neon_vcvtq_n_u32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_n_u32_v, E);
+ case AArch64::BI__builtin_neon_vcvt_n_s64_v:
+ case AArch64::BI__builtin_neon_vcvt_n_u64_v:
+ case AArch64::BI__builtin_neon_vcvtq_n_s64_v:
+ case AArch64::BI__builtin_neon_vcvtq_n_u64_v: {
+ llvm::Type *FloatTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ llvm::Type *Tys[2] = { Ty, FloatTy };
+ Int = usgn ? Intrinsic::arm_neon_vcvtfp2fxu
+ : Intrinsic::arm_neon_vcvtfp2fxs;
+ Function *F = CGM.getIntrinsic(Int, Tys);
+ return EmitNeonCall(F, Ops, "vcvt_n");
+ }
+
+ // Load/Store
+ case AArch64::BI__builtin_neon_vld1_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E);
+ case AArch64::BI__builtin_neon_vld1q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E);
+ case AArch64::BI__builtin_neon_vld2_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E);
+ case AArch64::BI__builtin_neon_vld2q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E);
+ case AArch64::BI__builtin_neon_vld3_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E);
+ case AArch64::BI__builtin_neon_vld3q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E);
+ case AArch64::BI__builtin_neon_vld4_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E);
+ case AArch64::BI__builtin_neon_vld4q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E);
+ case AArch64::BI__builtin_neon_vst1_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E);
+ case AArch64::BI__builtin_neon_vst1q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E);
+ case AArch64::BI__builtin_neon_vst2_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E);
+ case AArch64::BI__builtin_neon_vst2q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E);
+ case AArch64::BI__builtin_neon_vst3_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E);
+ case AArch64::BI__builtin_neon_vst3q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E);
+ case AArch64::BI__builtin_neon_vst4_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E);
+ case AArch64::BI__builtin_neon_vst4q_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E);
+ case AArch64::BI__builtin_neon_vld1_x2_v:
+ case AArch64::BI__builtin_neon_vld1q_x2_v:
+ case AArch64::BI__builtin_neon_vld1_x3_v:
+ case AArch64::BI__builtin_neon_vld1q_x3_v:
+ case AArch64::BI__builtin_neon_vld1_x4_v:
+ case AArch64::BI__builtin_neon_vld1q_x4_v: {
+ unsigned Int;
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vld1_x2_v:
+ case AArch64::BI__builtin_neon_vld1q_x2_v:
+ Int = Intrinsic::aarch64_neon_vld1x2;
+ break;
+ case AArch64::BI__builtin_neon_vld1_x3_v:
+ case AArch64::BI__builtin_neon_vld1q_x3_v:
+ Int = Intrinsic::aarch64_neon_vld1x3;
+ break;
+ case AArch64::BI__builtin_neon_vld1_x4_v:
+ case AArch64::BI__builtin_neon_vld1q_x4_v:
+ Int = Intrinsic::aarch64_neon_vld1x4;
+ break;
+ }
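+    // Call the vld1xN intrinsic, then store the returned aggregate through
+    // the result pointer passed as the first builtin argument.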
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld1xN");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vst1_x2_v:
+ case AArch64::BI__builtin_neon_vst1q_x2_v:
+ case AArch64::BI__builtin_neon_vst1_x3_v:
+ case AArch64::BI__builtin_neon_vst1q_x3_v:
+ case AArch64::BI__builtin_neon_vst1_x4_v:
+ case AArch64::BI__builtin_neon_vst1q_x4_v: {
+ Ops.push_back(Align);
+ unsigned Int;
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vst1_x2_v:
+ case AArch64::BI__builtin_neon_vst1q_x2_v:
+ Int = Intrinsic::aarch64_neon_vst1x2;
+ break;
+ case AArch64::BI__builtin_neon_vst1_x3_v:
+ case AArch64::BI__builtin_neon_vst1q_x3_v:
+ Int = Intrinsic::aarch64_neon_vst1x3;
+ break;
+ case AArch64::BI__builtin_neon_vst1_x4_v:
+ case AArch64::BI__builtin_neon_vst1q_x4_v:
+ Int = Intrinsic::aarch64_neon_vst1x4;
+ break;
+ }
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
+ }
+ case AArch64::BI__builtin_neon_vld1_lane_v:
+ case AArch64::BI__builtin_neon_vld1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ty = llvm::PointerType::getUnqual(VTy->getElementType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ LoadInst *Ld = Builder.CreateLoad(Ops[0]);
+ Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
+ }
+ case AArch64::BI__builtin_neon_vld2_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
+ case AArch64::BI__builtin_neon_vld2q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_lane_v, E);
+ case AArch64::BI__builtin_neon_vld3_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_lane_v, E);
+ case AArch64::BI__builtin_neon_vld3q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_lane_v, E);
+ case AArch64::BI__builtin_neon_vld4_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_lane_v, E);
+ case AArch64::BI__builtin_neon_vld4q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_lane_v, E);
+ case AArch64::BI__builtin_neon_vst1_lane_v:
+ case AArch64::BI__builtin_neon_vst1q_lane_v: {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ StoreInst *St =
+ Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
+ St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
+ return St;
+ }
+ case AArch64::BI__builtin_neon_vst2_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_lane_v, E);
+ case AArch64::BI__builtin_neon_vst2q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_lane_v, E);
+ case AArch64::BI__builtin_neon_vst3_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_lane_v, E);
+ case AArch64::BI__builtin_neon_vst3q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_lane_v, E);
+ case AArch64::BI__builtin_neon_vst4_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_lane_v, E);
+ case AArch64::BI__builtin_neon_vst4q_lane_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_lane_v, E);
+ case AArch64::BI__builtin_neon_vld1_dup_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_dup_v, E);
+ case AArch64::BI__builtin_neon_vld1q_dup_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_dup_v, E);
+ case AArch64::BI__builtin_neon_vld2_dup_v:
+ case AArch64::BI__builtin_neon_vld2q_dup_v:
+ case AArch64::BI__builtin_neon_vld3_dup_v:
+ case AArch64::BI__builtin_neon_vld3q_dup_v:
+ case AArch64::BI__builtin_neon_vld4_dup_v:
+ case AArch64::BI__builtin_neon_vld4q_dup_v: {
+ // Handle 64-bit x 1 elements as a special case. There is no "dup" needed.
+ if (VTy->getElementType()->getPrimitiveSizeInBits() == 64 &&
+ VTy->getNumElements() == 1) {
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vld2_dup_v:
+ Int = Intrinsic::arm_neon_vld2;
+ break;
+ case AArch64::BI__builtin_neon_vld3_dup_v:
+ Int = Intrinsic::arm_neon_vld3;
+ break;
+ case AArch64::BI__builtin_neon_vld4_dup_v:
+ Int = Intrinsic::arm_neon_vld4;
+ break;
+ default:
+ llvm_unreachable("unknown vld_dup intrinsic?");
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+ switch (BuiltinID) {
+ case AArch64::BI__builtin_neon_vld2_dup_v:
+ case AArch64::BI__builtin_neon_vld2q_dup_v:
+ Int = Intrinsic::arm_neon_vld2lane;
+ break;
+ case AArch64::BI__builtin_neon_vld3_dup_v:
+ case AArch64::BI__builtin_neon_vld3q_dup_v:
+ Int = Intrinsic::arm_neon_vld3lane;
+ break;
+ case AArch64::BI__builtin_neon_vld4_dup_v:
+ case AArch64::BI__builtin_neon_vld4q_dup_v:
+ Int = Intrinsic::arm_neon_vld4lane;
+ break;
+ }
+ Function *F = CGM.getIntrinsic(Int, Ty);
+ llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
+
+ SmallVector<Value *, 6> Args;
+ Args.push_back(Ops[1]);
+ Args.append(STy->getNumElements(), UndefValue::get(Ty));
+
+ llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
+ Args.push_back(CI);
+ Args.push_back(Align);
+
+ Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
+ // Splat lane 0 to all elements in each vector of the result.
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Value *Val = Builder.CreateExtractValue(Ops[1], i);
+ Value *Elt = Builder.CreateBitCast(Val, Ty);
+ Elt = EmitNeonSplat(Elt, CI);
+ Elt = Builder.CreateBitCast(Elt, Val->getType());
+ Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
+ }
+ Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ return Builder.CreateStore(Ops[1], Ops[0]);
+ }
+
+ // Crypto
+ case AArch64::BI__builtin_neon_vaeseq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aese, Ty),
+ Ops, "aese");
+ case AArch64::BI__builtin_neon_vaesdq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesd, Ty),
+ Ops, "aesd");
+ case AArch64::BI__builtin_neon_vaesmcq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesmc, Ty),
+ Ops, "aesmc");
+ case AArch64::BI__builtin_neon_vaesimcq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_aesimc, Ty),
+ Ops, "aesimc");
+ case AArch64::BI__builtin_neon_vsha1su1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su1, Ty),
+ Ops, "sha1su1");
+ case AArch64::BI__builtin_neon_vsha256su0q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su0, Ty),
+ Ops, "sha256su0");
+ case AArch64::BI__builtin_neon_vsha1su0q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1su0, Ty),
+ Ops, "sha1su0");
+ case AArch64::BI__builtin_neon_vsha256hq_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h, Ty),
+ Ops, "sha256h");
+ case AArch64::BI__builtin_neon_vsha256h2q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256h2, Ty),
+ Ops, "sha256h2");
+ case AArch64::BI__builtin_neon_vsha256su1q_v:
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha256su1, Ty),
+ Ops, "sha256su1");
+ case AArch64::BI__builtin_neon_vmul_lane_v:
+ case AArch64::BI__builtin_neon_vmul_laneq_v: {
+ // v1f64 vmul_lane should be mapped to Neon scalar mul lane
+ bool Quad = false;
+ if (BuiltinID == AArch64::BI__builtin_neon_vmul_laneq_v)
+ Quad = true;
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ llvm::Type *VTy = GetNeonType(this,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
+ Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
+ Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
+ Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
+ return Builder.CreateBitCast(Result, Ty);
+ }
+
+ // AArch64-only builtins
+ case AArch64::BI__builtin_neon_vfmaq_laneq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
+ return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vfmaq_lane_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getNumElements() / 2);
+ Ops[2] = Builder.CreateBitCast(Ops[2], STy);
+ Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+ cast<ConstantInt>(Ops[3]));
+ Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
+
+ return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vfma_lane_v: {
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ // v1f64 fma should be mapped to Neon scalar f64 fma
+ if (VTy && VTy->getElementType() == DoubleTy) {
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+ llvm::Type *VTy = GetNeonType(this,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, false));
+ Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+ Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+ return Builder.CreateBitCast(Result, Ty);
+ }
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+ Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
+ return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vfma_laneq_v: {
+ llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
+ // v1f64 fma should be mapped to Neon scalar f64 fma
+ if (VTy && VTy->getElementType() == DoubleTy) {
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
+ llvm::Type *VTy = GetNeonType(this,
+ NeonTypeFlags(NeonTypeFlags::Float64, false, true));
+ Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
+ Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
+ Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+ return Builder.CreateBitCast(Result, Ty);
+ }
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+
+ llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
+ VTy->getNumElements() * 2);
+ Ops[2] = Builder.CreateBitCast(Ops[2], STy);
+ Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
+ cast<ConstantInt>(Ops[3]));
+ Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
+
+ return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vfms_v:
+ case AArch64::BI__builtin_neon_vfmsq_v: {
+ Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
+ Ops[1] = Builder.CreateFNeg(Ops[1]);
+ Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
+
+ // LLVM's fma intrinsic puts the accumulator in the last position, but the
+ // AArch64 intrinsic has it first.
+ return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
+ }
+ case AArch64::BI__builtin_neon_vmaxnm_v:
+ case AArch64::BI__builtin_neon_vmaxnmq_v: {
+ Int = Intrinsic::aarch64_neon_vmaxnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
+ }
+ case AArch64::BI__builtin_neon_vminnm_v:
+ case AArch64::BI__builtin_neon_vminnmq_v: {
+ Int = Intrinsic::aarch64_neon_vminnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
+ }
+ case AArch64::BI__builtin_neon_vpmaxnm_v:
+ case AArch64::BI__builtin_neon_vpmaxnmq_v: {
+ Int = Intrinsic::aarch64_neon_vpmaxnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
+ }
+ case AArch64::BI__builtin_neon_vpminnm_v:
+ case AArch64::BI__builtin_neon_vpminnmq_v: {
+ Int = Intrinsic::aarch64_neon_vpminnm;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
+ }
+ case AArch64::BI__builtin_neon_vpmaxq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpmaxu : Intrinsic::arm_neon_vpmaxs;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
+ }
+ case AArch64::BI__builtin_neon_vpminq_v: {
+ Int = usgn ? Intrinsic::arm_neon_vpminu : Intrinsic::arm_neon_vpmins;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
+ }
+ case AArch64::BI__builtin_neon_vpaddq_v: {
+ Int = Intrinsic::arm_neon_vpadd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpadd");
+ }
+ case AArch64::BI__builtin_neon_vmulx_v:
+ case AArch64::BI__builtin_neon_vmulxq_v: {
+ Int = Intrinsic::aarch64_neon_vmulx;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
+ }
+ case AArch64::BI__builtin_neon_vpaddl_v:
+ case AArch64::BI__builtin_neon_vpaddlq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpaddl_v, E);
+ case AArch64::BI__builtin_neon_vpadal_v:
+ case AArch64::BI__builtin_neon_vpadalq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vpadal_v, E);
+ case AArch64::BI__builtin_neon_vqabs_v:
+ case AArch64::BI__builtin_neon_vqabsq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqabs_v, E);
+ case AArch64::BI__builtin_neon_vqneg_v:
+ case AArch64::BI__builtin_neon_vqnegq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqneg_v, E);
+ case AArch64::BI__builtin_neon_vabs_v:
+ case AArch64::BI__builtin_neon_vabsq_v: {
+ if (VTy->getElementType()->isFloatingPointTy()) {
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
+ }
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vabs_v, E);
+ }
+ case AArch64::BI__builtin_neon_vsqadd_v:
+ case AArch64::BI__builtin_neon_vsqaddq_v: {
+ Int = Intrinsic::aarch64_neon_usqadd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
+ }
+ case AArch64::BI__builtin_neon_vuqadd_v:
+ case AArch64::BI__builtin_neon_vuqaddq_v: {
+ Int = Intrinsic::aarch64_neon_suqadd;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
+ }
+ case AArch64::BI__builtin_neon_vcls_v:
+ case AArch64::BI__builtin_neon_vclsq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcls_v, E);
+ case AArch64::BI__builtin_neon_vclz_v:
+ case AArch64::BI__builtin_neon_vclzq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vclz_v, E);
+ case AArch64::BI__builtin_neon_vcnt_v:
+ case AArch64::BI__builtin_neon_vcntq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcnt_v, E);
+ case AArch64::BI__builtin_neon_vrbit_v:
+ case AArch64::BI__builtin_neon_vrbitq_v:
+ Int = Intrinsic::aarch64_neon_rbit;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
+ case AArch64::BI__builtin_neon_vmovn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vmovn_v, E);
+ case AArch64::BI__builtin_neon_vqmovun_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovun_v, E);
+ case AArch64::BI__builtin_neon_vqmovn_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vqmovn_v, E);
+ case AArch64::BI__builtin_neon_vcvt_f16_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f16_v, E);
+ case AArch64::BI__builtin_neon_vcvt_f32_f16:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_f16, E);
+ case AArch64::BI__builtin_neon_vcvt_f32_f64: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
+ return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
+ }
+ case AArch64::BI__builtin_neon_vcvtx_f32_v: {
+ llvm::Type *EltTy = FloatTy;
+ llvm::Type *ResTy = llvm::VectorType::get(EltTy, 2);
+ llvm::Type *Tys[2] = { ResTy, Ty };
+ Int = Intrinsic::aarch64_neon_fcvtxn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtx_f32_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvt_f64_f32: {
+ llvm::Type *OpTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, false));
+ Ops[0] = Builder.CreateBitCast(Ops[0], OpTy);
+ return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
+ }
+ case AArch64::BI__builtin_neon_vcvt_f64_v:
+ case AArch64::BI__builtin_neon_vcvtq_f64_v: {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
+ Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
+ : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
+ }
+ case AArch64::BI__builtin_neon_vrndn_v:
+ case AArch64::BI__builtin_neon_vrndnq_v: {
+ Int = Intrinsic::aarch64_neon_frintn;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
+ }
+ case AArch64::BI__builtin_neon_vrnda_v:
+ case AArch64::BI__builtin_neon_vrndaq_v: {
+ Int = Intrinsic::round;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
+ }
+ case AArch64::BI__builtin_neon_vrndp_v:
+ case AArch64::BI__builtin_neon_vrndpq_v: {
+ Int = Intrinsic::ceil;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
+ }
+ case AArch64::BI__builtin_neon_vrndm_v:
+ case AArch64::BI__builtin_neon_vrndmq_v: {
+ Int = Intrinsic::floor;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
+ }
+ case AArch64::BI__builtin_neon_vrndx_v:
+ case AArch64::BI__builtin_neon_vrndxq_v: {
+ Int = Intrinsic::rint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
+ }
+ case AArch64::BI__builtin_neon_vrnd_v:
+ case AArch64::BI__builtin_neon_vrndq_v: {
+ Int = Intrinsic::trunc;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnd");
+ }
+ case AArch64::BI__builtin_neon_vrndi_v:
+ case AArch64::BI__builtin_neon_vrndiq_v: {
+ Int = Intrinsic::nearbyint;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
+ }
+ case AArch64::BI__builtin_neon_vcvt_s32_v:
+ case AArch64::BI__builtin_neon_vcvt_u32_v:
+ case AArch64::BI__builtin_neon_vcvtq_s32_v:
+ case AArch64::BI__builtin_neon_vcvtq_u32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvtq_u32_v, E);
+ case AArch64::BI__builtin_neon_vcvt_s64_v:
+ case AArch64::BI__builtin_neon_vcvt_u64_v:
+ case AArch64::BI__builtin_neon_vcvtq_s64_v:
+ case AArch64::BI__builtin_neon_vcvtq_u64_v: {
+ llvm::Type *DoubleTy =
+ GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
+ Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
+ return usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
+ : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
+ }
+ case AArch64::BI__builtin_neon_vcvtn_s32_v:
+ case AArch64::BI__builtin_neon_vcvtnq_s32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtn_s64_v:
+ case AArch64::BI__builtin_neon_vcvtnq_s64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtns;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtns_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvtn_u32_v:
+ case AArch64::BI__builtin_neon_vcvtnq_u32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtnu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtn_u64_v:
+ case AArch64::BI__builtin_neon_vcvtnq_u64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtnu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtnu_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvtp_s32_v:
+ case AArch64::BI__builtin_neon_vcvtpq_s32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtps;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtp_s64_v:
+ case AArch64::BI__builtin_neon_vcvtpq_s64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtps;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtps_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvtp_u32_v:
+ case AArch64::BI__builtin_neon_vcvtpq_u32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtpu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtp_u64_v:
+ case AArch64::BI__builtin_neon_vcvtpq_u64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtpu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtpu_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvtm_s32_v:
+ case AArch64::BI__builtin_neon_vcvtmq_s32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtms;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtm_s64_v:
+ case AArch64::BI__builtin_neon_vcvtmq_s64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtms;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtms_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvtm_u32_v:
+ case AArch64::BI__builtin_neon_vcvtmq_u32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtmu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvtm_u64_v:
+ case AArch64::BI__builtin_neon_vcvtmq_u64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtmu;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtmu_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvta_s32_v:
+ case AArch64::BI__builtin_neon_vcvtaq_s32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtas;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvta_s64_v:
+ case AArch64::BI__builtin_neon_vcvtaq_s64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtas;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtas_f64");
+ }
+ case AArch64::BI__builtin_neon_vcvta_u32_v:
+ case AArch64::BI__builtin_neon_vcvtaq_u32_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(FloatTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtau;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f32");
+ }
+ case AArch64::BI__builtin_neon_vcvta_u64_v:
+ case AArch64::BI__builtin_neon_vcvtaq_u64_v: {
+ llvm::Type *OpTy = llvm::VectorType::get(DoubleTy, VTy->getNumElements());
+ llvm::Type *Tys[2] = { Ty, OpTy };
+ Int = Intrinsic::aarch64_neon_fcvtau;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtau_f64");
+ }
+ case AArch64::BI__builtin_neon_vrecpe_v:
+ case AArch64::BI__builtin_neon_vrecpeq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrecpe_v, E);
+ case AArch64::BI__builtin_neon_vrsqrte_v:
+ case AArch64::BI__builtin_neon_vrsqrteq_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vrsqrte_v, E);
+ case AArch64::BI__builtin_neon_vsqrt_v:
+ case AArch64::BI__builtin_neon_vsqrtq_v: {
+ Int = Intrinsic::sqrt;
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
+ }
+ case AArch64::BI__builtin_neon_vcvt_f32_v:
+ case AArch64::BI__builtin_neon_vcvtq_f32_v:
+ return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vcvt_f32_v, E);
+ case AArch64::BI__builtin_neon_vceqz_v:
+ case AArch64::BI__builtin_neon_vceqzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
+ ICmpInst::ICMP_EQ, "vceqz");
+ case AArch64::BI__builtin_neon_vcgez_v:
+ case AArch64::BI__builtin_neon_vcgezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
+ ICmpInst::ICMP_SGE, "vcgez");
+ case AArch64::BI__builtin_neon_vclez_v:
+ case AArch64::BI__builtin_neon_vclezq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
+ ICmpInst::ICMP_SLE, "vclez");
+ case AArch64::BI__builtin_neon_vcgtz_v:
+ case AArch64::BI__builtin_neon_vcgtzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
+ ICmpInst::ICMP_SGT, "vcgtz");
+ case AArch64::BI__builtin_neon_vcltz_v:
+ case AArch64::BI__builtin_neon_vcltzq_v:
+ return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
+ ICmpInst::ICMP_SLT, "vcltz");
+ }
}
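The vcvtn/vcvta/vcvtp/vcvtm cases handled above differ only in the per-lane rounding mode applied before the float-to-integer conversion: to nearest with ties to even, to nearest with ties away from zero, toward +infinity, and toward -infinity. A minimal scalar sketch of those four behaviours using standard <cmath> calls; it illustrates the semantics only, while the codegen above emits the corresponding AArch64 NEON intrinsics:

#include <cmath>
#include <cstdio>

int main() {
  double x = 2.5;
  // fcvtn*: round to nearest, ties to even (lrint under the default FP environment)
  long n = std::lrint(x);
  // fcvta*: round to nearest, ties away from zero
  long a = std::lround(x);
  // fcvtp*: round toward +infinity
  long p = (long)std::ceil(x);
  // fcvtm*: round toward -infinity
  long m = (long)std::floor(x);
  std::printf("n=%ld a=%ld p=%ld m=%ld\n", n, a, p, m); // n=2 a=3 p=3 m=2
  return 0;
}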
Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == ARM::BI__clear_cache) {
+ assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
- // Oddly people write this call without args on occasion and gcc accepts
- // it - it's also marked as varargs in the description file.
SmallVector<Value*, 2> Ops;
- for (unsigned i = 0; i < E->getNumArgs(); i++)
+ for (unsigned i = 0; i < 2; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
@@ -1657,11 +3937,14 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- if (BuiltinID == ARM::BI__builtin_arm_ldrexd) {
+ if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
+ (BuiltinID == ARM::BI__builtin_arm_ldrex &&
+ getContext().getTypeSize(E->getType()) == 64)) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
Value *LdPtr = EmitScalarExpr(E->getArg(0));
- Value *Val = Builder.CreateCall(F, LdPtr, "ldrexd");
+ Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
+ "ldrexd");
Value *Val0 = Builder.CreateExtractValue(Val, 1);
Value *Val1 = Builder.CreateExtractValue(Val, 0);
@@ -1670,15 +3953,37 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
- return Builder.CreateOr(Val, Val1);
+ Val = Builder.CreateOr(Val, Val1);
+ return Builder.CreateBitCast(Val, ConvertType(E->getType()));
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_ldrex) {
+ Value *LoadAddr = EmitScalarExpr(E->getArg(0));
+
+ QualType Ty = E->getType();
+ llvm::Type *RealResTy = ConvertType(Ty);
+ llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_ldrex, LoadAddr->getType());
+ Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+
+ if (RealResTy->isPointerTy())
+ return Builder.CreateIntToPtr(Val, RealResTy);
+ else {
+ Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
+ return Builder.CreateBitCast(Val, RealResTy);
+ }
}
- if (BuiltinID == ARM::BI__builtin_arm_strexd) {
+ if (BuiltinID == ARM::BI__builtin_arm_strexd ||
+ (BuiltinID == ARM::BI__builtin_arm_strex &&
+ getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
- Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int64Ty, One);
+ Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
@@ -1687,10 +3992,83 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
Value *Arg1 = Builder.CreateExtractValue(Val, 1);
- Value *StPtr = EmitScalarExpr(E->getArg(1));
+ Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
}
+ if (BuiltinID == ARM::BI__builtin_arm_strex) {
+ Value *StoreVal = EmitScalarExpr(E->getArg(0));
+ Value *StoreAddr = EmitScalarExpr(E->getArg(1));
+
+ QualType Ty = E->getArg(0)->getType();
+ llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
+ getContext().getTypeSize(Ty));
+ StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
+
+ if (StoreVal->getType()->isPointerTy())
+ StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
+ else {
+ StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
+ }
+
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_strex, StoreAddr->getType());
+ return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_clrex) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
+ return Builder.CreateCall(F);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_sevl) {
+ Function *F = CGM.getIntrinsic(Intrinsic::arm_sevl);
+ return Builder.CreateCall(F);
+ }
+
+ // CRC32
+ Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
+ switch (BuiltinID) {
+ case ARM::BI__builtin_arm_crc32b:
+ CRCIntrinsicID = Intrinsic::arm_crc32b; break;
+ case ARM::BI__builtin_arm_crc32cb:
+ CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
+ case ARM::BI__builtin_arm_crc32h:
+ CRCIntrinsicID = Intrinsic::arm_crc32h; break;
+ case ARM::BI__builtin_arm_crc32ch:
+ CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
+ case ARM::BI__builtin_arm_crc32w:
+ case ARM::BI__builtin_arm_crc32d:
+ CRCIntrinsicID = Intrinsic::arm_crc32w; break;
+ case ARM::BI__builtin_arm_crc32cw:
+ case ARM::BI__builtin_arm_crc32cd:
+ CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
+ }
+
+ if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
+ Value *Arg0 = EmitScalarExpr(E->getArg(0));
+ Value *Arg1 = EmitScalarExpr(E->getArg(1));
+
+ // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
+ // intrinsics, hence we need different codegen for these cases.
+ if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
+ BuiltinID == ARM::BI__builtin_arm_crc32cd) {
+ Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
+ Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
+ Value *Arg1b = Builder.CreateLShr(Arg1, C1);
+ Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
+
+ Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+ Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
+ return Builder.CreateCall2(F, Res, Arg1b);
+ } else {
+ Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
+
+ Function *F = CGM.getIntrinsic(CRCIntrinsicID);
+ return Builder.CreateCall2(F, Arg0, Arg1);
+ }
+ }
+
SmallVector<Value*, 4> Ops;
llvm::Value *Align = 0;
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
@@ -1836,9 +4214,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vabsq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vabs, Ty),
Ops, "vabs");
- case ARM::BI__builtin_neon_vaddhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vaddhn, Ty),
- Ops, "vaddhn");
+ case ARM::BI__builtin_neon_vaddhn_v: {
+ llvm::VectorType *SrcTy =
+ llvm::VectorType::getExtendedElementVectorType(VTy);
+
+ // %sum = add <4 x i32> %lhs, %rhs
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+ Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
+
+ // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
+ Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
+ SrcTy->getScalarSizeInBits() / 2);
+ ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+ Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
+
+ // %res = trunc <4 x i32> %high to <4 x i16>
+ return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
+ }
case ARM::BI__builtin_neon_vcale_v:
std::swap(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vcage_v: {
@@ -2142,6 +4535,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vmulp, Ty),
Ops, "vmul");
case ARM::BI__builtin_neon_vmull_v:
+ // FIXME: the integer vmull operations could be emitted in terms of pure
+ // LLVM IR (2 exts followed by a mul). Unfortunately LLVM has a habit of
+ // hoisting the exts outside loops. Until global ISel comes along that can
+ // see through such movement, this leads to bad CodeGen. So we need an
+ // intrinsic for now.
Int = usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
@@ -2195,12 +4593,28 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vqaddq_v:
Int = usgn ? Intrinsic::arm_neon_vqaddu : Intrinsic::arm_neon_vqadds;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqadd");
- case ARM::BI__builtin_neon_vqdmlal_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlal, Ty),
- Ops, "vqdmlal");
- case ARM::BI__builtin_neon_vqdmlsl_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmlsl, Ty),
- Ops, "vqdmlsl");
+ case ARM::BI__builtin_neon_vqdmlal_v: {
+ SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
+ Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
+ MulOps, "vqdmlal");
+
+ SmallVector<Value *, 2> AddOps;
+ AddOps.push_back(Ops[0]);
+ AddOps.push_back(Mul);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqadds, Ty),
+ AddOps, "vqdmlal");
+ }
+ case ARM::BI__builtin_neon_vqdmlsl_v: {
+ SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
+ Value *Mul = EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmull, Ty),
+ MulOps, "vqdmlsl");
+
+ SmallVector<Value *, 2> SubOps;
+ SubOps.push_back(Ops[0]);
+ SubOps.push_back(Mul);
+ return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqsubs, Ty),
+ SubOps, "vqdmlsl");
+ }
case ARM::BI__builtin_neon_vqdmulh_v:
case ARM::BI__builtin_neon_vqdmulhq_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqdmulh, Ty),
@@ -2320,12 +4734,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops, "vshrn_n", 1, true);
case ARM::BI__builtin_neon_vshr_n_v:
case ARM::BI__builtin_neon_vshrq_n_v:
- Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
- if (usgn)
- return Builder.CreateLShr(Ops[0], Ops[1], "vshr_n");
- else
- return Builder.CreateAShr(Ops[0], Ops[1], "vshr_n");
+ return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, usgn, "vshr_n");
case ARM::BI__builtin_neon_vsri_n_v:
case ARM::BI__builtin_neon_vsriq_n_v:
rightShift = true;
@@ -2337,12 +4746,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case ARM::BI__builtin_neon_vsra_n_v:
case ARM::BI__builtin_neon_vsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
- Ops[2] = EmitNeonShiftVector(Ops[2], Ty, false);
- if (usgn)
- Ops[1] = Builder.CreateLShr(Ops[1], Ops[2], "vsra_n");
- else
- Ops[1] = Builder.CreateAShr(Ops[1], Ops[2], "vsra_n");
+ Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
return Builder.CreateAdd(Ops[0], Ops[1]);
case ARM::BI__builtin_neon_vst1_v:
case ARM::BI__builtin_neon_vst1q_v:
@@ -2400,9 +4804,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Ops.push_back(Align);
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst4lane, Ty),
Ops, "");
- case ARM::BI__builtin_neon_vsubhn_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vsubhn, Ty),
- Ops, "vsubhn");
+ case ARM::BI__builtin_neon_vsubhn_v: {
+ llvm::VectorType *SrcTy =
+ llvm::VectorType::getExtendedElementVectorType(VTy);
+
+ // %diff = sub <4 x i32> %lhs, %rhs
+ Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
+ Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
+ Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
+
+ // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
+ Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
+ SrcTy->getScalarSizeInBits() / 2);
+ ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
+ Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
+
+ // %res = trunc <4 x i32> %high to <4 x i16>
+ return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
+ }
case ARM::BI__builtin_neon_vtbl1_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
Ops, "vtbl1");
@@ -2560,19 +4979,15 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
case X86::BI__builtin_ia32_ldmxcsr: {
- llvm::Type *PtrTy = Int8PtrTy;
- Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
- Builder.CreateBitCast(Tmp, PtrTy));
+ Builder.CreateBitCast(Tmp, Int8PtrTy));
}
case X86::BI__builtin_ia32_stmxcsr: {
- llvm::Type *PtrTy = Int8PtrTy;
- Value *One = llvm::ConstantInt::get(Int32Ty, 1);
- Value *Tmp = Builder.CreateAlloca(Int32Ty, One);
+ Value *Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
- Builder.CreateBitCast(Tmp, PtrTy));
+ Builder.CreateBitCast(Tmp, Int8PtrTy));
return Builder.CreateLoad(Tmp, "stmxcsr");
}
case X86::BI__builtin_ia32_storehps:
@@ -2697,7 +5112,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movntpd256:
case X86::BI__builtin_ia32_movntdq:
case X86::BI__builtin_ia32_movntdq256:
- case X86::BI__builtin_ia32_movnti: {
+ case X86::BI__builtin_ia32_movnti:
+ case X86::BI__builtin_ia32_movnti64: {
llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
Builder.getInt32(1));
@@ -2707,7 +5123,16 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
"cast");
StoreInst *SI = Builder.CreateStore(Ops[1], BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
- SI->setAlignment(16);
+
+ // If the operand is an integer, we can't assume alignment. Otherwise,
+ // assume natural alignment.
+ QualType ArgTy = E->getArg(1)->getType();
+ unsigned Align;
+ if (ArgTy->isIntegerType())
+ Align = 1;
+ else
+ Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
+ SI->setAlignment(Align);
return SI;
}
// 3DNow!
@@ -2761,6 +5186,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
return Builder.CreateExtractValue(Call, 1);
}
+ // AVX2 broadcast
+ case X86::BI__builtin_ia32_vbroadcastsi256: {
+ Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
+ Builder.CreateStore(Ops[0], VecTmp);
+ Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
+ return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
+ }
}
}
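The vaddhn/vsubhn cases in the ARM hunk above stop using the dedicated arm_neon_vaddhn/vsubhn intrinsics and open-code the operation as plain IR: a full-width add or sub, a logical shift right by half the element width, then a truncate. A scalar sketch of one i32 to i16 lane of that narrowing-high-half computation (the helper names are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

// One lane of vaddhn/vsubhn: the IR emitted above does this element-wise
// across the whole vector (add/sub, lshr by 16, trunc to i16).
static uint16_t addhn_lane(uint32_t lhs, uint32_t rhs) {
  uint32_t sum = lhs + rhs;      // %sum  = add i32 %lhs, %rhs
  return (uint16_t)(sum >> 16);  // lshr by half the width, then trunc
}

static uint16_t subhn_lane(uint32_t lhs, uint32_t rhs) {
  uint32_t diff = lhs - rhs;     // %diff = sub i32 %lhs, %rhs
  return (uint16_t)(diff >> 16);
}

int main() {
  std::printf("%#x %#x\n", (unsigned)addhn_lane(0x12340000u, 0x00010000u),   // 0x1235
                           (unsigned)subhn_lane(0x12340000u, 0x00010000u));  // 0x1233
  return 0;
}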
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index fc72008af886..eaf31bb6f67f 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -44,8 +44,8 @@ RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
}
llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
- CGF.EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
- E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
+ ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
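In the CGBuiltin.cpp hunk further up, the new __builtin_arm_crc32d/crc32cd handling is lowered as two chained calls to the 32-bit crc32{c,}w intrinsic, one on the low word and one on the high word of the 64-bit operand. A sketch of that splitting with an ordinary function standing in for the hardware CRC step (crc32w below is a placeholder for the sketch, not a real library call):

#include <cstdint>
#include <cstdio>

// Stand-in for the hardware crc32w operation so the sketch links; the real
// lowering calls the llvm.arm.crc32w / crc32cw intrinsic instead.
static uint32_t crc32w(uint32_t acc, uint32_t data) { return acc ^ data; }

// Mirrors the codegen: feed the low 32 bits, then the high 32 bits,
// through two crc32w steps.
static uint32_t crc32d_via_crc32w(uint32_t acc, uint64_t data) {
  acc = crc32w(acc, (uint32_t)data);           // low word first
  return crc32w(acc, (uint32_t)(data >> 32));  // then the high word
}

int main() {
  std::printf("%#x\n", crc32d_via_crc32w(0u, 0x0000000100000002ull));
  return 0;
}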
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 983cb9224ade..cfb2d6291b8a 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -34,6 +34,11 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
+ // Producing an alias to a base class ctor/dtor can degrade debug quality
+ // as the debugger cannot tell them apart.
+ if (getCodeGenOpts().OptimizationLevel == 0)
+ return true;
+
// If the destructor doesn't have a trivial body, we have to emit it
// separately.
if (!D->hasTrivialBody())
@@ -82,60 +87,44 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!UniqueBase)
return true;
- /// If we don't have a definition for the destructor yet, don't
- /// emit. We can't emit aliases to declarations; that's just not
- /// how aliases work.
- const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
- if (!BaseD->isImplicit() && !BaseD->hasBody())
- return true;
-
// If the base is at a non-zero offset, give up.
const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
return true;
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
- GlobalDecl(BaseD, Dtor_Base));
+ GlobalDecl(BaseD, Dtor_Base),
+ false);
}
/// Try to emit a definition as a global alias for another definition.
+/// If \p InEveryTU is true, we know that an equivalent alias can be produced
+/// in every translation unit.
bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
- GlobalDecl TargetDecl) {
+ GlobalDecl TargetDecl,
+ bool InEveryTU) {
if (!getCodeGenOpts().CXXCtorDtorAliases)
return true;
// The alias will use the linkage of the referrent. If we can't
// support aliases with that linkage, fail.
- llvm::GlobalValue::LinkageTypes Linkage
- = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
-
- switch (Linkage) {
- // We can definitely emit aliases to definitions with external linkage.
- case llvm::GlobalValue::ExternalLinkage:
- case llvm::GlobalValue::ExternalWeakLinkage:
- break;
-
- // Same with local linkage.
- case llvm::GlobalValue::InternalLinkage:
- case llvm::GlobalValue::PrivateLinkage:
- case llvm::GlobalValue::LinkerPrivateLinkage:
- break;
-
- // We should try to support linkonce linkages.
- case llvm::GlobalValue::LinkOnceAnyLinkage:
- case llvm::GlobalValue::LinkOnceODRLinkage:
- return true;
+ llvm::GlobalValue::LinkageTypes Linkage = getFunctionLinkage(AliasDecl);
- // Other linkages will probably never be supported.
- default:
+ // We can't use an alias if the linkage is not valid for one.
+ if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
- }
- llvm::GlobalValue::LinkageTypes TargetLinkage
- = getFunctionLinkage(cast<FunctionDecl>(TargetDecl.getDecl()));
+ llvm::GlobalValue::LinkageTypes TargetLinkage =
+ getFunctionLinkage(TargetDecl);
- if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
- return true;
+ // Check if we have it already.
+ StringRef MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
+ if (Entry && !Entry->isDeclaration())
+ return false;
+ if (Replacements.count(MangledName))
+ return false;
// Derive the type for the alias.
llvm::PointerType *AliasType
@@ -149,15 +138,41 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
if (Ref->getType() != AliasType)
Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+ // Instead of creating an alias to a linkonce_odr, replace all of the uses
+ // of the aliasee.
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
+ (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
+ !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
+ // FIXME: An extern template instantiation will create functions with
+ // linkage "AvailableExternally". In libc++, some classes also define
+ // members with attribute "AlwaysInline" and expect no reference to
+ // be generated. It is desirable to re-enable this optimization after
+ // corresponding LLVM changes.
+ Replacements[MangledName] = Aliasee;
+ return false;
+ }
+
+ if (!InEveryTU) {
+ /// If we don't have a definition for the destructor yet, don't
+ /// emit. We can't emit aliases to declarations; that's just not
+ /// how aliases work.
+ if (Ref->isDeclaration())
+ return true;
+ }
+
+ // Don't create an alias to a linker weak symbol. This avoids producing
+ // different COMDATs in different TUs. Another option would be to
+ // output the alias both for weak_odr and linkonce_odr, but that
+ // requires explicit comdat support in the IL.
+ if (llvm::GlobalValue::isWeakForLinker(TargetLinkage))
+ return true;
+
// Create the alias with no name.
llvm::GlobalAlias *Alias =
new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
// Switch any previous uses to the alias.
- StringRef MangledName = getMangledName(AliasDecl);
- llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
if (Entry) {
- assert(Entry->isDeclaration() && "definition already exists for alias");
assert(Entry->getType() == AliasType &&
"declaration exists with different type");
Alias->takeName(Entry);
@@ -173,37 +188,26 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
return false;
}
-void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
- // The constructor used for constructing this as a complete class;
- // constucts the virtual bases, then calls the base constructor.
- if (!D->getParent()->isAbstract()) {
- // We don't need to emit the complete ctor if the class is abstract.
- EmitGlobal(GlobalDecl(D, Ctor_Complete));
- }
-
- // The constructor used for constructing this as a base class;
- // ignores virtual bases.
- if (getTarget().getCXXABI().hasConstructorVariants())
- EmitGlobal(GlobalDecl(D, Ctor_Base));
-}
-
void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
CXXCtorType ctorType) {
// The complete constructor is equivalent to the base constructor
// for classes with no virtual bases. Try to emit it as an alias.
if (getTarget().getCXXABI().hasConstructorVariants() &&
- ctorType == Ctor_Complete &&
!ctor->getParent()->getNumVBases() &&
- !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
- GlobalDecl(ctor, Ctor_Base)))
- return;
+ (ctorType == Ctor_Complete || ctorType == Ctor_Base)) {
+ bool ProducedAlias =
+ !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
+ GlobalDecl(ctor, Ctor_Base), true);
+ if (ctorType == Ctor_Complete && ProducedAlias)
+ return;
+ }
const CGFunctionInfo &fnInfo =
getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo));
- setFunctionLinkage(ctor, fn);
+ setFunctionLinkage(GlobalDecl(ctor, ctorType), fn);
CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
@@ -229,31 +233,22 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
/*ForVTable=*/false));
}
-void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
- // The destructor in a virtual table is always a 'deleting'
- // destructor, which calls the complete destructor and then uses the
- // appropriate operator delete.
- if (D->isVirtual())
- EmitGlobal(GlobalDecl(D, Dtor_Deleting));
-
- // The destructor used for destructing this as a most-derived class;
- // call the base destructor and then destructs any virtual bases.
- EmitGlobal(GlobalDecl(D, Dtor_Complete));
-
- // The destructor used for destructing this as a base class; ignores
- // virtual bases.
- EmitGlobal(GlobalDecl(D, Dtor_Base));
-}
-
void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
CXXDtorType dtorType) {
// The complete destructor is equivalent to the base destructor for
// classes with no virtual bases, so try to emit it as an alias.
- if (dtorType == Dtor_Complete &&
- !dtor->getParent()->getNumVBases() &&
- !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
- GlobalDecl(dtor, Dtor_Base)))
- return;
+ if (!dtor->getParent()->getNumVBases() &&
+ (dtorType == Dtor_Complete || dtorType == Dtor_Base)) {
+ bool ProducedAlias =
+ !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
+ GlobalDecl(dtor, Dtor_Base), true);
+ if (ProducedAlias) {
+ if (dtorType == Dtor_Complete)
+ return;
+ if (dtor->isVirtual())
+ getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
+ }
+ }
// The base destructor is equivalent to the base destructor of its
// base class if there is exactly one non-virtual base class with a
@@ -267,7 +262,7 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
llvm::Function *fn =
cast<llvm::Function>(GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo));
- setFunctionLinkage(dtor, fn);
+ setFunctionLinkage(GlobalDecl(dtor, dtorType), fn);
CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
@@ -278,47 +273,51 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
CXXDtorType dtorType,
- const CGFunctionInfo *fnInfo) {
+ const CGFunctionInfo *fnInfo,
+ llvm::FunctionType *fnType) {
GlobalDecl GD(dtor, dtorType);
StringRef name = getMangledName(GD);
if (llvm::GlobalValue *existing = GetGlobalValue(name))
return existing;
- if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
-
- llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
+ if (!fnType) {
+ if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
+ fnType = getTypes().GetFunctionType(*fnInfo);
+ }
return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
/*ForVTable=*/false));
}
-static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VTableIndex,
- llvm::Value *This, llvm::Type *Ty) {
+static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Type *Ty,
+ const CXXRecordDecl *RD) {
+ assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
+ "No kext in Microsoft ABI");
+ GD = GD.getCanonicalDecl();
+ CodeGenModule &CGM = CGF.CGM;
+ llvm::Value *VTable = CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
Ty = Ty->getPointerTo()->getPointerTo();
-
- llvm::Value *VTable = CGF.GetVTablePtr(This, Ty);
- llvm::Value *VFuncPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfn");
+ VTable = CGF.Builder.CreateBitCast(VTable, Ty);
+ assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
+ uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
+ uint64_t AddressPoint =
+ CGM.getItaniumVTableContext().getVTableLayout(RD)
+ .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += AddressPoint;
+ llvm::Value *VFuncPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
return CGF.Builder.CreateLoad(VFuncPtr);
}
-llvm::Value *
-CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
- llvm::Type *Ty) {
- MD = MD->getCanonicalDecl();
- uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
-
- return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
-}
-
-/// BuildVirtualCall - This routine is to support gcc's kext ABI making
+/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
llvm::Value *
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
NestedNameSpecifier *Qual,
llvm::Type *Ty) {
- llvm::Value *VTable = 0;
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
@@ -330,20 +329,8 @@ CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD))
return BuildAppleKextVirtualDestructorCall(DD, Dtor_Complete, RD);
-
- VTable = CGM.getVTables().GetAddrOfVTable(RD);
- Ty = Ty->getPointerTo()->getPointerTo();
- VTable = Builder.CreateBitCast(VTable, Ty);
- assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
- MD = MD->getCanonicalDecl();
- uint64_t VTableIndex = CGM.getVTableContext().getMethodVTableIndex(MD);
- uint64_t AddressPoint =
- CGM.getVTableContext().getVTableLayout(RD)
- .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
- VTableIndex += AddressPoint;
- llvm::Value *VFuncPtr =
- Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- return Builder.CreateLoad(VFuncPtr);
+
+ return ::BuildAppleKextVirtualCall(*this, MD, Ty, RD);
}
/// BuildVirtualCall - This routine makes indirect vtable call for
@@ -353,42 +340,16 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD) {
- llvm::Value * Callee = 0;
const CXXMethodDecl *MD = cast<CXXMethodDecl>(DD);
// FIXME. Dtor_Base dtor is always direct!!
// It need be somehow inline expanded into the caller.
// -O does that. But need to support -O0 as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
- const CGFunctionInfo &FInfo =
- CGM.getTypes().arrangeCXXDestructor(cast<CXXDestructorDecl>(MD),
- Dtor_Complete);
+ const CGFunctionInfo &FInfo =
+ CGM.getTypes().arrangeCXXDestructor(DD, Dtor_Complete);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
-
- llvm::Value *VTable = CGM.getVTables().GetAddrOfVTable(RD);
- Ty = Ty->getPointerTo()->getPointerTo();
- VTable = Builder.CreateBitCast(VTable, Ty);
- DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
- uint64_t VTableIndex =
- CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
- uint64_t AddressPoint =
- CGM.getVTableContext().getVTableLayout(RD)
- .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
- VTableIndex += AddressPoint;
- llvm::Value *VFuncPtr =
- Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- Callee = Builder.CreateLoad(VFuncPtr);
+ return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
- return Callee;
+ return 0;
}
-
-llvm::Value *
-CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
- llvm::Value *This, llvm::Type *Ty) {
- DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
- uint64_t VTableIndex =
- CGM.getVTableContext().getMethodVTableIndex(GlobalDecl(DD, Type));
-
- return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
-}
-
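The EmitCXXConstructor/EmitCXXDestructor changes above try to emit the complete-object variant as an alias of (or a replacement for) the base-object variant whenever the class has no virtual bases, because the two variants then do identical work. In Itanium mangling these are the C1/C2 and D1/D2 symbols. A minimal illustration of a class where the two constructor variants coincide (the types are made up for the example):

// With no virtual bases, constructing the complete object and constructing
// the base subobject are the same work, so the complete-object constructor
// _ZN3FooC1Ei can be emitted as an alias of the base-object constructor
// _ZN3FooC2Ei. 'Base' and 'Foo' are illustrative types only.
struct Base { int b; Base() : b(0) {} };
struct Foo : Base {
  int x;
  Foo(int v) : x(v) {}  // no virtual bases => C1 and C2 are equivalent
};

int main() {
  Foo f(42);
  return f.x == 42 ? 0 : 1;
}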
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 68fecb2d0cb8..412b27814ac8 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -1,4 +1,4 @@
-//===----- CGCXXABI.cpp - Interface to C++ ABIs -----------------*- C++ -*-===//
+//===----- CGCXXABI.cpp - Interface to C++ ABIs ---------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -212,13 +212,6 @@ llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
-void CGCXXABI::EmitGuardedInit(CodeGenFunction &CGF,
- const VarDecl &D,
- llvm::GlobalVariable *GV,
- bool PerformInit) {
- ErrorUnsupportedABI(CGF, "static local variable initialization");
-}
-
void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
const VarDecl &D,
llvm::Constant *dtor,
@@ -227,7 +220,7 @@ void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
// The default behavior is to use atexit.
- CGF.registerGlobalDtorWithAtExit(dtor, addr);
+ CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
}
/// Returns the adjustment, in bytes, required for the given
@@ -251,8 +244,31 @@ llvm::Constant *CGCXXABI::getMemberPointerAdjustment(const CastExpr *E) {
E->path_end());
}
-llvm::BasicBlock *CGCXXABI::EmitCtorCompleteObjectHandler(
- CodeGenFunction &CGF) {
+CharUnits CGCXXABI::getMemberPointerPathAdjustment(const APValue &MP) {
+ // TODO: Store base specifiers in APValue member pointer paths so we can
+ // easily reuse CGM.GetNonVirtualBaseClassOffset().
+ const ValueDecl *MPD = MP.getMemberPointerDecl();
+ CharUnits ThisAdjustment = CharUnits::Zero();
+ ArrayRef<const CXXRecordDecl*> Path = MP.getMemberPointerPath();
+ bool DerivedMember = MP.isMemberPointerToDerivedMember();
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(MPD->getDeclContext());
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ const CXXRecordDecl *Base = RD;
+ const CXXRecordDecl *Derived = Path[I];
+ if (DerivedMember)
+ std::swap(Base, Derived);
+ ThisAdjustment +=
+ getContext().getASTRecordLayout(Derived).getBaseClassOffset(Base);
+ RD = Path[I];
+ }
+ if (DerivedMember)
+ ThisAdjustment = -ThisAdjustment;
+ return ThisAdjustment;
+}
+
+llvm::BasicBlock *
+CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {
if (CGM.getTarget().getCXXABI().hasConstructorVariants())
llvm_unreachable("shouldn't be called in this ABI");
@@ -270,3 +286,7 @@ LValue CGCXXABI::EmitThreadLocalDeclRefExpr(CodeGenFunction &CGF,
ErrorUnsupportedABI(CGF, "odr-use of thread_local global");
return LValue();
}
+
+bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
+ return false;
+}
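getMemberPointerPathAdjustment() added above walks the inheritance path recorded in an APValue member pointer and accumulates the non-virtual base-class offsets, negating the sum when the path runs from a derived type to a base type. A small standalone example of the conversion whose offset that routine computes (the A/B/C hierarchy is illustrative only):

#include <cstdio>

struct A { int a; };
struct B { int b; };
struct C : A, B {};  // the B subobject lives at a non-zero offset inside C

int main() {
  int B::*pb = &B::b;
  int C::*pc = pb;  // base-to-derived conversion: the offset of C's B
                    // subobject is folded into the member pointer, which is
                    // the kind of adjustment the routine above computes
  C c{};
  c.*pc = 7;        // writes the same int as the B subobject's 'b'
  std::printf("%d\n", static_cast<B&>(c).b);  // prints 7
  return 0;
}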
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 1e4da631d6f6..9e9a2a7aaf9b 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -97,8 +97,12 @@ public:
return *MangleCtx;
}
- /// Returns true if the given instance method is one of the
- /// kinds that the ABI says returns 'this'.
+ /// Returns true if the given constructor or destructor is one of the
+ /// kinds that the ABI says returns 'this' (only applies when called
+ /// non-virtually for destructors).
+ ///
+ /// There currently is no way to indicate if a destructor returns 'this'
+ /// when called virtually, and code generation does not support the case.
virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
/// Returns true if the given record type should be returned indirectly.
@@ -192,6 +196,12 @@ protected:
/// is required.
llvm::Constant *getMemberPointerAdjustment(const CastExpr *E);
+ /// \brief Computes the non-virtual adjustment needed for a member pointer
+ /// conversion along an inheritance path stored in an APValue. Unlike
+ /// getMemberPointerAdjustment(), the adjustment can be negative if the path
+ /// is from a derived type to a base type.
+ CharUnits getMemberPointerPathAdjustment(const APValue &MP);
+
public:
/// Adjust the given non-null pointer to an object of polymorphic
/// type to point to the complete object.
@@ -202,11 +212,16 @@ public:
llvm::Value *ptr,
QualType type) = 0;
+ virtual llvm::Value *GetVirtualBaseClassOffset(CodeGenFunction &CGF,
+ llvm::Value *This,
+ const CXXRecordDecl *ClassDecl,
+ const CXXRecordDecl *BaseClassDecl) = 0;
+
/// Build the signature of the given constructor variant by adding
- /// any required parameters. For convenience, ResTy has been
- /// initialized to 'void', and ArgTys has been initialized with the
- /// type of 'this' (although this may be changed by the ABI) and
- /// will have the formal parameters added to it afterwards.
+ /// any required parameters. For convenience, ArgTys has been initialized
+ /// with the type of 'this' and ResTy has been initialized with the type of
+ /// 'this' if HasThisReturn(GlobalDecl(Ctor, T)) is true or 'void' otherwise
+ /// (although both may be changed by the ABI).
///
/// If there are ever any ABIs where the implicit parameters are
/// intermixed with the formal parameters, we can address those
@@ -216,46 +231,138 @@ public:
CanQualType &ResTy,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
- virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF);
+ virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD);
+
+ /// Emit the code to initialize hidden members required
+ /// to handle virtual inheritance, if needed by the ABI.
+ virtual void
+ initializeHiddenVirtualInheritanceMembers(CodeGenFunction &CGF,
+ const CXXRecordDecl *RD) {}
+
+ /// Emit constructor variants required by this ABI.
+ virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
/// Build the signature of the given destructor variant by adding
- /// any required parameters. For convenience, ResTy has been
- /// initialized to 'void' and ArgTys has been initialized with the
- /// type of 'this' (although this may be changed by the ABI).
+ /// any required parameters. For convenience, ArgTys has been initialized
+ /// with the type of 'this' and ResTy has been initialized with the type of
+ /// 'this' if HasThisReturn(GlobalDecl(Dtor, T)) is true or 'void' otherwise
+ /// (although both may be changed by the ABI).
virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
CXXDtorType T,
CanQualType &ResTy,
SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ /// Returns true if the given destructor type should be emitted as a linkonce
+ /// delegating thunk, regardless of whether the dtor is defined in this TU or
+ /// not.
+ virtual bool useThunkForDtorVariant(const CXXDestructorDecl *Dtor,
+ CXXDtorType DT) const = 0;
+
+ /// Emit destructor variants required by this ABI.
+ virtual void EmitCXXDestructors(const CXXDestructorDecl *D) = 0;
+
+ /// Get the type of the implicit "this" parameter used by a method. May return
+ /// zero if no specific type is applicable, e.g. if the ABI expects the "this"
+ /// parameter to point to some artificial offset in a complete object due to
+ /// vbases being reordered.
+ virtual const CXXRecordDecl *
+ getThisArgumentTypeForMethod(const CXXMethodDecl *MD) {
+ return MD->getParent();
+ }
+
+ /// Perform ABI-specific "this" argument adjustment required prior to
+ /// a virtual function call.
+ virtual llvm::Value *adjustThisArgumentForVirtualCall(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Value *This) {
+ return This;
+ }
+
/// Build the ABI-specific portion of the parameter list for a
/// function. This generally involves a 'this' parameter and
/// possibly some extra data for constructors and destructors.
///
/// ABIs may also choose to override the return type, which has been
- /// initialized with the formal return type of the function.
+ /// initialized with the type of 'this' if HasThisReturn(CGF.CurGD) is true or
+ /// the formal return type of the function otherwise.
virtual void BuildInstanceFunctionParams(CodeGenFunction &CGF,
QualType &ResTy,
FunctionArgList &Params) = 0;
+ /// Perform ABI-specific "this" parameter adjustment in a virtual function
+ /// prologue.
+ virtual llvm::Value *adjustThisParameterInVirtualFunctionPrologue(
+ CodeGenFunction &CGF, GlobalDecl GD, llvm::Value *This) {
+ return This;
+ }
+
/// Emit the ABI-specific prolog for the function.
virtual void EmitInstanceFunctionProlog(CodeGenFunction &CGF) = 0;
/// Emit the constructor call. Return the function that is called.
- virtual llvm::Value *EmitConstructorCall(CodeGenFunction &CGF,
+ virtual void EmitConstructorCall(CodeGenFunction &CGF,
const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating,
+ CXXCtorType Type,
+ bool ForVirtualBase, bool Delegating,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) = 0;
+ /// Emits the VTable definitions required for the given record type.
+ virtual void emitVTableDefinitions(CodeGenVTables &CGVT,
+ const CXXRecordDecl *RD) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constructor or a destructor. On return, NeedsVirtualOffset
+ /// tells if a virtual base adjustment is needed in order to get the offset
+ /// of the base subobject.
+ virtual llvm::Value *getVTableAddressPointInStructor(
+ CodeGenFunction &CGF, const CXXRecordDecl *RD, BaseSubobject Base,
+ const CXXRecordDecl *NearestVBase, bool &NeedsVirtualOffset) = 0;
+
+ /// Get the address point of the vtable for the given base subobject while
+ /// building a constexpr.
+ virtual llvm::Constant *
+ getVTableAddressPointForConstExpr(BaseSubobject Base,
+ const CXXRecordDecl *VTableClass) = 0;
+
+ /// Get the address of the vtable for the given record decl which should be
+ /// used for the vptr at the given offset in RD.
+ virtual llvm::GlobalVariable *getAddrOfVTable(const CXXRecordDecl *RD,
+ CharUnits VPtrOffset) = 0;
+
+ /// Build a virtual function pointer in the ABI-specific way.
+ virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Value *This,
+ llvm::Type *Ty) = 0;
+
/// Emit the ABI-specific virtual destructor call.
- virtual RValue EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- SourceLocation CallLoc,
- ReturnValueSlot ReturnValue,
- llvm::Value *This) = 0;
+ virtual void EmitVirtualDestructorCall(CodeGenFunction &CGF,
+ const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType,
+ SourceLocation CallLoc,
+ llvm::Value *This) = 0;
+
+ virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ CallArgList &CallArgs) {}
+
+ /// Emit any tables needed to implement virtual inheritance. For Itanium,
+ /// this emits virtual table tables. For the MSVC++ ABI, this emits virtual
+ /// base tables.
+ virtual void emitVirtualInheritanceTables(const CXXRecordDecl *RD) = 0;
+
+ virtual void setThunkLinkage(llvm::Function *Thunk, bool ForVTable) = 0;
+
+ virtual llvm::Value *performThisAdjustment(CodeGenFunction &CGF,
+ llvm::Value *This,
+ const ThisAdjustment &TA) = 0;
+
+ virtual llvm::Value *performReturnAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ret,
+ const ReturnAdjustment &RA) = 0;
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
@@ -266,6 +373,10 @@ public:
/// Gets the deleted virtual member call name.
virtual StringRef GetDeletedVirtualCallName() = 0;
+ /// \brief Returns true iff static data members that are initialized in the
+ /// class definition should have linkonce linkage.
+ virtual bool isInlineInitializedStaticDataMemberLinkOnce() { return false; }
+
/**************************** Array cookies ******************************/
/// Returns the extra size required in order to store the array
@@ -312,6 +423,9 @@ public:
QualType ElementType, llvm::Value *&NumElements,
llvm::Value *&AllocPtr, CharUnits &CookieSize);
+ /// Return whether the given global decl needs a VTT parameter.
+ virtual bool NeedsVTTParameter(GlobalDecl GD);
+
protected:
/// Returns the extra size required in order to store the array
/// cookie for the given type. Assumes that an array cookie is
@@ -344,7 +458,8 @@ public:
/// - a static local variable
/// - a static data member of a class template instantiation
virtual void EmitGuardedInit(CodeGenFunction &CGF, const VarDecl &D,
- llvm::GlobalVariable *DeclPtr, bool PerformInit);
+ llvm::GlobalVariable *DeclPtr,
+ bool PerformInit) = 0;
/// Emit code to force the execution of a destructor during global
/// teardown. The default implementation of this uses atexit.
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index b0f460ec0e7b..22f2467021e1 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -1,4 +1,4 @@
-//===--- CGCall.cpp - Encapsulate calling convention details ----*- C++ -*-===//
+//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -22,6 +22,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
@@ -41,6 +42,8 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
+ case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
+ case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
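The two added cases map the Win64 and SysV x86-64 calling-convention attributes onto their LLVM equivalents. Roughly, for hypothetical declarations:

    void __attribute__((ms_abi))   f(void);   // CC_X86_64Win64 -> llvm::CallingConv::X86_64_Win64
    void __attribute__((sysv_abi)) g(void);   // CC_X86_64SysV  -> llvm::CallingConv::X86_64_SysV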
@@ -103,24 +106,12 @@ static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
return arrangeLLVMFunctionInfo(CGT, prefix, FTP, FTP->getExtInfo());
}
-/// Given the formal ext-info of a C++ instance method, adjust it
-/// according to the C++ ABI in effect.
-static void adjustCXXMethodInfo(CodeGenTypes &CGT,
- FunctionType::ExtInfo &extInfo,
- bool isVariadic) {
- if (extInfo.getCC() == CC_Default) {
- CallingConv CC = CGT.getContext().getDefaultCXXMethodCallConv(isVariadic);
- extInfo = extInfo.withCallingConv(CC);
- }
-}
-
/// Arrange the argument and result information for a free function (i.e.
/// not a C++ or ObjC instance method) of the given type.
static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
SmallVectorImpl<CanQualType> &prefix,
CanQual<FunctionProtoType> FTP) {
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- adjustCXXMethodInfo(CGT, extInfo, FTP->isVariadic());
return arrangeLLVMFunctionInfo(CGT, prefix, FTP, extInfo);
}
@@ -160,6 +151,8 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
+/// (A zero value of RD means we don't have any meaningful "this" argument type,
+/// so we fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
@@ -168,7 +161,10 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
SmallVector<CanQualType, 16> argTypes;
// Add the 'this' pointer.
- argTypes.push_back(GetThisType(Context, RD));
+ if (RD)
+ argTypes.push_back(GetThisType(Context, RD));
+ else
+ argTypes.push_back(Context.VoidPtrTy);
return ::arrangeCXXMethodType(*this, argTypes,
FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
@@ -180,14 +176,15 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
- assert(!isa<CXXConstructorDecl>(MD) && "wrong method for contructors!");
+ assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");
CanQual<FunctionProtoType> prototype = GetFormalType(MD);
if (MD->isInstance()) {
// The abstract case is perfectly fine.
- return arrangeCXXMethodType(MD->getParent(), prototype.getTypePtr());
+ const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
+ return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
}
return arrangeFreeFunctionType(prototype);
@@ -200,7 +197,10 @@ CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
CXXCtorType ctorKind) {
SmallVector<CanQualType, 16> argTypes;
argTypes.push_back(GetThisType(Context, D->getParent()));
- CanQualType resultType = Context.VoidTy;
+
+ GlobalDecl GD(D, ctorKind);
+ CanQualType resultType =
+ TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
@@ -213,7 +213,6 @@ CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
argTypes.push_back(FTP->getArgType(i));
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- adjustCXXMethodInfo(*this, extInfo, FTP->isVariadic());
return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo, required);
}
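With this change the constructor's result type comes from HasThisReturn() instead of being hard-coded to void, so ABIs whose constructors return 'this' (for example the ARM flavour of the Itanium ABI) get the correct signature up front. A sketch of the effect, illustrative only:

    struct S { S(); };
    // Arranged prototype when HasThisReturn() is true:   S* ctor(S* this)
    // Arranged prototype when HasThisReturn() is false:  void ctor(S* this)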
@@ -225,7 +224,10 @@ CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType dtorKind) {
SmallVector<CanQualType, 2> argTypes;
argTypes.push_back(GetThisType(Context, D->getParent()));
- CanQualType resultType = Context.VoidTy;
+
+ GlobalDecl GD(D, dtorKind);
+ CanQualType resultType =
+ TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
@@ -234,7 +236,6 @@ CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- adjustCXXMethodInfo(*this, extInfo, false);
return arrangeLLVMFunctionInfo(resultType, argTypes, extInfo,
RequiredArgs::All);
}
@@ -322,6 +323,7 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
+ CodeGenModule &CGM,
const CallArgList &args,
const FunctionType *fnType,
unsigned numExtraRequiredArgs) {
@@ -340,8 +342,9 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
// explicitly use the variadic convention for unprototyped calls,
// treat all of the arguments as required but preserve the nominal
// possibility of variadics.
- } else if (CGT.CGM.getTargetCodeGenInfo()
- .isNoProtoCallVariadic(args, cast<FunctionNoProtoType>(fnType))) {
+ } else if (CGM.getTargetCodeGenInfo()
+ .isNoProtoCallVariadic(args,
+ cast<FunctionNoProtoType>(fnType))) {
required = RequiredArgs(args.size());
}
@@ -356,7 +359,7 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, args, fnType, 0);
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
}
/// A block function call is essentially a free-function call with an
@@ -364,7 +367,7 @@ CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, args, fnType, 1);
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
}
const CGFunctionInfo &
@@ -393,7 +396,6 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
argTypes.push_back(Context.getCanonicalParamType(i->Ty));
FunctionType::ExtInfo info = FPT->getExtInfo();
- adjustCXXMethodInfo(*this, info, FPT->isVariadic());
return arrangeLLVMFunctionInfo(GetReturnType(FPT->getResultType()),
argTypes, info, required);
}
@@ -642,6 +644,10 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
+///
+/// This behaves as if the value were coerced through memory, so on big-endian
+/// targets the high bits are preserved in a truncation, while little-endian
+/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
llvm::Type *Ty,
CodeGenFunction &CGF) {
@@ -661,8 +667,25 @@ static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
if (isa<llvm::PointerType>(DestIntTy))
DestIntTy = CGF.IntPtrTy;
- if (Val->getType() != DestIntTy)
- Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+ if (Val->getType() != DestIntTy) {
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ if (DL.isBigEndian()) {
+ // Preserve the high bits on big-endian targets.
+ // That is what memory coercion does.
+ uint64_t SrcSize = DL.getTypeAllocSizeInBits(Val->getType());
+ uint64_t DstSize = DL.getTypeAllocSizeInBits(DestIntTy);
+ if (SrcSize > DstSize) {
+ Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
+ Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
+ } else {
+ Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
+ Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
+ }
+ } else {
+ // Little-endian targets preserve the low bits. No shifts required.
+ Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
+ }
+ }
if (isa<llvm::PointerType>(Ty))
Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
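The shift-then-truncate sequence reproduces what a store/load through memory would do: on a big-endian target the bytes that would land first in memory live in the high bits of the wider register, so a plain integer cast would keep the wrong half. A small self-contained sketch of the same arithmetic (illustrative only, not the CodeGen API):

    #include <cstdint>
    // Narrow a 64-bit register image of a 4-byte value to 32 bits
    // "as if through memory".
    uint32_t coerce(uint64_t v, bool bigEndian) {
      return bigEndian ? uint32_t(v >> 32)   // meaningful bytes are the high bits
                       : uint32_t(v);        // little-endian: already in the low bits
    }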
@@ -1024,25 +1047,29 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// Attributes that should go on the function, but not the call site.
if (!CodeGenOpts.DisableFPElim) {
FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "false");
} else if (CodeGenOpts.OmitLeafFramePointer) {
FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
} else {
FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf", "true");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
}
FuncAttrs.addAttribute("less-precise-fpmad",
- CodeGenOpts.LessPreciseFPMAD ? "true" : "false");
+ llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
FuncAttrs.addAttribute("no-infs-fp-math",
- CodeGenOpts.NoInfsFPMath ? "true" : "false");
+ llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
FuncAttrs.addAttribute("no-nans-fp-math",
- CodeGenOpts.NoNaNsFPMath ? "true" : "false");
+ llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
FuncAttrs.addAttribute("unsafe-fp-math",
- CodeGenOpts.UnsafeFPMath ? "true" : "false");
+ llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
FuncAttrs.addAttribute("use-soft-float",
- CodeGenOpts.SoftFloat ? "true" : "false");
+ llvm::toStringRef(CodeGenOpts.SoftFloat));
+ FuncAttrs.addAttribute("stack-protector-buffer-size",
+ llvm::utostr(CodeGenOpts.SSPBufferSize));
+
+ if (!CodeGenOpts.StackRealignment)
+ FuncAttrs.addAttribute("no-realign-stack");
}
QualType RetTy = FI.getReturnType();
@@ -1050,12 +1077,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
- if (RetTy->hasSignedIntegerRepresentation())
- RetAttrs.addAttribute(llvm::Attribute::SExt);
- else if (RetTy->hasUnsignedIntegerRepresentation())
- RetAttrs.addAttribute(llvm::Attribute::ZExt);
- break;
+ if (RetTy->hasSignedIntegerRepresentation())
+ RetAttrs.addAttribute(llvm::Attribute::SExt);
+ else if (RetTy->hasUnsignedIntegerRepresentation())
+ RetAttrs.addAttribute(llvm::Attribute::ZExt);
+ // FALL THROUGH
case ABIArgInfo::Direct:
+ if (RetAI.getInReg())
+ RetAttrs.addAttribute(llvm::Attribute::InReg);
+ break;
case ABIArgInfo::Ignore:
break;
@@ -1263,7 +1293,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
// Load scalar value from indirect argument.
CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
- V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty);
+ V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
+ Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
@@ -1294,6 +1325,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+ if (MD->isVirtual() && Arg == CXXABIThisDecl)
+ V = CGM.getCXXABI().
+ adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
+ }
+
// Because of merging of function types from multiple decls it is
// possible for the type of an argument to not match the corresponding
// type in the function type. Since we are codegening the callee
@@ -1371,7 +1409,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Match to what EmitParmDecl is expecting for this type.
if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
- V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty);
+ V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
}
@@ -1609,20 +1647,9 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
return store;
}
-/// Check whether 'this' argument of a callsite matches 'this' of the caller.
-static bool checkThisPointer(llvm::Value *ThisArg, llvm::Value *This) {
- if (ThisArg == This)
- return true;
- // Check whether ThisArg is a bitcast of This.
- llvm::BitCastInst *Bitcast;
- if ((Bitcast = dyn_cast<llvm::BitCastInst>(ThisArg)) &&
- Bitcast->getOperand(0) == This)
- return true;
- return false;
-}
-
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
- bool EmitRetDbgLoc) {
+ bool EmitRetDbgLoc,
+ SourceLocation EndLoc) {
// Functions with no result always return void.
if (ReturnValue == 0) {
Builder.CreateRetVoid();
@@ -1639,7 +1666,8 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
switch (getEvaluationKind(RetTy)) {
case TEK_Complex: {
ComplexPairTy RT =
- EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy));
+ EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
+ EndLoc);
EmitStoreOfComplex(RT,
MakeNaturalAlignAddrLValue(CurFn->arg_begin(), RetTy),
/*isInit*/ true);
@@ -1667,8 +1695,10 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
// If there is a dominating store to ReturnValue, we can elide
// the load, zap the store, and usually zap the alloca.
if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
- // Reuse the debug location from the store unless we're told not to.
- if (EmitRetDbgLoc)
+ // Reuse the debug location from the store unless there is
+ // cleanup code to be emitted between the store and return
+ // instruction.
+ if (EmitRetDbgLoc && !AutoreleaseResult)
RetDbgLoc = SI->getDebugLoc();
// Get the stored value and nuke the now-dead store.
RV = SI->getValueOperand();
@@ -1715,26 +1745,14 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- // If this function returns 'this', the last instruction is a CallInst
- // that returns 'this', and 'this' argument of the CallInst points to
- // the same object as CXXThisValue, use the return value from the CallInst.
- // We will not need to keep 'this' alive through the callsite. It also enables
- // optimizations in the backend, such as tail call optimization.
- if (CalleeWithThisReturn && CGM.getCXXABI().HasThisReturn(CurGD)) {
- llvm::BasicBlock *IP = Builder.GetInsertBlock();
- llvm::CallInst *Callsite;
- if (!IP->empty() && (Callsite = dyn_cast<llvm::CallInst>(&IP->back())) &&
- Callsite->getCalledFunction() == CalleeWithThisReturn &&
- checkThisPointer(Callsite->getOperand(0), CXXThisValue))
- RV = Builder.CreateBitCast(Callsite, RetAI.getCoerceToType());
- }
llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
if (!RetDbgLoc.isUnknown())
Ret->setDebugLoc(RetDbgLoc);
}
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
- const VarDecl *param) {
+ const VarDecl *param,
+ SourceLocation loc) {
// StartFunction converted the ABI-lowered parameter(s) into a
// local alloca. We need to turn that into an r-value suitable
// for EmitCall.
@@ -1755,7 +1773,7 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- args.add(convertTempToRValue(local, type), type);
+ args.add(convertTempToRValue(local, type, loc), type);
}
static bool isProvablyNull(llvm::Value *addr) {
@@ -1814,7 +1832,7 @@ static void emitWriteback(CodeGenFunction &CGF,
CGF.EmitARCIntrinsicUse(writeback.ToUse);
// Load the old value (primitively).
- llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV);
+ llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
// Put the new value in place (primitively).
CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
@@ -1839,6 +1857,19 @@ static void emitWritebacks(CodeGenFunction &CGF,
emitWriteback(CGF, *i);
}
+static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
+ const CallArgList &CallArgs) {
+ assert(CGF.getTarget().getCXXABI().isArgumentDestroyedByCallee());
+ ArrayRef<CallArgList::CallArgCleanup> Cleanups =
+ CallArgs.getCleanupsToDeactivate();
+ // Iterate in reverse to increase the likelihood of popping the cleanup.
+ for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
+ I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
+ CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
+ I->IsActiveIP->eraseFromParent();
+ }
+}
+
static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
if (uop->getOpcode() == UO_AddrOf)
@@ -1930,7 +1961,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Perform a copy if necessary.
if (shouldCopy) {
- RValue srcRV = CGF.EmitLoadOfLValue(srcLV);
+ RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
@@ -1987,16 +2018,47 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
if (E->isGLValue()) {
assert(E->getObjectKind() == OK_Ordinary);
- return args.add(EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0),
- type);
+ return args.add(EmitReferenceBindingToExpr(E), type);
+ }
+
+ bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
+
+ // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
+ // However, we still have to push an EH-only cleanup in case we unwind before
+ // we make it to the call.
+ if (HasAggregateEvalKind &&
+ CGM.getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
+ const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
+ if (RD && RD->hasNonTrivialDestructor()) {
+ AggValueSlot Slot = CreateAggTemp(type, "agg.arg.tmp");
+ Slot.setExternallyDestructed();
+ EmitAggExpr(E, Slot);
+ RValue RV = Slot.asRValue();
+ args.add(RV, type);
+
+ pushDestroy(EHCleanup, RV.getAggregateAddr(), type, destroyCXXObject,
+ /*useEHCleanupForArray*/ true);
+ // This unreachable is a temporary marker which will be removed later.
+ llvm::Instruction *IsActive = Builder.CreateUnreachable();
+ args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
+ return;
+ }
}
- if (hasAggregateEvaluationKind(type) &&
- isa<ImplicitCastExpr>(E) &&
+ if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
assert(L.isSimple());
- args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+ if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
+ args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
+ } else {
+ // We can't represent a misaligned lvalue in the CallArgList, so copy
+ // to an aligned temporary now.
+ llvm::Value *tmp = CreateMemTemp(type);
+ EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
+ L.getAlignment());
+ args.add(RValue::getAggregate(tmp), type);
+ }
return;
}
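The EH-only cleanup pushed above covers the window between constructing the argument and reaching the call; once the call is made, destroying the argument is the callee's responsibility under the Microsoft ABI, so the cleanup is deactivated again in EmitCall(). A hypothetical call that exercises this path:

    struct Arg { Arg(); Arg(const Arg &); ~Arg(); };
    void callee(Arg a);

    void caller() {
      callee(Arg());   // callee destroys 'a' in its own frame; the caller only
                       // needs the cleanup if something throws before the call
                       // instruction is actually reached
    }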
@@ -2127,7 +2189,7 @@ static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
}
void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- SmallVector<llvm::Value*,16> &Args,
+ SmallVectorImpl<llvm::Value *> &Args,
llvm::FunctionType *IRFuncTy) {
if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
unsigned NumElts = AT->getSize().getZExtValue();
@@ -2135,7 +2197,7 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
llvm::Value *Addr = RV.getAggregateAddr();
for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
- RValue EltRV = convertTempToRValue(EltAddr, EltTy);
+ RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
}
} else if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -2159,7 +2221,7 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
}
}
if (LargestFD) {
- RValue FldRV = EmitRValueForField(LV, LargestFD);
+ RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
}
} else {
@@ -2167,7 +2229,7 @@ void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
i != e; ++i) {
FieldDecl *FD = *i;
- RValue FldRV = EmitRValueForField(LV, FD);
+ RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
}
}
@@ -2394,6 +2456,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ if (!CallArgs.getCleanupsToDeactivate().empty())
+ deactivateArgCleanupsBeforeCall(*this, CallArgs);
+
// If the callee is a bitcast of a function to a varargs pointer to function
// type, check to see if we can remove the bitcast. This handles some cases
// with unprototyped functions.
@@ -2482,7 +2547,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
switch (RetAI.getKind()) {
case ABIArgInfo::Indirect:
- return convertTempToRValue(Args[0], RetTy);
+ return convertTempToRValue(Args[0], RetTy, SourceLocation());
case ABIArgInfo::Ignore:
// If we are ignoring an argument that had a result, make sure to
@@ -2540,7 +2605,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- return convertTempToRValue(DestPtr, RetTy);
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
case ABIArgInfo::Expand:
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 85c3320ec0ee..532cb59c62e5 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -16,6 +16,7 @@
#define CLANG_CODEGEN_CGCALL_H
#include "CGValue.h"
+#include "EHScopeStack.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/FoldingSet.h"
@@ -67,6 +68,14 @@ namespace CodeGen {
llvm::Value *ToUse;
};
+ struct CallArgCleanup {
+ EHScopeStack::stable_iterator Cleanup;
+
+ /// The "is active" insertion point. This instruction is temporary and
+ /// will be removed after insertion.
+ llvm::Instruction *IsActiveIP;
+ };
+
void add(RValue rvalue, QualType type, bool needscopy = false) {
push_back(CallArg(rvalue, type, needscopy));
}
@@ -92,57 +101,25 @@ namespace CodeGen {
writeback_iterator writeback_begin() const { return Writebacks.begin(); }
writeback_iterator writeback_end() const { return Writebacks.end(); }
- private:
- SmallVector<Writeback, 1> Writebacks;
- };
-
- /// A class for recording the number of arguments that a function
- /// signature requires.
- class RequiredArgs {
- /// The number of required arguments, or ~0 if the signature does
- /// not permit optional arguments.
- unsigned NumRequired;
- public:
- enum All_t { All };
-
- RequiredArgs(All_t _) : NumRequired(~0U) {}
- explicit RequiredArgs(unsigned n) : NumRequired(n) {
- assert(n != ~0U);
- }
-
- /// Compute the arguments required by the given formal prototype,
- /// given that there may be some additional, non-formal arguments
- /// in play.
- static RequiredArgs forPrototypePlus(const FunctionProtoType *prototype,
- unsigned additional) {
- if (!prototype->isVariadic()) return All;
- return RequiredArgs(prototype->getNumArgs() + additional);
- }
-
- static RequiredArgs forPrototype(const FunctionProtoType *prototype) {
- return forPrototypePlus(prototype, 0);
- }
-
- static RequiredArgs forPrototype(CanQual<FunctionProtoType> prototype) {
- return forPrototype(prototype.getTypePtr());
+ void addArgCleanupDeactivation(EHScopeStack::stable_iterator Cleanup,
+ llvm::Instruction *IsActiveIP) {
+ CallArgCleanup ArgCleanup;
+ ArgCleanup.Cleanup = Cleanup;
+ ArgCleanup.IsActiveIP = IsActiveIP;
+ CleanupsToDeactivate.push_back(ArgCleanup);
}
- static RequiredArgs forPrototypePlus(CanQual<FunctionProtoType> prototype,
- unsigned additional) {
- return forPrototypePlus(prototype.getTypePtr(), additional);
+ ArrayRef<CallArgCleanup> getCleanupsToDeactivate() const {
+ return CleanupsToDeactivate;
}
- bool allowsOptionalArgs() const { return NumRequired != ~0U; }
- unsigned getNumRequiredArgs() const {
- assert(allowsOptionalArgs());
- return NumRequired;
- }
+ private:
+ SmallVector<Writeback, 1> Writebacks;
- unsigned getOpaqueData() const { return NumRequired; }
- static RequiredArgs getFromOpaqueData(unsigned value) {
- if (value == ~0U) return All;
- return RequiredArgs(value);
- }
+ /// Deactivate these cleanups immediately before making the call. This
+ /// is used to clean up objects that are owned by the callee once the call
+ /// occurs.
+ SmallVector<CallArgCleanup, 1> CleanupsToDeactivate;
};
/// FunctionArgList - Type for representing both the decl and type
@@ -151,137 +128,6 @@ namespace CodeGen {
class FunctionArgList : public SmallVector<const VarDecl*, 16> {
};
- /// CGFunctionInfo - Class to encapsulate the information about a
- /// function definition.
- class CGFunctionInfo : public llvm::FoldingSetNode {
- struct ArgInfo {
- CanQualType type;
- ABIArgInfo info;
- };
-
- /// The LLVM::CallingConv to use for this function (as specified by the
- /// user).
- unsigned CallingConvention : 8;
-
- /// The LLVM::CallingConv to actually use for this function, which may
- /// depend on the ABI.
- unsigned EffectiveCallingConvention : 8;
-
- /// The clang::CallingConv that this was originally created with.
- unsigned ASTCallingConvention : 8;
-
- /// Whether this function is noreturn.
- unsigned NoReturn : 1;
-
- /// Whether this function is returns-retained.
- unsigned ReturnsRetained : 1;
-
- /// How many arguments to pass inreg.
- unsigned HasRegParm : 1;
- unsigned RegParm : 4;
-
- RequiredArgs Required;
-
- unsigned NumArgs;
- ArgInfo *getArgsBuffer() {
- return reinterpret_cast<ArgInfo*>(this+1);
- }
- const ArgInfo *getArgsBuffer() const {
- return reinterpret_cast<const ArgInfo*>(this + 1);
- }
-
- CGFunctionInfo() : Required(RequiredArgs::All) {}
-
- public:
- static CGFunctionInfo *create(unsigned llvmCC,
- const FunctionType::ExtInfo &extInfo,
- CanQualType resultType,
- ArrayRef<CanQualType> argTypes,
- RequiredArgs required);
-
- typedef const ArgInfo *const_arg_iterator;
- typedef ArgInfo *arg_iterator;
-
- const_arg_iterator arg_begin() const { return getArgsBuffer() + 1; }
- const_arg_iterator arg_end() const { return getArgsBuffer() + 1 + NumArgs; }
- arg_iterator arg_begin() { return getArgsBuffer() + 1; }
- arg_iterator arg_end() { return getArgsBuffer() + 1 + NumArgs; }
-
- unsigned arg_size() const { return NumArgs; }
-
- bool isVariadic() const { return Required.allowsOptionalArgs(); }
- RequiredArgs getRequiredArgs() const { return Required; }
-
- bool isNoReturn() const { return NoReturn; }
-
- /// In ARC, whether this function retains its return value. This
- /// is not always reliable for call sites.
- bool isReturnsRetained() const { return ReturnsRetained; }
-
- /// getASTCallingConvention() - Return the AST-specified calling
- /// convention.
- CallingConv getASTCallingConvention() const {
- return CallingConv(ASTCallingConvention);
- }
-
- /// getCallingConvention - Return the user specified calling
- /// convention, which has been translated into an LLVM CC.
- unsigned getCallingConvention() const { return CallingConvention; }
-
- /// getEffectiveCallingConvention - Return the actual calling convention to
- /// use, which may depend on the ABI.
- unsigned getEffectiveCallingConvention() const {
- return EffectiveCallingConvention;
- }
- void setEffectiveCallingConvention(unsigned Value) {
- EffectiveCallingConvention = Value;
- }
-
- bool getHasRegParm() const { return HasRegParm; }
- unsigned getRegParm() const { return RegParm; }
-
- FunctionType::ExtInfo getExtInfo() const {
- return FunctionType::ExtInfo(isNoReturn(),
- getHasRegParm(), getRegParm(),
- getASTCallingConvention(),
- isReturnsRetained());
- }
-
- CanQualType getReturnType() const { return getArgsBuffer()[0].type; }
-
- ABIArgInfo &getReturnInfo() { return getArgsBuffer()[0].info; }
- const ABIArgInfo &getReturnInfo() const { return getArgsBuffer()[0].info; }
-
- void Profile(llvm::FoldingSetNodeID &ID) {
- ID.AddInteger(getASTCallingConvention());
- ID.AddBoolean(NoReturn);
- ID.AddBoolean(ReturnsRetained);
- ID.AddBoolean(HasRegParm);
- ID.AddInteger(RegParm);
- ID.AddInteger(Required.getOpaqueData());
- getReturnType().Profile(ID);
- for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
- it->type.Profile(ID);
- }
- static void Profile(llvm::FoldingSetNodeID &ID,
- const FunctionType::ExtInfo &info,
- RequiredArgs required,
- CanQualType resultType,
- ArrayRef<CanQualType> argTypes) {
- ID.AddInteger(info.getCC());
- ID.AddBoolean(info.getNoReturn());
- ID.AddBoolean(info.getProducesResult());
- ID.AddBoolean(info.getHasRegParm());
- ID.AddInteger(info.getRegParm());
- ID.AddInteger(required.getOpaqueData());
- resultType.Profile(ID);
- for (ArrayRef<CanQualType>::iterator
- i = argTypes.begin(), e = argTypes.end(); i != e; ++i) {
- i->Profile(ID);
- }
- }
- };
-
/// ReturnValueSlot - Contains the address where the return value of a
/// function can be stored, and whether the address is volatile or not.
class ReturnValueSlot {
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 3fd075701d03..4848d7565d40 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -17,10 +17,12 @@
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "clang/AST/CXXInheritance.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/EvaluatedExprVisitor.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetBuiltins.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
using namespace clang;
@@ -198,7 +200,8 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
// Compute the virtual offset.
llvm::Value *VirtualOffset = 0;
if (VBase) {
- VirtualOffset = GetVirtualBaseClassOffset(Value, Derived, VBase);
+ VirtualOffset =
+ CGM.getCXXABI().GetVirtualBaseClassOffset(*this, Value, Derived, VBase);
}
// Apply both offsets.
@@ -285,7 +288,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
bool ForVirtualBase,
bool Delegating) {
- if (!CodeGenVTables::needsVTTParameter(GD)) {
+ if (!CGM.getCXXABI().NeedsVTTParameter(GD)) {
// This constructor/destructor does not need a VTT parameter.
return 0;
}
@@ -303,7 +306,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
} else if (RD == Base) {
// If the record matches the base, this is the complete ctor/dtor
// variant calling the base variant in a class with virtual bases.
- assert(!CodeGenVTables::needsVTTParameter(CurGD) &&
+ assert(!CGM.getCXXABI().NeedsVTTParameter(CurGD) &&
"doing no-op VTT offset in base dtor/ctor?");
assert(!ForVirtualBase && "Can't have same class as virtual base!");
SubVTTIndex = 0;
@@ -318,7 +321,7 @@ llvm::Value *CodeGenFunction::GetVTTParameter(GlobalDecl GD,
assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
}
- if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
// A VTT parameter was passed to the constructor, use it.
VTT = LoadCXXVTT();
VTT = Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
@@ -432,52 +435,45 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
unsigned Index) {
if (Index == ArrayIndexes.size()) {
LValue LV = LHS;
- { // Scope for Cleanups.
- CodeGenFunction::RunCleanupsScope Cleanups(CGF);
-
- if (ArrayIndexVar) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *Dest = LHS.getAddress();
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
-
- // Update the LValue.
- LV.setAddress(Dest);
- CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
- LV.setAlignment(std::min(Align, LV.getAlignment()));
- }
- switch (CGF.getEvaluationKind(T)) {
- case TEK_Scalar:
- CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
- break;
- case TEK_Complex:
- CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
- break;
- case TEK_Aggregate: {
- AggValueSlot Slot =
- AggValueSlot::forLValue(LV,
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(Init, Slot);
- break;
- }
- }
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *Dest = LHS.getAddress();
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+
+ // Update the LValue.
+ LV.setAddress(Dest);
+ CharUnits Align = CGF.getContext().getTypeAlignInChars(T);
+ LV.setAlignment(std::min(Align, LV.getAlignment()));
}
- // Now, outside of the initializer cleanup scope, destroy the backing array
- // for a std::initializer_list member.
- CGF.MaybeEmitStdInitializerListCleanup(LV.getAddress(), Init);
+ switch (CGF.getEvaluationKind(T)) {
+ case TEK_Scalar:
+ CGF.EmitScalarInit(Init, /*decl*/ 0, LV, false);
+ break;
+ case TEK_Complex:
+ CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
+ break;
+ case TEK_Aggregate: {
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LV,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+
+ CGF.EmitAggExpr(Init, Slot);
+ break;
+ }
+ }
return;
}
-
+
const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
assert(Array && "Array initialization without the array type?");
llvm::Value *IndexVar
@@ -511,16 +507,12 @@ static void EmitAggMemberInitializer(CodeGenFunction &CGF,
CGF.EmitBlock(ForBody);
llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
-
- {
- CodeGenFunction::RunCleanupsScope Cleanups(CGF);
-
- // Inside the loop body recurse to emit the inner loop or, eventually, the
- // constructor call.
- EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
- Array->getElementType(), ArrayIndexes, Index + 1);
- }
-
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
+ Array->getElementType(), ArrayIndexes, Index + 1);
+
CGF.EmitBlock(ContinueBlock);
// Emit the increment of the loop counter.
@@ -573,7 +565,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// in the AST, we could generalize it more easily.
const ConstantArrayType *Array
= CGF.getContext().getAsConstantArrayType(FieldType);
- if (Array && Constructor->isImplicitlyDefined() &&
+ if (Array && Constructor->isDefaulted() &&
Constructor->isCopyOrMoveConstructor()) {
QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
@@ -713,7 +705,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
CGM.getTarget().getCXXABI().hasConstructorVariants()) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, Ctor->getLocEnd());
- EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
return;
}
@@ -725,7 +717,7 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
if (IsTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
- EHScopeStack::stable_iterator CleanupDepth = EHStack.stable_begin();
+ RunCleanupsScope RunCleanups(*this);
// TODO: in restricted cases, we can emit the vbase initializers of
// a complete ctor and then delegate to the base ctor.
@@ -744,13 +736,36 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// initializers, which includes (along the exceptional path) the
// destructors for those members and bases that were fully
// constructed.
- PopCleanupBlocks(CleanupDepth);
+ RunCleanups.ForceCleanup();
if (IsTryBody)
ExitCXXTryStmt(*cast<CXXTryStmt>(Body), true);
}
namespace {
+ /// RAII object to indicate that codegen is copying the value representation
+ /// instead of the object representation. Useful when copying a struct or
+ /// class which has uninitialized members and we're only performing
+ /// lvalue-to-rvalue conversion on the object but not its members.
+ class CopyingValueRepresentation {
+ public:
+ explicit CopyingValueRepresentation(CodeGenFunction &CGF)
+ : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
+ SO.Bool = false;
+ SO.Enum = false;
+ CGF.SanOpts = &SO;
+ }
+ ~CopyingValueRepresentation() {
+ CGF.SanOpts = OldSanOpts;
+ }
+ private:
+ CodeGenFunction &CGF;
+ SanitizerOptions SO;
+ const SanitizerOptions *OldSanOpts;
+ };
+}
+
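CopyingValueRepresentation temporarily clears the Bool and Enum sanitizer options: when a defaulted copy is lowered field by field, loading a member the source object never initialized is legitimate (only the value representation is being copied), so a bool/enum range check on that load would misfire. A hypothetical case it protects:

    struct P {
      bool flag;   // may be left uninitialized in the source object
      int  n;
    };
    // A defaulted copy of P copies 'flag' byte-for-byte; checking the loaded
    // value against {0, 1} during that copy would be a false positive.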
+namespace {
class FieldMemcpyizer {
public:
FieldMemcpyizer(CodeGenFunction &CGF, const CXXRecordDecl *ClassDecl,
@@ -859,8 +874,12 @@ namespace {
}
void addNextField(FieldDecl *F) {
- assert(F->getFieldIndex() == LastAddedFieldIndex + 1 &&
- "Cannot aggregate non-contiguous fields.");
+ // For the most part, the following invariant will hold:
+ // F->getFieldIndex() == LastAddedFieldIndex + 1
+ // The one exception is that Sema won't add a copy-initializer for an
+ // unnamed bitfield, which will show up here as a gap in the sequence.
+ assert(F->getFieldIndex() >= LastAddedFieldIndex + 1 &&
+ "Cannot aggregate fields out of order.");
LastAddedFieldIndex = F->getFieldIndex();
// The 'first' and 'last' fields are chosen by offset, rather than field
@@ -891,7 +910,7 @@ namespace {
/// constructor.
static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
FunctionArgList &Args) {
- if (CD->isCopyOrMoveConstructor() && CD->isImplicitlyDefined())
+ if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
return Args[Args.size() - 1];
return 0;
}
@@ -925,7 +944,7 @@ namespace {
FunctionArgList &Args)
: FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
ConstructorDecl(CD),
- MemcpyableCtor(CD->isImplicitlyDefined() &&
+ MemcpyableCtor(CD->isDefaulted() &&
CD->isCopyOrMoveConstructor() &&
CGF.getLangOpts().getGC() == LangOptions::NonGC),
Args(Args) { }
@@ -945,9 +964,10 @@ namespace {
if (AggregatedInits.size() <= 1) {
// This memcpy is too small to be worthwhile. Fall back on default
// codegen.
- for (unsigned i = 0; i < AggregatedInits.size(); ++i) {
+ if (!AggregatedInits.empty()) {
+ CopyingValueRepresentation CVR(CGF);
EmitMemberInitializer(CGF, ConstructorDecl->getParent(),
- AggregatedInits[i], ConstructorDecl, Args);
+ AggregatedInits[0], ConstructorDecl, Args);
}
reset();
return;
@@ -986,8 +1006,8 @@ namespace {
private:
// Returns the memcpyable field copied by the given statement, if one
- // exists. Otherwise r
- FieldDecl* getMemcpyableField(Stmt *S) {
+ // exists. Otherwise returns null.
+ FieldDecl *getMemcpyableField(Stmt *S) {
if (!AssignmentsMemcpyable)
return 0;
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(S)) {
@@ -1081,8 +1101,10 @@ namespace {
void emitAggregatedStmts() {
if (AggregatedStmts.size() <= 1) {
- for (unsigned i = 0; i < AggregatedStmts.size(); ++i)
- CGF.EmitStmt(AggregatedStmts[i]);
+ if (!AggregatedStmts.empty()) {
+ CopyingValueRepresentation CVR(CGF);
+ CGF.EmitStmt(AggregatedStmts[0]);
+ }
reset();
}
@@ -1115,7 +1137,8 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
!CGM.getTarget().getCXXABI().hasConstructorVariants()) {
// The ABIs that don't have constructor variants need to put a branch
// before the virtual base initialization code.
- BaseCtorContinueBB = CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this);
+ BaseCtorContinueBB =
+ CGM.getCXXABI().EmitCtorCompleteObjectHandler(*this, ClassDecl);
assert(BaseCtorContinueBB);
}
@@ -1270,16 +1293,19 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// If this is the complete variant, just invoke the base variant;
// the epilogue will destruct the virtual bases. But we can't do
// this optimization if the body is a function-try-block, because
- // we'd introduce *two* handler blocks.
+ // we'd introduce *two* handler blocks. In the Microsoft ABI, we
+ // always delegate because we might not have a definition in this TU.
switch (DtorType) {
case Dtor_Deleting: llvm_unreachable("already handled deleting case");
case Dtor_Complete:
+ assert((Body || getTarget().getCXXABI().isMicrosoft()) &&
+ "can't emit a dtor without a body for non-Microsoft ABIs");
+
// Enter the cleanup scopes for virtual bases.
EnterDtorCleanups(Dtor, Dtor_Complete);
- if (!isTryBody &&
- CGM.getTarget().getCXXABI().hasDestructorVariants()) {
+ if (!isTryBody) {
EmitCXXDestructorCall(Dtor, Dtor_Base, /*ForVirtualBase=*/false,
/*Delegating=*/false, LoadCXXThis());
break;
@@ -1287,6 +1313,8 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Fallthrough: act like we're in the base variant.
case Dtor_Base:
+ assert(Body);
+
// Enter the cleanup scopes for fields and non-virtual bases.
EnterDtorCleanups(Dtor, Dtor_Base);
@@ -1635,17 +1663,6 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd) {
-
- CGDebugInfo *DI = getDebugInfo();
- if (DI &&
- CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo) {
- // If debug info for this class has not been emitted then this is the
- // right time to do so.
- const CXXRecordDecl *Parent = D->getParent();
- DI->getOrCreateRecordType(CGM.getContext().getTypeDeclType(Parent),
- Parent->getLocation());
- }
-
// If this is a trivial constructor, just emit what's needed.
if (D->isTrivial()) {
if (ArgBeg == ArgEnd) {
@@ -1667,11 +1684,8 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
// Non-trivial constructors are handled in an ABI-specific manner.
- llvm::Value *Callee = CGM.getCXXABI().EmitConstructorCall(*this, D, Type,
- ForVirtualBase, Delegating, This, ArgBeg, ArgEnd);
- if (CGM.getCXXABI().HasThisReturn(CurGD) &&
- CGM.getCXXABI().HasThisReturn(GlobalDecl(D, Type)))
- CalleeWithThisReturn = Callee;
+ CGM.getCXXABI().EmitConstructorCall(*this, D, Type, ForVirtualBase,
+ Delegating, This, ArgBeg, ArgEnd);
}
void
@@ -1686,8 +1700,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
return;
}
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D,
- clang::Ctor_Complete);
+ llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
assert(D->isInstance() &&
"Trying to emit a member call expr on a static method!");
@@ -1730,7 +1743,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
void
CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
- const FunctionArgList &Args) {
+ const FunctionArgList &Args,
+ SourceLocation Loc) {
CallArgList DelegateArgs;
FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
@@ -1747,7 +1761,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
DelegateArgs.add(RValue::get(VTT), VoidPP);
- if (CodeGenVTables::needsVTTParameter(CurGD)) {
+ if (CGM.getCXXABI().NeedsVTTParameter(CurGD)) {
assert(I != E && "cannot skip vtt parameter, already done with args");
assert((*I)->getType() == VoidPP && "skipping parameter not of vtt type");
++I;
@@ -1757,15 +1771,13 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
// Explicit arguments.
for (; I != E; ++I) {
const VarDecl *param = *I;
- EmitDelegateCallArg(DelegateArgs, param);
+ // FIXME: per-argument source location
+ EmitDelegateCallArg(DelegateArgs, param, Loc);
}
llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
Callee, ReturnValueSlot(), DelegateArgs, Ctor);
- if (CGM.getCXXABI().HasThisReturn(CurGD) &&
- CGM.getCXXABI().HasThisReturn(GlobalDecl(Ctor, CtorType)))
- CalleeWithThisReturn = Callee;
}
namespace {
@@ -1818,8 +1830,8 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
bool ForVirtualBase,
bool Delegating,
llvm::Value *This) {
- llvm::Value *VTT = GetVTTParameter(GlobalDecl(DD, Type),
- ForVirtualBase, Delegating);
+ GlobalDecl GD(DD, Type);
+ llvm::Value *VTT = GetVTTParameter(GD, ForVirtualBase, Delegating);
llvm::Value *Callee = 0;
if (getLangOpts().AppleKext)
Callee = BuildAppleKextVirtualDestructorCall(DD, Type,
@@ -1827,14 +1839,14 @@ void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
if (!Callee)
Callee = CGM.GetAddrOfCXXDestructor(DD, Type);
-
+
+ if (DD->isVirtual())
+ This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, GD, This);
+
// FIXME: Provide a source location here.
EmitCXXMemberCall(DD, SourceLocation(), Callee, ReturnValueSlot(), This,
VTT, getContext().getPointerType(getContext().VoidPtrTy),
0, 0);
- if (CGM.getCXXABI().HasThisReturn(CurGD) &&
- CGM.getCXXABI().HasThisReturn(GlobalDecl(DD, Type)))
- CalleeWithThisReturn = Callee;
}
namespace {
@@ -1868,69 +1880,30 @@ void CodeGenFunction::PushDestructorCleanup(QualType T, llvm::Value *Addr) {
PushDestructorCleanup(D, Addr);
}
-llvm::Value *
-CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
- const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl) {
- llvm::Value *VTablePtr = GetVTablePtr(This, Int8PtrTy);
- CharUnits VBaseOffsetOffset =
- CGM.getVTableContext().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
-
- llvm::Value *VBaseOffsetPtr =
- Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset.getQuantity(),
- "vbase.offset.ptr");
- llvm::Type *PtrDiffTy =
- ConvertType(getContext().getPointerDiffType());
-
- VBaseOffsetPtr = Builder.CreateBitCast(VBaseOffsetPtr,
- PtrDiffTy->getPointerTo());
-
- llvm::Value *VBaseOffset = Builder.CreateLoad(VBaseOffsetPtr, "vbase.offset");
-
- return VBaseOffset;
-}
-
void
CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
CharUnits OffsetFromNearestVBase,
- llvm::Constant *VTable,
const CXXRecordDecl *VTableClass) {
- const CXXRecordDecl *RD = Base.getBase();
-
// Compute the address point.
- llvm::Value *VTableAddressPoint;
-
- // Check if we need to use a vtable from the VTT.
- if (CodeGenVTables::needsVTTParameter(CurGD) &&
- (RD->getNumVBases() || NearestVBase)) {
- // Get the secondary vpointer index.
- uint64_t VirtualPointerIndex =
- CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
-
- /// Load the VTT.
- llvm::Value *VTT = LoadCXXVTT();
- if (VirtualPointerIndex)
- VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
-
- // And load the address point from the VTT.
- VTableAddressPoint = Builder.CreateLoad(VTT);
- } else {
- uint64_t AddressPoint =
- CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
- VTableAddressPoint =
- Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
- }
+ bool NeedsVirtualOffset;
+ llvm::Value *VTableAddressPoint =
+ CGM.getCXXABI().getVTableAddressPointInStructor(
+ *this, VTableClass, Base, NearestVBase, NeedsVirtualOffset);
+ if (!VTableAddressPoint)
+ return;
// Compute where to store the address point.
llvm::Value *VirtualOffset = 0;
CharUnits NonVirtualOffset = CharUnits::Zero();
- if (CodeGenVTables::needsVTTParameter(CurGD) && NearestVBase) {
+ if (NeedsVirtualOffset) {
// We need to use the virtual base offset offset because the virtual base
// might have a different offset in the most derived class.
- VirtualOffset = GetVirtualBaseClassOffset(LoadCXXThis(), VTableClass,
- NearestVBase);
+ VirtualOffset = CGM.getCXXABI().GetVirtualBaseClassOffset(*this,
+ LoadCXXThis(),
+ VTableClass,
+ NearestVBase);
NonVirtualOffset = OffsetFromNearestVBase;
} else {
// We can just use the base offset in the complete class.
@@ -1958,7 +1931,6 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
const CXXRecordDecl *NearestVBase,
CharUnits OffsetFromNearestVBase,
bool BaseIsNonVirtualPrimaryBase,
- llvm::Constant *VTable,
const CXXRecordDecl *VTableClass,
VisitedVirtualBasesSetTy& VBases) {
// If this base is a non-virtual primary base the address point has already
@@ -1966,7 +1938,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
if (!BaseIsNonVirtualPrimaryBase) {
// Initialize the vtable pointer for this base.
InitializeVTablePointer(Base, NearestVBase, OffsetFromNearestVBase,
- VTable, VTableClass);
+ VTableClass);
}
const CXXRecordDecl *RD = Base.getBase();
@@ -2009,7 +1981,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
I->isVirtual() ? BaseDecl : NearestVBase,
BaseOffsetFromNearestVBase,
BaseDeclIsNonVirtualPrimaryBase,
- VTable, VTableClass, VBases);
+ VTableClass, VBases);
}
}
@@ -2018,16 +1990,15 @@ void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
if (!RD->isDynamicClass())
return;
- // Get the VTable.
- llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
-
// Initialize the vtable pointers for this class and all of its bases.
VisitedVirtualBasesSetTy VBases;
InitializeVTablePointers(BaseSubobject(RD, CharUnits::Zero()),
/*NearestVBase=*/0,
/*OffsetFromNearestVBase=*/CharUnits::Zero(),
- /*BaseIsNonVirtualPrimaryBase=*/false,
- VTable, RD, VBases);
+ /*BaseIsNonVirtualPrimaryBase=*/false, RD, VBases);
+
+ if (RD->getNumVBases())
+ CGM.getCXXABI().initializeHiddenVirtualInheritanceMembers(*this, RD);
}
llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
@@ -2038,29 +2009,6 @@ llvm::Value *CodeGenFunction::GetVTablePtr(llvm::Value *This,
return VTable;
}
-static const CXXRecordDecl *getMostDerivedClassDecl(const Expr *Base) {
- const Expr *E = Base;
-
- while (true) {
- E = E->IgnoreParens();
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
-
- break;
- }
-
- QualType DerivedType = E->getType();
- if (const PointerType *PTy = DerivedType->getAs<PointerType>())
- DerivedType = PTy->getPointeeType();
-
- return cast<CXXRecordDecl>(DerivedType->castAs<RecordType>()->getDecl());
-}
// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
// quite what we want.
@@ -2087,10 +2035,14 @@ static const Expr *skipNoOpCastsAndParens(const Expr *E) {
}
}
-/// canDevirtualizeMemberFunctionCall - Checks whether the given virtual member
-/// function call on the given expr can be devirtualized.
-static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
- const CXXMethodDecl *MD) {
+bool
+CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
+ const CXXMethodDecl *MD) {
+ // When building with -fapple-kext, all calls must go through the vtable since
+ // the kernel linker can do runtime patching of vtables.
+ if (getLangOpts().AppleKext)
+ return false;
+
// If the most derived class is marked final, we know that no subclass can
// override this member function and so we can devirtualize it. For example:
//
@@ -2101,7 +2053,7 @@ static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
// b->f();
// }
//
- const CXXRecordDecl *MostDerivedClassDecl = getMostDerivedClassDecl(Base);
+ const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
if (MostDerivedClassDecl->hasAttr<FinalAttr>())
return true;
@@ -2124,7 +2076,14 @@ static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
return false;
}
-
+
+ // We can devirtualize calls on an object accessed by a class member access
+ // expression, since by C++11 [basic.life]p6 we know that it can't refer to
+ // a derived class object constructed in the same location.
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
+ if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
+ return VD->getType()->isRecordType();
+
// We can always devirtualize calls on temporary object expressions.
if (isa<CXXConstructExpr>(Base))
return true;
@@ -2141,20 +2100,6 @@ static bool canDevirtualizeMemberFunctionCall(const Expr *Base,
return false;
}
-static bool UseVirtualCall(ASTContext &Context,
- const CXXOperatorCallExpr *CE,
- const CXXMethodDecl *MD) {
- if (!MD->isVirtual())
- return false;
-
- // When building with -fapple-kext, all calls must go through the vtable since
- // the kernel linker can do runtime patching of vtables.
- if (Context.getLangOpts().AppleKext)
- return true;
-
- return !canDevirtualizeMemberFunctionCall(CE->getArg(0), MD);
-}
-
llvm::Value *
CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
const CXXMethodDecl *MD,
@@ -2163,20 +2108,15 @@ CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodDeclaration(MD));
- if (UseVirtualCall(getContext(), E, MD))
- return BuildVirtualCall(MD, This, fnType);
+ if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
+ return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);
return CGM.GetAddrOfFunction(MD, fnType);
}
-void CodeGenFunction::EmitForwardingCallToLambda(const CXXRecordDecl *lambda,
- CallArgList &callArgs) {
- // Lookup the call operator
- DeclarationName operatorName
- = getContext().DeclarationNames.getCXXOperatorName(OO_Call);
- CXXMethodDecl *callOperator =
- cast<CXXMethodDecl>(lambda->lookup(operatorName).front());
-
+void CodeGenFunction::EmitForwardingCallToLambda(
+ const CXXMethodDecl *callOperator,
+ CallArgList &callArgs) {
// Get the address of the call operator.
const CGFunctionInfo &calleeFnInfo =
CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
@@ -2225,10 +2165,11 @@ void CodeGenFunction::EmitLambdaBlockInvokeBody() {
for (BlockDecl::param_const_iterator I = BD->param_begin(),
E = BD->param_end(); I != E; ++I) {
ParmVarDecl *param = *I;
- EmitDelegateCallArg(CallArgs, param);
+ EmitDelegateCallArg(CallArgs, param, param->getLocStart());
}
-
- EmitForwardingCallToLambda(Lambda, CallArgs);
+ assert(!Lambda->isGenericLambda() &&
+ "generic lambda interconversion to block not implemented");
+ EmitForwardingCallToLambda(Lambda->getLambdaCallOperator(), CallArgs);
}
void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
@@ -2239,7 +2180,7 @@ void CodeGenFunction::EmitLambdaToBlockPointerBody(FunctionArgList &Args) {
return;
}
- EmitFunctionBody(Args);
+ EmitFunctionBody(Args, cast<FunctionDecl>(CurGD.getDecl())->getBody());
}
void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
@@ -2256,10 +2197,22 @@ void CodeGenFunction::EmitLambdaDelegatingInvokeBody(const CXXMethodDecl *MD) {
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
E = MD->param_end(); I != E; ++I) {
ParmVarDecl *param = *I;
- EmitDelegateCallArg(CallArgs, param);
+ EmitDelegateCallArg(CallArgs, param, param->getLocStart());
}
-
- EmitForwardingCallToLambda(Lambda, CallArgs);
+ const CXXMethodDecl *CallOp = Lambda->getLambdaCallOperator();
+ // For a generic lambda, find the corresponding call operator specialization
+ // to which the call to the static-invoker shall be forwarded.
+ if (Lambda->isGenericLambda()) {
+ assert(MD->isFunctionTemplateSpecialization());
+ const TemplateArgumentList *TAL = MD->getTemplateSpecializationArgs();
+ FunctionTemplateDecl *CallOpTemplate = CallOp->getDescribedFunctionTemplate();
+ void *InsertPos = 0;
+ FunctionDecl *CorrespondingCallOpSpecialization =
+ CallOpTemplate->findSpecialization(TAL->data(), TAL->size(), InsertPos);
+ assert(CorrespondingCallOpSpecialization);
+ CallOp = cast<CXXMethodDecl>(CorrespondingCallOpSpecialization);
+ }
+ EmitForwardingCallToLambda(CallOp, CallArgs);
}
void CodeGenFunction::EmitLambdaStaticInvokeFunction(const CXXMethodDecl *MD) {
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index ba6b56c2676f..65de4d498d1a 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -17,8 +17,8 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCleanup.h"
+#include "CodeGenFunction.h"
using namespace clang;
using namespace CodeGen;
@@ -371,8 +371,7 @@ void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
- SourceLocation EHLoc) {
+void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
assert(Old.isValid());
while (EHStack.stable_begin() != Old) {
@@ -384,8 +383,35 @@ void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
bool FallThroughIsBranchThrough =
Old.strictlyEncloses(Scope.getEnclosingNormalCleanup());
- PopCleanupBlock(FallThroughIsBranchThrough, EHLoc);
+ PopCleanupBlock(FallThroughIsBranchThrough);
+ }
+}
+
+/// Pops cleanup blocks until the given savepoint is reached, then adds the
+/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
+void
+CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
+ size_t OldLifetimeExtendedSize) {
+ PopCleanupBlocks(Old);
+
+ // Move our deferred cleanups onto the EH stack.
+ for (size_t I = OldLifetimeExtendedSize,
+ E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
+ // Alignment should be guaranteed by the vptrs in the individual cleanups.
+ assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
+ "misaligned cleanup stack entry");
+
+ LifetimeExtendedCleanupHeader &Header =
+ reinterpret_cast<LifetimeExtendedCleanupHeader&>(
+ LifetimeExtendedCleanupStack[I]);
+ I += sizeof(Header);
+
+ EHStack.pushCopyOfCleanup(Header.getKind(),
+ &LifetimeExtendedCleanupStack[I],
+ Header.getSize());
+ I += Header.getSize();
}
+ LifetimeExtendedCleanupStack.resize(OldLifetimeExtendedSize);
}
static llvm::BasicBlock *CreateNormalEntry(CodeGenFunction &CGF,
@@ -533,8 +559,7 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
/// Pops a cleanup block. If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
-void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough,
- SourceLocation EHLoc) {
+void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
assert(!EHStack.empty() && "cleanup stack is empty!");
assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
@@ -836,7 +861,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough,
// Emit the EH cleanup if required.
if (RequiresEHCleanup) {
if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, EHLoc);
+ DI->EmitLocation(Builder, CurEHLocation);
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index d8dbe41bf0f3..1bd6bba523fb 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -14,13 +14,15 @@
#ifndef CLANG_CODEGEN_CGCLEANUP_H
#define CLANG_CODEGEN_CGCLEANUP_H
-/// EHScopeStack is defined in CodeGenFunction.h, but its
-/// implementation is in this file and in CGCleanup.cpp.
-#include "CodeGenFunction.h"
+#include "EHScopeStack.h"
+#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/SmallVector.h"
namespace llvm {
- class Value;
- class BasicBlock;
+class BasicBlock;
+class Value;
+class ConstantInt;
+class AllocaInst;
}
namespace clang {
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index ddcb931e46c5..fcb26f0da615 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -13,6 +13,7 @@
#include "CGDebugInfo.h"
#include "CGBlocks.h"
+#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
@@ -36,12 +37,13 @@
#include "llvm/IR/Module.h"
#include "llvm/Support/Dwarf.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
using namespace clang;
using namespace clang::CodeGen;
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
- : CGM(CGM), DBuilder(CGM.getModule()),
- BlockLiteralGenericSet(false) {
+ : CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
+ DBuilder(CGM.getModule()) {
CreateCompileUnit();
}
@@ -50,9 +52,54 @@ CGDebugInfo::~CGDebugInfo() {
"Region stack mismatch, stack not empty!");
}
+
+NoLocation::NoLocation(CodeGenFunction &CGF, CGBuilderTy &B)
+ : DI(CGF.getDebugInfo()), Builder(B) {
+ if (DI) {
+ SavedLoc = DI->getLocation();
+ DI->CurLoc = SourceLocation();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ }
+}
+
+NoLocation::~NoLocation() {
+ if (DI) {
+ assert(Builder.getCurrentDebugLocation().isUnknown());
+ DI->CurLoc = SavedLoc;
+ }
+}
+
+ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B)
+ : DI(CGF.getDebugInfo()), Builder(B) {
+ if (DI) {
+ SavedLoc = DI->getLocation();
+ DI->CurLoc = SourceLocation();
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ }
+}
+
+void ArtificialLocation::Emit() {
+ if (DI) {
+ // Sync the Builder.
+ DI->EmitLocation(Builder, SavedLoc);
+ DI->CurLoc = SourceLocation();
+ // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty());
+ llvm::DIDescriptor Scope(DI->LexicalBlockStack.back());
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope));
+ }
+}
+
+ArtificialLocation::~ArtificialLocation() {
+ if (DI) {
+ assert(Builder.getCurrentDebugLocation().getLine() == 0);
+ DI->CurLoc = SavedLoc;
+ }
+}
+
void CGDebugInfo::setLocation(SourceLocation Loc) {
// If the new location isn't valid return.
- if (!Loc.isValid()) return;
+ if (Loc.isInvalid()) return;
CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
@@ -112,7 +159,7 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
}
/// getFunctionName - Get function name for the given FunctionDecl. If the
-/// name is constructred on demand (e.g. C++ destructor) then the name
+/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
assert (FD && "Invalid FunctionDecl!");
@@ -138,10 +185,7 @@ StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
}
// Copy this name on the side and use its reference.
- OS.flush();
- char *StrPtr = DebugInfoNames.Allocate<char>(NS.size());
- memcpy(StrPtr, NS.data(), NS.size());
- return StringRef(StrPtr, NS.size());
+ return internString(OS.str());
}
StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
@@ -149,35 +193,37 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
llvm::raw_svector_ostream OS(MethodName);
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
- if (const ObjCImplementationDecl *OID =
+ if (const ObjCImplementationDecl *OID =
dyn_cast<const ObjCImplementationDecl>(DC)) {
OS << OID->getName();
- } else if (const ObjCInterfaceDecl *OID =
+ } else if (const ObjCInterfaceDecl *OID =
dyn_cast<const ObjCInterfaceDecl>(DC)) {
OS << OID->getName();
- } else if (const ObjCCategoryImplDecl *OCD =
+ } else if (const ObjCCategoryImplDecl *OCD =
dyn_cast<const ObjCCategoryImplDecl>(DC)){
OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
OCD->getIdentifier()->getNameStart() << ')';
+ } else if (isa<ObjCProtocolDecl>(DC)) {
+ // We can extract the type of the class from the self pointer.
+ if (ImplicitParamDecl* SelfDecl = OMD->getSelfDecl()) {
+ QualType ClassTy =
+ cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
+ ClassTy.print(OS, PrintingPolicy(LangOptions()));
+ }
}
OS << ' ' << OMD->getSelector().getAsString() << ']';
- char *StrPtr = DebugInfoNames.Allocate<char>(OS.tell());
- memcpy(StrPtr, MethodName.begin(), OS.tell());
- return StringRef(StrPtr, OS.tell());
+ return internString(OS.str());
}
/// getSelectorName - Return selector name. This is used for debugging
/// info.
StringRef CGDebugInfo::getSelectorName(Selector S) {
- const std::string &SName = S.getAsString();
- char *StrPtr = DebugInfoNames.Allocate<char>(SName.size());
- memcpy(StrPtr, SName.data(), SName.size());
- return StringRef(StrPtr, SName.size());
+ return internString(S.getAsString());
}
/// getClassName - Get class name including template argument list.
-StringRef
+StringRef
CGDebugInfo::getClassName(const RecordDecl *RD) {
const ClassTemplateSpecializationDecl *Spec
= dyn_cast<ClassTemplateSpecializationDecl>(RD);
@@ -206,11 +252,7 @@ CGDebugInfo::getClassName(const RecordDecl *RD) {
}
// Copy this name on the side and use its reference.
- size_t Length = Name.size() + TemplateArgList.size();
- char *StrPtr = DebugInfoNames.Allocate<char>(Length);
- memcpy(StrPtr, Name.data(), Name.size());
- memcpy(StrPtr + Name.size(), TemplateArgList.data(), TemplateArgList.size());
- return StringRef(StrPtr, Length);
+ return internString(Name, TemplateArgList);
}
/// getOrCreateFile - Get the file debug info descriptor for the input location.
@@ -280,9 +322,7 @@ StringRef CGDebugInfo::getCurrentDirname() {
return CWDName;
SmallString<256> CWD;
llvm::sys::fs::current_path(CWD);
- char *CompDirnamePtr = DebugInfoNames.Allocate<char>(CWD.size());
- memcpy(CompDirnamePtr, CWD.data(), CWD.size());
- return CWDName = StringRef(CompDirnamePtr, CWD.size());
+ return CWDName = internString(CWD);
}
/// CreateCompileUnit - Create new compile unit.
@@ -301,21 +341,20 @@ void CGDebugInfo::CreateCompileUnit() {
std::string MainFileDir;
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
MainFileDir = MainFile->getDir()->getName();
- if (MainFileDir != ".")
- MainFileName = MainFileDir + "/" + MainFileName;
+ if (MainFileDir != ".") {
+ llvm::SmallString<1024> MainFileDirSS(MainFileDir);
+ llvm::sys::path::append(MainFileDirSS, MainFileName);
+ MainFileName = MainFileDirSS.str();
+ }
}
// Save filename string.
- char *FilenamePtr = DebugInfoNames.Allocate<char>(MainFileName.length());
- memcpy(FilenamePtr, MainFileName.c_str(), MainFileName.length());
- StringRef Filename(FilenamePtr, MainFileName.length());
+ StringRef Filename = internString(MainFileName);
// Save split dwarf file string.
std::string SplitDwarfFile = CGM.getCodeGenOpts().SplitDwarfFile;
- char *SplitDwarfPtr = DebugInfoNames.Allocate<char>(SplitDwarfFile.length());
- memcpy(SplitDwarfPtr, SplitDwarfFile.c_str(), SplitDwarfFile.length());
- StringRef SplitDwarfFilename(SplitDwarfPtr, SplitDwarfFile.length());
-
+ StringRef SplitDwarfFilename = internString(SplitDwarfFile);
+
unsigned LangTag;
const LangOptions &LO = CGM.getLangOpts();
if (LO.CPlusPlus) {
@@ -339,12 +378,11 @@ void CGDebugInfo::CreateCompileUnit() {
RuntimeVers = LO.ObjCRuntime.isNonFragile() ? 2 : 1;
// Create new compile unit.
- DBuilder.createCompileUnit(LangTag, Filename, getCurrentDirname(),
- Producer, LO.Optimize,
- CGM.getCodeGenOpts().DwarfDebugFlags,
- RuntimeVers, SplitDwarfFilename);
// FIXME - Eliminate TheCU.
- TheCU = llvm::DICompileUnit(DBuilder.getCU());
+ TheCU = DBuilder.createCompileUnit(LangTag, Filename, getCurrentDirname(),
+ Producer, LO.Optimize,
+ CGM.getCodeGenOpts().DwarfDebugFlags,
+ RuntimeVers, SplitDwarfFilename);
}
/// CreateType - Get the Basic type from the cache or create a new
@@ -360,12 +398,11 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
case BuiltinType::Dependent:
llvm_unreachable("Unexpected builtin type");
case BuiltinType::NullPtr:
- return DBuilder.
- createNullPtrType(BT->getName(CGM.getLangOpts()));
+ return DBuilder.createNullPtrType();
case BuiltinType::Void:
return llvm::DIType();
case BuiltinType::ObjCClass:
- if (ClassTy.Verify())
+ if (ClassTy)
return ClassTy;
ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_class", TheCU,
@@ -377,16 +414,16 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// Class isa;
// } *id;
- if (ObjTy.Verify())
+ if (ObjTy)
return ObjTy;
- if (!ClassTy.Verify())
+ if (!ClassTy)
ClassTy = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
"objc_class", TheCU,
getOrCreateMainFile(), 0);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
-
+
llvm::DIType ISATy = DBuilder.createPointerType(ClassTy, Size);
ObjTy =
@@ -398,7 +435,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return ObjTy;
}
case BuiltinType::ObjCSel: {
- if (SelTy.Verify())
+ if (SelTy)
return SelTy;
SelTy =
DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
@@ -411,7 +448,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_image1d_t",
OCLImage1dDITy);
case BuiltinType::OCLImage1dArray:
- return getOrCreateStructPtrType("opencl_image1d_array_t",
+ return getOrCreateStructPtrType("opencl_image1d_array_t",
OCLImage1dArrayDITy);
case BuiltinType::OCLImage1dBuffer:
return getOrCreateStructPtrType("opencl_image1d_buffer_t",
@@ -471,7 +508,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
- llvm::DIType DbgTy =
+ llvm::DIType DbgTy =
DBuilder.createBasicType(BTName, Size, Align, Encoding);
return DbgTy;
}
@@ -484,7 +521,7 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- llvm::DIType DbgTy =
+ llvm::DIType DbgTy =
DBuilder.createBasicType("complex", Size, Align, Encoding);
return DbgTy;
@@ -523,7 +560,7 @@ llvm::DIType CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile Unit) {
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
llvm::DIType DbgTy = DBuilder.createQualifiedType(Tag, FromTy);
-
+
return DbgTy;
}
@@ -537,20 +574,48 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
llvm::DIType DbgTy =
- CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
Ty->getPointeeType(), Unit);
return DbgTy;
}
llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
Ty->getPointeeType(), Unit);
}
+/// In C++ mode, types have linkage, so we can rely on the ODR and
+/// on their mangled names, if they're external.
+static SmallString<256>
+getUniqueTagTypeName(const TagType *Ty, CodeGenModule &CGM,
+ llvm::DICompileUnit TheCU) {
+ SmallString<256> FullName;
+ // FIXME: ODR should apply to ObjC++ exactly the same way it does to C++.
+ // For now, only apply ODR with C++.
+ const TagDecl *TD = Ty->getDecl();
+ if (TheCU.getLanguage() != llvm::dwarf::DW_LANG_C_plus_plus ||
+ !TD->isExternallyVisible())
+ return FullName;
+ // Microsoft Mangler does not have support for mangleCXXRTTIName yet.
+ if (CGM.getTarget().getCXXABI().isMicrosoft())
+ return FullName;
+
+ // TODO: This is using the RTTI name. Is there a better way to get
+ // a unique string for a type?
+ llvm::raw_svector_ostream Out(FullName);
+ CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(QualType(Ty, 0), Out);
+ Out.flush();
+ return FullName;
+}
+
// Creates a forward declaration for a RecordDecl in the given context.
-llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
- llvm::DIDescriptor Ctx) {
+llvm::DICompositeType
+CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
+ llvm::DIDescriptor Ctx) {
+ const RecordDecl *RD = Ty->getDecl();
+ if (llvm::DIType T = getTypeOrNull(CGM.getContext().getRecordType(RD)))
+ return llvm::DICompositeType(T);
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
StringRef RDName = getClassName(RD);
@@ -566,74 +631,18 @@ llvm::DIType CGDebugInfo::createRecordFwdDecl(const RecordDecl *RD,
}
// Create the type.
- return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line);
-}
-
-// Walk up the context chain and create forward decls for record decls,
-// and normal descriptors for namespaces.
-llvm::DIDescriptor CGDebugInfo::createContextChain(const Decl *Context) {
- if (!Context)
- return TheCU;
-
- // See if we already have the parent.
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
- I = RegionMap.find(Context);
- if (I != RegionMap.end()) {
- llvm::Value *V = I->second;
- return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
- }
-
- // Check namespace.
- if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
- return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl));
-
- if (const RecordDecl *RD = dyn_cast<RecordDecl>(Context)) {
- if (!RD->isDependentType()) {
- llvm::DIType Ty = getOrCreateLimitedType(CGM.getContext().getTypeDeclType(RD),
- getOrCreateMainFile());
- return llvm::DIDescriptor(Ty);
- }
- }
- return TheCU;
-}
-
-/// CreatePointeeType - Create Pointee type. If Pointee is a record
-/// then emit record's fwd if debug info size reduction is enabled.
-llvm::DIType CGDebugInfo::CreatePointeeType(QualType PointeeTy,
- llvm::DIFile Unit) {
- if (CGM.getCodeGenOpts().getDebugInfo() != CodeGenOptions::LimitedDebugInfo)
- return getOrCreateType(PointeeTy, Unit);
-
- // Limit debug info for the pointee type.
-
- // If we have an existing type, use that, it's still smaller than creating
- // a new type.
- llvm::DIType Ty = getTypeOrNull(PointeeTy);
- if (Ty.Verify()) return Ty;
-
- // Handle qualifiers.
- if (PointeeTy.hasLocalQualifiers())
- return CreateQualifiedType(PointeeTy, Unit);
-
- if (const RecordType *RTy = dyn_cast<RecordType>(PointeeTy)) {
- RecordDecl *RD = RTy->getDecl();
- llvm::DIDescriptor FDContext =
- getContextDescriptor(cast<Decl>(RD->getDeclContext()));
- llvm::DIType RetTy = createRecordFwdDecl(RD, FDContext);
- TypeCache[QualType(RTy, 0).getAsOpaquePtr()] = RetTy;
- return RetTy;
- }
- return getOrCreateType(PointeeTy, Unit);
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+ return DBuilder.createForwardDecl(Tag, RDName, Ctx, DefUnit, Line, 0, 0, 0,
+ FullName);
}
llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
- const Type *Ty,
+ const Type *Ty,
QualType PointeeTy,
llvm::DIFile Unit) {
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
- return DBuilder.createReferenceType(Tag,
- CreatePointeeType(PointeeTy, Unit));
+ return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit));
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
@@ -642,25 +651,24 @@ llvm::DIType CGDebugInfo::CreatePointerLikeType(unsigned Tag,
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return DBuilder.createPointerType(CreatePointeeType(PointeeTy, Unit),
- Size, Align);
+ return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
+ Align);
}
-llvm::DIType CGDebugInfo::getOrCreateStructPtrType(StringRef Name, llvm::DIType &Cache) {
- if (Cache.Verify())
- return Cache;
- Cache =
- DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- Name, TheCU, getOrCreateMainFile(),
- 0);
- unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- Cache = DBuilder.createPointerType(Cache, Size);
+llvm::DIType CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
+ llvm::DIType &Cache) {
+ if (Cache)
return Cache;
+ Cache = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type, Name,
+ TheCU, getOrCreateMainFile(), 0);
+ unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ Cache = DBuilder.createPointerType(Cache, Size);
+ return Cache;
}
llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
llvm::DIFile Unit) {
- if (BlockLiteralGenericSet)
+ if (BlockLiteralGeneric)
return BlockLiteralGeneric;
SmallVector<llvm::Value *, 8> EltTys;
@@ -716,7 +724,6 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
Unit, LineNo, FieldOffset, 0,
Flags, llvm::DIType(), Elements);
- BlockLiteralGenericSet = true;
BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size);
return BlockLiteralGeneric;
}
@@ -725,16 +732,16 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
// Typedefs are derived from some other type. If we have a typedef of a
// typedef, make sure to emit the whole chain.
llvm::DIType Src = getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
- if (!Src.Verify())
+ if (!Src)
return llvm::DIType();
// We don't set size information, but do specify where the typedef was
// declared.
unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
const TypedefNameDecl *TyDecl = Ty->getDecl();
-
+
llvm::DIDescriptor TypedefContext =
getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
-
+
return
DBuilder.createTypedef(Src, TyDecl->getName(), Unit, Line, TypedefContext);
}
@@ -767,7 +774,7 @@ llvm::DIType CGDebugInfo::createFieldType(StringRef name,
AccessSpecifier AS,
uint64_t offsetInBits,
llvm::DIFile tunit,
- llvm::DIDescriptor scope) {
+ llvm::DIScope scope) {
llvm::DIType debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -839,20 +846,15 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
}
}
-/// CollectRecordStaticField - Helper for CollectRecordFields.
-void CGDebugInfo::
-CollectRecordStaticField(const VarDecl *Var,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+/// Helper for CollectRecordFields.
+llvm::DIDerivedType
+CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
+ llvm::DIType RecordTy) {
// Create the descriptor for the static variable, with or without
// constant initializers.
llvm::DIFile VUnit = getOrCreateFile(Var->getLocation());
llvm::DIType VTy = getOrCreateType(Var->getType(), VUnit);
- // Do not describe enums as static members.
- if (VTy.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
- return;
-
unsigned LineNumber = getLineNumber(Var->getLocation());
StringRef VName = Var->getName();
llvm::Constant *C = NULL;
@@ -873,10 +875,10 @@ CollectRecordStaticField(const VarDecl *Var,
else if (Access == clang::AS_protected)
Flags |= llvm::DIDescriptor::FlagProtected;
- llvm::DIType GV = DBuilder.createStaticMemberType(RecordTy, VName, VUnit,
- LineNumber, VTy, Flags, C);
- elements.push_back(GV);
+ llvm::DIDerivedType GV = DBuilder.createStaticMemberType(
+ RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
StaticDataMemberCache[Var->getCanonicalDecl()] = llvm::WeakVH(GV);
+ return GV;
}
/// CollectRecordNormalField - Helper for CollectRecordFields.
@@ -908,10 +910,10 @@ CollectRecordNormalField(const FieldDecl *field, uint64_t OffsetInBits,
/// CollectRecordFields - A helper function to collect debug info for
/// record fields. This is used while creating debug info entry for a Record.
-void CGDebugInfo::
-CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectRecordFields(const RecordDecl *record,
+ llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Value *> &elements,
+ llvm::DICompositeType RecordTy) {
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
if (CXXDecl && CXXDecl->isLambda())
@@ -922,24 +924,22 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
// Field number for non-static fields.
unsigned fieldNo = 0;
- // Bookkeeping for an ms struct, which ignores certain fields.
- bool IsMsStruct = record->isMsStruct(CGM.getContext());
- const FieldDecl *LastFD = 0;
-
// Static and non-static members should appear in the same order as
// the corresponding declarations in the source program.
for (RecordDecl::decl_iterator I = record->decls_begin(),
E = record->decls_end(); I != E; ++I)
- if (const VarDecl *V = dyn_cast<VarDecl>(*I))
- CollectRecordStaticField(V, elements, RecordTy);
- else if (FieldDecl *field = dyn_cast<FieldDecl>(*I)) {
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are
- // completely ignored; we don't even count them.
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield((field), LastFD))
- continue;
- LastFD = field;
- }
+ if (const VarDecl *V = dyn_cast<VarDecl>(*I)) {
+ // Reuse the existing static member declaration if one exists
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
+ StaticDataMemberCache.find(V->getCanonicalDecl());
+ if (MI != StaticDataMemberCache.end()) {
+ assert(MI->second &&
+ "Static data member declaration should still exist");
+ elements.push_back(
+ llvm::DIDerivedType(cast<llvm::MDNode>(MI->second)));
+ } else
+ elements.push_back(CreateRecordStaticField(V, RecordTy));
+ } else if (FieldDecl *field = dyn_cast<FieldDecl>(*I)) {
CollectRecordNormalField(field, layout.getFieldOffset(fieldNo),
tunit, elements, RecordTy);
@@ -952,17 +952,17 @@ CollectRecordFields(const RecordDecl *record, llvm::DIFile tunit,
/// getOrCreateMethodType - CXXMethodDecl's type is a FunctionType. This
/// function type is not updated to include implicit "this" pointer. Use this
/// routine to get a method type which includes "this" pointer.
-llvm::DIType
+llvm::DICompositeType
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile Unit) {
const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
if (Method->isStatic())
- return getOrCreateType(QualType(Func, 0), Unit);
+ return llvm::DICompositeType(getOrCreateType(QualType(Func, 0), Unit));
return getOrCreateInstanceMethodType(Method->getThisType(CGM.getContext()),
Func, Unit);
}
-llvm::DIType CGDebugInfo::getOrCreateInstanceMethodType(
+llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit) {
// Add "this" pointer.
llvm::DIArray Args = llvm::DICompositeType(
@@ -984,7 +984,8 @@ llvm::DIType CGDebugInfo::getOrCreateInstanceMethodType(
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
- llvm::DIType ThisPtrType = DBuilder.createPointerType(PointeeType, Size, Align);
+ llvm::DIType ThisPtrType =
+ DBuilder.createPointerType(PointeeType, Size, Align);
TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
// TODO: This and the artificial type below are misleading, the
// types aren't artificial the argument is, but the current
@@ -1007,7 +1008,7 @@ llvm::DIType CGDebugInfo::getOrCreateInstanceMethodType(
return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
-/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
+/// isFunctionLocalClass - Return true if CXXRecordDecl is defined
/// inside a function.
static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
if (const CXXRecordDecl *NRD = dyn_cast<CXXRecordDecl>(RD->getDeclContext()))
@@ -1023,11 +1024,11 @@ llvm::DISubprogram
CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIFile Unit,
llvm::DIType RecordTy) {
- bool IsCtorOrDtor =
+ bool IsCtorOrDtor =
isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
-
+
StringRef MethodName = getFunctionName(Method);
- llvm::DIType MethodTy = getOrCreateMethodType(Method, Unit);
+ llvm::DICompositeType MethodTy = getOrCreateMethodType(Method, Unit);
// Since a single ctor/dtor corresponds to multiple functions, it doesn't
// make sense to give a single ctor/dtor a linkage name.
@@ -1036,24 +1037,32 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
MethodLinkageName = CGM.getMangledName(Method);
// Get the location for the method.
- llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
- unsigned MethodLine = getLineNumber(Method->getLocation());
+ llvm::DIFile MethodDefUnit;
+ unsigned MethodLine = 0;
+ if (!Method->isImplicit()) {
+ MethodDefUnit = getOrCreateFile(Method->getLocation());
+ MethodLine = getLineNumber(Method->getLocation());
+ }
// Collect virtual method info.
llvm::DIType ContainingType;
- unsigned Virtuality = 0;
+ unsigned Virtuality = 0;
unsigned VIndex = 0;
-
+
if (Method->isVirtual()) {
if (Method->isPure())
Virtuality = llvm::dwarf::DW_VIRTUALITY_pure_virtual;
else
Virtuality = llvm::dwarf::DW_VIRTUALITY_virtual;
-
+
// It doesn't make sense to give a virtual destructor a vtable index,
// since a single destructor has two entries in the vtable.
- if (!isa<CXXDestructorDecl>(Method))
- VIndex = CGM.getVTableContext().getMethodVTableIndex(Method);
+ // FIXME: Add proper support for debug info for virtual calls in
+ // the Microsoft ABI, where we may use multiple vptrs to make a vftable
+ // lookup if we have multiple or virtual inheritance.
+ if (!isa<CXXDestructorDecl>(Method) &&
+ !CGM.getTarget().getCXXABI().isMicrosoft())
+ VIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(Method);
ContainingType = RecordTy;
}
@@ -1068,7 +1077,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
- } else if (const CXXConversionDecl *CXXC =
+ } else if (const CXXConversionDecl *CXXC =
dyn_cast<CXXConversionDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
@@ -1078,21 +1087,21 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
llvm::DISubprogram SP =
- DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
+ DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false,
+ MethodTy, /*isLocalToUnit=*/false,
/* isDefinition=*/ false,
Virtuality, VIndex, ContainingType,
Flags, CGM.getLangOpts().Optimize, NULL,
TParamsArray);
-
+
SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
return SP;
}
/// CollectCXXMemberFunctions - A helper function to collect debug info for
-/// C++ member functions. This is used while creating debug info entry for
+/// C++ member functions. This is used while creating debug info entry for
/// a Record.
void CGDebugInfo::
CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
@@ -1104,40 +1113,42 @@ CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
// the functions.
for(DeclContext::decl_iterator I = RD->decls_begin(),
E = RD->decls_end(); I != E; ++I) {
- Decl *D = *I;
- if (D->isImplicit() && !D->isUsed())
- continue;
-
- if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
- else if (FunctionTemplateDecl *FTD = dyn_cast<FunctionTemplateDecl>(D))
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(*I)) {
+ // Reuse the existing member function declaration if it exists.
+ // It may be associated with the declaration of the type & should be
+ // reused as we're building the definition.
+ //
+ // This situation can arise in the vtable-based debug info reduction where
+ // implicit members are emitted in a non-vtable TU.
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
+ SPCache.find(Method->getCanonicalDecl());
+ if (MI == SPCache.end()) {
+ // If the member is implicit, lazily create it when we see the
+ // definition, not before. (an ODR-used implicit default ctor that's
+ // never actually code generated should not produce debug info)
+ if (!Method->isImplicit())
+ EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
+ } else
+ EltTys.push_back(MI->second);
+ } else if (const FunctionTemplateDecl *FTD =
+ dyn_cast<FunctionTemplateDecl>(*I)) {
+ // Add any template specializations that have already been seen. Like
+ // implicit member functions, these may have been added to a declaration
+ // in the case of vtable-based debug info reduction.
for (FunctionTemplateDecl::spec_iterator SI = FTD->spec_begin(),
- SE = FTD->spec_end(); SI != SE; ++SI)
- EltTys.push_back(CreateCXXMemberFunction(cast<CXXMethodDecl>(*SI), Unit,
- RecordTy));
- }
-}
-
-/// CollectCXXFriends - A helper function to collect debug info for
-/// C++ base classes. This is used while creating debug info entry for
-/// a Record.
-void CGDebugInfo::
-CollectCXXFriends(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy) {
- for (CXXRecordDecl::friend_iterator BI = RD->friend_begin(),
- BE = RD->friend_end(); BI != BE; ++BI) {
- if ((*BI)->isUnsupportedFriend())
- continue;
- if (TypeSourceInfo *TInfo = (*BI)->getFriendType())
- EltTys.push_back(DBuilder.createFriend(RecordTy,
- getOrCreateType(TInfo->getType(),
- Unit)));
+ SE = FTD->spec_end();
+ SI != SE; ++SI) {
+ llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
+ SPCache.find(cast<CXXMethodDecl>(*SI)->getCanonicalDecl());
+ if (MI != SPCache.end())
+ EltTys.push_back(MI->second);
+ }
+ }
}
}
/// CollectCXXBases - A helper function to collect debug info for
-/// C++ base classes. This is used while creating debug info entry for
+/// C++ base classes. This is used while creating debug info entry for
/// a Record.
void CGDebugInfo::
CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
@@ -1149,30 +1160,30 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
BE = RD->bases_end(); BI != BE; ++BI) {
unsigned BFlags = 0;
uint64_t BaseOffset;
-
+
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(BI->getType()->getAs<RecordType>()->getDecl());
-
+
if (BI->isVirtual()) {
// virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
- BaseOffset =
- 0 - CGM.getVTableContext()
+ BaseOffset =
+ 0 - CGM.getItaniumVTableContext()
.getVirtualBaseOffsetOffset(RD, Base).getQuantity();
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
-
+
AccessSpecifier Access = BI->getAccessSpecifier();
if (Access == clang::AS_private)
BFlags |= llvm::DIDescriptor::FlagPrivate;
else if (Access == clang::AS_protected)
BFlags |= llvm::DIDescriptor::FlagProtected;
-
- llvm::DIType DTy =
- DBuilder.createInheritance(RecordTy,
+
+ llvm::DIType DTy =
+ DBuilder.createInheritance(RecordTy,
getOrCreateType(BI->getType(), Unit),
BaseOffset, BFlags);
EltTys.push_back(DTy);
@@ -1182,23 +1193,119 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
/// CollectTemplateParams - A helper function to collect template parameters.
llvm::DIArray CGDebugInfo::
CollectTemplateParams(const TemplateParameterList *TPList,
- const TemplateArgumentList &TAList,
+ ArrayRef<TemplateArgument> TAList,
llvm::DIFile Unit) {
- SmallVector<llvm::Value *, 16> TemplateParams;
+ SmallVector<llvm::Value *, 16> TemplateParams;
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
- const NamedDecl *ND = TPList->getParam(i);
- if (TA.getKind() == TemplateArgument::Type) {
+ StringRef Name;
+ if (TPList)
+ Name = TPList->getParam(i)->getName();
+ switch (TA.getKind()) {
+ case TemplateArgument::Type: {
llvm::DIType TTy = getOrCreateType(TA.getAsType(), Unit);
llvm::DITemplateTypeParameter TTP =
- DBuilder.createTemplateTypeParameter(TheCU, ND->getName(), TTy);
+ DBuilder.createTemplateTypeParameter(TheCU, Name, TTy);
TemplateParams.push_back(TTP);
- } else if (TA.getKind() == TemplateArgument::Integral) {
+ } break;
+ case TemplateArgument::Integral: {
llvm::DIType TTy = getOrCreateType(TA.getIntegralType(), Unit);
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, ND->getName(), TTy,
- TA.getAsIntegral().getZExtValue());
- TemplateParams.push_back(TVP);
+ DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy,
+ llvm::ConstantInt::get(CGM.getLLVMContext(), TA.getAsIntegral()));
+ TemplateParams.push_back(TVP);
+ } break;
+ case TemplateArgument::Declaration: {
+ const ValueDecl *D = TA.getAsDecl();
+ bool InstanceMember = D->isCXXInstanceMember();
+ QualType T = InstanceMember
+ ? CGM.getContext().getMemberPointerType(
+ D->getType(), cast<RecordDecl>(D->getDeclContext())
+ ->getTypeForDecl())
+ : CGM.getContext().getPointerType(D->getType());
+ llvm::DIType TTy = getOrCreateType(T, Unit);
+ llvm::Value *V = 0;
+ // Variable pointer template parameters have a value that is the address
+ // of the variable.
+ if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ V = CGM.GetAddrOfGlobalVar(VD);
+ // Member function pointers have special support for building them, though
+ // this is currently unsupported in LLVM CodeGen.
+ if (InstanceMember) {
+ if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(D))
+ V = CGM.getCXXABI().EmitMemberPointer(method);
+ } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ V = CGM.GetAddrOfFunction(FD);
+ // Member data pointers have special handling too to compute the fixed
+ // offset within the object.
+ if (isa<FieldDecl>(D)) {
+ // These five lines (& possibly the above member function pointer
+ // handling) might be able to be refactored to use similar code in
+ // CodeGenModule::getMemberPointerConstant
+ uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
+ CharUnits chars =
+ CGM.getContext().toCharUnitsFromBits((int64_t) fieldOffset);
+ V = CGM.getCXXABI().EmitMemberDataPointer(
+ cast<MemberPointerType>(T.getTypePtr()), chars);
+ }
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
+ V->stripPointerCasts());
+ TemplateParams.push_back(TVP);
+ } break;
+ case TemplateArgument::NullPtr: {
+ QualType T = TA.getNullPtrType();
+ llvm::DIType TTy = getOrCreateType(T, Unit);
+ llvm::Value *V = 0;
+ // Special case member data pointer null values since they're actually -1
+ // instead of zero.
+ if (const MemberPointerType *MPT =
+ dyn_cast<MemberPointerType>(T.getTypePtr()))
+ // But treat member function pointers as simple zero integers because
+ // it's easier than having a special case in LLVM's CodeGen. If LLVM
+ // CodeGen grows handling for values of non-null member function
+ // pointers then perhaps we could remove this special case and rely on
+ // EmitNullMemberPointer for member function pointers.
+ if (MPT->isMemberDataPointer())
+ V = CGM.getCXXABI().EmitNullMemberPointer(MPT);
+ if (!V)
+ V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V);
+ TemplateParams.push_back(TVP);
+ } break;
+ case TemplateArgument::Template: {
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateTemplateParameter(
+ TheCU, Name, llvm::DIType(),
+ TA.getAsTemplate().getAsTemplateDecl()
+ ->getQualifiedNameAsString());
+ TemplateParams.push_back(TVP);
+ } break;
+ case TemplateArgument::Pack: {
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateParameterPack(
+ TheCU, Name, llvm::DIType(),
+ CollectTemplateParams(NULL, TA.getPackAsArray(), Unit));
+ TemplateParams.push_back(TVP);
+ } break;
+ case TemplateArgument::Expression: {
+ const Expr *E = TA.getAsExpr();
+ QualType T = E->getType();
+ llvm::Value *V = CGM.EmitConstantExpr(E, T);
+ assert(V && "Expression in template argument isn't constant");
+ llvm::DIType TTy = getOrCreateType(T, Unit);
+ llvm::DITemplateValueParameter TVP =
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
+ V->stripPointerCasts());
+ TemplateParams.push_back(TVP);
+ } break;
+ // And the following should never occur:
+ case TemplateArgument::TemplateExpansion:
+ case TemplateArgument::Null:
+ llvm_unreachable(
+ "These argument types shouldn't exist in concrete types");
}
}
return DBuilder.getOrCreateArray(TemplateParams);
@@ -1213,8 +1320,8 @@ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
const TemplateParameterList *TList =
FD->getTemplateSpecializationInfo()->getTemplate()
->getTemplateParameters();
- return
- CollectTemplateParams(TList, *FD->getTemplateSpecializationArgs(), Unit);
+ return CollectTemplateParams(
+ TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
}
return llvm::DIArray();
}
@@ -1227,12 +1334,12 @@ CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
llvm::PointerUnion<ClassTemplateDecl *,
ClassTemplatePartialSpecializationDecl *>
PU = TSpecial->getSpecializedTemplateOrPartial();
-
+
TemplateParameterList *TPList = PU.is<ClassTemplateDecl *>() ?
PU.get<ClassTemplateDecl *>()->getTemplateParameters() :
PU.get<ClassTemplatePartialSpecializationDecl *>()->getTemplateParameters();
const TemplateArgumentList &TAList = TSpecial->getTemplateInstantiationArgs();
- return CollectTemplateParams(TPList, TAList, Unit);
+ return CollectTemplateParams(TPList, TAList.asArray(), Unit);
}
/// getOrCreateVTablePtrType - Return debug info descriptor for vtable.
@@ -1255,13 +1362,8 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
/// getVTableName - Get vtable name for the given Class.
StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
- // Construct gdb compatible name name.
- std::string Name = "_vptr$" + RD->getNameAsString();
-
- // Copy this name on the side and use its reference.
- char *StrPtr = DebugInfoNames.Allocate<char>(Name.length());
- memcpy(StrPtr, Name.data(), Name.length());
- return StringRef(StrPtr, Name.length());
+ // Copy the gdb compatible name on the side and use its reference.
+ return internString("_vptr$", RD->getNameAsString());
}
@@ -1283,15 +1385,16 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
llvm::DIType VPTR
= DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
- 0, Size, 0, 0, llvm::DIDescriptor::FlagArtificial,
+ 0, Size, 0, 0,
+ llvm::DIDescriptor::FlagArtificial,
getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
}
-/// getOrCreateRecordType - Emit record type's standalone debug info.
-llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
+/// getOrCreateRecordType - Emit record type's standalone debug info.
+llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(RTy, getOrCreateFile(Loc));
return T;
}
@@ -1300,15 +1403,76 @@ llvm::DIType CGDebugInfo::getOrCreateRecordType(QualType RTy,
/// debug info.
llvm::DIType CGDebugInfo::getOrCreateInterfaceType(QualType D,
SourceLocation Loc) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
llvm::DIType T = getOrCreateType(D, getOrCreateFile(Loc));
RetainedTypes.push_back(D.getAsOpaquePtr());
return T;
}
+void CGDebugInfo::completeType(const RecordDecl *RD) {
+ if (DebugKind > CodeGenOptions::LimitedDebugInfo ||
+ !CGM.getLangOpts().CPlusPlus)
+ completeRequiredType(RD);
+}
+
+void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
+ if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXDecl->isDynamicClass())
+ return;
+
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ llvm::DIType T = getTypeOrNull(Ty);
+ if (T && T.isForwardDecl())
+ completeClassData(RD);
+}
+
+void CGDebugInfo::completeClassData(const RecordDecl *RD) {
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ void* TyPtr = Ty.getAsOpaquePtr();
+ if (CompletedTypeCache.count(TyPtr))
+ return;
+ llvm::DIType Res = CreateTypeDefinition(Ty->castAs<RecordType>());
+ assert(!Res.isForwardDecl());
+ CompletedTypeCache[TyPtr] = Res;
+ TypeCache[TyPtr] = Res;
+}
+
/// CreateType - get structure or union type.
llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
+ const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
+ // Always emit declarations for types that aren't required to be complete when
+ // in limit-debug-info mode. If the type is later found to be required to be
+ // complete this declaration will be upgraded to a definition by
+ // `completeRequiredType`.
+ // If the type is dynamic, only emit the definition in TUs that require class
+ // data. This is handled by `completeClassData`.
+ llvm::DICompositeType T(getTypeOrNull(QualType(Ty, 0)));
+ // If we've already emitted the type, just use that, even if it's only a
+ // declaration. The completeType, completeRequiredType, and completeClassData
+ // callbacks will handle promoting the declaration to a definition.
+ if (T ||
+ (DebugKind <= CodeGenOptions::LimitedDebugInfo &&
+ // Under -flimit-debug-info, emit only a declaration unless the type is
+ // required to be complete.
+ !RD->isCompleteDefinitionRequired() && CGM.getLangOpts().CPlusPlus) ||
+ // If the class is dynamic, only emit a declaration. A definition will be
+ // emitted whenever the vtable is emitted.
+ (CXXDecl && CXXDecl->hasDefinition() && CXXDecl->isDynamicClass()) || T) {
+ llvm::DIDescriptor FDContext =
+ getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+ if (!T)
+ T = getOrCreateRecordFwdDecl(Ty, FDContext);
+ return T;
+ }
+
+ return CreateTypeDefinition(Ty);
+}
+
+llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
+ RecordDecl *RD = Ty->getDecl();
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
@@ -1320,14 +1484,16 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// may refer to the forward decl if the struct is recursive) and replace all
// uses of the forward declaration with the final definition.
- llvm::DICompositeType FwdDecl(
- getOrCreateLimitedType(QualType(Ty, 0), DefUnit));
- assert(FwdDecl.Verify() &&
- "The debug type of a RecordType should be a DICompositeType");
+ llvm::DICompositeType FwdDecl(getOrCreateLimitedType(Ty, DefUnit));
+ assert(FwdDecl.isCompositeType() &&
+ "The debug type of a RecordType should be a llvm::DICompositeType");
if (FwdDecl.isForwardDecl())
return FwdDecl;
+ if (const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
+ CollectContainingType(CXXDecl, FwdDecl);
+
// Push the struct on region stack.
LexicalBlockStack.push_back(&*FwdDecl);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
@@ -1337,6 +1503,7 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// Convert all the elements.
SmallVector<llvm::Value *, 16> EltTys;
+ // what about nested types?
// Note: The split of CXXDecl information here is intentional, the
// gdb tests will depend on a certain ordering at printout. The debug
@@ -1350,20 +1517,14 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty) {
// Collect data fields (including static variables and any initializers).
CollectRecordFields(RD, DefUnit, EltTys, FwdDecl);
- llvm::DIArray TParamsArray;
- if (CXXDecl) {
+ if (CXXDecl)
CollectCXXMemberFunctions(CXXDecl, DefUnit, EltTys, FwdDecl);
- CollectCXXFriends(CXXDecl, DefUnit, EltTys, FwdDecl);
- if (const ClassTemplateSpecializationDecl *TSpecial
- = dyn_cast<ClassTemplateSpecializationDecl>(RD))
- TParamsArray = CollectCXXTemplateParams(TSpecial, DefUnit);
- }
LexicalBlockStack.pop_back();
RegionMap.erase(Ty->getDecl());
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- FwdDecl.setTypeArray(Elements, TParamsArray);
+ FwdDecl.setTypeArray(Elements);
RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
return FwdDecl;
@@ -1376,6 +1537,31 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
return getOrCreateType(Ty->getBaseType(), Unit);
}
+
+/// \return true if Getter has the default name for the property PD.
+static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
+ const ObjCMethodDecl *Getter) {
+ assert(PD);
+ if (!Getter)
+ return true;
+
+ assert(Getter->getDeclName().isObjCZeroArgSelector());
+ return PD->getName() ==
+ Getter->getDeclName().getObjCSelector().getNameForSlot(0);
+}
+
+/// \return true if Setter has the default name for the property PD.
+static bool hasDefaultSetterName(const ObjCPropertyDecl *PD,
+ const ObjCMethodDecl *Setter) {
+ assert(PD);
+ if (!Setter)
+ return true;
+
+ assert(Setter->getDeclName().isObjCOneArgSelector());
+ return SelectorTable::constructSetterName(PD->getName()) ==
+ Setter->getDeclName().getObjCSelector().getNameForSlot(0);
+}
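
Both helpers above encode the Objective-C naming convention: a property named foo has the default getter foo and the default setter setFoo:. A small sketch of the default setter spelling, using plain std::string instead of clang's SelectorTable (defaultSetterSelector is an illustrative name):

#include <cassert>
#include <cctype>
#include <string>

// Build the conventional setter selector ("setFoo:") for a property name.
static std::string defaultSetterSelector(const std::string &Property) {
  assert(!Property.empty());
  std::string Name = "set" + Property;
  // Capitalize the first character of the property name.
  Name[3] = static_cast<char>(std::toupper(static_cast<unsigned char>(Name[3])));
  return Name + ":";
}

int main() {
  assert(defaultSetterSelector("frame") == "setFrame:");
  assert(defaultSetterSelector("x") == "setX:");
}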
+
/// CreateType - get objective-c interface type.
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIFile Unit) {
@@ -1418,8 +1604,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// will find it and we're emitting the complete type.
QualType QualTy = QualType(Ty, 0);
CompletedTypeCache[QualTy.getAsOpaquePtr()] = RealDecl;
- // Push the struct on region stack.
+ // Push the struct on region stack.
LexicalBlockStack.push_back(static_cast<llvm::MDNode*>(RealDecl));
RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
@@ -1432,12 +1618,13 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
if (!SClassTy.isValid())
return llvm::DIType();
-
+
llvm::DIType InhTag =
DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
+ // Create entries for all of the properties.
for (ObjCContainerDecl::prop_iterator I = ID->prop_begin(),
E = ID->prop_end(); I != E; ++I) {
const ObjCPropertyDecl *PD = *I;
@@ -1449,9 +1636,9 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::MDNode *PropertyNode =
DBuilder.createObjCProperty(PD->getName(),
PUnit, PLine,
- (Getter && Getter->isImplicit()) ? "" :
+ hasDefaultGetterName(PD, Getter) ? "" :
getSelectorName(PD->getGetterName()),
- (Setter && Setter->isImplicit()) ? "" :
+ hasDefaultSetterName(PD, Setter) ? "" :
getSelectorName(PD->getSetterName()),
PD->getPropertyAttributes(),
getOrCreateType(PD->getType(), PUnit));
@@ -1465,7 +1652,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
if (!FieldTy.isValid())
return llvm::DIType();
-
+
StringRef FieldName = Field->getName();
// Ignore unnamed fields.
@@ -1483,8 +1670,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// Bit size, align and offset of the type.
FieldSize = Field->isBitField()
- ? Field->getBitWidthValue(CGM.getContext())
- : CGM.getContext().getTypeSize(FType);
+ ? Field->getBitWidthValue(CGM.getContext())
+ : CGM.getContext().getTypeSize(FType);
FieldAlign = CGM.getContext().getTypeAlign(FType);
}
@@ -1512,7 +1699,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::MDNode *PropertyNode = NULL;
if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
- if (ObjCPropertyImplDecl *PImpD =
+ if (ObjCPropertyImplDecl *PImpD =
ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
SourceLocation Loc = PD->getLocation();
@@ -1523,9 +1710,9 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
PropertyNode =
DBuilder.createObjCProperty(PD->getName(),
PUnit, PLine,
- (Getter && Getter->isImplicit()) ? "" :
+ hasDefaultGetterName(PD, Getter) ? "" :
getSelectorName(PD->getGetterName()),
- (Setter && Setter->isImplicit()) ? "" :
+ hasDefaultSetterName(PD, Setter) ? "" :
getSelectorName(PD->getSetterName()),
PD->getPropertyAttributes(),
getOrCreateType(PD->getType(), PUnit));
@@ -1547,7 +1734,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// private ivars that we would miss otherwise.
if (ID->getImplementation() == 0)
CompletedTypeCache.erase(QualTy.getAsOpaquePtr());
-
+
LexicalBlockStack.pop_back();
return RealDecl;
}
@@ -1585,7 +1772,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
Align = 0;
else
Align = CGM.getContext().getTypeAlign(Ty->getElementType());
- } else if (Ty->isDependentSizedArrayType() || Ty->isIncompleteType()) {
+ } else if (Ty->isIncompleteType()) {
Size = 0;
Align = 0;
} else {
@@ -1610,7 +1797,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
int64_t Count = -1; // Count == -1 is an unbounded array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
Count = CAT->getSize().getZExtValue();
-
+
// FIXME: Verify this is right for VLAs.
Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
EltTy = Ty->getElementType();
@@ -1618,30 +1805,30 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
- llvm::DIType DbgTy =
+ llvm::DIType DbgTy =
DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
SubscriptArray);
return DbgTy;
}
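
Each array dimension above becomes a subrange: -1 marks an unbounded dimension (incomplete or variable length) and constant dimensions carry their element count. A small sketch of the subscript collection, with Subrange as an illustrative stand-in:

#include <cassert>
#include <cstdint>
#include <vector>

struct Subrange { int64_t Count; };

static std::vector<Subrange> collectSubscripts(const std::vector<int64_t> &Dims) {
  std::vector<Subrange> Subscripts;
  for (int64_t D : Dims)
    Subscripts.push_back(Subrange{D < 0 ? -1 : D}); // -1 means unbounded
  return Subscripts;
}

int main() {
  std::vector<Subrange> S = collectSubscripts({-1, 4}); // e.g. extern int b[][4];
  assert(S.size() == 2 && S[0].Count == -1 && S[1].Count == 4);
}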
-llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
+llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
Ty, Ty->getPointeeType(), Unit);
}
-llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
+llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
Ty, Ty->getPointeeType(), Unit);
}
-llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
+llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile U) {
llvm::DIType ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
if (!Ty->getPointeeType()->isFunctionType())
return DBuilder.createMemberPointerType(
- CreatePointeeType(Ty->getPointeeType(), U), ClassType);
+ getOrCreateType(Ty->getPointeeType(), U), ClassType);
return DBuilder.createMemberPointerType(getOrCreateInstanceMethodType(
CGM.getContext().getPointerType(
QualType(Ty->getClass(), Ty->getPointeeType().getCVRQualifiers())),
@@ -1649,7 +1836,7 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
ClassType);
}
-llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
llvm::DIFile U) {
// Ignore the atomic wrapping
// FIXME: What is the correct representation?
@@ -1657,7 +1844,8 @@ llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
}
/// CreateEnumType - get enumeration type.
-llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
+llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) {
+ const EnumDecl *ED = Ty->getDecl();
uint64_t Size = 0;
uint64_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
@@ -1665,6 +1853,8 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
}
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+
// If this is just a forward declaration, construct an appropriately
// marked node and just return it.
if (!ED->getDefinition()) {
@@ -1675,7 +1865,7 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
StringRef EDName = ED->getName();
return DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_enumeration_type,
EDName, EDContext, DefUnit, Line, 0,
- Size, Align);
+ Size, Align, FullName);
}
// Create DIEnumerator elements for each enumerator.
@@ -1686,7 +1876,7 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
Enum != EnumEnd; ++Enum) {
Enumerators.push_back(
DBuilder.createEnumerator(Enum->getName(),
- Enum->getInitVal().getZExtValue()));
+ Enum->getInitVal().getSExtValue()));
}
// Return a CompositeType for the enum itself.
@@ -1694,21 +1884,25 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumDecl *ED) {
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
- llvm::DIDescriptor EnumContext =
+ llvm::DIDescriptor EnumContext =
getContextDescriptor(cast<Decl>(ED->getDeclContext()));
llvm::DIType ClassTy = ED->isFixed() ?
getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
- llvm::DIType DbgTy =
+ llvm::DIType DbgTy =
DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
Size, Align, EltArray,
- ClassTy);
+ ClassTy, FullName);
return DbgTy;
}
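
The switch from getZExtValue() to getSExtValue() above matters for enumerators with negative initializers: read as unsigned, a 32-bit -1 would be recorded as 4294967295 in the debug info. A tiny illustration with plain integers standing in for APSInt:

#include <cstdint>
#include <iostream>

int main() {
  int32_t Init = -1;                           // enumerator value as written
  uint64_t ZExt = static_cast<uint32_t>(Init); // zero-extended: 4294967295
  int64_t SExt = Init;                         // sign-extended: -1
  std::cout << "zext: " << ZExt << "  sext: " << SExt << "\n";
}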
static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
Qualifiers Quals;
do {
- Quals += T.getLocalQualifiers();
+ Qualifiers InnerQuals = T.getLocalQualifiers();
+ // Qualifiers::operator+() doesn't like it if you add a Qualifier
+ // that is already there.
+ Quals += Qualifiers::removeCommonQualifiers(Quals, InnerQuals);
+ Quals += InnerQuals;
QualType LastT = T;
switch (T->getTypeClass()) {
default:
@@ -1741,21 +1935,25 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
case Type::Auto:
- T = cast<AutoType>(T)->getDeducedType();
+ QualType DT = cast<AutoType>(T)->getDeducedType();
+ if (DT.isNull())
+ return T;
+ T = DT;
break;
}
-
+
assert(T != LastT && "Type unwrapping failed to unwrap!");
(void)LastT;
} while (true);
}
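
The loop above strips one layer of type sugar per iteration and accumulates qualifiers as it goes; the fix merges only the qualifiers not already collected, so a const repeated by an inner layer no longer trips Qualifiers::operator+=. A bitmask sketch of that idea, with QualMask standing in for clang's Qualifiers:

#include <cassert>
#include <cstdint>

using QualMask = uint32_t;
constexpr QualMask Const = 1, Volatile = 2;

// Keep only the qualifiers that have not been collected yet.
static QualMask withoutAlreadyPresent(QualMask Accumulated, QualMask Inner) {
  return Inner & ~Accumulated;
}

int main() {
  QualMask Quals = Const;              // outer layer already contributed const
  QualMask Inner = Const | Volatile;   // inner sugar repeats const
  Quals |= withoutAlreadyPresent(Quals, Inner);
  assert(Quals == (Const | Volatile));
}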
-/// getType - Get the type from the cache or return null type if it doesn't exist.
+/// getType - Get the type from the cache or return null type if it doesn't
+/// exist.
llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
-
+
// Check for existing entry.
if (Ty->getTypeClass() == Type::ObjCInterface) {
llvm::Value *V = getCachedInterfaceTypeOrNull(Ty);
@@ -1793,10 +1991,7 @@ llvm::DIType CGDebugInfo::getCompletedTypeOrNull(QualType Ty) {
}
// Verify that any cached debug info still exists.
- if (V != 0)
- return llvm::DIType(cast<llvm::MDNode>(V));
-
- return llvm::DIType();
+ return llvm::DIType(cast_or_null<llvm::MDNode>(V));
}
/// getCachedInterfaceTypeOrNull - Get the type from the interface
@@ -1824,9 +2019,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// Unwrap the type as needed for debug information.
Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
- llvm::DIType T = getCompletedTypeOrNull(Ty);
-
- if (T.Verify())
+ if (llvm::DIType T = getCompletedTypeOrNull(Ty))
return T;
// Otherwise create the type.
@@ -1836,29 +2029,33 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// And update the type cache.
TypeCache[TyPtr] = Res;
+ // FIXME: this getTypeOrNull call seems silly when we just inserted the type
+ // into the cache - but getTypeOrNull has a special case for cached interface
+ // types. We should probably just pull that out as a special case for the
+ // "else" block below & skip the otherwise needless lookup.
llvm::DIType TC = getTypeOrNull(Ty);
- if (TC.Verify() && TC.isForwardDecl())
+ if (TC && TC.isForwardDecl())
ReplaceMap.push_back(std::make_pair(TyPtr, static_cast<llvm::Value*>(TC)));
else if (ObjCInterfaceDecl* Decl = getObjCInterfaceDecl(Ty)) {
// Interface types may have elements added to them by a
// subsequent implementation or extension, so we keep them in
// the ObjCInterfaceCache together with a checksum. Instead of
- // the (possibly) incomplete interace type, we return a forward
+ // the (possibly) incomplete interface type, we return a forward
// declaration that gets RAUW'd in CGDebugInfo::finalize().
- llvm::DenseMap<void *, std::pair<llvm::WeakVH, unsigned > >
- ::iterator it = ObjCInterfaceCache.find(TyPtr);
- if (it != ObjCInterfaceCache.end())
- TC = llvm::DIType(cast<llvm::MDNode>(it->second.first));
- else
- TC = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
- Decl->getName(), TheCU, Unit,
- getLineNumber(Decl->getLocation()),
- TheCU.getLanguage());
+ std::pair<llvm::WeakVH, unsigned> &V = ObjCInterfaceCache[TyPtr];
+ if (V.first)
+ return llvm::DIType(cast<llvm::MDNode>(V.first));
+ TC = DBuilder.createForwardDecl(llvm::dwarf::DW_TAG_structure_type,
+ Decl->getName(), TheCU, Unit,
+ getLineNumber(Decl->getLocation()),
+ TheCU.getLanguage());
// Store the forward declaration in the cache.
- ObjCInterfaceCache[TyPtr] = std::make_pair(TC, Checksum(Decl));
+ V.first = TC;
+ V.second = Checksum(Decl);
// Register the type for replacement in finalize().
ReplaceMap.push_back(std::make_pair(TyPtr, static_cast<llvm::Value*>(TC)));
+
return TC;
}
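
For Objective-C interfaces the block above caches a forward declaration plus a checksum instead of the possibly incomplete type, and the placeholder is RAUW'd in finalize(). A sketch of that cache-then-replace pattern; Node stands in for llvm::MDNode and ordinary containers replace DenseMap and WeakVH:

#include <cassert>
#include <deque>
#include <map>
#include <string>
#include <utility>
#include <vector>

struct Node {
  std::string Name;
  bool IsForwardDecl;
};

struct DebugInfoSketch {
  std::deque<Node> Storage;                              // owns the nodes
  std::map<const void *, Node *> Cache;                  // type key -> node
  std::vector<std::pair<const void *, Node *>> ReplaceMap;

  Node *getOrCreatePlaceholder(const void *Key, const std::string &Name) {
    Node *&Slot = Cache[Key];
    if (Slot)
      return Slot;                      // reuse the cached placeholder
    Storage.push_back(Node{Name, true});
    Slot = &Storage.back();
    ReplaceMap.emplace_back(Key, Slot); // fixed up later in finalize()
    return Slot;
  }

  void finalize(const std::map<const void *, Node> &FinalTypes) {
    for (auto &Entry : ReplaceMap) {
      auto It = FinalTypes.find(Entry.first);
      if (It != FinalTypes.end())
        *Entry.second = It->second;     // stand-in for replaceAllUsesWith
    }
  }
};

int main() {
  DebugInfoSketch DI;
  int Key = 0;
  Node *P = DI.getOrCreatePlaceholder(&Key, "NSObject");
  assert(P->IsForwardDecl);
  DI.finalize({{&Key, Node{"NSObject", false}}});
  assert(!P->IsForwardDecl);
}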
@@ -1868,19 +2065,25 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
return Res;
}
-/// Currently the checksum merely consists of the number of ivars.
-unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl
- *InterfaceDecl) {
- unsigned IvarNo = 0;
- for (const ObjCIvarDecl *Ivar = InterfaceDecl->all_declared_ivar_begin();
- Ivar != 0; Ivar = Ivar->getNextIvar()) ++IvarNo;
- return IvarNo;
+/// Currently the checksum of an interface includes the number of
+/// ivars and property accessors.
+unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl *ID) {
+ // The assumption is that the number of ivars can only increase
+ // monotonically, so it is safe to just use their current number as
+ // a checksum.
+ unsigned Sum = 0;
+ for (const ObjCIvarDecl *Ivar = ID->all_declared_ivar_begin();
+ Ivar != 0; Ivar = Ivar->getNextIvar())
+ ++Sum;
+
+ return Sum;
}
ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) {
switch (Ty->getTypeClass()) {
case Type::ObjCObjectPointer:
- return getObjCInterfaceDecl(cast<ObjCObjectPointerType>(Ty)->getPointeeType());
+ return getObjCInterfaceDecl(cast<ObjCObjectPointerType>(Ty)
+ ->getPointeeType());
case Type::ObjCInterface:
return cast<ObjCInterfaceType>(Ty)->getDecl();
default:
@@ -1895,7 +2098,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
return CreateQualifiedType(Ty, Unit);
const char *Diag = 0;
-
+
// Work out details of type.
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -1920,6 +2123,10 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
return CreateType(cast<ComplexType>(Ty));
case Type::Pointer:
return CreateType(cast<PointerType>(Ty), Unit);
+ case Type::Decayed:
+ // Decayed types are just pointers in LLVM and DWARF.
+ return CreateType(
+ cast<PointerType>(cast<DecayedType>(Ty)->getDecayedType()), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef:
@@ -1927,7 +2134,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::Record:
return CreateType(cast<RecordType>(Ty));
case Type::Enum:
- return CreateEnumType(cast<EnumType>(Ty)->getDecl());
+ return CreateEnumType(cast<EnumType>(Ty));
case Type::FunctionProto:
case Type::FunctionNoProto:
return CreateType(cast<FunctionType>(Ty), Unit);
@@ -1956,10 +2163,13 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::TypeOf:
case Type::Decltype:
case Type::UnaryTransform:
- case Type::Auto:
+ case Type::PackExpansion:
llvm_unreachable("type should have been unwrapped!");
+ case Type::Auto:
+ Diag = "auto";
+ break;
}
-
+
assert(Diag && "Fall through without a diagnostic?");
unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
"debug information for %0 is not yet supported");
@@ -1970,117 +2180,119 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
/// getOrCreateLimitedType - Get the type from the cache or create a new
/// limited type if necessary.
-llvm::DIType CGDebugInfo::getOrCreateLimitedType(QualType Ty,
+llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
llvm::DIFile Unit) {
- if (Ty.isNull())
- return llvm::DIType();
+ QualType QTy(Ty, 0);
- // Unwrap the type as needed for debug information.
- Ty = UnwrapTypeForDebugInfo(Ty, CGM.getContext());
-
- llvm::DIType T = getTypeOrNull(Ty);
+ llvm::DICompositeType T(getTypeOrNull(QTy));
// We may have cached a forward decl when we could have created
// a non-forward decl. Go ahead and create a non-forward decl
// now.
- if (T.Verify() && !T.isForwardDecl()) return T;
+ if (T && !T.isForwardDecl()) return T;
// Otherwise create the type.
- llvm::DIType Res = CreateLimitedTypeNode(Ty, Unit);
+ llvm::DICompositeType Res = CreateLimitedType(Ty);
- if (T.Verify() && T.isForwardDecl())
- ReplaceMap.push_back(std::make_pair(Ty.getAsOpaquePtr(),
- static_cast<llvm::Value*>(T)));
+ // Propagate members from the declaration to the definition
+ // CreateType(const RecordType*) will overwrite this with the members in the
+ // correct order if the full type is needed.
+ Res.setTypeArray(T.getTypeArray());
+
+ if (T && T.isForwardDecl())
+ ReplaceMap.push_back(
+ std::make_pair(QTy.getAsOpaquePtr(), static_cast<llvm::Value *>(T)));
// And update the type cache.
- TypeCache[Ty.getAsOpaquePtr()] = Res;
+ TypeCache[QTy.getAsOpaquePtr()] = Res;
return Res;
}
// TODO: Currently used for context chains when limiting debug info.
-llvm::DIType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
+llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
-
+
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
unsigned Line = getLineNumber(RD->getLocation());
StringRef RDName = getClassName(RD);
- llvm::DIDescriptor RDContext;
- if (CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo)
- RDContext = createContextChain(cast<Decl>(RD->getDeclContext()));
- else
- RDContext = getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+ llvm::DIDescriptor RDContext =
+ getContextDescriptor(cast<Decl>(RD->getDeclContext()));
+
+ // If we ended up creating the type during the context chain construction,
+ // just return that.
+ // FIXME: this could be dealt with better if the type was recorded as
+ // completed before we started this (see the CompletedTypeCache usage in
+ // CGDebugInfo::CreateTypeDefinition(const RecordType*) - that would need to
+ // be pushed to before context creation, but after it was known to be
+ // destined for completion (might still have an issue if this caller only
+ // required a declaration but the context construction ended up creating a
+ // definition)
+ llvm::DICompositeType T(getTypeOrNull(CGM.getContext().getRecordType(RD)));
+ if (T && (!T.isForwardDecl() || !RD->getDefinition()))
+ return T;
// If this is just a forward declaration, construct an appropriately
// marked node and just return it.
if (!RD->getDefinition())
- return createRecordFwdDecl(RD, RDContext);
+ return getOrCreateRecordFwdDecl(Ty, RDContext);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
llvm::DICompositeType RealDecl;
-
+
+ SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
+
if (RD->isUnion())
RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIArray());
+ Size, Align, 0, llvm::DIArray(), 0,
+ FullName);
else if (RD->isClass()) {
// FIXME: This could be a struct type giving a default visibility different
// than C++ class type, but needs llvm metadata changes first.
RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
Size, Align, 0, 0, llvm::DIType(),
llvm::DIArray(), llvm::DIType(),
- llvm::DIArray());
+ llvm::DIArray(), FullName);
} else
RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIType(), llvm::DIArray());
+ Size, Align, 0, llvm::DIType(),
+ llvm::DIArray(), 0, llvm::DIType(),
+ FullName);
RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
- if (CXXDecl) {
- // A class's primary base or the class itself contains the vtable.
- llvm::DICompositeType ContainingType;
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
- // Seek non virtual primary base root.
- while (1) {
- const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
- const CXXRecordDecl *PBT = BRL.getPrimaryBase();
- if (PBT && !BRL.isPrimaryBaseVirtual())
- PBase = PBT;
- else
- break;
- }
- ContainingType = llvm::DICompositeType(
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), DefUnit));
- } else if (CXXDecl->isDynamicClass())
- ContainingType = RealDecl;
-
- RealDecl.setContainingType(ContainingType);
- }
- return llvm::DIType(RealDecl);
+ if (const ClassTemplateSpecializationDecl *TSpecial =
+ dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ RealDecl.setTypeArray(llvm::DIArray(),
+ CollectCXXTemplateParams(TSpecial, DefUnit));
+ return RealDecl;
}
-/// CreateLimitedTypeNode - Create a new debug type node, but only forward
-/// declare composite types that haven't been processed yet.
-llvm::DIType CGDebugInfo::CreateLimitedTypeNode(QualType Ty,llvm::DIFile Unit) {
-
- // Work out details of type.
- switch (Ty->getTypeClass()) {
-#define TYPE(Class, Base)
-#define ABSTRACT_TYPE(Class, Base)
-#define NON_CANONICAL_TYPE(Class, Base)
-#define DEPENDENT_TYPE(Class, Base) case Type::Class:
- #include "clang/AST/TypeNodes.def"
- llvm_unreachable("Dependent types cannot show up in debug information");
+void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
+ llvm::DICompositeType RealDecl) {
+ // A class's primary base or the class itself contains the vtable.
+ llvm::DICompositeType ContainingType;
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (const CXXRecordDecl *PBase = RL.getPrimaryBase()) {
+ // Seek non virtual primary base root.
+ while (1) {
+ const ASTRecordLayout &BRL = CGM.getContext().getASTRecordLayout(PBase);
+ const CXXRecordDecl *PBT = BRL.getPrimaryBase();
+ if (PBT && !BRL.isPrimaryBaseVirtual())
+ PBase = PBT;
+ else
+ break;
+ }
+ ContainingType = llvm::DICompositeType(
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0),
+ getOrCreateFile(RD->getLocation())));
+ } else if (RD->isDynamicClass())
+ ContainingType = RealDecl;
- case Type::Record:
- return CreateLimitedType(cast<RecordType>(Ty));
- default:
- return CreateTypeNode(Ty, Unit);
- }
+ RealDecl.setContainingType(ContainingType);
}
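
CollectContainingType records which type holds the vtable: the root of the non-virtual primary-base chain when there is one, otherwise the dynamic class itself. A minimal walk over a simplified layout record; Record is an illustrative stand-in for the CXXRecordDecl/ASTRecordLayout queries:

#include <cassert>

struct Record {
  const Record *PrimaryBase;    // null if this class has no primary base
  bool PrimaryBaseIsVirtual;    // whether that primary base is virtual
  bool IsDynamic;               // has a vtable
};

static const Record *containingType(const Record *RD) {
  if (const Record *PBase = RD->PrimaryBase) {
    // Seek the non-virtual primary base root.
    while (true) {
      const Record *PBT = PBase->PrimaryBase;
      if (PBT && !PBase->PrimaryBaseIsVirtual)
        PBase = PBT;
      else
        break;
    }
    return PBase;
  }
  return RD->IsDynamic ? RD : nullptr;
}

int main() {
  Record Root{nullptr, false, true};
  Record Mid{&Root, false, true};
  Record Derived{&Mid, false, true};
  assert(containingType(&Derived) == &Root);
  assert(containingType(&Root) == &Root);
}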
/// CreateMemberType - Create new member and increase Offset by FType's size.
@@ -2097,21 +2309,57 @@ llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
return Ty;
}
+llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
+ // We only need a declaration (not a definition) of the type - so use whatever
+ // we would otherwise do to get a type for a pointee. (forward declarations in
+ // limited debug info, full definitions (if the type definition is available)
+ // in unlimited debug info)
+ if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
+ return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
+ getOrCreateFile(TD->getLocation()));
+ // Otherwise fall back to a fairly rudimentary cache of existing declarations.
+ // This doesn't handle providing declarations (for functions or variables) for
+  // entities without definitions in this TU, nor when the definition precedes
+ // the call to this function.
+ // FIXME: This should be split out into more specific maps with support for
+ // emitting forward declarations and merging definitions with declarations,
+ // the same way as we do for types.
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator I =
+ DeclCache.find(D->getCanonicalDecl());
+ if (I == DeclCache.end())
+ return llvm::DIDescriptor();
+ llvm::Value *V = I->second;
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(V));
+}
+
/// getFunctionDeclaration - Return debug info descriptor to describe method
/// declaration for the given method definition.
llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
+ if (!D || DebugKind == CodeGenOptions::DebugLineTablesOnly)
+ return llvm::DISubprogram();
+
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
if (!FD) return llvm::DISubprogram();
// Setup context.
- getContextDescriptor(cast<Decl>(D->getDeclContext()));
+ llvm::DIScope S = getContextDescriptor(cast<Decl>(D->getDeclContext()));
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
MI = SPCache.find(FD->getCanonicalDecl());
+ if (MI == SPCache.end()) {
+ if (const CXXMethodDecl *MD =
+ dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) {
+ llvm::DICompositeType T(S);
+ llvm::DISubprogram SP =
+ CreateCXXMemberFunction(MD, getOrCreateFile(MD->getLocation()), T);
+ T.addMember(SP);
+ return SP;
+ }
+ }
if (MI != SPCache.end()) {
llvm::Value *V = MI->second;
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
- if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
@@ -2123,7 +2371,7 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
if (MI != SPCache.end()) {
llvm::Value *V = MI->second;
llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
- if (SP.isSubprogram() && !llvm::DISubprogram(SP).isDefinition())
+ if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
}
@@ -2132,9 +2380,15 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
// getOrCreateFunctionType - Construct DIType. If it is a c++ method, include
// implicit parameter "this".
-llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
- QualType FnType,
- llvm::DIFile F) {
+llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
+ QualType FnType,
+ llvm::DIFile F) {
+ if (!D || DebugKind == CodeGenOptions::DebugLineTablesOnly)
+ // Create fake but valid subroutine type. Otherwise
+ // llvm::DISubprogram::Verify() would return false, and
+ // subprogram DIE will miss DW_AT_decl_file and
+ // DW_AT_decl_line fields.
+ return DBuilder.createSubroutineType(F, DBuilder.getOrCreateArray(None));
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
@@ -2143,7 +2397,14 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
SmallVector<llvm::Value *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
- Elts.push_back(getOrCreateType(OMethod->getResultType(), F));
+ QualType ResultTy = OMethod->getResultType();
+
+ // Replace the instancetype keyword with the actual type.
+ if (ResultTy == CGM.getContext().getObjCInstanceType())
+ ResultTy = CGM.getContext().getPointerType(
+ QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
+
+ Elts.push_back(getOrCreateType(ResultTy, F));
// "self" pointer is always first argument.
QualType SelfDeclTy = OMethod->getSelfDecl()->getType();
llvm::DIType SelfTy = getOrCreateType(SelfDeclTy, F);
@@ -2152,14 +2413,14 @@ llvm::DIType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
llvm::DIType CmdTy = getOrCreateType(OMethod->getCmdDecl()->getType(), F);
Elts.push_back(DBuilder.createArtificialType(CmdTy));
// Get rest of the arguments.
- for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
+ for (ObjCMethodDecl::param_const_iterator PI = OMethod->param_begin(),
PE = OMethod->param_end(); PI != PE; ++PI)
Elts.push_back(getOrCreateType((*PI)->getType(), F));
llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
return DBuilder.createSubroutineType(F, EltTypeArray);
}
- return getOrCreateType(FnType, F);
+ return llvm::DICompositeType(getOrCreateType(FnType, F));
}
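
For Objective-C methods the subroutine type above is assembled in a fixed order: the return type first (with instancetype rewritten to the concrete interface pointer), then the artificial self and _cmd parameters, then the declared parameters. A sketch of that ordering with strings standing in for the DIType elements:

#include <cassert>
#include <string>
#include <vector>

static std::vector<std::string>
methodSignature(const std::string &Result, const std::string &SelfTy,
                const std::vector<std::string> &Params) {
  std::vector<std::string> Elts;
  Elts.push_back(Result);                   // return type
  Elts.push_back(SelfTy + " [artificial]"); // implicit self
  Elts.push_back("SEL [artificial]");       // implicit _cmd selector
  Elts.insert(Elts.end(), Params.begin(), Params.end());
  return Elts;
}

int main() {
  std::vector<std::string> Sig =
      methodSignature("MyClass *", "MyClass *", {"NSUInteger"});
  assert(Sig.size() == 4 && Sig[0] == "MyClass *");
}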
/// EmitFunctionStart - Constructs the debug code for entering a function.
@@ -2187,7 +2448,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
llvm::DIArray TParamsArray;
if (!HasDecl) {
// Use llvm function name.
- Name = Fn->getName();
+ LinkageName = Fn->getName();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
@@ -2214,16 +2475,16 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (LinkageName == Name ||
(!CGM.getCodeGenOpts().EmitGcovArcs &&
!CGM.getCodeGenOpts().EmitGcovNotes &&
- CGM.getCodeGenOpts().getDebugInfo() <= CodeGenOptions::DebugLineTablesOnly))
+ DebugKind <= CodeGenOptions::DebugLineTablesOnly))
LinkageName = StringRef();
- if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
+ if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
FDContext = getOrCreateNameSpace(NSDecl);
else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl->getDeclContext()));
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl));
// Collect template parameters.
TParamsArray = CollectFunctionTemplateParams(FD, Unit);
@@ -2243,28 +2504,15 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
if (!HasDecl || D->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- llvm::DIType DIFnType;
- llvm::DISubprogram SPDecl;
- if (HasDecl &&
- CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo) {
- DIFnType = getOrCreateFunctionType(D, FnType, Unit);
- SPDecl = getFunctionDeclaration(D);
- } else {
- // Create fake but valid subroutine type. Otherwise
- // llvm::DISubprogram::Verify() would return false, and
- // subprogram DIE will miss DW_AT_decl_file and
- // DW_AT_decl_line fields.
- SmallVector<llvm::Value*, 16> Elts;
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
- DIFnType = DBuilder.createSubroutineType(Unit, EltTypeArray);
- }
- llvm::DISubprogram SP;
- SP = DBuilder.createFunction(FDContext, Name, LinkageName, Unit,
- LineNo, DIFnType,
- Fn->hasInternalLinkage(), true/*definition*/,
- getLineNumber(CurLoc), Flags,
- CGM.getLangOpts().Optimize,
- Fn, TParamsArray, SPDecl);
+ llvm::DISubprogram SP =
+ DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit),
+ Fn->hasInternalLinkage(), true /*definition*/,
+ getLineNumber(CurLoc), Flags,
+ CGM.getLangOpts().Optimize, Fn, TParamsArray,
+ getFunctionDeclaration(D));
+ if (HasDecl)
+ DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(SP)));
// Push function on region stack.
llvm::MDNode *SPN = SP;
@@ -2274,10 +2522,10 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
}
/// EmitLocation - Emit metadata to indicate a change in line/column
-/// information in the source file.
+/// information in the source file. If the location is invalid, the
+/// previous location will be reused.
void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
bool ForceColumnInfo) {
-
// Update our current location
setLocation(Loc);
@@ -2292,7 +2540,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
Builder.getCurrentDebugLocation().getScope(CGM.getLLVMContext()) ==
LexicalBlockStack.back())
return;
-
+
// Update last state.
PrevLoc = CurLoc;
@@ -2319,7 +2567,8 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
/// region - beginning of a DW_TAG_lexical_block.
-void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc) {
+void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
+ SourceLocation Loc) {
// Set our current location.
setLocation(Loc);
@@ -2334,7 +2583,8 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder, SourceLocation Loc
/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
/// region - end of a DW_TAG_lexical_block.
-void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder, SourceLocation Loc) {
+void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
+ SourceLocation Loc) {
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
// Provide an entry in the line table for the end of the block.
@@ -2355,7 +2605,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
FnBeginRegionCount.pop_back();
}
-// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+// EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *XOffset) {
@@ -2364,9 +2614,9 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
-
+
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
- QualType Type = VD->getType();
+ QualType Type = VD->getType();
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
@@ -2388,21 +2638,23 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
Qualifiers::ObjCLifetime Lifetime;
if (CGM.getContext().getByrefLifetime(Type,
Lifetime, HasByrefExtendedLayout)
- && HasByrefExtendedLayout)
+ && HasByrefExtendedLayout) {
+ FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
EltTys.push_back(CreateMemberType(Unit, FType,
"__byref_variable_layout",
&FieldOffset));
-
+ }
+
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
CGM.getTarget().getPointerAlign(0))) {
- CharUnits FieldOffsetInBytes
+ CharUnits FieldOffsetInBytes
= CGM.getContext().toCharUnitsFromBits(FieldOffset);
CharUnits AlignedOffsetInBytes
= FieldOffsetInBytes.RoundUpToAlignment(Align);
CharUnits NumPaddingBytes
= AlignedOffsetInBytes - FieldOffsetInBytes;
-
+
if (NumPaddingBytes.isPositive()) {
llvm::APInt pad(32, NumPaddingBytes.getQuantity());
FType = CGM.getContext().getConstantArrayType(CGM.getContext().CharTy,
@@ -2410,40 +2662,45 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
EltTys.push_back(CreateMemberType(Unit, FType, "", &FieldOffset));
}
}
-
+
FType = Type;
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
FieldSize = CGM.getContext().getTypeSize(FType);
FieldAlign = CGM.getContext().toBits(Align);
- *XOffset = FieldOffset;
+ *XOffset = FieldOffset;
FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
0, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
-
+
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
-
+
unsigned Flags = llvm::DIDescriptor::FlagBlockByrefStruct;
-
+
return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
llvm::DIType(), Elements);
}
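
The byref layout above inserts padding when the variable's declared alignment exceeds the alignment of the preceding header fields: the running offset is rounded up to the alignment and any gap is emitted as unnamed char padding. A sketch of that arithmetic:

#include <cassert>
#include <cstdint>

// Round Offset up to the next multiple of Align (Align must be non-zero).
static uint64_t roundUpToAlignment(uint64_t Offset, uint64_t Align) {
  return (Offset + Align - 1) / Align * Align;
}

int main() {
  uint64_t FieldOffsetInBytes = 28; // end of the byref header fields
  uint64_t Align = 16;              // over-aligned __block variable
  uint64_t Aligned = roundUpToAlignment(FieldOffsetInBytes, Align);
  uint64_t PaddingBytes = Aligned - FieldOffsetInBytes;
  assert(Aligned == 32 && PaddingBytes == 4);
}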
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
- llvm::Value *Storage,
+ llvm::Value *Storage,
unsigned ArgNo, CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
- llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
+ bool Unwritten =
+ VD->isImplicit() || (isa<Decl>(VD->getDeclContext()) &&
+ cast<Decl>(VD->getDeclContext())->isImplicit());
+ llvm::DIFile Unit;
+ if (!Unwritten)
+ Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
uint64_t XOffset = 0;
if (VD->hasAttr<BlocksAttr>())
Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
- else
+ else
Ty = getOrCreateType(VD->getType(), Unit);
// If there is no debug info for this type then do not emit debug info
@@ -2451,24 +2708,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
if (!Ty)
return;
- if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage)) {
- // If Storage is an aggregate returned as 'sret' then let debugger know
- // about this.
- if (Arg->hasStructRetAttr())
- Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
- else if (CXXRecordDecl *Record = VD->getType()->getAsCXXRecordDecl()) {
- // If an aggregate variable has non trivial destructor or non trivial copy
- // constructor than it is pass indirectly. Let debug info know about this
- // by using reference of the aggregate type as a argument type.
- if (Record->hasNonTrivialCopyConstructor() ||
- !Record->hasTrivialDestructor())
- Ty = DBuilder.createReferenceType(llvm::dwarf::DW_TAG_reference_type, Ty);
- }
- }
-
// Get location information.
- unsigned Line = getLineNumber(VD->getLocation());
- unsigned Column = getColumnNumber(VD->getLocation());
+ unsigned Line = 0;
+ unsigned Column = 0;
+ if (!Unwritten) {
+ Line = getLineNumber(VD->getLocation());
+ Column = getColumnNumber(VD->getLocation());
+ }
unsigned Flags = 0;
if (VD->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
@@ -2479,6 +2725,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// otherwise it is 'self' or 'this'.
if (isa<ImplicitParamDecl>(VD) && ArgNo == 1)
Flags |= llvm::DIDescriptor::FlagObjectPointer;
+ if (llvm::Argument *Arg = dyn_cast<llvm::Argument>(Storage))
+ if (Arg->getType()->isPointerTy() && !Arg->hasByValAttr() &&
+ !VD->getType()->isPointerType())
+ Flags |= llvm::DIDescriptor::FlagIndirectVariable;
llvm::MDNode *Scope = LexicalBlockStack.back();
@@ -2501,33 +2751,18 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
// Create the descriptor for the variable.
llvm::DIVariable D =
- DBuilder.createComplexVariable(Tag,
+ DBuilder.createComplexVariable(Tag,
llvm::DIDescriptor(Scope),
VD->getName(), Unit, Line, Ty,
addr, ArgNo);
-
- // Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
- return;
- } else if (isa<VariableArrayType>(VD->getType())) {
- // These are "complex" variables in that they need an op_deref.
- // Create the descriptor for the variable.
- llvm::Value *Addr = llvm::ConstantInt::get(CGM.Int64Ty,
- llvm::DIBuilder::OpDeref);
- llvm::DIVariable D =
- DBuilder.createComplexVariable(Tag,
- llvm::DIDescriptor(Scope),
- Name, Unit, Line, Ty,
- Addr, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
- }
+ } else if (isa<VariableArrayType>(VD->getType()))
+ Flags |= llvm::DIDescriptor::FlagIndirectVariable;
} else if (const RecordType *RT = dyn_cast<RecordType>(VD->getType())) {
// If VD is an anonymous union then Storage represents value for
// all union fields.
@@ -2539,18 +2774,18 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
FieldDecl *Field = *I;
llvm::DIType FieldTy = getOrCreateType(Field->getType(), Unit);
StringRef FieldName = Field->getName();
-
+
// Ignore unnamed fields. Do not ignore unnamed records.
if (FieldName.empty() && !isa<RecordType>(Field->getType()))
continue;
-
+
// Use VarDecl's Tag, Scope and Line number.
llvm::DIVariable D =
DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
- FieldName, Unit, Line, FieldTy,
+ FieldName, Unit, Line, FieldTy,
CGM.getLangOpts().Optimize, Flags,
ArgNo);
-
+
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
@@ -2575,7 +2810,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_auto_variable, Storage, 0, Builder);
}
@@ -2585,9 +2820,10 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
/// never happen though, since creating a type for the implicit self
/// argument implies that we already parsed the interface definition
/// and the ivar declarations in the implementation.
-llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy, llvm::DIType Ty) {
+llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy,
+ llvm::DIType Ty) {
llvm::DIType CachedTy = getTypeOrNull(QualTy);
- if (CachedTy.Verify()) Ty = CachedTy;
+ if (CachedTy) Ty = CachedTy;
else DEBUG(llvm::dbgs() << "No cached type for self.");
return DBuilder.createObjectPointerType(Ty);
}
@@ -2596,20 +2832,20 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
llvm::Value *Storage,
CGBuilderTy &Builder,
const CGBlockInfo &blockInfo) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
-
+
if (Builder.GetInsertBlock() == 0)
return;
-
+
bool isByRef = VD->hasAttr<BlocksAttr>();
-
+
uint64_t XOffset = 0;
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
llvm::DIType Ty;
if (isByRef)
Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset);
- else
+ else
Ty = getOrCreateType(VD->getType(), Unit);
// Self is passed along as an implicit non-arg variable in a
@@ -2649,7 +2885,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
// Create the descriptor for the variable.
llvm::DIVariable D =
- DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
llvm::DIDescriptor(LexicalBlockStack.back()),
VD->getName(), Unit, Line, Ty, addr);
@@ -2665,7 +2901,7 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
unsigned ArgNo,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
EmitDeclare(VD, llvm::dwarf::DW_TAG_arg_variable, AI, ArgNo, Builder);
}
@@ -2683,7 +2919,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *Arg,
llvm::Value *LocalAddr,
CGBuilderTy &Builder) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
ASTContext &C = CGM.getContext();
const BlockDecl *blockDecl = block.getBlockDecl();
@@ -2692,7 +2928,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIFile tunit = getOrCreateFile(loc);
unsigned line = getLineNumber(loc);
unsigned column = getColumnNumber(loc);
-
+
// Build the debug-info type for the block literal.
getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
@@ -2812,7 +3048,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Create the descriptor for the parameter.
llvm::DIVariable debugVar =
DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
- llvm::DIDescriptor(scope),
+ llvm::DIDescriptor(scope),
Arg->getName(), tunit, line, type,
CGM.getLangOpts().Optimize, flags,
cast<llvm::Argument>(Arg)->getArgNo() + 1);
@@ -2831,25 +3067,32 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
DbgDecl->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
-/// getStaticDataMemberDeclaration - If D is an out-of-class definition of
-/// a static data member of a class, find its corresponding in-class
-/// declaration.
-llvm::DIDerivedType CGDebugInfo::getStaticDataMemberDeclaration(const Decl *D) {
- if (cast<VarDecl>(D)->isStaticDataMember()) {
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
- MI = StaticDataMemberCache.find(D->getCanonicalDecl());
- if (MI != StaticDataMemberCache.end())
- // Verify the info still exists.
- if (llvm::Value *V = MI->second)
- return llvm::DIDerivedType(cast<llvm::MDNode>(V));
- }
- return llvm::DIDerivedType();
+/// If D is an out-of-class definition of a static data member of a class, find
+/// its corresponding in-class declaration.
+llvm::DIDerivedType
+CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
+ if (!D->isStaticDataMember())
+ return llvm::DIDerivedType();
+ llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
+ StaticDataMemberCache.find(D->getCanonicalDecl());
+ if (MI != StaticDataMemberCache.end()) {
+ assert(MI->second && "Static data member declaration should still exist");
+ return llvm::DIDerivedType(cast<llvm::MDNode>(MI->second));
+ }
+
+ // If the member wasn't found in the cache, lazily construct and add it to the
+ // type (used when a limited form of the type is emitted).
+ llvm::DICompositeType Ctxt(
+ getContextDescriptor(cast<Decl>(D->getDeclContext())));
+ llvm::DIDerivedType T = CreateRecordStaticField(D, Ctxt);
+ Ctxt.addMember(T);
+ return T;
}
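
The lookup above tries the static-data-member cache first and, when only a limited form of the class was emitted, lazily creates the in-class declaration and attaches it to the enclosing class. A sketch of that get-or-create shape with illustrative types (MemberDecl and ClassInfo are not clang's):

#include <cassert>
#include <map>
#include <string>
#include <vector>

struct MemberDecl { std::string Name; };
struct ClassInfo { std::vector<MemberDecl *> Members; };

struct StaticMemberCache {
  std::map<std::string, MemberDecl> Cache;

  MemberDecl *getOrCreate(const std::string &Name, ClassInfo &Ctxt) {
    auto It = Cache.find(Name);
    if (It != Cache.end())
      return &It->second;            // declaration was emitted earlier
    MemberDecl &D = Cache[Name];     // lazily construct it now ...
    D.Name = Name;
    Ctxt.Members.push_back(&D);      // ... and add it to the class
    return &D;
  }
};

int main() {
  StaticMemberCache C;
  ClassInfo Cls;
  MemberDecl *First = C.getOrCreate("Widget::Count", Cls);
  MemberDecl *Again = C.getOrCreate("Widget::Count", Cls);
  assert(First == Again && Cls.Members.size() == 1);
}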
/// EmitGlobalVariable - Emit information about a global variable.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
unsigned LineNo = getLineNumber(D->getLocation());
@@ -2873,18 +3116,19 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
LinkageName = Var->getName();
if (LinkageName == DeclName)
LinkageName = StringRef();
- llvm::DIDescriptor DContext =
+ llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
- DBuilder.createStaticVariable(DContext, DeclName, LinkageName,
- Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasInternalLinkage(), Var,
- getStaticDataMemberDeclaration(D));
+ llvm::DIGlobalVariable GV = DBuilder.createStaticVariable(
+ DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasInternalLinkage(), Var,
+ getOrCreateStaticDataMemberDeclarationOrNull(D));
+ DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(GV)));
}
/// EmitGlobalVariable - Emit information about an objective-c interface.
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
unsigned LineNo = getLineNumber(ID->getLocation());
@@ -2908,9 +3152,9 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
}
/// EmitGlobalVariable - Emit global variable's debug info.
-void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
llvm::Constant *Init) {
- assert(CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo);
+ assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
// Create the descriptor for the variable.
llvm::DIFile Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
@@ -2923,34 +3167,79 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
// Do not use DIGlobalVariable for enums.
if (Ty.getTag() == llvm::dwarf::DW_TAG_enumeration_type)
return;
- DBuilder.createStaticVariable(Unit, Name, Name, Unit,
- getLineNumber(VD->getLocation()),
- Ty, true, Init,
- getStaticDataMemberDeclaration(VD));
+ llvm::DIGlobalVariable GV = DBuilder.createStaticVariable(
+ Unit, Name, Name, Unit, getLineNumber(VD->getLocation()), Ty, true, Init,
+ getOrCreateStaticDataMemberDeclarationOrNull(cast<VarDecl>(VD)));
+ DeclCache.insert(std::make_pair(VD->getCanonicalDecl(), llvm::WeakVH(GV)));
+}
+
+llvm::DIScope CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
+ if (!LexicalBlockStack.empty())
+ return llvm::DIScope(LexicalBlockStack.back());
+ return getContextDescriptor(D);
}
void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
- llvm::DIScope Scope =
- LexicalBlockStack.empty()
- ? getContextDescriptor(cast<Decl>(UD.getDeclContext()))
- : llvm::DIScope(LexicalBlockStack.back());
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
DBuilder.createImportedModule(
- Scope, getOrCreateNameSpace(UD.getNominatedNamespace()),
+ getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
+ getOrCreateNameSpace(UD.getNominatedNamespace()),
getLineNumber(UD.getLocation()));
}
+void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+ assert(UD.shadow_size() &&
+ "We shouldn't be codegening an invalid UsingDecl containing no decls");
+ // Emitting one decl is sufficient - debuggers can detect that this is an
+ // overloaded name & provide lookup for all the overloads.
+ const UsingShadowDecl &USD = **UD.shadow_begin();
+ if (llvm::DIDescriptor Target =
+ getDeclarationOrDefinition(USD.getUnderlyingDecl()))
+ DBuilder.createImportedDeclaration(
+ getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
+ getLineNumber(USD.getLocation()));
+}
+
+llvm::DIImportedEntity
+CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return llvm::DIImportedEntity(0);
+ llvm::WeakVH &VH = NamespaceAliasCache[&NA];
+ if (VH)
+ return llvm::DIImportedEntity(cast<llvm::MDNode>(VH));
+ llvm::DIImportedEntity R(0);
+ if (const NamespaceAliasDecl *Underlying =
+ dyn_cast<NamespaceAliasDecl>(NA.getAliasedNamespace()))
+ // This could cache & dedup here rather than relying on metadata deduping.
+ R = DBuilder.createImportedModule(
+ getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
+ EmitNamespaceAlias(*Underlying), getLineNumber(NA.getLocation()),
+ NA.getName());
+ else
+ R = DBuilder.createImportedModule(
+ getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
+ getOrCreateNameSpace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
+ getLineNumber(NA.getLocation()), NA.getName());
+ VH = R;
+ return R;
+}
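
A namespace alias may name another alias, so EmitNamespaceAlias recurses to the underlying namespace and memoizes every alias it passes through. A standalone sketch of that resolution; AliasDecl and resolve are illustrative, not clang APIs:

#include <cassert>
#include <map>
#include <string>

struct AliasDecl {
  std::string Name;
  const AliasDecl *AliasedAlias; // non-null if the target is itself an alias
  std::string Namespace;         // otherwise the namespace it names
};

static std::string resolve(const AliasDecl &A,
                           std::map<const AliasDecl *, std::string> &Cache) {
  auto It = Cache.find(&A);
  if (It != Cache.end())
    return It->second;                 // already emitted, reuse it
  std::string Target =
      A.AliasedAlias ? resolve(*A.AliasedAlias, Cache) : A.Namespace;
  Cache[&A] = Target;                  // memoize this alias
  return Target;
}

int main() {
  AliasDecl Base{"fs", nullptr, "std::filesystem"};
  AliasDecl Nested{"f", &Base, ""};
  std::map<const AliasDecl *, std::string> Cache;
  assert(resolve(Nested, Cache) == "std::filesystem");
  assert(Cache.size() == 2);
}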
+
 /// getOrCreateNameSpace - Return namespace descriptor for the given
/// namespace decl.
-llvm::DINameSpace
+llvm::DINameSpace
CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
- llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
+ NSDecl = NSDecl->getCanonicalDecl();
+ llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
NameSpaceCache.find(NSDecl);
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
-
+
unsigned LineNo = getLineNumber(NSDecl->getLocation());
llvm::DIFile FileD = getOrCreateFile(NSDecl->getLocation());
- llvm::DIDescriptor Context =
+ llvm::DIDescriptor Context =
getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
llvm::DINameSpace NS =
DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
@@ -2965,7 +3254,7 @@ void CGDebugInfo::finalize() {
// Verify that the debug info still exists.
if (llvm::Value *V = VI->second)
Ty = llvm::DIType(cast<llvm::MDNode>(V));
-
+
llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(VI->first);
if (it != TypeCache.end()) {
@@ -2974,7 +3263,7 @@ void CGDebugInfo::finalize() {
RepTy = llvm::DIType(cast<llvm::MDNode>(V));
}
- if (Ty.Verify() && Ty.isForwardDecl() && RepTy.Verify())
+ if (Ty && Ty.isForwardDecl() && RepTy)
Ty.replaceAllUsesWith(RepTy);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 4080492a1c68..0ca274f56881 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
+#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/DIBuilder.h"
#include "llvm/DebugInfo.h"
@@ -35,6 +36,7 @@ namespace clang {
class ObjCIvarDecl;
class ClassTemplateSpecializationDecl;
class GlobalDecl;
+ class UsingDecl;
namespace CodeGen {
class CodeGenModule;
@@ -45,7 +47,10 @@ namespace CodeGen {
/// and is responsible for emitting to llvm globals or pass directly to
/// the backend.
class CGDebugInfo {
+ friend class NoLocation;
+ friend class ArtificialLocation;
CodeGenModule &CGM;
+ const CodeGenOptions::DebugInfoKind DebugKind;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit TheCU;
SourceLocation CurLoc, PrevLoc;
@@ -57,14 +62,14 @@ class CGDebugInfo {
llvm::DIType OCLImage2dDITy, OCLImage2dArrayDITy;
llvm::DIType OCLImage3dDITy;
llvm::DIType OCLEventDITy;
-
+ llvm::DIType BlockLiteralGeneric;
+
/// TypeCache - Cache of previously constructed Types.
llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
/// ObjCInterfaceCache - Cache of previously constructed interfaces
/// which may change. Storing a pair of DIType and checksum.
- llvm::DenseMap<void *, std::pair<llvm::WeakVH, unsigned > >
- ObjCInterfaceCache;
+ llvm::DenseMap<void *, std::pair<llvm::WeakVH, unsigned> > ObjCInterfaceCache;
/// RetainedTypes - list of interfaces we want to keep even if orphaned.
std::vector<void *> RetainedTypes;
@@ -76,9 +81,6 @@ class CGDebugInfo {
/// compilation.
std::vector<std::pair<void *, llvm::WeakVH> >ReplaceMap;
- bool BlockLiteralGenericSet;
- llvm::DIType BlockLiteralGeneric;
-
// LexicalBlockStack - Keep track of our current nested lexical block.
std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
@@ -94,22 +96,28 @@ class CGDebugInfo {
llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ /// \brief Cache declarations relevant to DW_TAG_imported_declarations (C++
+ /// using declarations) that aren't covered by other more specific caches.
+ llvm::DenseMap<const Decl *, llvm::WeakVH> DeclCache;
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
+ llvm::DenseMap<const NamespaceAliasDecl *, llvm::WeakVH> NamespaceAliasCache;
llvm::DenseMap<const Decl *, llvm::WeakVH> StaticDataMemberCache;
/// Helper functions for getOrCreateType.
unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl);
llvm::DIType CreateType(const BuiltinType *Ty);
llvm::DIType CreateType(const ComplexType *Ty);
- llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile F);
- llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile F);
+ llvm::DIType CreateQualifiedType(QualType Ty, llvm::DIFile Fg);
+ llvm::DIType CreateType(const TypedefType *Ty, llvm::DIFile Fg);
llvm::DIType CreateType(const ObjCObjectPointerType *Ty,
llvm::DIFile F);
llvm::DIType CreateType(const PointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const BlockPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const FunctionType *Ty, llvm::DIFile F);
- llvm::DIType CreateType(const RecordType *Ty);
- llvm::DIType CreateLimitedType(const RecordType *Ty);
+ llvm::DIType CreateType(const RecordType *Tyg);
+ llvm::DIType CreateTypeDefinition(const RecordType *Ty);
+ llvm::DICompositeType CreateLimitedType(const RecordType *Ty);
+ void CollectContainingType(const CXXRecordDecl *RD, llvm::DICompositeType CT);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
@@ -118,19 +126,19 @@ class CGDebugInfo {
llvm::DIType CreateType(const RValueReferenceType *Ty, llvm::DIFile Unit);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const AtomicType *Ty, llvm::DIFile F);
- llvm::DIType CreateEnumType(const EnumDecl *ED);
+ llvm::DIType CreateEnumType(const EnumType *Ty);
llvm::DIType CreateSelfType(const QualType &QualTy, llvm::DIType Ty);
llvm::DIType getTypeOrNull(const QualType);
llvm::DIType getCompletedTypeOrNull(const QualType);
- llvm::DIType getOrCreateMethodType(const CXXMethodDecl *Method,
- llvm::DIFile F);
- llvm::DIType getOrCreateInstanceMethodType(
+ llvm::DICompositeType getOrCreateMethodType(const CXXMethodDecl *Method,
+ llvm::DIFile F);
+ llvm::DICompositeType getOrCreateInstanceMethodType(
QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit);
- llvm::DIType getOrCreateFunctionType(const Decl *D, QualType FnType,
- llvm::DIFile F);
+ llvm::DICompositeType getOrCreateFunctionType(const Decl *D, QualType FnType,
+ llvm::DIFile F);
llvm::DIType getOrCreateVTablePtrType(llvm::DIFile F);
llvm::DINameSpace getOrCreateNameSpace(const NamespaceDecl *N);
- llvm::DIType CreatePointeeType(QualType PointeeTy, llvm::DIFile F);
+ llvm::DIType getOrCreateTypeDeclaration(QualType PointeeTy, llvm::DIFile F);
llvm::DIType CreatePointerLikeType(unsigned Tag,
const Type *Ty, QualType PointeeTy,
llvm::DIFile F);
@@ -141,29 +149,24 @@ class CGDebugInfo {
llvm::DISubprogram CreateCXXMemberFunction(const CXXMethodDecl *Method,
llvm::DIFile F,
llvm::DIType RecordTy);
-
+
void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &E,
llvm::DIType T);
- void CollectCXXFriends(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy);
-
void CollectCXXBases(const CXXRecordDecl *Decl,
llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &EltTys,
llvm::DIType RecordTy);
-
+
llvm::DIArray
CollectTemplateParams(const TemplateParameterList *TPList,
- const TemplateArgumentList &TAList,
+ ArrayRef<TemplateArgument> TAList,
llvm::DIFile Unit);
llvm::DIArray
CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit);
- llvm::DIArray
+ llvm::DIArray
CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TS,
llvm::DIFile F);
@@ -171,22 +174,21 @@ class CGDebugInfo {
uint64_t sizeInBitsOverride, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
llvm::DIFile tunit,
- llvm::DIDescriptor scope);
+ llvm::DIScope scope);
// Helpers for collecting fields of a record.
void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
SmallVectorImpl<llvm::Value *> &E,
llvm::DIType RecordTy);
- void CollectRecordStaticField(const VarDecl *Var,
- SmallVectorImpl<llvm::Value *> &E,
- llvm::DIType RecordTy);
+ llvm::DIDerivedType CreateRecordStaticField(const VarDecl *Var,
+ llvm::DIType RecordTy);
void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits,
llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &E,
llvm::DIType RecordTy);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
SmallVectorImpl<llvm::Value *> &E,
- llvm::DIType RecordTy);
+ llvm::DICompositeType RecordTy);
void CollectVTableInfo(const CXXRecordDecl *Decl,
llvm::DIFile F,
@@ -195,7 +197,7 @@ class CGDebugInfo {
// CreateLexicalBlock - Create a new lexical block node and push it on
// the stack.
void CreateLexicalBlock(SourceLocation Loc);
-
+
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
@@ -206,6 +208,9 @@ public:
/// invalid it is ignored.
void setLocation(SourceLocation Loc);
+ /// getLocation - Return the current source location.
+ SourceLocation getLocation() const { return CurLoc; }
+
/// EmitLocation - Emit metadata to indicate a change in line/column
/// information in the source file.
/// \param ForceColumnInfo Assume DebugColumnInfo option is true.
@@ -265,20 +270,30 @@ public:
/// \brief - Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
- /// getOrCreateRecordType - Emit record type's standalone debug info.
+ /// \brief - Emit C++ using declaration.
+ void EmitUsingDecl(const UsingDecl &UD);
+
+ /// \brief - Emit C++ namespace alias.
+ llvm::DIImportedEntity EmitNamespaceAlias(const NamespaceAliasDecl &NA);
+
+ /// getOrCreateRecordType - Emit record type's standalone debug info.
llvm::DIType getOrCreateRecordType(QualType Ty, SourceLocation L);
/// getOrCreateInterfaceType - Emit an objective c interface type standalone
/// debug info.
llvm::DIType getOrCreateInterfaceType(QualType Ty,
- SourceLocation Loc);
+ SourceLocation Loc);
+
+ void completeType(const RecordDecl *RD);
+ void completeRequiredType(const RecordDecl *RD);
+ void completeClassData(const RecordDecl *RD);
private:
/// EmitDeclare - Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, unsigned Tag, llvm::Value *AI,
unsigned ArgNo, CGBuilderTy &Builder);
- // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
+ // EmitTypeForVarWithBlocksAttr - Build up structure info for the byref.
// See BuildByRefType.
llvm::DIType EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *OffSet);
@@ -286,10 +301,12 @@ private:
/// getContextDescriptor - Get context info for the decl.
llvm::DIScope getContextDescriptor(const Decl *Decl);
- /// createRecordFwdDecl - Create a forward decl for a RecordType in a given
- /// context.
- llvm::DIType createRecordFwdDecl(const RecordDecl *, llvm::DIDescriptor);
-
+ llvm::DIScope getCurrentContextDescriptor(const Decl *Decl);
+
+ /// \brief Create a forward decl for a RecordType in a given context.
+ llvm::DICompositeType getOrCreateRecordFwdDecl(const RecordType *,
+ llvm::DIDescriptor);
+
/// createContextChain - Create a set of decls for the context chain.
llvm::DIDescriptor createContextChain(const Decl *Decl);
@@ -299,7 +316,7 @@ private:
/// CreateCompileUnit - Create new compile unit.
void CreateCompileUnit();
- /// getOrCreateFile - Get the file debug info descriptor for the input
+ /// getOrCreateFile - Get the file debug info descriptor for the input
/// location.
llvm::DIFile getOrCreateFile(SourceLocation Loc);
@@ -308,43 +325,43 @@ private:
/// getOrCreateType - Get the type from the cache or create a new type if
/// necessary.
- llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile F);
+ llvm::DIType getOrCreateType(QualType Ty, llvm::DIFile Fg);
/// getOrCreateLimitedType - Get the type from the cache or create a new
/// partial type if necessary.
- llvm::DIType getOrCreateLimitedType(QualType Ty, llvm::DIFile F);
+ llvm::DIType getOrCreateLimitedType(const RecordType *Ty, llvm::DIFile F);
/// CreateTypeNode - Create type metadata for a source language type.
- llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile F);
+ llvm::DIType CreateTypeNode(QualType Ty, llvm::DIFile Fg);
/// getObjCInterfaceDecl - return the underlying ObjCInterfaceDecl
/// if Ty is an ObjCInterface or a pointer to one.
ObjCInterfaceDecl* getObjCInterfaceDecl(QualType Ty);
- /// CreateLimitedTypeNode - Create type metadata for a source language
- /// type, but only partial types for records.
- llvm::DIType CreateLimitedTypeNode(QualType Ty, llvm::DIFile F);
-
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name, uint64_t *Offset);
+ /// \brief Retrieve the DIDescriptor, if any, for the canonical form of this
+ /// declaration.
+ llvm::DIDescriptor getDeclarationOrDefinition(const Decl *D);
+
/// getFunctionDeclaration - Return debug info descriptor to describe method
/// declaration for the given method definition.
llvm::DISubprogram getFunctionDeclaration(const Decl *D);
- /// getStaticDataMemberDeclaration - Return debug info descriptor to
- /// describe in-class static data member declaration for the given
- /// out-of-class definition.
- llvm::DIDerivedType getStaticDataMemberDeclaration(const Decl *D);
+ /// Return debug info descriptor to describe in-class static data member
+ /// declaration for the given out-of-class definition.
+ llvm::DIDerivedType
+ getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
/// getFunctionName - Get function name for the given FunctionDecl. If the
- /// name is constructred on demand (e.g. C++ destructor) then the name
+ /// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
StringRef getFunctionName(const FunctionDecl *FD);
/// getObjCMethodName - Returns the unmangled name of an Objective-C method.
- /// This is the display name for the debugging info.
+ /// This is the display name for the debugging info.
StringRef getObjCMethodName(const ObjCMethodDecl *FD);
/// getSelectorName - Return selector name. This is used for debugging
@@ -361,11 +378,62 @@ private:
/// then use current location.
unsigned getLineNumber(SourceLocation Loc);
- /// getColumnNumber - Get column number for the location. If location is
+ /// getColumnNumber - Get column number for the location. If location is
/// invalid then use current location.
/// \param Force Assume DebugColumnInfo option is true.
unsigned getColumnNumber(SourceLocation Loc, bool Force=false);
+
+ /// internString - Allocate a copy of \p A using the DebugInfoNames allocator
+ /// and return a reference to it. If multiple arguments are given the strings
+ /// are concatenated.
+ StringRef internString(StringRef A, StringRef B = StringRef()) {
+ char *Data = DebugInfoNames.Allocate<char>(A.size() + B.size());
+ std::memcpy(Data, A.data(), A.size());
+ std::memcpy(Data + A.size(), B.data(), B.size());
+ return StringRef(Data, A.size() + B.size());
+ }
};
+
+/// NoLocation - An RAII object that temporarily disables debug
+/// locations. This is useful for emitting instructions that should be
+/// counted towards the function prologue.
+class NoLocation {
+ SourceLocation SavedLoc;
+ CGDebugInfo *DI;
+ CGBuilderTy &Builder;
+public:
+ NoLocation(CodeGenFunction &CGF, CGBuilderTy &B);
+ /// ~NoLocation - Autorestore everything back to normal.
+ ~NoLocation();
+};
+
+/// ArtificialLocation - An RAII object that temporarily switches to
+/// an artificial debug location that has a valid scope, but no line
+/// information. This is useful when emitting compiler-generated
+/// helper functions that have no source location associated with
+/// them. The DWARF specification allows the compiler to use the
+/// special line number 0 to indicate code that cannot be attributed
+/// to any source location.
+///
+/// This is necessary because passing an empty SourceLocation to
+/// CGDebugInfo::setLocation() will result in the last valid location
+/// being reused.
+class ArtificialLocation {
+ SourceLocation SavedLoc;
+ CGDebugInfo *DI;
+ CGBuilderTy &Builder;
+public:
+ ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B);
+
+ /// Set the current location to line 0, but within the current scope
+ /// (= the top of the LexicalBlockStack).
+ void Emit();
+
+ /// ~ArtificialLocation - Autorestore everything back to normal.
+ ~ArtificialLocation();
+};
+
+
} // namespace CodeGen
} // namespace clang
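Note: the two RAII guards declared above (NoLocation and ArtificialLocation) both follow a save-in-constructor, restore-in-destructor shape around the current debug location. A minimal standalone sketch of that pattern, using hypothetical stand-in types rather than the real CGDebugInfo/CGBuilderTy interfaces:

#include <cstdio>
#include <string>

// Hypothetical stand-in for the debug-info state such a guard would save.
struct DebugState {
  std::string currentLoc;
};

// Sketch of the NoLocation idea: clear the location on entry so emitted
// code counts toward the prologue, restore whatever was there on exit.
class ScopedNoLocation {
  DebugState &state;
  std::string saved;
public:
  explicit ScopedNoLocation(DebugState &s) : state(s), saved(s.currentLoc) {
    state.currentLoc.clear();
  }
  ~ScopedNoLocation() { state.currentLoc = saved; }  // autorestore
};

int main() {
  DebugState state{"file.cpp:42"};
  {
    ScopedNoLocation guard(state);
    std::printf("inside guard: '%s'\n", state.currentLoc.c_str());
  }
  std::printf("after guard:  '%s'\n", state.currentLoc.c_str());
}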
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 3ce6dec6a53c..66d6b33eb6f0 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalVariable.h"
@@ -37,6 +38,8 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::UnresolvedUsingTypename:
case Decl::ClassTemplateSpecialization:
case Decl::ClassTemplatePartialSpecialization:
+ case Decl::VarTemplateSpecialization:
+ case Decl::VarTemplatePartialSpecialization:
case Decl::TemplateTypeParm:
case Decl::UnresolvedUsingValue:
case Decl::NonTypeTemplateParm:
@@ -52,6 +55,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::ParmVar:
case Decl::ImplicitParam:
case Decl::ClassTemplate:
+ case Decl::VarTemplate:
case Decl::FunctionTemplate:
case Decl::TypeAliasTemplate:
case Decl::TemplateTemplateParm:
@@ -72,15 +76,13 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Block:
case Decl::Captured:
case Decl::ClassScopeFunctionSpecialization:
+ case Decl::UsingShadow:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
case Decl::Enum: // enum X;
case Decl::EnumConstant: // enum ? { X = ? }
case Decl::CXXRecord: // struct/union/class X; [C++]
- case Decl::Using: // using X; [C++]
- case Decl::UsingShadow:
- case Decl::NamespaceAlias:
case Decl::StaticAssert: // static_assert(X, ""); [C++0x]
case Decl::Label: // __label__ x;
case Decl::Import:
@@ -89,6 +91,14 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
// None of these decls require codegen support.
return;
+ case Decl::NamespaceAlias:
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitNamespaceAlias(cast<NamespaceAliasDecl>(D));
+ return;
+ case Decl::Using: // using X; [C++]
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitUsingDecl(cast<UsingDecl>(D));
+ return;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
@@ -114,35 +124,32 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
/// EmitVarDecl - This method handles emission of any variable declaration
/// inside a function, including static vars etc.
void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
- switch (D.getStorageClass()) {
- case SC_None:
- case SC_Auto:
- case SC_Register:
- return EmitAutoVarDecl(D);
- case SC_Static: {
+ if (D.isStaticLocal()) {
llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
- // If the function definition has some sort of weak linkage, its
- // static variables should also be weak so that they get properly
- // uniqued. We can't do this in C, though, because there's no
- // standard way to agree on which variables are the same (i.e.
- // there's no mangling).
- if (getLangOpts().CPlusPlus)
- if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
- Linkage = CurFn->getLinkage();
+ // If the variable is externally visible, it must have weak linkage so it
+ // can be uniqued.
+ if (D.isExternallyVisible()) {
+ Linkage = llvm::GlobalValue::LinkOnceODRLinkage;
+
+ // FIXME: We need to force the emission/use of a guard variable for
+ // some variables even if we can constant-evaluate them because
+ // we can't guarantee every translation unit will constant-evaluate them.
+ }
return EmitStaticVarDecl(D, Linkage);
}
- case SC_Extern:
- case SC_PrivateExtern:
+
+ if (D.hasExternalStorage())
// Don't emit it now, allow it to be emitted lazily on its first use.
return;
- case SC_OpenCLWorkGroupLocal:
+
+ if (D.getStorageClass() == SC_OpenCLWorkGroupLocal)
return CGM.getOpenCLRuntime().EmitWorkGroupLocalVarDecl(*this, D);
- }
- llvm_unreachable("Unknown storage class");
+ assert(D.hasLocalStorage());
+ return EmitAutoVarDecl(D);
}
static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
@@ -200,8 +207,7 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- if (Linkage != llvm::GlobalValue::InternalLinkage)
- GV->setVisibility(CurFn->getVisibility());
+ CGM.setGlobalVisibility(GV, &D);
if (D.getTLSKind())
CGM.setTLSMode(GV, D);
@@ -420,7 +426,8 @@ namespace {
// byref or something.
DeclRefExpr DRE(const_cast<VarDecl*>(&Var), false,
Var.getType(), VK_LValue, SourceLocation());
- llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE));
+ llvm::Value *value = CGF.EmitLoadOfScalar(CGF.EmitDeclRefLValue(&DRE),
+ SourceLocation());
CGF.EmitExtendGCLifetime(value);
}
};
@@ -647,7 +654,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init,
// might have to initialize with a barrier. We have to do this for
// both __weak and __strong, but __weak got filtered out above.
if (accessedByInit && lifetime == Qualifiers::OCL_Strong) {
- llvm::Value *oldValue = EmitLoadOfScalar(lvalue);
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue, init->getExprLoc());
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
EmitARCRelease(oldValue, ARCImpreciseLifetime);
return;
@@ -838,19 +845,19 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
bool NRVO = getLangOpts().ElideConstructors &&
D.isNRVOVariable();
- // If this value is a POD array or struct with a statically
- // determinable constant initializer, there are optimizations we can do.
+ // If this value is an array or struct with a statically determinable
+ // constant initializer, there are optimizations we can do.
//
// TODO: We should constant-evaluate the initializer of any variable,
// as long as it is initialized by a constant expression. Currently,
// isConstantInitializer produces wrong answers for structs with
// reference or bitfield members, and a few other cases, and checking
// for POD-ness protects us from some of these.
- if (D.getInit() &&
- (Ty->isArrayType() || Ty->isRecordType()) &&
- (Ty.isPODType(getContext()) ||
- getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
- D.getInit()->isConstantInitializer(getContext(), false)) {
+ if (D.getInit() && (Ty->isArrayType() || Ty->isRecordType()) &&
+ (D.isConstexpr() ||
+ ((Ty.isPODType(getContext()) ||
+ getContext().getBaseElementType(Ty)->isObjCObjectPointerType()) &&
+ D.getInit()->isConstantInitializer(getContext(), false)))) {
// If the variable's a const type, and it's neither an NRVO
// candidate nor a __block variable and has no mutable members,
@@ -1078,7 +1085,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
capturedByInit ? emission.Address : emission.getObjectAddress(*this);
llvm::Constant *constant = 0;
- if (emission.IsConstantAggregate) {
+ if (emission.IsConstantAggregate || D.isConstexpr()) {
assert(!capturedByInit && "constant init contains a capturing block?");
constant = CGM.EmitConstantInit(D, this);
}
@@ -1089,6 +1096,13 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return EmitExprAsInit(Init, &D, lv, capturedByInit);
}
+ if (!emission.IsConstantAggregate) {
+ // For simple scalar/complex initialization, store the value directly.
+ LValue lv = MakeAddrLValue(Loc, type, alignment);
+ lv.setNonGC(true);
+ return EmitStoreThroughLValue(RValue::get(constant), lv, true);
+ }
+
// If this is a simple aggregate initialization, we can optimize it
// in various ways.
bool isVolatile = type.isVolatileQualified();
@@ -1151,7 +1165,7 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
QualType type = D->getType();
if (type->isReferenceType()) {
- RValue rvalue = EmitReferenceBindingToExpr(init, D);
+ RValue rvalue = EmitReferenceBindingToExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
EmitStoreThroughLValue(rvalue, lvalue, true);
@@ -1178,7 +1192,6 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased));
}
- MaybeEmitStdInitializerListCleanup(lvalue.getAddress(), init);
return;
}
llvm_unreachable("bad evaluation kind");
@@ -1331,6 +1344,26 @@ void CodeGenFunction::pushDestroy(CleanupKind cleanupKind, llvm::Value *addr,
destroyer, useEHCleanupForArray);
}
+void CodeGenFunction::pushLifetimeExtendedDestroy(
+ CleanupKind cleanupKind, llvm::Value *addr, QualType type,
+ Destroyer *destroyer, bool useEHCleanupForArray) {
+ assert(!isInConditionalBranch() &&
+ "performing lifetime extension from within conditional");
+
+ // Push an EH-only cleanup for the object now.
+ // FIXME: When popping normal cleanups, we need to keep this EH cleanup
+ // around in case a temporary's destructor throws an exception.
+ if (cleanupKind & EHCleanup)
+ EHStack.pushCleanup<DestroyObject>(
+ static_cast<CleanupKind>(cleanupKind & ~NormalCleanup), addr, type,
+ destroyer, useEHCleanupForArray);
+
+ // Remember that we need to push a full cleanup for the object at the
+ // end of the full-expression.
+ pushCleanupAfterFullExpr<DestroyObject>(
+ cleanupKind, addr, type, destroyer, useEHCleanupForArray);
+}
+
/// emitDestroy - Immediately perform the destruction of the given
/// object.
///
@@ -1608,10 +1641,18 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
}
llvm::Value *DeclPtr;
+ bool HasNonScalarEvalKind = !CodeGenFunction::hasScalarEvaluationKind(Ty);
// If this is an aggregate or variable sized value, reuse the input pointer.
- if (!Ty->isConstantSizeType() ||
- !CodeGenFunction::hasScalarEvaluationKind(Ty)) {
+ if (HasNonScalarEvalKind || !Ty->isConstantSizeType()) {
DeclPtr = Arg;
+ // Push a destructor cleanup for this parameter if the ABI requires it.
+ if (HasNonScalarEvalKind &&
+ getTarget().getCXXABI().isArgumentDestroyedByCallee()) {
+ if (const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl()) {
+ if (RD->hasNonTrivialDestructor())
+ pushDestroy(QualType::DK_cxx_destructor, DeclPtr, Ty);
+ }
+ }
} else {
// Otherwise, create a temporary to hold the value.
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty),
@@ -1649,7 +1690,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
// use objc_storeStrong(&dest, value) for retaining the
// object. But first, store a null into 'dest' because
// objc_storeStrong attempts to release its old value.
- llvm::Value * Null = CGM.EmitNullConstant(D.getType());
+ llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
EmitARCStoreStrongCall(lv.getAddress(), Arg, true);
doStore = false;
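Note: the EmitVarDecl hunk above replaces the storage-class switch with semantic predicates (isStaticLocal, hasExternalStorage) and gives externally visible function-local statics linkonce_odr linkage so copies emitted by different translation units can be uniqued. A small source-level example of the case that needs this, assuming the usual header-inline pattern (not code from the patch):

// counter.h (hypothetical header included from several translation units)
inline int &counter() {
  // An externally visible static local: every TU that emits counter()
  // also emits this variable, so its definition (and its guard) must get
  // mergeable linkonce_odr linkage rather than internal linkage.
  static int value = 0;
  return value;
}

// one of the translation units
int main() { return counter()++; }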
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 9ffcff276623..7bdb9eb0a4a6 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -96,13 +96,14 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
CXXDestructorDecl *dtor = record->getDestructor();
function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
- argument = addr;
+ argument = llvm::ConstantExpr::getBitCast(
+ addr, CGF.getTypes().ConvertType(type)->getPointerTo());
// Otherwise, the standard logic requires a helper function.
} else {
- function = CodeGenFunction(CGM).generateDestroyHelper(addr, type,
- CGF.getDestroyer(dtorKind),
- CGF.needsEHCleanup(dtorKind));
+ function = CodeGenFunction(CGM)
+ .generateDestroyHelper(addr, type, CGF.getDestroyer(dtorKind),
+ CGF.needsEHCleanup(dtorKind), &D);
argument = llvm::Constant::getNullValue(CGF.Int8PtrTy);
}
@@ -149,7 +150,7 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
assert(PerformInit && "cannot have constant initializer which needs "
"destruction for reference");
unsigned Alignment = getContext().getDeclAlign(&D).getQuantity();
- RValue RV = EmitReferenceBindingToExpr(Init, &D);
+ RValue RV = EmitReferenceBindingToExpr(Init);
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
@@ -161,23 +162,24 @@ CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
-static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
+static llvm::Constant *createAtExitStub(CodeGenModule &CGM, const VarDecl &VD,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
+ }
llvm::Function *fn =
- CreateGlobalInitOrDestructFunction(CGM, ty,
- Twine("__dtor_", addr->getName()));
+ CreateGlobalInitOrDestructFunction(CGM, ty, FnName.str());
CodeGenFunction CGF(CGM);
- // Initialize debug info if needed.
- CGF.maybeInitializeDebugInfo();
-
- CGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, fn,
- CGM.getTypes().arrangeNullaryFunction(),
- FunctionArgList(), SourceLocation());
+ CGF.StartFunction(&VD, CGM.getContext().VoidTy, fn,
+ CGM.getTypes().arrangeNullaryFunction(), FunctionArgList(),
+ SourceLocation());
llvm::CallInst *call = CGF.Builder.CreateCall(dtor, addr);
@@ -192,10 +194,11 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM,
}
/// Register a global destructor using the C atexit runtime function.
-void CodeGenFunction::registerGlobalDtorWithAtExit(llvm::Constant *dtor,
+void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
+ llvm::Constant *dtor,
llvm::Constant *addr) {
// Create a function which calls the destructor.
- llvm::Constant *dtorStub = createAtExitStub(CGM, dtor, addr);
+ llvm::Constant *dtorStub = createAtExitStub(CGM, VD, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
@@ -257,10 +260,15 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
llvm::GlobalVariable *Addr,
bool PerformInit) {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ SmallString<256> FnName;
+ {
+ llvm::raw_svector_ostream Out(FnName);
+ getCXXABI().getMangleContext().mangleDynamicInitializer(D, Out);
+ }
// Create a variable initialization function.
llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, "__cxx_global_var_init");
+ CreateGlobalInitOrDestructFunction(*this, FTy, FnName.str());
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
PerformInit);
@@ -278,6 +286,20 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
OrderGlobalInits Key(order, PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
+ } else if (D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
+ D->getTemplateSpecializationKind() != TSK_Undeclared) {
+ // C++ [basic.start.init]p2:
+ // Definitions of explicitly specialized class template static data
+ // members have ordered initialization. Other class template static data
+ // members (i.e., implicitly or explicitly instantiated specializations)
+ // have unordered initialization.
+ //
+ // As a consequence, we can put them into their own llvm.global_ctors entry.
+ // This should allow GlobalOpt to fire more often, and allow us to implement
+ // the Microsoft C++ ABI, which uses COMDAT elimination to avoid double
+ // initialization.
+ AddGlobalCtor(Fn);
+ DelayedCXXInitPosition.erase(D);
} else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
DelayedCXXInitPosition.find(D);
@@ -386,8 +408,8 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
llvm::GlobalVariable *Addr,
bool PerformInit) {
// Check if we need to emit debug info for variable initializer.
- if (!D->hasAttr<NoDebugAttr>())
- maybeInitializeDebugInfo();
+ if (D->hasAttr<NoDebugAttr>())
+ DebugInfo = NULL; // disable debug info indefinitely for this function
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
@@ -410,9 +432,6 @@ void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
ArrayRef<llvm::Constant *> Decls,
llvm::GlobalVariable *Guard) {
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
-
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -459,9 +478,6 @@ CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
-
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), SourceLocation());
@@ -481,11 +497,9 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
/// generateDestroyHelper - Generates a helper function which, when
/// invoked, destroys the given object.
-llvm::Function *
-CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
- QualType type,
- Destroyer *destroyer,
- bool useEHCleanupForArray) {
+llvm::Function *CodeGenFunction::generateDestroyHelper(
+ llvm::Constant *addr, QualType type, Destroyer *destroyer,
+ bool useEHCleanupForArray, const VarDecl *VD) {
FunctionArgList args;
ImplicitParamDecl dst(0, SourceLocation(), 0, getContext().VoidPtrTy);
args.push_back(&dst);
@@ -498,11 +512,7 @@ CodeGenFunction::generateDestroyHelper(llvm::Constant *addr,
llvm::Function *fn =
CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
-
- StartFunction(GlobalDecl(), getContext().VoidTy, fn, FI, args,
- SourceLocation());
+ StartFunction(VD, getContext().VoidTy, fn, FI, args, SourceLocation());
emitDestroy(addr, type, destroyer, useEHCleanupForArray);
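Note: the llvm.global_ctors path added above applies to static data members of class templates whose specializations are implicitly or explicitly instantiated; per C++ [basic.start.init]p2 their dynamic initialization is unordered, so each one can become its own global constructor entry. A source-level illustration of the kind of variable this covers (illustrative only):

#include <cstdio>

static int next_id = 0;

template <typename T>
struct Registry {
  static int id;   // static data member of a class template
};

// Implicitly instantiated specializations of this definition have
// unordered dynamic initialization, so each instantiation can live in
// its own initializer function / global_ctors entry.
template <typename T>
int Registry<T>::id = next_id++;

int main() {
  std::printf("%d %d\n", Registry<int>::id, Registry<double>::id);
  return 0;
}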
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index a088d78641fa..39a992aab17d 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -89,7 +89,7 @@ static llvm::Constant *getEndCatchFn(CodeGenModule &CGM) {
}
static llvm::Constant *getUnexpectedFn(CodeGenModule &CGM) {
- // void __cxa_call_unexepcted(void *thrown_exception);
+ // void __cxa_call_unexpected(void *thrown_exception);
llvm::FunctionType *FTy =
llvm::FunctionType::get(CGM.VoidTy, CGM.Int8PtrTy, /*IsVarArgs=*/false);
@@ -766,6 +766,11 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
+ SourceLocation SavedLocation;
+ if (CGDebugInfo *DI = getDebugInfo()) {
+ SavedLocation = DI->getLocation();
+ DI->EmitLocation(Builder, CurEHLocation);
+ }
const EHPersonality &personality = EHPersonality::get(getLangOpts());
@@ -887,6 +892,8 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Restore the old IR generation state.
Builder.restoreIP(savedIP);
+ if (CGDebugInfo *DI = getDebugInfo())
+ DI->EmitLocation(Builder, SavedLocation);
return lpad;
}
@@ -938,7 +945,8 @@ static llvm::Value *CallBeginCatch(CodeGenFunction &CGF,
/// parameter during catch initialization.
static void InitCatchParam(CodeGenFunction &CGF,
const VarDecl &CatchParam,
- llvm::Value *ParamAddr) {
+ llvm::Value *ParamAddr,
+ SourceLocation Loc) {
// Load the exception from where the landing pad saved it.
llvm::Value *Exn = CGF.getExceptionFromSlot();
@@ -1045,11 +1053,11 @@ static void InitCatchParam(CodeGenFunction &CGF,
CGF.getContext().getDeclAlign(&CatchParam));
switch (TEK) {
case TEK_Complex:
- CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV), destLV,
+ CGF.EmitStoreOfComplex(CGF.EmitLoadOfComplex(srcLV, Loc), destLV,
/*init*/ true);
return;
case TEK_Scalar: {
- llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV);
+ llvm::Value *ExnLoad = CGF.EmitLoadOfScalar(srcLV, Loc);
CGF.EmitStoreOfScalar(ExnLoad, destLV, /*init*/ true);
return;
}
@@ -1143,7 +1151,7 @@ static void BeginCatch(CodeGenFunction &CGF, const CXXCatchStmt *S) {
// Emit the local.
CodeGenFunction::AutoVarEmission var = CGF.EmitAutoVarAlloca(*CatchParam);
- InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF));
+ InitCatchParam(CGF, *CatchParam, var.getObjectAddress(CGF), S->getLocStart());
CGF.EmitAutoVarCleanups(var);
}
@@ -1612,8 +1620,15 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
// end of the function by FinishFunction.
TerminateHandler = createBasicBlock("terminate.handler");
Builder.SetInsertPoint(TerminateHandler);
- llvm::CallInst *TerminateCall = EmitNounwindRuntimeCall(getTerminateFn(CGM));
- TerminateCall->setDoesNotReturn();
+ llvm::CallInst *terminateCall;
+ if (useClangCallTerminate(CGM)) {
+ // Load the exception pointer.
+ llvm::Value *exn = getExceptionFromSlot();
+ terminateCall = EmitNounwindRuntimeCall(getClangCallTerminateFn(CGM), exn);
+ } else {
+ terminateCall = EmitNounwindRuntimeCall(getTerminateFn(CGM));
+ }
+ terminateCall->setDoesNotReturn();
Builder.CreateUnreachable();
// Restore the saved insertion state.
@@ -1683,3 +1698,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
return EHResumeBlock;
}
+
+void CodeGenFunction::EmitSEHTryStmt(const SEHTryStmt &S) {
+ CGM.ErrorUnsupported(&S, "SEH __try");
+}
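Note: the terminate-handler hunk above makes the handler load the live exception pointer and pass it to a Clang-emitted helper (via getClangCallTerminateFn) when that helper is in use, rather than calling the plain terminate function. The observable behavior of programs that hit this path is unchanged; a small program that reaches a terminate handler during unwinding, for reference:

#include <stdexcept>

struct ThrowsInDtor {
  // A destructor that throws while another exception is in flight
  // forces the runtime onto the terminate path.
  ~ThrowsInDtor() noexcept(false) { throw std::runtime_error("from dtor"); }
};

int main() {
  try {
    ThrowsInDtor guard;
    throw std::runtime_error("from body");   // unwinding destroys 'guard'
  } catch (...) {
    // never reached: the second throw during unwinding terminates
  }
  return 0;
}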
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 64670c5e81e4..cb990b243fba 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -171,244 +171,240 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
llvm_unreachable("bad evaluation kind");
}
-static llvm::Value *
-CreateReferenceTemporary(CodeGenFunction &CGF, QualType Type,
- const NamedDecl *InitializedDecl) {
- if (const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (VD->hasGlobalStorage()) {
- SmallString<256> Name;
- llvm::raw_svector_ostream Out(Name);
- CGF.CGM.getCXXABI().getMangleContext().mangleReferenceTemporary(VD, Out);
- Out.flush();
-
- llvm::Type *RefTempTy = CGF.ConvertTypeForMem(Type);
-
- // Create the reference temporary.
- llvm::GlobalVariable *RefTemp =
- new llvm::GlobalVariable(CGF.CGM.getModule(),
- RefTempTy, /*isConstant=*/false,
- llvm::GlobalValue::InternalLinkage,
- llvm::Constant::getNullValue(RefTempTy),
- Name.str());
- // If we're binding to a thread_local variable, the temporary is also
- // thread local.
- if (VD->getTLSKind())
- CGF.CGM.setTLSMode(RefTemp, *VD);
- return RefTemp;
- }
- }
-
- return CGF.CreateMemTemp(Type, "ref.tmp");
-}
-
-static llvm::Value *
-EmitExprForReferenceBinding(CodeGenFunction &CGF, const Expr *E,
- llvm::Value *&ReferenceTemporary,
- const CXXDestructorDecl *&ReferenceTemporaryDtor,
- const InitListExpr *&ReferenceInitializerList,
- QualType &ObjCARCReferenceLifetimeType,
- const NamedDecl *InitializedDecl) {
- const MaterializeTemporaryExpr *M = NULL;
- E = E->findMaterializedTemporary(M);
+static void
+pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
+ const Expr *E, llvm::Value *ReferenceTemporary) {
// Objective-C++ ARC:
// If we are binding a reference to a temporary that has ownership, we
// need to perform retain/release operations on the temporary.
- if (M && CGF.getLangOpts().ObjCAutoRefCount &&
- M->getType()->isObjCLifetimeType() &&
- (M->getType().getObjCLifetime() == Qualifiers::OCL_Strong ||
- M->getType().getObjCLifetime() == Qualifiers::OCL_Weak ||
- M->getType().getObjCLifetime() == Qualifiers::OCL_Autoreleasing))
- ObjCARCReferenceLifetimeType = M->getType();
-
- if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(E)) {
- CGF.enterFullExpression(EWC);
- CodeGenFunction::RunCleanupsScope Scope(CGF);
-
- return EmitExprForReferenceBinding(CGF, EWC->getSubExpr(),
- ReferenceTemporary,
- ReferenceTemporaryDtor,
- ReferenceInitializerList,
- ObjCARCReferenceLifetimeType,
- InitializedDecl);
- }
-
- if (E->isGLValue()) {
- // Emit the expression as an lvalue.
- LValue LV = CGF.EmitLValue(E);
- assert(LV.isSimple());
- return LV.getAddress();
- }
-
- if (!ObjCARCReferenceLifetimeType.isNull()) {
- ReferenceTemporary = CreateReferenceTemporary(CGF,
- ObjCARCReferenceLifetimeType,
- InitializedDecl);
-
-
- LValue RefTempDst = CGF.MakeAddrLValue(ReferenceTemporary,
- ObjCARCReferenceLifetimeType);
-
- CGF.EmitScalarInit(E, dyn_cast_or_null<ValueDecl>(InitializedDecl),
- RefTempDst, false);
-
- bool ExtendsLifeOfTemporary = false;
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(InitializedDecl)) {
- if (Var->extendsLifetimeOfTemporary())
- ExtendsLifeOfTemporary = true;
- } else if (InitializedDecl && isa<FieldDecl>(InitializedDecl)) {
- ExtendsLifeOfTemporary = true;
- }
-
- if (!ExtendsLifeOfTemporary) {
- // Since the lifetime of this temporary isn't going to be extended,
- // we need to clean it up ourselves at the end of the full expression.
- switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
-
- case Qualifiers::OCL_Strong: {
- assert(!ObjCARCReferenceLifetimeType->isArrayType());
- CleanupKind cleanupKind = CGF.getARCCleanupKind();
- CGF.pushDestroy(cleanupKind,
- ReferenceTemporary,
- ObjCARCReferenceLifetimeType,
- CodeGenFunction::destroyARCStrongImprecise,
- cleanupKind & EHCleanup);
- break;
- }
-
- case Qualifiers::OCL_Weak:
+ //
+ // FIXME: This should be looking at E, not M.
+ if (CGF.getLangOpts().ObjCAutoRefCount &&
+ M->getType()->isObjCLifetimeType()) {
+ QualType ObjCARCReferenceLifetimeType = M->getType();
+ switch (Qualifiers::ObjCLifetime Lifetime =
+ ObjCARCReferenceLifetimeType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ // Carry on to normal cleanup handling.
+ break;
+
+ case Qualifiers::OCL_Autoreleasing:
+ // Nothing to do; cleaned up by an autorelease pool.
+ return;
+
+ case Qualifiers::OCL_Strong:
+ case Qualifiers::OCL_Weak:
+ switch (StorageDuration Duration = M->getStorageDuration()) {
+ case SD_Static:
+ // Note: we intentionally do not register a cleanup to release
+ // the object on program termination.
+ return;
+
+ case SD_Thread:
+ // FIXME: We should probably register a cleanup in this case.
+ return;
+
+ case SD_Automatic:
+ case SD_FullExpression:
assert(!ObjCARCReferenceLifetimeType->isArrayType());
- CGF.pushDestroy(NormalAndEHCleanup,
- ReferenceTemporary,
- ObjCARCReferenceLifetimeType,
- CodeGenFunction::destroyARCWeak,
- /*useEHCleanupForArray*/ true);
- break;
+ CodeGenFunction::Destroyer *Destroy;
+ CleanupKind CleanupKind;
+ if (Lifetime == Qualifiers::OCL_Strong) {
+ const ValueDecl *VD = M->getExtendingDecl();
+ bool Precise =
+ VD && isa<VarDecl>(VD) && VD->hasAttr<ObjCPreciseLifetimeAttr>();
+ CleanupKind = CGF.getARCCleanupKind();
+ Destroy = Precise ? &CodeGenFunction::destroyARCStrongPrecise
+ : &CodeGenFunction::destroyARCStrongImprecise;
+ } else {
+ // __weak objects always get EH cleanups; otherwise, exceptions
+ // could cause really nasty crashes instead of mere leaks.
+ CleanupKind = NormalAndEHCleanup;
+ Destroy = &CodeGenFunction::destroyARCWeak;
+ }
+ if (Duration == SD_FullExpression)
+ CGF.pushDestroy(CleanupKind, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType, *Destroy,
+ CleanupKind & EHCleanup);
+ else
+ CGF.pushLifetimeExtendedDestroy(CleanupKind, ReferenceTemporary,
+ ObjCARCReferenceLifetimeType,
+ *Destroy, CleanupKind & EHCleanup);
+ return;
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary cannot have dynamic storage duration");
}
-
- ObjCARCReferenceLifetimeType = QualType();
+ llvm_unreachable("unknown storage duration");
}
-
- return ReferenceTemporary;
}
- SmallVector<SubobjectAdjustment, 2> Adjustments;
- E = E->skipRValueSubobjectAdjustments(Adjustments);
- if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E))
- if (opaque->getType()->isRecordType())
- return CGF.EmitOpaqueValueLValue(opaque).getAddress();
+ CXXDestructorDecl *ReferenceTemporaryDtor = 0;
+ if (const RecordType *RT =
+ E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()) {
+ // Get the destructor for the reference temporary.
+ CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!ClassDecl->hasTrivialDestructor())
+ ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ }
- // Create a reference temporary if necessary.
- AggValueSlot AggSlot = AggValueSlot::ignored();
- if (CGF.hasAggregateEvaluationKind(E->getType())) {
- ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
- InitializedDecl);
- CharUnits Alignment = CGF.getContext().getTypeAlignInChars(E->getType());
- AggValueSlot::IsDestructed_t isDestructed
- = AggValueSlot::IsDestructed_t(InitializedDecl != 0);
- AggSlot = AggValueSlot::forAddr(ReferenceTemporary, Alignment,
- Qualifiers(), isDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
- }
-
- if (InitializedDecl) {
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E)) {
- if (ILE->initializesStdInitializerList()) {
- ReferenceInitializerList = ILE;
- }
- }
- else if (const RecordType *RT =
- E->getType()->getBaseElementTypeUnsafe()->getAs<RecordType>()){
- // Get the destructor for the reference temporary.
- CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(RT->getDecl());
- if (!ClassDecl->hasTrivialDestructor())
- ReferenceTemporaryDtor = ClassDecl->getDestructor();
+ if (!ReferenceTemporaryDtor)
+ return;
+
+ // Call the destructor for the temporary.
+ switch (M->getStorageDuration()) {
+ case SD_Static:
+ case SD_Thread: {
+ llvm::Constant *CleanupFn;
+ llvm::Constant *CleanupArg;
+ if (E->getType()->isArrayType()) {
+ CleanupFn = CodeGenFunction(CGF.CGM).generateDestroyHelper(
+ cast<llvm::Constant>(ReferenceTemporary), E->getType(),
+ CodeGenFunction::destroyCXXObject, CGF.getLangOpts().Exceptions,
+ dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
+ CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
+ } else {
+ CleanupFn =
+ CGF.CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
}
+ CGF.CGM.getCXXABI().registerGlobalDtor(
+ CGF, *cast<VarDecl>(M->getExtendingDecl()), CleanupFn, CleanupArg);
+ break;
}
- RValue RV = CGF.EmitAnyExpr(E, AggSlot);
-
- // Check if need to perform derived-to-base casts and/or field accesses, to
- // get from the temporary object we created (and, potentially, for which we
- // extended the lifetime) to the subobject we're binding the reference to.
- if (!Adjustments.empty()) {
- llvm::Value *Object = RV.getAggregateAddr();
- for (unsigned I = Adjustments.size(); I != 0; --I) {
- SubobjectAdjustment &Adjustment = Adjustments[I-1];
- switch (Adjustment.Kind) {
- case SubobjectAdjustment::DerivedToBaseAdjustment:
- Object =
- CGF.GetAddressOfBaseClass(Object,
- Adjustment.DerivedToBase.DerivedClass,
- Adjustment.DerivedToBase.BasePath->path_begin(),
- Adjustment.DerivedToBase.BasePath->path_end(),
- /*NullCheckValue=*/false);
- break;
-
- case SubobjectAdjustment::FieldAdjustment: {
- LValue LV = CGF.MakeAddrLValue(Object, E->getType());
- LV = CGF.EmitLValueForField(LV, Adjustment.Field);
- if (LV.isSimple()) {
- Object = LV.getAddress();
- break;
- }
-
- // For non-simple lvalues, we actually have to create a copy of
- // the object we're binding to.
- QualType T = Adjustment.Field->getType().getNonReferenceType()
- .getUnqualifiedType();
- Object = CreateReferenceTemporary(CGF, T, InitializedDecl);
- LValue TempLV = CGF.MakeAddrLValue(Object,
- Adjustment.Field->getType());
- CGF.EmitStoreThroughLValue(CGF.EmitLoadOfLValue(LV), TempLV);
- break;
- }
+ case SD_FullExpression:
+ CGF.pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
+ CodeGenFunction::destroyCXXObject,
+ CGF.getLangOpts().Exceptions);
+ break;
- case SubobjectAdjustment::MemberPointerAdjustment: {
- llvm::Value *Ptr = CGF.EmitScalarExpr(Adjustment.Ptr.RHS);
- Object = CGF.CGM.getCXXABI().EmitMemberDataPointerAddress(
- CGF, Object, Ptr, Adjustment.Ptr.MPT);
- break;
- }
- }
+ case SD_Automatic:
+ CGF.pushLifetimeExtendedDestroy(NormalAndEHCleanup,
+ ReferenceTemporary, E->getType(),
+ CodeGenFunction::destroyCXXObject,
+ CGF.getLangOpts().Exceptions);
+ break;
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary cannot have dynamic storage duration");
+ }
+}
+
+static llvm::Value *
+createReferenceTemporary(CodeGenFunction &CGF,
+ const MaterializeTemporaryExpr *M, const Expr *Inner) {
+ switch (M->getStorageDuration()) {
+ case SD_FullExpression:
+ case SD_Automatic:
+ return CGF.CreateMemTemp(Inner->getType(), "ref.tmp");
+
+ case SD_Thread:
+ case SD_Static:
+ return CGF.CGM.GetAddrOfGlobalTemporary(M, Inner);
+
+ case SD_Dynamic:
+ llvm_unreachable("temporary can't have dynamic storage duration");
+ }
+ llvm_unreachable("unknown storage duration");
+}
+
+LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
+ const MaterializeTemporaryExpr *M) {
+ const Expr *E = M->GetTemporaryExpr();
+
+ if (getLangOpts().ObjCAutoRefCount &&
+ M->getType()->isObjCLifetimeType() &&
+ M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
+ M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ // FIXME: Fold this into the general case below.
+ llvm::Value *Object = createReferenceTemporary(*this, M, E);
+ LValue RefTempDst = MakeAddrLValue(Object, M->getType());
+
+ if (llvm::GlobalVariable *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
+ // We should not have emitted the initializer for this temporary as a
+ // constant.
+ assert(!Var->hasInitializer());
+ Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
- return Object;
+ EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+
+ pushTemporaryCleanup(*this, M, E, Object);
+ return RefTempDst;
}
- if (RV.isAggregate())
- return RV.getAggregateAddr();
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+
+ for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
+ EmitIgnoredExpr(CommaLHSs[I]);
- // Create a temporary variable that we can bind the reference to.
- ReferenceTemporary = CreateReferenceTemporary(CGF, E->getType(),
- InitializedDecl);
+ if (const OpaqueValueExpr *opaque = dyn_cast<OpaqueValueExpr>(E)) {
+ if (opaque->getType()->isRecordType()) {
+ assert(Adjustments.empty());
+ return EmitOpaqueValueLValue(opaque);
+ }
+ }
+ // Create and initialize the reference temporary.
+ llvm::Value *Object = createReferenceTemporary(*this, M, E);
+ if (llvm::GlobalVariable *Var = dyn_cast<llvm::GlobalVariable>(Object)) {
+ // If the temporary is a global and has a constant initializer, we may
+ // have already initialized it.
+ if (!Var->hasInitializer()) {
+ Var->setInitializer(CGM.EmitNullConstant(E->getType()));
+ EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
+ }
+ } else {
+ EmitAnyExprToMem(E, Object, Qualifiers(), /*IsInit*/true);
+ }
+ pushTemporaryCleanup(*this, M, E, Object);
+
+ // Perform derived-to-base casts and/or field accesses, to get from the
+ // temporary object we created (and, potentially, for which we extended
+ // the lifetime) to the subobject we're binding the reference to.
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object =
+ GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
+ Adjustment.DerivedToBase.BasePath->path_begin(),
+ Adjustment.DerivedToBase.BasePath->path_end(),
+ /*NullCheckValue=*/ false);
+ break;
- LValue tempLV = CGF.MakeNaturalAlignAddrLValue(ReferenceTemporary,
- E->getType());
- if (RV.isScalar())
- CGF.EmitStoreOfScalar(RV.getScalarVal(), tempLV, /*init*/ true);
- else
- CGF.EmitStoreOfComplex(RV.getComplexVal(), tempLV, /*init*/ true);
- return ReferenceTemporary;
+ case SubobjectAdjustment::FieldAdjustment: {
+ LValue LV = MakeAddrLValue(Object, E->getType());
+ LV = EmitLValueForField(LV, Adjustment.Field);
+ assert(LV.isSimple() &&
+ "materialized temporary field is not a simple lvalue");
+ Object = LV.getAddress();
+ break;
+ }
+
+ case SubobjectAdjustment::MemberPointerAdjustment: {
+ llvm::Value *Ptr = EmitScalarExpr(Adjustment.Ptr.RHS);
+ Object = CGM.getCXXABI().EmitMemberDataPointerAddress(
+ *this, Object, Ptr, Adjustment.Ptr.MPT);
+ break;
+ }
+ }
+ }
+
+ return MakeAddrLValue(Object, M->getType());
}
RValue
-CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
- const NamedDecl *InitializedDecl) {
- llvm::Value *ReferenceTemporary = 0;
- const CXXDestructorDecl *ReferenceTemporaryDtor = 0;
- const InitListExpr *ReferenceInitializerList = 0;
- QualType ObjCARCReferenceLifetimeType;
- llvm::Value *Value = EmitExprForReferenceBinding(*this, E, ReferenceTemporary,
- ReferenceTemporaryDtor,
- ReferenceInitializerList,
- ObjCARCReferenceLifetimeType,
- InitializedDecl);
+CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E) {
+ // Emit the expression as an lvalue.
+ LValue LV = EmitLValue(E);
+ assert(LV.isSimple());
+ llvm::Value *Value = LV.getAddress();
+
if (SanitizePerformTypeCheck && !E->getType()->isFunctionType()) {
// C++11 [dcl.ref]p5 (as amended by core issue 453):
// If a glvalue to which a reference is directly bound designates neither
@@ -418,80 +414,7 @@ CodeGenFunction::EmitReferenceBindingToExpr(const Expr *E,
QualType Ty = E->getType();
EmitTypeCheck(TCK_ReferenceBinding, E->getExprLoc(), Value, Ty);
}
- if (!ReferenceTemporaryDtor && !ReferenceInitializerList &&
- ObjCARCReferenceLifetimeType.isNull())
- return RValue::get(Value);
-
- // Make sure to call the destructor for the reference temporary.
- const VarDecl *VD = dyn_cast_or_null<VarDecl>(InitializedDecl);
- if (VD && VD->hasGlobalStorage()) {
- if (ReferenceTemporaryDtor) {
- llvm::Constant *CleanupFn;
- llvm::Constant *CleanupArg;
- if (E->getType()->isArrayType()) {
- CleanupFn = CodeGenFunction(CGM).generateDestroyHelper(
- cast<llvm::Constant>(ReferenceTemporary), E->getType(),
- destroyCXXObject, getLangOpts().Exceptions);
- CleanupArg = llvm::Constant::getNullValue(Int8PtrTy);
- } else {
- CleanupFn =
- CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
- CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
- }
- CGM.getCXXABI().registerGlobalDtor(*this, *VD, CleanupFn, CleanupArg);
- } else if (ReferenceInitializerList) {
- // FIXME: This is wrong. We need to register a global destructor to clean
- // up the initializer_list object, rather than adding it as a local
- // cleanup.
- EmitStdInitializerListCleanup(ReferenceTemporary,
- ReferenceInitializerList);
- } else {
- assert(!ObjCARCReferenceLifetimeType.isNull() && !VD->getTLSKind());
- // Note: We intentionally do not register a global "destructor" to
- // release the object.
- }
-
- return RValue::get(Value);
- }
- if (ReferenceTemporaryDtor) {
- if (E->getType()->isArrayType())
- pushDestroy(NormalAndEHCleanup, ReferenceTemporary, E->getType(),
- destroyCXXObject, getLangOpts().Exceptions);
- else
- PushDestructorCleanup(ReferenceTemporaryDtor, ReferenceTemporary);
- } else if (ReferenceInitializerList) {
- EmitStdInitializerListCleanup(ReferenceTemporary,
- ReferenceInitializerList);
- } else {
- switch (ObjCARCReferenceLifetimeType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- llvm_unreachable(
- "Not a reference temporary that needs to be deallocated");
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- // Nothing to do.
- break;
-
- case Qualifiers::OCL_Strong: {
- bool precise = VD && VD->hasAttr<ObjCPreciseLifetimeAttr>();
- CleanupKind cleanupKind = getARCCleanupKind();
- pushDestroy(cleanupKind, ReferenceTemporary, ObjCARCReferenceLifetimeType,
- precise ? destroyARCStrongPrecise : destroyARCStrongImprecise,
- cleanupKind & EHCleanup);
- break;
- }
-
- case Qualifiers::OCL_Weak: {
- // __weak objects always get EH cleanups; otherwise, exceptions
- // could cause really nasty crashes instead of mere leaks.
- pushDestroy(NormalAndEHCleanup, ReferenceTemporary,
- ObjCARCReferenceLifetimeType, destroyARCWeak, true);
- break;
- }
- }
- }
-
return RValue::get(Value);
}
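Note: the rewritten materialized-temporary path above dispatches on the temporary's storage duration: full-expression temporaries get an ordinary end-of-statement cleanup, automatic (lifetime-extended) temporaries get a scope-end cleanup via pushLifetimeExtendedDestroy, and static/thread-duration temporaries are handed to the global-destructor machinery. A source-level sketch of the three cases (illustrative, not taken from the patch):

#include <string>

// SD_Static: a namespace-scope reference extends the temporary's lifetime
// to that of the program; its destructor is registered globally.
const std::string &static_ref = std::string("static duration");

void demo() {
  // SD_Automatic: the temporary's lifetime is extended to the end of the
  // enclosing scope, so its destructor runs at scope exit.
  const std::string &local_ref = std::string("automatic duration");

  // SD_FullExpression: no reference binding, so the temporary is
  // destroyed at the end of this statement.
  auto n = std::string("full-expression duration").size();

  (void)local_ref;
  (void)n;
}

int main() {
  demo();
  return 0;
}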
@@ -553,7 +476,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// The glvalue must refer to a large enough storage region.
// FIXME: If Address Sanitizer is enabled, insert dynamic instrumentation
// to check this.
- llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, IntPtrTy);
+ // FIXME: Get object address space
+ llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
+ llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
llvm::Value *Min = Builder.getFalse();
llvm::Value *CastAddr = Builder.CreateBitCast(Address, Int8PtrTy);
llvm::Value *LargeEnough =
@@ -716,7 +641,8 @@ static llvm::Value *getArrayIndexingBound(
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
llvm::Value *Index, QualType IndexType,
bool Accessed) {
- assert(SanOpts->Bounds && "should not be called unless adding bounds checks");
+ assert(SanOpts->ArrayBounds &&
+ "should not be called unless adding bounds checks");
QualType IndexedType;
llvm::Value *Bound = getArrayIndexingBound(*this, Base, IndexedType);
@@ -741,13 +667,13 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
CodeGenFunction::ComplexPairTy CodeGenFunction::
EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
- ComplexPairTy InVal = EmitLoadOfComplex(LV);
-
+ ComplexPairTy InVal = EmitLoadOfComplex(LV, E->getExprLoc());
+
llvm::Value *NextVal;
if (isa<llvm::IntegerType>(InVal.first->getType())) {
uint64_t AmountVal = isInc ? 1 : -1;
NextVal = llvm::ConstantInt::get(InVal.first->getType(), AmountVal, true);
-
+
// Add the inc/dec to the real part.
NextVal = Builder.CreateAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
} else {
@@ -756,16 +682,16 @@ EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
if (!isInc)
FVal.changeSign();
NextVal = llvm::ConstantFP::get(getLLVMContext(), FVal);
-
+
// Add the inc/dec to the real part.
NextVal = Builder.CreateFAdd(InVal.first, NextVal, isInc ? "inc" : "dec");
}
-
+
ComplexPairTy IncVal(NextVal, InVal.second);
-
+
// Store the updated result through the lvalue.
EmitStoreOfComplex(IncVal, LV, /*init*/ false);
-
+
// If this is a postinc, return the value read from memory, otherwise use the
// updated value.
return isPre ? IncVal : InVal;
@@ -787,7 +713,7 @@ RValue CodeGenFunction::GetUndefRValue(QualType Ty) {
llvm::Value *U = llvm::UndefValue::get(EltTy);
return RValue::getComplex(std::make_pair(U, U));
}
-
+
// If this is a use of an undefined aggregate type, the aggregate must have an
// identifiable address. Just because the contents of the value are undefined
// doesn't mean that the address can't be taken and compared.
@@ -817,7 +743,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV;
- if (SanOpts->Bounds && isa<ArraySubscriptExpr>(E))
+ if (SanOpts->ArrayBounds && isa<ArraySubscriptExpr>(E))
LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
else
LV = EmitLValue(E);
@@ -899,8 +825,6 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
return EmitLValue(cleanups->getSubExpr());
}
- case Expr::CXXScalarValueInitExprClass:
- return EmitNullInitializationLValue(cast<CXXScalarValueInitExpr>(E));
case Expr::CXXDefaultArgExprClass:
return EmitLValue(cast<CXXDefaultArgExpr>(E)->getExpr());
case Expr::CXXDefaultInitExprClass: {
@@ -931,7 +855,7 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
case Expr::BinaryConditionalOperatorClass:
return EmitConditionalOperatorLValue(cast<BinaryConditionalOperator>(E));
case Expr::ChooseExprClass:
- return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr(getContext()));
+ return EmitLValue(cast<ChooseExpr>(E)->getChosenSubExpr());
case Expr::OpaqueValueExprClass:
return EmitOpaqueValueLValue(cast<OpaqueValueExpr>(E));
case Expr::SubstNonTypeTemplateParmExprClass:
@@ -1047,7 +971,7 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
llvm::Constant *C = CGM.EmitConstantValue(result.Val, resultType, this);
// Make sure we emit a debug reference to the global variable.
- // This should probably fire even for
+ // This should probably fire even for
if (isa<VarDecl>(value)) {
if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
EmitDeclRefExprDbgValue(refExpr, C);
@@ -1063,10 +987,11 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
return ConstantEmission::forValue(C);
}
-llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue) {
+llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
+ SourceLocation Loc) {
return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getAlignment().getQuantity(),
- lvalue.getType(), lvalue.getTBAAInfo(),
+ lvalue.getType(), Loc, lvalue.getTBAAInfo(),
lvalue.getTBAABaseType(), lvalue.getTBAAOffset());
}
@@ -1128,21 +1053,22 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
}
llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
- unsigned Alignment, QualType Ty,
- llvm::MDNode *TBAAInfo,
- QualType TBAABaseType,
- uint64_t TBAAOffset) {
+ unsigned Alignment, QualType Ty,
+ SourceLocation Loc,
+ llvm::MDNode *TBAAInfo,
+ QualType TBAABaseType,
+ uint64_t TBAAOffset) {
// For better performance, handle vector loads differently.
if (Ty->isVectorType()) {
llvm::Value *V;
const llvm::Type *EltTy =
cast<llvm::PointerType>(Addr->getType())->getElementType();
-
+
const llvm::VectorType *VTy = cast<llvm::VectorType>(EltTy);
-
+
// Handle vectors of size 3, like size 4 for better performance.
if (VTy->getNumElements() == 3) {
-
+
// Bitcast to vec4 type.
llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
4);
@@ -1175,9 +1101,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
LValue lvalue = LValue::MakeAddr(Addr, Ty,
CharUnits::fromQuantity(Alignment),
getContext(), TBAAInfo);
- return EmitAtomicLoad(lvalue).getScalarVal();
+ return EmitAtomicLoad(lvalue, Loc).getScalarVal();
}
-
+
llvm::LoadInst *Load = Builder.CreateLoad(Addr);
if (Volatile)
Load->setVolatile(true);
@@ -1186,7 +1112,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
- CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
+ if (TBAAPath)
+ CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
}
if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) ||
@@ -1205,9 +1132,12 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
Load, llvm::ConstantInt::get(getLLVMContext(), Min));
Check = Builder.CreateAnd(Upper, Lower);
}
- // FIXME: Provide a SourceLocation.
- EmitCheck(Check, "load_invalid_value", EmitCheckTypeDescriptor(Ty),
- EmitCheckValue(Load), CRK_Recoverable);
+ llvm::Constant *StaticArgs[] = {
+ EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty)
+ };
+ EmitCheck(Check, "load_invalid_value", StaticArgs, EmitCheckValue(Load),
+ CRK_Recoverable);
}
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
@@ -1243,11 +1173,10 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
bool Volatile, unsigned Alignment,
- QualType Ty,
- llvm::MDNode *TBAAInfo,
+ QualType Ty, llvm::MDNode *TBAAInfo,
bool isInit, QualType TBAABaseType,
uint64_t TBAAOffset) {
-
+
// Handle vectors differently to get better performance.
if (Ty->isVectorType()) {
llvm::Type *SrcTy = Value->getType();
@@ -1255,20 +1184,17 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
// Handle vec3 special.
if (VecTy->getNumElements() == 3) {
llvm::LLVMContext &VMContext = getLLVMContext();
-
+
// Our source is a vec3, do a shuffle vector to make it a vec4.
SmallVector<llvm::Constant*, 4> Mask;
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
0));
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
1));
- Mask.push_back(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext),
+ Mask.push_back(llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
2));
Mask.push_back(llvm::UndefValue::get(llvm::Type::getInt32Ty(VMContext)));
-
+
llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
Value = Builder.CreateShuffleVector(Value,
llvm::UndefValue::get(VecTy),
@@ -1282,7 +1208,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
Addr = Builder.CreateBitCast(Addr, MemTy, "storetmp");
}
}
-
+
Value = EmitToMemory(Value, Ty);
if (Ty->isAtomicType()) {
@@ -1300,7 +1226,8 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, llvm::Value *Addr,
if (TBAAInfo) {
llvm::MDNode *TBAAPath = CGM.getTBAAStructTagInfo(TBAABaseType, TBAAInfo,
TBAAOffset);
- CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
+ if (TBAAPath)
+ CGM.DecorateInstruction(Store, TBAAPath, false/*ConvertTypeToTag*/);
}
}
@@ -1315,7 +1242,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
/// EmitLoadOfLValue - Given an expression that represents a value lvalue, this
/// method emits the address of the lvalue, then loads the result as an rvalue,
/// returning the rvalue.
-RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
+RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
llvm::Value *AddrWeakObj = LV.getAddress();
@@ -1332,7 +1259,7 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV) {
assert(!LV.getType()->isFunctionType());
// Everything needs a load.
- return RValue::get(EmitLoadOfScalar(LV));
+ return RValue::get(EmitLoadOfScalar(LV, Loc));
}
if (LV.isVectorElt()) {
@@ -1420,7 +1347,8 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
/// EmitStoreThroughLValue - Store the specified rvalue into the specified
/// lvalue, where both are guaranteed to the have the same type, and that type
/// is 'Ty'.
-void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit) {
+void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
+ bool isInit) {
if (!Dst.isSimple()) {
if (Dst.isVectorElt()) {
// Read/modify/write the vector, inserting the new element.
@@ -1489,7 +1417,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst, bool isInit
llvm::Value *RHS = EmitScalarExpr(Dst.getBaseIvarExp());
llvm::Value *dst = RHS;
RHS = Builder.CreatePtrToInt(RHS, ResultType, "sub.ptr.rhs.cast");
- llvm::Value *LHS =
+ llvm::Value *LHS =
Builder.CreatePtrToInt(LvalueDst, ResultType, "sub.ptr.lhs.cast");
llvm::Value *BytesBetween = Builder.CreateSub(LHS, RHS, "ivar.offset");
CGM.getObjCRuntime().EmitObjCIvarAssign(*this, src, dst,
@@ -1625,6 +1553,12 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
for (unsigned i = 0; i != NumDstElts; ++i)
Mask.push_back(Builder.getInt32(i));
+ // When the vector size is odd and .odd or .hi is used, the last element
+ // of the Elts constant array will be one past the size of the vector.
+ // Ignore the last element here, if it is greater than the mask size.
+ if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
+ NumSrcElts--;
+
// modify when what gets shuffled in
for (unsigned i = 0; i != NumSrcElts; ++i)
Mask[getAccessedFieldNo(i, Elts)] = Builder.getInt32(i+NumDstElts);
@@ -1654,12 +1588,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
bool IsMemberAccess=false) {
if (Ctx.getLangOpts().getGC() == LangOptions::NonGC)
return;
-
+
if (isa<ObjCIvarRefExpr>(E)) {
QualType ExpTy = E->getType();
if (IsMemberAccess && ExpTy->isPointerType()) {
// If ivar is a structure pointer, assigning to field of
- // this struct follows gcc's behavior and makes it a non-ivar
+ // this struct follows gcc's behavior and makes it a non-ivar
// writer-barrier conservatively.
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType()) {
@@ -1673,7 +1607,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LV.setObjCArray(E->getType()->isArrayType());
return;
}
-
+
if (const DeclRefExpr *Exp = dyn_cast<DeclRefExpr>(E)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(Exp->getDecl())) {
if (VD->hasGlobalStorage()) {
@@ -1684,12 +1618,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
LV.setObjCArray(E->getType()->isArrayType());
return;
}
-
+
if (const UnaryOperator *Exp = dyn_cast<UnaryOperator>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
-
+
if (const ParenExpr *Exp = dyn_cast<ParenExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
if (LV.isObjCIvar()) {
@@ -1699,7 +1633,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (ExpTy->isPointerType())
ExpTy = ExpTy->getAs<PointerType>()->getPointeeType();
if (ExpTy->isRecordType())
- LV.setObjCIvar(false);
+ LV.setObjCIvar(false);
}
return;
}
@@ -1713,7 +1647,7 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
}
-
+
if (const CStyleCastExpr *Exp = dyn_cast<CStyleCastExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getSubExpr(), LV, IsMemberAccess);
return;
@@ -1726,12 +1660,12 @@ static void setObjCGCLValueClass(const ASTContext &Ctx, const Expr *E,
if (const ArraySubscriptExpr *Exp = dyn_cast<ArraySubscriptExpr>(E)) {
setObjCGCLValueClass(Ctx, Exp->getBase(), LV);
- if (LV.isObjCIvar() && !LV.isObjCArray())
- // Using array syntax to assigning to what an ivar points to is not
+ if (LV.isObjCIvar() && !LV.isObjCArray())
+ // Using array syntax to assigning to what an ivar points to is not
// same as assigning to the ivar itself. {id *Names;} Names[i] = 0;
- LV.setObjCIvar(false);
+ LV.setObjCIvar(false);
else if (LV.isGlobalObjCRef() && !LV.isObjCArray())
- // Using array syntax to assigning to what global points to is not
+ // Using array syntax to assigning to what global points to is not
// same as assigning to the global itself. {id *G;} G[i] = 0;
LV.setGlobalObjCRef(false);
return;
@@ -1793,6 +1727,13 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
return CGF.MakeAddrLValue(V, E->getType(), Alignment);
}
+static LValue EmitCapturedFieldLValue(CodeGenFunction &CGF, const FieldDecl *FD,
+ llvm::Value *ThisValue) {
+ QualType TagType = CGF.getContext().getTagDeclType(FD->getParent());
+ LValue LV = CGF.MakeNaturalAlignAddrLValue(ThisValue, TagType);
+ return CGF.EmitLValueForField(LV, FD);
+}
+
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
const NamedDecl *ND = E->getDecl();
CharUnits Alignment = getContext().getDeclAlign(ND);
@@ -1838,16 +1779,17 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
bool isBlockVariable = VD->hasAttr<BlocksAttr>();
llvm::Value *V = LocalDeclMap.lookup(VD);
- if (!V && VD->isStaticLocal())
+ if (!V && VD->isStaticLocal())
V = CGM.getStaticLocalDeclAddress(VD);
// Use special handling for lambdas.
if (!V) {
if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
- QualType LambdaTagType = getContext().getTagDeclType(FD->getParent());
- LValue LambdaLV = MakeNaturalAlignAddrLValue(CXXABIThisValue,
- LambdaTagType);
- return EmitLValueForField(LambdaLV, FD);
+ return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
+ } else if (CapturedStmtInfo) {
+ if (const FieldDecl *FD = CapturedStmtInfo->lookup(VD))
+ return EmitCapturedFieldLValue(*this, FD,
+ CapturedStmtInfo->getContextValue());
}
assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
@@ -1888,8 +1830,8 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
return LV;
}
- if (const FunctionDecl *fn = dyn_cast<FunctionDecl>(ND))
- return EmitFunctionDeclLValue(*this, E, fn);
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
+ return EmitFunctionDeclLValue(*this, E, FD);
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -1945,7 +1887,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
case UO_PreDec: {
LValue LV = EmitLValue(E->getSubExpr());
bool isInc = E->getOpcode() == UO_PreInc;
-
+
if (E->getType()->isAnyComplexType())
EmitComplexPrePostIncDec(E, LV, isInc, true/*isPre*/);
else
@@ -2008,8 +1950,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Func:
case PredefinedExpr::Function:
case PredefinedExpr::LFunction:
+ case PredefinedExpr::FuncDName:
case PredefinedExpr::PrettyFunction: {
- unsigned IdentType = E->getIdentType();
+ PredefinedExpr::IdentType IdentType = E->getIdentType();
std::string GlobalVarName;
switch (IdentType) {
@@ -2020,6 +1963,9 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
case PredefinedExpr::Function:
GlobalVarName = "__FUNCTION__.";
break;
+ case PredefinedExpr::FuncDName:
+ GlobalVarName = "__FUNCDNAME__.";
+ break;
case PredefinedExpr::LFunction:
GlobalVarName = "L__FUNCTION__.";
break;
@@ -2033,17 +1979,28 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
FnName = FnName.substr(1);
GlobalVarName += FnName;
+  // If this is outside of a function, use the top level decl.
const Decl *CurDecl = CurCodeDecl;
- if (CurDecl == 0)
+ if (CurDecl == 0 || isa<VarDecl>(CurDecl))
CurDecl = getContext().getTranslationUnitDecl();
- std::string FunctionName =
- (isa<BlockDecl>(CurDecl)
- ? FnName.str()
- : PredefinedExpr::ComputeName((PredefinedExpr::IdentType)IdentType,
- CurDecl));
+ const Type *ElemType = E->getType()->getArrayElementTypeNoTypeQual();
+ std::string FunctionName;
+ if (isa<BlockDecl>(CurDecl)) {
+ // Blocks use the mangled function name.
+ // FIXME: ComputeName should handle blocks.
+ FunctionName = FnName.str();
+ } else if (isa<CapturedDecl>(CurDecl)) {
+    // For a captured statement, the function name is the name of its
+    // enclosing function, not the compiler-generated one.
+ FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
+ } else {
+ FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
+ assert(cast<ConstantArrayType>(E->getType())->getSize() - 1 ==
+ FunctionName.size() &&
+ "Computed __func__ length differs from type!");
+ }
- const Type* ElemType = E->getType()->getArrayElementTypeNoTypeQual();
llvm::Constant *C;
if (ElemType->isWideCharType()) {
SmallString<32> RawChars;
@@ -2076,7 +2033,10 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
/// followed by an array of i8 containing the type name. TypeKind is 0 for an
/// integer, 1 for a floating point value, and -1 for anything else.
llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
- // FIXME: Only emit each type's descriptor once.
+ // Only emit each type's descriptor once.
+ if (llvm::Constant *C = CGM.getTypeDescriptor(T))
+ return C;
+
uint16_t TypeKind = -1;
uint16_t TypeInfo = 0;
@@ -2109,6 +2069,10 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
llvm::GlobalVariable::PrivateLinkage,
Descriptor);
GV->setUnnamedAddr(true);
+
+ // Remember the descriptor for this type.
+ CGM.setTypeDescriptor(T, GV);
+
return GV;
}
@@ -2151,12 +2115,10 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
llvm::Constant *Data[] = {
- // FIXME: Only emit each file name once.
- PLoc.isValid() ? cast<llvm::Constant>(
- Builder.CreateGlobalStringPtr(PLoc.getFilename()))
+ PLoc.isValid() ? CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src")
: llvm::Constant::getNullValue(Int8PtrTy),
- Builder.getInt32(PLoc.getLine()),
- Builder.getInt32(PLoc.getColumn())
+ Builder.getInt32(PLoc.isValid() ? PLoc.getLine() : 0),
+ Builder.getInt32(PLoc.isValid() ? PLoc.getColumn() : 0)
};
return llvm::ConstantStruct::getAnon(Data);
@@ -2271,12 +2233,12 @@ static const Expr *isSimpleArrayDecayOperand(const Expr *E) {
const CastExpr *CE = dyn_cast<CastExpr>(E);
if (CE == 0 || CE->getCastKind() != CK_ArrayToPointerDecay)
return 0;
-
+
// If this is a decay from variable width array, bail out.
const Expr *SubExpr = CE->getSubExpr();
if (SubExpr->getType()->isVariableArrayType())
return 0;
-
+
return SubExpr;
}
@@ -2287,7 +2249,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
QualType IdxTy = E->getIdx()->getType();
bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
- if (SanOpts->Bounds)
+ if (SanOpts->ArrayBounds)
EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
// If the base is a vector type, then we are forming a vector element lvalue
@@ -2360,7 +2322,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
llvm::Value *ArrayPtr = ArrayLV.getAddress();
llvm::Value *Zero = llvm::ConstantInt::get(Int32Ty, 0);
llvm::Value *Args[] = { Zero, Idx };
-
+
// Propagate the alignment from the array itself to the result.
ArrayAlignment = ArrayLV.getAlignment();
@@ -2381,7 +2343,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
assert(!T.isNull() &&
"CodeGenFunction::EmitArraySubscriptExpr(): Illegal base type");
-
+
// Limit the alignment to that of the result type.
LValue LV;
if (!ArrayAlignment.isZero()) {
@@ -2404,7 +2366,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
static
llvm::Constant *GenerateConstantVector(CGBuilderTy &Builder,
- SmallVector<unsigned, 4> &Elts) {
+ SmallVectorImpl<unsigned> &Elts) {
SmallVector<llvm::Constant*, 4> CElts;
for (unsigned i = 0, e = Elts.size(); i != e; ++i)
CElts.push_back(Builder.getInt32(Elts[i]));
@@ -2435,7 +2397,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
assert(E->getBase()->getType()->isVectorType() &&
"Result must be a vector");
llvm::Value *Vec = EmitScalarExpr(E->getBase());
-
+
// Store the vector to memory (because LValue wants an address).
llvm::Value *VecMem = CreateMemTemp(E->getBase()->getType());
Builder.CreateStore(Vec, VecMem);
@@ -2444,7 +2406,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
QualType type =
E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
-
+
// Encode the element access list into a vector of unsigned indices.
SmallVector<unsigned, 4> Indices;
E->getEncodedElementAccess(Indices);
@@ -2485,7 +2447,7 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
setObjCGCLValueClass(getContext(), E, LV);
return LV;
}
-
+
if (VarDecl *VD = dyn_cast<VarDecl>(ND))
return EmitGlobalVarDeclLValue(*this, E, VD);
@@ -2567,7 +2529,8 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
tbaa = CGM.getTBAAInfo(getContext().CharTy);
else
tbaa = CGM.getTBAAInfo(type);
- CGM.DecorateInstruction(load, tbaa);
+ if (tbaa)
+ CGM.DecorateInstruction(load, tbaa);
}
addr = load;
@@ -2580,7 +2543,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
cvr = 0; // qualifiers don't recursively apply to referencee
}
}
-
+
// Make sure that the address is pointing to the right type. This is critical
// for both unions and structs. A union needs a bitcast, a struct element
// will need a bitcast if the LLVM type laid out doesn't match the desired
@@ -2618,11 +2581,11 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
return LV;
}
-LValue
-CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
+LValue
+CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
const FieldDecl *Field) {
QualType FieldType = Field->getType();
-
+
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
@@ -2657,7 +2620,7 @@ LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr *E){
if (E->getType()->isVariablyModifiedType())
// make sure to emit the VLA size.
EmitVariablyModifiedType(E->getType());
-
+
llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr *InitExpr = E->getInitializer();
LValue Result = MakeAddrLValue(DeclPtr, E->getType());
@@ -2705,19 +2668,19 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
ConditionalEvaluation eval(*this);
EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock);
-
+
// Any temporaries created here are conditional.
EmitBlock(lhsBlock);
eval.begin(*this);
LValue lhs = EmitLValue(expr->getTrueExpr());
eval.end(*this);
-
+
if (!lhs.isSimple())
return EmitUnsupportedLValue(expr, "conditional operator");
lhsBlock = Builder.GetInsertBlock();
Builder.CreateBr(contBlock);
-
+
// Any temporaries created here are conditional.
EmitBlock(rhsBlock);
eval.begin(*this);
@@ -2746,26 +2709,6 @@ EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
switch (E->getCastKind()) {
case CK_ToVoid:
- return EmitUnsupportedLValue(E, "unexpected cast lvalue");
-
- case CK_Dependent:
- llvm_unreachable("dependent cast kind in IR gen!");
-
- case CK_BuiltinFnToFnPtr:
- llvm_unreachable("builtin functions are handled elsewhere");
-
- // These two casts are currently treated as no-ops, although they could
- // potentially be real operations depending on the target's ABI.
- case CK_NonAtomicToAtomic:
- case CK_AtomicToNonAtomic:
-
- case CK_NoOp:
- case CK_LValueToRValue:
- if (!E->getSubExpr()->Classify(getContext()).isPRValue()
- || E->getType()->isRecordType())
- return EmitLValue(E->getSubExpr());
- // Fall through to synthesize a temporary.
-
case CK_BitCast:
case CK_ArrayToPointerDecay:
case CK_FunctionToPointerDecay:
@@ -2799,16 +2742,20 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCProduceObject:
case CK_ARCConsumeObject:
case CK_ARCReclaimReturnedObject:
- case CK_ARCExtendBlockObject:
- case CK_CopyAndAutoreleaseBlockObject: {
- // These casts only produce lvalues when we're binding a reference to a
- // temporary realized from a (converted) pure rvalue. Emit the expression
- // as a value, copy it into a temporary, and return an lvalue referring to
- // that temporary.
- llvm::Value *V = CreateMemTemp(E->getType(), "ref.temp");
- EmitAnyExprToMem(E, V, E->getType().getQualifiers(), false);
- return MakeAddrLValue(V, E->getType());
- }
+ case CK_ARCExtendBlockObject:
+ case CK_CopyAndAutoreleaseBlockObject:
+ return EmitUnsupportedLValue(E, "unexpected cast lvalue");
+
+ case CK_Dependent:
+ llvm_unreachable("dependent cast kind in IR gen!");
+
+ case CK_BuiltinFnToFnPtr:
+ llvm_unreachable("builtin functions are handled elsewhere");
+
+ // These are never l-values; just use the aggregate emission code.
+ case CK_NonAtomicToAtomic:
+ case CK_AtomicToNonAtomic:
+ return EmitAggExprToLValue(E);
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
@@ -2821,53 +2768,55 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_UserDefinedConversion:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
+ case CK_NoOp:
+ case CK_LValueToRValue:
return EmitLValue(E->getSubExpr());
-
+
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
- const RecordType *DerivedClassTy =
+ const RecordType *DerivedClassTy =
E->getSubExpr()->getType()->getAs<RecordType>();
- CXXRecordDecl *DerivedClassDecl =
+ CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
-
+
LValue LV = EmitLValue(E->getSubExpr());
llvm::Value *This = LV.getAddress();
-
+
// Perform the derived-to-base conversion
- llvm::Value *Base =
- GetAddressOfBaseClass(This, DerivedClassDecl,
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, DerivedClassDecl,
E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
-
+
return MakeAddrLValue(Base, E->getType());
}
case CK_ToUnion:
return EmitAggExprToLValue(E);
case CK_BaseToDerived: {
const RecordType *DerivedClassTy = E->getType()->getAs<RecordType>();
- CXXRecordDecl *DerivedClassDecl =
+ CXXRecordDecl *DerivedClassDecl =
cast<CXXRecordDecl>(DerivedClassTy->getDecl());
-
+
LValue LV = EmitLValue(E->getSubExpr());
+ // Perform the base-to-derived conversion
+ llvm::Value *Derived =
+ GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
+ E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false);
+
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (SanitizePerformTypeCheck)
EmitTypeCheck(TCK_DowncastReference, E->getExprLoc(),
- LV.getAddress(), E->getType());
+ Derived, E->getType());
- // Perform the base-to-derived conversion
- llvm::Value *Derived =
- GetAddressOfDerivedClass(LV.getAddress(), DerivedClassDecl,
- E->path_begin(), E->path_end(),
- /*NullCheckValue=*/false);
-
return MakeAddrLValue(Derived, E->getType());
}
case CK_LValueBitCast: {
// This must be a reinterpret_cast (or c-style equivalent).
const ExplicitCastExpr *CE = cast<ExplicitCastExpr>(E);
-
+
LValue LV = EmitLValue(E->getSubExpr());
llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
ConvertType(CE->getTypeAsWritten()));
@@ -2876,23 +2825,15 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
QualType ToType = getContext().getLValueReferenceType(E->getType());
- llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
+ llvm::Value *V = Builder.CreateBitCast(LV.getAddress(),
ConvertType(ToType));
return MakeAddrLValue(V, E->getType());
}
case CK_ZeroToOCLEvent:
llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
}
-
- llvm_unreachable("Unhandled lvalue cast kind?");
-}
-LValue CodeGenFunction::EmitNullInitializationLValue(
- const CXXScalarValueInitExpr *E) {
- QualType Ty = E->getType();
- LValue LV = MakeAddrLValue(CreateMemTemp(Ty), Ty);
- EmitNullInitialization(LV.getAddress(), Ty);
- return LV;
+ llvm_unreachable("Unhandled lvalue cast kind?");
}
LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
@@ -2900,23 +2841,18 @@ LValue CodeGenFunction::EmitOpaqueValueLValue(const OpaqueValueExpr *e) {
return getOpaqueLValueMapping(e);
}
-LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
- const MaterializeTemporaryExpr *E) {
- RValue RV = EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
- return MakeAddrLValue(RV.getScalarVal(), E->getType());
-}
-
RValue CodeGenFunction::EmitRValueForField(LValue LV,
- const FieldDecl *FD) {
+ const FieldDecl *FD,
+ SourceLocation Loc) {
QualType FT = FD->getType();
LValue FieldLV = EmitLValueForField(LV, FD);
switch (getEvaluationKind(FT)) {
case TEK_Complex:
- return RValue::getComplex(EmitLoadOfComplex(FieldLV));
+ return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
case TEK_Aggregate:
return FieldLV.asAggregateRValue();
case TEK_Scalar:
- return EmitLoadOfLValue(FieldLV);
+ return EmitLoadOfLValue(FieldLV, Loc);
}
llvm_unreachable("bad evaluation kind");
}
@@ -2925,7 +2861,7 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
// Expression Emission
//===--------------------------------------------------------------------===//
-RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
+RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
if (CGDebugInfo *DI = getDebugInfo()) {
SourceLocation Loc = E->getLocStart();
@@ -2956,7 +2892,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (const CXXPseudoDestructorExpr *PseudoDtor
+ if (const CXXPseudoDestructorExpr *PseudoDtor
= dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
QualType DestroyedType = PseudoDtor->getDestroyedType();
if (getLangOpts().ObjCAutoRefCount &&
@@ -2969,7 +2905,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
Expr *BaseExpr = PseudoDtor->getBase();
llvm::Value *BaseValue = NULL;
Qualifiers BaseQuals;
-
+
// If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
if (PseudoDtor->isArrow()) {
BaseValue = EmitScalarExpr(BaseExpr);
@@ -2981,15 +2917,15 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
QualType BaseTy = BaseExpr->getType();
BaseQuals = BaseTy.getQualifiers();
}
-
+
switch (PseudoDtor->getDestroyedType().getObjCLifetime()) {
case Qualifiers::OCL_None:
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Autoreleasing:
break;
-
+
case Qualifiers::OCL_Strong:
- EmitARCRelease(Builder.CreateLoad(BaseValue,
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
PseudoDtor->getDestroyedType().isVolatileQualified()),
ARCPreciseLifetime);
break;
@@ -3003,16 +2939,16 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
// The result shall only be used as the operand for the function call
// operator (), and the result of such a call has type void. The only
// effect is the evaluation of the postfix-expression before the dot or
- // arrow.
+ // arrow.
EmitScalarExpr(E->getCallee());
}
-
+
return RValue::get(0);
}
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
- return EmitCall(E->getCallee()->getType(), Callee, ReturnValue,
- E->arg_begin(), E->arg_end(), TargetDecl);
+ return EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
+ ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
@@ -3068,7 +3004,7 @@ LValue CodeGenFunction::EmitCallExprLValue(const CallExpr *E) {
if (!RV.isScalar())
return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
-
+
assert(E->getCallReturnType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
@@ -3095,7 +3031,8 @@ CodeGenFunction::EmitCXXTypeidLValue(const CXXTypeidExpr *E) {
}
llvm::Value *CodeGenFunction::EmitCXXUuidofExpr(const CXXUuidofExpr *E) {
- return CGM.GetAddrOfUuidDescriptor(E);
+ return Builder.CreateBitCast(CGM.GetAddrOfUuidDescriptor(E),
+ ConvertType(E->getType())->getPointerTo());
}
LValue CodeGenFunction::EmitCXXUuidofLValue(const CXXUuidofExpr *E) {
@@ -3120,19 +3057,19 @@ CodeGenFunction::EmitLambdaLValue(const LambdaExpr *E) {
LValue CodeGenFunction::EmitObjCMessageExprLValue(const ObjCMessageExpr *E) {
RValue RV = EmitObjCMessageExpr(E);
-
+
if (!RV.isScalar())
return MakeAddrLValue(RV.getAggregateAddr(), E->getType());
-
+
assert(E->getMethodDecl()->getResultType()->isReferenceType() &&
"Can't have a scalar return unless the return type is a "
"reference type!");
-
+
return MakeAddrLValue(RV.getScalarVal(), E->getType());
}
LValue CodeGenFunction::EmitObjCSelectorLValue(const ObjCSelectorExpr *E) {
- llvm::Value *V =
+ llvm::Value *V =
CGM.getObjCRuntime().GetSelector(*this, E->getSelector(), true);
return MakeAddrLValue(V, E->getType());
}
@@ -3168,7 +3105,7 @@ LValue CodeGenFunction::EmitObjCIvarRefLValue(const ObjCIvarRefExpr *E) {
BaseQuals = ObjectTy.getQualifiers();
}
- LValue LV =
+ LValue LV =
EmitLValueForIvar(ObjectTy, BaseValue, E->getDecl(),
BaseQuals.getCVRQualifiers());
setObjCGCLValueClass(getContext(), E, LV);
@@ -3182,6 +3119,7 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+ SourceLocation CallLoc,
ReturnValueSlot ReturnValue,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
@@ -3196,8 +3134,60 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const FunctionType *FnType
= cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ // Force column info to differentiate multiple inlined call sites on
+  // the same line, analogous to EmitCallExpr.
+ bool ForceColumnInfo = false;
+ if (const FunctionDecl* FD = dyn_cast_or_null<const FunctionDecl>(TargetDecl))
+ ForceColumnInfo = FD->isInlineSpecified();
+
+ if (getLangOpts().CPlusPlus && SanOpts->Function &&
+ (!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
+ if (llvm::Constant *PrefixSig =
+ CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
+ llvm::Constant *FTRTTIConst =
+ CGM.GetAddrOfRTTIDescriptor(QualType(FnType, 0), /*ForEH=*/true);
+ llvm::Type *PrefixStructTyElems[] = {
+ PrefixSig->getType(),
+ FTRTTIConst->getType()
+ };
+ llvm::StructType *PrefixStructTy = llvm::StructType::get(
+ CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
+
+ llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
+ Callee, llvm::PointerType::getUnqual(PrefixStructTy));
+ llvm::Value *CalleeSigPtr =
+ Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 0);
+ llvm::Value *CalleeSig = Builder.CreateLoad(CalleeSigPtr);
+ llvm::Value *CalleeSigMatch = Builder.CreateICmpEQ(CalleeSig, PrefixSig);
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ llvm::BasicBlock *TypeCheck = createBasicBlock("typecheck");
+ Builder.CreateCondBr(CalleeSigMatch, TypeCheck, Cont);
+
+ EmitBlock(TypeCheck);
+ llvm::Value *CalleeRTTIPtr =
+ Builder.CreateConstGEP2_32(CalleePrefixStruct, 0, 1);
+ llvm::Value *CalleeRTTI = Builder.CreateLoad(CalleeRTTIPtr);
+ llvm::Value *CalleeRTTIMatch =
+ Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(CallLoc),
+ EmitCheckTypeDescriptor(CalleeType)
+ };
+ EmitCheck(CalleeRTTIMatch,
+ "function_type_mismatch",
+ StaticData,
+ Callee,
+ CRK_Recoverable);
+
+ Builder.CreateBr(Cont);
+ EmitBlock(Cont);
+ }
+ }
+
CallArgList Args;
- EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd);
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd,
+ ForceColumnInfo);
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
@@ -3250,15 +3240,16 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
/// Given the address of a temporary variable, produce an r-value of
/// its type.
RValue CodeGenFunction::convertTempToRValue(llvm::Value *addr,
- QualType type) {
+ QualType type,
+ SourceLocation loc) {
LValue lvalue = MakeNaturalAlignAddrLValue(addr, type);
switch (getEvaluationKind(type)) {
case TEK_Complex:
- return RValue::getComplex(EmitLoadOfComplex(lvalue));
+ return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
case TEK_Aggregate:
return lvalue.asAggregateRValue();
case TEK_Scalar:
- return RValue::get(EmitLoadOfScalar(lvalue));
+ return RValue::get(EmitLoadOfScalar(lvalue, loc));
}
llvm_unreachable("bad evaluation kind");
}
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index b974e1dcc682..9d0f3a9661a6 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -29,14 +29,6 @@ using namespace CodeGen;
// Aggregate Expression Emitter
//===----------------------------------------------------------------------===//
-llvm::Value *AggValueSlot::getPaddedAtomicAddr() const {
- assert(isValueOfAtomic());
- llvm::GEPOperator *op = cast<llvm::GEPOperator>(getAddr());
- assert(op->getNumIndices() == 2);
- assert(op->hasAllZeroIndices());
- return op->getPointerOperand();
-}
-
namespace {
class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
CodeGenFunction &CGF;
@@ -91,7 +83,6 @@ public:
void EmitMoveFromReturnSlot(const Expr *E, RValue Src);
- void EmitStdInitializerList(llvm::Value *DestPtr, InitListExpr *InitList);
void EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
QualType elementType, InitListExpr *E);
@@ -177,6 +168,7 @@ public:
void VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E);
void VisitCXXConstructExpr(const CXXConstructExpr *E);
void VisitLambdaExpr(LambdaExpr *E);
+ void VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E);
void VisitExprWithCleanups(ExprWithCleanups *E);
void VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
void VisitCXXTypeidExpr(CXXTypeidExpr *E) { EmitAggLoadOfLValue(E); }
@@ -202,38 +194,6 @@ public:
CGF.EmitAtomicExpr(E, EnsureSlot(E->getType()).getAddr());
}
};
-
-/// A helper class for emitting expressions into the value sub-object
-/// of a padded atomic type.
-class ValueDestForAtomic {
- AggValueSlot Dest;
-public:
- ValueDestForAtomic(CodeGenFunction &CGF, AggValueSlot dest, QualType type)
- : Dest(dest) {
- assert(!Dest.isValueOfAtomic());
- if (!Dest.isIgnored() && CGF.CGM.isPaddedAtomicType(type)) {
- llvm::Value *valueAddr = CGF.Builder.CreateStructGEP(Dest.getAddr(), 0);
- Dest = AggValueSlot::forAddr(valueAddr,
- Dest.getAlignment(),
- Dest.getQualifiers(),
- Dest.isExternallyDestructed(),
- Dest.requiresGCollection(),
- Dest.isPotentiallyAliased(),
- Dest.isZeroed(),
- AggValueSlot::IsValueOfAtomic);
- }
- }
-
- const AggValueSlot &getDest() const { return Dest; }
-
- ~ValueDestForAtomic() {
- // Kill the GEP if we made one and it didn't end up used.
- if (Dest.isValueOfAtomic()) {
- llvm::Instruction *addr = cast<llvm::GetElementPtrInst>(Dest.getAddr());
- if (addr->use_empty()) addr->eraseFromParent();
- }
- }
-};
} // end anonymous namespace.
//===----------------------------------------------------------------------===//
@@ -248,8 +208,7 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
// If the type of the l-value is atomic, then do an atomic load.
if (LV.getType()->isAtomicType()) {
- ValueDestForAtomic valueDest(CGF, Dest, LV.getType());
- CGF.EmitAtomicLoad(LV, valueDest.getDest());
+ CGF.EmitAtomicLoad(LV, E->getExprLoc(), Dest);
return;
}
@@ -345,89 +304,70 @@ void AggExprEmitter::EmitCopy(QualType type, const AggValueSlot &dest,
std::min(dest.getAlignment(), src.getAlignment()));
}
-static QualType GetStdInitializerListElementType(QualType T) {
- // Just assume that this is really std::initializer_list.
- ClassTemplateSpecializationDecl *specialization =
- cast<ClassTemplateSpecializationDecl>(T->castAs<RecordType>()->getDecl());
- return specialization->getTemplateArgs()[0].getAsType();
-}
-
-/// \brief Prepare cleanup for the temporary array.
-static void EmitStdInitializerListCleanup(CodeGenFunction &CGF,
- QualType arrayType,
- llvm::Value *addr,
- const InitListExpr *initList) {
- QualType::DestructionKind dtorKind = arrayType.isDestructedType();
- if (!dtorKind)
- return; // Type doesn't need destroying.
- if (dtorKind != QualType::DK_cxx_destructor) {
- CGF.ErrorUnsupported(initList, "ObjC ARC type in initializer_list");
- return;
- }
-
- CodeGenFunction::Destroyer *destroyer = CGF.getDestroyer(dtorKind);
- CGF.pushDestroy(NormalAndEHCleanup, addr, arrayType, destroyer,
- /*EHCleanup=*/true);
-}
-
/// \brief Emit the initializer for a std::initializer_list initialized with a
/// real initializer list.
-void AggExprEmitter::EmitStdInitializerList(llvm::Value *destPtr,
- InitListExpr *initList) {
- // We emit an array containing the elements, then have the init list point
- // at the array.
- ASTContext &ctx = CGF.getContext();
- unsigned numInits = initList->getNumInits();
- QualType element = GetStdInitializerListElementType(initList->getType());
- llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
- QualType array = ctx.getConstantArrayType(element, size, ArrayType::Normal,0);
- llvm::Type *LTy = CGF.ConvertTypeForMem(array);
- llvm::AllocaInst *alloc = CGF.CreateTempAlloca(LTy);
- alloc->setAlignment(ctx.getTypeAlignInChars(array).getQuantity());
- alloc->setName(".initlist.");
-
- EmitArrayInit(alloc, cast<llvm::ArrayType>(LTy), element, initList);
-
- // FIXME: The diagnostics are somewhat out of place here.
- RecordDecl *record = initList->getType()->castAs<RecordType>()->getDecl();
- RecordDecl::field_iterator field = record->field_begin();
- if (field == record->field_end()) {
- CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+void
+AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
+ // Emit an array containing the elements. The array is externally destructed
+ // if the std::initializer_list object is.
+ ASTContext &Ctx = CGF.getContext();
+ LValue Array = CGF.EmitLValue(E->getSubExpr());
+ assert(Array.isSimple() && "initializer_list array not a simple lvalue");
+ llvm::Value *ArrayPtr = Array.getAddress();
+
+ const ConstantArrayType *ArrayType =
+ Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
+ assert(ArrayType && "std::initializer_list constructed from non-array");
+
+ // FIXME: Perform the checks on the field types in SemaInit.
+ RecordDecl *Record = E->getType()->castAs<RecordType>()->getDecl();
+ RecordDecl::field_iterator Field = Record->field_begin();
+ if (Field == Record->field_end()) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
- QualType elementPtr = ctx.getPointerType(element.withConst());
-
// Start pointer.
- if (!ctx.hasSameType(field->getType(), elementPtr)) {
- CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ if (!Field->getType()->isPointerType() ||
+ !Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType())) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
- LValue DestLV = CGF.MakeNaturalAlignAddrLValue(destPtr, initList->getType());
- LValue start = CGF.EmitLValueForFieldInitialization(DestLV, *field);
- llvm::Value *arrayStart = Builder.CreateStructGEP(alloc, 0, "arraystart");
- CGF.EmitStoreThroughLValue(RValue::get(arrayStart), start);
- ++field;
-
- if (field == record->field_end()) {
- CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+
+ AggValueSlot Dest = EnsureSlot(E->getType());
+ LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
+ Dest.getAlignment());
+ LValue Start = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
+ llvm::Value *Zero = llvm::ConstantInt::get(CGF.PtrDiffTy, 0);
+ llvm::Value *IdxStart[] = { Zero, Zero };
+ llvm::Value *ArrayStart =
+ Builder.CreateInBoundsGEP(ArrayPtr, IdxStart, "arraystart");
+ CGF.EmitStoreThroughLValue(RValue::get(ArrayStart), Start);
+ ++Field;
+
+ if (Field == Record->field_end()) {
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
- LValue endOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *field);
- if (ctx.hasSameType(field->getType(), elementPtr)) {
+
+ llvm::Value *Size = Builder.getInt(ArrayType->getSize());
+ LValue EndOrLength = CGF.EmitLValueForFieldInitialization(DestLV, *Field);
+ if (Field->getType()->isPointerType() &&
+ Ctx.hasSameType(Field->getType()->getPointeeType(),
+ ArrayType->getElementType())) {
// End pointer.
- llvm::Value *arrayEnd = Builder.CreateStructGEP(alloc,numInits, "arrayend");
- CGF.EmitStoreThroughLValue(RValue::get(arrayEnd), endOrLength);
- } else if(ctx.hasSameType(field->getType(), ctx.getSizeType())) {
+ llvm::Value *IdxEnd[] = { Zero, Size };
+ llvm::Value *ArrayEnd =
+ Builder.CreateInBoundsGEP(ArrayPtr, IdxEnd, "arrayend");
+ CGF.EmitStoreThroughLValue(RValue::get(ArrayEnd), EndOrLength);
+ } else if (Ctx.hasSameType(Field->getType(), Ctx.getSizeType())) {
// Length.
- CGF.EmitStoreThroughLValue(RValue::get(Builder.getInt(size)), endOrLength);
+ CGF.EmitStoreThroughLValue(RValue::get(Size), EndOrLength);
} else {
- CGF.ErrorUnsupported(initList, "weird std::initializer_list");
+ CGF.ErrorUnsupported(E, "weird std::initializer_list");
return;
}
-
- if (!Dest.isExternallyDestructed())
- EmitStdInitializerListCleanup(CGF, array, alloc, initList);
}
/// \brief Emit initialization of an array from an initializer list.
@@ -490,15 +430,8 @@ void AggExprEmitter::EmitArrayInit(llvm::Value *DestPtr, llvm::ArrayType *AType,
if (endOfInit) Builder.CreateStore(element, endOfInit);
}
- // If these are nested std::initializer_list inits, do them directly,
- // because they are conceptually the same "location".
- InitListExpr *initList = dyn_cast<InitListExpr>(E->getInit(i));
- if (initList && initList->initializesStdInitializerList()) {
- EmitStdInitializerList(element, initList);
- } else {
- LValue elementLV = CGF.MakeAddrLValue(element, elementType);
- EmitInitializationToLValue(E->getInit(i), elementLV);
- }
+ LValue elementLV = CGF.MakeAddrLValue(element, elementType);
+ EmitInitializationToLValue(E->getInit(i), elementLV);
}
// Check whether there's a non-trivial array-fill expression.
@@ -679,34 +612,33 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
// If we're converting an r-value of non-atomic type to an r-value
- // of atomic type, just make an atomic temporary, emit into that,
- // and then copy the value out. (FIXME: do we need to
- // zero-initialize it first?)
+ // of atomic type, just emit directly into the relevant sub-object.
if (isToAtomic) {
- ValueDestForAtomic valueDest(CGF, Dest, atomicType);
- CGF.EmitAggExpr(E->getSubExpr(), valueDest.getDest());
+ AggValueSlot valueDest = Dest;
+ if (!valueDest.isIgnored() && CGF.CGM.isPaddedAtomicType(atomicType)) {
+      // Zero-initialize. (Strictly speaking, we only need to initialize
+ // the padding at the end, but this is simpler.)
+ if (!Dest.isZeroed())
+ CGF.EmitNullInitialization(Dest.getAddr(), atomicType);
+
+ // Build a GEP to refer to the subobject.
+ llvm::Value *valueAddr =
+ CGF.Builder.CreateStructGEP(valueDest.getAddr(), 0);
+ valueDest = AggValueSlot::forAddr(valueAddr,
+ valueDest.getAlignment(),
+ valueDest.getQualifiers(),
+ valueDest.isExternallyDestructed(),
+ valueDest.requiresGCollection(),
+ valueDest.isPotentiallyAliased(),
+ AggValueSlot::IsZeroed);
+ }
+
+ CGF.EmitAggExpr(E->getSubExpr(), valueDest);
return;
}
// Otherwise, we're converting an atomic type to a non-atomic type.
-
- // If the dest is a value-of-atomic subobject, drill back out.
- if (Dest.isValueOfAtomic()) {
- AggValueSlot atomicSlot =
- AggValueSlot::forAddr(Dest.getPaddedAtomicAddr(),
- Dest.getAlignment(),
- Dest.getQualifiers(),
- Dest.isExternallyDestructed(),
- Dest.requiresGCollection(),
- Dest.isPotentiallyAliased(),
- Dest.isZeroed(),
- AggValueSlot::IsNotValueOfAtomic);
- CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
- return;
- }
-
- // Otherwise, make an atomic temporary, emit into that, and then
- // copy the value out.
+ // Make an atomic temporary, emit into that, and then copy the value out.
AggValueSlot atomicSlot =
CGF.CreateAggTemp(atomicType, "atomic-to-nonatomic.temp");
CGF.EmitAggExpr(E->getSubExpr(), atomicSlot);
@@ -988,7 +920,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
}
void AggExprEmitter::VisitChooseExpr(const ChooseExpr *CE) {
- Visit(CE->getChosenSubExpr(CGF.getContext()));
+ Visit(CE->getChosenSubExpr());
}
void AggExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
@@ -1078,7 +1010,7 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
void
-AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
+AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
QualType type = LV.getType();
// FIXME: Ignore result?
// FIXME: Are initializers affected by volatile?
@@ -1088,7 +1020,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr* E, LValue LV) {
} else if (isa<ImplicitValueInitExpr>(E) || isa<CXXScalarValueInitExpr>(E)) {
return EmitNullInitializationToLValue(LV);
} else if (type->isReferenceType()) {
- RValue RV = CGF.EmitReferenceBindingToExpr(E, /*InitializedDecl=*/0);
+ RValue RV = CGF.EmitReferenceBindingToExpr(E);
return CGF.EmitStoreThroughLValue(RV, LV);
}
@@ -1159,12 +1091,8 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
- if (E->initializesStdInitializerList()) {
- EmitStdInitializerList(Dest.getAddr(), E);
- return;
- }
-
AggValueSlot Dest = EnsureSlot(E->getType());
+
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddr(), E->getType(),
Dest.getAlignment());
@@ -1545,58 +1473,3 @@ void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
alignment.getQuantity(), isVolatile,
/*TBAATag=*/0, TBAAStructTag);
}
-
-void CodeGenFunction::MaybeEmitStdInitializerListCleanup(llvm::Value *loc,
- const Expr *init) {
- const ExprWithCleanups *cleanups = dyn_cast<ExprWithCleanups>(init);
- if (cleanups)
- init = cleanups->getSubExpr();
-
- if (isa<InitListExpr>(init) &&
- cast<InitListExpr>(init)->initializesStdInitializerList()) {
- // We initialized this std::initializer_list with an initializer list.
- // A backing array was created. Push a cleanup for it.
- EmitStdInitializerListCleanup(loc, cast<InitListExpr>(init));
- }
-}
-
-static void EmitRecursiveStdInitializerListCleanup(CodeGenFunction &CGF,
- llvm::Value *arrayStart,
- const InitListExpr *init) {
- // Check if there are any recursive cleanups to do, i.e. if we have
- // std::initializer_list<std::initializer_list<obj>> list = {{obj()}};
- // then we need to destroy the inner array as well.
- for (unsigned i = 0, e = init->getNumInits(); i != e; ++i) {
- const InitListExpr *subInit = dyn_cast<InitListExpr>(init->getInit(i));
- if (!subInit || !subInit->initializesStdInitializerList())
- continue;
-
- // This one needs to be destroyed. Get the address of the std::init_list.
- llvm::Value *offset = llvm::ConstantInt::get(CGF.SizeTy, i);
- llvm::Value *loc = CGF.Builder.CreateInBoundsGEP(arrayStart, offset,
- "std.initlist");
- CGF.EmitStdInitializerListCleanup(loc, subInit);
- }
-}
-
-void CodeGenFunction::EmitStdInitializerListCleanup(llvm::Value *loc,
- const InitListExpr *init) {
- ASTContext &ctx = getContext();
- QualType element = GetStdInitializerListElementType(init->getType());
- unsigned numInits = init->getNumInits();
- llvm::APInt size(ctx.getTypeSize(ctx.getSizeType()), numInits);
- QualType array =ctx.getConstantArrayType(element, size, ArrayType::Normal, 0);
- QualType arrayPtr = ctx.getPointerType(array);
- llvm::Type *arrayPtrType = ConvertType(arrayPtr);
-
- // lvalue is the location of a std::initializer_list, which as its first
- // element has a pointer to the array we want to destroy.
- llvm::Value *startPointer = Builder.CreateStructGEP(loc, 0, "startPointer");
- llvm::Value *startAddress = Builder.CreateLoad(startPointer, "startAddress");
-
- ::EmitRecursiveStdInitializerListCleanup(*this, startAddress, init);
-
- llvm::Value *arrayAddress =
- Builder.CreateBitCast(startAddress, arrayPtrType, "arrayAddress");
- ::EmitStdInitializerListCleanup(*this, array, arrayAddress, init);
-}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 83c8ace98cd4..cc7b24d5e83e 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -16,6 +16,7 @@
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Support/CallSite.h"
@@ -62,99 +63,6 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
Callee, ReturnValue, Args, MD);
}
-// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
-// quite what we want.
-static const Expr *skipNoOpCastsAndParens(const Expr *E) {
- while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- continue;
- }
-
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_Extension) {
- E = UO->getSubExpr();
- continue;
- }
- }
- return E;
- }
-}
-
-/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
-/// expr can be devirtualized.
-static bool canDevirtualizeMemberFunctionCalls(ASTContext &Context,
- const Expr *Base,
- const CXXMethodDecl *MD) {
-
- // When building with -fapple-kext, all calls must go through the vtable since
- // the kernel linker can do runtime patching of vtables.
- if (Context.getLangOpts().AppleKext)
- return false;
-
- // If the most derived class is marked final, we know that no subclass can
- // override this member function and so we can devirtualize it. For example:
- //
- // struct A { virtual void f(); }
- // struct B final : A { };
- //
- // void f(B *b) {
- // b->f();
- // }
- //
- const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
- if (MostDerivedClassDecl->hasAttr<FinalAttr>())
- return true;
-
- // If the member function is marked 'final', we know that it can't be
- // overridden and can therefore devirtualize it.
- if (MD->hasAttr<FinalAttr>())
- return true;
-
- // Similarly, if the class itself is marked 'final' it can't be overridden
- // and we can therefore devirtualize the member function call.
- if (MD->getParent()->hasAttr<FinalAttr>())
- return true;
-
- Base = skipNoOpCastsAndParens(Base);
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- // This is a record decl. We know the type and can devirtualize it.
- return VD->getType()->isRecordType();
- }
-
- return false;
- }
-
- // We can devirtualize calls on an object accessed by a class member access
- // expression, since by C++11 [basic.life]p6 we know that it can't refer to
- // a derived class object constructed in the same location.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Base))
- if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
- return VD->getType()->isRecordType();
-
- // We can always devirtualize calls on temporary object expressions.
- if (isa<CXXConstructExpr>(Base))
- return true;
-
- // And calls on bound temporaries.
- if (isa<CXXBindTemporaryExpr>(Base))
- return true;
-
- // Check if this is a call expr that returns a record type.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
- return CE->getCallReturnType()->isRecordType();
-
- // We can't devirtualize the call.
- return false;
-}
-
static CXXRecordDecl *getCXXRecord(const Expr *E) {
QualType T = E->getType();
if (const PointerType *PTy = T->getAs<PointerType>())
@@ -175,22 +83,12 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
const MemberExpr *ME = cast<MemberExpr>(callee);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
- CGDebugInfo *DI = getDebugInfo();
- if (DI &&
- CGM.getCodeGenOpts().getDebugInfo() == CodeGenOptions::LimitedDebugInfo &&
- !isa<CallExpr>(ME->getBase())) {
- QualType PQTy = ME->getBase()->IgnoreParenImpCasts()->getType();
- if (const PointerType * PTy = dyn_cast<PointerType>(PQTy)) {
- DI->getOrCreateRecordType(PTy->getPointeeType(),
- MD->getParent()->getLocation());
- }
- }
-
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
return EmitCall(getContext().getPointerType(MD->getType()), Callee,
- ReturnValue, CE->arg_begin(), CE->arg_end());
+ CE->getLocStart(), ReturnValue, CE->arg_begin(),
+ CE->arg_end());
}
// Compute the object pointer.
@@ -198,8 +96,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
const CXXMethodDecl *DevirtualizedMethod = NULL;
- if (CanUseVirtualCall &&
- canDevirtualizeMemberFunctionCalls(getContext(), Base, MD)) {
+ if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
DevirtualizedMethod = MD->getCorrespondingMethodInClass(BestDynamicDecl);
assert(DevirtualizedMethod);
@@ -271,7 +168,7 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
else
FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+ llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
@@ -280,34 +177,37 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
-
llvm::Value *Callee;
+
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
+ assert(CE->arg_begin() == CE->arg_end() &&
+ "Destructor shouldn't have explicit parameters");
+ assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
- assert(CE->arg_begin() == CE->arg_end() &&
- "Virtual destructor shouldn't have explicit parameters");
- return CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor,
- Dtor_Complete,
- CE->getExprLoc(),
- ReturnValue, This);
+ CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
+ CE->getExprLoc(), This);
} else {
if (getLangOpts().AppleKext &&
MD->isVirtual() &&
ME->hasQualifier())
Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Dtor, Dtor_Complete), Ty);
+ Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
+ EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
+ /*ImplicitParam=*/0, QualType(), 0, 0);
}
- } else if (const CXXConstructorDecl *Ctor =
- dyn_cast<CXXConstructorDecl>(MD)) {
+ return RValue::get(0);
+ }
+
+ if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
} else if (UseVirtualCall) {
- Callee = BuildVirtualCall(MD, This, Ty);
+ Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
} else {
if (getLangOpts().AppleKext &&
MD->isVirtual() &&
@@ -320,6 +220,9 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
}
}
+ if (MD->isVirtual())
+ This = CGM.getCXXABI().adjustThisArgumentForVirtualCall(*this, MD, This);
+
return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
/*ImplicitParam=*/0, QualType(),
CE->arg_begin(), CE->arg_end());
@@ -371,8 +274,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required), Callee,
- ReturnValue, Args);
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+ Callee, ReturnValue, Args);
}
RValue
@@ -540,8 +443,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
assert(!getContext().getAsConstantArrayType(E->getType())
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
- EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src,
- E->arg_begin(), E->arg_end());
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
@@ -818,7 +720,7 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
QualType AllocType, llvm::Value *NewPtr) {
-
+ // FIXME: Refactor with EmitExprAsInit.
CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
@@ -838,8 +740,6 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased);
CGF.EmitAggExpr(Init, Slot);
-
- CGF.MaybeEmitStdInitializerListCleanup(NewPtr, Init);
return;
}
}
@@ -866,10 +766,22 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
QualType::DestructionKind dtorKind = elementType.isDestructedType();
EHScopeStack::stable_iterator cleanup;
llvm::Instruction *cleanupDominator = 0;
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
initializerElements = ILE->getNumInits();
+ // If this is a multi-dimensional array new, we will initialize multiple
+ // elements with each init list element.
+ QualType AllocType = E->getAllocatedType();
+ if (const ConstantArrayType *CAT = dyn_cast_or_null<ConstantArrayType>(
+ AllocType->getAsArrayTypeUnsafe())) {
+ unsigned AS = explicitPtr->getType()->getPointerAddressSpace();
+ llvm::Type *AllocPtrTy = ConvertTypeForMem(AllocType)->getPointerTo(AS);
+ explicitPtr = Builder.CreateBitCast(explicitPtr, AllocPtrTy);
+ initializerElements *= getContext().getConstantArrayElementCount(CAT);
+ }
+
// Enter a partial-destruction cleanup if necessary.
if (needsEHCleanup(dtorKind)) {
// In principle we could tell the cleanup where we are more
@@ -888,12 +800,16 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
// element. TODO: some of these stores can be trivially
// observed to be unnecessary.
if (endOfInit) Builder.CreateStore(explicitPtr, endOfInit);
- StoreAnyExprIntoOneUnit(*this, ILE->getInit(i), elementType, explicitPtr);
- explicitPtr =Builder.CreateConstGEP1_32(explicitPtr, 1, "array.exp.next");
+ StoreAnyExprIntoOneUnit(*this, ILE->getInit(i),
+ ILE->getInit(i)->getType(), explicitPtr);
+ explicitPtr = Builder.CreateConstGEP1_32(explicitPtr, 1,
+ "array.exp.next");
}
// The remaining elements are filled with the array filler expression.
Init = ILE->getArrayFiller();
+
+ explicitPtr = Builder.CreateBitCast(explicitPtr, beginPtr->getType());
}
// Create the continuation block.
@@ -1012,6 +928,41 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
StoreAnyExprIntoOneUnit(CGF, Init, E->getAllocatedType(), NewPtr);
}
+/// Emit a call to an operator new or operator delete function, as implicitly
+/// created by new-expressions and delete-expressions.
+static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
+ const FunctionDecl *Callee,
+ const FunctionProtoType *CalleeType,
+ const CallArgList &Args) {
+ llvm::Instruction *CallOrInvoke;
+ llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
+ RValue RV =
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
+ CalleeAddr, ReturnValueSlot(), Args,
+ Callee, &CallOrInvoke);
+
+ /// C++1y [expr.new]p10:
+ /// [In a new-expression,] an implementation is allowed to omit a call
+ /// to a replaceable global allocation function.
+ ///
+ /// We model such elidable calls with the 'builtin' attribute.
+ llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
+ if (Callee->isReplaceableGlobalAllocationFunction() &&
+ Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
+ // FIXME: Add addAttribute to CallSite.
+ if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
+ CI->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::Builtin);
+ else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
+ II->addAttribute(llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::Builtin);
+ else
+ llvm_unreachable("unexpected kind of call instruction");
+ }
+
+ return RV;
+}
+
namespace {
/// A cleanup to call the given 'operator delete' function upon
/// abnormal exit from a new expression.
@@ -1061,9 +1012,7 @@ namespace {
DeleteArgs.add(getPlacementArgs()[I], *AI++);
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
}
};
@@ -1122,9 +1071,7 @@ namespace {
}
// Call 'operator delete'.
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, FPT),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), DeleteArgs, OperatorDelete);
+ EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
}
};
}
@@ -1237,10 +1184,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// TODO: kill any unnecessary computations done for the size
// argument.
} else {
- RV = EmitCall(CGM.getTypes().arrangeFreeFunctionCall(allocatorArgs,
- allocatorType),
- CGM.GetAddrOfFunction(allocator), ReturnValueSlot(),
- allocatorArgs, allocator);
+ RV = EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
}
// Emit a null check on the allocation result if the allocation
@@ -1360,9 +1304,7 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
DeleteArgs.add(RValue::get(Size), SizeTy);
// Emit the call to delete.
- EmitCall(CGM.getTypes().arrangeFreeFunctionCall(DeleteArgs, DeleteFTy),
- CGM.GetAddrOfFunction(DeleteFD), ReturnValueSlot(),
- DeleteArgs, DeleteFD);
+ EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
}
namespace {
@@ -1415,8 +1357,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
// FIXME: Provide a source location here.
CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
- SourceLocation(),
- ReturnValueSlot(), Ptr);
+ SourceLocation(), Ptr);
if (UseGlobalDelete) {
CGF.PopCleanupBlock();
@@ -1519,9 +1460,7 @@ namespace {
}
// Emit the call to delete.
- CGF.EmitCall(CGF.getTypes().arrangeFreeFunctionCall(Args, DeleteFTy),
- CGF.CGM.GetAddrOfFunction(OperatorDelete),
- ReturnValueSlot(), Args, OperatorDelete);
+ EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
}
};
}
@@ -1667,8 +1606,8 @@ llvm::Value *CodeGenFunction::EmitCXXTypeidExpr(const CXXTypeidExpr *E) {
ConvertType(E->getType())->getPointerTo();
if (E->isTypeOperand()) {
- llvm::Constant *TypeInfo =
- CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand());
+ llvm::Constant *TypeInfo =
+ CGM.GetAddrOfRTTIDescriptor(E->getTypeOperand(getContext()));
return Builder.CreateBitCast(TypeInfo, StdTypeInfoPtrTy);
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 36f974a31329..73d5bcb13a95 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -18,6 +18,7 @@
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
+#include <algorithm>
using namespace clang;
using namespace CodeGen;
@@ -69,10 +70,10 @@ public:
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
ComplexPairTy EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(CGF.EmitLValue(E));
+ return EmitLoadOfLValue(CGF.EmitLValue(E), E->getExprLoc());
}
- ComplexPairTy EmitLoadOfLValue(LValue LV);
+ ComplexPairTy EmitLoadOfLValue(LValue LV, SourceLocation Loc);
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
@@ -81,6 +82,9 @@ public:
/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
ComplexPairTy EmitComplexToComplexCast(ComplexPairTy Val, QualType SrcType,
QualType DestType);
+  /// EmitScalarToComplexCast - Emit a cast from scalar value Val to DestType.
+ ComplexPairTy EmitScalarToComplexCast(llvm::Value *Val, QualType SrcType,
+ QualType DestType);
//===--------------------------------------------------------------------===//
// Visitor Methods
@@ -109,11 +113,12 @@ public:
ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
if (result.isReference())
- return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
+ E->getExprLoc());
- llvm::ConstantStruct *pair =
- cast<llvm::ConstantStruct>(result.getValue());
- return ComplexPairTy(pair->getOperand(0), pair->getOperand(1));
+ llvm::Constant *pair = result.getValue();
+ return ComplexPairTy(pair->getAggregateElement(0U),
+ pair->getAggregateElement(1U));
}
return EmitLoadOfLValue(E);
}
@@ -127,7 +132,7 @@ public:
ComplexPairTy VisitMemberExpr(const Expr *E) { return EmitLoadOfLValue(E); }
ComplexPairTy VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
return CGF.getOpaqueRValueMapping(E).getComplexVal();
}
@@ -215,7 +220,7 @@ public:
LValue EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &),
- ComplexPairTy &Val);
+ RValue &Val);
ComplexPairTy EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)
(const BinOpInfo &));
@@ -287,26 +292,34 @@ public:
/// EmitLoadOfLValue - Given an RValue reference for a complex, emit code to
/// load the real and imaginary pieces, returning them as Real/Imag.
-ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue) {
+ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
+ SourceLocation loc) {
assert(lvalue.isSimple() && "non-simple complex l-value?");
if (lvalue.getType()->isAtomicType())
- return CGF.EmitAtomicLoad(lvalue).getComplexVal();
+ return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
llvm::Value *SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
+ unsigned AlignR = lvalue.getAlignment().getQuantity();
+ ASTContext &C = CGF.getContext();
+ QualType ComplexTy = lvalue.getType();
+ unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
+ unsigned AlignI = std::min(AlignR, ComplexAlign);
llvm::Value *Real=0, *Imag=0;
if (!IgnoreReal || isVolatile) {
llvm::Value *RealP = Builder.CreateStructGEP(SrcPtr, 0,
SrcPtr->getName() + ".realp");
- Real = Builder.CreateLoad(RealP, isVolatile, SrcPtr->getName() + ".real");
+ Real = Builder.CreateAlignedLoad(RealP, AlignR, isVolatile,
+ SrcPtr->getName() + ".real");
}
if (!IgnoreImag || isVolatile) {
llvm::Value *ImagP = Builder.CreateStructGEP(SrcPtr, 1,
SrcPtr->getName() + ".imagp");
- Imag = Builder.CreateLoad(ImagP, isVolatile, SrcPtr->getName() + ".imag");
+ Imag = Builder.CreateAlignedLoad(ImagP, AlignI, isVolatile,
+ SrcPtr->getName() + ".imag");
}
return ComplexPairTy(Real, Imag);
}
@@ -322,10 +335,16 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val,
llvm::Value *Ptr = lvalue.getAddress();
llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, "real");
llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, "imag");
-
- // TODO: alignment
- Builder.CreateStore(Val.first, RealPtr, lvalue.isVolatileQualified());
- Builder.CreateStore(Val.second, ImagPtr, lvalue.isVolatileQualified());
+ unsigned AlignR = lvalue.getAlignment().getQuantity();
+ ASTContext &C = CGF.getContext();
+ QualType ComplexTy = lvalue.getType();
+ unsigned ComplexAlign = C.getTypeAlignInChars(ComplexTy).getQuantity();
+ unsigned AlignI = std::min(AlignR, ComplexAlign);
+
+ Builder.CreateAlignedStore(Val.first, RealPtr, AlignR,
+ lvalue.isVolatileQualified());
+ Builder.CreateAlignedStore(Val.second, ImagPtr, AlignI,
+ lvalue.isVolatileQualified());
}
@@ -358,7 +377,10 @@ ComplexPairTy ComplexExprEmitter::VisitCallExpr(const CallExpr *E) {
ComplexPairTy ComplexExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- return CGF.EmitCompoundStmt(*E->getSubStmt(), true).getComplexVal();
+ llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(), true);
+ assert(RetAlloca && "Expected complex return value");
+ return EmitLoadOfLValue(CGF.MakeAddrLValue(RetAlloca, E->getType()),
+ E->getExprLoc());
}
/// EmitComplexToComplexCast - Emit a cast from complex value Val to DestType.
@@ -377,6 +399,17 @@ ComplexPairTy ComplexExprEmitter::EmitComplexToComplexCast(ComplexPairTy Val,
return Val;
}
+ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
+ QualType SrcType,
+ QualType DestType) {
+ // Convert the input element to the element type of the complex.
+ DestType = DestType->castAs<ComplexType>()->getElementType();
+ Val = CGF.EmitScalarConversion(Val, SrcType, DestType);
+
+ // Return (realval, 0).
+ return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
+}
+
ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
@@ -397,7 +430,8 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
V = Builder.CreateBitCast(V,
CGF.ConvertType(CGF.getContext().getPointerType(DestTy)));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy,
- origLV.getAlignment()));
+ origLV.getAlignment()),
+ Op->getExprLoc());
}
case CK_BitCast:
@@ -444,16 +478,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
- case CK_IntegralRealToComplex: {
- llvm::Value *Elt = CGF.EmitScalarExpr(Op);
-
- // Convert the input element to the element type of the complex.
- DestTy = DestTy->castAs<ComplexType>()->getElementType();
- Elt = CGF.EmitScalarConversion(Elt, Op->getType(), DestTy);
-
- // Return (realval, 0).
- return ComplexPairTy(Elt, llvm::Constant::getNullValue(Elt->getType()));
- }
+ case CK_IntegralRealToComplex:
+ return EmitScalarToComplexCast(CGF.EmitScalarExpr(Op),
+ Op->getType(), DestTy);
case CK_FloatingComplexCast:
case CK_FloatingComplexToIntegralComplex:
@@ -608,7 +635,7 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
LValue ComplexExprEmitter::
EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&),
- ComplexPairTy &Val) {
+ RValue &Val) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
QualType LHSTy = E->getLHS()->getType();
@@ -628,20 +655,29 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
LValue LHS = CGF.EmitLValue(E->getLHS());
- // Load from the l-value.
- ComplexPairTy LHSComplexPair = EmitLoadOfLValue(LHS);
-
- OpInfo.LHS = EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
+ // Load from the l-value and convert it.
+ if (LHSTy->isAnyComplexType()) {
+ ComplexPairTy LHSVal = EmitLoadOfLValue(LHS, E->getExprLoc());
+ OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ } else {
+ llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
+ OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ }
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
- // Truncate the result back to the LHS type.
- Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
- Val = Result;
-
- // Store the result value into the LHS lvalue.
- EmitStoreOfComplex(Result, LHS, /*isInit*/ false);
+ // Truncate the result and store it into the LHS lvalue.
+ if (LHSTy->isAnyComplexType()) {
+ ComplexPairTy ResVal = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
+ EmitStoreOfComplex(ResVal, LHS, /*isInit*/ false);
+ Val = RValue::getComplex(ResVal);
+ } else {
+ llvm::Value *ResVal =
+ CGF.EmitComplexToScalarConversion(Result, OpInfo.Ty, LHSTy);
+ CGF.EmitStoreOfScalar(ResVal, LHS, /*isInit*/ false);
+ Val = RValue::get(ResVal);
+ }
return LHS;
}
@@ -650,18 +686,18 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
ComplexPairTy ComplexExprEmitter::
EmitCompoundAssign(const CompoundAssignOperator *E,
ComplexPairTy (ComplexExprEmitter::*Func)(const BinOpInfo&)){
- ComplexPairTy Val;
+ RValue Val;
LValue LV = EmitCompoundAssignLValue(E, Func, Val);
// The result of an assignment in C is the assigned r-value.
if (!CGF.getLangOpts().CPlusPlus)
- return Val;
+ return Val.getComplexVal();
// If the lvalue is non-volatile, return the computed value of the assignment.
if (!LV.isVolatileQualified())
- return Val;
+ return Val.getComplexVal();
- return EmitLoadOfLValue(LV);
+ return EmitLoadOfLValue(LV, E->getExprLoc());
}
LValue ComplexExprEmitter::EmitBinAssignLValue(const BinaryOperator *E,
@@ -696,7 +732,7 @@ ComplexPairTy ComplexExprEmitter::VisitBinAssign(const BinaryOperator *E) {
if (!LV.isVolatileQualified())
return Val;
- return EmitLoadOfLValue(LV);
+ return EmitLoadOfLValue(LV, E->getExprLoc());
}
ComplexPairTy ComplexExprEmitter::VisitBinComma(const BinaryOperator *E) {
@@ -746,7 +782,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
}
ComplexPairTy ComplexExprEmitter::VisitChooseExpr(ChooseExpr *E) {
- return Visit(E->getChosenSubExpr(CGF.getContext()));
+ return Visit(E->getChosenSubExpr());
}
ComplexPairTy ComplexExprEmitter::VisitInitListExpr(InitListExpr *E) {
@@ -785,8 +821,8 @@ ComplexPairTy ComplexExprEmitter::VisitVAArgExpr(VAArgExpr *E) {
return ComplexPairTy(U, U);
}
- return EmitLoadOfLValue(
- CGF.MakeNaturalAlignAddrLValue(ArgPtr, E->getType()));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(ArgPtr, E->getType()),
+ E->getExprLoc());
}
//===----------------------------------------------------------------------===//
@@ -820,8 +856,9 @@ void CodeGenFunction::EmitStoreOfComplex(ComplexPairTy V, LValue dest,
}
/// EmitLoadOfComplex - Load a complex number from the specified address.
-ComplexPairTy CodeGenFunction::EmitLoadOfComplex(LValue src) {
- return ComplexExprEmitter(*this).EmitLoadOfLValue(src);
+ComplexPairTy CodeGenFunction::EmitLoadOfComplex(LValue src,
+ SourceLocation loc) {
+ return ComplexExprEmitter(*this).EmitLoadOfLValue(src, loc);
}
LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
@@ -830,19 +867,33 @@ LValue CodeGenFunction::EmitComplexAssignmentLValue(const BinaryOperator *E) {
return ComplexExprEmitter(*this).EmitBinAssignLValue(E, Val);
}
-LValue CodeGenFunction::
-EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
- ComplexPairTy(ComplexExprEmitter::*Op)(const ComplexExprEmitter::BinOpInfo &);
- switch (E->getOpcode()) {
- case BO_MulAssign: Op = &ComplexExprEmitter::EmitBinMul; break;
- case BO_DivAssign: Op = &ComplexExprEmitter::EmitBinDiv; break;
- case BO_SubAssign: Op = &ComplexExprEmitter::EmitBinSub; break;
- case BO_AddAssign: Op = &ComplexExprEmitter::EmitBinAdd; break;
+typedef ComplexPairTy (ComplexExprEmitter::*CompoundFunc)(
+ const ComplexExprEmitter::BinOpInfo &);
+static CompoundFunc getComplexOp(BinaryOperatorKind Op) {
+ switch (Op) {
+ case BO_MulAssign: return &ComplexExprEmitter::EmitBinMul;
+ case BO_DivAssign: return &ComplexExprEmitter::EmitBinDiv;
+ case BO_SubAssign: return &ComplexExprEmitter::EmitBinSub;
+ case BO_AddAssign: return &ComplexExprEmitter::EmitBinAdd;
default:
llvm_unreachable("unexpected complex compound assignment");
}
+}
- ComplexPairTy Val; // ignored
+LValue CodeGenFunction::
+EmitComplexCompoundAssignmentLValue(const CompoundAssignOperator *E) {
+ CompoundFunc Op = getComplexOp(E->getOpcode());
+ RValue Val;
return ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
}
+
+LValue CodeGenFunction::
+EmitScalarCompooundAssignWithComplex(const CompoundAssignOperator *E,
+ llvm::Value *&Result) {
+ CompoundFunc Op = getComplexOp(E->getOpcode());
+ RValue Val;
+ LValue Ret = ComplexExprEmitter(*this).EmitCompoundAssignLValue(E, Op, Val);
+ Result = Val.getScalarVal();
+ return Ret;
+}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index f5c8187c26f4..f4d6861c8b8b 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -53,9 +53,6 @@ private:
NextFieldOffsetInChars(CharUnits::Zero()),
LLVMStructAlignment(CharUnits::One()) { }
- void AppendVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass);
-
void AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitExpr);
@@ -72,8 +69,7 @@ private:
bool Build(InitListExpr *ILE);
void Build(const APValue &Val, const RecordDecl *RD, bool IsPrimaryBase,
- llvm::Constant *VTable, const CXXRecordDecl *VTableClass,
- CharUnits BaseOffset);
+ const CXXRecordDecl *VTableClass, CharUnits BaseOffset);
llvm::Constant *Finalize(QualType Ty);
CharUnits getAlignment(const llvm::Constant *C) const {
@@ -88,23 +84,6 @@ private:
}
};
-void ConstStructBuilder::AppendVTablePointer(BaseSubobject Base,
- llvm::Constant *VTable,
- const CXXRecordDecl *VTableClass) {
- // Find the appropriate vtable within the vtable group.
- uint64_t AddressPoint =
- CGM.getVTableContext().getVTableLayout(VTableClass).getAddressPoint(Base);
- llvm::Value *Indices[] = {
- llvm::ConstantInt::get(CGM.Int64Ty, 0),
- llvm::ConstantInt::get(CGM.Int64Ty, AddressPoint)
- };
- llvm::Constant *VTableAddressPoint =
- llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Indices);
-
- // Add the vtable at the start of the object.
- AppendBytes(Base.getBaseOffset(), VTableAddressPoint);
-}
-
void ConstStructBuilder::
AppendField(const FieldDecl *Field, uint64_t FieldOffset,
llvm::Constant *InitCst) {
@@ -368,40 +347,21 @@ void ConstStructBuilder::ConvertStructToPacked() {
}
bool ConstStructBuilder::Build(InitListExpr *ILE) {
- if (ILE->initializesStdInitializerList()) {
- //CGM.ErrorUnsupported(ILE, "global std::initializer_list");
- return false;
- }
-
RecordDecl *RD = ILE->getType()->getAs<RecordType>()->getDecl();
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
unsigned FieldNo = 0;
unsigned ElementNo = 0;
- const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->isMsStruct(CGM.getContext());
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are
- // ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
- --FieldNo;
- continue;
- }
- LastFD = *Field;
- }
-
// If this is a union, skip all the fields that aren't being initialized.
if (RD->isUnion() && ILE->getInitializedFieldInUnion() != *Field)
continue;
// Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield()) {
- LastFD = *Field;
+ if (Field->isUnnamedBitfield())
continue;
- }
// Get the initializer. A struct can include fields without initializers,
// we just use explicit null values for them.
@@ -443,15 +403,19 @@ struct BaseInfo {
}
void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
- bool IsPrimaryBase, llvm::Constant *VTable,
+ bool IsPrimaryBase,
const CXXRecordDecl *VTableClass,
CharUnits Offset) {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD)) {
// Add a vtable pointer, if we need one and it hasn't already been added.
- if (CD->isDynamicClass() && !IsPrimaryBase)
- AppendVTablePointer(BaseSubobject(CD, Offset), VTable, VTableClass);
+ if (CD->isDynamicClass() && !IsPrimaryBase) {
+ llvm::Constant *VTableAddressPoint =
+ CGM.getCXXABI().getVTableAddressPointForConstExpr(
+ BaseSubobject(CD, Offset), VTableClass);
+ AppendBytes(Offset, VTableAddressPoint);
+ }
// Accumulate and sort bases, in order to visit them in address order, which
// may not be the same as declaration order.
@@ -472,36 +436,22 @@ void ConstStructBuilder::Build(const APValue &Val, const RecordDecl *RD,
bool IsPrimaryBase = Layout.getPrimaryBase() == Base.Decl;
Build(Val.getStructBase(Base.Index), Base.Decl, IsPrimaryBase,
- VTable, VTableClass, Offset + Base.Offset);
+ VTableClass, Offset + Base.Offset);
}
}
unsigned FieldNo = 0;
- const FieldDecl *LastFD = 0;
- bool IsMsStruct = RD->isMsStruct(CGM.getContext());
uint64_t OffsetBits = CGM.getContext().toBits(Offset);
for (RecordDecl::field_iterator Field = RD->field_begin(),
FieldEnd = RD->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are
- // ignored:
- if (CGM.getContext().ZeroBitfieldFollowsNonBitfield(*Field, LastFD)) {
- --FieldNo;
- continue;
- }
- LastFD = *Field;
- }
-
// If this is a union, skip all the fields that aren't being initialized.
if (RD->isUnion() && Val.getUnionField() != *Field)
continue;
// Don't emit anonymous bitfields, they just affect layout.
- if (Field->isUnnamedBitfield()) {
- LastFD = *Field;
+ if (Field->isUnnamedBitfield())
continue;
- }
// Emit the value of the initializer.
const APValue &FieldValue =
@@ -593,11 +543,7 @@ llvm::Constant *ConstStructBuilder::BuildStruct(CodeGenModule &CGM,
const RecordDecl *RD = ValTy->castAs<RecordType>()->getDecl();
const CXXRecordDecl *CD = dyn_cast<CXXRecordDecl>(RD);
- llvm::Constant *VTable = 0;
- if (CD && CD->isDynamicClass())
- VTable = CGM.getVTables().GetAddrOfVTable(CD);
-
- Builder.Build(Val, RD, false, VTable, CD, CharUnits::Zero());
+ Builder.Build(Val, RD, false, CD, CharUnits::Zero());
return Builder.Finalize(ValTy);
}
@@ -642,6 +588,10 @@ public:
return Visit(GE->getResultExpr());
}
+ llvm::Constant *VisitChooseExpr(ChooseExpr *CE) {
+ return Visit(CE->getChosenSubExpr());
+ }
+
llvm::Constant *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
return Visit(E->getInitializer());
}
@@ -687,6 +637,7 @@ public:
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_NoOp:
+ case CK_ConstructorConversion:
return C;
case CK_Dependent: llvm_unreachable("saw dependent cast!");
@@ -716,7 +667,6 @@ public:
case CK_LValueBitCast:
case CK_NullToMemberPointer:
case CK_UserDefinedConversion:
- case CK_ConstructorConversion:
case CK_CPointerToObjCPointerCast:
case CK_BlockPointerToObjCPointerCast:
case CK_AnyPointerToBlockPointerCast:
@@ -995,7 +945,7 @@ public:
CXXTypeidExpr *Typeid = cast<CXXTypeidExpr>(E);
QualType T;
if (Typeid->isTypeOperand())
- T = Typeid->getTypeOperand();
+ T = Typeid->getTypeOperand(CGM.getContext());
else
T = Typeid->getExprOperand()->getType();
return CGM.GetAddrOfRTTIDescriptor(T);
@@ -1003,6 +953,15 @@ public:
case Expr::CXXUuidofExprClass: {
return CGM.GetAddrOfUuidDescriptor(cast<CXXUuidofExpr>(E));
}
+ case Expr::MaterializeTemporaryExprClass: {
+ MaterializeTemporaryExpr *MTE = cast<MaterializeTemporaryExpr>(E);
+ assert(MTE->getStorageDuration() == SD_Static);
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *Inner = MTE->GetTemporaryExpr()
+ ->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ return CGM.GetAddrOfGlobalTemporary(MTE, Inner);
+ }
}
return 0;
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index c1c252d12b76..f3a5387c58d3 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -87,15 +87,16 @@ public:
void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
- Value *EmitLoadOfLValue(LValue LV) {
- return CGF.EmitLoadOfLValue(LV).getScalarVal();
+ Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
+ return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
}
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load));
+ return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
+ E->getExprLoc());
}
/// EmitConversionToBool - Convert the specified expression value to a
@@ -161,18 +162,18 @@ public:
Value *Visit(Expr *E) {
return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
}
-
+
Value *VisitStmt(Stmt *S) {
S->dump(CGF.getContext().getSourceManager());
llvm_unreachable("Stmt can't have complex result type!");
}
Value *VisitExpr(Expr *S);
-
+
Value *VisitParenExpr(ParenExpr *PE) {
- return Visit(PE->getSubExpr());
+ return Visit(PE->getSubExpr());
}
Value *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
- return Visit(E->getReplacement());
+ return Visit(E->getReplacement());
}
Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
@@ -217,7 +218,7 @@ public:
Value *VisitOpaqueValueExpr(OpaqueValueExpr *E) {
if (E->isGLValue())
- return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E));
+ return EmitLoadOfLValue(CGF.getOpaqueLValueMapping(E), E->getExprLoc());
// Otherwise, assume the mapping is the scalar directly.
return CGF.getOpaqueRValueMapping(E).getScalarVal();
@@ -227,7 +228,8 @@ public:
Value *VisitDeclRefExpr(DeclRefExpr *E) {
if (CodeGenFunction::ConstantEmission result = CGF.tryEmitAsConstant(E)) {
if (result.isReference())
- return EmitLoadOfLValue(result.getReferenceLValue(CGF, E));
+ return EmitLoadOfLValue(result.getReferenceLValue(CGF, E),
+ E->getExprLoc());
return result.getValue();
}
return EmitLoadOfLValue(E);
@@ -243,7 +245,7 @@ public:
return EmitLoadOfLValue(E);
}
Value *VisitObjCMessageExpr(ObjCMessageExpr *E) {
- if (E->getMethodDecl() &&
+ if (E->getMethodDecl() &&
E->getMethodDecl()->getResultType()->isReferenceType())
return EmitLoadOfLValue(E);
return CGF.EmitObjCMessageExpr(E).getScalarVal();
@@ -251,12 +253,13 @@ public:
Value *VisitObjCIsaExpr(ObjCIsaExpr *E) {
LValue LV = CGF.EmitObjCIsaExpr(E);
- Value *V = CGF.EmitLoadOfLValue(LV).getScalarVal();
+ Value *V = CGF.EmitLoadOfLValue(LV, E->getExprLoc()).getScalarVal();
return V;
}
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
+ Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
Value *VisitMemberExpr(MemberExpr *E);
Value *VisitExtVectorElementExpr(Expr *E) { return EmitLoadOfLValue(E); }
Value *VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
@@ -310,7 +313,7 @@ public:
llvm::Value *EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre);
-
+
Value *VisitUnaryAddrOf(const UnaryOperator *E) {
if (isa<MemberPointerType>(E->getType())) // never sugared
return CGF.CGM.getMemberPointerConstant(E);
@@ -335,12 +338,12 @@ public:
Value *VisitUnaryExtension(const UnaryOperator *E) {
return Visit(E->getSubExpr());
}
-
+
// C++
Value *VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E) {
return EmitLoadOfLValue(E);
}
-
+
Value *VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
return Visit(DAE->getExpr());
}
@@ -430,7 +433,7 @@ public:
Value *EmitOverflowCheckedBinOp(const BinOpInfo &Ops);
// Check for undefined division and modulus behaviors.
- void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
+ void EmitUndefinedBehaviorIntegerDivAndRemCheck(const BinOpInfo &Ops,
llvm::Value *Zero,bool isDiv);
// Common helper for getting how wide LHS of shift is.
static Value *GetWidthMinusOneValue(Value* LHS,Value* RHS);
@@ -893,51 +896,43 @@ Value *ScalarExprEmitter::VisitExpr(Expr *E) {
Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// Vector Mask Case
- if (E->getNumSubExprs() == 2 ||
+ if (E->getNumSubExprs() == 2 ||
(E->getNumSubExprs() == 3 && E->getExpr(2)->getType()->isVectorType())) {
Value *LHS = CGF.EmitScalarExpr(E->getExpr(0));
Value *RHS = CGF.EmitScalarExpr(E->getExpr(1));
Value *Mask;
-
+
llvm::VectorType *LTy = cast<llvm::VectorType>(LHS->getType());
unsigned LHSElts = LTy->getNumElements();
if (E->getNumSubExprs() == 3) {
Mask = CGF.EmitScalarExpr(E->getExpr(2));
-
+
// Shuffle LHS & RHS into one input vector.
SmallVector<llvm::Constant*, 32> concat;
for (unsigned i = 0; i != LHSElts; ++i) {
concat.push_back(Builder.getInt32(2*i));
concat.push_back(Builder.getInt32(2*i+1));
}
-
+
Value* CV = llvm::ConstantVector::get(concat);
LHS = Builder.CreateShuffleVector(LHS, RHS, CV, "concat");
LHSElts *= 2;
} else {
Mask = RHS;
}
-
+
llvm::VectorType *MTy = cast<llvm::VectorType>(Mask->getType());
llvm::Constant* EltMask;
-
- // Treat vec3 like vec4.
- if ((LHSElts == 6) && (E->getNumSubExprs() == 3))
- EltMask = llvm::ConstantInt::get(MTy->getElementType(),
- (1 << llvm::Log2_32(LHSElts+2))-1);
- else if ((LHSElts == 3) && (E->getNumSubExprs() == 2))
- EltMask = llvm::ConstantInt::get(MTy->getElementType(),
- (1 << llvm::Log2_32(LHSElts+1))-1);
- else
- EltMask = llvm::ConstantInt::get(MTy->getElementType(),
- (1 << llvm::Log2_32(LHSElts))-1);
-
+
+ EltMask = llvm::ConstantInt::get(MTy->getElementType(),
+ llvm::NextPowerOf2(LHSElts-1)-1);
+
// Mask off the high bits of each shuffle index.
Value *MaskBits = llvm::ConstantVector::getSplat(MTy->getNumElements(),
EltMask);
Mask = Builder.CreateAnd(Mask, MaskBits, "mask");
-
+
// newv = undef
// mask = mask & maskbits
// for each elt
@@ -945,43 +940,110 @@ Value *ScalarExprEmitter::VisitShuffleVectorExpr(ShuffleVectorExpr *E) {
// x = extract val n
// newv = insert newv, x, i
llvm::VectorType *RTy = llvm::VectorType::get(LTy->getElementType(),
- MTy->getNumElements());
+ MTy->getNumElements());
Value* NewV = llvm::UndefValue::get(RTy);
for (unsigned i = 0, e = MTy->getNumElements(); i != e; ++i) {
Value *IIndx = Builder.getInt32(i);
Value *Indx = Builder.CreateExtractElement(Mask, IIndx, "shuf_idx");
Indx = Builder.CreateZExt(Indx, CGF.Int32Ty, "idx_zext");
-
- // Handle vec3 special since the index will be off by one for the RHS.
- if ((LHSElts == 6) && (E->getNumSubExprs() == 3)) {
- Value *cmpIndx, *newIndx;
- cmpIndx = Builder.CreateICmpUGT(Indx, Builder.getInt32(3),
- "cmp_shuf_idx");
- newIndx = Builder.CreateSub(Indx, Builder.getInt32(1), "shuf_idx_adj");
- Indx = Builder.CreateSelect(cmpIndx, newIndx, Indx, "sel_shuf_idx");
- }
+
Value *VExt = Builder.CreateExtractElement(LHS, Indx, "shuf_elt");
NewV = Builder.CreateInsertElement(NewV, VExt, IIndx, "shuf_ins");
}
return NewV;
}
-
+
Value* V1 = CGF.EmitScalarExpr(E->getExpr(0));
Value* V2 = CGF.EmitScalarExpr(E->getExpr(1));
-
- // Handle vec3 special since the index will be off by one for the RHS.
- llvm::VectorType *VTy = cast<llvm::VectorType>(V1->getType());
+
SmallVector<llvm::Constant*, 32> indices;
- for (unsigned i = 2; i < E->getNumSubExprs(); i++) {
- unsigned Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
- if (VTy->getNumElements() == 3 && Idx > 3)
- Idx -= 1;
- indices.push_back(Builder.getInt32(Idx));
+ for (unsigned i = 2; i < E->getNumSubExprs(); ++i) {
+ llvm::APSInt Idx = E->getShuffleMaskIdx(CGF.getContext(), i-2);
+ // Check for -1 and output it as undef in the IR.
+ if (Idx.isSigned() && Idx.isAllOnesValue())
+ indices.push_back(llvm::UndefValue::get(CGF.Int32Ty));
+ else
+ indices.push_back(Builder.getInt32(Idx.getZExtValue()));
}
Value *SV = llvm::ConstantVector::get(indices);
return Builder.CreateShuffleVector(V1, V2, SV, "shuffle");
}
+
+Value *ScalarExprEmitter::VisitConvertVectorExpr(ConvertVectorExpr *E) {
+ QualType SrcType = E->getSrcExpr()->getType(),
+ DstType = E->getType();
+
+ Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
+
+ SrcType = CGF.getContext().getCanonicalType(SrcType);
+ DstType = CGF.getContext().getCanonicalType(DstType);
+ if (SrcType == DstType) return Src;
+
+ assert(SrcType->isVectorType() &&
+ "ConvertVector source type must be a vector");
+ assert(DstType->isVectorType() &&
+ "ConvertVector destination type must be a vector");
+
+ llvm::Type *SrcTy = Src->getType();
+ llvm::Type *DstTy = ConvertType(DstType);
+
+ // Ignore conversions like int -> uint.
+ if (SrcTy == DstTy)
+ return Src;
+
+ QualType SrcEltType = SrcType->getAs<VectorType>()->getElementType(),
+ DstEltType = DstType->getAs<VectorType>()->getElementType();
+
+ assert(SrcTy->isVectorTy() &&
+ "ConvertVector source IR type must be a vector");
+ assert(DstTy->isVectorTy() &&
+ "ConvertVector destination IR type must be a vector");
+
+ llvm::Type *SrcEltTy = SrcTy->getVectorElementType(),
+ *DstEltTy = DstTy->getVectorElementType();
+
+ if (DstEltType->isBooleanType()) {
+ assert((SrcEltTy->isFloatingPointTy() ||
+ isa<llvm::IntegerType>(SrcEltTy)) && "Unknown boolean conversion");
+
+ llvm::Value *Zero = llvm::Constant::getNullValue(SrcTy);
+ if (SrcEltTy->isFloatingPointTy()) {
+ return Builder.CreateFCmpUNE(Src, Zero, "tobool");
+ } else {
+ return Builder.CreateICmpNE(Src, Zero, "tobool");
+ }
+ }
+
+ // We have the arithmetic types: real int/float.
+ Value *Res = NULL;
+
+ if (isa<llvm::IntegerType>(SrcEltTy)) {
+ bool InputSigned = SrcEltType->isSignedIntegerOrEnumerationType();
+ if (isa<llvm::IntegerType>(DstEltTy))
+ Res = Builder.CreateIntCast(Src, DstTy, InputSigned, "conv");
+ else if (InputSigned)
+ Res = Builder.CreateSIToFP(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateUIToFP(Src, DstTy, "conv");
+ } else if (isa<llvm::IntegerType>(DstEltTy)) {
+ assert(SrcEltTy->isFloatingPointTy() && "Unknown real conversion");
+ if (DstEltType->isSignedIntegerOrEnumerationType())
+ Res = Builder.CreateFPToSI(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPToUI(Src, DstTy, "conv");
+ } else {
+ assert(SrcEltTy->isFloatingPointTy() && DstEltTy->isFloatingPointTy() &&
+ "Unknown real conversion");
+ if (DstEltTy->getTypeID() < SrcEltTy->getTypeID())
+ Res = Builder.CreateFPTrunc(Src, DstTy, "conv");
+ else
+ Res = Builder.CreateFPExt(Src, DstTy, "conv");
+ }
+
+ return Res;
+}
+
Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
llvm::APSInt Value;
if (E->EvaluateAsInt(Value, CGF.getContext(), Expr::SE_AllowSideEffects)) {
@@ -992,18 +1054,6 @@ Value *ScalarExprEmitter::VisitMemberExpr(MemberExpr *E) {
return Builder.getInt(Value);
}
- // Emit debug info for aggregate now, if it was delayed to reduce
- // debug info size.
- CGDebugInfo *DI = CGF.getDebugInfo();
- if (DI &&
- CGF.CGM.getCodeGenOpts().getDebugInfo()
- == CodeGenOptions::LimitedDebugInfo) {
- QualType PQTy = E->getBase()->IgnoreParenImpCasts()->getType();
- if (const PointerType * PTy = dyn_cast<PointerType>(PQTy))
- if (FieldDecl *M = dyn_cast<FieldDecl>(E->getMemberDecl()))
- DI->getOrCreateRecordType(PTy->getPointeeType(),
- M->getParent()->getLocation());
- }
return EmitLoadOfLValue(E);
}
@@ -1023,7 +1073,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Idx = Visit(E->getIdx());
QualType IdxTy = E->getIdx()->getType();
- if (CGF.SanOpts->Bounds)
+ if (CGF.SanOpts->ArrayBounds)
CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
@@ -1034,7 +1084,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
static llvm::Constant *getMaskElt(llvm::ShuffleVectorInst *SVI, unsigned Idx,
unsigned Off, llvm::Type *I32Ty) {
int MV = SVI->getMaskValue(Idx);
- if (MV == -1)
+ if (MV == -1)
return llvm::UndefValue::get(I32Ty);
return llvm::ConstantInt::get(I32Ty, Off+MV);
}
@@ -1044,13 +1094,13 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
(void)Ignore;
assert (Ignore == false && "init list ignored");
unsigned NumInitElements = E->getNumInits();
-
+
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
-
+
llvm::VectorType *VType =
dyn_cast<llvm::VectorType>(ConvertType(E->getType()));
-
+
if (!VType) {
if (NumInitElements == 0) {
// C++11 value-initialization for the scalar.
@@ -1059,10 +1109,10 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
// We have a scalar in braces. Just use the first element.
return Visit(E->getInit(0));
}
-
+
unsigned ResElts = VType->getNumElements();
-
- // Loop over initializers collecting the Value for each, and remembering
+
+ // Loop over initializers collecting the Value for each, and remembering
// whether the source was swizzle (ExtVectorElementExpr). This will allow
// us to fold the shuffle for the swizzle into the shuffle for the vector
// initializer, since LLVM optimizers generally do not want to touch
@@ -1074,11 +1124,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
Expr *IE = E->getInit(i);
Value *Init = Visit(IE);
SmallVector<llvm::Constant*, 16> Args;
-
+
llvm::VectorType *VVT = dyn_cast<llvm::VectorType>(Init->getType());
-
+
// Handle scalar elements. If the scalar initializer is actually one
- // element of a different vector of the same width, use shuffle instead of
+ // element of a different vector of the same width, use shuffle instead of
// extract+insert.
if (!VVT) {
if (isa<ExtVectorElementExpr>(IE)) {
@@ -1121,10 +1171,10 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
++CurIdx;
continue;
}
-
+
unsigned InitElts = VVT->getNumElements();
- // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
+ // If the initializer is an ExtVecEltExpr (a swizzle), and the swizzle's
// input is the same width as the vector being constructed, generate an
// optimized shuffle of the swizzle input into the result.
unsigned Offset = (CurIdx == 0) ? 0 : ResElts;
@@ -1132,7 +1182,7 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
llvm::ShuffleVectorInst *SVI = cast<llvm::ShuffleVectorInst>(Init);
Value *SVOp = SVI->getOperand(0);
llvm::VectorType *OpTy = cast<llvm::VectorType>(SVOp->getType());
-
+
if (OpTy->getNumElements() == ResElts) {
for (unsigned j = 0; j != CurIdx; ++j) {
// If the current vector initializer is a shuffle with undef, merge
@@ -1182,11 +1232,11 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
VIsUndefShuffle = isa<llvm::UndefValue>(Init);
CurIdx += InitElts;
}
-
+
// FIXME: evaluate codegen vs. shuffling against constant null vector.
// Emit remaining default initializers.
llvm::Type *EltTy = VType->getElementType();
-
+
// Emit remaining default initializers
for (/* Do not initialize i*/; CurIdx < ResElts; ++CurIdx) {
Value *Idx = Builder.getInt32(CurIdx);
@@ -1201,12 +1251,12 @@ static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
if (CE->getCastKind() == CK_UncheckedDerivedToBase)
return false;
-
+
if (isa<CXXThisExpr>(E)) {
// We always assume that 'this' is never null.
return false;
}
-
+
if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(CE)) {
// And that glvalue casts are never null.
if (ICE->getValueKind() != VK_RValue)
@@ -1223,7 +1273,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Expr *E = CE->getSubExpr();
QualType DestTy = CE->getType();
CastKind Kind = CE->getCastKind();
-
+
if (!DestTy->isVoidType())
TestAndClearIgnoreResultAssign();
@@ -1235,12 +1285,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_BuiltinFnToFnPtr:
llvm_unreachable("builtin functions are handled elsewhere");
- case CK_LValueBitCast:
+ case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
Value *V = EmitLValue(E).getAddress();
- V = Builder.CreateBitCast(V,
+ V = Builder.CreateBitCast(V,
ConvertType(CGF.getContext().getPointerType(DestTy)));
- return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy));
+ return EmitLoadOfLValue(CGF.MakeNaturalAlignAddrLValue(V, DestTy),
+ CE->getExprLoc());
}
case CK_CPointerToObjCPointerCast:
@@ -1262,15 +1313,18 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Value *V = Visit(E);
+ llvm::Value *Derived =
+ CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
+ CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE));
+
// C++11 [expr.static.cast]p11: Behavior is undefined if a downcast is
// performed and the object is not of the derived type.
if (CGF.SanitizePerformTypeCheck)
CGF.EmitTypeCheck(CodeGenFunction::TCK_DowncastPointer, CE->getExprLoc(),
- V, DestTy->getPointeeType());
+ Derived, DestTy->getPointeeType());
- return CGF.GetAddressOfDerivedClass(V, DerivedClassDecl,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ return Derived;
}
case CK_UncheckedDerivedToBase:
case CK_DerivedToBase: {
@@ -1278,7 +1332,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
E->getType()->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
- return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
+ return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
CE->path_begin(), CE->path_end(),
ShouldNullCheckClassCastValue(CE));
}
@@ -1330,7 +1384,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_BaseToDerivedMemberPointer:
case CK_DerivedToBaseMemberPointer: {
Value *Src = Visit(E);
-
+
// Note that the AST doesn't distinguish between checked and
// unchecked member pointer conversions, so we always have to
// implement checked conversions here. This is inefficient when
@@ -1354,7 +1408,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_CopyAndAutoreleaseBlockObject:
return CGF.EmitBlockCopyAndAutorelease(Visit(E), E->getType());
-
+
case CK_FloatingRealToComplex:
case CK_FloatingComplexCast:
case CK_IntegralRealToComplex:
@@ -1442,8 +1496,12 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
CodeGenFunction::StmtExprEvaluation eval(CGF);
- return CGF.EmitCompoundStmt(*E->getSubStmt(), !E->getType()->isVoidType())
- .getScalarVal();
+ llvm::Value *RetAlloca = CGF.EmitCompoundStmt(*E->getSubStmt(),
+ !E->getType()->isVoidType());
+ if (!RetAlloca)
+ return 0;
+ return CGF.EmitLoadOfScalar(CGF.MakeAddrLValue(RetAlloca, E->getType()),
+ E->getExprLoc());
}
//===----------------------------------------------------------------------===//
@@ -1477,7 +1535,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
llvm::Value *
ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
bool isInc, bool isPre) {
-
+
QualType type = E->getSubExpr()->getType();
llvm::PHINode *atomicPHI = 0;
llvm::Value *value;
@@ -1503,7 +1561,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
// Special case for atomic increment / decrement on integers, emit
// atomicrmw instructions. We skip this if we want to be doing overflow
- // checking, and fall into the slow path with the atomic cmpxchg loop.
+ // checking, and fall into the slow path with the atomic cmpxchg loop.
if (!type->isBooleanType() && type->isIntegerType() &&
!(type->isUnsignedIntegerType() &&
CGF.SanOpts->UnsignedIntegerOverflow) &&
@@ -1519,7 +1577,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
LV.getAddress(), amt, llvm::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
- value = EmitLoadOfLValue(LV);
+ value = EmitLoadOfLValue(LV, E->getExprLoc());
input = value;
// For every other atomic operation, we need to emit a load-op-cmpxchg loop
llvm::BasicBlock *startBB = Builder.GetInsertBlock();
@@ -1531,7 +1589,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
atomicPHI->addIncoming(value, startBB);
value = atomicPHI;
} else {
- value = EmitLoadOfLValue(LV);
+ value = EmitLoadOfLValue(LV, E->getExprLoc());
input = value;
}
@@ -1569,7 +1627,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = EmitOverflowCheckedBinOp(BinOp);
} else
value = Builder.CreateAdd(value, amt, isInc ? "inc" : "dec");
-
+
// Next most common: pointer increment.
} else if (const PointerType *ptr = type->getAs<PointerType>()) {
QualType type = ptr->getPointeeType();
@@ -1583,7 +1641,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = Builder.CreateGEP(value, numElts, "vla.inc");
else
value = Builder.CreateInBoundsGEP(value, numElts, "vla.inc");
-
+
// Arithmetic on function pointers (!) is just +-1.
} else if (type->isFunctionType()) {
llvm::Value *amt = Builder.getInt32(amount);
@@ -1665,7 +1723,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
value = Builder.CreateInBoundsGEP(value, sizeValue, "incdec.objptr");
value = Builder.CreateBitCast(value, input->getType());
}
-
+
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
@@ -1696,10 +1754,10 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
// Emit unary minus with EmitSub so we handle overflow cases etc.
BinOpInfo BinOp;
BinOp.RHS = Visit(E->getSubExpr());
-
+
if (BinOp.RHS->getType()->isFPOrFPVectorTy())
BinOp.LHS = llvm::ConstantFP::getZeroValueForNegation(BinOp.RHS->getType());
- else
+ else
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
@@ -1726,7 +1784,7 @@ Value *ScalarExprEmitter::VisitUnaryLNot(const UnaryOperator *E) {
Result = Builder.CreateICmp(llvm::CmpInst::ICMP_EQ, Oper, Zero, "cmp");
return Builder.CreateSExt(Result, ConvertType(E->getType()), "sext");
}
-
+
// Compare operand to zero.
Value *BoolVal = CGF.EvaluateExprAsBool(E->getSubExpr());
@@ -1814,7 +1872,7 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
// Save the element type.
CurrentType = ON.getBase()->getType();
-
+
// Compute the offset to the base.
const RecordType *BaseRT = CurrentType->getAs<RecordType>();
CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
@@ -1873,7 +1931,8 @@ Value *ScalarExprEmitter::VisitUnaryReal(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (E->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
+ E->getExprLoc()).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, false, true).first;
@@ -1889,7 +1948,8 @@ Value *ScalarExprEmitter::VisitUnaryImag(const UnaryOperator *E) {
// Note that we have to ask E because Op might be an l-value that
// this won't work for, e.g. an Obj-C property.
if (Op->isGLValue())
- return CGF.EmitLoadOfLValue(CGF.EmitLValue(E)).getScalarVal();
+ return CGF.EmitLoadOfLValue(CGF.EmitLValue(E),
+ E->getExprLoc()).getScalarVal();
// Otherwise, calculate and project.
return CGF.EmitComplexExpr(Op, true, false).second;
@@ -1926,17 +1986,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Value *&Result) {
QualType LHSTy = E->getLHS()->getType();
BinOpInfo OpInfo;
-
- if (E->getComputationResultType()->isAnyComplexType()) {
- // This needs to go through the complex expression emitter, but it's a tad
- // complicated to do that... I'm leaving it out for now. (Note that we do
- // actually need the imaginary part of the RHS for multiplication and
- // division.)
- CGF.ErrorUnsupported(E, "complex compound assignment");
- Result = llvm::UndefValue::get(CGF.ConvertType(E->getType()));
- return LValue();
- }
-
+
+ if (E->getComputationResultType()->isAnyComplexType())
+ return CGF.EmitScalarCompooundAssignWithComplex(E, Result);
+
// Emit the RHS first. __block variables need to have the rhs evaluated
// first, plus this should improve codegen a little.
OpInfo.RHS = Visit(E->getRHS());
@@ -1993,7 +2046,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
// floating point environment in the loop.
llvm::BasicBlock *startBB = Builder.GetInsertBlock();
llvm::BasicBlock *opBB = CGF.createBasicBlock("atomic_op", CGF.CurFn);
- OpInfo.LHS = EmitLoadOfLValue(LHSLV);
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
OpInfo.LHS = CGF.EmitToMemory(OpInfo.LHS, type);
Builder.CreateBr(opBB);
Builder.SetInsertPoint(opBB);
@@ -2002,14 +2055,14 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.LHS = atomicPHI;
}
else
- OpInfo.LHS = EmitLoadOfLValue(LHSLV);
+ OpInfo.LHS = EmitLoadOfLValue(LHSLV, E->getExprLoc());
OpInfo.LHS = EmitScalarConversion(OpInfo.LHS, LHSTy,
E->getComputationLHSType());
// Expand the binary operator.
Result = (this->*Func)(OpInfo);
-
+
// Convert the result back to the LHS type.
Result = EmitScalarConversion(Result, E->getComputationResultType(), LHSTy);
@@ -2024,7 +2077,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
Builder.SetInsertPoint(contBB);
return LHSLV;
}
-
+
// Store the result value into the LHS lvalue. Bit-fields are handled
// specially because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after the
@@ -2056,7 +2109,7 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS);
+ return EmitLoadOfLValue(LHS, E->getExprLoc());
}
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
@@ -2236,7 +2289,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// Must have binary (not unary) expr here. Unary pointer
// increment/decrement doesn't use this path.
const BinaryOperator *expr = cast<BinaryOperator>(op.E);
-
+
Value *pointer = op.LHS;
Expr *pointerOperand = expr->getLHS();
Value *index = op.RHS;
@@ -2261,7 +2314,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (isSubtraction)
index = CGF.Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts->Bounds)
+ if (CGF.SanOpts->ArrayBounds)
CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
/*Accessed*/ false);
@@ -2325,7 +2378,7 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool negMul, bool negAdd) {
assert(!(negMul && negAdd) && "Only one of negMul and negAdd should be set.");
-
+
Value *MulOp0 = MulOp->getOperand(0);
Value *MulOp1 = MulOp->getOperand(1);
if (negMul) {
@@ -2355,7 +2408,7 @@ static Value* buildFMulAdd(llvm::BinaryOperator *MulOp, Value *Addend,
// Checks that (a) the operation is fusable, and (b) -ffp-contract=on.
// Does NOT check the type of the operation - it's assumed that this function
// will be called from contexts where it's known that the type is contractable.
-static Value* tryEmitFMulAdd(const BinOpInfo &op,
+static Value* tryEmitFMulAdd(const BinOpInfo &op,
const CodeGenFunction &CGF, CGBuilderTy &Builder,
bool isSub=false) {
@@ -2503,7 +2556,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
divisor = CGF.CGM.getSize(elementSize);
}
-
+
// Otherwise, do a full sdiv. This uses the "exact" form of sdiv, since
// pointer difference in C is only defined in the case where both operands
// are pointing to elements of an array.
@@ -2809,7 +2862,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
return RHS;
// Otherwise, reload the value.
- return EmitLoadOfLValue(LHS);
+ return EmitLoadOfLValue(LHS, E->getExprLoc());
}
Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
@@ -2828,9 +2881,9 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
Value *And = Builder.CreateAnd(LHS, RHS);
return Builder.CreateSExt(And, ConvertType(E->getType()), "sext");
}
-
+
llvm::Type *ResTy = ConvertType(E->getType());
-
+
// If we have 0 && RHS, see if we can elide RHS, if so, just return 0.
// If we have 1 && X, just emit X without inserting the control flow.
bool LHSCondVal;
@@ -2899,9 +2952,9 @@ Value *ScalarExprEmitter::VisitBinLOr(const BinaryOperator *E) {
Value *Or = Builder.CreateOr(LHS, RHS);
return Builder.CreateSExt(Or, ConvertType(E->getType()), "sext");
}
-
+
llvm::Type *ResTy = ConvertType(E->getType());
-
+
// If we have 1 || RHS, see if we can elide RHS, if so, just return 1.
// If we have 0 || X, just emit X without inserting the control flow.
bool LHSCondVal;
@@ -2970,22 +3023,15 @@ Value *ScalarExprEmitter::VisitBinComma(const BinaryOperator *E) {
/// flow into selects in some cases.
static bool isCheapEnoughToEvaluateUnconditionally(const Expr *E,
CodeGenFunction &CGF) {
- E = E->IgnoreParens();
-
// Anything that is an integer or floating point constant is fine.
- if (E->isConstantInitializer(CGF.getContext(), false))
- return true;
-
- // Non-volatile automatic variables too, to get "cond ? X : Y" where
- // X and Y are local variables.
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
- if (VD->hasLocalStorage() && !(CGF.getContext()
- .getCanonicalType(VD->getType())
- .isVolatileQualified()))
- return true;
-
- return false;
+ return E->IgnoreParens()->isEvaluatable(CGF.getContext());
+
+ // Even non-volatile automatic variables can't be evaluated unconditionally.
+ // Referencing a thread_local may cause non-trivial initialization work to
+ // occur. If we're inside a lambda and one of the variables is from the scope
+ // outside the lambda, that function may have returned already. Reading its
+  // locals is a bad idea. Also, these reads may introduce races that didn't
+ // exist in the source-level program.
}
@@ -3023,26 +3069,26 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// OpenCL: If the condition is a vector, we can treat this condition like
// the select function.
- if (CGF.getLangOpts().OpenCL
+ if (CGF.getLangOpts().OpenCL
&& condExpr->getType()->isVectorType()) {
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
-
+
llvm::Type *condType = ConvertType(condExpr->getType());
llvm::VectorType *vecTy = cast<llvm::VectorType>(condType);
-
- unsigned numElem = vecTy->getNumElements();
+
+ unsigned numElem = vecTy->getNumElements();
llvm::Type *elemType = vecTy->getElementType();
-
+
llvm::Value *zeroVec = llvm::Constant::getNullValue(vecTy);
llvm::Value *TestMSB = Builder.CreateICmpSLT(CondV, zeroVec);
- llvm::Value *tmp = Builder.CreateSExt(TestMSB,
+ llvm::Value *tmp = Builder.CreateSExt(TestMSB,
llvm::VectorType::get(elemType,
- numElem),
+ numElem),
"sext");
llvm::Value *tmp2 = Builder.CreateNot(tmp);
-
+
// Cast float to int to perform ANDs if necessary.
llvm::Value *RHSTmp = RHS;
llvm::Value *LHSTmp = LHS;
@@ -3053,7 +3099,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
LHSTmp = Builder.CreateBitCast(LHS, tmp->getType());
wasCast = true;
}
-
+
llvm::Value *tmp3 = Builder.CreateAnd(RHSTmp, tmp2);
llvm::Value *tmp4 = Builder.CreateAnd(LHSTmp, tmp);
llvm::Value *tmp5 = Builder.CreateOr(tmp3, tmp4, "cond");
@@ -3062,7 +3108,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return tmp5;
}
-
+
// If this is a really simple expression (like x ? 4 : 5), emit this as a
// select instead of as control flow. We can only do this if it is cheap and
// safe to evaluate the LHS and RHS unconditionally.
@@ -3116,7 +3162,7 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
}
Value *ScalarExprEmitter::VisitChooseExpr(ChooseExpr *E) {
- return Visit(E->getChosenSubExpr(CGF.getContext()));
+ return Visit(E->getChosenSubExpr());
}
Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
@@ -3138,49 +3184,49 @@ Value *ScalarExprEmitter::VisitBlockExpr(const BlockExpr *block) {
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
llvm::Type *DstTy = ConvertType(E->getType());
-
+
// Going from vec4->vec3 or vec3->vec4 is a special case and requires
// a shuffle vector instead of a bitcast.
llvm::Type *SrcTy = Src->getType();
if (isa<llvm::VectorType>(DstTy) && isa<llvm::VectorType>(SrcTy)) {
unsigned numElementsDst = cast<llvm::VectorType>(DstTy)->getNumElements();
unsigned numElementsSrc = cast<llvm::VectorType>(SrcTy)->getNumElements();
- if ((numElementsDst == 3 && numElementsSrc == 4)
+ if ((numElementsDst == 3 && numElementsSrc == 4)
|| (numElementsDst == 4 && numElementsSrc == 3)) {
-
-
+
+
// In the case of going from int4->float3, a bitcast is needed before
// doing a shuffle.
- llvm::Type *srcElemTy =
+ llvm::Type *srcElemTy =
cast<llvm::VectorType>(SrcTy)->getElementType();
- llvm::Type *dstElemTy =
+ llvm::Type *dstElemTy =
cast<llvm::VectorType>(DstTy)->getElementType();
-
+
if ((srcElemTy->isIntegerTy() && dstElemTy->isFloatTy())
|| (srcElemTy->isFloatTy() && dstElemTy->isIntegerTy())) {
// Create a float type of the same size as the source or destination.
llvm::VectorType *newSrcTy = llvm::VectorType::get(dstElemTy,
numElementsSrc);
-
+
Src = Builder.CreateBitCast(Src, newSrcTy, "astypeCast");
}
-
+
llvm::Value *UnV = llvm::UndefValue::get(Src->getType());
-
+
SmallVector<llvm::Constant*, 3> Args;
Args.push_back(Builder.getInt32(0));
Args.push_back(Builder.getInt32(1));
Args.push_back(Builder.getInt32(2));
-
+
if (numElementsDst == 4)
Args.push_back(llvm::UndefValue::get(CGF.Int32Ty));
-
+
llvm::Constant *Mask = llvm::ConstantVector::get(Args);
-
+
return Builder.CreateShuffleVector(Src, UnV, Mask, "astype");
}
}
-
+
return Builder.CreateBitCast(Src, DstTy, "astype");
}
@@ -3248,14 +3294,14 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
llvm::Value *Src = EmitScalarExpr(BaseExpr);
Builder.CreateStore(Src, V);
V = ScalarExprEmitter(*this).EmitLoadOfLValue(
- MakeNaturalAlignAddrLValue(V, E->getType()));
+ MakeNaturalAlignAddrLValue(V, E->getType()), E->getExprLoc());
} else {
if (E->isArrow())
V = ScalarExprEmitter(*this).EmitLoadOfLValue(BaseExpr);
else
V = EmitLValue(BaseExpr).getAddress();
}
-
+
// build Class* type
ClassPtrTy = ClassPtrTy->getPointerTo();
V = Builder.CreateBitCast(V, ClassPtrTy);
@@ -3283,7 +3329,7 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
COMPOUND_OP(Xor);
COMPOUND_OP(Or);
#undef COMPOUND_OP
-
+
case BO_PtrMemD:
case BO_PtrMemI:
case BO_Mul:
@@ -3308,6 +3354,6 @@ LValue CodeGenFunction::EmitCompoundAssignmentLValue(
case BO_Comma:
llvm_unreachable("Not valid compound assignment operators");
}
-
+
llvm_unreachable("Unhandled compound assignment operator");
}
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 713509bf6738..0bda053f35f7 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Diagnostic.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CallSite.h"
#include "llvm/IR/DataLayout.h"
@@ -468,8 +469,8 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
SourceLocation StartLoc) {
FunctionArgList args;
// Check if we should generate debug info for this method.
- if (!OMD->hasAttr<NoDebugAttr>())
- maybeInitializeDebugInfo();
+ if (OMD->hasAttr<NoDebugAttr>())
+ DebugInfo = NULL; // disable debug info indefinitely for this function
llvm::Function *Fn = CGM.getObjCRuntime().GenerateMethod(OMD, CD);
@@ -925,7 +926,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
QualType ivarType = ivar->getType();
switch (getEvaluationKind(ivarType)) {
case TEK_Complex: {
- ComplexPairTy pair = EmitLoadOfComplex(LV);
+ ComplexPairTy pair = EmitLoadOfComplex(LV, SourceLocation());
EmitStoreOfComplex(pair,
MakeNaturalAlignAddrLValue(ReturnValue, ivarType),
/*init*/ true);
@@ -949,7 +950,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// Otherwise we want to do a simple load, suppressing the
// final autorelease.
} else {
- value = EmitLoadOfLValue(LV).getScalarVal();
+ value = EmitLoadOfLValue(LV, SourceLocation()).getScalarVal();
AutoreleaseResult = false;
}
@@ -1048,8 +1049,6 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
FunctionType::ExtInfo(),
RequiredArgs::All),
copyCppAtomicObjectFn, ReturnValueSlot(), args);
-
-
}
@@ -1404,7 +1403,7 @@ llvm::Value *CodeGenFunction::LoadObjCSelf() {
VarDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
DeclRefExpr DRE(Self, /*is enclosing local*/ (CurFuncDecl != CurCodeDecl),
Self->getType(), VK_LValue, SourceLocation());
- return EmitLoadOfScalar(EmitDeclRefLValue(&DRE));
+ return EmitLoadOfScalar(EmitDeclRefLValue(&DRE), SourceLocation());
}
QualType CodeGenFunction::TypeOfSelfObject() {
@@ -2084,7 +2083,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
newValue = EmitARCRetain(type, newValue);
// Read the old value.
- llvm::Value *oldValue = EmitLoadOfScalar(dst);
+ llvm::Value *oldValue = EmitLoadOfScalar(dst, SourceLocation());
// Store. We do this before the release so that any deallocs won't
// see the old value.
@@ -2355,7 +2354,8 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
case Qualifiers::OCL_ExplicitNone:
case Qualifiers::OCL_Strong:
case Qualifiers::OCL_Autoreleasing:
- return TryEmitResult(CGF.EmitLoadOfLValue(lvalue).getScalarVal(),
+ return TryEmitResult(CGF.EmitLoadOfLValue(lvalue,
+ SourceLocation()).getScalarVal(),
false);
case Qualifiers::OCL_Weak:
@@ -2381,7 +2381,8 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
LValue lv = CGF.EmitLValue(e);
// Load the object pointer.
- llvm::Value *result = CGF.EmitLoadOfLValue(lv).getScalarVal();
+ llvm::Value *result = CGF.EmitLoadOfLValue(lv,
+ SourceLocation()).getScalarVal();
// Set the source pointer to NULL.
CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
@@ -2784,8 +2785,7 @@ CodeGenFunction::EmitARCStoreStrong(const BinaryOperator *e,
// If the RHS was emitted retained, expand this.
if (hasImmediateRetain) {
- llvm::Value *oldValue =
- EmitLoadOfScalar(lvalue);
+ llvm::Value *oldValue = EmitLoadOfScalar(lvalue, SourceLocation());
EmitStoreOfScalar(value, lvalue);
EmitARCRelease(oldValue, lvalue.isARCPreciseLifetime());
} else {
@@ -2905,9 +2905,6 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
"__assign_helper_atomic_property_",
&CGM.getModule());
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
-
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
DeclRefExpr DstExpr(&dstDecl, false, DestTy,
@@ -2988,9 +2985,6 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
"__copy_helper_atomic_property_", &CGM.getModule());
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
-
StartFunction(FD, C.VoidTy, Fn, FI, args, SourceLocation());
DeclRefExpr SrcExpr(&srcDecl, false, SrcTy,
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index fbf8a1abb013..a7ab8507014f 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -454,13 +454,15 @@ protected:
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
llvm::Value *cmd,
- llvm::MDNode *node) = 0;
+ llvm::MDNode *node,
+ MessageSendInfo &MSI) = 0;
/// Looks up the method for sending a message to a superclass. This
/// mechanism differs between the GCC and GNU runtimes, so this method must
/// be overridden in subclasses.
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
llvm::Value *ObjCSuper,
- llvm::Value *cmd) = 0;
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) = 0;
/// Libobjc2 uses a bitfield representation where small(ish) bitfields are
/// stored in a 64-bit value with the low bit set to 1 and the remaining 63
/// bits set to their values, LSB first, while larger ones are stored in a
@@ -596,7 +598,8 @@ protected:
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
llvm::Value *cmd,
- llvm::MDNode *node) {
+ llvm::MDNode *node,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *args[] = {
EnforceType(Builder, Receiver, IdTy),
@@ -607,7 +610,8 @@ protected:
}
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
llvm::Value *ObjCSuper,
- llvm::Value *cmd) {
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
PtrToObjCSuperTy), cmd};
@@ -655,7 +659,8 @@ class CGObjCGNUstep : public CGObjCGNU {
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
llvm::Value *cmd,
- llvm::MDNode *node) {
+ llvm::MDNode *node,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Function *LookupFn = SlotLookupFn;
@@ -693,7 +698,8 @@ class CGObjCGNUstep : public CGObjCGNU {
}
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
llvm::Value *ObjCSuper,
- llvm::Value *cmd) {
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {ObjCSuper, cmd};
@@ -790,38 +796,52 @@ class CGObjCGNUstep : public CGObjCGNU {
}
};
-/// Support for the ObjFW runtime. Support here is due to
-/// Jonathan Schleifer <js@webkeks.org>, the ObjFW maintainer.
+/// Support for the ObjFW runtime.
class CGObjCObjFW: public CGObjCGNU {
protected:
/// The GCC ABI message lookup function. Returns an IMP pointing to the
/// method implementation for this message.
LazyRuntimeFunction MsgLookupFn;
+  /// stret lookup function. While this does not seem to make sense at first
+  /// glance, it is required to call the correct forwarding function.
+ LazyRuntimeFunction MsgLookupFnSRet;
/// The GCC ABI superclass message lookup function. Takes a pointer to a
/// structure describing the receiver and the class, and a selector as
/// arguments. Returns the IMP for the corresponding method.
- LazyRuntimeFunction MsgLookupSuperFn;
+ LazyRuntimeFunction MsgLookupSuperFn, MsgLookupSuperFnSRet;
virtual llvm::Value *LookupIMP(CodeGenFunction &CGF,
llvm::Value *&Receiver,
llvm::Value *cmd,
- llvm::MDNode *node) {
+ llvm::MDNode *node,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *args[] = {
EnforceType(Builder, Receiver, IdTy),
EnforceType(Builder, cmd, SelectorTy) };
- llvm::CallSite imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
+
+ llvm::CallSite imp;
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFnSRet, args);
+ else
+ imp = CGF.EmitRuntimeCallOrInvoke(MsgLookupFn, args);
+
imp->setMetadata(msgSendMDKind, node);
return imp.getInstruction();
}
virtual llvm::Value *LookupIMPSuper(CodeGenFunction &CGF,
llvm::Value *ObjCSuper,
- llvm::Value *cmd) {
+ llvm::Value *cmd,
+ MessageSendInfo &MSI) {
CGBuilderTy &Builder = CGF.Builder;
llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
PtrToObjCSuperTy), cmd};
- return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
+
+ if (CGM.ReturnTypeUsesSRet(MSI.CallInfo))
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFnSRet, lookupArgs);
+ else
+ return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
virtual llvm::Value *GetClassNamed(CodeGenFunction &CGF,
@@ -847,9 +867,13 @@ public:
CGObjCObjFW(CodeGenModule &Mod): CGObjCGNU(Mod, 9, 3) {
// IMP objc_msg_lookup(id, SEL);
MsgLookupFn.init(&CGM, "objc_msg_lookup", IMPTy, IdTy, SelectorTy, NULL);
+ MsgLookupFnSRet.init(&CGM, "objc_msg_lookup_stret", IMPTy, IdTy,
+ SelectorTy, NULL);
// IMP objc_msg_lookup_super(struct objc_super*, SEL);
MsgLookupSuperFn.init(&CGM, "objc_msg_lookup_super", IMPTy,
PtrToObjCSuperTy, SelectorTy, NULL);
+ MsgLookupSuperFnSRet.init(&CGM, "objc_msg_lookup_super_stret", IMPTy,
+ PtrToObjCSuperTy, SelectorTy, NULL);
}
};
} // end anonymous namespace
@@ -1041,7 +1065,7 @@ llvm::Value *CGObjCGNU::EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) {
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel,
const std::string &TypeEncoding, bool lval) {
- SmallVector<TypedSelector, 2> &Types = SelectorTable[Sel];
+ SmallVectorImpl<TypedSelector> &Types = SelectorTable[Sel];
llvm::GlobalAlias *SelValue = 0;
@@ -1291,7 +1315,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
// Get the IMP
- llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd);
+ llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Value *impMD[] = {
@@ -1390,7 +1414,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
// given platform), so we
switch (CGM.getCodeGenOpts().getObjCDispatchMethod()) {
case CodeGenOptions::Legacy:
- imp = LookupIMP(CGF, Receiver, cmd, node);
+ imp = LookupIMP(CGF, Receiver, cmd, node, MSI);
break;
case CodeGenOptions::Mixed:
case CodeGenOptions::NonLegacy:
@@ -1414,8 +1438,7 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- 0, &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs, 0, &call);
call->setMetadata(msgSendMDKind, node);
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index e8498b06ad2f..2b2a5b837608 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -23,6 +23,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/LangOptions.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
@@ -4348,7 +4349,7 @@ void CGObjCCommonMac::EmitImageInfo() {
// Indicate whether we're compiling this to run on a simulator.
const llvm::Triple &Triple = CGM.getTarget().getTriple();
- if (Triple.getOS() == llvm::Triple::IOS &&
+ if (Triple.isiOS() &&
(Triple.getArch() == llvm::Triple::x86 ||
Triple.getArch() == llvm::Triple::x86_64))
Mod.addModuleFlag(llvm::Module::Error, "Objective-C Is Simulated",
@@ -5757,6 +5758,9 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassMetaData(
};
if (!Values[1])
Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
+ if (!Values[3])
+ Values[3] = llvm::Constant::getNullValue(
+ llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
Values);
llvm::GlobalVariable *GV = GetClassGlobal(ClassName);
@@ -5800,14 +5804,21 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::GlobalValue::ExternalLinkage,
0,
"_objc_empty_cache");
-
- ObjCEmptyVtableVar = new llvm::GlobalVariable(
- CGM.getModule(),
- ObjCTypes.ImpnfABITy,
- false,
- llvm::GlobalValue::ExternalLinkage,
- 0,
- "_objc_empty_vtable");
+
+  // Make this entry NULL for any iOS device target, any iOS simulator target,
+  // or OS X with a deployment target of 10.9 or later.
+ const llvm::Triple &Triple = CGM.getTarget().getTriple();
+ if (Triple.isiOS() || (Triple.isMacOSX() && !Triple.isMacOSXVersionLT(10, 9)))
+ // This entry will be null.
+ ObjCEmptyVtableVar = 0;
+ else
+ ObjCEmptyVtableVar = new llvm::GlobalVariable(
+ CGM.getModule(),
+ ObjCTypes.ImpnfABITy,
+ false,
+ llvm::GlobalValue::ExternalLinkage,
+ 0,
+ "_objc_empty_vtable");
}
assert(ID->getClassInterface() &&
"CGObjCNonFragileABIMac::GenerateClass - class is 0");
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 9c0d5189f815..d097b6fad2c2 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -20,6 +20,7 @@
#include "CodeGenModule.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtObjC.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/Support/CallSite.h"
using namespace clang;
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 40dc6bfa3b0e..aa687b956098 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -332,6 +332,7 @@ getTypeInfoLinkage(CodeGenModule &CGM, QualType Ty) {
switch (Ty->getLinkage()) {
case NoLinkage:
+ case VisibleNoLinkage:
case InternalLinkage:
case UniqueExternalLinkage:
return llvm::GlobalValue::InternalLinkage;
@@ -507,60 +508,6 @@ void RTTIBuilder::BuildVTablePointer(const Type *Ty) {
Fields.push_back(VTable);
}
-// maybeUpdateRTTILinkage - Will update the linkage of the RTTI data structures
-// from available_externally to the correct linkage if necessary. An example of
-// this is:
-//
-// struct A {
-// virtual void f();
-// };
-//
-// const std::type_info &g() {
-// return typeid(A);
-// }
-//
-// void A::f() { }
-//
-// When we're generating the typeid(A) expression, we do not yet know that
-// A's key function is defined in this translation unit, so we will give the
-// typeinfo and typename structures available_externally linkage. When A::f
-// forces the vtable to be generated, we need to change the linkage of the
-// typeinfo and typename structs, otherwise we'll end up with undefined
-// externals when linking.
-static void
-maybeUpdateRTTILinkage(CodeGenModule &CGM, llvm::GlobalVariable *GV,
- QualType Ty) {
- // We're only interested in globals with available_externally linkage.
- if (!GV->hasAvailableExternallyLinkage())
- return;
-
- // Get the real linkage for the type.
- llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(CGM, Ty);
-
- // If variable is supposed to have available_externally linkage, we don't
- // need to do anything.
- if (Linkage == llvm::GlobalVariable::AvailableExternallyLinkage)
- return;
-
- // Update the typeinfo linkage.
- GV->setLinkage(Linkage);
-
- // Get the typename global.
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
- CGM.getCXXABI().getMangleContext().mangleCXXRTTIName(Ty, Out);
- Out.flush();
- StringRef Name = OutName.str();
-
- llvm::GlobalVariable *TypeNameGV = CGM.getModule().getNamedGlobal(Name);
-
- assert(TypeNameGV->hasAvailableExternallyLinkage() &&
- "Type name has different linkage from type info!");
-
- // And update its linkage.
- TypeNameGV->setLinkage(Linkage);
-}
-
llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
@@ -574,7 +521,8 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
llvm::GlobalVariable *OldGV = CGM.getModule().getNamedGlobal(Name);
if (OldGV && !OldGV->isDeclaration()) {
- maybeUpdateRTTILinkage(CGM, OldGV, Ty);
+ assert(!OldGV->hasAvailableExternallyLinkage() &&
+ "available_externally typeinfos not yet implemented");
return llvm::ConstantExpr::getBitCast(OldGV, CGM.Int8PtrTy);
}
@@ -898,7 +846,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
CharUnits Offset;
if (Base->isVirtual())
Offset =
- CGM.getVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ CGM.getItaniumVTableContext().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
Offset = Layout.getBaseClassOffset(BaseDecl);
@@ -1024,6 +972,6 @@ void CodeGenModule::EmitFundamentalRTTIDescriptors() {
Context.UnsignedLongLongTy, Context.FloatTy,
Context.DoubleTy, Context.LongDoubleTy,
Context.Char16Ty, Context.Char32Ty };
- for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ for (unsigned i = 0; i < llvm::array_lengthof(FundamentalTypes); ++i)
EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 30ab528ffbe4..ab92563b21f3 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -78,9 +78,6 @@ public:
/// Packed - Whether the resulting LLVM struct will be packed or not.
bool Packed;
-
- /// IsMsStruct - Whether ms_struct is in effect or not
- bool IsMsStruct;
private:
CodeGenTypes &Types;
@@ -117,7 +114,7 @@ private:
RecordDecl::field_iterator &FI,
RecordDecl::field_iterator FE);
- /// LayoutField - try to layout all fields in the record decl.
+ /// LayoutFields - try to layout all fields in the record decl.
/// Returns false if the operation failed because the struct is not packed.
bool LayoutFields(const RecordDecl *D);
@@ -195,8 +192,7 @@ public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
: BaseSubobjectType(0),
IsZeroInitializable(true), IsZeroInitializableAsBase(true),
- Packed(false), IsMsStruct(false),
- Types(Types) { }
+ Packed(false), Types(Types) { }
/// Layout - Will layout a RecordDecl.
void Layout(const RecordDecl *D);
@@ -205,10 +201,9 @@ public:
}
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
- Alignment = Types.getContext().getASTRecordLayout(D).getAlignment();
- Packed = D->hasAttr<PackedAttr>();
-
- IsMsStruct = D->isMsStruct(Types.getContext());
+ const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
+ Alignment = Layout.getAlignment();
+ Packed = D->hasAttr<PackedAttr>() || Layout.getSize() % Alignment != 0;
if (D->isUnion()) {
LayoutUnion(D);
@@ -702,7 +697,7 @@ CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
}
// Add a vb-table pointer if the layout insists.
- if (Layout.getVBPtrOffset() != CharUnits::fromQuantity(-1)) {
+ if (Layout.hasOwnVBPtr()) {
CharUnits VBPtrOffset = Layout.getVBPtrOffset();
llvm::Type *Vbptr = llvm::Type::getInt32PtrTy(Types.getLLVMContext());
AppendPadding(VBPtrOffset, getTypeAlignment(Vbptr));
@@ -764,20 +759,10 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
return false;
unsigned FieldNo = 0;
- const FieldDecl *LastFD = 0;
for (RecordDecl::field_iterator FI = D->field_begin(), FE = D->field_end();
FI != FE; ++FI, ++FieldNo) {
FieldDecl *FD = *FI;
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are
- // ignored:
- if (Types.getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
- --FieldNo;
- continue;
- }
- LastFD = FD;
- }
// If this field is a bitfield, layout all of the consecutive
// non-zero-length bitfields and the last zero-length bitfield; these will
@@ -992,11 +977,11 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
// Dump the layout, if requested.
if (getContext().getLangOpts().DumpRecordLayouts) {
- llvm::errs() << "\n*** Dumping IRgen Record Layout\n";
- llvm::errs() << "Record: ";
- D->dump();
- llvm::errs() << "\nLayout: ";
- RL->dump();
+ llvm::outs() << "\n*** Dumping IRgen Record Layout\n";
+ llvm::outs() << "Record: ";
+ D->dump(llvm::outs());
+ llvm::outs() << "\nLayout: ";
+ RL->print(llvm::outs());
}
#ifndef NDEBUG
@@ -1028,8 +1013,6 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
const ASTRecordLayout &AST_RL = getContext().getASTRecordLayout(D);
RecordDecl::field_iterator it = D->field_begin();
- const FieldDecl *LastFD = 0;
- bool IsMsStruct = D->isMsStruct(getContext());
for (unsigned i = 0, e = AST_RL.getFieldCount(); i != e; ++i, ++it) {
const FieldDecl *FD = *it;
@@ -1039,25 +1022,12 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D,
unsigned FieldNo = RL->getLLVMFieldNo(FD);
assert(AST_RL.getFieldOffset(i) == SL->getElementOffsetInBits(FieldNo) &&
"Invalid field offset!");
- LastFD = FD;
continue;
}
-
- if (IsMsStruct) {
- // Zero-length bitfields following non-bitfield members are
- // ignored:
- if (getContext().ZeroBitfieldFollowsNonBitfield(FD, LastFD)) {
- --i;
- continue;
- }
- LastFD = FD;
- }
// Ignore unnamed bit-fields.
- if (!FD->getDeclName()) {
- LastFD = FD;
+ if (!FD->getDeclName())
continue;
- }
// Don't inspect zero-length bitfields.
if (FD->getBitWidthValue(getContext()) == 0)
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index 5e2ebe0d9cd4..0bc51ddb5178 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -16,12 +16,14 @@
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
+#include "llvm/Support/CallSite.h"
using namespace clang;
using namespace CodeGen;
@@ -32,14 +34,10 @@ using namespace CodeGen;
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
SourceLocation Loc;
- if (isa<DeclStmt>(S))
- Loc = S->getLocEnd();
- else
- Loc = S->getLocStart();
+ Loc = S->getLocStart();
DI->EmitLocation(Builder, Loc);
- //if (++NumStopPoints == 1)
- LastStopPoint = Loc;
+ LastStopPoint = Loc;
}
}
@@ -77,6 +75,7 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
case Stmt::MSDependentExistsStmtClass:
+ case Stmt::OMPParallelDirectiveClass:
llvm_unreachable("invalid statement class to emit generically");
case Stmt::NullStmtClass:
case Stmt::CompoundStmtClass:
@@ -137,8 +136,10 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
case Stmt::GCCAsmStmtClass: // Intentional fall-through.
case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
- case Stmt::CapturedStmtClass:
- EmitCapturedStmt(cast<CapturedStmt>(*S));
+ case Stmt::CapturedStmtClass: {
+ const CapturedStmt *CS = cast<CapturedStmt>(S);
+ EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
+ }
break;
case Stmt::ObjCAtTryStmtClass:
EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
@@ -167,8 +168,9 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
break;
case Stmt::CXXForRangeStmtClass:
EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S));
+ break;
case Stmt::SEHTryStmtClass:
- // FIXME Not yet implemented
+ EmitSEHTryStmt(cast<SEHTryStmt>(*S));
break;
}
}
@@ -195,8 +197,8 @@ bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
-RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
- AggValueSlot AggSlot) {
+llvm::Value* CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
+ AggValueSlot AggSlot) {
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
"LLVM IR generation of compound statement ('{}')");
@@ -206,17 +208,17 @@ RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}
-RValue CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool GetLast,
- AggValueSlot AggSlot) {
+llvm::Value*
+CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
+ bool GetLast,
+ AggValueSlot AggSlot) {
for (CompoundStmt::const_body_iterator I = S.body_begin(),
E = S.body_end()-GetLast; I != E; ++I)
EmitStmt(*I);
- RValue RV;
- if (!GetLast)
- RV = RValue::get(0);
- else {
+ llvm::Value *RetAlloca = 0;
+ if (GetLast) {
// We have to special case labels here. They are statements, but when put
// at the end of a statement expression, they yield the value of their
// subexpression. Handle this by walking through all labels we encounter,
@@ -229,10 +231,21 @@ RValue CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S, bool
EnsureInsertPoint();
- RV = EmitAnyExpr(cast<Expr>(LastStmt), AggSlot);
+ QualType ExprTy = cast<Expr>(LastStmt)->getType();
+ if (hasAggregateEvaluationKind(ExprTy)) {
+ EmitAggExpr(cast<Expr>(LastStmt), AggSlot);
+ } else {
+ // We can't return an RValue here because there might be cleanups at
+ // the end of the StmtExpr. Because of that, we have to emit the result
+ // here into a temporary alloca.
+ RetAlloca = CreateMemTemp(ExprTy);
+ EmitAnyExprToMem(cast<Expr>(LastStmt), RetAlloca, Qualifiers(),
+ /*IsInit*/false);
+ }
+
}
- return RV;
+ return RetAlloca;
}
void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
@@ -416,7 +429,7 @@ void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
// C99 6.8.4.1: The first substatement is executed if the expression compares
// unequal to 0. The condition must be a scalar type.
- RunCleanupsScope ConditionScope(*this);
+ LexicalScope ConditionScope(*this, S.getSourceRange());
if (S.getConditionVariable())
EmitAutoVarDecl(*S.getConditionVariable());
@@ -626,15 +639,14 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// Create a cleanup scope for the condition variable cleanups.
RunCleanupsScope ConditionScope(*this);
- llvm::Value *BoolCondVal = 0;
if (S.getCond()) {
// If the for statement has a condition scope, emit the local variable
// declaration.
- llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
if (S.getConditionVariable()) {
EmitAutoVarDecl(*S.getConditionVariable());
}
+ llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
// If there are any cleanups between here and the loop-exit scope,
// create a block to stage a loop exit along.
if (ForScope.requiresCleanups())
@@ -645,8 +657,7 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
// C99 6.8.5p2/p4: The first substatement is executed if the expression
// compares unequal to 0. The condition must be a scalar type.
- BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+ EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
@@ -726,8 +737,7 @@ void CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S) {
// The body is executed if the expression, contextually converted
// to bool, is true.
- llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
- Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);
+ EmitBranchOnBoolExpr(S.getCond(), ForBody, ExitBlock);
if (ExitBlock != LoopExit.getBlock()) {
EmitBlock(ExitBlock);
@@ -818,7 +828,7 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (FnRetTy->isReferenceType()) {
// If this function returns a reference, take the address of the expression
// rather than the value.
- RValue Result = EmitReferenceBindingToExpr(RV, /*InitializedDecl=*/0);
+ RValue Result = EmitReferenceBindingToExpr(RV);
Builder.CreateStore(Result.getScalarVal(), ReturnValue);
} else {
switch (getEvaluationKind(RV->getType())) {
@@ -842,9 +852,9 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
}
}
- NumReturnExprs += 1;
+ ++NumReturnExprs;
if (RV == 0 || RV->isEvaluatable(getContext()))
- NumSimpleReturnExprs += 1;
+ ++NumSimpleReturnExprs;
cleanupScope.ForceCleanup();
EmitBranchThroughCleanup(ReturnBlock);
@@ -1334,7 +1344,7 @@ SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
break;
case '#': // Ignore the rest of the constraint alternative.
while (Constraint[1] && Constraint[1] != ',')
- Constraint++;
+ Constraint++;
break;
case ',':
Result += "|";
@@ -1398,11 +1408,12 @@ AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
LValue InputValue, QualType InputType,
- std::string &ConstraintStr) {
+ std::string &ConstraintStr,
+ SourceLocation Loc) {
llvm::Value *Arg;
if (Info.allowsRegister() || !Info.allowsMemory()) {
if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
- Arg = EmitLoadOfLValue(InputValue).getScalarVal();
+ Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
} else {
llvm::Type *Ty = ConvertType(InputType);
uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
@@ -1435,7 +1446,8 @@ llvm::Value* CodeGenFunction::EmitAsmInput(
InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
LValue Dest = EmitLValue(InputExpr);
- return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr);
+ return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
+ InputExpr->getExprLoc());
}
/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
@@ -1559,10 +1571,15 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
ResultRegTypes.back() = ConvertType(InputTy);
}
}
- if (llvm::Type* AdjTy =
+ if (llvm::Type* AdjTy =
getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
ResultRegTypes.back()))
ResultRegTypes.back() = AdjTy;
+ else {
+ CGM.getDiags().Report(S.getAsmLoc(),
+ diag::err_asm_invalid_type_in_input)
+ << OutExpr->getType() << OutputConstraint;
+ }
} else {
ArgTypes.push_back(Dest.getAddress()->getType());
Args.push_back(Dest.getAddress());
@@ -1575,11 +1592,12 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
const Expr *InputExpr = S.getOutputExpr(i);
llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
- InOutConstraints);
+ InOutConstraints,
+ InputExpr->getExprLoc());
if (llvm::Type* AdjTy =
- getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
- Arg->getType()))
+ getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
+ Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
if (Info.allowsRegister())
@@ -1644,6 +1662,9 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
getTargetHooks().adjustInlineAsmType(*this, InputConstraint,
Arg->getType()))
Arg = Builder.CreateBitCast(Arg, AdjTy);
+ else
+ CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
+ << InputExpr->getType() << InputConstraint;
ArgTypes.push_back(Arg->getType());
Args.push_back(Arg);
@@ -1751,6 +1772,91 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
}
}
-void CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S) {
- llvm_unreachable("not implemented yet");
+static LValue InitCapturedStruct(CodeGenFunction &CGF, const CapturedStmt &S) {
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ QualType RecordTy = CGF.getContext().getRecordType(RD);
+
+ // Initialize the captured struct.
+ LValue SlotLV = CGF.MakeNaturalAlignAddrLValue(
+ CGF.CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
+
+ RecordDecl::field_iterator CurField = RD->field_begin();
+ for (CapturedStmt::capture_init_iterator I = S.capture_init_begin(),
+ E = S.capture_init_end();
+ I != E; ++I, ++CurField) {
+ LValue LV = CGF.EmitLValueForFieldInitialization(SlotLV, *CurField);
+ CGF.EmitInitializerForField(*CurField, LV, *I, ArrayRef<VarDecl *>());
+ }
+
+ return SlotLV;
+}
+
+/// Generate an outlined function for the body of a CapturedStmt, store any
+/// captured variables into the captured struct, and call the outlined function.
+llvm::Function *
+CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
+ const CapturedDecl *CD = S.getCapturedDecl();
+ const RecordDecl *RD = S.getCapturedRecordDecl();
+ assert(CD->hasBody() && "missing CapturedDecl body");
+
+ LValue CapStruct = InitCapturedStruct(*this, S);
+
+ // Emit the CapturedDecl
+ CodeGenFunction CGF(CGM, true);
+ CGF.CapturedStmtInfo = new CGCapturedStmtInfo(S, K);
+ llvm::Function *F = CGF.GenerateCapturedStmtFunction(CD, RD, S.getLocStart());
+ delete CGF.CapturedStmtInfo;
+
+ // Emit call to the helper function.
+ EmitCallOrInvoke(F, CapStruct.getAddress());
+
+ return F;
+}
+
+/// Creates the outlined function for a CapturedStmt.
+llvm::Function *
+CodeGenFunction::GenerateCapturedStmtFunction(const CapturedDecl *CD,
+ const RecordDecl *RD,
+ SourceLocation Loc) {
+ assert(CapturedStmtInfo &&
+ "CapturedStmtInfo should be set when generating the captured function");
+
+ // Build the argument list.
+ ASTContext &Ctx = CGM.getContext();
+ FunctionArgList Args;
+ Args.append(CD->param_begin(), CD->param_end());
+
+ // Create the function declaration.
+ FunctionType::ExtInfo ExtInfo;
+ const CGFunctionInfo &FuncInfo =
+ CGM.getTypes().arrangeFunctionDeclaration(Ctx.VoidTy, Args, ExtInfo,
+ /*IsVariadic=*/false);
+ llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
+
+ llvm::Function *F =
+ llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
+ CapturedStmtInfo->getHelperName(), &CGM.getModule());
+ CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
+
+ // Generate the function.
+ StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getBody()->getLocStart());
+
+ // Set the context parameter in CapturedStmtInfo.
+ llvm::Value *DeclPtr = LocalDeclMap[CD->getContextParam()];
+ assert(DeclPtr && "missing context parameter for CapturedStmt");
+ CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
+
+ // If 'this' is captured, load it into CXXThisValue.
+ if (CapturedStmtInfo->isCXXThisExprCaptured()) {
+ FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
+ LValue LV = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
+ Ctx.getTagDeclType(RD));
+ LValue ThisLValue = EmitLValueForField(LV, FD);
+ CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
+ }
+
+ CapturedStmtInfo->EmitBody(*this, CD->getBody());
+ FinishFunction(CD->getBodyRBrace());
+
+ return F;
}
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 98be872a5525..bfff47058897 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -19,7 +19,8 @@ using namespace clang;
using namespace CodeGen;
static llvm::Constant *
-GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
+GetAddrOfVTTVTable(CodeGenVTables &CGVT, CodeGenModule &CGM,
+ const CXXRecordDecl *MostDerivedClass,
const VTTVTable &VTable,
llvm::GlobalVariable::LinkageTypes Linkage,
llvm::DenseMap<BaseSubobject, uint64_t> &AddressPoints) {
@@ -27,7 +28,7 @@ GetAddrOfVTTVTable(CodeGenVTables &CGVT, const CXXRecordDecl *MostDerivedClass,
assert(VTable.getBaseOffset().isZero() &&
"Most derived class vtable must have a zero offset!");
// This is a regular vtable.
- return CGVT.GetAddrOfVTable(MostDerivedClass);
+ return CGM.getCXXABI().getAddrOfVTable(MostDerivedClass, CharUnits());
}
return CGVT.GenerateConstructionVTable(MostDerivedClass,
@@ -52,7 +53,7 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
for (const VTTVTable *i = Builder.getVTTVTables().begin(),
*e = Builder.getVTTVTables().end(); i != e; ++i) {
VTableAddressPoints.push_back(VTableAddressPointsMapTy());
- VTables.push_back(GetAddrOfVTTVTable(*this, RD, *i, Linkage,
+ VTables.push_back(GetAddrOfVTTVTable(*this, CGM, RD, *i, Linkage,
VTableAddressPoints.back()));
}
@@ -64,8 +65,8 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
uint64_t AddressPoint;
if (VTTVT.getBase() == RD) {
// Just get the address point for the regular vtable.
- AddressPoint = VTContext.getVTableLayout(RD)
- .getAddressPoint(i->VTableBase);
+ AddressPoint =
+ ItaniumVTContext.getVTableLayout(RD).getAddressPoint(i->VTableBase);
assert(AddressPoint != 0 && "Did not find vtable address point!");
} else {
AddressPoint = VTableAddressPoints[i->VTableIndex].lookup(i->VTableBase);
@@ -101,12 +102,13 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
- CGM.getCXXABI().getMangleContext().mangleCXXVTT(RD, Out);
+ cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
+ .mangleCXXVTT(RD, Out);
Out.flush();
StringRef Name = OutName.str();
// This will also defer the definition of the VTT.
- (void) GetAddrOfVTable(RD);
+ (void) CGM.getCXXABI().getAddrOfVTable(RD, CharUnits());
VTTBuilder Builder(CGM.getContext(), RD, /*GenerateDefinition=*/false);
@@ -120,24 +122,6 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
return GV;
}
-bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // We don't have any virtual bases, just return early.
- if (!MD->getParent()->getNumVBases())
- return false;
-
- // Check if we have a base constructor.
- if (isa<CXXConstructorDecl>(MD) && GD.getCtorType() == Ctor_Base)
- return true;
-
- // Check if we have a base destructor.
- if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
- return true;
-
- return false;
-}
-
uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
BaseSubobject Base) {
BaseSubobjectPairTy ClassSubobjectPair(RD, Base);
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 069cd5f9e738..f28d9b67a8f3 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -16,6 +16,7 @@
#include "CodeGenModule.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
@@ -29,7 +30,14 @@ using namespace clang;
using namespace CodeGen;
CodeGenVTables::CodeGenVTables(CodeGenModule &CGM)
- : CGM(CGM), VTContext(CGM.getContext()) { }
+ : CGM(CGM), ItaniumVTContext(CGM.getContext()) {
+ if (CGM.getTarget().getCXXABI().isMicrosoft()) {
+ // FIXME: Eventually, we should only have one of V*TContexts available.
+ // Today we use both in the Microsoft ABI as MicrosoftVFTableContext
+ // is not completely supported in CodeGen yet.
+ MicrosoftVTContext.reset(new MicrosoftVTableContext(CGM.getContext()));
+ }
+}
llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
const ThunkInfo &Thunk) {
@@ -49,53 +57,6 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
return GetOrCreateLLVMFunction(Name, Ty, GD, /*ForVTable=*/true);
}
-static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
- llvm::Value *Ptr,
- int64_t NonVirtualAdjustment,
- int64_t VirtualAdjustment,
- bool IsReturnAdjustment) {
- if (!NonVirtualAdjustment && !VirtualAdjustment)
- return Ptr;
-
- llvm::Type *Int8PtrTy = CGF.Int8PtrTy;
- llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
-
- if (NonVirtualAdjustment && !IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a base-to-derived cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
- }
-
- if (VirtualAdjustment) {
- llvm::Type *PtrDiffTy =
- CGF.ConvertType(CGF.getContext().getPointerDiffType());
-
- // Perform the virtual adjustment.
- llvm::Value *VTablePtrPtr =
- CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
-
- llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
-
- llvm::Value *OffsetPtr =
- CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
-
- OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
-
- // Load the adjustment offset from the vtable.
- llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
-
- // Adjust our pointer.
- V = CGF.Builder.CreateInBoundsGEP(V, Offset);
- }
-
- if (NonVirtualAdjustment && IsReturnAdjustment) {
- // Perform the non-virtual adjustment for a derived-to-base cast.
- V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
- }
-
- // Cast back to the original type.
- return CGF.Builder.CreateBitCast(V, Ptr->getType());
-}
-
static void setThunkVisibility(CodeGenModule &CGM, const CXXMethodDecl *MD,
const ThunkInfo &Thunk, llvm::Function *Fn) {
CGM.setGlobalVisibility(Fn, MD);
@@ -174,12 +135,10 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
CGF.Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
CGF.EmitBlock(AdjustNotNull);
}
-
- ReturnValue = PerformTypeAdjustment(CGF, ReturnValue,
- Thunk.Return.NonVirtual,
- Thunk.Return.VBaseOffsetOffset,
- /*IsReturnAdjustment*/true);
-
+
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF, ReturnValue,
+ Thunk.Return);
+
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
CGF.EmitBlock(AdjustNull);
@@ -259,11 +218,8 @@ void CodeGenFunction::GenerateVarArgsThunk(
assert(ThisStore && "Store of this should be in entry block?");
// Adjust "this", if necessary.
Builder.SetInsertPoint(ThisStore);
- llvm::Value *AdjustedThisPtr =
- PerformTypeAdjustment(*this, ThisPtr,
- Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset,
- /*IsReturnAdjustment*/false);
+ llvm::Value *AdjustedThisPtr =
+ CGM.getCXXABI().performThisAdjustment(*this, ThisPtr, Thunk.This);
ThisStore->setOperand(0, AdjustedThisPtr);
if (!Thunk.Return.isEmpty()) {
@@ -282,94 +238,99 @@ void CodeGenFunction::GenerateVarArgsThunk(
}
}
-void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
- const CGFunctionInfo &FnInfo,
- GlobalDecl GD, const ThunkInfo &Thunk) {
+void CodeGenFunction::StartThunk(llvm::Function *Fn, GlobalDecl GD,
+ const CGFunctionInfo &FnInfo) {
+ assert(!CurGD.getDecl() && "CurGD was already set!");
+ CurGD = GD;
+
+ // Build FunctionArgs.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- QualType ResultType = FPT->getResultType();
QualType ThisType = MD->getThisType(getContext());
-
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType =
+ CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getResultType();
FunctionArgList FunctionArgs;
- // FIXME: It would be nice if more of this code could be shared with
- // CodeGenFunction::GenerateCode.
-
// Create the implicit 'this' parameter declaration.
- CurGD = GD;
CGM.getCXXABI().BuildInstanceFunctionParams(*this, ResultType, FunctionArgs);
// Add the rest of the parameters.
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
- E = MD->param_end(); I != E; ++I) {
- ParmVarDecl *Param = *I;
-
- FunctionArgs.push_back(Param);
- }
-
- // Initialize debug info if needed.
- maybeInitializeDebugInfo();
+ E = MD->param_end();
+ I != E; ++I)
+ FunctionArgs.push_back(*I);
+ // Start defining the function.
StartFunction(GlobalDecl(), ResultType, Fn, FnInfo, FunctionArgs,
SourceLocation());
+ // Since we didn't pass a GlobalDecl to StartFunction, do this ourselves.
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
CXXThisValue = CXXABIThisValue;
+}
- // Adjust the 'this' pointer if necessary.
- llvm::Value *AdjustedThisPtr =
- PerformTypeAdjustment(*this, LoadCXXThis(),
- Thunk.This.NonVirtual,
- Thunk.This.VCallOffsetOffset,
- /*IsReturnAdjustment*/false);
-
+void CodeGenFunction::EmitCallAndReturnForThunk(GlobalDecl GD,
+ llvm::Value *Callee,
+ const ThunkInfo *Thunk) {
+ assert(isa<CXXMethodDecl>(CurGD.getDecl()) &&
+ "Please use a new CGF for this thunk");
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Adjust the 'this' pointer if necessary
+ llvm::Value *AdjustedThisPtr = Thunk ? CGM.getCXXABI().performThisAdjustment(
+ *this, LoadCXXThis(), Thunk->This)
+ : LoadCXXThis();
+
+ // Start building CallArgs.
CallArgList CallArgs;
-
- // Add our adjusted 'this' pointer.
+ QualType ThisType = MD->getThisType(getContext());
CallArgs.add(RValue::get(AdjustedThisPtr), ThisType);
- // Add the rest of the parameters.
+ if (isa<CXXDestructorDecl>(MD))
+ CGM.getCXXABI().adjustCallArgsForDestructorThunk(*this, GD, CallArgs);
+
+ // Add the rest of the arguments.
for (FunctionDecl::param_const_iterator I = MD->param_begin(),
- E = MD->param_end(); I != E; ++I) {
- ParmVarDecl *param = *I;
- EmitDelegateCallArg(CallArgs, param);
- }
+ E = MD->param_end(); I != E; ++I)
+ EmitDelegateCallArg(CallArgs, *I, (*I)->getLocStart());
- // Get our callee.
- llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
#ifndef NDEBUG
const CGFunctionInfo &CallFnInfo =
CGM.getTypes().arrangeCXXMethodCall(CallArgs, FPT,
RequiredArgs::forPrototypePlus(FPT, 1));
- assert(CallFnInfo.getRegParm() == FnInfo.getRegParm() &&
- CallFnInfo.isNoReturn() == FnInfo.isNoReturn() &&
- CallFnInfo.getCallingConvention() == FnInfo.getCallingConvention());
+ assert(CallFnInfo.getRegParm() == CurFnInfo->getRegParm() &&
+ CallFnInfo.isNoReturn() == CurFnInfo->isNoReturn() &&
+ CallFnInfo.getCallingConvention() == CurFnInfo->getCallingConvention());
assert(isa<CXXDestructorDecl>(MD) || // ignore dtor return types
similar(CallFnInfo.getReturnInfo(), CallFnInfo.getReturnType(),
- FnInfo.getReturnInfo(), FnInfo.getReturnType()));
- assert(CallFnInfo.arg_size() == FnInfo.arg_size());
- for (unsigned i = 0, e = FnInfo.arg_size(); i != e; ++i)
+ CurFnInfo->getReturnInfo(), CurFnInfo->getReturnType()));
+ assert(CallFnInfo.arg_size() == CurFnInfo->arg_size());
+ for (unsigned i = 0, e = CurFnInfo->arg_size(); i != e; ++i)
assert(similar(CallFnInfo.arg_begin()[i].info,
CallFnInfo.arg_begin()[i].type,
- FnInfo.arg_begin()[i].info, FnInfo.arg_begin()[i].type));
+ CurFnInfo->arg_begin()[i].info,
+ CurFnInfo->arg_begin()[i].type));
#endif
-
+
// Determine whether we have a return value slot to use.
+ QualType ResultType =
+ CGM.getCXXABI().HasThisReturn(GD) ? ThisType : FPT->getResultType();
ReturnValueSlot Slot;
if (!ResultType->isVoidType() &&
- FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
!hasScalarEvaluationKind(CurFnInfo->getReturnType()))
Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
// Now emit our call.
- RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
+ RValue RV = EmitCall(*CurFnInfo, Callee, Slot, CallArgs, MD);
- if (!Thunk.Return.isEmpty())
- RV = PerformReturnAdjustment(*this, ResultType, RV, Thunk);
+ // Consider return adjustment if we have ThunkInfo.
+ if (Thunk && !Thunk->Return.isEmpty())
+ RV = PerformReturnAdjustment(*this, ResultType, RV, *Thunk);
+ // Emit return.
if (!ResultType->isVoidType() && Slot.isNull())
CGM.getCXXABI().EmitReturnFromThunk(*this, RV, ResultType);
@@ -377,17 +338,31 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
AutoreleaseResult = false;
FinishFunction();
+}
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn,
+ const CGFunctionInfo &FnInfo,
+ GlobalDecl GD, const ThunkInfo &Thunk) {
+ StartThunk(Fn, GD, FnInfo);
+
+ // Get our callee.
+ llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().arrangeGlobalDeclaration(GD));
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty, /*ForVTable=*/true);
+
+ // Make the call and return the result.
+ EmitCallAndReturnForThunk(GD, Callee, &Thunk);
// Set the right linkage.
- CGM.setFunctionLinkage(MD, Fn);
+ CGM.setFunctionLinkage(GD, Fn);
// Set the right visibility.
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
setThunkVisibility(CGM, MD, Thunk, Fn);
}
-void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
- bool UseAvailableExternallyLinkage)
-{
+void CodeGenVTables::emitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
+ bool ForVTable) {
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeGlobalDeclaration(GD);
// FIXME: re-use FnInfo in this computation.
@@ -425,19 +400,17 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
}
llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+ bool ABIHasKeyFunctions = CGM.getTarget().getCXXABI().hasKeyFunctions();
+ bool UseAvailableExternallyLinkage = ForVTable && ABIHasKeyFunctions;
if (!ThunkFn->isDeclaration()) {
- if (UseAvailableExternallyLinkage) {
+ if (!ABIHasKeyFunctions || UseAvailableExternallyLinkage) {
// There is already a thunk emitted for this function, do nothing.
return;
}
- // If a function has a body, it should have available_externally linkage.
- assert(ThunkFn->hasAvailableExternallyLinkage() &&
- "Function should have available_externally linkage!");
-
// Change the linkage.
- CGM.setFunctionLinkage(cast<CXXMethodDecl>(GD.getDecl()), ThunkFn);
+ CGM.setFunctionLinkage(GD, ThunkFn);
return;
}
@@ -449,30 +422,34 @@ void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
// expensive/sucky at the moment, so don't generate the thunk unless
// we have to.
// FIXME: Do something better here; GenerateVarArgsThunk is extremely ugly.
- if (!UseAvailableExternallyLinkage)
+ if (!UseAvailableExternallyLinkage) {
CodeGenFunction(CGM).GenerateVarArgsThunk(ThunkFn, FnInfo, GD, Thunk);
+ CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable);
+ }
} else {
// Normal thunk body generation.
CodeGenFunction(CGM).GenerateThunk(ThunkFn, FnInfo, GD, Thunk);
+ CGM.getCXXABI().setThunkLinkage(ThunkFn, ForVTable);
}
-
- if (UseAvailableExternallyLinkage)
- ThunkFn->setLinkage(llvm::GlobalValue::AvailableExternallyLinkage);
}
-void CodeGenVTables::MaybeEmitThunkAvailableExternally(GlobalDecl GD,
- const ThunkInfo &Thunk) {
- // We only want to do this when building with optimizations.
- if (!CGM.getCodeGenOpts().OptimizationLevel)
+void CodeGenVTables::maybeEmitThunkForVTable(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ // If the ABI has key functions, only the TU with the key function should emit
+ // the thunk. However, we can allow inlining of thunks if we emit them with
+ // available_externally linkage together with vtables when optimizations are
+ // enabled.
+ if (CGM.getTarget().getCXXABI().hasKeyFunctions() &&
+ !CGM.getCodeGenOpts().OptimizationLevel)
return;
// We can't emit thunks for member functions with incomplete types.
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
if (!CGM.getTypes().isFuncTypeConvertible(
- cast<FunctionType>(MD->getType().getTypePtr())))
+ MD->getType()->castAs<FunctionType>()))
return;
- EmitThunk(GD, Thunk, /*UseAvailableExternallyLinkage=*/true);
+ emitThunk(GD, Thunk, /*ForVTable=*/true);
}
void CodeGenVTables::EmitThunks(GlobalDecl GD)
@@ -484,14 +461,18 @@ void CodeGenVTables::EmitThunks(GlobalDecl GD)
if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
return;
- const VTableContext::ThunkInfoVectorTy *ThunkInfoVector =
- VTContext.getThunkInfo(MD);
+ const VTableContextBase::ThunkInfoVectorTy *ThunkInfoVector;
+ if (MicrosoftVTContext.isValid()) {
+ ThunkInfoVector = MicrosoftVTContext->getThunkInfo(GD);
+ } else {
+ ThunkInfoVector = ItaniumVTContext.getThunkInfo(GD);
+ }
+
if (!ThunkInfoVector)
return;
for (unsigned I = 0, E = ThunkInfoVector->size(); I != E; ++I)
- EmitThunk(GD, (*ThunkInfoVector)[I],
- /*UseAvailableExternallyLinkage=*/false);
+ emitThunk(GD, (*ThunkInfoVector)[I], /*ForVTable=*/false);
}
llvm::Constant *
@@ -586,7 +567,7 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
VTableThunks[NextVTableThunkIndex].first == I) {
const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
- MaybeEmitThunkAvailableExternally(GD, Thunk);
+ maybeEmitThunkForVTable(GD, Thunk);
Init = CGM.GetAddrOfThunk(GD, Thunk);
NextVTableThunkIndex++;
@@ -613,63 +594,18 @@ CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
return llvm::ConstantArray::get(ArrayType, Inits);
}
-llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
- llvm::GlobalVariable *&VTable = VTables[RD];
- if (VTable)
- return VTable;
-
- // Queue up this v-table for possible deferred emission.
- CGM.addDeferredVTable(RD);
-
- SmallString<256> OutName;
- llvm::raw_svector_ostream Out(OutName);
- CGM.getCXXABI().getMangleContext().mangleCXXVTable(RD, Out);
- Out.flush();
- StringRef Name = OutName.str();
-
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(CGM.Int8PtrTy,
- VTContext.getVTableLayout(RD).getNumVTableComponents());
-
- VTable =
- CGM.CreateOrReplaceCXXRuntimeVariable(Name, ArrayType,
- llvm::GlobalValue::ExternalLinkage);
- VTable->setUnnamedAddr(true);
- return VTable;
-}
-
-void
-CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
- llvm::GlobalVariable::LinkageTypes Linkage,
- const CXXRecordDecl *RD) {
- const VTableLayout &VTLayout = VTContext.getVTableLayout(RD);
-
- // Create and set the initializer.
- llvm::Constant *Init =
- CreateVTableInitializer(RD,
- VTLayout.vtable_component_begin(),
- VTLayout.getNumVTableComponents(),
- VTLayout.vtable_thunk_begin(),
- VTLayout.getNumVTableThunks());
- VTable->setInitializer(Init);
-
- // Set the correct linkage.
- VTable->setLinkage(Linkage);
-
- // Set the right visibility.
- CGM.setTypeVisibility(VTable, RD, CodeGenModule::TVK_ForVTable);
-}
-
llvm::GlobalVariable *
CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
const BaseSubobject &Base,
bool BaseIsVirtual,
llvm::GlobalVariable::LinkageTypes Linkage,
VTableAddressPointsMapTy& AddressPoints) {
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeClassData(Base.getBase());
+
OwningPtr<VTableLayout> VTLayout(
- VTContext.createConstructionVTableLayout(Base.getBase(),
- Base.getBaseOffset(),
- BaseIsVirtual, RD));
+ ItaniumVTContext.createConstructionVTableLayout(
+ Base.getBase(), Base.getBaseOffset(), BaseIsVirtual, RD));
// Add the address points.
AddressPoints = VTLayout->getAddressPoints();
@@ -677,9 +613,9 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
// Get the mangled construction vtable name.
SmallString<256> OutName;
llvm::raw_svector_ostream Out(OutName);
- CGM.getCXXABI().getMangleContext().
- mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(), Base.getBase(),
- Out);
+ cast<ItaniumMangleContext>(CGM.getCXXABI().getMangleContext())
+ .mangleCXXCtorVTable(RD, Base.getBaseOffset().getQuantity(),
+ Base.getBase(), Out);
Out.flush();
StringRef Name = OutName.str();
@@ -719,7 +655,7 @@ CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
/// Note that we only call this at the end of the translation unit.
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
- if (RD->getLinkage() != ExternalLinkage)
+ if (!RD->isExternallyVisible())
return llvm::GlobalVariable::InternalLinkage;
// We're at the end of the translation unit, so the current key
@@ -734,12 +670,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
switch (keyFunction->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
- // When compiling with optimizations turned on, we emit all vtables,
- // even if the key function is not defined in the current translation
- // unit. If this is the case, use available_externally linkage.
- if (!def && CodeGenOpts.OptimizationLevel)
- return llvm::GlobalVariable::AvailableExternallyLinkage;
-
+ assert(def && "Should not have been asked to emit this");
if (keyFunction->isInlined())
return !Context.getLangOpts().AppleKext ?
llvm::GlobalVariable::LinkOnceODRLinkage :
@@ -758,9 +689,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
llvm::Function::InternalLinkage;
case TSK_ExplicitInstantiationDeclaration:
- return !Context.getLangOpts().AppleKext ?
- llvm::GlobalVariable::AvailableExternallyLinkage :
- llvm::Function::InternalLinkage;
+ llvm_unreachable("Should not have been asked to emit this");
}
}
@@ -776,7 +705,7 @@ CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
return llvm::GlobalVariable::LinkOnceODRLinkage;
case TSK_ExplicitInstantiationDeclaration:
- return llvm::GlobalVariable::AvailableExternallyLinkage;
+ llvm_unreachable("Should not have been asked to emit this");
case TSK_ExplicitInstantiationDefinition:
return llvm::GlobalVariable::WeakODRLinkage;
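
A sketch (illustrative, not from the patch) of the source constructs behind the TSK_* cases handled above:

    template <typename T> struct S { virtual void f() {} };

    // TSK_ExplicitInstantiationDeclaration: the vtable is owned by the TU
    // containing the matching explicit instantiation definition, so this TU
    // should never be asked to emit it (hence llvm_unreachable above).
    extern template struct S<int>;

    // TSK_ExplicitInstantiationDefinition: the vtable is emitted here with
    // weak_odr linkage.
    template struct S<long>;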
@@ -803,35 +732,13 @@ void CodeGenModule::EmitVTable(CXXRecordDecl *theClass, bool isRequired) {
void
CodeGenVTables::GenerateClassData(const CXXRecordDecl *RD) {
- // First off, check whether we've already emitted the v-table and
- // associated stuff.
- llvm::GlobalVariable *VTable = GetAddrOfVTable(RD);
- if (VTable->hasInitializer())
- return;
-
- llvm::GlobalVariable::LinkageTypes Linkage = CGM.getVTableLinkage(RD);
- EmitVTableDefinition(VTable, Linkage, RD);
+ if (CGDebugInfo *DI = CGM.getModuleDebugInfo())
+ DI->completeClassData(RD);
- if (RD->getNumVBases()) {
- if (!CGM.getTarget().getCXXABI().isMicrosoft()) {
- llvm::GlobalVariable *VTT = GetAddrOfVTT(RD);
- EmitVTTDefinition(VTT, Linkage, RD);
- } else {
- // FIXME: Emit vbtables here.
- }
- }
+ if (RD->getNumVBases())
+ CGM.getCXXABI().emitVirtualInheritanceTables(RD);
- // If this is the magic class __cxxabiv1::__fundamental_type_info,
- // we will emit the typeinfo for the fundamental types. This is the
- // same behaviour as GCC.
- const DeclContext *DC = RD->getDeclContext();
- if (RD->getIdentifier() &&
- RD->getIdentifier()->isStr("__fundamental_type_info") &&
- isa<NamespaceDecl>(DC) &&
- cast<NamespaceDecl>(DC)->getIdentifier() &&
- cast<NamespaceDecl>(DC)->getIdentifier()->isStr("__cxxabiv1") &&
- DC->getParent()->isTranslationUnit())
- CGM.EmitFundamentalRTTIDescriptors();
+ CGM.getCXXABI().emitVTableDefinitions(*this, RD);
}
/// At this point in the translation unit, does it appear that we can
@@ -875,16 +782,6 @@ bool CodeGenVTables::isVTableExternal(const CXXRecordDecl *RD) {
/// we define that v-table?
static bool shouldEmitVTableAtEndOfTranslationUnit(CodeGenModule &CGM,
const CXXRecordDecl *RD) {
- // If we're building with optimization, we always emit v-tables
- // since that allows for virtual function calls to be devirtualized.
- // If the v-table is defined strongly elsewhere, this definition
- // will be emitted available_externally.
- //
- // However, we don't want to do this in -fapple-kext mode, because
- // kext mode does not permit devirtualization.
- if (CGM.getCodeGenOpts().OptimizationLevel && !CGM.getLangOpts().AppleKext)
- return true;
-
return !CGM.getVTables().isVTableExternal(RD);
}
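
The hunks above drop the old shortcut of re-emitting every referenced vtable with available_externally linkage at -O (and its -fapple-kext exception) from this helper; what remains is the ABI's key-function rule. A minimal sketch of that rule under the Itanium ABI (illustrative names, not from the patch):

    // The key function is the first non-pure, non-inline virtual member
    // function declared in the class; only the TU that defines it emits the
    // vtable, the VTT, and the associated thunks.
    struct Widget {
      virtual void draw();      // non-inline => this is the key function
      virtual void resize() {}  // inline, so it does not affect the choice
    };

    // In the TU that provides this definition, the vtable for Widget is
    // emitted; every other TU treats it as external.
    void Widget::draw() {}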
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index bd3bdb13583d..e8cd55eed80a 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -31,10 +31,10 @@ namespace CodeGen {
class CodeGenVTables {
CodeGenModule &CGM;
- VTableContext VTContext;
-
- /// VTables - All the vtables which have been defined.
- llvm::DenseMap<const CXXRecordDecl *, llvm::GlobalVariable *> VTables;
+ // FIXME: Consider moving ItaniumVTContext and MicrosoftVTContext into
+ // respective CXXABI classes?
+ ItaniumVTableContext ItaniumVTContext;
+ OwningPtr<MicrosoftVTableContext> MicrosoftVTContext;
/// VTableAddressPointsMapTy - Address points for a single vtable.
typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
@@ -52,16 +52,14 @@ class CodeGenVTables {
/// indices.
SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
- /// EmitThunk - Emit a single thunk.
- void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk,
- bool UseAvailableExternallyLinkage);
+ /// emitThunk - Emit a single thunk.
+ void emitThu