aboutsummaryrefslogtreecommitdiffstats
path: root/lib/CodeGen
diff options
context:
space:
mode:
authorDimitry Andric <dim@FreeBSD.org>2015-01-18 16:23:48 +0000
committerDimitry Andric <dim@FreeBSD.org>2015-01-18 16:23:48 +0000
commit06d4ba388873e6d1cfa9cd715a8935ecc8cd2097 (patch)
tree3eb853da77d46cc77c4b017525a422f9ddb1385b /lib/CodeGen
parent30d791273d07fac9c0c1641a0731191bca6e8606 (diff)
downloadsrc-06d4ba388873e6d1cfa9cd715a8935ecc8cd2097.tar.gz
src-06d4ba388873e6d1cfa9cd715a8935ecc8cd2097.zip
Vendor import of clang RELEASE_360/rc1 tag r226102 (effectively, 3.6.0 RC1):vendor/clang/clang-release_360-r226102
Notes
Notes: svn path=/vendor/clang/dist/; revision=277325 svn path=/vendor/clang/clang-release_360-r226102/; revision=277326; tag=vendor/clang/clang-release_360-r226102
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--lib/CodeGen/ABIInfo.h23
-rw-r--r--lib/CodeGen/BackendUtil.cpp88
-rw-r--r--lib/CodeGen/CGAtomic.cpp328
-rw-r--r--lib/CodeGen/CGBlocks.cpp48
-rw-r--r--lib/CodeGen/CGBlocks.h4
-rw-r--r--lib/CodeGen/CGBuilder.h6
-rw-r--r--lib/CodeGen/CGBuiltin.cpp530
-rw-r--r--lib/CodeGen/CGCUDARuntime.cpp3
-rw-r--r--lib/CodeGen/CGCUDARuntime.h4
-rw-r--r--lib/CodeGen/CGCXX.cpp157
-rw-r--r--lib/CodeGen/CGCXXABI.cpp23
-rw-r--r--lib/CodeGen/CGCXXABI.h102
-rw-r--r--lib/CodeGen/CGCall.cpp1512
-rw-r--r--lib/CodeGen/CGCall.h4
-rw-r--r--lib/CodeGen/CGClass.cpp263
-rw-r--r--lib/CodeGen/CGCleanup.cpp12
-rw-r--r--lib/CodeGen/CGCleanup.h12
-rw-r--r--lib/CodeGen/CGDebugInfo.cpp1569
-rw-r--r--lib/CodeGen/CGDebugInfo.h136
-rw-r--r--lib/CodeGen/CGDecl.cpp141
-rw-r--r--lib/CodeGen/CGDeclCXX.cpp154
-rw-r--r--lib/CodeGen/CGException.cpp106
-rw-r--r--lib/CodeGen/CGExpr.cpp547
-rw-r--r--lib/CodeGen/CGExprCXX.cpp301
-rw-r--r--lib/CodeGen/CGExprComplex.cpp271
-rw-r--r--lib/CodeGen/CGExprConstant.cpp202
-rw-r--r--lib/CodeGen/CGExprScalar.cpp240
-rw-r--r--lib/CodeGen/CGLoopInfo.cpp27
-rw-r--r--lib/CodeGen/CGLoopInfo.h6
-rw-r--r--lib/CodeGen/CGObjC.cpp43
-rw-r--r--lib/CodeGen/CGObjCGNU.cpp31
-rw-r--r--lib/CodeGen/CGObjCMac.cpp424
-rw-r--r--lib/CodeGen/CGObjCRuntime.h4
-rw-r--r--lib/CodeGen/CGOpenCLRuntime.h4
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.cpp794
-rw-r--r--lib/CodeGen/CGOpenMPRuntime.h315
-rw-r--r--lib/CodeGen/CGRecordLayout.h4
-rw-r--r--lib/CodeGen/CGRecordLayoutBuilder.cpp59
-rw-r--r--lib/CodeGen/CGStmt.cpp196
-rw-r--r--lib/CodeGen/CGStmtOpenMP.cpp639
-rw-r--r--lib/CodeGen/CGVTables.cpp126
-rw-r--r--lib/CodeGen/CGVTables.h4
-rw-r--r--lib/CodeGen/CGValue.h4
-rw-r--r--lib/CodeGen/CMakeLists.txt15
-rw-r--r--lib/CodeGen/CodeGenABITypes.cpp9
-rw-r--r--lib/CodeGen/CodeGenAction.cpp89
-rw-r--r--lib/CodeGen/CodeGenFunction.cpp232
-rw-r--r--lib/CodeGen/CodeGenFunction.h396
-rw-r--r--lib/CodeGen/CodeGenModule.cpp668
-rw-r--r--lib/CodeGen/CodeGenModule.h194
-rw-r--r--lib/CodeGen/CodeGenPGO.cpp364
-rw-r--r--lib/CodeGen/CodeGenPGO.h48
-rw-r--r--lib/CodeGen/CodeGenTBAA.h4
-rw-r--r--lib/CodeGen/CodeGenTypes.cpp30
-rw-r--r--lib/CodeGen/CodeGenTypes.h140
-rw-r--r--lib/CodeGen/CoverageMappingGen.cpp1174
-rw-r--r--lib/CodeGen/CoverageMappingGen.h114
-rw-r--r--lib/CodeGen/EHScopeStack.h6
-rw-r--r--lib/CodeGen/ItaniumCXXABI.cpp400
-rw-r--r--lib/CodeGen/MicrosoftCXXABI.cpp457
-rw-r--r--lib/CodeGen/ModuleBuilder.cpp66
-rw-r--r--lib/CodeGen/SanitizerBlacklist.cpp52
-rw-r--r--lib/CodeGen/SanitizerBlacklist.h46
-rw-r--r--lib/CodeGen/SanitizerMetadata.cpp92
-rw-r--r--lib/CodeGen/SanitizerMetadata.h53
-rw-r--r--lib/CodeGen/TargetInfo.cpp1110
-rw-r--r--lib/CodeGen/TargetInfo.h22
67 files changed, 10339 insertions, 4908 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index d3ec46c4c4a1..7e7f7fa20679 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_ABIINFO_H
-#define CLANG_CODEGEN_ABIINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
@@ -44,9 +44,12 @@ namespace clang {
CodeGen::CodeGenTypes &CGT;
protected:
llvm::CallingConv::ID RuntimeCC;
+ llvm::CallingConv::ID BuiltinCC;
public:
ABIInfo(CodeGen::CodeGenTypes &cgt)
- : CGT(cgt), RuntimeCC(llvm::CallingConv::C) {}
+ : CGT(cgt),
+ RuntimeCC(llvm::CallingConv::C),
+ BuiltinCC(llvm::CallingConv::C) {}
virtual ~ABIInfo();
@@ -62,6 +65,11 @@ namespace clang {
return RuntimeCC;
}
+ /// Return the calling convention to use for compiler builtins
+ llvm::CallingConv::ID getBuiltinCC() const {
+ return BuiltinCC;
+ }
+
virtual void computeInfo(CodeGen::CGFunctionInfo &FI) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
@@ -73,6 +81,15 @@ namespace clang {
// abstract this out.
virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGen::CodeGenFunction &CGF) const = 0;
+
+ virtual bool isHomogeneousAggregateBaseType(QualType Ty) const;
+
+ virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
+ uint64_t Members) const;
+
+ bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
+ uint64_t &Members) const;
+
};
} // end namespace clang
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index cec48f35a2e9..25ecec586244 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -33,11 +33,13 @@
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/ObjCARC.h"
#include "llvm/Transforms/Scalar.h"
+#include "llvm/Transforms/Utils/SymbolRewriter.h"
#include <memory>
using namespace clang;
using namespace llvm;
@@ -61,7 +63,7 @@ private:
PassManager *getCodeGenPasses() const {
if (!CodeGenPasses) {
CodeGenPasses = new PassManager();
- CodeGenPasses->add(new DataLayoutPass(TheModule));
+ CodeGenPasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*CodeGenPasses);
}
@@ -71,7 +73,7 @@ private:
PassManager *getPerModulePasses() const {
if (!PerModulePasses) {
PerModulePasses = new PassManager();
- PerModulePasses->add(new DataLayoutPass(TheModule));
+ PerModulePasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*PerModulePasses);
}
@@ -81,7 +83,7 @@ private:
FunctionPassManager *getPerFunctionPasses() const {
if (!PerFunctionPasses) {
PerFunctionPasses = new FunctionPassManager(TheModule);
- PerFunctionPasses->add(new DataLayoutPass(TheModule));
+ PerFunctionPasses->add(new DataLayoutPass());
if (TM)
TM->addAnalysisPasses(*PerFunctionPasses);
}
@@ -121,7 +123,7 @@ public:
delete PerModulePasses;
delete PerFunctionPasses;
if (CodeGenOpts.DisableFree)
- BuryPointer(TM.release());
+ BuryPointer(std::move(TM));
}
std::unique_ptr<TargetMachine> TM;
@@ -178,6 +180,14 @@ static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
PM.add(createBoundsCheckingPass());
}
+static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
+ PassManagerBase &PM) {
+ const PassManagerBuilderWrapper &BuilderWrapper =
+ static_cast<const PassManagerBuilderWrapper&>(Builder);
+ const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
+ PM.add(createSanitizerCoverageModulePass(CGOpts.SanitizeCoverage));
+}
+
static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
PM.add(createAddressSanitizerFunctionPass());
@@ -213,8 +223,27 @@ static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
PassManagerBase &PM) {
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createDataFlowSanitizerPass(CGOpts.SanitizerBlacklistFile));
+ const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
+ PM.add(createDataFlowSanitizerPass(LangOpts.SanitizerBlacklistFile));
+}
+
+static TargetLibraryInfo *createTLI(llvm::Triple &TargetTriple,
+ const CodeGenOptions &CodeGenOpts) {
+ TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
+ if (!CodeGenOpts.SimplifyLibCalls)
+ TLI->disableAllFunctions();
+ return TLI;
+}
+
+static void addSymbolRewriterPass(const CodeGenOptions &Opts,
+ PassManager *MPM) {
+ llvm::SymbolRewriter::RewriteDescriptorList DL;
+
+ llvm::SymbolRewriter::RewriteMapParser MapParser;
+ for (const auto &MapFile : Opts.RewriteMapFiles)
+ MapParser.parse(MapFile, &DL);
+
+ MPM->add(createRewriteSymbolsPass(DL));
}
void EmitAssemblyHelper::CreatePasses() {
@@ -238,6 +267,7 @@ void EmitAssemblyHelper::CreatePasses() {
PMBuilder.DisableTailCalls = CodeGenOpts.DisableTailCalls;
PMBuilder.DisableUnitAtATime = !CodeGenOpts.UnitAtATime;
PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
+ PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
@@ -257,35 +287,42 @@ void EmitAssemblyHelper::CreatePasses() {
addObjCARCOptPass);
}
- if (LangOpts.Sanitize.LocalBounds) {
+ if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
addBoundsCheckingPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addBoundsCheckingPass);
}
- if (LangOpts.Sanitize.Address) {
+ if (CodeGenOpts.SanitizeCoverage) {
+ PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
+ addSanitizerCoveragePass);
+ PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
+ addSanitizerCoveragePass);
+ }
+
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addAddressSanitizerPasses);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addAddressSanitizerPasses);
}
- if (LangOpts.Sanitize.Memory) {
+ if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addMemorySanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addMemorySanitizerPass);
}
- if (LangOpts.Sanitize.Thread) {
+ if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addThreadSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
addThreadSanitizerPass);
}
- if (LangOpts.Sanitize.DataFlow) {
+ if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addDataFlowSanitizerPass);
PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
@@ -294,9 +331,7 @@ void EmitAssemblyHelper::CreatePasses() {
// Figure out TargetLibraryInfo.
Triple TargetTriple(TheModule->getTargetTriple());
- PMBuilder.LibraryInfo = new TargetLibraryInfo(TargetTriple);
- if (!CodeGenOpts.SimplifyLibCalls)
- PMBuilder.LibraryInfo->disableAllFunctions();
+ PMBuilder.LibraryInfo = createTLI(TargetTriple, CodeGenOpts);
switch (Inlining) {
case CodeGenOptions::NoInlining: break;
@@ -323,6 +358,8 @@ void EmitAssemblyHelper::CreatePasses() {
// Set up the per-module pass manager.
PassManager *MPM = getPerModulePasses();
+ if (!CodeGenOpts.RewriteMapFiles.empty())
+ addSymbolRewriterPass(CodeGenOpts, MPM);
if (CodeGenOpts.VerifyModule)
MPM->add(createDebugInfoVerifierPass());
@@ -343,6 +380,12 @@ void EmitAssemblyHelper::CreatePasses() {
MPM->add(createStripSymbolsPass(true));
}
+ if (CodeGenOpts.ProfileInstrGenerate) {
+ InstrProfOptions Options;
+ Options.NoRedZone = CodeGenOpts.DisableRedZone;
+ MPM->add(createInstrProfilingPass(Options));
+ }
+
PMBuilder.populateModulePassManager(*MPM);
}
@@ -418,6 +461,11 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
llvm::TargetOptions Options;
+ Options.ThreadModel =
+ llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
+ .Case("posix", llvm::ThreadModel::POSIX)
+ .Case("single", llvm::ThreadModel::Single);
+
if (CodeGenOpts.DisableIntegratedAS)
Options.DisableIntegratedAS = true;
@@ -476,7 +524,9 @@ TargetMachine *EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
+ Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.ABIName = TargetOpts.ABI;
TargetMachine *TM = TheTarget->createTargetMachine(Triple, TargetOpts.CPU,
FeaturesStr, Options,
@@ -493,10 +543,7 @@ bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
// Add LibraryInfo.
llvm::Triple TargetTriple(TheModule->getTargetTriple());
- TargetLibraryInfo *TLI = new TargetLibraryInfo(TargetTriple);
- if (!CodeGenOpts.SimplifyLibCalls)
- TLI->disableAllFunctions();
- PM->add(TLI);
+ PM->add(createTLI(TargetTriple, CodeGenOpts));
// Add Target specific analysis passes.
TM->addAnalysisPasses(*PM);
@@ -600,8 +647,9 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// If an optional clang TargetInfo description string was passed in, use it to
// verify the LLVM TargetMachine's DataLayout.
if (AsmHelper.TM && !TDesc.empty()) {
- std::string DLDesc =
- AsmHelper.TM->getDataLayout()->getStringRepresentation();
+ std::string DLDesc = AsmHelper.TM->getSubtargetImpl()
+ ->getDataLayout()
+ ->getStringRepresentation();
if (DLDesc != TDesc) {
unsigned DiagID = Diags.getCustomDiagID(
DiagnosticsEngine::Error, "backend data layout '%0' does not match "
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 89bde2ce20d9..daac174c8e0c 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -46,23 +46,26 @@ namespace {
ASTContext &C = CGF.getContext();
- uint64_t valueAlignInBits;
- std::tie(ValueSizeInBits, valueAlignInBits) = C.getTypeInfo(ValueTy);
+ uint64_t ValueAlignInBits;
+ uint64_t AtomicAlignInBits;
+ TypeInfo ValueTI = C.getTypeInfo(ValueTy);
+ ValueSizeInBits = ValueTI.Width;
+ ValueAlignInBits = ValueTI.Align;
- uint64_t atomicAlignInBits;
- std::tie(AtomicSizeInBits, atomicAlignInBits) = C.getTypeInfo(AtomicTy);
+ TypeInfo AtomicTI = C.getTypeInfo(AtomicTy);
+ AtomicSizeInBits = AtomicTI.Width;
+ AtomicAlignInBits = AtomicTI.Align;
assert(ValueSizeInBits <= AtomicSizeInBits);
- assert(valueAlignInBits <= atomicAlignInBits);
+ assert(ValueAlignInBits <= AtomicAlignInBits);
- AtomicAlign = C.toCharUnitsFromBits(atomicAlignInBits);
- ValueAlign = C.toCharUnitsFromBits(valueAlignInBits);
+ AtomicAlign = C.toCharUnitsFromBits(AtomicAlignInBits);
+ ValueAlign = C.toCharUnitsFromBits(ValueAlignInBits);
if (lvalue.getAlignment().isZero())
lvalue.setAlignment(AtomicAlign);
- UseLibcall =
- (AtomicSizeInBits > uint64_t(C.toBits(lvalue.getAlignment())) ||
- AtomicSizeInBits > C.getTargetInfo().getMaxAtomicInlineWidth());
+ UseLibcall = !C.getTargetInfo().hasBuiltinAtomic(
+ AtomicSizeInBits, C.toBits(lvalue.getAlignment()));
}
QualType getAtomicType() const { return AtomicTy; }
@@ -70,7 +73,7 @@ namespace {
CharUnits getAtomicAlignment() const { return AtomicAlign; }
CharUnits getValueAlignment() const { return ValueAlign; }
uint64_t getAtomicSizeInBits() const { return AtomicSizeInBits; }
- uint64_t getValueSizeInBits() const { return AtomicSizeInBits; }
+ uint64_t getValueSizeInBits() const { return ValueSizeInBits; }
TypeEvaluationKind getEvaluationKind() const { return EvaluationKind; }
bool shouldUseLibcall() const { return UseLibcall; }
@@ -100,6 +103,12 @@ namespace {
AggValueSlot resultSlot,
SourceLocation loc) const;
+ /// \brief Converts a rvalue to integer value.
+ llvm::Value *convertRValueToInt(RValue RVal) const;
+
+ RValue convertIntToValue(llvm::Value *IntVal, AggValueSlot ResultSlot,
+ SourceLocation Loc) const;
+
/// Copy an atomic r-value into atomic-layout memory.
void emitCopyIntoMemory(RValue rvalue, LValue lvalue) const;
@@ -461,11 +470,19 @@ EmitValToTemp(CodeGenFunction &CGF, Expr *E) {
static void
AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
bool UseOptimizedLibcall, llvm::Value *Val, QualType ValTy,
- SourceLocation Loc) {
+ SourceLocation Loc, CharUnits SizeInChars) {
if (UseOptimizedLibcall) {
// Load value and pass it to the function directly.
unsigned Align = CGF.getContext().getTypeAlignInChars(ValTy).getQuantity();
- Val = CGF.EmitLoadOfScalar(Val, false, Align, ValTy, Loc);
+ int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
+ ValTy =
+ CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
+ llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
+ SizeInBits)->getPointerTo();
+ Val = CGF.EmitLoadOfScalar(CGF.Builder.CreateBitCast(Val, IPtrTy), false,
+ Align, CGF.getContext().getPointerType(ValTy),
+ Loc);
+ // Coerce the value into an appropriately sized integer type.
Args.add(RValue::get(Val), ValTy);
} else {
// Non-optimized functions always take a reference.
@@ -576,8 +593,14 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
break;
}
- if (!E->getType()->isVoidType() && !Dest)
- Dest = CreateMemTemp(E->getType(), ".atomicdst");
+ QualType RValTy = E->getType().getUnqualifiedType();
+
+ auto GetDest = [&] {
+ if (!RValTy->isVoidType() && !Dest) {
+ Dest = CreateMemTemp(RValTy, ".atomicdst");
+ }
+ return Dest;
+ };
// Use a library call. See: http://gcc.gnu.org/wiki/Atomic/GCCMM/LIbrary .
if (UseLibcall) {
@@ -634,7 +657,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
HaveRetTy = true;
Args.add(RValue::get(EmitCastToVoidPtr(Val1)), getContext().VoidPtrTy);
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val2, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
Args.add(RValue::get(Order), getContext().IntTy);
Order = OrderFail;
break;
@@ -646,7 +669,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_exchange:
LibCallName = "__atomic_exchange";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_store(size_t size, void *mem, void *val, int order)
// void __atomic_store_N(T *mem, T val, int order)
@@ -657,7 +680,7 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
RetTy = getContext().VoidTy;
HaveRetTy = true;
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// void __atomic_load(size_t size, void *mem, void *return, int order)
// T __atomic_load_N(T *mem, int order)
@@ -671,35 +694,35 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
case AtomicExpr::AO__atomic_fetch_add:
LibCallName = "__atomic_fetch_add";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_and_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_and:
case AtomicExpr::AO__atomic_fetch_and:
LibCallName = "__atomic_fetch_and";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_or_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_or:
case AtomicExpr::AO__atomic_fetch_or:
LibCallName = "__atomic_fetch_or";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_sub_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_sub:
case AtomicExpr::AO__atomic_fetch_sub:
LibCallName = "__atomic_fetch_sub";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, LoweredMemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
// T __atomic_fetch_xor_N(T *mem, T val, int order)
case AtomicExpr::AO__c11_atomic_fetch_xor:
case AtomicExpr::AO__atomic_fetch_xor:
LibCallName = "__atomic_fetch_xor";
AddDirectArgument(*this, Args, UseOptimizedLibcall, Val1, MemTy,
- E->getExprLoc());
+ E->getExprLoc(), sizeChars);
break;
default: return EmitUnsupportedRValue(E, "atomic library call");
}
@@ -711,29 +734,36 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
if (!HaveRetTy) {
if (UseOptimizedLibcall) {
// Value is returned directly.
- RetTy = MemTy;
+ // The function returns an appropriately sized integer type.
+ RetTy = getContext().getIntTypeForBitwidth(
+ getContext().toBits(sizeChars), /*Signed=*/false);
} else {
// Value is returned through parameter before the order.
RetTy = getContext().VoidTy;
- Args.add(RValue::get(EmitCastToVoidPtr(Dest)),
- getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(Dest)), getContext().VoidPtrTy);
}
}
// order is always the last parameter
Args.add(RValue::get(Order),
getContext().IntTy);
- const CGFunctionInfo &FuncInfo =
- CGM.getTypes().arrangeFreeFunctionCall(RetTy, Args,
- FunctionType::ExtInfo(), RequiredArgs::All);
- llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
- llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- RValue Res = EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
- if (!RetTy->isVoidType())
+ RValue Res = emitAtomicLibcall(*this, LibCallName, RetTy, Args);
+ // The value is returned directly from the libcall.
+ if (HaveRetTy && !RetTy->isVoidType())
return Res;
- if (E->getType()->isVoidType())
+ // The value is returned via an explicit out param.
+ if (RetTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(Dest, E->getType(), E->getExprLoc());
+ // The value is returned directly for optimized libcalls but the caller is
+ // expected an out-param.
+ if (UseOptimizedLibcall) {
+ llvm::Value *ResVal = Res.getScalarVal();
+ llvm::StoreInst *StoreDest = Builder.CreateStore(
+ ResVal,
+ Builder.CreateBitCast(GetDest(), ResVal->getType()->getPointerTo()));
+ StoreDest->setAlignment(Align);
+ }
+ return convertTempToRValue(Dest, RValTy, E->getExprLoc());
}
bool IsStore = E->getOp() == AtomicExpr::AO__c11_atomic_store ||
@@ -743,13 +773,15 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
E->getOp() == AtomicExpr::AO__atomic_load ||
E->getOp() == AtomicExpr::AO__atomic_load_n;
- llvm::Type *IPtrTy =
- llvm::IntegerType::get(getLLVMContext(), Size * 8)->getPointerTo();
- llvm::Value *OrigDest = Dest;
- Ptr = Builder.CreateBitCast(Ptr, IPtrTy);
- if (Val1) Val1 = Builder.CreateBitCast(Val1, IPtrTy);
- if (Val2) Val2 = Builder.CreateBitCast(Val2, IPtrTy);
- if (Dest && !E->isCmpXChg()) Dest = Builder.CreateBitCast(Dest, IPtrTy);
+ llvm::Type *ITy =
+ llvm::IntegerType::get(getLLVMContext(), Size * 8);
+ llvm::Value *OrigDest = GetDest();
+ Ptr = Builder.CreateBitCast(
+ Ptr, ITy->getPointerTo(Ptr->getType()->getPointerAddressSpace()));
+ if (Val1) Val1 = Builder.CreateBitCast(Val1, ITy->getPointerTo());
+ if (Val2) Val2 = Builder.CreateBitCast(Val2, ITy->getPointerTo());
+ if (Dest && !E->isCmpXChg())
+ Dest = Builder.CreateBitCast(Dest, ITy->getPointerTo());
if (isa<llvm::ConstantInt>(Order)) {
int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
@@ -786,9 +818,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// enforce that in general.
break;
}
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
// Long case, when Order isn't obviously constant.
@@ -854,9 +886,9 @@ RValue CodeGenFunction::EmitAtomicExpr(AtomicExpr *E, llvm::Value *Dest) {
// Cleanup and return
Builder.SetInsertPoint(ContBB);
- if (E->getType()->isVoidType())
+ if (RValTy->isVoidType())
return RValue::get(nullptr);
- return convertTempToRValue(OrigDest, E->getType(), E->getExprLoc());
+ return convertTempToRValue(OrigDest, RValTy, E->getExprLoc());
}
llvm::Value *AtomicInfo::emitCastToAtomicIntPointer(llvm::Value *addr) const {
@@ -882,6 +914,45 @@ RValue AtomicInfo::convertTempToRValue(llvm::Value *addr,
return CGF.convertTempToRValue(addr, getValueType(), loc);
}
+RValue AtomicInfo::convertIntToValue(llvm::Value *IntVal,
+ AggValueSlot ResultSlot,
+ SourceLocation Loc) const {
+ // Try not to in some easy cases.
+ assert(IntVal->getType()->isIntegerTy() && "Expected integer value");
+ if (getEvaluationKind() == TEK_Scalar && !hasPadding()) {
+ auto *ValTy = CGF.ConvertTypeForMem(ValueTy);
+ if (ValTy->isIntegerTy()) {
+ assert(IntVal->getType() == ValTy && "Different integer types.");
+ return RValue::get(IntVal);
+ } else if (ValTy->isPointerTy())
+ return RValue::get(CGF.Builder.CreateIntToPtr(IntVal, ValTy));
+ else if (llvm::CastInst::isBitCastable(IntVal->getType(), ValTy))
+ return RValue::get(CGF.Builder.CreateBitCast(IntVal, ValTy));
+ }
+
+ // Create a temporary. This needs to be big enough to hold the
+ // atomic integer.
+ llvm::Value *Temp;
+ bool TempIsVolatile = false;
+ CharUnits TempAlignment;
+ if (getEvaluationKind() == TEK_Aggregate) {
+ assert(!ResultSlot.isIgnored());
+ Temp = ResultSlot.getAddr();
+ TempAlignment = getValueAlignment();
+ TempIsVolatile = ResultSlot.isVolatile();
+ } else {
+ Temp = CGF.CreateMemTemp(getAtomicType(), "atomic-temp");
+ TempAlignment = getAtomicAlignment();
+ }
+
+ // Slam the integer into the temporary.
+ llvm::Value *CastTemp = emitCastToAtomicIntPointer(Temp);
+ CGF.Builder.CreateAlignedStore(IntVal, CastTemp, TempAlignment.getQuantity())
+ ->setVolatile(TempIsVolatile);
+
+ return convertTempToRValue(Temp, ResultSlot, Loc);
+}
+
/// Emit a load from an l-value of atomic type. Note that the r-value
/// we produce is an r-value of the atomic *value* type.
RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
@@ -927,50 +998,12 @@ RValue CodeGenFunction::EmitAtomicLoad(LValue src, SourceLocation loc,
if (src.getTBAAInfo())
CGM.DecorateInstruction(load, src.getTBAAInfo());
- // Okay, turn that back into the original value type.
- QualType valueType = atomics.getValueType();
- llvm::Value *result = load;
-
// If we're ignoring an aggregate return, don't do anything.
if (atomics.getEvaluationKind() == TEK_Aggregate && resultSlot.isIgnored())
return RValue::getAggregate(nullptr, false);
- // The easiest way to do this this is to go through memory, but we
- // try not to in some easy cases.
- if (atomics.getEvaluationKind() == TEK_Scalar && !atomics.hasPadding()) {
- llvm::Type *resultTy = CGM.getTypes().ConvertTypeForMem(valueType);
- if (isa<llvm::IntegerType>(resultTy)) {
- assert(result->getType() == resultTy);
- result = EmitFromMemory(result, valueType);
- } else if (isa<llvm::PointerType>(resultTy)) {
- result = Builder.CreateIntToPtr(result, resultTy);
- } else {
- result = Builder.CreateBitCast(result, resultTy);
- }
- return RValue::get(result);
- }
-
- // Create a temporary. This needs to be big enough to hold the
- // atomic integer.
- llvm::Value *temp;
- bool tempIsVolatile = false;
- CharUnits tempAlignment;
- if (atomics.getEvaluationKind() == TEK_Aggregate) {
- assert(!resultSlot.isIgnored());
- temp = resultSlot.getAddr();
- tempAlignment = atomics.getValueAlignment();
- tempIsVolatile = resultSlot.isVolatile();
- } else {
- temp = CreateMemTemp(atomics.getAtomicType(), "atomic-load-temp");
- tempAlignment = atomics.getAtomicAlignment();
- }
-
- // Slam the integer into the temporary.
- llvm::Value *castTemp = atomics.emitCastToAtomicIntPointer(temp);
- Builder.CreateAlignedStore(result, castTemp, tempAlignment.getQuantity())
- ->setVolatile(tempIsVolatile);
-
- return atomics.convertTempToRValue(temp, resultSlot, loc);
+ // Okay, turn that back into the original value type.
+ return atomics.convertIntToValue(load, resultSlot, loc);
}
@@ -1023,6 +1056,32 @@ llvm::Value *AtomicInfo::materializeRValue(RValue rvalue) const {
return temp;
}
+llvm::Value *AtomicInfo::convertRValueToInt(RValue RVal) const {
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ if (RVal.isScalar() && !hasPadding()) {
+ llvm::Value *Value = RVal.getScalarVal();
+ if (isa<llvm::IntegerType>(Value->getType()))
+ return Value;
+ else {
+ llvm::IntegerType *InputIntTy =
+ llvm::IntegerType::get(CGF.getLLVMContext(), getValueSizeInBits());
+ if (isa<llvm::PointerType>(Value->getType()))
+ return CGF.Builder.CreatePtrToInt(Value, InputIntTy);
+ else if (llvm::BitCastInst::isBitCastable(Value->getType(), InputIntTy))
+ return CGF.Builder.CreateBitCast(Value, InputIntTy);
+ }
+ }
+ // Otherwise, we need to go through memory.
+ // Put the r-value in memory.
+ llvm::Value *Addr = materializeRValue(RVal);
+
+ // Cast the temporary to the atomic int type and pull a value out.
+ Addr = emitCastToAtomicIntPointer(Addr);
+ return CGF.Builder.CreateAlignedLoad(Addr,
+ getAtomicAlignment().getQuantity());
+}
+
/// Emit a store to an l-value of atomic type.
///
/// Note that the r-value is expected to be an r-value *of the atomic
@@ -1064,34 +1123,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
}
// Okay, we're doing this natively.
- llvm::Value *intValue;
-
- // If we've got a scalar value of the right size, try to avoid going
- // through memory.
- if (rvalue.isScalar() && !atomics.hasPadding()) {
- llvm::Value *value = rvalue.getScalarVal();
- if (isa<llvm::IntegerType>(value->getType())) {
- intValue = value;
- } else {
- llvm::IntegerType *inputIntTy =
- llvm::IntegerType::get(getLLVMContext(), atomics.getValueSizeInBits());
- if (isa<llvm::PointerType>(value->getType())) {
- intValue = Builder.CreatePtrToInt(value, inputIntTy);
- } else {
- intValue = Builder.CreateBitCast(value, inputIntTy);
- }
- }
-
- // Otherwise, we need to go through memory.
- } else {
- // Put the r-value in memory.
- llvm::Value *addr = atomics.materializeRValue(rvalue);
-
- // Cast the temporary to the atomic int type and pull a value out.
- addr = atomics.emitCastToAtomicIntPointer(addr);
- intValue = Builder.CreateAlignedLoad(addr,
- atomics.getAtomicAlignment().getQuantity());
- }
+ llvm::Value *intValue = atomics.convertRValueToInt(rvalue);
// Do the atomic store.
llvm::Value *addr = atomics.emitCastToAtomicIntPointer(dest.getAddress());
@@ -1108,6 +1140,74 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest, bool isInit) {
CGM.DecorateInstruction(store, dest.getTBAAInfo());
}
+/// Emit a compare-and-exchange op for atomic type.
+///
+std::pair<RValue, RValue> CodeGenFunction::EmitAtomicCompareExchange(
+ LValue Obj, RValue Expected, RValue Desired, SourceLocation Loc,
+ llvm::AtomicOrdering Success, llvm::AtomicOrdering Failure, bool IsWeak,
+ AggValueSlot Slot) {
+ // If this is an aggregate r-value, it should agree in type except
+ // maybe for address-space qualification.
+ assert(!Expected.isAggregate() ||
+ Expected.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ assert(!Desired.isAggregate() ||
+ Desired.getAggregateAddr()->getType()->getPointerElementType() ==
+ Obj.getAddress()->getType()->getPointerElementType());
+ AtomicInfo Atomics(*this, Obj);
+
+ if (Failure >= Success)
+ // Don't assert on undefined behavior.
+ Failure = llvm::AtomicCmpXchgInst::getStrongestFailureOrdering(Success);
+
+ auto Alignment = Atomics.getValueAlignment();
+ // Check whether we should use a library call.
+ if (Atomics.shouldUseLibcall()) {
+ auto *ExpectedAddr = Atomics.materializeRValue(Expected);
+ // Produce a source address.
+ auto *DesiredAddr = Atomics.materializeRValue(Desired);
+ // bool __atomic_compare_exchange(size_t size, void *obj, void *expected,
+ // void *desired, int success, int failure);
+ CallArgList Args;
+ Args.add(RValue::get(Atomics.getAtomicSizeValue()),
+ getContext().getSizeType());
+ Args.add(RValue::get(EmitCastToVoidPtr(Obj.getAddress())),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(ExpectedAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(EmitCastToVoidPtr(DesiredAddr)),
+ getContext().VoidPtrTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Success)),
+ getContext().IntTy);
+ Args.add(RValue::get(llvm::ConstantInt::get(IntTy, Failure)),
+ getContext().IntTy);
+ auto SuccessFailureRVal = emitAtomicLibcall(
+ *this, "__atomic_compare_exchange", getContext().BoolTy, Args);
+ auto *PreviousVal =
+ Builder.CreateAlignedLoad(ExpectedAddr, Alignment.getQuantity());
+ return std::make_pair(RValue::get(PreviousVal), SuccessFailureRVal);
+ }
+
+ // If we've got a scalar value of the right size, try to avoid going
+ // through memory.
+ auto *ExpectedIntVal = Atomics.convertRValueToInt(Expected);
+ auto *DesiredIntVal = Atomics.convertRValueToInt(Desired);
+
+ // Do the atomic store.
+ auto *Addr = Atomics.emitCastToAtomicIntPointer(Obj.getAddress());
+ auto *Inst = Builder.CreateAtomicCmpXchg(Addr, ExpectedIntVal, DesiredIntVal,
+ Success, Failure);
+ // Other decoration.
+ Inst->setVolatile(Obj.isVolatileQualified());
+ Inst->setWeak(IsWeak);
+
+ // Okay, turn that back into the original value type.
+ auto *PreviousVal = Builder.CreateExtractValue(Inst, /*Idxs=*/0);
+ auto *SuccessFailureVal = Builder.CreateExtractValue(Inst, /*Idxs=*/1);
+ return std::make_pair(Atomics.convertIntToValue(PreviousVal, Slot, Loc),
+ RValue::get(SuccessFailureVal));
+}
+
void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
AtomicInfo atomics(*this, dest);
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 72fde9dc55f1..b98460a9ddd8 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -545,6 +545,16 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
// multiple of alignment.
for (SmallVectorImpl<BlockLayoutChunk>::iterator
li = layout.begin(), le = layout.end(); li != le; ++li) {
+ if (endAlign < li->Alignment) {
+ // size may not be multiple of alignment. This can only happen with
+ // an over-aligned variable. We will be adding a padding field to
+ // make the size be multiple of alignment.
+ CharUnits padding = li->Alignment - endAlign;
+ elementTypes.push_back(llvm::ArrayType::get(CGM.Int8Ty,
+ padding.getQuantity()));
+ blockSize += padding;
+ endAlign = getLowBit(blockSize);
+ }
assert(endAlign >= li->Alignment);
li->setIndex(info, elementTypes.size());
elementTypes.push_back(li->Type);
@@ -782,9 +792,10 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// emission.
src = LocalDeclMap.lookup(variable);
if (!src) {
- DeclRefExpr declRef(const_cast<VarDecl *>(variable),
- /*refersToEnclosing*/ CI.isNested(), type,
- VK_LValue, SourceLocation());
+ DeclRefExpr declRef(
+ const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(), type,
+ VK_LValue, SourceLocation());
src = EmitDeclRefLValue(&declRef).getAddress();
}
}
@@ -853,12 +864,15 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// We use one of these or the other depending on whether the
// reference is nested.
- DeclRefExpr declRef(const_cast<VarDecl*>(variable),
- /*refersToEnclosing*/ CI.isNested(), type,
- VK_LValue, SourceLocation());
+ DeclRefExpr declRef(const_cast<VarDecl *>(variable),
+ /*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
+ type, VK_LValue, SourceLocation());
ImplicitCastExpr l2r(ImplicitCastExpr::OnStack, type, CK_LValueToRValue,
&declRef, VK_RValue);
+ // FIXME: Pass a specific location for the expr init so that the store is
+ // attributed to a reasonable location - otherwise it may be attributed to
+ // locations of subexpressions in the initialization.
EmitExprAsInit(&l2r, &blockFieldPseudoVar,
MakeAddrLValue(blockField, type, align),
/*captured by init*/ false);
@@ -905,7 +919,7 @@ llvm::Type *CodeGenModule::getBlockDescriptorType() {
// };
BlockDescriptorType =
llvm::StructType::create("struct.__block_descriptor",
- UnsignedLongTy, UnsignedLongTy, NULL);
+ UnsignedLongTy, UnsignedLongTy, nullptr);
// Now form a pointer to that.
BlockDescriptorType = llvm::PointerType::getUnqual(BlockDescriptorType);
@@ -928,7 +942,7 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
GenericBlockLiteralType =
llvm::StructType::create("struct.__block_literal_generic",
VoidPtrTy, IntTy, IntTy, VoidPtrTy,
- BlockDescPtrTy, NULL);
+ BlockDescPtrTy, nullptr);
return GenericBlockLiteralType;
}
@@ -1093,6 +1107,8 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
CurGD = GD;
+
+ CurEHLocation = blockInfo.getBlockExpr()->getLocEnd();
BlockInfo = &blockInfo;
@@ -1162,7 +1178,7 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
Alloca->setAlignment(Align);
// Set the DebugLocation to empty, so the store is recognized as a
// frame setup instruction by llvm::DwarfDebug::beginFunction().
- NoLocation NL(*this, Builder);
+ ApplyDebugLocation NL(*this);
Builder.CreateAlignedStore(BlockPointer, Alloca, Align);
BlockPointerDbgLoc = Alloca;
}
@@ -1205,8 +1221,6 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
RegionCounter Cnt = getPGORegionCounter(blockDecl->getBody());
Cnt.beginRegion(Builder);
EmitStmt(blockDecl->getBody());
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
}
// Remember where we were...
@@ -1233,7 +1247,9 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
}
DI->EmitDeclareOfBlockDeclRefVariable(variable, BlockPointerDbgLoc,
- Builder, blockInfo);
+ Builder, blockInfo,
+ entry_ptr == entry->end()
+ ? nullptr : entry_ptr);
}
}
// Recover location if it was changed in the above loop.
@@ -1313,9 +1329,9 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
false,
false);
// Create a scope with an artificial location for the body of this function.
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
@@ -1484,9 +1500,9 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
nullptr, SC_Static,
false, false);
// Create a scope with an artificial location for the body of this function.
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(FD, C.VoidTy, Fn, FI, args);
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 0031e32c9daf..c4eed0d0e8eb 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGBLOCKS_H
-#define CLANG_CODEGEN_CGBLOCKS_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBLOCKS_H
#include "CGBuilder.h"
#include "CGCall.h"
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index f113b970b7b7..72ba4faa3c7c 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGBUILDER_H
-#define CLANG_CODEGEN_CGBUILDER_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
+#define LLVM_CLANG_LIB_CODEGEN_CGBUILDER_H
#include "llvm/IR/IRBuilder.h"
@@ -18,7 +18,7 @@ namespace CodeGen {
class CodeGenFunction;
/// \brief This is an IRBuilder insertion helper that forwards to
-/// CodeGenFunction::InsertHelper, which adds nesessary metadata to
+/// CodeGenFunction::InsertHelper, which adds necessary metadata to
/// instructions.
template <bool PreserveNames>
class CGBuilderInserter
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 4f68b347dbf0..635e34207de7 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -20,7 +20,9 @@
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
using namespace clang;
@@ -113,7 +115,8 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
llvm::AtomicRMWInst::BinOp Kind,
const CallExpr *E,
- Instruction::BinaryOps Op) {
+ Instruction::BinaryOps Op,
+ bool Invert = false) {
QualType T = E->getType();
assert(E->getArg(0)->getType()->isPointerType());
assert(CGF.getContext().hasSameUnqualifiedType(T,
@@ -138,36 +141,25 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
llvm::SequentiallyConsistent);
Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
+ if (Invert)
+ Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
+ llvm::ConstantInt::get(IntType, -1));
Result = EmitFromInt(CGF, Result, T, ValueType);
return RValue::get(Result);
}
-/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
-/// which must be a scalar floating point type.
-static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
- const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
- assert(ValTyP && "isn't scalar fp type!");
-
- StringRef FnName;
- switch (ValTyP->getKind()) {
- default: llvm_unreachable("Isn't a scalar fp type!");
- case BuiltinType::Float: FnName = "fabsf"; break;
- case BuiltinType::Double: FnName = "fabs"; break;
- case BuiltinType::LongDouble: FnName = "fabsl"; break;
- }
-
- // The prototype is something that takes and returns whatever V's type is.
- llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), V->getType(),
- false);
- llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
-
- return CGF.EmitNounwindRuntimeCall(Fn, V, "abs");
+/// EmitFAbs - Emit a call to @llvm.fabs().
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
+ Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
+ llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
+ Call->setDoesNotAccessMemory();
+ return Call;
}
static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
const CallExpr *E, llvm::Value *calleeValue) {
- return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E->getLocStart(),
- ReturnValueSlot(), E->arg_begin(), E->arg_end(), Fn);
+ return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
+ ReturnValueSlot(), Fn);
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
@@ -195,7 +187,8 @@ static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
}
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
- unsigned BuiltinID, const CallExpr *E) {
+ unsigned BuiltinID, const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
// See if we can constant fold this builtin. If so, don't emit it at all.
Expr::EvalResult Result;
if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
@@ -255,6 +248,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Result);
}
+ case Builtin::BI__builtin_fabs:
+ case Builtin::BI__builtin_fabsf:
+ case Builtin::BI__builtin_fabsl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Result = EmitFAbs(*this, Arg1);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_fmod:
+ case Builtin::BI__builtin_fmodf:
+ case Builtin::BI__builtin_fmodl: {
+ Value *Arg1 = EmitScalarExpr(E->getArg(0));
+ Value *Arg2 = EmitScalarExpr(E->getArg(1));
+ Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
+ return RValue::get(Result);
+ }
case Builtin::BI__builtin_conj:
case Builtin::BI__builtin_conjf:
@@ -388,6 +396,27 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"expval");
return RValue::get(Result);
}
+ case Builtin::BI__builtin_assume_aligned: {
+ Value *PtrValue = EmitScalarExpr(E->getArg(0));
+ Value *OffsetValue =
+ (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;
+
+ Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
+ ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
+ unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();
+
+ EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
+ return RValue::get(PtrValue);
+ }
+ case Builtin::BI__assume:
+ case Builtin::BI__builtin_assume: {
+ if (E->getArg(0)->HasSideEffects(getContext()))
+ return RValue::get(nullptr);
+
+ Value *ArgValue = EmitScalarExpr(E->getArg(0));
+ Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
+ return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
+ }
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
case Builtin::BI__builtin_bswap64: {
@@ -447,11 +476,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F));
}
case Builtin::BI__builtin_unreachable: {
- if (SanOpts->Unreachable) {
+ if (SanOpts.has(SanitizerKind::Unreachable)) {
SanitizerScope SanScope(this);
- EmitCheck(Builder.getFalse(), "builtin_unreachable",
- EmitCheckSourceLocation(E->getExprLoc()),
- ArrayRef<llvm::Value *>(), CRK_Unrecoverable);
+ EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
+ SanitizerKind::Unreachable),
+ "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
+ None);
} else
Builder.CreateUnreachable();
@@ -515,7 +545,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__builtin_isinf: {
// isinf(x) --> fabs(x) == infinity
Value *V = EmitScalarExpr(E->getArg(0));
- V = EmitFAbs(*this, V, E->getArg(0)->getType());
+ V = EmitFAbs(*this, V);
V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
@@ -529,7 +559,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
- Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *Abs = EmitFAbs(*this, V);
Value *IsLessThanInf =
Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
APFloat Smallest = APFloat::getSmallestNormalized(
@@ -547,7 +577,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *V = EmitScalarExpr(E->getArg(0));
Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
- Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *Abs = EmitFAbs(*this, V);
Value *IsNotInf =
Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
@@ -586,7 +616,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// if (fabs(V) == infinity) return FP_INFINITY
Builder.SetInsertPoint(NotNan);
- Value *VAbs = EmitFAbs(*this, V, E->getArg(5)->getType());
+ Value *VAbs = EmitFAbs(*this, V);
Value *IsInf =
Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
"isinf");
@@ -864,11 +894,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_or:
case Builtin::BI__sync_fetch_and_and:
case Builtin::BI__sync_fetch_and_xor:
+ case Builtin::BI__sync_fetch_and_nand:
case Builtin::BI__sync_add_and_fetch:
case Builtin::BI__sync_sub_and_fetch:
case Builtin::BI__sync_and_and_fetch:
case Builtin::BI__sync_or_and_fetch:
case Builtin::BI__sync_xor_and_fetch:
+ case Builtin::BI__sync_nand_and_fetch:
case Builtin::BI__sync_val_compare_and_swap:
case Builtin::BI__sync_bool_compare_and_swap:
case Builtin::BI__sync_lock_test_and_set:
@@ -905,6 +937,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
+ case Builtin::BI__sync_fetch_and_nand_1:
+ case Builtin::BI__sync_fetch_and_nand_2:
+ case Builtin::BI__sync_fetch_and_nand_4:
+ case Builtin::BI__sync_fetch_and_nand_8:
+ case Builtin::BI__sync_fetch_and_nand_16:
+ return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
@@ -951,6 +989,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
llvm::Instruction::Xor);
+ case Builtin::BI__sync_nand_and_fetch_1:
+ case Builtin::BI__sync_nand_and_fetch_2:
+ case Builtin::BI__sync_nand_and_fetch_4:
+ case Builtin::BI__sync_nand_and_fetch_8:
+ case Builtin::BI__sync_nand_and_fetch_16:
+ return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
+ llvm::Instruction::And, true);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
@@ -1347,11 +1392,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgTy = Arg->getType();
- if (ArgTy->isPPC_FP128Ty())
- break; // FIXME: I'm not sure what the right implementation is here.
int ArgWidth = ArgTy->getPrimitiveSizeInBits();
llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
+ if (ArgTy->isPPC_FP128Ty()) {
+ // The higher-order double comes first, and so we need to truncate the
+ // pair to extract the overall sign. The order of the pair is the same
+ // in both little- and big-Endian modes.
+ ArgWidth >>= 1;
+ ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
+ BCArg = Builder.CreateTrunc(BCArg, ArgIntTy);
+ }
Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
@@ -1518,9 +1569,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__noop:
// __noop always evaluates to an integer literal zero.
return RValue::get(ConstantInt::get(IntTy, 0));
- case Builtin::BI__assume:
- // Until LLVM supports assumptions at the IR level, this becomes nothing.
- return RValue::get(nullptr);
+ case Builtin::BI__builtin_call_with_static_chain: {
+ const CallExpr *Call = cast<CallExpr>(E->getArg(0));
+ const Expr *Chain = E->getArg(1);
+ return EmitCall(Call->getCallee()->getType(),
+ EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
+ Call->getCalleeDecl(), EmitScalarExpr(Chain));
+ }
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
@@ -1587,6 +1642,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
RMWI->setVolatile(true);
return RValue::get(RMWI);
}
+ case Builtin::BI__readfsdword: {
+ Value *IntToPtr =
+ Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
+ llvm::PointerType::get(CGM.Int32Ty, 257));
+ LoadInst *Load =
+ Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
+ return RValue::get(Load);
+ }
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -1690,8 +1753,6 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
return EmitARMBuiltinExpr(BuiltinID, E);
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
- case llvm::Triple::arm64:
- case llvm::Triple::arm64_be:
return EmitAArch64BuiltinExpr(BuiltinID, E);
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -1701,6 +1762,7 @@ Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
case llvm::Triple::ppc64le:
return EmitPPCBuiltinExpr(BuiltinID, E);
case llvm::Triple::r600:
+ case llvm::Triple::amdgcn:
return EmitR600BuiltinExpr(BuiltinID, E);
default:
return nullptr;
@@ -2005,8 +2067,12 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
NEONMAP1(vld4q_v, arm_neon_vld4, 0),
NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
+ NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
+ NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
NEONMAP0(vmovl_v),
NEONMAP0(vmovn_v),
@@ -2042,6 +2108,8 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
+ NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
@@ -2051,8 +2119,22 @@ static NeonIntrinsicInfo ARMSIMDIntrinsicMap [] = {
NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
+ NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
+ NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
+ NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
+ NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
+ NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
+ NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
@@ -2173,6 +2255,8 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl,UnsignedAlts),
NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
+ NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
+ NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
@@ -2184,6 +2268,8 @@ static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
+ NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
+ NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
@@ -2828,6 +2914,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vqshlq_n_v:
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
1, false);
+ case NEON::BI__builtin_neon_vqshlu_n_v:
+ case NEON::BI__builtin_neon_vqshluq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
+ 1, false);
case NEON::BI__builtin_neon_vrecpe_v:
case NEON::BI__builtin_neon_vrecpeq_v:
case NEON::BI__builtin_neon_vrsqrte_v:
@@ -2835,6 +2925,10 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
+ case NEON::BI__builtin_neon_vrshr_n_v:
+ case NEON::BI__builtin_neon_vrshrq_n_v:
+ return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
+ 1, true);
case NEON::BI__builtin_neon_vshl_n_v:
case NEON::BI__builtin_neon_vshlq_n_v:
Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
@@ -3039,39 +3133,76 @@ static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
return CGF.EmitNeonCall(TblF, TblOps, Name);
}
-Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
- const CallExpr *E) {
- unsigned HintID = static_cast<unsigned>(-1);
+Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
switch (BuiltinID) {
- default: break;
+ default:
+ return nullptr;
case ARM::BI__builtin_arm_nop:
- HintID = 0;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 0));
case ARM::BI__builtin_arm_yield:
case ARM::BI__yield:
- HintID = 1;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 1));
case ARM::BI__builtin_arm_wfe:
case ARM::BI__wfe:
- HintID = 2;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 2));
case ARM::BI__builtin_arm_wfi:
case ARM::BI__wfi:
- HintID = 3;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 3));
case ARM::BI__builtin_arm_sev:
case ARM::BI__sev:
- HintID = 4;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 4));
case ARM::BI__builtin_arm_sevl:
case ARM::BI__sevl:
- HintID = 5;
- break;
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
+ llvm::ConstantInt::get(Int32Ty, 5));
}
+}
- if (HintID != static_cast<unsigned>(-1)) {
- Function *F = CGM.getIntrinsic(Intrinsic::arm_hint);
- return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
+Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
+ const CallExpr *E) {
+ if (auto Hint = GetValueForARMHint(BuiltinID))
+ return Hint;
+
+ if (BuiltinID == ARM::BI__emit) {
+ bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
+
+ APSInt Value;
+ if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
+
+ llvm::InlineAsm *Emit =
+ IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true)
+ : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
+ /*SideEffects=*/true);
+
+ return Builder.CreateCall(Emit);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_dbg) {
+ Value *Option = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
+ }
+
+ if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *IsData = EmitScalarExpr(E->getArg(2));
+
+ // Locality is not supported on ARM target
+ Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall4(F, Address, RW, Locality, IsData);
}
if (BuiltinID == ARM::BI__builtin_arm_rbit) {
@@ -3157,7 +3288,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
? Intrinsic::arm_stlexd
: Intrinsic::arm_strexd);
- llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, NULL);
+ llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
@@ -3393,7 +3524,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// Many NEON builtins have identical semantics and uses in ARM and
// AArch64. Emit these in a single function.
- ArrayRef<NeonIntrinsicInfo> IntrinsicMap(ARMSIMDIntrinsicMap);
+ auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
if (Builtin)
@@ -3500,10 +3631,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vqrshrun_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
Ops, "vqrshrun_n", 1, true);
- case NEON::BI__builtin_neon_vqshlu_n_v:
- case NEON::BI__builtin_neon_vqshluq_n_v:
- return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftsu, Ty),
- Ops, "vqshlu", 1, false);
case NEON::BI__builtin_neon_vqshrn_n_v:
Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
@@ -3518,10 +3645,6 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vrshrn_n_v:
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
Ops, "vrshrn_n", 1, true);
- case NEON::BI__builtin_neon_vrshr_n_v:
- case NEON::BI__builtin_neon_vrshrq_n_v:
- Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
case NEON::BI__builtin_neon_vrsra_n_v:
case NEON::BI__builtin_neon_vrsraq_n_v:
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
@@ -3836,6 +3959,29 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
+ if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *RW = EmitScalarExpr(E->getArg(1));
+ Value *CacheLevel = EmitScalarExpr(E->getArg(2));
+ Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
+ Value *IsData = EmitScalarExpr(E->getArg(4));
+
+ Value *Locality = nullptr;
+ if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
+ // Temporal fetch, needs to convert cache level to locality.
+ Locality = llvm::ConstantInt::get(Int32Ty,
+ -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
+ } else {
+ // Streaming fetch.
+ Locality = llvm::ConstantInt::get(Int32Ty, 0);
+ }
+
+ // FIXME: We need AArch64 specific LLVM intrinsic if we want to specify
+ // PLDL3STRM or PLDL2STRM.
+ Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
+ return Builder.CreateCall4(F, Address, RW, Locality, IsData);
+ }
+
if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
@@ -3913,7 +4059,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
? Intrinsic::aarch64_stlxp
: Intrinsic::aarch64_stxp);
- llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, NULL);
+ llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
Value *One = llvm::ConstantInt::get(Int32Ty, 1);
Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
@@ -3994,7 +4140,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
Ops.push_back(EmitScalarExpr(E->getArg(i)));
- ArrayRef<NeonIntrinsicInfo> SISDMap(AArch64SISDIntrinsicMap);
+ auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
@@ -4675,38 +4821,19 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
Ops, "vrecps");
}
- case NEON::BI__builtin_neon_vrshr_n_v:
- case NEON::BI__builtin_neon_vrshrq_n_v:
- // FIXME: this can be shared with 32-bit ARM, but not AArch64 at the
- // moment. After the final merge it should be added to
- // EmitCommonNeonBuiltinExpr.
- Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n", 1, true);
- case NEON::BI__builtin_neon_vqshlu_n_v:
- case NEON::BI__builtin_neon_vqshluq_n_v:
- // FIXME: AArch64 and ARM use different intrinsics for this, but are
- // essentially compatible. It should be in EmitCommonNeonBuiltinExpr after
- // the final merge.
- Int = Intrinsic::aarch64_neon_sqshlu;
- return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n", 1, false);
case NEON::BI__builtin_neon_vqshrun_n_v:
- // FIXME: as above
Int = Intrinsic::aarch64_neon_sqshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
case NEON::BI__builtin_neon_vqrshrun_n_v:
- // FIXME: and again.
Int = Intrinsic::aarch64_neon_sqrshrun;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
case NEON::BI__builtin_neon_vqshrn_n_v:
- // FIXME: guess
Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
case NEON::BI__builtin_neon_vrshrn_n_v:
- // FIXME: there might be a pattern here.
Int = Intrinsic::aarch64_neon_rshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
case NEON::BI__builtin_neon_vqrshrn_n_v:
- // FIXME: another one
Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
case NEON::BI__builtin_neon_vrnda_v:
@@ -5435,8 +5562,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
Ops[3] = Builder.CreateZExt(Ops[3],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld2_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5452,8 +5578,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
Ops[4] = Builder.CreateZExt(Ops[4],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld3_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5470,8 +5595,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
Ops[5] = Builder.CreateZExt(Ops[5],
llvm::IntegerType::get(getLLVMContext(), 64));
- Ops[1] = Builder.CreateCall(F,
- ArrayRef<Value*>(Ops).slice(1), "vld4_lane");
+ Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
return Builder.CreateStore(Ops[1], Ops[0]);
@@ -5757,7 +5881,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 16 bytes, emit zero.
@@ -5787,7 +5911,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -5825,7 +5949,7 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// create i32 constant
llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
- return Builder.CreateCall(F, makeArrayRef(&Ops[0], 2), "palignr");
+ return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
}
// If palignr is shifting the pair of vectors more than 32 bytes, emit zero.
@@ -5839,8 +5963,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_movntdq256:
case X86::BI__builtin_ia32_movnti:
case X86::BI__builtin_ia32_movnti64: {
- llvm::MDNode *Node = llvm::MDNode::get(getLLVMContext(),
- Builder.getInt32(1));
+ llvm::MDNode *Node = llvm::MDNode::get(
+ getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
// Convert the type of the pointer to a pointer to the stored type.
Value *BC = Builder.CreateBitCast(Ops[0],
@@ -5863,8 +5987,8 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
case X86::BI__builtin_ia32_pswapdsi: {
- const char *name = nullptr;
- Intrinsic::ID ID = Intrinsic::not_intrinsic;
+ const char *name;
+ Intrinsic::ID ID;
switch(BuiltinID) {
default: llvm_unreachable("Unsupported intrinsic!");
case X86::BI__builtin_ia32_pswapdsf:
@@ -5918,6 +6042,154 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
}
+  // SSE comparison intrinsics
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ // These exist so that the builtin that takes an immediate can be bounds
+ // checked by clang to avoid passing bad immediates to the backend. Since
+ // AVX has a larger immediate than SSE we would need separate builtins to
+ // do the different bounds checking. Rather than create a clang specific
+ // SSE only builtin, this implements eight separate builtins to match gcc
+ // implementation.
+
+ // Choose the immediate.
+ unsigned Imm;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpeqsd:
+ Imm = 0;
+ break;
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ Imm = 1;
+ break;
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmplesd:
+ Imm = 2;
+ break;
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ Imm = 3;
+ break;
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ Imm = 4;
+ break;
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ Imm = 5;
+ break;
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ Imm = 6;
+ break;
+ case X86::BI__builtin_ia32_cmpordps:
+ case X86::BI__builtin_ia32_cmpordss:
+ case X86::BI__builtin_ia32_cmpordpd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ Imm = 7;
+ break;
+ }
+
+ // Choose the intrinsic ID.
+ const char *name;
+ Intrinsic::ID ID;
+ switch (BuiltinID) {
+ default: llvm_unreachable("Unsupported intrinsic!");
+ case X86::BI__builtin_ia32_cmpeqps:
+ case X86::BI__builtin_ia32_cmpltps:
+ case X86::BI__builtin_ia32_cmpleps:
+ case X86::BI__builtin_ia32_cmpunordps:
+ case X86::BI__builtin_ia32_cmpneqps:
+ case X86::BI__builtin_ia32_cmpnltps:
+ case X86::BI__builtin_ia32_cmpnleps:
+ case X86::BI__builtin_ia32_cmpordps:
+ name = "cmpps";
+ ID = Intrinsic::x86_sse_cmp_ps;
+ break;
+ case X86::BI__builtin_ia32_cmpeqss:
+ case X86::BI__builtin_ia32_cmpltss:
+ case X86::BI__builtin_ia32_cmpless:
+ case X86::BI__builtin_ia32_cmpunordss:
+ case X86::BI__builtin_ia32_cmpneqss:
+ case X86::BI__builtin_ia32_cmpnltss:
+ case X86::BI__builtin_ia32_cmpnless:
+ case X86::BI__builtin_ia32_cmpordss:
+ name = "cmpss";
+ ID = Intrinsic::x86_sse_cmp_ss;
+ break;
+ case X86::BI__builtin_ia32_cmpeqpd:
+ case X86::BI__builtin_ia32_cmpltpd:
+ case X86::BI__builtin_ia32_cmplepd:
+ case X86::BI__builtin_ia32_cmpunordpd:
+ case X86::BI__builtin_ia32_cmpneqpd:
+ case X86::BI__builtin_ia32_cmpnltpd:
+ case X86::BI__builtin_ia32_cmpnlepd:
+ case X86::BI__builtin_ia32_cmpordpd:
+ name = "cmppd";
+ ID = Intrinsic::x86_sse2_cmp_pd;
+ break;
+ case X86::BI__builtin_ia32_cmpeqsd:
+ case X86::BI__builtin_ia32_cmpltsd:
+ case X86::BI__builtin_ia32_cmplesd:
+ case X86::BI__builtin_ia32_cmpunordsd:
+ case X86::BI__builtin_ia32_cmpneqsd:
+ case X86::BI__builtin_ia32_cmpnltsd:
+ case X86::BI__builtin_ia32_cmpnlesd:
+ case X86::BI__builtin_ia32_cmpordsd:
+ name = "cmpsd";
+ ID = Intrinsic::x86_sse2_cmp_sd;
+ break;
+ }
+
+ Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
+ llvm::Function *F = CGM.getIntrinsic(ID);
+ return Builder.CreateCall(F, Ops, name);
}
}
@@ -5942,6 +6214,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvewx:
case PPC::BI__builtin_altivec_lvsl:
case PPC::BI__builtin_altivec_lvsr:
+ case PPC::BI__builtin_vsx_lxvd2x:
+ case PPC::BI__builtin_vsx_lxvw4x:
{
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
@@ -5971,6 +6245,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsr:
ID = Intrinsic::ppc_altivec_lvsr;
break;
+ case PPC::BI__builtin_vsx_lxvd2x:
+ ID = Intrinsic::ppc_vsx_lxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_lxvw4x:
+ ID = Intrinsic::ppc_vsx_lxvw4x;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -5982,6 +6262,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvebx:
case PPC::BI__builtin_altivec_stvehx:
case PPC::BI__builtin_altivec_stvewx:
+ case PPC::BI__builtin_vsx_stxvd2x:
+ case PPC::BI__builtin_vsx_stxvw4x:
{
Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
@@ -6004,6 +6286,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
ID = Intrinsic::ppc_altivec_stvewx;
break;
+ case PPC::BI__builtin_vsx_stxvd2x:
+ ID = Intrinsic::ppc_vsx_stxvd2x;
+ break;
+ case PPC::BI__builtin_vsx_stxvw4x:
+ ID = Intrinsic::ppc_vsx_stxvw4x;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -6033,6 +6321,17 @@ static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
return CGF.Builder.CreateCall3(F, Src0, Src1, Src2);
}
+// Emit an intrinsic that has 1 float or double operand, and 1 integer.
+static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
+ const CallExpr *E,
+ unsigned IntrinsicID) {
+ llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
+
+ Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
+ return CGF.Builder.CreateCall2(F, Src0, Src1);
+}
+
Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
switch (BuiltinID) {
@@ -6065,18 +6364,23 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
return Result;
}
case R600::BI__builtin_amdgpu_div_fmas:
- case R600::BI__builtin_amdgpu_div_fmasf:
- return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fmas);
+ case R600::BI__builtin_amdgpu_div_fmasf: {
+ llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
+ llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
+ llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
+
+ llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
+ Src0->getType());
+ llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
+ return Builder.CreateCall4(F, Src0, Src1, Src2, Src3ToBool);
+ }
case R600::BI__builtin_amdgpu_div_fixup:
case R600::BI__builtin_amdgpu_div_fixupf:
return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
case R600::BI__builtin_amdgpu_trig_preop:
- case R600::BI__builtin_amdgpu_trig_preopf: {
- Value *Src0 = EmitScalarExpr(E->getArg(0));
- Value *Src1 = EmitScalarExpr(E->getArg(1));
- Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_trig_preop, Src0->getType());
- return Builder.CreateCall2(F, Src0, Src1);
- }
+ case R600::BI__builtin_amdgpu_trig_preopf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
case R600::BI__builtin_amdgpu_rcp:
case R600::BI__builtin_amdgpu_rcpf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
@@ -6086,6 +6390,12 @@ Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
case R600::BI__builtin_amdgpu_rsq_clamped:
case R600::BI__builtin_amdgpu_rsq_clampedf:
return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
+ case R600::BI__builtin_amdgpu_ldexp:
+ case R600::BI__builtin_amdgpu_ldexpf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
+ case R600::BI__builtin_amdgpu_class:
+ case R600::BI__builtin_amdgpu_classf:
+ return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
default:
return nullptr;
}
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index 29e0a91a63fe..014a5dbd46d6 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -45,8 +45,7 @@ RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
}
llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
- CGF.EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
- ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
+ CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
diff --git a/lib/CodeGen/CGCUDARuntime.h b/lib/CodeGen/CGCUDARuntime.h
index a99a67ae1ae7..8c162fb05ab9 100644
--- a/lib/CodeGen/CGCUDARuntime.h
+++ b/lib/CodeGen/CGCUDARuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CUDARUNTIME_H
-#define CLANG_CODEGEN_CUDARUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCUDARUNTIME_H
namespace clang {
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 545c5ef9f827..9f0e67e42176 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -44,12 +44,13 @@ bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
if (!D->hasTrivialBody())
return true;
- // For exported destructors, we need a full definition.
- if (D->hasAttr<DLLExportAttr>())
- return true;
-
const CXXRecordDecl *Class = D->getParent();
+ // We are going to instrument this destructor, so give up even if it is
+ // currently empty.
+ if (Class->mayInsertExtraPadding())
+ return true;
+
// If we need to manipulate a VTT parameter, give up.
if (Class->getNumVBases()) {
// Extra Credit: passing extra parameters is perfectly safe
@@ -123,6 +124,11 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
if (!llvm::GlobalAlias::isValidLinkage(Linkage))
return true;
+ // Don't create a weak alias for a dllexport'd symbol.
+ if (AliasDecl.getDecl()->hasAttr<DLLExportAttr>() &&
+ llvm::GlobalValue::isWeakForLinker(Linkage))
+ return true;
+
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
@@ -161,9 +167,9 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
if (!InEveryTU) {
- /// If we don't have a definition for the destructor yet, don't
- /// emit. We can't emit aliases to declarations; that's just not
- /// how aliases work.
+ // If we don't have a definition for the destructor yet, don't
+ // emit. We can't emit aliases to declarations; that's just not
+ // how aliases work.
if (Ref->isDeclaration())
return true;
}
@@ -191,114 +197,55 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
}
// Finally, set up the alias with its proper name and attributes.
- SetCommonAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
+ setAliasAttributes(cast<NamedDecl>(AliasDecl.getDecl()), Alias);
return false;
}
-void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *ctor,
- CXXCtorType ctorType) {
- if (!getTarget().getCXXABI().hasConstructorVariants()) {
- // If there are no constructor variants, always emit the complete destructor.
- ctorType = Ctor_Complete;
- } else if (!ctor->getParent()->getNumVBases() &&
- (ctorType == Ctor_Complete || ctorType == Ctor_Base)) {
- // The complete constructor is equivalent to the base constructor
- // for classes with no virtual bases. Try to emit it as an alias.
- bool ProducedAlias =
- !TryEmitDefinitionAsAlias(GlobalDecl(ctor, Ctor_Complete),
- GlobalDecl(ctor, Ctor_Base), true);
- if (ctorType == Ctor_Complete && ProducedAlias)
- return;
- }
+llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
+ StructorType Type) {
+ const CGFunctionInfo &FnInfo =
+ getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ auto *Fn = cast<llvm::Function>(
+ getAddrOfCXXStructor(MD, Type, &FnInfo, nullptr, true));
- const CGFunctionInfo &fnInfo =
- getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
-
- auto *fn = cast<llvm::Function>(
- GetAddrOfCXXConstructor(ctor, ctorType, &fnInfo, true));
- setFunctionLinkage(GlobalDecl(ctor, ctorType), fn);
-
- CodeGenFunction(*this).GenerateCode(GlobalDecl(ctor, ctorType), fn, fnInfo);
-
- setFunctionDefinitionAttributes(ctor, fn);
- SetLLVMFunctionAttributesForDefinition(ctor, fn);
-}
-
-llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *ctor,
- CXXCtorType ctorType,
- const CGFunctionInfo *fnInfo,
- bool DontDefer) {
- GlobalDecl GD(ctor, ctorType);
-
- StringRef name = getMangledName(GD);
- if (llvm::GlobalValue *existing = GetGlobalValue(name))
- return existing;
-
- if (!fnInfo)
- fnInfo = &getTypes().arrangeCXXConstructorDeclaration(ctor, ctorType);
+ GlobalDecl GD;
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ } else {
+ const auto *CD = cast<CXXConstructorDecl>(MD);
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ }
- llvm::FunctionType *fnType = getTypes().GetFunctionType(*fnInfo);
- return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
- /*ForVTable=*/false,
- DontDefer));
+ setFunctionLinkage(GD, Fn);
+ CodeGenFunction(*this).GenerateCode(GD, Fn, FnInfo);
+ setFunctionDefinitionAttributes(MD, Fn);
+ SetLLVMFunctionAttributesForDefinition(MD, Fn);
+ return Fn;
}
-void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *dtor,
- CXXDtorType dtorType) {
- // The complete destructor is equivalent to the base destructor for
- // classes with no virtual bases, so try to emit it as an alias.
- if (!dtor->getParent()->getNumVBases() &&
- (dtorType == Dtor_Complete || dtorType == Dtor_Base)) {
- bool ProducedAlias =
- !TryEmitDefinitionAsAlias(GlobalDecl(dtor, Dtor_Complete),
- GlobalDecl(dtor, Dtor_Base), true);
- if (ProducedAlias) {
- if (dtorType == Dtor_Complete)
- return;
- if (dtor->isVirtual())
- getVTables().EmitThunks(GlobalDecl(dtor, Dtor_Complete));
- }
+llvm::GlobalValue *CodeGenModule::getAddrOfCXXStructor(
+ const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
+ llvm::FunctionType *FnType, bool DontDefer) {
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
}
- // The base destructor is equivalent to the base destructor of its
- // base class if there is exactly one non-virtual base class with a
- // non-trivial destructor, there are no fields with a non-trivial
- // destructor, and the body of the destructor is trivial.
- if (dtorType == Dtor_Base && !TryEmitBaseDestructorAsAlias(dtor))
- return;
+ StringRef Name = getMangledName(GD);
+ if (llvm::GlobalValue *Existing = GetGlobalValue(Name))
+ return Existing;
- const CGFunctionInfo &fnInfo =
- getTypes().arrangeCXXDestructor(dtor, dtorType);
-
- auto *fn = cast<llvm::Function>(
- GetAddrOfCXXDestructor(dtor, dtorType, &fnInfo, nullptr, true));
- setFunctionLinkage(GlobalDecl(dtor, dtorType), fn);
-
- CodeGenFunction(*this).GenerateCode(GlobalDecl(dtor, dtorType), fn, fnInfo);
-
- setFunctionDefinitionAttributes(dtor, fn);
- SetLLVMFunctionAttributesForDefinition(dtor, fn);
-}
-
-llvm::GlobalValue *
-CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *dtor,
- CXXDtorType dtorType,
- const CGFunctionInfo *fnInfo,
- llvm::FunctionType *fnType,
- bool DontDefer) {
- GlobalDecl GD(dtor, dtorType);
-
- StringRef name = getMangledName(GD);
- if (llvm::GlobalValue *existing = GetGlobalValue(name))
- return existing;
-
- if (!fnType) {
- if (!fnInfo) fnInfo = &getTypes().arrangeCXXDestructor(dtor, dtorType);
- fnType = getTypes().GetFunctionType(*fnInfo);
+ if (!FnType) {
+ if (!FnInfo)
+ FnInfo = &getTypes().arrangeCXXStructorDeclaration(MD, Type);
+ FnType = getTypes().GetFunctionType(*FnInfo);
}
- return cast<llvm::Function>(GetOrCreateLLVMFunction(name, fnType, GD,
+
+ return cast<llvm::Function>(GetOrCreateLLVMFunction(Name, FnType, GD,
/*ForVTable=*/false,
DontDefer));
}
@@ -360,8 +307,8 @@ CodeGenFunction::BuildAppleKextVirtualDestructorCall(
// -O does that. But need to support -O0 as well.
if (MD->isVirtual() && Type != Dtor_Base) {
// Compute the function type we're calling.
- const CGFunctionInfo &FInfo =
- CGM.getTypes().arrangeCXXDestructor(DD, Dtor_Complete);
+ const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
+ DD, StructorType::Complete);
llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index 55ddd666c490..d31331de6868 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -246,17 +246,6 @@ llvm::Value *CGCXXABI::readArrayCookieImpl(CodeGenFunction &CGF,
return llvm::ConstantInt::get(CGF.SizeTy, 0);
}
-void CGCXXABI::registerGlobalDtor(CodeGenFunction &CGF,
- const VarDecl &D,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
- if (D.getTLSKind())
- CGM.ErrorUnsupported(&D, "non-trivial TLS destruction");
-
- // The default behavior is to use atexit.
- CGF.registerGlobalDtorWithAtExit(D, dtor, addr);
-}
-
/// Returns the adjustment, in bytes, required for the given
/// member-pointer operation. Returns null if no adjustment is
/// required.
@@ -310,18 +299,6 @@ CGCXXABI::EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
return nullptr;
}
-void CGCXXABI::EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc) {
-}
-
-LValue CGCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
- const VarDecl *VD,
- QualType LValType) {
- ErrorUnsupportedABI(CGF, "odr-use of thread_local global");
- return LValue();
-}
-
bool CGCXXABI::NeedsVTTParameter(GlobalDecl GD) {
return false;
}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 91e49707bae6..cc5c1b2e0ae6 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CXXABI_H
-#define CLANG_CODEGEN_CXXABI_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCXXABI_H
#include "CodeGenFunction.h"
#include "clang/Basic/LLVM.h"
@@ -93,6 +93,8 @@ public:
/// when called virtually, and code generation does not support the case.
virtual bool HasThisReturn(GlobalDecl GD) const { return false; }
+ virtual bool hasMostDerivedReturn(GlobalDecl GD) const { return false; }
+
/// If the C++ ABI requires the given type be returned in a particular way,
/// this method sets RetAI and returns true.
virtual bool classifyReturnType(CGFunctionInfo &FI) const = 0;
@@ -156,6 +158,15 @@ public:
/// (in the C++ sense) with an LLVM zeroinitializer.
virtual bool isZeroInitializable(const MemberPointerType *MPT);
+ /// Return whether or not a member pointers type is convertible to an IR type.
+ virtual bool isMemberPointerConvertible(const MemberPointerType *MPT) const {
+ return true;
+ }
+
+ virtual bool isTypeInfoCalculable(QualType Ty) const {
+ return !Ty->isIncompleteType();
+ }
+
/// Create a null member pointer of the given type.
virtual llvm::Constant *EmitNullMemberPointer(const MemberPointerType *MPT);
@@ -198,14 +209,11 @@ protected:
CharUnits getMemberPointerPathAdjustment(const APValue &MP);
public:
- /// Adjust the given non-null pointer to an object of polymorphic
- /// type to point to the complete object.
- ///
- /// The IR type of the result should be a pointer but is otherwise
- /// irrelevant.
- virtual llvm::Value *adjustToCompleteObject(CodeGenFunction &CGF,
- llvm::Value *ptr,
- QualType type) = 0;
+ virtual void emitVirtualObjectDelete(CodeGenFunction &CGF,
+ const CXXDeleteExpr *DE,
+ llvm::Value *Ptr, QualType ElementType,
+ const CXXDestructorDecl *Dtor) = 0;
+ virtual void emitRethrow(CodeGenFunction &CGF, bool isNoReturn) = 0;
virtual llvm::Constant *getAddrOfRTTIDescriptor(QualType Ty) = 0;
@@ -236,20 +244,6 @@ public:
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl) = 0;
- /// Build the signature of the given constructor variant by adding
- /// any required parameters. For convenience, ArgTys has been initialized
- /// with the type of 'this' and ResTy has been initialized with the type of
- /// 'this' if HasThisReturn(GlobalDecl(Ctor, T)) is true or 'void' otherwise
- /// (although both may be changed by the ABI).
- ///
- /// If there are ever any ABIs where the implicit parameters are
- /// intermixed with the formal parameters, we can address those
- /// then.
- virtual void BuildConstructorSignature(const CXXConstructorDecl *Ctor,
- CXXCtorType T,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) = 0;
-
virtual llvm::BasicBlock *EmitCtorCompleteObjectHandler(CodeGenFunction &CGF,
const CXXRecordDecl *RD);
@@ -262,15 +256,11 @@ public:
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
- /// Build the signature of the given destructor variant by adding
- /// any required parameters. For convenience, ArgTys has been initialized
- /// with the type of 'this' and ResTy has been initialized with the type of
- /// 'this' if HasThisReturn(GlobalDecl(Dtor, T)) is true or 'void' otherwise
- /// (although both may be changed by the ABI).
- virtual void BuildDestructorSignature(const CXXDestructorDecl *Dtor,
- CXXDtorType T,
- CanQualType &ResTy,
- SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ /// Build the signature of the given constructor or destructor variant by
+ /// adding any required parameters. For convenience, ArgTys has been
+ /// initialized with the type of 'this'.
+ virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
@@ -368,11 +358,10 @@ public:
llvm::Type *Ty) = 0;
/// Emit the ABI-specific virtual destructor call.
- virtual void EmitVirtualDestructorCall(CodeGenFunction &CGF,
- const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- SourceLocation CallLoc,
- llvm::Value *This) = 0;
+ virtual llvm::Value *
+ EmitVirtualDestructorCall(CodeGenFunction &CGF, const CXXDestructorDecl *Dtor,
+ CXXDtorType DtorType, llvm::Value *This,
+ const CXXMemberCallExpr *CE) = 0;
virtual void adjustCallArgsForDestructorThunk(CodeGenFunction &CGF,
GlobalDecl GD,
@@ -397,6 +386,9 @@ public:
virtual void EmitReturnFromThunk(CodeGenFunction &CGF,
RValue RV, QualType ResultType);
+ virtual size_t getSrcArgforCopyCtor(const CXXConstructorDecl *,
+ FunctionArgList &Args) const = 0;
+
/// Gets the pure virtual member call function.
virtual StringRef GetPureVirtualCallName() = 0;
@@ -490,30 +482,44 @@ public:
/// Emit code to force the execution of a destructor during global
/// teardown. The default implementation of this uses atexit.
///
- /// \param dtor - a function taking a single pointer argument
- /// \param addr - a pointer to pass to the destructor function.
+ /// \param Dtor - a function taking a single pointer argument
+ /// \param Addr - a pointer to pass to the destructor function.
virtual void registerGlobalDtor(CodeGenFunction &CGF, const VarDecl &D,
- llvm::Constant *dtor, llvm::Constant *addr);
+ llvm::Constant *Dtor,
+ llvm::Constant *Addr) = 0;
/*************************** thread_local initialization ********************/
/// Emits ABI-required functions necessary to initialize thread_local
/// variables in this translation unit.
///
- /// \param Decls The thread_local declarations in this translation unit.
- /// \param InitFunc If this translation unit contains any non-constant
- /// initialization or non-trivial destruction for thread_local
- /// variables, a function to perform the initialization. Otherwise, 0.
+ /// \param CXXThreadLocals - The thread_local declarations in this translation
+ /// unit.
+ /// \param CXXThreadLocalInits - If this translation unit contains any
+ /// non-constant initialization or non-trivial destruction for
+ /// thread_local variables, a list of functions to perform the
+ /// initialization.
virtual void EmitThreadLocalInitFuncs(
- ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *> > Decls,
- llvm::Function *InitFunc);
+ CodeGenModule &CGM,
+ ArrayRef<std::pair<const VarDecl *, llvm::GlobalVariable *>>
+ CXXThreadLocals,
+ ArrayRef<llvm::Function *> CXXThreadLocalInits,
+ ArrayRef<llvm::GlobalVariable *> CXXThreadLocalInitVars) = 0;
+
+ // Determine if references to thread_local global variables can be made
+ // directly or require access through a thread wrapper function.
+ virtual bool usesThreadWrapperFunction() const = 0;
/// Emit a reference to a non-local thread_local variable (including
/// triggering the initialization of all thread_local variables in its
/// translation unit).
virtual LValue EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
const VarDecl *VD,
- QualType LValType);
+ QualType LValType) = 0;
+
+ /// Emit a single constructor/destructor with the given type from a C++
+ /// constructor Decl.
+ virtual void emitCXXStructor(const CXXMethodDecl *MD, StructorType Type) = 0;
};
// Create an instance of a C++ ABI class:
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 17c3354f93e9..6403fa99aa7b 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -47,7 +47,10 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
- // TODO: add support for CC_X86Pascal to llvm
+ // TODO: Add support for __pascal to LLVM.
+ case CC_X86Pascal: return llvm::CallingConv::C;
+ // TODO: Add support for __vectorcall to LLVM.
+ case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
}
}
@@ -80,42 +83,25 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
// When translating an unprototyped function type, always use a
// variadic type.
return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
- false, None, FTNP->getExtInfo(),
- RequiredArgs(0));
+ /*instanceMethod=*/false,
+ /*chainCall=*/false, None,
+ FTNP->getExtInfo(), RequiredArgs(0));
}
/// Arrange the LLVM function layout for a value of the given function
-/// type, on top of any implicit parameters already stored. Use the
-/// given ExtInfo instead of the ExtInfo from the function type.
-static const CGFunctionInfo &arrangeLLVMFunctionInfo(CodeGenTypes &CGT,
- bool IsInstanceMethod,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP,
- FunctionType::ExtInfo extInfo) {
+/// type, on top of any implicit parameters already stored.
+static const CGFunctionInfo &
+arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
+ SmallVectorImpl<CanQualType> &prefix,
+ CanQual<FunctionProtoType> FTP) {
RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
prefix.push_back(FTP->getParamType(i));
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
- return CGT.arrangeLLVMFunctionInfo(resultType, IsInstanceMethod, prefix,
- extInfo, required);
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeFreeFunctionType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- return arrangeLLVMFunctionInfo(CGT, false, prefix, FTP, FTP->getExtInfo());
-}
-
-/// Arrange the argument and result information for a free function (i.e.
-/// not a C++ or ObjC instance method) of the given type.
-static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
- SmallVectorImpl<CanQualType> &prefix,
- CanQual<FunctionProtoType> FTP) {
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(CGT, true, prefix, FTP, extInfo);
+ return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
+ /*chainCall=*/false, prefix,
+ FTP->getExtInfo(), required);
}
/// Arrange the argument and result information for a value of the
@@ -123,7 +109,8 @@ static const CGFunctionInfo &arrangeCXXMethodType(CodeGenTypes &CGT,
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
SmallVector<CanQualType, 16> argTypes;
- return ::arrangeFreeFunctionType(*this, argTypes, FTP);
+ return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
+ FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
@@ -137,6 +124,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
+ if (D->hasAttr<VectorCallAttr>())
+ return CC_X86VectorCall;
+
if (D->hasAttr<PascalAttr>())
return CC_X86Pascal;
@@ -158,23 +148,6 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
return CC_C;
}
-static bool isAAPCSVFP(const CGFunctionInfo &FI, const TargetInfo &Target) {
- switch (FI.getEffectiveCallingConvention()) {
- case llvm::CallingConv::C:
- switch (Target.getTriple().getEnvironment()) {
- case llvm::Triple::EABIHF:
- case llvm::Triple::GNUEABIHF:
- return true;
- default:
- return false;
- }
- case llvm::CallingConv::ARM_AAPCS_VFP:
- return true;
- default:
- return false;
- }
-}
-
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (Zero value of RD means we don't have any meaningful "this" argument type,
@@ -192,8 +165,9 @@ CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
else
argTypes.push_back(Context.VoidPtrTy);
- return ::arrangeCXXMethodType(*this, argTypes,
- FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
+ return ::arrangeLLVMFunctionInfo(
+ *this, true, argTypes,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
/// Arrange the argument and result information for a declaration or
@@ -216,31 +190,41 @@ CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
return arrangeFreeFunctionType(prototype);
}
-/// Arrange the argument and result information for a declaration
-/// or definition to the given constructor variant.
const CGFunctionInfo &
-CodeGenTypes::arrangeCXXConstructorDeclaration(const CXXConstructorDecl *D,
- CXXCtorType ctorKind) {
+CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
+ StructorType Type) {
+
SmallVector<CanQualType, 16> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
+ argTypes.push_back(GetThisType(Context, MD->getParent()));
- GlobalDecl GD(D, ctorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
+ GlobalDecl GD;
+ if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
+ GD = GlobalDecl(CD, toCXXCtorType(Type));
+ } else {
+ auto *DD = dyn_cast<CXXDestructorDecl>(MD);
+ GD = GlobalDecl(DD, toCXXDtorType(Type));
+ }
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
// Add the formal parameters.
for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
argTypes.push_back(FTP->getParamType(i));
- TheCXXABI.BuildConstructorSignature(D, ctorKind, resultType, argTypes);
+ TheCXXABI.buildStructorSignature(MD, Type, argTypes);
RequiredArgs required =
- (D->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
+ (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);
FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo, required);
+ CanQualType resultType = TheCXXABI.HasThisReturn(GD)
+ ? argTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
+ return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, extInfo,
+ required);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -251,42 +235,22 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
unsigned ExtraArgs) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> ArgTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end(); i != e;
- ++i)
- ArgTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
CanQual<FunctionProtoType> FPT = GetFormalType(D);
RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
GlobalDecl GD(D, CtorKind);
- CanQualType ResultType =
- TheCXXABI.HasThisReturn(GD) ? ArgTypes.front() : Context.VoidTy;
+ CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
+ ? ArgTypes.front()
+ : TheCXXABI.hasMostDerivedReturn(GD)
+ ? CGM.getContext().VoidPtrTy
+ : Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(ResultType, true, ArgTypes, Info, Required);
-}
-
-/// Arrange the argument and result information for a declaration,
-/// definition, or call to the given destructor variant. It so
-/// happens that all three cases produce the same information.
-const CGFunctionInfo &
-CodeGenTypes::arrangeCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType dtorKind) {
- SmallVector<CanQualType, 2> argTypes;
- argTypes.push_back(GetThisType(Context, D->getParent()));
-
- GlobalDecl GD(D, dtorKind);
- CanQualType resultType =
- TheCXXABI.HasThisReturn(GD) ? argTypes.front() : Context.VoidTy;
-
- TheCXXABI.BuildDestructorSignature(D, dtorKind, resultType, argTypes);
-
- CanQual<FunctionProtoType> FTP = GetFormalType(D);
- assert(FTP->getNumParams() == 0 && "dtor with formal parameters");
- assert(FTP->isVariadic() == 0 && "dtor with formal parameters");
-
- FunctionType::ExtInfo extInfo = FTP->getExtInfo();
- return arrangeLLVMFunctionInfo(resultType, true, argTypes, extInfo,
- RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
+ /*chainCall=*/false, ArgTypes, Info,
+ Required);
}
/// Arrange the argument and result information for the declaration or
@@ -305,8 +269,9 @@ CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
// non-variadic type.
if (isa<FunctionNoProtoType>(FTy)) {
CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
- return arrangeLLVMFunctionInfo(noProto->getReturnType(), false, None,
- noProto->getExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ noProto->getReturnType(), /*instanceMethod=*/false,
+ /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
}
assert(isa<FunctionProtoType>(FTy));
@@ -350,8 +315,9 @@ CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
RequiredArgs required =
(MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()), false,
- argTys, einfo, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTys, einfo, required);
}
const CGFunctionInfo &
@@ -360,14 +326,29 @@ CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
- return arrangeCXXConstructorDeclaration(CD, GD.getCtorType());
+ return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
- return arrangeCXXDestructor(DD, GD.getDtorType());
+ return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));
return arrangeFunctionDeclaration(FD);
}
+/// Arrange a thunk that takes 'this' as the first parameter followed by
+/// varargs. Return a void pointer, regardless of the actual return type.
+/// The body of the thunk will end in a musttail call to a function of the
+/// correct type, and the caller will bitcast the function to the correct
+/// prototype.
+const CGFunctionInfo &
+CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
+ assert(MD->isVirtual() && "only virtual memptrs have thunks");
+ CanQual<FunctionProtoType> FTP = GetFormalType(MD);
+ CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
+ return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
+ /*chainCall=*/false, ArgTys,
+ FTP->getExtInfo(), RequiredArgs(1));
+}
+
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
@@ -375,7 +356,8 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
CodeGenModule &CGM,
const CallArgList &args,
const FunctionType *fnType,
- unsigned numExtraRequiredArgs) {
+ unsigned numExtraRequiredArgs,
+ bool chainCall) {
assert(args.size() >= numExtraRequiredArgs);
// In most cases, there are no optional arguments.
@@ -397,8 +379,13 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
required = RequiredArgs(args.size());
}
- return CGT.arrangeFreeFunctionCall(fnType->getReturnType(), args,
- fnType->getExtInfo(), required);
+ // FIXME: Kill copy.
+ SmallVector<CanQualType, 16> argTypes;
+ for (const auto &arg : args)
+ argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
+ return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
+ /*instanceMethod=*/false, chainCall,
+ argTypes, fnType->getExtInfo(), required);
}
/// Figure out the rules for calling a function with the given formal
@@ -407,8 +394,10 @@ arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
- const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 0);
+ const FunctionType *fnType,
+ bool chainCall) {
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
+ chainCall ? 1 : 0, chainCall);
}
/// A block function call is essentially a free-function call with an
@@ -416,7 +405,8 @@ CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
const FunctionType *fnType) {
- return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1);
+ return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
+ /*chainCall=*/false);
}
const CGFunctionInfo &
@@ -426,11 +416,11 @@ CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes,
- info, required);
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
/// Arrange a call to a C++ method, passing the given arguments.
@@ -440,13 +430,13 @@ CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
RequiredArgs required) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (CallArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType(i->Ty));
+ for (const auto &Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
FunctionType::ExtInfo info = FPT->getExtInfo();
- return arrangeLLVMFunctionInfo(GetReturnType(FPT->getReturnType()), true,
- argTypes, info, required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
@@ -454,19 +444,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
const FunctionType::ExtInfo &info, bool isVariadic) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> argTypes;
- for (FunctionArgList::const_iterator i = args.begin(), e = args.end();
- i != e; ++i)
- argTypes.push_back(Context.getCanonicalParamType((*i)->getType()));
+ for (auto Arg : args)
+ argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));
RequiredArgs required =
(isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
- return arrangeLLVMFunctionInfo(GetReturnType(resultType), false, argTypes, info,
- required);
+ return arrangeLLVMFunctionInfo(
+ GetReturnType(resultType), /*instanceMethod=*/false,
+ /*chainCall=*/false, argTypes, info, required);
}
const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
- return arrangeLLVMFunctionInfo(getContext().VoidTy, false, None,
- FunctionType::ExtInfo(), RequiredArgs::All);
+ return arrangeLLVMFunctionInfo(
+ getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
+ None, FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
@@ -474,22 +465,20 @@ const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
ArrayRef<CanQualType> argTypes,
FunctionType::ExtInfo info,
RequiredArgs required) {
-#ifndef NDEBUG
- for (ArrayRef<CanQualType>::const_iterator
- I = argTypes.begin(), E = argTypes.end(); I != E; ++I)
- assert(I->isCanonicalAsParam());
-#endif
+ assert(std::all_of(argTypes.begin(), argTypes.end(),
+ std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, IsInstanceMethod, info, required, resultType,
- argTypes);
+ CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
+ resultType, argTypes);
void *insertPos = nullptr;
CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
@@ -497,11 +486,12 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
return *FI;
// Construct the function info. We co-allocate the ArgInfos.
- FI = CGFunctionInfo::create(CC, IsInstanceMethod, info, resultType, argTypes,
- required);
+ FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
+ resultType, argTypes, required);
FunctionInfos.InsertNode(FI, insertPos);
- bool inserted = FunctionsBeingProcessed.insert(FI); (void)inserted;
+ bool inserted = FunctionsBeingProcessed.insert(FI).second;
+ (void)inserted;
assert(inserted && "Recursively being processed?");
// Compute ABI information.
@@ -525,7 +515,8 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
}
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
- bool IsInstanceMethod,
+ bool instanceMethod,
+ bool chainCall,
const FunctionType::ExtInfo &info,
CanQualType resultType,
ArrayRef<CanQualType> argTypes,
@@ -536,7 +527,8 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->CallingConvention = llvmCC;
FI->EffectiveCallingConvention = llvmCC;
FI->ASTCallingConvention = info.getCC();
- FI->InstanceMethod = IsInstanceMethod;
+ FI->InstanceMethod = instanceMethod;
+ FI->ChainCall = chainCall;
FI->NoReturn = info.getNoReturn();
FI->ReturnsRetained = info.getProducesResult();
FI->Required = required;
@@ -552,13 +544,79 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
/***/
-void CodeGenTypes::GetExpandedTypes(QualType type,
- SmallVectorImpl<llvm::Type*> &expandedTypes) {
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(type)) {
- uint64_t NumElts = AT->getSize().getZExtValue();
- for (uint64_t Elt = 0; Elt < NumElts; ++Elt)
- GetExpandedTypes(AT->getElementType(), expandedTypes);
- } else if (const RecordType *RT = type->getAs<RecordType>()) {
+namespace {
+// ABIArgInfo::Expand implementation.
+
+// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
+struct TypeExpansion {
+ enum TypeExpansionKind {
+ // Elements of constant arrays are expanded recursively.
+ TEK_ConstantArray,
+ // Record fields are expanded recursively (but if record is a union, only
+ // the field with the largest size is expanded).
+ TEK_Record,
+ // For complex types, real and imaginary parts are expanded recursively.
+ TEK_Complex,
+ // All other types are not expandable.
+ TEK_None
+ };
+
+ const TypeExpansionKind Kind;
+
+ TypeExpansion(TypeExpansionKind K) : Kind(K) {}
+ virtual ~TypeExpansion() {}
+};
+
+struct ConstantArrayExpansion : TypeExpansion {
+ QualType EltTy;
+ uint64_t NumElts;
+
+ ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
+ : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_ConstantArray;
+ }
+};
+
+struct RecordExpansion : TypeExpansion {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+
+ SmallVector<const FieldDecl *, 1> Fields;
+
+ RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
+ SmallVector<const FieldDecl *, 1> &&Fields)
+ : TypeExpansion(TEK_Record), Bases(Bases), Fields(Fields) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Record;
+ }
+};
+
+struct ComplexExpansion : TypeExpansion {
+ QualType EltTy;
+
+ ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_Complex;
+ }
+};
+
+struct NoExpansion : TypeExpansion {
+ NoExpansion() : TypeExpansion(TEK_None) {}
+ static bool classof(const TypeExpansion *TE) {
+ return TE->Kind == TEK_None;
+ }
+};
+} // namespace
+
+static std::unique_ptr<TypeExpansion>
+getTypeExpansion(QualType Ty, const ASTContext &Context) {
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
+ return llvm::make_unique<ConstantArrayExpansion>(
+ AT->getElementType(), AT->getSize().getZExtValue());
+ }
+ if (const RecordType *RT = Ty->getAs<RecordType>()) {
+ SmallVector<const CXXBaseSpecifier *, 1> Bases;
+ SmallVector<const FieldDecl *, 1> Fields;
const RecordDecl *RD = RT->getDecl();
assert(!RD->hasFlexibleArrayMember() &&
"Cannot expand structure with flexible array.");
@@ -569,88 +627,178 @@ void CodeGenTypes::GetExpandedTypes(QualType type,
CharUnits UnionSize = CharUnits::Zero();
for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
+ CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
if (UnionSize < FieldSize) {
UnionSize = FieldSize;
LargestFD = FD;
}
}
if (LargestFD)
- GetExpandedTypes(LargestFD->getType(), expandedTypes);
+ Fields.push_back(LargestFD);
} else {
- for (const auto *I : RD->fields()) {
- assert(!I->isBitField() &&
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ assert(!CXXRD->isDynamicClass() &&
+ "cannot expand vtable pointers in dynamic classes");
+ for (const CXXBaseSpecifier &BS : CXXRD->bases())
+ Bases.push_back(&BS);
+ }
+
+ for (const auto *FD : RD->fields()) {
+ // Skip zero length bitfields.
+ if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
+ continue;
+ assert(!FD->isBitField() &&
"Cannot expand structure with bit-field members.");
- GetExpandedTypes(I->getType(), expandedTypes);
+ Fields.push_back(FD);
}
}
- } else if (const ComplexType *CT = type->getAs<ComplexType>()) {
- llvm::Type *EltTy = ConvertType(CT->getElementType());
- expandedTypes.push_back(EltTy);
- expandedTypes.push_back(EltTy);
- } else
- expandedTypes.push_back(ConvertType(type));
+ return llvm::make_unique<RecordExpansion>(std::move(Bases),
+ std::move(Fields));
+ }
+ if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
+ return llvm::make_unique<ComplexExpansion>(CT->getElementType());
+ }
+ return llvm::make_unique<NoExpansion>();
}
-llvm::Function::arg_iterator
-CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
- llvm::Function::arg_iterator AI) {
- assert(LV.isSimple() &&
- "Unexpected non-simple lvalue during struct expansion.");
+static int getExpansionSize(QualType Ty, const ASTContext &Context) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
+ }
+ if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ int Res = 0;
+ for (auto BS : RExp->Bases)
+ Res += getExpansionSize(BS->getType(), Context);
+ for (auto FD : RExp->Fields)
+ Res += getExpansionSize(FD->getType(), Context);
+ return Res;
+ }
+ if (isa<ComplexExpansion>(Exp.get()))
+ return 2;
+ assert(isa<NoExpansion>(Exp.get()));
+ return 1;
+}
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, Elt);
- LValue LV = MakeAddrLValue(EltAddr, EltTy);
- AI = ExpandTypeFromArgs(EltTy, LV, AI);
+void
+CodeGenTypes::getExpandedTypes(QualType Ty,
+ SmallVectorImpl<llvm::Type *>::iterator &TI) {
+ auto Exp = getTypeExpansion(Ty, Context);
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ getExpandedTypes(CAExp->EltTy, TI);
}
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- if (RD->isUnion()) {
- // Unions can be here only in degenerative cases - all the fields are same
- // after flattening. Thus we have to use the "largest" field.
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ for (auto BS : RExp->Bases)
+ getExpandedTypes(BS->getType(), TI);
+ for (auto FD : RExp->Fields)
+ getExpandedTypes(FD->getType(), TI);
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
+ llvm::Type *EltTy = ConvertType(CExp->EltTy);
+ *TI++ = EltTy;
+ *TI++ = EltTy;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ *TI++ = ConvertType(Ty);
+ }
+}
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, LargestFD);
- AI = ExpandTypeFromArgs(LargestFD->getType(), SubLV, AI);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- QualType FT = FD->getType();
+void CodeGenFunction::ExpandTypeFromArgs(
+ QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
+ assert(LV.isSimple() &&
+ "Unexpected non-simple lvalue during struct expansion.");
- // FIXME: What are the right qualifiers here?
- LValue SubLV = EmitLValueForField(LV, FD);
- AI = ExpandTypeFromArgs(FT, SubLV, AI);
- }
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i);
+ LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
+ ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = LV.getAddress();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ LValue SubLV = MakeAddrLValue(Base, BS->getType());
+
+ // Recurse onto bases.
+ ExpandTypeFromArgs(BS->getType(), SubLV, AI);
}
- } else if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
- QualType EltTy = CT->getElementType();
+ for (auto FD : RExp->Fields) {
+ // FIXME: What are the right qualifiers here?
+ LValue SubLV = EmitLValueForField(LV, FD);
+ ExpandTypeFromArgs(FD->getType(), SubLV, AI);
+ }
+ } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(RealAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(RealAddr, CExp->EltTy));
llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
- EmitStoreThroughLValue(RValue::get(AI++), MakeAddrLValue(ImagAddr, EltTy));
+ EmitStoreThroughLValue(RValue::get(*AI++),
+ MakeAddrLValue(ImagAddr, CExp->EltTy));
} else {
- EmitStoreThroughLValue(RValue::get(AI), LV);
- ++AI;
+ assert(isa<NoExpansion>(Exp.get()));
+ EmitStoreThroughLValue(RValue::get(*AI++), LV);
}
+}
+
+void CodeGenFunction::ExpandTypeToArgs(
+ QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
+ SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
+ auto Exp = getTypeExpansion(Ty, getContext());
+ if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
+ llvm::Value *Addr = RV.getAggregateAddr();
+ for (int i = 0, n = CAExp->NumElts; i < n; i++) {
+ llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i);
+ RValue EltRV =
+ convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
+ ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
+ }
+ } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
+ llvm::Value *This = RV.getAggregateAddr();
+ for (const CXXBaseSpecifier *BS : RExp->Bases) {
+ // Perform a single step derived-to-base conversion.
+ llvm::Value *Base =
+ GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
+ /*NullCheckValue=*/false, SourceLocation());
+ RValue BaseRV = RValue::getAggregate(Base);
+
+ // Recurse onto bases.
+ ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+
+ LValue LV = MakeAddrLValue(This, Ty);
+ for (auto FD : RExp->Fields) {
+ RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
+ ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
+ IRCallArgPos);
+ }
+ } else if (isa<ComplexExpansion>(Exp.get())) {
+ ComplexPairTy CV = RV.getComplexVal();
+ IRCallArgs[IRCallArgPos++] = CV.first;
+ IRCallArgs[IRCallArgPos++] = CV.second;
+ } else {
+ assert(isa<NoExpansion>(Exp.get()));
+ assert(RV.isScalar() &&
+ "Unexpected non-scalar rvalue during struct expansion.");
+
+ // Insert a bitcast as needed.
+ llvm::Value *V = RV.getScalarVal();
+ if (IRCallArgPos < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(IRCallArgPos))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));
- return AI;
+ IRCallArgs[IRCallArgPos++] = V;
+ }
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
@@ -667,11 +815,13 @@ EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
llvm::Type *FirstElt = SrcSTy->getElementType(0);
// If the first elt is at least as large as what we're looking for, or if the
- // first element is the same size as the whole struct, we can enter it.
+ // first element is the same size as the whole struct, we can enter it. The
+ // comparison must be made on the store size and not the alloca size. Using
+ // the alloca size may overstate the size of the load.
uint64_t FirstEltSize =
- CGF.CGM.getDataLayout().getTypeAllocSize(FirstElt);
+ CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
if (FirstEltSize < DstSize &&
- FirstEltSize < CGF.CGM.getDataLayout().getTypeAllocSize(SrcSTy))
+ FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
return SrcPtr;
// GEP into the first element.
@@ -890,6 +1040,145 @@ static void CreateCoercedStore(llvm::Value *Src,
}
}
+namespace {
+
+/// Encapsulates information about the way function arguments from
+/// CGFunctionInfo should be passed to actual LLVM IR function.
+class ClangToLLVMArgMapping {
+ static const unsigned InvalidIndex = ~0U;
+ unsigned InallocaArgNo;
+ unsigned SRetArgNo;
+ unsigned TotalIRArgs;
+
+ /// Arguments of LLVM IR function corresponding to single Clang argument.
+ struct IRArgs {
+ unsigned PaddingArgIndex;
+ // Argument is expanded to IR arguments at positions
+ // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
+ unsigned FirstArgIndex;
+ unsigned NumberOfArgs;
+
+ IRArgs()
+ : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
+ NumberOfArgs(0) {}
+ };
+
+ SmallVector<IRArgs, 8> ArgInfo;
+
+public:
+ ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs = false)
+ : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
+ ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
+ construct(Context, FI, OnlyRequiredArgs);
+ }
+
+ bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
+ unsigned getInallocaArgNo() const {
+ assert(hasInallocaArg());
+ return InallocaArgNo;
+ }
+
+ bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
+ unsigned getSRetArgNo() const {
+ assert(hasSRetArg());
+ return SRetArgNo;
+ }
+
+ unsigned totalIRArgs() const { return TotalIRArgs; }
+
+ bool hasPaddingArg(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
+ }
+ unsigned getPaddingArgNo(unsigned ArgNo) const {
+ assert(hasPaddingArg(ArgNo));
+ return ArgInfo[ArgNo].PaddingArgIndex;
+ }
+
+ /// Returns index of first IR argument corresponding to ArgNo, and their
+ /// quantity.
+ std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
+ assert(ArgNo < ArgInfo.size());
+ return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
+ ArgInfo[ArgNo].NumberOfArgs);
+ }
+
+private:
+ void construct(const ASTContext &Context, const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs);
+};
+
+void ClangToLLVMArgMapping::construct(const ASTContext &Context,
+ const CGFunctionInfo &FI,
+ bool OnlyRequiredArgs) {
+ unsigned IRArgNo = 0;
+ bool SwapThisWithSRet = false;
+ const ABIArgInfo &RetAI = FI.getReturnInfo();
+
+ if (RetAI.getKind() == ABIArgInfo::Indirect) {
+ SwapThisWithSRet = RetAI.isSRetAfterThis();
+ SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
+ }
+
+ unsigned ArgNo = 0;
+ unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
+ ++I, ++ArgNo) {
+ assert(I != FI.arg_end());
+ QualType ArgType = I->type;
+ const ABIArgInfo &AI = I->info;
+ // Collect data about IR arguments corresponding to Clang argument ArgNo.
+ auto &IRArgs = ArgInfo[ArgNo];
+
+ if (AI.getPaddingType())
+ IRArgs.PaddingArgIndex = IRArgNo++;
+
+ switch (AI.getKind()) {
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ // FIXME: handle sseregparm someday...
+ llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
+ if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
+ IRArgs.NumberOfArgs = STy->getNumElements();
+ } else {
+ IRArgs.NumberOfArgs = 1;
+ }
+ break;
+ }
+ case ABIArgInfo::Indirect:
+ IRArgs.NumberOfArgs = 1;
+ break;
+ case ABIArgInfo::Ignore:
+ case ABIArgInfo::InAlloca:
+ // ignore and inalloca doesn't have matching LLVM parameters.
+ IRArgs.NumberOfArgs = 0;
+ break;
+ case ABIArgInfo::Expand: {
+ IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
+ break;
+ }
+ }
+
+ if (IRArgs.NumberOfArgs > 0) {
+ IRArgs.FirstArgIndex = IRArgNo;
+ IRArgNo += IRArgs.NumberOfArgs;
+ }
+
+ // Skip over the sret parameter when it comes second. We already handled it
+ // above.
+ if (IRArgNo == 1 && SwapThisWithSRet)
+ IRArgNo++;
+ }
+ assert(ArgNo == ArgInfo.size());
+
+ if (FI.usesInAlloca())
+ InallocaArgNo = IRArgNo++;
+
+ TotalIRArgs = IRArgNo;
+}
+} // namespace
+
/***/
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
@@ -936,14 +1225,12 @@ llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
-
- bool Inserted = FunctionsBeingProcessed.insert(&FI); (void)Inserted;
+
+ bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
+ (void)Inserted;
assert(Inserted && "Recursively being processed?");
-
- bool SwapThisWithSRet = false;
- SmallVector<llvm::Type*, 8> argTypes;
- llvm::Type *resultType = nullptr;
+ llvm::Type *resultType = nullptr;
const ABIArgInfo &retAI = FI.getReturnInfo();
switch (retAI.getKind()) {
case ABIArgInfo::Expand:
@@ -969,13 +1256,6 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
case ABIArgInfo::Indirect: {
assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
resultType = llvm::Type::getVoidTy(getLLVMContext());
-
- QualType ret = FI.getReturnType();
- llvm::Type *ty = ConvertType(ret);
- unsigned addressSpace = Context.getTargetAddressSpace(ret);
- argTypes.push_back(llvm::PointerType::get(ty, addressSpace));
-
- SwapThisWithSRet = retAI.isSRetAfterThis();
break;
}
@@ -984,67 +1264,83 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
break;
}
- // Add in all of the required arguments.
- CGFunctionInfo::const_arg_iterator it = FI.arg_begin(), ie;
- if (FI.isVariadic()) {
- ie = it + FI.getRequiredArgs().getNumRequiredArgs();
- } else {
- ie = FI.arg_end();
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
+ SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
+
+ // Add type for sret argument.
+ if (IRFunctionArgs.hasSRetArg()) {
+ QualType Ret = FI.getReturnType();
+ llvm::Type *Ty = ConvertType(Ret);
+ unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
+ ArgTypes[IRFunctionArgs.getSRetArgNo()] =
+ llvm::PointerType::get(Ty, AddressSpace);
+ }
+
+ // Add type for inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
+ auto ArgStruct = FI.getArgStruct();
+ assert(ArgStruct);
+ ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
}
- for (; it != ie; ++it) {
- const ABIArgInfo &argAI = it->info;
+
+ // Add in all of the required arguments.
+ unsigned ArgNo = 0;
+ CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
+ ie = it + FI.getNumRequiredArgs();
+ for (; it != ie; ++it, ++ArgNo) {
+ const ABIArgInfo &ArgInfo = it->info;
// Insert a padding type to ensure proper alignment.
- if (llvm::Type *PaddingType = argAI.getPaddingType())
- argTypes.push_back(PaddingType);
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ ArgInfo.getPaddingType();
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
- switch (argAI.getKind()) {
+ switch (ArgInfo.getKind()) {
case ABIArgInfo::Ignore:
case ABIArgInfo::InAlloca:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
// indirect arguments are always on the stack, which is addr space #0.
llvm::Type *LTy = ConvertTypeForMem(it->type);
- argTypes.push_back(LTy->getPointerTo());
+ ArgTypes[FirstIRArg] = LTy->getPointerTo();
break;
}
case ABIArgInfo::Extend:
case ABIArgInfo::Direct: {
- // If the coerce-to type is a first class aggregate, flatten it. Either
- // way is semantically identical, but fast-isel and the optimizer
- // generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
- llvm::Type *argType = argAI.getCoerceToType();
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
+ llvm::Type *argType = ArgInfo.getCoerceToType();
llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
- if (st && !isAAPCSVFP(FI, getTarget())) {
+ if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ assert(NumIRArgs == st->getNumElements());
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
- argTypes.push_back(st->getElementType(i));
+ ArgTypes[FirstIRArg + i] = st->getElementType(i);
} else {
- argTypes.push_back(argType);
+ assert(NumIRArgs == 1);
+ ArgTypes[FirstIRArg] = argType;
}
break;
}
case ABIArgInfo::Expand:
- GetExpandedTypes(it->type, argTypes);
+ auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
+ getExpandedTypes(it->type, ArgTypesIter);
+ assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
break;
}
}
- // Add the inalloca struct as the last parameter type.
- if (llvm::StructType *ArgStruct = FI.getArgStruct())
- argTypes.push_back(ArgStruct->getPointerTo());
-
- if (SwapThisWithSRet)
- std::swap(argTypes[0], argTypes[1]);
-
bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
assert(Erased && "Not in set?");
-
- return llvm::FunctionType::get(resultType, argTypes, FI.isVariadic());
+
+ return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
@@ -1056,7 +1352,8 @@ llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
const CGFunctionInfo *Info;
if (isa<CXXDestructorDecl>(MD))
- Info = &arrangeCXXDestructor(cast<CXXDestructorDecl>(MD), GD.getDtorType());
+ Info =
+ &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
else
Info = &arrangeCXXMethodDeclaration(MD);
return GetFunctionType(*Info);
@@ -1069,6 +1366,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
+ bool HasOptnone = false;
CallingConv = FI.getEffectiveCallingConvention();
@@ -1109,12 +1407,18 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NoAlias);
if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
RetAttrs.addAttribute(llvm::Attribute::NonNull);
+
+ HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ }
+
+ // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
+ if (!HasOptnone) {
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attribute::MinSize);
}
- if (CodeGenOpts.OptimizeSize)
- FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
- if (CodeGenOpts.OptimizeSize == 2)
- FuncAttrs.addAttribute(llvm::Attribute::MinSize);
if (CodeGenOpts.DisableRedZone)
FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
if (CodeGenOpts.NoImplicitFloat)
@@ -1156,9 +1460,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
FuncAttrs.addAttribute("no-realign-stack");
}
+ ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
+
QualType RetTy = FI.getReturnType();
- unsigned Index = 1;
- bool SwapThisWithSRet = false;
const ABIArgInfo &RetAI = FI.getReturnInfo();
switch (RetAI.getKind()) {
case ABIArgInfo::Extend:
@@ -1174,25 +1478,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
case ABIArgInfo::Ignore:
break;
- case ABIArgInfo::InAlloca: {
- // inalloca disables readnone and readonly
- FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
- .removeAttribute(llvm::Attribute::ReadNone);
- break;
- }
-
+ case ABIArgInfo::InAlloca:
case ABIArgInfo::Indirect: {
- llvm::AttrBuilder SRETAttrs;
- SRETAttrs.addAttribute(llvm::Attribute::StructRet);
- if (RetAI.getInReg())
- SRETAttrs.addAttribute(llvm::Attribute::InReg);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- PAL.push_back(llvm::AttributeSet::get(
- getLLVMContext(), SwapThisWithSRet ? 2 : Index, SRETAttrs));
-
- if (!SwapThisWithSRet)
- ++Index;
- // sret disables readnone and readonly
+ // inalloca and sret disable readnone and readonly
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
break;
@@ -1211,28 +1499,44 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
RetAttrs.addAttribute(llvm::Attribute::NonNull);
}
- if (RetAttrs.hasAttributes())
- PAL.push_back(llvm::
- AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::ReturnIndex,
- RetAttrs));
+ // Attach return attributes.
+ if (RetAttrs.hasAttributes()) {
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+ }
- for (const auto &I : FI.arguments()) {
- QualType ParamType = I.type;
- const ABIArgInfo &AI = I.info;
+ // Attach attributes to sret.
+ if (IRFunctionArgs.hasSRetArg()) {
+ llvm::AttrBuilder SRETAttrs;
+ SRETAttrs.addAttribute(llvm::Attribute::StructRet);
+ if (RetAI.getInReg())
+ SRETAttrs.addAttribute(llvm::Attribute::InReg);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
+ }
+
+ // Attach attributes to inalloca argument.
+ if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
+ Attrs.addAttribute(llvm::Attribute::InAlloca);
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
+ }
- // Skip over the sret parameter when it comes second. We already handled it
- // above.
- if (Index == 2 && SwapThisWithSRet)
- ++Index;
+ unsigned ArgNo = 0;
+ for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
+ E = FI.arg_end();
+ I != E; ++I, ++ArgNo) {
+ QualType ParamType = I->type;
+ const ABIArgInfo &AI = I->info;
+ llvm::AttrBuilder Attrs;
- if (AI.getPaddingType()) {
+ // Add attribute for padding argument, if necessary.
+ if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
if (AI.getPaddingInReg())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index,
- llvm::Attribute::InReg));
- // Increment Index if there is padding.
- ++Index;
+ PAL.push_back(llvm::AttributeSet::get(
+ getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
+ llvm::Attribute::InReg));
}
// 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
@@ -1245,24 +1549,13 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
else if (ParamType->isUnsignedIntegerOrEnumerationType())
Attrs.addAttribute(llvm::Attribute::ZExt);
// FALL THROUGH
- case ABIArgInfo::Direct: {
- if (AI.getInReg())
+ case ABIArgInfo::Direct:
+ if (ArgNo == 0 && FI.isChainCall())
+ Attrs.addAttribute(llvm::Attribute::Nest);
+ else if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
-
- // FIXME: handle sseregparm someday...
-
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(AI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy) {
- unsigned Extra = STy->getNumElements()-1; // 1 will be added below.
- if (Attrs.hasAttributes())
- for (unsigned I = 0; I < Extra; ++I)
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index + I,
- Attrs));
- Index += Extra;
- }
break;
- }
+
case ABIArgInfo::Indirect:
if (AI.getInReg())
Attrs.addAttribute(llvm::Attribute::InReg);
@@ -1278,26 +1571,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
break;
case ABIArgInfo::Ignore:
- // Skip increment, no matching LLVM parameter.
+ case ABIArgInfo::Expand:
continue;
case ABIArgInfo::InAlloca:
// inalloca disables readnone and readonly.
FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
.removeAttribute(llvm::Attribute::ReadNone);
- // Skip increment, no matching LLVM parameter.
- continue;
-
- case ABIArgInfo::Expand: {
- SmallVector<llvm::Type*, 8> types;
- // FIXME: This is rather inefficient. Do we ever actually need to do
- // anything here? The result should be just reconstructed on the other
- // side, so extension should be a non-issue.
- getTypes().GetExpandedTypes(ParamType, types);
- Index += types.size();
continue;
}
- }
if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
QualType PTy = RefTy->getPointeeType();
@@ -1308,17 +1590,15 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
Attrs.addAttribute(llvm::Attribute::NonNull);
}
- if (Attrs.hasAttributes())
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
- ++Index;
- }
-
- // Add the inalloca attribute to the trailing inalloca parameter if present.
- if (FI.usesInAlloca()) {
- llvm::AttrBuilder Attrs;
- Attrs.addAttribute(llvm::Attribute::InAlloca);
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(), Index, Attrs));
+ if (Attrs.hasAttributes()) {
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
+ for (unsigned i = 0; i < NumIRArgs; i++)
+ PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
+ FirstIRArg + i + 1, Attrs));
+ }
}
+ assert(ArgNo == FI.arg_size());
if (FuncAttrs.hasAttributes())
PAL.push_back(llvm::
@@ -1347,9 +1627,41 @@ static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
+/// Returns the attribute (either parameter attribute, or function
+/// attribute), which declares argument ArgNo to be non-null.
+static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
+ QualType ArgType, unsigned ArgNo) {
+ // FIXME: __attribute__((nonnull)) can also be applied to:
+ // - references to pointers, where the pointee is known to be
+ // nonnull (apparently a Clang extension)
+ // - transparent unions containing pointers
+ // In the former case, LLVM IR cannot represent the constraint. In
+ // the latter case, we have no guarantee that the transparent union
+ // is in fact passed as a pointer.
+ if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
+ return nullptr;
+ // First, check attribute on parameter itself.
+ if (PVD) {
+ if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
+ return ParmNNAttr;
+ }
+ // Check function attributes.
+ if (!FD)
+ return nullptr;
+ for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
+ if (NNAttr->isNonNull(ArgNo))
+ return NNAttr;
+ }
+ return nullptr;
+}
+
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Function *Fn,
const FunctionArgList &Args) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
+ // Naked functions don't have prologues.
+ return;
+
// If this is an implicit-return-zero function, go ahead and
// initialize the return value. TODO: it might be nice to have
// a more general mechanism for this that didn't require synthesized
@@ -1366,39 +1678,31 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// FIXME: We no longer need the types from FunctionArgList; lift up and
// simplify.
- // Emit allocs for param decls. Give the LLVM Argument nodes names.
- llvm::Function::arg_iterator AI = Fn->arg_begin();
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
+ // Flattened function arguments.
+ SmallVector<llvm::Argument *, 16> FnArgs;
+ FnArgs.reserve(IRFunctionArgs.totalIRArgs());
+ for (auto &Arg : Fn->args()) {
+ FnArgs.push_back(&Arg);
+ }
+ assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
// If we're using inalloca, all the memory arguments are GEPs off of the last
// parameter, which is a pointer to the complete memory area.
llvm::Value *ArgStruct = nullptr;
- if (FI.usesInAlloca()) {
- llvm::Function::arg_iterator EI = Fn->arg_end();
- --EI;
- ArgStruct = EI;
+ if (IRFunctionArgs.hasInallocaArg()) {
+ ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
}
- // Name the struct return parameter, which can come first or second.
- const ABIArgInfo &RetAI = FI.getReturnInfo();
- bool SwapThisWithSRet = false;
- if (RetAI.isIndirect()) {
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- ++AI;
+ // Name the struct return parameter.
+ if (IRFunctionArgs.hasSRetArg()) {
+ auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
AI->setName("agg.result");
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
llvm::Attribute::NoAlias));
- if (SwapThisWithSRet)
- --AI; // Go back to the beginning for 'this'.
- else
- ++AI; // Skip the sret parameter.
}
- // Get the function-level nonnull attribute if it exists.
- const NonNullAttr *NNAtt =
- CurCodeDecl ? CurCodeDecl->getAttr<NonNullAttr>() : nullptr;
-
// Track if we received the parameter as a pointer (indirect, byval, or
// inalloca). If already have a pointer, EmitParmDecl doesn't need to copy it
// into a local alloca for us.
@@ -1413,9 +1717,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we can push the cleanups in the correct order for the ABI.
assert(FI.arg_size() == Args.size() &&
"Mismatch between function signature & arguments.");
- unsigned ArgNo = 1;
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
+ for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i, ++info_it, ++ArgNo) {
const VarDecl *Arg = *i;
QualType Ty = info_it->type;
@@ -1424,20 +1728,21 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
bool isPromoted =
isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
- // Skip the dummy padding argument.
- if (ArgI.getPaddingType())
- ++AI;
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgI.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
llvm::Value *V = Builder.CreateStructGEP(
ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
- continue; // Don't increment AI!
+ break;
}
case ABIArgInfo::Indirect: {
- llvm::Value *V = AI;
+ assert(NumIRArgs == 1);
+ llvm::Value *V = FnArgs[FirstIRArg];
if (!hasScalarEvaluationKind(Ty)) {
// Aggregates and complex variables are accessed by reference. All we
@@ -1483,12 +1788,13 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
ArgI.getCoerceToType() == ConvertType(Ty) &&
ArgI.getDirectOffset() == 0) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
llvm::Value *V = AI;
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
- if ((NNAtt && NNAtt->isNonNull(PVD->getFunctionScopeIndex())) ||
- PVD->hasAttr<NonNullAttr>())
+ if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
+ PVD->getFunctionScopeIndex()))
AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
@@ -1527,6 +1833,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
AI->getArgNo() + 1,
llvm::Attribute::NonNull));
}
+
+ const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
+ if (!AVAttr)
+ if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
+ AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
+ if (AVAttr) {
+ llvm::Value *AlignmentValue =
+ EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI =
+ cast<llvm::ConstantInt>(AlignmentValue);
+ unsigned Alignment =
+ std::min((unsigned) AlignmentCI->getZExtValue(),
+ +llvm::Value::MaximumAlignment);
+
+ llvm::AttrBuilder Attrs;
+ Attrs.addAlignmentAttr(Alignment);
+ AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
+ }
}
if (Arg->getType().isRestrictQualified())
@@ -1581,13 +1906,11 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
- if (!isAAPCSVFP(FI, getTarget()) && STy && STy->getNumElements() > 1) {
+ if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
llvm::Type *DstTy =
cast<llvm::PointerType>(Ptr->getType())->getElementType();
@@ -1596,11 +1919,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (SrcSize <= DstSize) {
Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
} else {
llvm::AllocaInst *TempAlloca =
@@ -1608,20 +1932,22 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
TempAlloca->setAlignment(AlignmentToUse);
llvm::Value *TempV = TempAlloca;
+ assert(STy->getNumElements() == NumIRArgs);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ auto AI = FnArgs[FirstIRArg + i];
AI->setName(Arg->getName() + ".coerce" + Twine(i));
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
- Builder.CreateStore(AI++, EltPtr);
+ Builder.CreateStore(AI, EltPtr);
}
Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
- assert(AI != Fn->arg_end() && "Argument mismatch!");
+ assert(NumIRArgs == 1);
+ auto AI = FnArgs[FirstIRArg];
AI->setName(Arg->getName() + ".coerce");
- CreateCoercedStore(AI++, Ptr, /*DestIsVolatile=*/false, *this);
+ CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
}
@@ -1634,7 +1960,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
} else {
ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
}
- continue; // Skip ++AI increment, already done.
+ break;
}
case ABIArgInfo::Expand: {
@@ -1645,17 +1971,20 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
CharUnits Align = getContext().getDeclAlign(Arg);
Alloca->setAlignment(Align.getQuantity());
LValue LV = MakeAddrLValue(Alloca, Ty, Align);
- llvm::Function::arg_iterator End = ExpandTypeFromArgs(Ty, LV, AI);
ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
- // Name the arguments used in expansion and increment AI.
- unsigned Index = 0;
- for (; AI != End; ++AI, ++Index)
- AI->setName(Arg->getName() + "." + Twine(Index));
- continue;
+ auto FnArgIter = FnArgs.begin() + FirstIRArg;
+ ExpandTypeFromArgs(Ty, LV, FnArgIter);
+ assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
+ for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
+ auto AI = FnArgs[FirstIRArg + i];
+ AI->setName(Arg->getName() + "." + Twine(i));
+ }
+ break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
// Initialize the local variable appropriately.
if (!hasScalarEvaluationKind(Ty)) {
ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
@@ -1663,21 +1992,10 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
}
-
- // Skip increment, no matching LLVM parameter.
- continue;
+ break;
}
-
- ++AI;
-
- if (ArgNo == 1 && SwapThisWithSRet)
- ++AI; // Skip the sret parameter.
}
- if (FI.usesInAlloca())
- ++AI;
- assert(AI == Fn->arg_end() && "Argument mismatch!");
-
if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
for (int I = Args.size() - 1; I >= 0; --I)
EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
@@ -1887,6 +2205,12 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
bool EmitRetDbgLoc,
SourceLocation EndLoc) {
+ if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
+ // Naked functions don't have epilogues.
+ Builder.CreateUnreachable();
+ return;
+ }
+
// Functions with no result always return void.
if (!ReturnValue) {
Builder.CreateRetVoid();
@@ -1998,7 +2322,26 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm_unreachable("Invalid ABI kind for return argument");
}
- llvm::Instruction *Ret = RV ? Builder.CreateRet(RV) : Builder.CreateRetVoid();
+ llvm::Instruction *Ret;
+ if (RV) {
+ if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
+ if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
+ SanitizerScope SanScope(this);
+ llvm::Value *Cond = Builder.CreateICmpNE(
+ RV, llvm::Constant::getNullValue(RV->getType()));
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(EndLoc),
+ EmitCheckSourceLocation(RetNNAttr->getLocation()),
+ };
+ EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
+ "nonnull_return", StaticData, None);
+ }
+ }
+ Ret = Builder.CreateRet(RV);
+ } else {
+ Ret = Builder.CreateRetVoid();
+ }
+
if (!RetDbgLoc.isUnknown())
Ret->setDebugLoc(RetDbgLoc);
}
@@ -2045,19 +2388,8 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
return args.add(RValue::get(Builder.CreateLoad(local)), type);
}
- if (isInAllocaArgument(CGM.getCXXABI(), type)) {
- AggValueSlot Slot = createPlaceholderSlot(*this, type);
- Slot.setExternallyDestructed();
-
- // FIXME: Either emit a copy constructor call, or figure out how to do
- // guaranteed tail calls with perfect forwarding in LLVM.
- CGM.ErrorUnsupported(param, "non-trivial argument copy for thunk");
- EmitNullInitialization(Slot.getAddr(), type);
-
- RValue RV = Slot.asRValue();
- args.add(RV, type);
- return;
- }
+ assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
+ "cannot emit delegate call arguments for inalloca arguments!");
args.add(convertTempToRValue(local, type, loc), type);
}
@@ -2317,10 +2649,36 @@ void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
}
}
+static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV,
+ QualType ArgType, SourceLocation ArgLoc,
+ const FunctionDecl *FD, unsigned ParmNum) {
+ if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
+ return;
+ auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
+ unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
+ auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
+ if (!NNAttr)
+ return;
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ assert(RV.isScalar());
+ llvm::Value *V = RV.getScalarVal();
+ llvm::Value *Cond =
+ CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
+ llvm::Constant *StaticData[] = {
+ CGF.EmitCheckSourceLocation(ArgLoc),
+ CGF.EmitCheckSourceLocation(NNAttr->getLocation()),
+ llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1),
+ };
+ CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
+ "nonnull_arg", StaticData, None);
+}
+
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
ArrayRef<QualType> ArgTypes,
CallExpr::const_arg_iterator ArgBeg,
CallExpr::const_arg_iterator ArgEnd,
+ const FunctionDecl *CalleeDecl,
+ unsigned ParamsToSkip,
bool ForceColumnInfo) {
CGDebugInfo *DI = getDebugInfo();
SourceLocation CallLoc;
@@ -2344,6 +2702,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
for (int I = ArgTypes.size() - 1; I >= 0; --I) {
CallExpr::const_arg_iterator Arg = ArgBeg + I;
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
@@ -2358,6 +2718,8 @@ void CodeGenFunction::EmitCallArgs(CallArgList &Args,
CallExpr::const_arg_iterator Arg = ArgBeg + I;
assert(Arg != ArgEnd);
EmitCallArg(Args, *Arg, ArgTypes[I]);
+ emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
+ CalleeDecl, ParamsToSkip + I);
// Restore the debug location.
if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
}
@@ -2457,6 +2819,24 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
args.add(EmitAnyExprToTemp(E), type);
}
+QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
+ // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
+ // implicitly widens null pointer constants that are arguments to varargs
+ // functions to pointer-sized ints.
+ if (!getTarget().getTriple().isOSWindows())
+ return Arg->getType();
+
+ if (Arg->getType()->isIntegerType() &&
+ getContext().getTypeSize(Arg->getType()) <
+ getContext().getTargetInfo().getPointerWidth(0) &&
+ Arg->isNullPointerConstant(getContext(),
+ Expr::NPC_ValueDependentIsNotNull)) {
+ return getContext().getIntPtrType();
+ }
+
+ return Arg->getType();
+}
+
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
@@ -2471,7 +2851,7 @@ CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitNounwindRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitNounwindRuntimeCall(callee, None, name);
}
/// Emits a call to the given nounwind runtime function.
@@ -2489,7 +2869,7 @@ CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
const llvm::Twine &name) {
- return EmitRuntimeCall(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCall(callee, None, name);
}
/// Emits a simple call (never an invoke) to the given runtime
@@ -2528,7 +2908,7 @@ void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
const Twine &name) {
- return EmitRuntimeCallOrInvoke(callee, ArrayRef<llvm::Value*>(), name);
+ return EmitRuntimeCallOrInvoke(callee, None, name);
}
/// Emits a call or invoke instruction to the given runtime function.
@@ -2544,7 +2924,7 @@ CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
const Twine &Name) {
- return EmitCallOrInvoke(Callee, ArrayRef<llvm::Value *>(), Name);
+ return EmitCallOrInvoke(Callee, None, Name);
}
/// Emits a call or invoke instruction to the given function, depending
@@ -2572,73 +2952,6 @@ CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
return Inst;
}
-static void checkArgMatches(llvm::Value *Elt, unsigned &ArgNo,
- llvm::FunctionType *FTy) {
- if (ArgNo < FTy->getNumParams())
- assert(Elt->getType() == FTy->getParamType(ArgNo));
- else
- assert(FTy->isVarArg());
- ++ArgNo;
-}
-
-void CodeGenFunction::ExpandTypeToArgs(QualType Ty, RValue RV,
- SmallVectorImpl<llvm::Value *> &Args,
- llvm::FunctionType *IRFuncTy) {
- if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
- unsigned NumElts = AT->getSize().getZExtValue();
- QualType EltTy = AT->getElementType();
- llvm::Value *Addr = RV.getAggregateAddr();
- for (unsigned Elt = 0; Elt < NumElts; ++Elt) {
- llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, Elt);
- RValue EltRV = convertTempToRValue(EltAddr, EltTy, SourceLocation());
- ExpandTypeToArgs(EltTy, EltRV, Args, IRFuncTy);
- }
- } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
- RecordDecl *RD = RT->getDecl();
- assert(RV.isAggregate() && "Unexpected rvalue during struct expansion");
- LValue LV = MakeAddrLValue(RV.getAggregateAddr(), Ty);
-
- if (RD->isUnion()) {
- const FieldDecl *LargestFD = nullptr;
- CharUnits UnionSize = CharUnits::Zero();
-
- for (const auto *FD : RD->fields()) {
- assert(!FD->isBitField() &&
- "Cannot expand structure with bit-field members.");
- CharUnits FieldSize = getContext().getTypeSizeInChars(FD->getType());
- if (UnionSize < FieldSize) {
- UnionSize = FieldSize;
- LargestFD = FD;
- }
- }
- if (LargestFD) {
- RValue FldRV = EmitRValueForField(LV, LargestFD, SourceLocation());
- ExpandTypeToArgs(LargestFD->getType(), FldRV, Args, IRFuncTy);
- }
- } else {
- for (const auto *FD : RD->fields()) {
- RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
- ExpandTypeToArgs(FD->getType(), FldRV, Args, IRFuncTy);
- }
- }
- } else if (Ty->isAnyComplexType()) {
- ComplexPairTy CV = RV.getComplexVal();
- Args.push_back(CV.first);
- Args.push_back(CV.second);
- } else {
- assert(RV.isScalar() &&
- "Unexpected non-scalar rvalue during struct expansion.");
-
- // Insert a bitcast as needed.
- llvm::Value *V = RV.getScalarVal();
- if (Args.size() < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(Args.size()))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(Args.size()));
-
- Args.push_back(V);
- }
-}
-
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
@@ -2661,15 +2974,12 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const Decl *TargetDecl,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
- SmallVector<llvm::Value*, 16> Args;
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- // IRArgNo - Keep track of the argument number in the callee we're looking at.
- unsigned IRArgNo = 0;
llvm::FunctionType *IRFuncTy =
cast<llvm::FunctionType>(
cast<llvm::PointerType>(Callee->getType())->getElementType());
@@ -2691,22 +3001,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
ArgMemory = AI;
}
+ ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
+ SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
+
// If the call returns a temporary with struct return, create a temporary
// alloca to hold the result, unless one is given to us.
llvm::Value *SRetPtr = nullptr;
- bool SwapThisWithSRet = false;
if (RetAI.isIndirect() || RetAI.isInAlloca()) {
SRetPtr = ReturnValue.getValue();
if (!SRetPtr)
SRetPtr = CreateMemTemp(RetTy);
- if (RetAI.isIndirect()) {
- Args.push_back(SRetPtr);
- SwapThisWithSRet = RetAI.isSRetAfterThis();
- if (SwapThisWithSRet)
- IRArgNo = 1;
- checkArgMatches(SRetPtr, IRArgNo, IRFuncTy);
- if (SwapThisWithSRet)
- IRArgNo = 0;
+ if (IRFunctionArgs.hasSRetArg()) {
+ IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
} else {
llvm::Value *Addr =
Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
@@ -2716,26 +3022,26 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
+ unsigned ArgNo = 0;
CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
- I != E; ++I, ++info_it) {
+ I != E; ++I, ++info_it, ++ArgNo) {
const ABIArgInfo &ArgInfo = info_it->info;
RValue RV = I->RV;
- // Skip 'sret' if it came second.
- if (IRArgNo == 1 && SwapThisWithSRet)
- ++IRArgNo;
-
CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);
// Insert a padding argument to ensure proper alignment.
- if (llvm::Type *PaddingType = ArgInfo.getPaddingType()) {
- Args.push_back(llvm::UndefValue::get(PaddingType));
- ++IRArgNo;
- }
+ if (IRFunctionArgs.hasPaddingArg(ArgNo))
+ IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
+ llvm::UndefValue::get(ArgInfo.getPaddingType());
+
+ unsigned FirstIRArg, NumIRArgs;
+ std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
switch (ArgInfo.getKind()) {
case ABIArgInfo::InAlloca: {
+ assert(NumIRArgs == 0);
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (RV.isAggregate()) {
// Replace the placeholder with the appropriate argument slot GEP.
@@ -2761,22 +3067,20 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
}
- break; // Don't increment IRArgNo!
+ break;
}
case ABIArgInfo::Indirect: {
+ assert(NumIRArgs == 1);
if (RV.isScalar() || RV.isComplex()) {
// Make a temporary alloca to pass the argument.
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (ArgInfo.getIndirectAlign() > AI->getAlignment())
AI->setAlignment(ArgInfo.getIndirectAlign());
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
- LValue argLV = MakeAddrLValue(Args.back(), I->Ty, TypeAlign);
+ LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
EmitInitStoreOfNonAggregate(*this, RV, argLV);
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
@@ -2790,8 +3094,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
unsigned Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
- const unsigned ArgAddrSpace = (IRArgNo < IRFuncTy->getNumParams() ?
- IRFuncTy->getParamType(IRArgNo)->getPointerAddressSpace() : 0);
+ const unsigned ArgAddrSpace =
+ (FirstIRArg < IRFuncTy->getNumParams()
+ ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
+ : 0);
if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
(ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
@@ -2800,23 +3106,18 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
if (Align > AI->getAlignment())
AI->setAlignment(Align);
- Args.push_back(AI);
+ IRCallArgs[FirstIRArg] = AI;
EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
-
- // Validate argument match.
- checkArgMatches(AI, IRArgNo, IRFuncTy);
} else {
// Skip the extra memcpy call.
- Args.push_back(Addr);
-
- // Validate argument match.
- checkArgMatches(Addr, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg] = Addr;
}
}
break;
}
case ABIArgInfo::Ignore:
+ assert(NumIRArgs == 0);
break;
case ABIArgInfo::Extend:
@@ -2824,20 +3125,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
ArgInfo.getDirectOffset() == 0) {
+ assert(NumIRArgs == 1);
llvm::Value *V;
if (RV.isScalar())
V = RV.getScalarVal();
else
V = Builder.CreateLoad(RV.getAggregateAddr());
-
+
+ // We might have to widen integers, but we should never truncate.
+ if (ArgInfo.getCoerceToType() != V->getType() &&
+ V->getType()->isIntegerTy())
+ V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
+
// If the argument doesn't match, perform a bitcast to coerce it. This
// can happen due to trivial type mismatches.
- if (IRArgNo < IRFuncTy->getNumParams() &&
- V->getType() != IRFuncTy->getParamType(IRArgNo))
- V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRArgNo));
- Args.push_back(V);
-
- checkArgMatches(V, IRArgNo, IRFuncTy);
+ if (FirstIRArg < IRFuncTy->getNumParams() &&
+ V->getType() != IRFuncTy->getParamType(FirstIRArg))
+ V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
+ IRCallArgs[FirstIRArg] = V;
break;
}
@@ -2859,14 +3164,11 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
- // If the coerce-to type is a first class aggregate, we flatten it and
- // pass the elements. Either way is semantically identical, but fast-isel
- // and the optimizer generally likes scalar values better than FCAs.
- // We cannot do this for functions using the AAPCS calling convention,
- // as structures are treated differently by that calling convention.
+ // Fast-isel and the optimizer generally like scalar values better than
+ // FCAs, so we flatten them if this is safe to do for this argument.
llvm::StructType *STy =
dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
- if (STy && !isAAPCSVFP(CallInfo, getTarget())) {
+ if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
llvm::Type *SrcTy =
cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
@@ -2886,38 +3188,32 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::PointerType::getUnqual(STy));
}
+ assert(NumIRArgs == STy->getNumElements());
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
// We don't know what we're loading from.
LI->setAlignment(1);
- Args.push_back(LI);
-
- // Validate argument match.
- checkArgMatches(LI, IRArgNo, IRFuncTy);
+ IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
- Args.push_back(CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
- *this));
-
- // Validate argument match.
- checkArgMatches(Args.back(), IRArgNo, IRFuncTy);
+ assert(NumIRArgs == 1);
+ IRCallArgs[FirstIRArg] =
+ CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
}
break;
}
case ABIArgInfo::Expand:
- ExpandTypeToArgs(I->Ty, RV, Args, IRFuncTy);
- IRArgNo = Args.size();
+ unsigned IRArgPos = FirstIRArg;
+ ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
+ assert(IRArgPos == FirstIRArg + NumIRArgs);
break;
}
}
- if (SwapThisWithSRet)
- std::swap(Args[0], Args[1]);
-
if (ArgMemory) {
llvm::Value *Arg = ArgMemory;
if (CallInfo.isVariadic()) {
@@ -2948,7 +3244,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Arg = Builder.CreateBitCast(Arg, LastParamTy);
}
}
- Args.push_back(Arg);
+ assert(IRFunctionArgs.hasInallocaArg());
+ IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
if (!CallArgs.getCleanupsToDeactivate().empty())
@@ -2967,7 +3264,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (CE->getOpcode() == llvm::Instruction::BitCast &&
ActualFT->getReturnType() == CurFT->getReturnType() &&
ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == Args.size() &&
+ ActualFT->getNumParams() == IRCallArgs.size() &&
(CurFT->isVarArg() || !ActualFT->isVarArg())) {
bool ArgsMatch = true;
for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
@@ -2984,6 +3281,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
+ // Inalloca argument can have different type.
+ if (IRFunctionArgs.hasInallocaArg() &&
+ i == IRFunctionArgs.getInallocaArgNo())
+ continue;
+ if (i < IRFuncTy->getNumParams())
+ assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
+ }
+
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
@@ -2998,10 +3305,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, Args);
+ CS = Builder.CreateCall(Callee, IRCallArgs);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, Args);
+ CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
EmitBlock(Cont);
}
if (callOrInvoke)
@@ -3050,75 +3357,92 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
- switch (RetAI.getKind()) {
- case ABIArgInfo::InAlloca:
- case ABIArgInfo::Indirect:
- return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
+ RValue Ret = [&] {
+ switch (RetAI.getKind()) {
+ case ABIArgInfo::InAlloca:
+ case ABIArgInfo::Indirect:
+ return convertTempToRValue(SRetPtr, RetTy, SourceLocation());
- case ABIArgInfo::Ignore:
- // If we are ignoring an argument that had a result, make sure to
- // construct the appropriate return value for our caller.
- return GetUndefRValue(RetTy);
+ case ABIArgInfo::Ignore:
+ // If we are ignoring an argument that had a result, make sure to
+ // construct the appropriate return value for our caller.
+ return GetUndefRValue(RetTy);
- case ABIArgInfo::Extend:
- case ABIArgInfo::Direct: {
- llvm::Type *RetIRTy = ConvertType(RetTy);
- if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
- switch (getEvaluationKind(RetTy)) {
- case TEK_Complex: {
- llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
- llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
- return RValue::getComplex(std::make_pair(Real, Imag));
- }
- case TEK_Aggregate: {
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ case ABIArgInfo::Extend:
+ case ABIArgInfo::Direct: {
+ llvm::Type *RetIRTy = ConvertType(RetTy);
+ if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
+ switch (getEvaluationKind(RetTy)) {
+ case TEK_Complex: {
+ llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
+ llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
+ return RValue::getComplex(std::make_pair(Real, Imag));
+ }
+ case TEK_Aggregate: {
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "agg.tmp");
- DestIsVolatile = false;
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "agg.tmp");
+ DestIsVolatile = false;
+ }
+ BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
+ return RValue::getAggregate(DestPtr);
}
- BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
- return RValue::getAggregate(DestPtr);
- }
- case TEK_Scalar: {
- // If the argument doesn't match, perform a bitcast to coerce it. This
- // can happen due to trivial type mismatches.
- llvm::Value *V = CI;
- if (V->getType() != RetIRTy)
- V = Builder.CreateBitCast(V, RetIRTy);
- return RValue::get(V);
+ case TEK_Scalar: {
+ // If the argument doesn't match, perform a bitcast to coerce it. This
+ // can happen due to trivial type mismatches.
+ llvm::Value *V = CI;
+ if (V->getType() != RetIRTy)
+ V = Builder.CreateBitCast(V, RetIRTy);
+ return RValue::get(V);
+ }
+ }
+ llvm_unreachable("bad evaluation kind");
}
+
+ llvm::Value *DestPtr = ReturnValue.getValue();
+ bool DestIsVolatile = ReturnValue.isVolatile();
+
+ if (!DestPtr) {
+ DestPtr = CreateMemTemp(RetTy, "coerce");
+ DestIsVolatile = false;
}
- llvm_unreachable("bad evaluation kind");
- }
- llvm::Value *DestPtr = ReturnValue.getValue();
- bool DestIsVolatile = ReturnValue.isVolatile();
+ // If the value is offset in memory, apply the offset now.
+ llvm::Value *StorePtr = DestPtr;
+ if (unsigned Offs = RetAI.getDirectOffset()) {
+ StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
+ StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
+ StorePtr = Builder.CreateBitCast(StorePtr,
+ llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ }
+ CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- if (!DestPtr) {
- DestPtr = CreateMemTemp(RetTy, "coerce");
- DestIsVolatile = false;
+ return convertTempToRValue(DestPtr, RetTy, SourceLocation());
}
- // If the value is offset in memory, apply the offset now.
- llvm::Value *StorePtr = DestPtr;
- if (unsigned Offs = RetAI.getDirectOffset()) {
- StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
- StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
- StorePtr = Builder.CreateBitCast(StorePtr,
- llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
+ case ABIArgInfo::Expand:
+ llvm_unreachable("Invalid ABI kind for return argument");
}
- CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
- return convertTempToRValue(DestPtr, RetTy, SourceLocation());
- }
+ llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ } ();
- case ABIArgInfo::Expand:
- llvm_unreachable("Invalid ABI kind for return argument");
+ if (Ret.isScalar() && TargetDecl) {
+ if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
+ llvm::Value *OffsetValue = nullptr;
+ if (const auto *Offset = AA->getOffset())
+ OffsetValue = EmitScalarExpr(Offset);
+
+ llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
+ EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
+ OffsetValue);
+ }
}
- llvm_unreachable("Unhandled ABIArgInfo::Kind");
+ return Ret;
}
/* VarArg handling */
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 9510a1cd5461..b228733fb8ce 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGCALL_H
-#define CLANG_CODEGEN_CGCALL_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCALL_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCALL_H
#include "CGValue.h"
#include "EHScopeStack.h"
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 9427de14d704..92c694a76de7 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -134,12 +134,11 @@ ApplyNonVirtualAndVirtualOffset(CodeGenFunction &CGF, llvm::Value *ptr,
return ptr;
}
-llvm::Value *
-CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
- const CXXRecordDecl *Derived,
- CastExpr::path_const_iterator PathBegin,
- CastExpr::path_const_iterator PathEnd,
- bool NullCheckValue) {
+llvm::Value *CodeGenFunction::GetAddressOfBaseClass(
+ llvm::Value *Value, const CXXRecordDecl *Derived,
+ CastExpr::path_const_iterator PathBegin,
+ CastExpr::path_const_iterator PathEnd, bool NullCheckValue,
+ SourceLocation Loc) {
assert(PathBegin != PathEnd && "Base path should not be empty!");
CastExpr::path_const_iterator Start = PathBegin;
@@ -176,9 +175,16 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
llvm::Type *BasePtrTy =
ConvertType((PathEnd[-1])->getType())->getPointerTo();
+ QualType DerivedTy = getContext().getRecordType(Derived);
+ CharUnits DerivedAlign = getContext().getTypeAlignInChars(DerivedTy);
+
// If the static offset is zero and we don't have a virtual step,
// just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(TCK_Upcast, Loc, Value, DerivedTy, DerivedAlign,
+ !NullCheckValue);
+ }
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -197,6 +203,11 @@ CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
EmitBlock(notNullBB);
}
+ if (sanitizePerformTypeCheck()) {
+ EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc, Value,
+ DerivedTy, DerivedAlign, true);
+ }
+
// Compute the virtual offset.
llvm::Value *VirtualOffset = nullptr;
if (VBase) {
@@ -533,6 +544,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXCtorInitializer *MemberInit,
const CXXConstructorDecl *Constructor,
FunctionArgList &Args) {
+ ApplyDebugLocation Loc(CGF, MemberInit->getMemberLocation());
assert(MemberInit->isAnyMemberInitializer() &&
"Must have member initializer!");
assert(MemberInit->getInit() && "Must have initializer!");
@@ -569,9 +581,8 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CXXConstructExpr *CE = dyn_cast<CXXConstructExpr>(MemberInit->getInit());
if (BaseElementTy.isPODType(CGF.getContext()) ||
(CE && CE->getConstructor()->isTrivial())) {
- // Find the source pointer. We know it's the last argument because
- // we know we're in an implicit copy constructor.
- unsigned SrcArgIndex = Args.size() - 1;
+ unsigned SrcArgIndex =
+ CGF.CGM.getCXXABI().getSrcArgforCopyCtor(Constructor, Args);
llvm::Value *SrcPtr
= CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(Args[SrcArgIndex]));
LValue ThisRHSLV = CGF.MakeNaturalAlignAddrLValue(SrcPtr, RecordTy);
@@ -587,12 +598,13 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
ArrayRef<VarDecl *> ArrayIndexes;
if (MemberInit->getNumArrayIndices())
ArrayIndexes = MemberInit->getArrayIndexes();
+ ApplyDebugLocation DL(CGF, MemberInit->getMemberLocation());
CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
}
-void CodeGenFunction::EmitInitializerForField(FieldDecl *Field,
- LValue LHS, Expr *Init,
- ArrayRef<VarDecl *> ArrayIndexes) {
+void CodeGenFunction::EmitInitializerForField(
+ FieldDecl *Field, LValue LHS, Expr *Init,
+ ArrayRef<VarDecl *> ArrayIndexes) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -692,8 +704,74 @@ static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
return true;
}
+// Emit code in ctor (Prologue==true) or dtor (Prologue==false)
+// to poison the extra field paddings inserted under
+// -fsanitize-address-field-padding=1|2.
+void CodeGenFunction::EmitAsanPrologueOrEpilogue(bool Prologue) {
+ ASTContext &Context = getContext();
+ const CXXRecordDecl *ClassDecl =
+ Prologue ? cast<CXXConstructorDecl>(CurGD.getDecl())->getParent()
+ : cast<CXXDestructorDecl>(CurGD.getDecl())->getParent();
+ if (!ClassDecl->mayInsertExtraPadding()) return;
+
+ struct SizeAndOffset {
+ uint64_t Size;
+ uint64_t Offset;
+ };
+
+ unsigned PtrSize = CGM.getDataLayout().getPointerSizeInBits();
+ const ASTRecordLayout &Info = Context.getASTRecordLayout(ClassDecl);
+
+ // Populate sizes and offsets of fields.
+ SmallVector<SizeAndOffset, 16> SSV(Info.getFieldCount());
+ for (unsigned i = 0, e = Info.getFieldCount(); i != e; ++i)
+ SSV[i].Offset =
+ Context.toCharUnitsFromBits(Info.getFieldOffset(i)).getQuantity();
+
+ size_t NumFields = 0;
+ for (const auto *Field : ClassDecl->fields()) {
+ const FieldDecl *D = Field;
+ std::pair<CharUnits, CharUnits> FieldInfo =
+ Context.getTypeInfoInChars(D->getType());
+ CharUnits FieldSize = FieldInfo.first;
+ assert(NumFields < SSV.size());
+ SSV[NumFields].Size = D->isBitField() ? 0 : FieldSize.getQuantity();
+ NumFields++;
+ }
+ assert(NumFields == SSV.size());
+ if (SSV.size() <= 1) return;
+
+ // We will insert calls to __asan_* run-time functions.
+ // LLVM AddressSanitizer pass may decide to inline them later.
+ llvm::Type *Args[2] = {IntPtrTy, IntPtrTy};
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CGM.VoidTy, Args, false);
+ llvm::Constant *F = CGM.CreateRuntimeFunction(
+ FTy, Prologue ? "__asan_poison_intra_object_redzone"
+ : "__asan_unpoison_intra_object_redzone");
+
+ llvm::Value *ThisPtr = LoadCXXThis();
+ ThisPtr = Builder.CreatePtrToInt(ThisPtr, IntPtrTy);
+ uint64_t TypeSize = Info.getNonVirtualSize().getQuantity();
+ // For each field check if it has sufficient padding,
+ // if so (un)poison it with a call.
+ for (size_t i = 0; i < SSV.size(); i++) {
+ uint64_t AsanAlignment = 8;
+ uint64_t NextField = i == SSV.size() - 1 ? TypeSize : SSV[i + 1].Offset;
+ uint64_t PoisonSize = NextField - SSV[i].Offset - SSV[i].Size;
+ uint64_t EndOffset = SSV[i].Offset + SSV[i].Size;
+ if (PoisonSize < AsanAlignment || !SSV[i].Size ||
+ (NextField % AsanAlignment) != 0)
+ continue;
+ Builder.CreateCall2(
+ F, Builder.CreateAdd(ThisPtr, Builder.getIntN(PtrSize, EndOffset)),
+ Builder.getIntN(PtrSize, PoisonSize));
+ }
+}
+
/// EmitConstructorBody - Emits the body of the current constructor.
void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ EmitAsanPrologueOrEpilogue(true);
const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
CXXCtorType CtorType = CurGD.getCtorType();
@@ -705,13 +783,13 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// delegation optimization.
if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor) &&
CGM.getTarget().getCXXABI().hasConstructorVariants()) {
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, Ctor->getLocEnd());
EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args, Ctor->getLocEnd());
return;
}
- Stmt *Body = Ctor->getBody();
+ const FunctionDecl *Definition = 0;
+ Stmt *Body = Ctor->getBody(Definition);
+ assert(Definition == Ctor && "emitting wrong constructor body");
// Enter the function-try-block before the constructor prologue if
// applicable.
@@ -755,18 +833,16 @@ namespace {
class CopyingValueRepresentation {
public:
explicit CopyingValueRepresentation(CodeGenFunction &CGF)
- : CGF(CGF), SO(*CGF.SanOpts), OldSanOpts(CGF.SanOpts) {
- SO.Bool = false;
- SO.Enum = false;
- CGF.SanOpts = &SO;
+ : CGF(CGF), OldSanOpts(CGF.SanOpts) {
+ CGF.SanOpts.set(SanitizerKind::Bool, false);
+ CGF.SanOpts.set(SanitizerKind::Enum, false);
}
~CopyingValueRepresentation() {
CGF.SanOpts = OldSanOpts;
}
private:
CodeGenFunction &CGF;
- SanitizerOptions SO;
- const SanitizerOptions *OldSanOpts;
+ SanitizerSet OldSanOpts;
};
}
@@ -780,7 +856,10 @@ namespace {
FirstField(nullptr), LastField(nullptr), FirstFieldOffset(0),
LastFieldOffset(0), LastAddedFieldIndex(0) {}
- static bool isMemcpyableField(FieldDecl *F) {
+ bool isMemcpyableField(FieldDecl *F) const {
+ // Never memcpy fields when we are adding poisoned paddings.
+ if (CGF.getContext().getLangOpts().SanitizeAddressFieldPadding)
+ return false;
Qualifiers Qual = F->getType().getQualifiers();
if (Qual.hasVolatile() || Qual.hasObjCLifetime())
return false;
@@ -794,13 +873,13 @@ namespace {
addNextField(F);
}
- CharUnits getMemcpySize() const {
+ CharUnits getMemcpySize(uint64_t FirstByteOffset) const {
unsigned LastFieldSize =
LastField->isBitField() ?
LastField->getBitWidthValue(CGF.getContext()) :
CGF.getContext().getTypeSize(LastField->getType());
uint64_t MemcpySizeBits =
- LastFieldOffset + LastFieldSize - FirstFieldOffset +
+ LastFieldOffset + LastFieldSize - FirstByteOffset +
CGF.getContext().getCharWidth() - 1;
CharUnits MemcpySize =
CGF.getContext().toCharUnitsFromBits(MemcpySizeBits);
@@ -816,19 +895,31 @@ namespace {
CharUnits Alignment;
+ uint64_t FirstByteOffset;
if (FirstField->isBitField()) {
const CGRecordLayout &RL =
CGF.getTypes().getCGRecordLayout(FirstField->getParent());
const CGBitFieldInfo &BFInfo = RL.getBitFieldInfo(FirstField);
Alignment = CharUnits::fromQuantity(BFInfo.StorageAlignment);
+ // FirstFieldOffset is not appropriate for bitfields,
+ // it won't tell us what the storage offset should be and thus might not
+ // be properly aligned.
+ //
+ // Instead calculate the storage offset using the offset of the field in
+ // the struct type.
+ const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
+ FirstByteOffset =
+ DL.getStructLayout(RL.getLLVMType())
+ ->getElementOffsetInBits(RL.getLLVMFieldNo(FirstField));
} else {
Alignment = CGF.getContext().getDeclAlign(FirstField);
+ FirstByteOffset = FirstFieldOffset;
}
- assert((CGF.getContext().toCharUnitsFromBits(FirstFieldOffset) %
+ assert((CGF.getContext().toCharUnitsFromBits(FirstByteOffset) %
Alignment) == 0 && "Bad field alignment.");
- CharUnits MemcpySize = getMemcpySize();
+ CharUnits MemcpySize = getMemcpySize(FirstByteOffset);
QualType RecordTy = CGF.getContext().getTypeDeclType(ClassDecl);
llvm::Value *ThisPtr = CGF.LoadCXXThis();
LValue DestLV = CGF.MakeNaturalAlignAddrLValue(ThisPtr, RecordTy);
@@ -912,11 +1003,12 @@ namespace {
private:
/// Get source argument for copy constructor. Returns null if not a copy
- /// constructor.
- static const VarDecl* getTrivialCopySource(const CXXConstructorDecl *CD,
+ /// constructor.
+ static const VarDecl *getTrivialCopySource(CodeGenFunction &CGF,
+ const CXXConstructorDecl *CD,
FunctionArgList &Args) {
if (CD->isCopyOrMoveConstructor() && CD->isDefaulted())
- return Args[Args.size() - 1];
+ return Args[CGF.CGM.getCXXABI().getSrcArgforCopyCtor(CD, Args)];
return nullptr;
}
@@ -947,7 +1039,7 @@ namespace {
public:
ConstructorMemcpyizer(CodeGenFunction &CGF, const CXXConstructorDecl *CD,
FunctionArgList &Args)
- : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CD, Args)),
+ : FieldMemcpyizer(CGF, CD->getParent(), getTrivialCopySource(CGF, CD, Args)),
ConstructorDecl(CD),
MemcpyableCtor(CD->isDefaulted() &&
CD->isCopyOrMoveConstructor() &&
@@ -1279,6 +1371,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
bool isTryBody = (Body && isa<CXXTryStmt>(Body));
if (isTryBody)
EnterCXXTryStmt(*cast<CXXTryStmt>(Body), true);
+ EmitAsanPrologueOrEpilogue(false);
// Enter the epilogue cleanups.
RunCleanupsScope DtorEpilogue(*this);
@@ -1289,6 +1382,9 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// we'd introduce *two* handler blocks. In the Microsoft ABI, we
// always delegate because we might not have a definition in this TU.
switch (DtorType) {
+ case Dtor_Comdat:
+ llvm_unreachable("not expecting a COMDAT");
+
case Dtor_Deleting: llvm_unreachable("already handled deleting case");
case Dtor_Complete:
@@ -1515,19 +1611,14 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
/// \param arrayBegin an arrayType*
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
- const ConstantArrayType *arrayType,
- llvm::Value *arrayBegin,
- CallExpr::const_arg_iterator argBegin,
- CallExpr::const_arg_iterator argEnd,
- bool zeroInitialize) {
+void CodeGenFunction::EmitCXXAggrConstructorCall(
+ const CXXConstructorDecl *ctor, const ConstantArrayType *arrayType,
+ llvm::Value *arrayBegin, const CXXConstructExpr *E, bool zeroInitialize) {
QualType elementType;
llvm::Value *numElements =
emitArrayLength(arrayType, elementType, arrayBegin);
- EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin,
- argBegin, argEnd, zeroInitialize);
+ EmitCXXAggrConstructorCall(ctor, numElements, arrayBegin, E, zeroInitialize);
}
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
@@ -1539,13 +1630,11 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
/// \param arrayBegin a T*, where T is the type constructed by ctor
/// \param zeroInitialize true if each element should be
/// zero-initialized before it is constructed
-void
-CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
- llvm::Value *numElements,
- llvm::Value *arrayBegin,
- CallExpr::const_arg_iterator argBegin,
- CallExpr::const_arg_iterator argEnd,
- bool zeroInitialize) {
+void CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
+ llvm::Value *numElements,
+ llvm::Value *arrayBegin,
+ const CXXConstructExpr *E,
+ bool zeroInitialize) {
// It's legal for numElements to be zero. This can happen both
// dynamically, because x can be zero in 'new A[x]', and statically,
@@ -1608,8 +1697,8 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *ctor,
pushRegularPartialArrayCleanup(arrayBegin, cur, type, *destroyer);
}
- EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/ false,
- /*Delegating=*/false, cur, argBegin, argEnd);
+ EmitCXXConstructorCall(ctor, Ctor_Complete, /*ForVirtualBase=*/false,
+ /*Delegating=*/false, cur, E);
}
// Go to the next element.
@@ -1640,29 +1729,27 @@ void CodeGenFunction::destroyCXXObject(CodeGenFunction &CGF,
/*Delegating=*/false, addr);
}
-void
-CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
- CXXCtorType Type, bool ForVirtualBase,
- bool Delegating,
- llvm::Value *This,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
+ CXXCtorType Type,
+ bool ForVirtualBase,
+ bool Delegating, llvm::Value *This,
+ const CXXConstructExpr *E) {
// If this is a trivial constructor, just emit what's needed.
- if (D->isTrivial()) {
- if (ArgBeg == ArgEnd) {
+ if (D->isTrivial() && !D->getParent()->mayInsertExtraPadding()) {
+ if (E->getNumArgs() == 0) {
// Trivial default constructor, no codegen required.
assert(D->isDefaultConstructor() &&
"trivial 0-arg ctor not a default ctor");
return;
}
- assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
"trivial 1-arg ctor not a copy/move ctor");
- const Expr *E = (*ArgBeg);
- QualType Ty = E->getType();
- llvm::Value *Src = EmitLValue(E).getAddress();
+ const Expr *Arg = E->getArg(0);
+ QualType Ty = Arg->getType();
+ llvm::Value *Src = EmitLValue(Arg).getAddress();
EmitAggregateCopy(This, Src, Ty);
return;
}
@@ -1681,14 +1768,14 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
// Add the rest of the user-supplied arguments.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getConstructor());
// Insert any ABI-specific implicit constructor arguments.
unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
*this, D, Type, ForVirtualBase, Delegating, Args);
// Emit the call.
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, Type);
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info =
CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
@@ -1697,16 +1784,16 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
void
CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
llvm::Value *This, llvm::Value *Src,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
- if (D->isTrivial()) {
- assert(ArgBeg + 1 == ArgEnd && "unexpected argcount for trivial ctor");
+ const CXXConstructExpr *E) {
+ if (D->isTrivial() &&
+ !D->getParent()->mayInsertExtraPadding()) {
+ assert(E->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
assert(D->isCopyOrMoveConstructor() &&
"trivial 1-arg ctor not a copy/move ctor");
- EmitAggregateCopy(This, Src, (*ArgBeg)->getType());
+ EmitAggregateCopy(This, Src, E->arg_begin()->getType());
return;
}
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(D, clang::Ctor_Complete);
+ llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, StructorType::Complete);
assert(D->isInstance() &&
"Trying to emit a member call expr on a static method!");
@@ -1724,8 +1811,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
Args.add(RValue::get(Src), QT);
// Skip over first argument (Src).
- EmitCallArgs(Args, FPT->isVariadic(), FPT->param_type_begin() + 1,
- FPT->param_type_end(), ArgBeg + 1, ArgEnd);
+ EmitCallArgs(Args, FPT, E->arg_begin() + 1, E->arg_end(), E->getConstructor(),
+ /*ParamsToSkip*/ 1);
EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, RequiredArgs::All),
Callee, ReturnValueSlot(), Args, D);
@@ -1766,8 +1853,10 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
EmitDelegateCallArg(DelegateArgs, param, Loc);
}
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(Ctor, CtorType);
- EmitCall(CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor, CtorType),
+ llvm::Value *Callee =
+ CGM.getAddrOfCXXStructor(Ctor, getFromCtorType(CtorType));
+ EmitCall(CGM.getTypes()
+ .arrangeCXXStructorDeclaration(Ctor, getFromCtorType(CtorType)),
Callee, ReturnValueSlot(), DelegateArgs, Ctor);
}
@@ -1894,10 +1983,14 @@ CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
NonVirtualOffset,
VirtualOffset);
- // Finally, store the address point.
- llvm::Type *AddressPointPtrTy =
- VTableAddressPoint->getType()->getPointerTo();
- VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ // Finally, store the address point. Use the same LLVM types as the field to
+ // support optimization.
+ llvm::Type *VTablePtrTy =
+ llvm::FunctionType::get(CGM.Int32Ty, /*isVarArg=*/true)
+ ->getPointerTo()
+ ->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, VTablePtrTy->getPointerTo());
+ VTableAddressPoint = Builder.CreateBitCast(VTableAddressPoint, VTablePtrTy);
llvm::StoreInst *Store = Builder.CreateStore(VTableAddressPoint, VTableField);
CGM.DecorateInstruction(Store, CGM.getTBAAInfoForVTablePtr());
}
@@ -1934,7 +2027,7 @@ CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
if (I.isVirtual()) {
// Check if we've visited this virtual base before.
- if (!VBases.insert(BaseDecl))
+ if (!VBases.insert(BaseDecl).second)
continue;
const ASTRecordLayout &Layout =
@@ -2075,20 +2168,6 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
return false;
}
-llvm::Value *
-CodeGenFunction::EmitCXXOperatorMemberCallee(const CXXOperatorCallExpr *E,
- const CXXMethodDecl *MD,
- llvm::Value *This) {
- llvm::FunctionType *fnType =
- CGM.getTypes().GetFunctionType(
- CGM.getTypes().arrangeCXXMethodDeclaration(MD));
-
- if (MD->isVirtual() && !CanDevirtualizeMemberFunctionCall(E->getArg(0), MD))
- return CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, fnType);
-
- return CGM.GetAddrOfFunction(MD, fnType);
-}
-
void CodeGenFunction::EmitForwardingCallToLambda(
const CXXMethodDecl *callOperator,
CallArgList &callArgs) {
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index ed9f96df7987..18ed3e543d20 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -184,7 +184,7 @@ void EHScopeStack::popCleanup() {
StartOfData += Cleanup.getAllocatedSize();
// Destroy the cleanup.
- Cleanup.~EHCleanupScope();
+ Cleanup.Destroy();
// Check whether we can shrink the branch-fixups stack.
if (!BranchFixups.empty()) {
@@ -301,7 +301,8 @@ static void ResolveAllBranchFixups(CodeGenFunction &CGF,
}
// Don't add this case to the switch statement twice.
- if (!CasesAdded.insert(Fixup.Destination)) continue;
+ if (!CasesAdded.insert(Fixup.Destination).second)
+ continue;
Switch->addCase(CGF.Builder.getInt32(Fixup.DestinationIndex),
Fixup.Destination);
@@ -357,7 +358,7 @@ void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
continue;
// Don't process the same optimistic branch block twice.
- if (!ModifiedOptimisticBlocks.insert(BranchBB))
+ if (!ModifiedOptimisticBlocks.insert(BranchBB).second)
continue;
llvm::SwitchInst *Switch = TransitionToCleanupSwitch(*this, BranchBB);
@@ -860,10 +861,7 @@ void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
// Emit the EH cleanup if required.
if (RequiresEHCleanup) {
- CGDebugInfo *DI = getDebugInfo();
- SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
- if (DI)
- DI->EmitLocation(Builder, CurEHLocation);
+ ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation);
CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 1d4606f13669..dd156c696ad3 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGCLEANUP_H
-#define CLANG_CODEGEN_CGCLEANUP_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
+#define LLVM_CLANG_LIB_CODEGEN_CGCLEANUP_H
#include "EHScopeStack.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -280,9 +280,11 @@ public:
assert(CleanupBits.CleanupSize == cleanupSize && "cleanup size overflow");
}
- ~EHCleanupScope() {
+ void Destroy() {
delete ExtInfo;
}
+ // Objects of EHCleanupScope are not destructed. Use Destroy().
+ ~EHCleanupScope() LLVM_DELETED_FUNCTION;
bool isNormalCleanup() const { return CleanupBits.IsNormalCleanup; }
llvm::BasicBlock *getNormalBlock() const { return NormalBlock; }
@@ -341,7 +343,7 @@ public:
void addBranchAfter(llvm::ConstantInt *Index,
llvm::BasicBlock *Block) {
struct ExtInfo &ExtInfo = getExtInfo();
- if (ExtInfo.Branches.insert(Block))
+ if (ExtInfo.Branches.insert(Block).second)
ExtInfo.BranchAfters.push_back(std::make_pair(Block, Index));
}
@@ -376,7 +378,7 @@ public:
///
/// \return true if the branch-through was new to this scope
bool addBranchThrough(llvm::BasicBlock *Block) {
- return getExtInfo().Branches.insert(Block);
+ return getExtInfo().Branches.insert(Block).second;
}
/// Determines if this cleanup scope has any branch throughs.
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 048c8f8f3674..978e1bb5b81f 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -52,64 +52,59 @@ CGDebugInfo::~CGDebugInfo() {
"Region stack mismatch, stack not empty!");
}
-SaveAndRestoreLocation::SaveAndRestoreLocation(CodeGenFunction &CGF,
- CGBuilderTy &B)
- : DI(CGF.getDebugInfo()), Builder(B) {
- if (DI) {
- SavedLoc = DI->getLocation();
- DI->CurLoc = SourceLocation();
+ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF)
+ : ApplyDebugLocation(CGF) {
+ if (auto *DI = CGF.getDebugInfo()) {
+ // Construct a location that has a valid scope, but no line info.
+ assert(!DI->LexicalBlockStack.empty());
+ llvm::DIDescriptor Scope(DI->LexicalBlockStack.back());
+ CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope));
}
}
-SaveAndRestoreLocation::~SaveAndRestoreLocation() {
- if (DI)
- DI->EmitLocation(Builder, SavedLoc);
-}
-
-NoLocation::NoLocation(CodeGenFunction &CGF, CGBuilderTy &B)
- : SaveAndRestoreLocation(CGF, B) {
- if (DI)
- Builder.SetCurrentDebugLocation(llvm::DebugLoc());
-}
-
-NoLocation::~NoLocation() {
- if (DI)
- assert(Builder.getCurrentDebugLocation().isUnknown());
-}
-
-ArtificialLocation::ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B)
- : SaveAndRestoreLocation(CGF, B) {
- if (DI)
- Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation,
+ bool ForceColumnInfo)
+ : CGF(CGF) {
+ if (auto *DI = CGF.getDebugInfo()) {
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (TemporaryLocation.isInvalid())
+ CGF.Builder.SetCurrentDebugLocation(llvm::DebugLoc());
+ else
+ DI->EmitLocation(CGF.Builder, TemporaryLocation, ForceColumnInfo);
+ }
}
-void ArtificialLocation::Emit() {
- if (DI) {
- // Sync the Builder.
- DI->EmitLocation(Builder, SavedLoc);
- DI->CurLoc = SourceLocation();
- // Construct a location that has a valid scope, but no line info.
- assert(!DI->LexicalBlockStack.empty());
- llvm::DIDescriptor Scope(DI->LexicalBlockStack.back());
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(0, 0, Scope));
+ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc)
+ : CGF(CGF) {
+ if (CGF.getDebugInfo()) {
+ OriginalLocation = CGF.Builder.getCurrentDebugLocation();
+ if (!Loc.isUnknown())
+ CGF.Builder.SetCurrentDebugLocation(Loc);
}
}
-ArtificialLocation::~ArtificialLocation() {
- if (DI)
- assert(Builder.getCurrentDebugLocation().getLine() == 0);
+ApplyDebugLocation::~ApplyDebugLocation() {
+ // Query CGF so the location isn't overwritten when location updates are
+ // temporarily disabled (for C++ default function arguments)
+ if (CGF.getDebugInfo())
+ CGF.Builder.SetCurrentDebugLocation(OriginalLocation);
}
+/// ArtificialLocation - An RAII object that temporarily switches to
+/// an artificial debug location that has a valid scope, but no line
void CGDebugInfo::setLocation(SourceLocation Loc) {
// If the new location isn't valid return.
- if (Loc.isInvalid()) return;
+ if (Loc.isInvalid())
+ return;
CurLoc = CGM.getContext().getSourceManager().getExpansionLoc(Loc);
// If we've changed files in the middle of a lexical scope go ahead
// and create a new lexical scope with file node if it's different
// from the one in the scope.
- if (LexicalBlockStack.empty()) return;
+ if (LexicalBlockStack.empty())
+ return;
SourceManager &SM = CGM.getContext().getSourceManager();
llvm::DIScope Scope(LexicalBlockStack.back());
@@ -120,18 +115,17 @@ void CGDebugInfo::setLocation(SourceLocation Loc) {
if (Scope.isLexicalBlockFile()) {
llvm::DILexicalBlockFile LBF = llvm::DILexicalBlockFile(Scope);
- llvm::DIDescriptor D
- = DBuilder.createLexicalBlockFile(LBF.getScope(),
- getOrCreateFile(CurLoc));
+ llvm::DIDescriptor D = DBuilder.createLexicalBlockFile(
+ LBF.getScope(), getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
- LexicalBlockStack.push_back(N);
+ LexicalBlockStack.emplace_back(N);
} else if (Scope.isLexicalBlock() || Scope.isSubprogram()) {
- llvm::DIDescriptor D
- = DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
+ llvm::DIDescriptor D =
+ DBuilder.createLexicalBlockFile(Scope, getOrCreateFile(CurLoc));
llvm::MDNode *N = D;
LexicalBlockStack.pop_back();
- LexicalBlockStack.push_back(N);
+ LexicalBlockStack.emplace_back(N);
}
}
@@ -140,10 +134,9 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (!Context)
return TheCU;
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator
- I = RegionMap.find(Context);
+ auto I = RegionMap.find(Context);
if (I != RegionMap.end()) {
- llvm::Value *V = I->second;
+ llvm::Metadata *V = I->second;
return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
}
@@ -154,7 +147,7 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context))
if (!RDecl->isDependentType())
return getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
- getOrCreateMainFile());
+ getOrCreateMainFile());
return TheCU;
}
@@ -162,10 +155,10 @@ llvm::DIScope CGDebugInfo::getContextDescriptor(const Decl *Context) {
/// name is constructed on demand (e.g. C++ destructor) then the name
/// is stored on the side.
StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
- assert (FD && "Invalid FunctionDecl!");
+ assert(FD && "Invalid FunctionDecl!");
IdentifierInfo *FII = FD->getIdentifier();
- FunctionTemplateSpecializationInfo *Info
- = FD->getTemplateSpecializationInfo();
+ FunctionTemplateSpecializationInfo *Info =
+ FD->getTemplateSpecializationInfo();
if (!Info && FII)
return FII->getName();
@@ -194,20 +187,20 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
OS << (OMD->isInstanceMethod() ? '-' : '+') << '[';
const DeclContext *DC = OMD->getDeclContext();
if (const ObjCImplementationDecl *OID =
- dyn_cast<const ObjCImplementationDecl>(DC)) {
- OS << OID->getName();
+ dyn_cast<const ObjCImplementationDecl>(DC)) {
+ OS << OID->getName();
} else if (const ObjCInterfaceDecl *OID =
- dyn_cast<const ObjCInterfaceDecl>(DC)) {
- OS << OID->getName();
+ dyn_cast<const ObjCInterfaceDecl>(DC)) {
+ OS << OID->getName();
} else if (const ObjCCategoryImplDecl *OCD =
- dyn_cast<const ObjCCategoryImplDecl>(DC)){
- OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '(' <<
- OCD->getIdentifier()->getNameStart() << ')';
+ dyn_cast<const ObjCCategoryImplDecl>(DC)) {
+ OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
+ << OCD->getIdentifier()->getNameStart() << ')';
} else if (isa<ObjCProtocolDecl>(DC)) {
// We can extract the type of the class from the self pointer.
- if (ImplicitParamDecl* SelfDecl = OMD->getSelfDecl()) {
+ if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
QualType ClassTy =
- cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
+ cast<ObjCObjectPointerType>(SelfDecl->getType())->getPointeeType();
ClassTy.print(OS, PrintingPolicy(LangOptions()));
}
}
@@ -223,8 +216,7 @@ StringRef CGDebugInfo::getSelectorName(Selector S) {
}
/// getClassName - Get class name including template argument list.
-StringRef
-CGDebugInfo::getClassName(const RecordDecl *RD) {
+StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
// quick optimization to avoid having to intern strings that are already
// stored reliably elsewhere
if (!isa<ClassTemplateSpecializationDecl>(RD))
@@ -256,18 +248,17 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
// Cache the results.
const char *fname = PLoc.getFilename();
- llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
- DIFileCache.find(fname);
+ auto it = DIFileCache.find(fname);
if (it != DIFileCache.end()) {
// Verify that the information still exists.
- if (llvm::Value *V = it->second)
+ if (llvm::Metadata *V = it->second)
return llvm::DIFile(cast<llvm::MDNode>(V));
}
llvm::DIFile F = DBuilder.createFile(PLoc.getFilename(), getCurrentDirname());
- DIFileCache[fname] = F;
+ DIFileCache[fname].reset(F);
return F;
}
@@ -283,7 +274,7 @@ unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.isValid()? PLoc.getLine() : 0;
+ return PLoc.isValid() ? PLoc.getLine() : 0;
}
/// getColumnNumber - Get column number for the location.
@@ -297,7 +288,7 @@ unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc, bool Force) {
return 0;
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
- return PLoc.isValid()? PLoc.getColumn() : 0;
+ return PLoc.isValid() ? PLoc.getColumn() : 0;
}
StringRef CGDebugInfo::getCurrentDirname() {
@@ -388,8 +379,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
StringRef BTName;
switch (BT->getKind()) {
#define BUILTIN_TYPE(Id, SingletonId)
-#define PLACEHOLDER_TYPE(Id, SingletonId) \
- case BuiltinType::Id:
+#define PLACEHOLDER_TYPE(Id, SingletonId) case BuiltinType::Id:
#include "clang/AST/BuiltinTypes.def"
case BuiltinType::Dependent:
llvm_unreachable("Unexpected builtin type");
@@ -425,8 +415,10 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
0, 0, 0, 0, llvm::DIType(), llvm::DIArray());
- ObjTy.setTypeArray(DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
+ DBuilder.replaceArrays(
+ ObjTy,
+ DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
return ObjTy;
}
case BuiltinType::ObjCSel: {
@@ -438,8 +430,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
}
case BuiltinType::OCLImage1d:
- return getOrCreateStructPtrType("opencl_image1d_t",
- OCLImage1dDITy);
+ return getOrCreateStructPtrType("opencl_image1d_t", OCLImage1dDITy);
case BuiltinType::OCLImage1dArray:
return getOrCreateStructPtrType("opencl_image1d_array_t",
OCLImage1dArrayDITy);
@@ -447,53 +438,71 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_image1d_buffer_t",
OCLImage1dBufferDITy);
case BuiltinType::OCLImage2d:
- return getOrCreateStructPtrType("opencl_image2d_t",
- OCLImage2dDITy);
+ return getOrCreateStructPtrType("opencl_image2d_t", OCLImage2dDITy);
case BuiltinType::OCLImage2dArray:
return getOrCreateStructPtrType("opencl_image2d_array_t",
OCLImage2dArrayDITy);
case BuiltinType::OCLImage3d:
- return getOrCreateStructPtrType("opencl_image3d_t",
- OCLImage3dDITy);
+ return getOrCreateStructPtrType("opencl_image3d_t", OCLImage3dDITy);
case BuiltinType::OCLSampler:
- return DBuilder.createBasicType("opencl_sampler_t",
- CGM.getContext().getTypeSize(BT),
- CGM.getContext().getTypeAlign(BT),
- llvm::dwarf::DW_ATE_unsigned);
+ return DBuilder.createBasicType(
+ "opencl_sampler_t", CGM.getContext().getTypeSize(BT),
+ CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
case BuiltinType::OCLEvent:
- return getOrCreateStructPtrType("opencl_event_t",
- OCLEventDITy);
+ return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
case BuiltinType::UChar:
- case BuiltinType::Char_U: Encoding = llvm::dwarf::DW_ATE_unsigned_char; break;
+ case BuiltinType::Char_U:
+ Encoding = llvm::dwarf::DW_ATE_unsigned_char;
+ break;
case BuiltinType::Char_S:
- case BuiltinType::SChar: Encoding = llvm::dwarf::DW_ATE_signed_char; break;
+ case BuiltinType::SChar:
+ Encoding = llvm::dwarf::DW_ATE_signed_char;
+ break;
case BuiltinType::Char16:
- case BuiltinType::Char32: Encoding = llvm::dwarf::DW_ATE_UTF; break;
+ case BuiltinType::Char32:
+ Encoding = llvm::dwarf::DW_ATE_UTF;
+ break;
case BuiltinType::UShort:
case BuiltinType::UInt:
case BuiltinType::UInt128:
case BuiltinType::ULong:
case BuiltinType::WChar_U:
- case BuiltinType::ULongLong: Encoding = llvm::dwarf::DW_ATE_unsigned; break;
+ case BuiltinType::ULongLong:
+ Encoding = llvm::dwarf::DW_ATE_unsigned;
+ break;
case BuiltinType::Short:
case BuiltinType::Int:
case BuiltinType::Int128:
case BuiltinType::Long:
case BuiltinType::WChar_S:
- case BuiltinType::LongLong: Encoding = llvm::dwarf::DW_ATE_signed; break;
- case BuiltinType::Bool: Encoding = llvm::dwarf::DW_ATE_boolean; break;
+ case BuiltinType::LongLong:
+ Encoding = llvm::dwarf::DW_ATE_signed;
+ break;
+ case BuiltinType::Bool:
+ Encoding = llvm::dwarf::DW_ATE_boolean;
+ break;
case BuiltinType::Half:
case BuiltinType::Float:
case BuiltinType::LongDouble:
- case BuiltinType::Double: Encoding = llvm::dwarf::DW_ATE_float; break;
+ case BuiltinType::Double:
+ Encoding = llvm::dwarf::DW_ATE_float;
+ break;
}
switch (BT->getKind()) {
- case BuiltinType::Long: BTName = "long int"; break;
- case BuiltinType::LongLong: BTName = "long long int"; break;
- case BuiltinType::ULong: BTName = "long unsigned int"; break;
- case BuiltinType::ULongLong: BTName = "long long unsigned int"; break;
+ case BuiltinType::Long:
+ BTName = "long int";
+ break;
+ case BuiltinType::LongLong:
+ BTName = "long long int";
+ break;
+ case BuiltinType::ULong:
+ BTName = "long unsigned int";
+ break;
+ case BuiltinType::ULongLong:
+ BTName = "long long unsigned int";
+ break;
default:
BTName = BT->getName(CGM.getLangOpts());
break;
@@ -501,8 +510,7 @@ llvm::DIType CGDebugInfo::CreateType(const BuiltinType *BT) {
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
uint64_t Align = CGM.getContext().getTypeAlign(BT);
- llvm::DIType DbgTy =
- DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ llvm::DIType DbgTy = DBuilder.createBasicType(BTName, Size, Align, Encoding);
return DbgTy;
}
@@ -515,7 +523,7 @@ llvm::DIType CGDebugInfo::CreateType(const ComplexType *Ty) {
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
llvm::DIType DbgTy =
- DBuilder.createBasicType("complex", Size, Align, Encoding);
+ DBuilder.createBasicType("complex", Size, Align, Encoding);
return DbgTy;
}
@@ -564,25 +572,23 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
// whereas 'id<protocol>' is treated as an ObjCPointerType. For the
// debug info, we want to emit 'id' in both cases.
if (Ty->isObjCQualifiedIdType())
- return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
+ return getOrCreateType(CGM.getContext().getObjCIdType(), Unit);
- llvm::DIType DbgTy =
- CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
- Ty->getPointeeType(), Unit);
+ llvm::DIType DbgTy = CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type,
+ Ty, Ty->getPointeeType(), Unit);
return DbgTy;
}
-llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile Unit) {
return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
Ty->getPointeeType(), Unit);
}
/// In C++ mode, types have linkage, so we can rely on the ODR and
/// on their mangled names, if they're external.
-static SmallString<256>
-getUniqueTagTypeName(const TagType *Ty, CodeGenModule &CGM,
- llvm::DICompileUnit TheCU) {
+static SmallString<256> getUniqueTagTypeName(const TagType *Ty,
+ CodeGenModule &CGM,
+ llvm::DICompileUnit TheCU) {
SmallString<256> FullName;
// FIXME: ODR should apply to ObjC++ exactly the same wasy it does to C++.
// For now, only apply ODR with C++.
@@ -627,7 +633,9 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
llvm::DICompositeType RetTy = DBuilder.createReplaceableForwardDecl(
Tag, RDName, Ctx, DefUnit, Line, 0, 0, 0, FullName);
- ReplaceMap.push_back(std::make_pair(Ty, static_cast<llvm::Value *>(RetTy)));
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
return RetTy;
}
@@ -666,7 +674,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
if (BlockLiteralGeneric)
return BlockLiteralGeneric;
- SmallVector<llvm::Value *, 8> EltTys;
+ SmallVector<llvm::Metadata *, 8> EltTys;
llvm::DIType FieldTy;
QualType FType;
uint64_t FieldSize, FieldOffset;
@@ -685,9 +693,9 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
unsigned Flags = llvm::DIDescriptor::FlagAppleBlock;
unsigned LineNo = getLineNumber(CurLoc);
- EltTy = DBuilder.createStructType(Unit, "__block_descriptor",
- Unit, LineNo, FieldOffset, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.createStructType(Unit, "__block_descriptor", Unit, LineNo,
+ FieldOffset, 0, Flags, llvm::DIType(),
+ Elements);
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -700,50 +708,52 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FType = CGM.getContext().IntTy;
EltTys.push_back(CreateMemberType(Unit, FType, "__flags", &FieldOffset));
EltTys.push_back(CreateMemberType(Unit, FType, "__reserved", &FieldOffset));
- FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
+ FType = CGM.getContext().getPointerType(Ty->getPointeeType());
EltTys.push_back(CreateMemberType(Unit, FType, "__FuncPtr", &FieldOffset));
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
FieldTy = DescTy;
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- FieldTy = DBuilder.createMemberType(Unit, "__descriptor", Unit,
- LineNo, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy =
+ DBuilder.createMemberType(Unit, "__descriptor", Unit, LineNo, FieldSize,
+ FieldAlign, FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
Elements = DBuilder.getOrCreateArray(EltTys);
- EltTy = DBuilder.createStructType(Unit, "__block_literal_generic",
- Unit, LineNo, FieldOffset, 0,
- Flags, llvm::DIType(), Elements);
+ EltTy = DBuilder.createStructType(Unit, "__block_literal_generic", Unit,
+ LineNo, FieldOffset, 0, Flags,
+ llvm::DIType(), Elements);
BlockLiteralGeneric = DBuilder.createPointerType(EltTy, Size);
return BlockLiteralGeneric;
}
-llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty, llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
+ llvm::DIFile Unit) {
assert(Ty->isTypeAlias());
llvm::DIType Src = getOrCreateType(Ty->getAliasedType(), Unit);
SmallString<128> NS;
llvm::raw_svector_ostream OS(NS);
- Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(), /*qualified*/ false);
+ Ty->getTemplateName().print(OS, CGM.getContext().getPrintingPolicy(),
+ /*qualified*/ false);
TemplateSpecializationType::PrintTemplateArgumentList(
OS, Ty->getArgs(), Ty->getNumArgs(),
CGM.getContext().getPrintingPolicy());
- TypeAliasDecl *AliasDecl =
- cast<TypeAliasTemplateDecl>(Ty->getTemplateName().getAsTemplateDecl())
- ->getTemplatedDecl();
+ TypeAliasDecl *AliasDecl = cast<TypeAliasTemplateDecl>(
+ Ty->getTemplateName().getAsTemplateDecl())->getTemplatedDecl();
SourceLocation Loc = AliasDecl->getLocation();
llvm::DIFile File = getOrCreateFile(Loc);
unsigned Line = getLineNumber(Loc);
- llvm::DIDescriptor Ctxt = getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()));
+ llvm::DIDescriptor Ctxt =
+ getContextDescriptor(cast<Decl>(AliasDecl->getDeclContext()));
return DBuilder.createTypedef(Src, internString(OS.str()), File, Line, Ctxt);
}
@@ -760,15 +770,15 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty, llvm::DIFile Unit) {
const TypedefNameDecl *TyDecl = Ty->getDecl();
llvm::DIDescriptor TypedefContext =
- getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
+ getContextDescriptor(cast<Decl>(Ty->getDecl()->getDeclContext()));
- return
- DBuilder.createTypedef(Src, TyDecl->getName(), File, Line, TypedefContext);
+ return DBuilder.createTypedef(Src, TyDecl->getName(), File, Line,
+ TypedefContext);
}
llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
llvm::DIFile Unit) {
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
// Add the result type at least.
EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
@@ -784,49 +794,66 @@ llvm::DIType CGDebugInfo::CreateType(const FunctionType *Ty,
EltTys.push_back(DBuilder.createUnspecifiedParameter());
}
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
return DBuilder.createSubroutineType(Unit, EltTypeArray);
}
+/// Convert an AccessSpecifier into the corresponding DIDescriptor flag.
+/// As an optimization, return 0 if the access specifier equals the
+/// default for the containing type.
+static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
+ AccessSpecifier Default = clang::AS_none;
+ if (RD && RD->isClass())
+ Default = clang::AS_private;
+ else if (RD && (RD->isStruct() || RD->isUnion()))
+ Default = clang::AS_public;
-llvm::DIType CGDebugInfo::createFieldType(StringRef name,
- QualType type,
- uint64_t sizeInBitsOverride,
- SourceLocation loc,
- AccessSpecifier AS,
- uint64_t offsetInBits,
- llvm::DIFile tunit,
- llvm::DIScope scope) {
+ if (Access == Default)
+ return 0;
+
+ switch (Access) {
+ case clang::AS_private:
+ return llvm::DIDescriptor::FlagPrivate;
+ case clang::AS_protected:
+ return llvm::DIDescriptor::FlagProtected;
+ case clang::AS_public:
+ return llvm::DIDescriptor::FlagPublic;
+ case clang::AS_none:
+ return 0;
+ }
+ llvm_unreachable("unexpected access enumerator");
+}
+
+llvm::DIType CGDebugInfo::createFieldType(
+ StringRef name, QualType type, uint64_t sizeInBitsOverride,
+ SourceLocation loc, AccessSpecifier AS, uint64_t offsetInBits,
+ llvm::DIFile tunit, llvm::DIScope scope, const RecordDecl *RD) {
llvm::DIType debugType = getOrCreateType(type, tunit);
// Get the location for the field.
llvm::DIFile file = getOrCreateFile(loc);
unsigned line = getLineNumber(loc);
- uint64_t sizeInBits = 0;
- unsigned alignInBits = 0;
+ uint64_t SizeInBits = 0;
+ unsigned AlignInBits = 0;
if (!type->isIncompleteArrayType()) {
- std::tie(sizeInBits, alignInBits) = CGM.getContext().getTypeInfo(type);
+ TypeInfo TI = CGM.getContext().getTypeInfo(type);
+ SizeInBits = TI.Width;
+ AlignInBits = TI.Align;
if (sizeInBitsOverride)
- sizeInBits = sizeInBitsOverride;
+ SizeInBits = sizeInBitsOverride;
}
- unsigned flags = 0;
- if (AS == clang::AS_private)
- flags |= llvm::DIDescriptor::FlagPrivate;
- else if (AS == clang::AS_protected)
- flags |= llvm::DIDescriptor::FlagProtected;
-
- return DBuilder.createMemberType(scope, name, file, line, sizeInBits,
- alignInBits, offsetInBits, flags, debugType);
+ unsigned flags = getAccessFlag(AS, RD);
+ return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
+ AlignInBits, offsetInBits, flags, debugType);
}
/// CollectRecordLambdaFields - Helper for CollectRecordFields.
-void CGDebugInfo::
-CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectRecordLambdaFields(
+ const CXXRecordDecl *CXXDecl, SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DIType RecordTy) {
// For C++11 Lambdas a Field will be the same as a Capture, but the Capture
// has the name and the location of the variable so we should iterate over
// both concurrently.
@@ -834,7 +861,8 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
RecordDecl::field_iterator Field = CXXDecl->field_begin();
unsigned fieldno = 0;
for (CXXRecordDecl::capture_const_iterator I = CXXDecl->captures_begin(),
- E = CXXDecl->captures_end(); I != E; ++I, ++Field, ++fieldno) {
+ E = CXXDecl->captures_end();
+ I != E; ++I, ++Field, ++fieldno) {
const LambdaCapture &C = *I;
if (C.capturesVariable()) {
VarDecl *V = C.getCapturedVar();
@@ -845,23 +873,22 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
SizeInBitsOverride = Field->getBitWidthValue(CGM.getContext());
assert(SizeInBitsOverride && "found named 0-width bitfield");
}
- llvm::DIType fieldType
- = createFieldType(VName, Field->getType(), SizeInBitsOverride,
- C.getLocation(), Field->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy);
+ llvm::DIType fieldType = createFieldType(
+ VName, Field->getType(), SizeInBitsOverride, C.getLocation(),
+ Field->getAccess(), layout.getFieldOffset(fieldno), VUnit, RecordTy,
+ CXXDecl);
elements.push_back(fieldType);
- } else {
+ } else if (C.capturesThis()) {
// TODO: Need to handle 'this' in some way by probably renaming the
// this of the lambda class and having a field member of 'this' or
// by using AT_object_pointer for the function and having that be
// used as 'this' for semantic references.
- assert(C.capturesThis() && "Field that isn't captured and isn't this?");
FieldDecl *f = *Field;
llvm::DIFile VUnit = getOrCreateFile(f->getLocation());
QualType type = f->getType();
- llvm::DIType fieldType
- = createFieldType("this", type, 0, f->getLocation(), f->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy);
+ llvm::DIType fieldType = createFieldType(
+ "this", type, 0, f->getLocation(), f->getAccess(),
+ layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
elements.push_back(fieldType);
}
@@ -869,11 +896,12 @@ CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
}
/// Helper for CollectRecordFields.
-llvm::DIDerivedType
-CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
- llvm::DIType RecordTy) {
+llvm::DIDerivedType CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
+ llvm::DIType RecordTy,
+ const RecordDecl *RD) {
// Create the descriptor for the static variable, with or without
// constant initializers.
+ Var = Var->getCanonicalDecl();
llvm::DIFile VUnit = getOrCreateFile(Var->getLocation());
llvm::DIType VTy = getOrCreateType(Var->getType(), VUnit);
@@ -890,25 +918,18 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var,
}
}
- unsigned Flags = 0;
- AccessSpecifier Access = Var->getAccess();
- if (Access == clang::AS_private)
- Flags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- Flags |= llvm::DIDescriptor::FlagProtected;
-
+ unsigned Flags = getAccessFlag(Var->getAccess(), RD);
llvm::DIDerivedType GV = DBuilder.createStaticMemberType(
RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
- StaticDataMemberCache[Var->getCanonicalDecl()] = llvm::WeakVH(GV);
+ StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
return GV;
}
/// CollectRecordNormalField - Helper for CollectRecordFields.
-void CGDebugInfo::
-CollectRecordNormalField(const FieldDecl *field, uint64_t OffsetInBits,
- llvm::DIFile tunit,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectRecordNormalField(
+ const FieldDecl *field, uint64_t OffsetInBits, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements, llvm::DIType RecordTy,
+ const RecordDecl *RD) {
StringRef name = field->getName();
QualType type = field->getType();
@@ -922,20 +943,19 @@ CollectRecordNormalField(const FieldDecl *field, uint64_t OffsetInBits,
assert(SizeInBitsOverride && "found named 0-width bitfield");
}
- llvm::DIType fieldType
- = createFieldType(name, type, SizeInBitsOverride,
- field->getLocation(), field->getAccess(),
- OffsetInBits, tunit, RecordTy);
+ llvm::DIType fieldType =
+ createFieldType(name, type, SizeInBitsOverride, field->getLocation(),
+ field->getAccess(), OffsetInBits, tunit, RecordTy, RD);
elements.push_back(fieldType);
}
/// CollectRecordFields - A helper function to collect debug info for
/// record fields. This is used while creating debug info entry for a Record.
-void CGDebugInfo::CollectRecordFields(const RecordDecl *record,
- llvm::DIFile tunit,
- SmallVectorImpl<llvm::Value *> &elements,
- llvm::DICompositeType RecordTy) {
+void CGDebugInfo::CollectRecordFields(
+ const RecordDecl *record, llvm::DIFile tunit,
+ SmallVectorImpl<llvm::Metadata *> &elements,
+ llvm::DICompositeType RecordTy) {
const CXXRecordDecl *CXXDecl = dyn_cast<CXXRecordDecl>(record);
if (CXXDecl && CXXDecl->isLambda())
@@ -951,18 +971,19 @@ void CGDebugInfo::CollectRecordFields(const RecordDecl *record,
for (const auto *I : record->decls())
if (const auto *V = dyn_cast<VarDecl>(I)) {
// Reuse the existing static member declaration if one exists
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
- StaticDataMemberCache.find(V->getCanonicalDecl());
+ auto MI = StaticDataMemberCache.find(V->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
assert(MI->second &&
"Static data member declaration should still exist");
elements.push_back(
llvm::DIDerivedType(cast<llvm::MDNode>(MI->second)));
- } else
- elements.push_back(CreateRecordStaticField(V, RecordTy));
+ } else {
+ auto Field = CreateRecordStaticField(V, RecordTy, record);
+ elements.push_back(Field);
+ }
} else if (const auto *field = dyn_cast<FieldDecl>(I)) {
- CollectRecordNormalField(field, layout.getFieldOffset(fieldNo),
- tunit, elements, RecordTy);
+ CollectRecordNormalField(field, layout.getFieldOffset(fieldNo), tunit,
+ elements, RecordTy, record);
// Bump field number for next field.
++fieldNo;
@@ -986,11 +1007,11 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
QualType ThisPtr, const FunctionProtoType *Func, llvm::DIFile Unit) {
// Add "this" pointer.
- llvm::DIArray Args = llvm::DICompositeType(
+ llvm::DITypeArray Args = llvm::DISubroutineType(
getOrCreateType(QualType(Func, 0), Unit)).getTypeArray();
- assert (Args.getNumElements() && "Invalid number of arguments!");
+ assert(Args.getNumElements() && "Invalid number of arguments!");
- SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Metadata *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
Elts.push_back(Args.getElement(0));
@@ -1006,8 +1027,8 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
llvm::DIType PointeeType = getOrCreateType(PointeeTy, Unit);
llvm::DIType ThisPtrType =
- DBuilder.createPointerType(PointeeType, Size, Align);
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ DBuilder.createPointerType(PointeeType, Size, Align);
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
// TODO: This and the artificial type below are misleading, the
// types aren't artificial the argument is, but the current
// metadata doesn't represent that.
@@ -1015,7 +1036,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
Elts.push_back(ThisPtrType);
} else {
llvm::DIType ThisPtrType = getOrCreateType(ThisPtr, Unit);
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
+ TypeCache[ThisPtr.getAsOpaquePtr()].reset(ThisPtrType);
ThisPtrType = DBuilder.createObjectPointerType(ThisPtrType);
Elts.push_back(ThisPtrType);
}
@@ -1024,7 +1045,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateInstanceMethodType(
for (unsigned i = 1, e = Args.getNumElements(); i != e; ++i)
Elts.push_back(Args.getElement(i));
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
unsigned Flags = 0;
if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
@@ -1049,10 +1070,9 @@ static bool isFunctionLocalClass(const CXXRecordDecl *RD) {
/// a single member function GlobalDecl.
llvm::DISubprogram
CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
- llvm::DIFile Unit,
- llvm::DIType RecordTy) {
+ llvm::DIFile Unit, llvm::DIType RecordTy) {
bool IsCtorOrDtor =
- isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
+ isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method);
StringRef MethodName = getFunctionName(Method);
llvm::DICompositeType MethodTy = getOrCreateMethodType(Method, Unit);
@@ -1096,16 +1116,12 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
unsigned Flags = 0;
if (Method->isImplicit())
Flags |= llvm::DIDescriptor::FlagArtificial;
- AccessSpecifier Access = Method->getAccess();
- if (Access == clang::AS_private)
- Flags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- Flags |= llvm::DIDescriptor::FlagProtected;
+ Flags |= getAccessFlag(Method->getAccess(), Method->getParent());
if (const CXXConstructorDecl *CXXC = dyn_cast<CXXConstructorDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
} else if (const CXXConversionDecl *CXXC =
- dyn_cast<CXXConversionDecl>(Method)) {
+ dyn_cast<CXXConversionDecl>(Method)) {
if (CXXC->isExplicit())
Flags |= llvm::DIDescriptor::FlagExplicit;
}
@@ -1117,16 +1133,13 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
Flags |= llvm::DIDescriptor::FlagRValueReference;
llvm::DIArray TParamsArray = CollectFunctionTemplateParams(Method, Unit);
- llvm::DISubprogram SP =
- DBuilder.createMethod(RecordTy, MethodName, MethodLinkageName,
- MethodDefUnit, MethodLine,
- MethodTy, /*isLocalToUnit=*/false,
- /* isDefinition=*/ false,
- Virtuality, VIndex, ContainingType,
- Flags, CGM.getLangOpts().Optimize, nullptr,
- TParamsArray);
+ llvm::DISubprogram SP = DBuilder.createMethod(
+ RecordTy, MethodName, MethodLinkageName, MethodDefUnit, MethodLine,
+ MethodTy, /*isLocalToUnit=*/false,
+ /* isDefinition=*/false, Virtuality, VIndex, ContainingType, Flags,
+ CGM.getLangOpts().Optimize, nullptr, TParamsArray);
- SPCache[Method->getCanonicalDecl()] = llvm::WeakVH(SP);
+ SPCache[Method->getCanonicalDecl()].reset(SP);
return SP;
}
@@ -1134,53 +1147,49 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
/// CollectCXXMemberFunctions - A helper function to collect debug info for
/// C++ member functions. This is used while creating debug info entry for
/// a Record.
-void CGDebugInfo::
-CollectCXXMemberFunctions(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectCXXMemberFunctions(
+ const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType RecordTy) {
// Since we want more than just the individual member decls if we
// have templated functions iterate over every declaration to gather
// the functions.
- for(const auto *I : RD->decls()) {
- if (const auto *Method = dyn_cast<CXXMethodDecl>(I)) {
- // Reuse the existing member function declaration if it exists.
- // It may be associated with the declaration of the type & should be
- // reused as we're building the definition.
- //
- // This situation can arise in the vtable-based debug info reduction where
- // implicit members are emitted in a non-vtable TU.
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
- SPCache.find(Method->getCanonicalDecl());
- if (MI == SPCache.end()) {
- // If the member is implicit, lazily create it when we see the
- // definition, not before. (an ODR-used implicit default ctor that's
- // never actually code generated should not produce debug info)
- if (!Method->isImplicit())
- EltTys.push_back(CreateCXXMemberFunction(Method, Unit, RecordTy));
- } else
- EltTys.push_back(MI->second);
- } else if (const auto *FTD = dyn_cast<FunctionTemplateDecl>(I)) {
- // Add any template specializations that have already been seen. Like
- // implicit member functions, these may have been added to a declaration
- // in the case of vtable-based debug info reduction.
- for (const auto *SI : FTD->specializations()) {
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator MI =
- SPCache.find(cast<CXXMethodDecl>(SI)->getCanonicalDecl());
- if (MI != SPCache.end())
- EltTys.push_back(MI->second);
- }
- }
+ for (const auto *I : RD->decls()) {
+ const auto *Method = dyn_cast<CXXMethodDecl>(I);
+ // If the member is implicit, don't add it to the member list. This avoids
+ // the member being added to type units by LLVM, while still allowing it
+ // to be emitted into the type declaration/reference inside the compile
+ // unit.
+ // FIXME: Handle Using(Shadow?)Decls here to create
+ // DW_TAG_imported_declarations inside the class for base decls brought into
+ // derived classes. GDB doesn't seem to notice/leverage these when I tried
+ // it, so I'm not rushing to fix this. (GCC seems to produce them, if
+ // referenced)
+ if (!Method || Method->isImplicit())
+ continue;
+
+ if (Method->getType()->getAs<FunctionProtoType>()->getContainedAutoType())
+ continue;
+
+ // Reuse the existing member function declaration if it exists.
+ // It may be associated with the declaration of the type & should be
+ // reused as we're building the definition.
+ //
+ // This situation can arise in the vtable-based debug info reduction where
+ // implicit members are emitted in a non-vtable TU.
+ auto MI = SPCache.find(Method->getCanonicalDecl());
+ EltTys.push_back(MI == SPCache.end()
+ ? CreateCXXMemberFunction(Method, Unit, RecordTy)
+ : static_cast<llvm::Metadata *>(MI->second));
}
}
/// CollectCXXBases - A helper function to collect debug info for
/// C++ base classes. This is used while creating debug info entry for
/// a Record.
-void CGDebugInfo::
-CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys,
- llvm::DIType RecordTy) {
+void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType RecordTy) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (const auto &BI : RD->bases()) {
@@ -1188,40 +1197,40 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
uint64_t BaseOffset;
const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
+ cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
if (BI.isVirtual()) {
- // virtual base offset offset is -ve. The code generator emits dwarf
- // expression where it expects +ve number.
- BaseOffset =
- 0 - CGM.getItaniumVTableContext()
- .getVirtualBaseOffsetOffset(RD, Base).getQuantity();
+ if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
+ // virtual base offset offset is -ve. The code generator emits dwarf
+ // expression where it expects +ve number.
+ BaseOffset = 0 - CGM.getItaniumVTableContext()
+ .getVirtualBaseOffsetOffset(RD, Base)
+ .getQuantity();
+ } else {
+ // In the MS ABI, store the vbtable offset, which is analogous to the
+ // vbase offset offset in Itanium.
+ BaseOffset =
+ 4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
+ }
BFlags = llvm::DIDescriptor::FlagVirtual;
} else
BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
- AccessSpecifier Access = BI.getAccessSpecifier();
- if (Access == clang::AS_private)
- BFlags |= llvm::DIDescriptor::FlagPrivate;
- else if (Access == clang::AS_protected)
- BFlags |= llvm::DIDescriptor::FlagProtected;
-
- llvm::DIType DTy =
- DBuilder.createInheritance(RecordTy,
- getOrCreateType(BI.getType(), Unit),
- BaseOffset, BFlags);
+ BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
+ llvm::DIType DTy = DBuilder.createInheritance(
+ RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
/// CollectTemplateParams - A helper function to collect template parameters.
-llvm::DIArray CGDebugInfo::
-CollectTemplateParams(const TemplateParameterList *TPList,
- ArrayRef<TemplateArgument> TAList,
- llvm::DIFile Unit) {
- SmallVector<llvm::Value *, 16> TemplateParams;
+llvm::DIArray
+CGDebugInfo::CollectTemplateParams(const TemplateParameterList *TPList,
+ ArrayRef<TemplateArgument> TAList,
+ llvm::DIFile Unit) {
+ SmallVector<llvm::Metadata *, 16> TemplateParams;
for (unsigned i = 0, e = TAList.size(); i != e; ++i) {
const TemplateArgument &TA = TAList[i];
StringRef Name;
@@ -1244,46 +1253,41 @@ CollectTemplateParams(const TemplateParameterList *TPList,
} break;
case TemplateArgument::Declaration: {
const ValueDecl *D = TA.getAsDecl();
- bool InstanceMember = D->isCXXInstanceMember();
- QualType T = InstanceMember
- ? CGM.getContext().getMemberPointerType(
- D->getType(), cast<RecordDecl>(D->getDeclContext())
- ->getTypeForDecl())
- : CGM.getContext().getPointerType(D->getType());
+ QualType T = TA.getParamTypeForDecl().getDesugaredType(CGM.getContext());
llvm::DIType TTy = getOrCreateType(T, Unit);
- llvm::Value *V = nullptr;
+ llvm::Constant *V = nullptr;
+ const CXXMethodDecl *MD;
// Variable pointer template parameters have a value that is the address
// of the variable.
- if (const VarDecl *VD = dyn_cast<VarDecl>(D))
+ if (const auto *VD = dyn_cast<VarDecl>(D))
V = CGM.GetAddrOfGlobalVar(VD);
// Member function pointers have special support for building them, though
// this is currently unsupported in LLVM CodeGen.
- if (InstanceMember) {
- if (const CXXMethodDecl *method = dyn_cast<CXXMethodDecl>(D))
- V = CGM.getCXXABI().EmitMemberPointer(method);
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ else if ((MD = dyn_cast<CXXMethodDecl>(D)) && MD->isInstance())
+ V = CGM.getCXXABI().EmitMemberPointer(MD);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(D))
V = CGM.GetAddrOfFunction(FD);
// Member data pointers have special handling too to compute the fixed
// offset within the object.
- if (isa<FieldDecl>(D) || isa<IndirectFieldDecl>(D)) {
+ else if (const auto *MPT = dyn_cast<MemberPointerType>(T.getTypePtr())) {
// These five lines (& possibly the above member function pointer
// handling) might be able to be refactored to use similar code in
// CodeGenModule::getMemberPointerConstant
uint64_t fieldOffset = CGM.getContext().getFieldOffset(D);
CharUnits chars =
- CGM.getContext().toCharUnitsFromBits((int64_t) fieldOffset);
- V = CGM.getCXXABI().EmitMemberDataPointer(
- cast<MemberPointerType>(T.getTypePtr()), chars);
+ CGM.getContext().toCharUnitsFromBits((int64_t)fieldOffset);
+ V = CGM.getCXXABI().EmitMemberDataPointer(MPT, chars);
}
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
- V->stripPointerCasts());
+ DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy,
+ cast_or_null<llvm::Constant>(V->stripPointerCasts()));
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::NullPtr: {
QualType T = TA.getNullPtrType();
llvm::DIType TTy = getOrCreateType(T, Unit);
- llvm::Value *V = nullptr;
+ llvm::Constant *V = nullptr;
// Special case member data pointer null values since they're actually -1
// instead of zero.
if (const MemberPointerType *MPT =
@@ -1298,33 +1302,34 @@ CollectTemplateParams(const TemplateParameterList *TPList,
if (!V)
V = llvm::ConstantInt::get(CGM.Int8Ty, 0);
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy, V);
+ DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
+ cast<llvm::Constant>(V));
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Template: {
- llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateTemplateParameter(
- TheCU, Name, llvm::DIType(),
- TA.getAsTemplate().getAsTemplateDecl()
- ->getQualifiedNameAsString());
+ llvm::DITemplateValueParameter
+ TVP = DBuilder.createTemplateTemplateParameter(
+ TheCU, Name, llvm::DIType(),
+ TA.getAsTemplate().getAsTemplateDecl()->getQualifiedNameAsString());
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Pack: {
- llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateParameterPack(
- TheCU, Name, llvm::DIType(),
- CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit));
+ llvm::DITemplateValueParameter TVP = DBuilder.createTemplateParameterPack(
+ TheCU, Name, llvm::DIType(),
+ CollectTemplateParams(nullptr, TA.getPackAsArray(), Unit));
TemplateParams.push_back(TVP);
} break;
case TemplateArgument::Expression: {
const Expr *E = TA.getAsExpr();
QualType T = E->getType();
- llvm::Value *V = CGM.EmitConstantExpr(E, T);
+ if (E->isGLValue())
+ T = CGM.getContext().getLValueReferenceType(T);
+ llvm::Constant *V = CGM.EmitConstantExpr(E, T);
assert(V && "Expression in template argument isn't constant");
llvm::DIType TTy = getOrCreateType(T, Unit);
llvm::DITemplateValueParameter TVP =
- DBuilder.createTemplateValueParameter(TheCU, Name, TTy,
- V->stripPointerCasts());
+ DBuilder.createTemplateValueParameter(
+ TheCU, Name, TTy, cast<llvm::Constant>(V->stripPointerCasts()));
TemplateParams.push_back(TVP);
} break;
// And the following should never occur:
@@ -1339,13 +1344,13 @@ CollectTemplateParams(const TemplateParameterList *TPList,
/// CollectFunctionTemplateParams - A helper function to collect debug
/// info for function template parameters.
-llvm::DIArray CGDebugInfo::
-CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
+llvm::DIArray CGDebugInfo::CollectFunctionTemplateParams(const FunctionDecl *FD,
+ llvm::DIFile Unit) {
if (FD->getTemplatedKind() ==
FunctionDecl::TK_FunctionTemplateSpecialization) {
- const TemplateParameterList *TList =
- FD->getTemplateSpecializationInfo()->getTemplate()
- ->getTemplateParameters();
+ const TemplateParameterList *TList = FD->getTemplateSpecializationInfo()
+ ->getTemplate()
+ ->getTemplateParameters();
return CollectTemplateParams(
TList, FD->getTemplateSpecializationArgs()->asArray(), Unit);
}
@@ -1354,13 +1359,12 @@ CollectFunctionTemplateParams(const FunctionDecl *FD, llvm::DIFile Unit) {
/// CollectCXXTemplateParams - A helper function to collect debug info for
/// template parameters.
-llvm::DIArray CGDebugInfo::
-CollectCXXTemplateParams(const ClassTemplateSpecializationDecl *TSpecial,
- llvm::DIFile Unit) {
+llvm::DIArray CGDebugInfo::CollectCXXTemplateParams(
+ const ClassTemplateSpecializationDecl *TSpecial, llvm::DIFile Unit) {
// Always get the full list of parameters, not just the ones from
// the specialization.
TemplateParameterList *TPList =
- TSpecial->getSpecializedTemplate()->getTemplateParameters();
+ TSpecial->getSpecializedTemplate()->getTemplateParameters();
const TemplateArgumentList &TAList = TSpecial->getTemplateArgs();
return CollectTemplateParams(TPList, TAList.asArray(), Unit);
}
@@ -1373,12 +1377,12 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
ASTContext &Context = CGM.getContext();
/* Function type */
- llvm::Value *STy = getOrCreateType(Context.IntTy, Unit);
- llvm::DIArray SElements = DBuilder.getOrCreateArray(STy);
+ llvm::Metadata *STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DITypeArray SElements = DBuilder.getOrCreateTypeArray(STy);
llvm::DIType SubTy = DBuilder.createSubroutineType(Unit, SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
- llvm::DIType vtbl_ptr_type = DBuilder.createPointerType(SubTy, Size, 0,
- "__vtbl_ptr_type");
+ llvm::DIType vtbl_ptr_type =
+ DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type");
VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -1389,12 +1393,10 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
return internString("_vptr$", RD->getNameAsString());
}
-
/// CollectVTableInfo - If the C++ class has vtable info then insert appropriate
/// debug info entry in EltTys vector.
-void CGDebugInfo::
-CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
- SmallVectorImpl<llvm::Value *> &EltTys) {
+void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys) {
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
// If there is a primary base then it will hold vtable info.
@@ -1406,11 +1408,9 @@ CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile Unit,
return;
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType VPTR
- = DBuilder.createMemberType(Unit, getVTableName(RD), Unit,
- 0, Size, 0, 0,
- llvm::DIDescriptor::FlagArtificial,
- getOrCreateVTablePtrType(Unit));
+ llvm::DIType VPTR = DBuilder.createMemberType(
+ Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
+ llvm::DIDescriptor::FlagArtificial, getOrCreateVTablePtrType(Unit));
EltTys.push_back(VPTR);
}
@@ -1436,15 +1436,14 @@ void CGDebugInfo::completeType(const EnumDecl *ED) {
if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getEnumType(ED);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I == TypeCache.end() ||
- !llvm::DIType(cast<llvm::MDNode>(static_cast<llvm::Value *>(I->second)))
- .isForwardDecl())
+ !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl())
return;
llvm::DIType Res = CreateTypeDefinition(Ty->castAs<EnumType>());
assert(!Res.isForwardDecl());
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
}
void CGDebugInfo::completeType(const RecordDecl *RD) {
@@ -1471,15 +1470,14 @@ void CGDebugInfo::completeClassData(const RecordDecl *RD) {
if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getRecordType(RD);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
auto I = TypeCache.find(TyPtr);
if (I != TypeCache.end() &&
- !llvm::DIType(cast<llvm::MDNode>(static_cast<llvm::Value *>(I->second)))
- .isForwardDecl())
+ !llvm::DIType(cast<llvm::MDNode>(I->second)).isForwardDecl())
return;
llvm::DIType Res = CreateTypeDefinition(Ty->castAs<RecordType>());
assert(!Res.isForwardDecl());
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
}
static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
@@ -1563,11 +1561,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
CollectContainingType(CXXDecl, FwdDecl);
// Push the struct on region stack.
- LexicalBlockStack.push_back(&*FwdDecl);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ LexicalBlockStack.emplace_back(&*FwdDecl);
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
// Convert all the elements.
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
// what about nested types?
// Note: The split of CXXDecl information here is intentional, the
@@ -1589,9 +1587,9 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
RegionMap.erase(Ty->getDecl());
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- FwdDecl.setTypeArray(Elements);
+ DBuilder.replaceArrays(FwdDecl, Elements);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
+ RegionMap[Ty->getDecl()].reset(FwdDecl);
return FwdDecl;
}
@@ -1602,7 +1600,6 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
return getOrCreateType(Ty->getBaseType(), Unit);
}
-
/// \return true if Getter has the default name for the property PD.
static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
const ObjCMethodDecl *Getter) {
@@ -1612,7 +1609,7 @@ static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
assert(Getter->getDeclName().isObjCZeroArgSelector());
return PD->getName() ==
- Getter->getDeclName().getObjCSelector().getNameForSlot(0);
+ Getter->getDeclName().getObjCSelector().getNameForSlot(0);
}
/// \return true if Setter has the default name for the property PD.
@@ -1624,7 +1621,7 @@ static bool hasDefaultSetterName(const ObjCPropertyDecl *PD,
assert(Setter->getDeclName().isObjCOneArgSelector());
return SelectorTable::constructSetterName(PD->getName()) ==
- Setter->getDeclName().getObjCSelector().getNameForSlot(0);
+ Setter->getDeclName().getObjCSelector().getNameForSlot(0);
}
/// CreateType - get objective-c interface type.
@@ -1650,11 +1647,11 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
return FwdDecl;
}
-
return CreateTypeDefinition(Ty, Unit);
}
-llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
+ llvm::DIFile Unit) {
ObjCInterfaceDecl *ID = Ty->getDecl();
llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
unsigned Line = getLineNumber(ID->getLocation());
@@ -1668,30 +1665,28 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
if (ID->getImplementation())
Flags |= llvm::DIDescriptor::FlagObjcClassComplete;
- llvm::DICompositeType RealDecl =
- DBuilder.createStructType(Unit, ID->getName(), DefUnit,
- Line, Size, Align, Flags,
- llvm::DIType(), llvm::DIArray(), RuntimeLang);
+ llvm::DICompositeType RealDecl = DBuilder.createStructType(
+ Unit, ID->getName(), DefUnit, Line, Size, Align, Flags, llvm::DIType(),
+ llvm::DIArray(), RuntimeLang);
QualType QTy(Ty, 0);
- TypeCache[QTy.getAsOpaquePtr()] = RealDecl;
+ TypeCache[QTy.getAsOpaquePtr()].reset(RealDecl);
// Push the struct on region stack.
- LexicalBlockStack.push_back(static_cast<llvm::MDNode*>(RealDecl));
- RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
+ LexicalBlockStack.emplace_back(static_cast<llvm::MDNode *>(RealDecl));
+ RegionMap[Ty->getDecl()].reset(RealDecl);
// Convert all the elements.
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
ObjCInterfaceDecl *SClass = ID->getSuperClass();
if (SClass) {
llvm::DIType SClassTy =
- getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
+ getOrCreateType(CGM.getContext().getObjCInterfaceType(SClass), Unit);
if (!SClassTy.isValid())
return llvm::DIType();
- llvm::DIType InhTag =
- DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ llvm::DIType InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
EltTys.push_back(InhTag);
}
@@ -1702,15 +1697,13 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
unsigned PLine = getLineNumber(Loc);
ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
- llvm::MDNode *PropertyNode =
- DBuilder.createObjCProperty(PD->getName(),
- PUnit, PLine,
- hasDefaultGetterName(PD, Getter) ? "" :
- getSelectorName(PD->getGetterName()),
- hasDefaultSetterName(PD, Setter) ? "" :
- getSelectorName(PD->getSetterName()),
- PD->getPropertyAttributes(),
- getOrCreateType(PD->getType(), PUnit));
+ llvm::MDNode *PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? ""
+ : getSelectorName(PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? ""
+ : getSelectorName(PD->getSetterName()),
+ PD->getPropertyAttributes(), getOrCreateType(PD->getType(), PUnit));
EltTys.push_back(PropertyNode);
}
@@ -1750,8 +1743,8 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
// non-fragile ABI. For bitfields, use the bit offset into the first
// byte of storage of the bitfield. For other fields, use zero.
if (Field->isBitField()) {
- FieldOffset = CGM.getObjCRuntime().ComputeBitfieldBitOffset(
- CGM, ID, Field);
+ FieldOffset =
+ CGM.getObjCRuntime().ComputeBitfieldBitOffset(CGM, ID, Field);
FieldOffset %= CGM.getContext().getCharWidth();
} else {
FieldOffset = 0;
@@ -1765,38 +1758,38 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty, llvm
Flags = llvm::DIDescriptor::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
Flags = llvm::DIDescriptor::FlagPrivate;
+ else if (Field->getAccessControl() == ObjCIvarDecl::Public)
+ Flags = llvm::DIDescriptor::FlagPublic;
llvm::MDNode *PropertyNode = nullptr;
if (ObjCImplementationDecl *ImpD = ID->getImplementation()) {
if (ObjCPropertyImplDecl *PImpD =
- ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
+ ImpD->FindPropertyImplIvarDecl(Field->getIdentifier())) {
if (ObjCPropertyDecl *PD = PImpD->getPropertyDecl()) {
SourceLocation Loc = PD->getLocation();
llvm::DIFile PUnit = getOrCreateFile(Loc);
unsigned PLine = getLineNumber(Loc);
ObjCMethodDecl *Getter = PD->getGetterMethodDecl();
ObjCMethodDecl *Setter = PD->getSetterMethodDecl();
- PropertyNode =
- DBuilder.createObjCProperty(PD->getName(),
- PUnit, PLine,
- hasDefaultGetterName(PD, Getter) ? "" :
- getSelectorName(PD->getGetterName()),
- hasDefaultSetterName(PD, Setter) ? "" :
- getSelectorName(PD->getSetterName()),
- PD->getPropertyAttributes(),
- getOrCreateType(PD->getType(), PUnit));
+ PropertyNode = DBuilder.createObjCProperty(
+ PD->getName(), PUnit, PLine,
+ hasDefaultGetterName(PD, Getter) ? "" : getSelectorName(
+ PD->getGetterName()),
+ hasDefaultSetterName(PD, Setter) ? "" : getSelectorName(
+ PD->getSetterName()),
+ PD->getPropertyAttributes(),
+ getOrCreateType(PD->getType(), PUnit));
}
}
}
- FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit,
- FieldLine, FieldSize, FieldAlign,
- FieldOffset, Flags, FieldTy,
- PropertyNode);
+ FieldTy = DBuilder.createObjCIVar(FieldName, FieldDefUnit, FieldLine,
+ FieldSize, FieldAlign, FieldOffset, Flags,
+ FieldTy, PropertyNode);
EltTys.push_back(FieldTy);
}
llvm::DIArray Elements = DBuilder.getOrCreateArray(EltTys);
- RealDecl.setTypeArray(Elements);
+ DBuilder.replaceArrays(RealDecl, Elements);
LexicalBlockStack.pop_back();
return RealDecl;
@@ -1810,7 +1803,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
// Use Count == -1 to express such arrays.
Count = -1;
- llvm::Value *Subscript = DBuilder.getOrCreateSubrange(0, Count);
+ llvm::Metadata *Subscript = DBuilder.getOrCreateSubrange(0, Count);
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -1819,8 +1812,7 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty, llvm::DIFile Unit) {
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
-llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
- llvm::DIFile Unit) {
+llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile Unit) {
uint64_t Size;
uint64_t Align;
@@ -1828,7 +1820,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
Align =
- CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
if (Ty->getElementType()->isIncompleteType())
@@ -1847,7 +1839,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
// interior arrays, do we care? Why aren't nested arrays represented the
// obvious/recursive way?
- SmallVector<llvm::Value *, 8> Subscripts;
+ SmallVector<llvm::Metadata *, 8> Subscripts;
QualType EltTy(Ty, 0);
while ((Ty = dyn_cast<ArrayType>(EltTy))) {
// If the number of elements is known, then count is that number. Otherwise,
@@ -1857,7 +1849,7 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
// struct foo {
// int x[0];
// };
- int64_t Count = -1; // Count == -1 is an unbounded array.
+ int64_t Count = -1; // Count == -1 is an unbounded array.
if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(Ty))
Count = CAT->getSize().getZExtValue();
@@ -1868,22 +1860,21 @@ llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
llvm::DIArray SubscriptArray = DBuilder.getOrCreateArray(Subscripts);
- llvm::DIType DbgTy =
- DBuilder.createArrayType(Size, Align, getOrCreateType(EltTy, Unit),
- SubscriptArray);
+ llvm::DIType DbgTy = DBuilder.createArrayType(
+ Size, Align, getOrCreateType(EltTy, Unit), SubscriptArray);
return DbgTy;
}
llvm::DIType CGDebugInfo::CreateType(const LValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type,
- Ty, Ty->getPointeeType(), Unit);
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
}
llvm::DIType CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIFile Unit) {
- return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type,
- Ty, Ty->getPointeeType(), Unit);
+ return CreatePointerLikeType(llvm::dwarf::DW_TAG_rvalue_reference_type, Ty,
+ Ty->getPointeeType(), Unit);
}
llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
@@ -1891,18 +1882,19 @@ llvm::DIType CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIType ClassType = getOrCreateType(QualType(Ty->getClass(), 0), U);
if (!Ty->getPointeeType()->isFunctionType())
return DBuilder.createMemberPointerType(
- getOrCreateType(Ty->getPointeeType(), U), ClassType);
+ getOrCreateType(Ty->getPointeeType(), U), ClassType,
+ CGM.getContext().getTypeSize(Ty));
const FunctionProtoType *FPT =
- Ty->getPointeeType()->getAs<FunctionProtoType>();
- return DBuilder.createMemberPointerType(getOrCreateInstanceMethodType(
- CGM.getContext().getPointerType(QualType(Ty->getClass(),
- FPT->getTypeQuals())),
- FPT, U), ClassType);
+ Ty->getPointeeType()->getAs<FunctionProtoType>();
+ return DBuilder.createMemberPointerType(
+ getOrCreateInstanceMethodType(CGM.getContext().getPointerType(QualType(
+ Ty->getClass(), FPT->getTypeQuals())),
+ FPT, U),
+ ClassType, CGM.getContext().getTypeSize(Ty));
}
-llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty,
- llvm::DIFile U) {
+llvm::DIType CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile U) {
// Ignore the atomic wrapping
// FIXME: What is the correct representation?
return getOrCreateType(Ty->getValueType(), U);
@@ -1931,7 +1923,9 @@ llvm::DIType CGDebugInfo::CreateEnumType(const EnumType *Ty) {
llvm::DIType RetTy = DBuilder.createReplaceableForwardDecl(
llvm::dwarf::DW_TAG_enumeration_type, EDName, EDContext, DefUnit, Line,
0, Size, Align, FullName);
- ReplaceMap.push_back(std::make_pair(Ty, static_cast<llvm::Value *>(RetTy)));
+ ReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(Ty),
+ std::make_tuple(static_cast<llvm::Metadata *>(RetTy)));
return RetTy;
}
@@ -1950,12 +1944,11 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
// Create DIEnumerator elements for each enumerator.
- SmallVector<llvm::Value *, 16> Enumerators;
+ SmallVector<llvm::Metadata *, 16> Enumerators;
ED = ED->getDefinition();
for (const auto *Enum : ED->enumerators()) {
- Enumerators.push_back(
- DBuilder.createEnumerator(Enum->getName(),
- Enum->getInitVal().getSExtValue()));
+ Enumerators.push_back(DBuilder.createEnumerator(
+ Enum->getName(), Enum->getInitVal().getSExtValue()));
}
// Return a CompositeType for the enum itself.
@@ -1964,13 +1957,13 @@ llvm::DIType CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
unsigned Line = getLineNumber(ED->getLocation());
llvm::DIDescriptor EnumContext =
- getContextDescriptor(cast<Decl>(ED->getDeclContext()));
- llvm::DIType ClassTy = ED->isFixed() ?
- getOrCreateType(ED->getIntegerType(), DefUnit) : llvm::DIType();
+ getContextDescriptor(cast<Decl>(ED->getDeclContext()));
+ llvm::DIType ClassTy = ED->isFixed()
+ ? getOrCreateType(ED->getIntegerType(), DefUnit)
+ : llvm::DIType();
llvm::DIType DbgTy =
- DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
- Size, Align, EltArray,
- ClassTy, FullName);
+ DBuilder.createEnumerationType(EnumContext, ED->getName(), DefUnit, Line,
+ Size, Align, EltArray, ClassTy, FullName);
return DbgTy;
}
@@ -1991,7 +1984,8 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
if (Spec->isTypeAlias())
return C.getQualifiedType(T.getTypePtr(), Quals);
T = Spec->desugar();
- break; }
+ break;
+ }
case Type::TypeOfExpr:
T = cast<TypeOfExprType>(T)->getUnderlyingExpr()->getType();
break;
@@ -2018,8 +2012,7 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
break;
case Type::Auto:
QualType DT = cast<AutoType>(T)->getDeducedType();
- if (DT.isNull())
- return T;
+ assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
T = DT;
break;
}
@@ -2039,7 +2032,7 @@ llvm::DIType CGDebugInfo::getTypeOrNull(QualType Ty) {
auto it = TypeCache.find(Ty.getAsOpaquePtr());
if (it != TypeCache.end()) {
// Verify that the debug info still exists.
- if (llvm::Value *V = it->second)
+ if (llvm::Metadata *V = it->second)
return llvm::DIType(cast<llvm::MDNode>(V));
}
@@ -2071,10 +2064,10 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile Unit) {
// Otherwise create the type.
llvm::DIType Res = CreateTypeNode(Ty, Unit);
- void* TyPtr = Ty.getAsOpaquePtr();
+ void *TyPtr = Ty.getAsOpaquePtr();
// And update the type cache.
- TypeCache[TyPtr] = Res;
+ TypeCache[TyPtr].reset(Res);
return Res;
}
@@ -2096,8 +2089,8 @@ unsigned CGDebugInfo::Checksum(const ObjCInterfaceDecl *ID) {
ObjCInterfaceDecl *CGDebugInfo::getObjCInterfaceDecl(QualType Ty) {
switch (Ty->getTypeClass()) {
case Type::ObjCObjectPointer:
- return getObjCInterfaceDecl(cast<ObjCObjectPointerType>(Ty)
- ->getPointeeType());
+ return getObjCInterfaceDecl(
+ cast<ObjCObjectPointerType>(Ty)->getPointeeType());
case Type::ObjCInterface:
return cast<ObjCInterfaceType>(Ty)->getDecl();
default:
@@ -2111,8 +2104,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
if (Ty.hasLocalQualifiers())
return CreateQualifiedType(Ty, Unit);
- const char *Diag = nullptr;
-
// Work out details of type.
switch (Ty->getTypeClass()) {
#define TYPE(Class, Base)
@@ -2172,6 +2163,7 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::TemplateSpecialization:
return CreateType(cast<TemplateSpecializationType>(Ty), Unit);
+ case Type::Auto:
case Type::Attributed:
case Type::Elaborated:
case Type::Paren:
@@ -2181,18 +2173,10 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile Unit) {
case Type::Decltype:
case Type::UnaryTransform:
case Type::PackExpansion:
- llvm_unreachable("type should have been unwrapped!");
- case Type::Auto:
- Diag = "auto";
break;
}
- assert(Diag && "Fall through without a diagnostic?");
- unsigned DiagID = CGM.getDiags().getCustomDiagID(DiagnosticsEngine::Error,
- "debug information for %0 is not yet supported");
- CGM.getDiags().Report(DiagID)
- << Diag;
- return llvm::DIType();
+ llvm_unreachable("type should have been unwrapped!");
}
/// getOrCreateLimitedType - Get the type from the cache or create a new
@@ -2206,7 +2190,8 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
// We may have cached a forward decl when we could have created
// a non-forward decl. Go ahead and create a non-forward decl
// now.
- if (T && !T.isForwardDecl()) return T;
+ if (T && !T.isForwardDecl())
+ return T;
// Otherwise create the type.
llvm::DICompositeType Res = CreateLimitedType(Ty);
@@ -2214,10 +2199,10 @@ llvm::DIType CGDebugInfo::getOrCreateLimitedType(const RecordType *Ty,
// Propagate members from the declaration to the definition
// CreateType(const RecordType*) will overwrite this with the members in the
// correct order if the full type is needed.
- Res.setTypeArray(T.getTypeArray());
+ DBuilder.replaceArrays(Res, T.getElements());
// And update the type cache.
- TypeCache[QTy.getAsOpaquePtr()] = Res;
+ TypeCache[QTy.getAsOpaquePtr()].reset(Res);
return Res;
}
@@ -2237,7 +2222,7 @@ llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
// just return that.
llvm::DICompositeType T(getTypeOrNull(CGM.getContext().getRecordType(RD)));
if (T && (!T.isForwardDecl() || !RD->getDefinition()))
- return T;
+ return T;
// If this is just a forward or incomplete declaration, construct an
// appropriately marked node and just return it.
@@ -2252,29 +2237,26 @@ llvm::DICompositeType CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
if (RD->isUnion())
- RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIArray(), 0,
- FullName);
+ RealDecl = DBuilder.createUnionType(RDContext, RDName, DefUnit, Line, Size,
+ Align, 0, llvm::DIArray(), 0, FullName);
else if (RD->isClass()) {
// FIXME: This could be a struct type giving a default visibility different
// than C++ class type, but needs llvm metadata changes first.
- RealDecl = DBuilder.createClassType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, 0, llvm::DIType(),
- llvm::DIArray(), llvm::DIType(),
- llvm::DIArray(), FullName);
+ RealDecl = DBuilder.createClassType(
+ RDContext, RDName, DefUnit, Line, Size, Align, 0, 0, llvm::DIType(),
+ llvm::DIArray(), llvm::DIType(), llvm::DIArray(), FullName);
} else
- RealDecl = DBuilder.createStructType(RDContext, RDName, DefUnit, Line,
- Size, Align, 0, llvm::DIType(),
- llvm::DIArray(), 0, llvm::DIType(),
- FullName);
+ RealDecl = DBuilder.createStructType(
+ RDContext, RDName, DefUnit, Line, Size, Align, 0, llvm::DIType(),
+ llvm::DIArray(), 0, llvm::DIType(), FullName);
- RegionMap[Ty->getDecl()] = llvm::WeakVH(RealDecl);
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = RealDecl;
+ RegionMap[Ty->getDecl()].reset(RealDecl);
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()].reset(RealDecl);
if (const ClassTemplateSpecializationDecl *TSpecial =
dyn_cast<ClassTemplateSpecializationDecl>(RD))
- RealDecl.setTypeArray(llvm::DIArray(),
- CollectCXXTemplateParams(TSpecial, DefUnit));
+ DBuilder.replaceArrays(RealDecl, llvm::DIArray(),
+ CollectCXXTemplateParams(TSpecial, DefUnit));
return RealDecl;
}
@@ -2299,24 +2281,148 @@ void CGDebugInfo::CollectContainingType(const CXXRecordDecl *RD,
} else if (RD->isDynamicClass())
ContainingType = RealDecl;
- RealDecl.setContainingType(ContainingType);
+ DBuilder.replaceVTableHolder(RealDecl, ContainingType);
}
/// CreateMemberType - Create new member and increase Offset by FType's size.
llvm::DIType CGDebugInfo::CreateMemberType(llvm::DIFile Unit, QualType FType,
- StringRef Name,
- uint64_t *Offset) {
+ StringRef Name, uint64_t *Offset) {
llvm::DIType FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0,
- FieldSize, FieldAlign,
- *Offset, 0, FieldTy);
+ llvm::DIType Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize,
+ FieldAlign, *Offset, 0, FieldTy);
*Offset += FieldSize;
return Ty;
}
-llvm::DIScope CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
+void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD,
+ llvm::DIFile Unit,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &FDContext,
+ llvm::DIArray &TParamsArray,
+ unsigned &Flags) {
+ const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
+ Name = getFunctionName(FD);
+ // Use mangled name as linkage name for C/C++ functions.
+ if (FD->hasPrototype()) {
+ LinkageName = CGM.getMangledName(GD);
+ Flags |= llvm::DIDescriptor::FlagPrototyped;
+ }
+ // No need to replicate the linkage name if it isn't different from the
+ // subprogram name, no need to have it at all unless coverage is enabled or
+ // debug is set to more than just line tables.
+ if (LinkageName == Name ||
+ (!CGM.getCodeGenOpts().EmitGcovArcs &&
+ !CGM.getCodeGenOpts().EmitGcovNotes &&
+ DebugKind <= CodeGenOptions::DebugLineTablesOnly))
+ LinkageName = StringRef();
+
+ if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
+ if (const NamespaceDecl *NSDecl =
+ dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
+ FDContext = getOrCreateNameSpace(NSDecl);
+ else if (const RecordDecl *RDecl =
+ dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
+ FDContext = getContextDescriptor(cast<Decl>(RDecl));
+ // Collect template parameters.
+ TParamsArray = CollectFunctionTemplateParams(FD, Unit);
+ }
+}
+
+void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit,
+ unsigned &LineNo, QualType &T,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &VDContext) {
+ Unit = getOrCreateFile(VD->getLocation());
+ LineNo = getLineNumber(VD->getLocation());
+
+ setLocation(VD->getLocation());
+
+ T = VD->getType();
+ if (T->isIncompleteArrayType()) {
+ // CodeGen turns int[] into int[1] so we'll do the same here.
+ llvm::APInt ConstVal(32, 1);
+ QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
+
+ T = CGM.getContext().getConstantArrayType(ET, ConstVal,
+ ArrayType::Normal, 0);
+ }
+
+ Name = VD->getName();
+ if (VD->getDeclContext() && !isa<FunctionDecl>(VD->getDeclContext()) &&
+ !isa<ObjCMethodDecl>(VD->getDeclContext()))
+ LinkageName = CGM.getMangledName(VD);
+ if (LinkageName == Name)
+ LinkageName = StringRef();
+
+ // Since we emit declarations (DW_AT_members) for static members, place the
+ // definition of those static members in the namespace they were declared in
+ // in the source code (the lexical decl context).
+ // FIXME: Generalize this for even non-member global variables where the
+ // declaration and definition may have different lexical decl contexts, once
+ // we have support for emitting declarations of (non-member) global variables.
+ VDContext = getContextDescriptor(
+ dyn_cast<Decl>(VD->isStaticDataMember() ? VD->getLexicalDeclContext()
+ : VD->getDeclContext()));
+}
+
+llvm::DISubprogram
+CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
+ llvm::DIArray TParamsArray;
+ StringRef Name, LinkageName;
+ unsigned Flags = 0;
+ SourceLocation Loc = FD->getLocation();
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor DContext(Unit);
+ unsigned Line = getLineNumber(Loc);
+
+ collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext,
+ TParamsArray, Flags);
+ // Build function type.
+ SmallVector<QualType, 16> ArgTypes;
+ for (const ParmVarDecl *Parm: FD->parameters())
+ ArgTypes.push_back(Parm->getType());
+ QualType FnType =
+ CGM.getContext().getFunctionType(FD->getReturnType(), ArgTypes,
+ FunctionProtoType::ExtProtoInfo());
+ llvm::DISubprogram SP =
+ DBuilder.createTempFunctionFwdDecl(DContext, Name, LinkageName, Unit, Line,
+ getOrCreateFunctionType(FD, FnType, Unit),
+ !FD->isExternallyVisible(),
+ false /*declaration*/, 0, Flags,
+ CGM.getLangOpts().Optimize, nullptr,
+ TParamsArray, getFunctionDeclaration(FD));
+ const FunctionDecl *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
+ FwdDeclReplaceMap.emplace_back(
+ std::piecewise_construct, std::make_tuple(CanonDecl),
+ std::make_tuple(static_cast<llvm::Metadata *>(SP)));
+ return SP;
+}
+
+llvm::DIGlobalVariable
+CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
+ QualType T;
+ StringRef Name, LinkageName;
+ SourceLocation Loc = VD->getLocation();
+ llvm::DIFile Unit = getOrCreateFile(Loc);
+ llvm::DIDescriptor DContext(Unit);
+ unsigned Line = getLineNumber(Loc);
+
+ collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ llvm::DIGlobalVariable GV =
+ DBuilder.createTempGlobalVariableFwdDecl(DContext, Name, LinkageName, Unit,
+ Line, getOrCreateType(T, Unit),
+ !VD->isExternallyVisible(),
+ nullptr, nullptr);
+ FwdDeclReplaceMap.emplace_back(
+ std::piecewise_construct,
+ std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
+ std::make_tuple(static_cast<llvm::Metadata *>(GV)));
+ return GV;
+}
+
+llvm::DIDescriptor CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
// We only need a declaration (not a definition) of the type - so use whatever
// we would otherwise do to get a type for a pointee. (forward declarations in
// limited debug info, full definitions (if the type definition is available)
@@ -2324,19 +2430,19 @@ llvm::DIScope CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
if (const TypeDecl *TD = dyn_cast<TypeDecl>(D))
return getOrCreateType(CGM.getContext().getTypeDeclType(TD),
getOrCreateFile(TD->getLocation()));
- // Otherwise fall back to a fairly rudimentary cache of existing declarations.
- // This doesn't handle providing declarations (for functions or variables) for
- // entities without definitions in this TU, nor when the definition proceeds
- // the call to this function.
- // FIXME: This should be split out into more specific maps with support for
- // emitting forward declarations and merging definitions with declarations,
- // the same way as we do for types.
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator I =
- DeclCache.find(D->getCanonicalDecl());
- if (I == DeclCache.end())
- return llvm::DIScope();
- llvm::Value *V = I->second;
- return llvm::DIScope(dyn_cast_or_null<llvm::MDNode>(V));
+ auto I = DeclCache.find(D->getCanonicalDecl());
+
+ if (I != DeclCache.end())
+ return llvm::DIDescriptor(dyn_cast_or_null<llvm::MDNode>(I->second));
+
+ // No definition for now. Emit a forward definition that might be
+ // merged with a potential upcoming definition.
+ if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
+ return getFunctionForwardDeclaration(FD);
+ else if (const auto *VD = dyn_cast<VarDecl>(D))
+ return getGlobalVariableForwardDeclaration(VD);
+
+ return llvm::DIDescriptor();
}
/// getFunctionDeclaration - Return debug info descriptor to describe method
@@ -2346,13 +2452,13 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
return llvm::DISubprogram();
const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) return llvm::DISubprogram();
+ if (!FD)
+ return llvm::DISubprogram();
// Setup context.
llvm::DIScope S = getContextDescriptor(cast<Decl>(D->getDeclContext()));
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(FD->getCanonicalDecl());
+ auto MI = SPCache.find(FD->getCanonicalDecl());
if (MI == SPCache.end()) {
if (const CXXMethodDecl *MD =
dyn_cast<CXXMethodDecl>(FD->getCanonicalDecl())) {
@@ -2363,18 +2469,15 @@ llvm::DISubprogram CGDebugInfo::getFunctionDeclaration(const Decl *D) {
}
}
if (MI != SPCache.end()) {
- llvm::Value *V = MI->second;
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second));
if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
for (auto NextFD : FD->redecls()) {
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- MI = SPCache.find(NextFD->getCanonicalDecl());
+ auto MI = SPCache.find(NextFD->getCanonicalDecl());
if (MI != SPCache.end()) {
- llvm::Value *V = MI->second;
- llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DISubprogram SP(dyn_cast_or_null<llvm::MDNode>(MI->second));
if (SP.isSubprogram() && !SP.isDefinition())
return SP;
}
@@ -2392,13 +2495,14 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// llvm::DISubprogram::Verify() would return false, and
// subprogram DIE will miss DW_AT_decl_file and
// DW_AT_decl_line fields.
- return DBuilder.createSubroutineType(F, DBuilder.getOrCreateArray(None));
+ return DBuilder.createSubroutineType(F,
+ DBuilder.getOrCreateTypeArray(None));
if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
return getOrCreateMethodType(Method, F);
if (const ObjCMethodDecl *OMethod = dyn_cast<ObjCMethodDecl>(D)) {
// Add "self" and "_cmd"
- SmallVector<llvm::Value *, 16> Elts;
+ SmallVector<llvm::Metadata *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
QualType ResultTy = OMethod->getReturnType();
@@ -2406,7 +2510,7 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// Replace the instancetype keyword with the actual type.
if (ResultTy == CGM.getContext().getObjCInstanceType())
ResultTy = CGM.getContext().getPointerType(
- QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
+ QualType(OMethod->getClassInterface()->getTypeForDecl(), 0));
Elts.push_back(getOrCreateType(ResultTy, F));
// "self" pointer is always first argument.
@@ -2419,8 +2523,11 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// Get rest of the arguments.
for (const auto *PI : OMethod->params())
Elts.push_back(getOrCreateType(PI->getType(), F));
+ // Variadic methods need a special marker at the end of the type list.
+ if (OMethod->isVariadic())
+ Elts.push_back(DBuilder.createUnspecifiedParameter());
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(Elts);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
return DBuilder.createSubroutineType(F, EltTypeArray);
}
@@ -2428,13 +2535,13 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
// unspecified parameter.
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
if (FD->isVariadic()) {
- SmallVector<llvm::Value *, 16> EltTys;
+ SmallVector<llvm::Metadata *, 16> EltTys;
EltTys.push_back(getOrCreateType(FD->getReturnType(), F));
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FnType))
for (unsigned i = 0, e = FPT->getNumParams(); i != e; ++i)
EltTys.push_back(getOrCreateType(FPT->getParamType(i), F));
EltTys.push_back(DBuilder.createUnspecifiedParameter());
- llvm::DIArray EltTypeArray = DBuilder.getOrCreateArray(EltTys);
+ llvm::DITypeArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
return DBuilder.createSubroutineType(F, EltTypeArray);
}
@@ -2442,12 +2549,9 @@ llvm::DICompositeType CGDebugInfo::getOrCreateFunctionType(const Decl *D,
}
/// EmitFunctionStart - Constructs the debug code for entering a function.
-void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
- SourceLocation Loc,
- SourceLocation ScopeLoc,
- QualType FnType,
- llvm::Function *Fn,
- CGBuilderTy &Builder) {
+void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
+ SourceLocation ScopeLoc, QualType FnType,
+ llvm::Function *Fn, CGBuilderTy &Builder) {
StringRef Name;
StringRef LinkageName;
@@ -2466,44 +2570,18 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
LinkageName = Fn->getName();
} else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// If there is a DISubprogram for this function available then use it.
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH>::iterator
- FI = SPCache.find(FD->getCanonicalDecl());
+ auto FI = SPCache.find(FD->getCanonicalDecl());
if (FI != SPCache.end()) {
- llvm::Value *V = FI->second;
- llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(V));
+ llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
llvm::MDNode *SPN = SP;
- LexicalBlockStack.push_back(SPN);
- RegionMap[D] = llvm::WeakVH(SP);
+ LexicalBlockStack.emplace_back(SPN);
+ RegionMap[D].reset(SP);
return;
}
}
- Name = getFunctionName(FD);
- // Use mangled name as linkage name for C/C++ functions.
- if (FD->hasPrototype()) {
- LinkageName = CGM.getMangledName(GD);
- Flags |= llvm::DIDescriptor::FlagPrototyped;
- }
- // No need to replicate the linkage name if it isn't different from the
- // subprogram name, no need to have it at all unless coverage is enabled or
- // debug is set to more than just line tables.
- if (LinkageName == Name ||
- (!CGM.getCodeGenOpts().EmitGcovArcs &&
- !CGM.getCodeGenOpts().EmitGcovNotes &&
- DebugKind <= CodeGenOptions::DebugLineTablesOnly))
- LinkageName = StringRef();
-
- if (DebugKind >= CodeGenOptions::LimitedDebugInfo) {
- if (const NamespaceDecl *NSDecl =
- dyn_cast_or_null<NamespaceDecl>(FD->getDeclContext()))
- FDContext = getOrCreateNameSpace(NSDecl);
- else if (const RecordDecl *RDecl =
- dyn_cast_or_null<RecordDecl>(FD->getDeclContext()))
- FDContext = getContextDescriptor(cast<Decl>(RDecl));
-
- // Collect template parameters.
- TParamsArray = CollectFunctionTemplateParams(FD, Unit);
- }
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, FDContext,
+ TParamsArray, Flags);
} else if (const ObjCMethodDecl *OMD = dyn_cast<ObjCMethodDecl>(D)) {
Name = getObjCMethodName(OMD);
Flags |= llvm::DIDescriptor::FlagPrototyped;
@@ -2529,22 +2607,23 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD,
// FunctionDecls. When/if we fix this we can have FDContext be TheCU/null for
// all subprograms instead of the actual context since subprogram definitions
// are emitted as CU level entities by the backend.
- llvm::DISubprogram SP =
- DBuilder.createFunction(FDContext, Name, LinkageName, Unit, LineNo,
- getOrCreateFunctionType(D, FnType, Unit),
- Fn->hasInternalLinkage(), true /*definition*/,
- ScopeLine, Flags,
- CGM.getLangOpts().Optimize, Fn, TParamsArray,
- getFunctionDeclaration(D));
- if (HasDecl)
- DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(SP)));
+ llvm::DISubprogram SP = DBuilder.createFunction(
+ FDContext, Name, LinkageName, Unit, LineNo,
+ getOrCreateFunctionType(D, FnType, Unit), Fn->hasInternalLinkage(),
+ true /*definition*/, ScopeLine, Flags, CGM.getLangOpts().Optimize, Fn,
+ TParamsArray, getFunctionDeclaration(D));
+ // We might get here with a VarDecl in the case we're generating
+ // code for the initialization of globals. Do not record these decls
+ // as they will overwrite the actual VarDecl Decl in the cache.
+ if (HasDecl && isa<FunctionDecl>(D))
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(SP));
// Push the function onto the lexical block stack.
llvm::MDNode *SPN = SP;
- LexicalBlockStack.push_back(SPN);
+ LexicalBlockStack.emplace_back(SPN);
if (HasDecl)
- RegionMap[D] = llvm::WeakVH(SP);
+ RegionMap[D].reset(SP);
}
/// EmitLocation - Emit metadata to indicate a change in line/column
@@ -2555,38 +2634,25 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc,
// Update our current location
setLocation(Loc);
- if (CurLoc.isInvalid() || CurLoc.isMacroID()) return;
-
- // Don't bother if things are the same as last time.
- SourceManager &SM = CGM.getContext().getSourceManager();
- if (CurLoc == PrevLoc ||
- SM.getExpansionLoc(CurLoc) == SM.getExpansionLoc(PrevLoc))
- // New Builder may not be in sync with CGDebugInfo.
- if (!Builder.getCurrentDebugLocation().isUnknown() &&
- Builder.getCurrentDebugLocation().getScope(CGM.getLLVMContext()) ==
- LexicalBlockStack.back())
- return;
-
- // Update last state.
- PrevLoc = CurLoc;
+ if (CurLoc.isInvalid() || CurLoc.isMacroID())
+ return;
llvm::MDNode *Scope = LexicalBlockStack.back();
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get
- (getLineNumber(CurLoc),
- getColumnNumber(CurLoc, ForceColumnInfo),
- Scope));
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(CurLoc), getColumnNumber(CurLoc, ForceColumnInfo), Scope));
}
/// CreateLexicalBlock - Creates a new lexical block node and pushes it on
/// the stack.
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
+ llvm::MDNode *Back = nullptr;
+ if (!LexicalBlockStack.empty())
+ Back = LexicalBlockStack.back().get();
llvm::DIDescriptor D = DBuilder.createLexicalBlock(
- llvm::DIDescriptor(LexicalBlockStack.empty() ? nullptr
- : LexicalBlockStack.back()),
- getOrCreateFile(CurLoc), getLineNumber(CurLoc), getColumnNumber(CurLoc),
- 0);
+ llvm::DIDescriptor(Back), getOrCreateFile(CurLoc), getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
llvm::MDNode *DN = D;
- LexicalBlockStack.push_back(DN);
+ LexicalBlockStack.emplace_back(DN);
}
/// EmitLexicalBlockStart - Constructs the debug code for entering a declarative
@@ -2596,13 +2662,15 @@ void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
// Set our current location.
setLocation(Loc);
+ // Emit a line table change for the current location inside the new scope.
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back()));
+
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
// Create a new lexical block and push it on the stack.
CreateLexicalBlock(Loc);
-
- // Emit a line table change for the current location inside the new scope.
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(Loc),
- getColumnNumber(Loc),
- LexicalBlockStack.back()));
}
/// EmitLexicalBlockEnd - Constructs the debug code for exiting a declarative
@@ -2614,6 +2682,9 @@ void CGDebugInfo::EmitLexicalBlockEnd(CGBuilderTy &Builder,
// Provide an entry in the line table for the end of the block.
EmitLocation(Builder, Loc);
+ if (DebugKind <= CodeGenOptions::DebugLineTablesOnly)
+ return;
+
LexicalBlockStack.pop_back();
}
@@ -2624,8 +2695,11 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
assert(RCount <= LexicalBlockStack.size() && "Region stack mismatch");
// Pop all regions for this function.
- while (LexicalBlockStack.size() != RCount)
- EmitLexicalBlockEnd(Builder, CurLoc);
+ while (LexicalBlockStack.size() != RCount) {
+ // Provide an entry in the line table for the end of the block.
+ EmitLocation(Builder, CurLoc);
+ LexicalBlockStack.pop_back();
+ }
FnBeginRegionCount.pop_back();
}
@@ -2634,7 +2708,7 @@ void CGDebugInfo::EmitFunctionEnd(CGBuilderTy &Builder) {
llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
uint64_t *XOffset) {
- SmallVector<llvm::Value *, 5> EltTys;
+ SmallVector<llvm::Metadata *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
unsigned FieldAlign;
@@ -2653,31 +2727,29 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
bool HasCopyAndDispose = CGM.getContext().BlockRequiresCopying(Type, VD);
if (HasCopyAndDispose) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- EltTys.push_back(CreateMemberType(Unit, FType, "__copy_helper",
- &FieldOffset));
- EltTys.push_back(CreateMemberType(Unit, FType, "__destroy_helper",
- &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__copy_helper", &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__destroy_helper", &FieldOffset));
}
bool HasByrefExtendedLayout;
Qualifiers::ObjCLifetime Lifetime;
- if (CGM.getContext().getByrefLifetime(Type,
- Lifetime, HasByrefExtendedLayout)
- && HasByrefExtendedLayout) {
+ if (CGM.getContext().getByrefLifetime(Type, Lifetime,
+ HasByrefExtendedLayout) &&
+ HasByrefExtendedLayout) {
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
- EltTys.push_back(CreateMemberType(Unit, FType,
- "__byref_variable_layout",
- &FieldOffset));
+ EltTys.push_back(
+ CreateMemberType(Unit, FType, "__byref_variable_layout", &FieldOffset));
}
CharUnits Align = CGM.getContext().getDeclAlign(VD);
if (Align > CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerAlign(0))) {
- CharUnits FieldOffsetInBytes
- = CGM.getContext().toCharUnitsFromBits(FieldOffset);
- CharUnits AlignedOffsetInBytes
- = FieldOffsetInBytes.RoundUpToAlignment(Align);
- CharUnits NumPaddingBytes
- = AlignedOffsetInBytes - FieldOffsetInBytes;
+ CGM.getTarget().getPointerAlign(0))) {
+ CharUnits FieldOffsetInBytes =
+ CGM.getContext().toCharUnitsFromBits(FieldOffset);
+ CharUnits AlignedOffsetInBytes =
+ FieldOffsetInBytes.RoundUpToAlignment(Align);
+ CharUnits NumPaddingBytes = AlignedOffsetInBytes - FieldOffsetInBytes;
if (NumPaddingBytes.isPositive()) {
llvm::APInt pad(32, NumPaddingBytes.getQuantity());
@@ -2693,9 +2765,8 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
FieldAlign = CGM.getContext().toBits(Align);
*XOffset = FieldOffset;
- FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit,
- 0, FieldSize, FieldAlign,
- FieldOffset, 0, FieldTy);
+ FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
+ FieldAlign, FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
@@ -2709,8 +2780,8 @@ llvm::DIType CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
/// EmitDeclare - Emit local variable declaration debug info.
void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
- llvm::Value *Storage,
- unsigned ArgNo, CGBuilderTy &Builder) {
+ llvm::Value *Storage, unsigned ArgNo,
+ CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
@@ -2760,29 +2831,26 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
if (!Name.empty()) {
if (VD->hasAttr<BlocksAttr>()) {
CharUnits offset = CharUnits::fromQuantity(32);
- SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = CGM.Int64Ty;
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ SmallVector<int64_t, 9> addr;
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of __forwarding field
offset = CGM.getContext().toCharUnitsFromBits(
- CGM.getTarget().getPointerWidth(0));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ CGM.getTarget().getPointerWidth(0));
+ addr.push_back(offset.getQuantity());
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of x field
offset = CGM.getContext().toCharUnitsFromBits(XOffset);
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(offset.getQuantity());
// Create the descriptor for the variable.
- llvm::DIVariable D =
- DBuilder.createComplexVariable(Tag,
- llvm::DIDescriptor(Scope),
- VD->getName(), Unit, Line, Ty,
- addr, ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), VD->getName(), Unit, Line, Ty, ArgNo);
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
return;
} else if (isa<VariableArrayType>(VD->getType()))
@@ -2801,15 +2869,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
continue;
// Use VarDecl's Tag, Scope and Line number.
- llvm::DIVariable D =
- DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
- FieldName, Unit, Line, FieldTy,
- CGM.getLangOpts().Optimize, Flags,
- ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), FieldName, Unit, Line, FieldTy,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ llvm::Instruction *Call = DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
return;
@@ -2817,14 +2883,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::dwarf::LLVMConstants Tag,
}
// Create the descriptor for the variable.
- llvm::DIVariable D =
- DBuilder.createLocalVariable(Tag, llvm::DIDescriptor(Scope),
- Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags, ArgNo);
+ llvm::DIVariable D = DBuilder.createLocalVariable(
+ Tag, llvm::DIDescriptor(Scope), Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags, ArgNo);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertBlock());
+ llvm::Instruction *Call = DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(), Builder.GetInsertBlock());
Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
@@ -2844,14 +2909,14 @@ void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
llvm::DIType CGDebugInfo::CreateSelfType(const QualType &QualTy,
llvm::DIType Ty) {
llvm::DIType CachedTy = getTypeOrNull(QualTy);
- if (CachedTy) Ty = CachedTy;
+ if (CachedTy)
+ Ty = CachedTy;
return DBuilder.createObjectPointerType(Ty);
}
-void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
- llvm::Value *Storage,
- CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo) {
+void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
+ const VarDecl *VD, llvm::Value *Storage, CGBuilderTy &Builder,
+ const CGBlockInfo &blockInfo, llvm::Instruction *InsertPoint) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
@@ -2880,40 +2945,42 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(const VarDecl *VD,
const llvm::DataLayout &target = CGM.getDataLayout();
CharUnits offset = CharUnits::fromQuantity(
- target.getStructLayout(blockInfo.StructureType)
+ target.getStructLayout(blockInfo.StructureType)
->getElementOffset(blockInfo.getCapture(VD).getIndex()));
- SmallVector<llvm::Value *, 9> addr;
- llvm::Type *Int64Ty = CGM.Int64Ty;
+ SmallVector<int64_t, 9> addr;
if (isa<llvm::AllocaInst>(Storage))
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
+ addr.push_back(offset.getQuantity());
if (isByRef) {
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of __forwarding field
- offset = CGM.getContext()
- .toCharUnitsFromBits(target.getPointerSizeInBits(0));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpDeref));
- addr.push_back(llvm::ConstantInt::get(Int64Ty, llvm::DIBuilder::OpPlus));
+ offset =
+ CGM.getContext().toCharUnitsFromBits(target.getPointerSizeInBits(0));
+ addr.push_back(offset.getQuantity());
+ addr.push_back(llvm::dwarf::DW_OP_deref);
+ addr.push_back(llvm::dwarf::DW_OP_plus);
// offset of x field
offset = CGM.getContext().toCharUnitsFromBits(XOffset);
- addr.push_back(llvm::ConstantInt::get(Int64Ty, offset.getQuantity()));
+ addr.push_back(offset.getQuantity());
}
// Create the descriptor for the variable.
llvm::DIVariable D =
- DBuilder.createComplexVariable(llvm::dwarf::DW_TAG_auto_variable,
+ DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_auto_variable,
llvm::DIDescriptor(LexicalBlockStack.back()),
- VD->getName(), Unit, Line, Ty, addr);
+ VD->getName(), Unit, Line, Ty);
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *Call =
- DBuilder.insertDeclare(Storage, D, Builder.GetInsertPoint());
- Call->setDebugLoc(llvm::DebugLoc::get(Line, Column,
- LexicalBlockStack.back()));
+ llvm::Instruction *Call = InsertPoint ?
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ InsertPoint)
+ : DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr),
+ Builder.GetInsertBlock());
+ Call->setDebugLoc(
+ llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back()));
}
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
@@ -2926,17 +2993,18 @@ void CGDebugInfo::EmitDeclareOfArgVariable(const VarDecl *VD, llvm::Value *AI,
}
namespace {
- struct BlockLayoutChunk {
- uint64_t OffsetInBits;
- const BlockDecl::Capture *Capture;
- };
- bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
- return l.OffsetInBits < r.OffsetInBits;
- }
+struct BlockLayoutChunk {
+ uint64_t OffsetInBits;
+ const BlockDecl::Capture *Capture;
+};
+bool operator<(const BlockLayoutChunk &l, const BlockLayoutChunk &r) {
+ return l.OffsetInBits < r.OffsetInBits;
+}
}
void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::Value *Arg,
+ unsigned ArgNo,
llvm::Value *LocalAddr,
CGBuilderTy &Builder) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
@@ -2953,9 +3021,9 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
getContextDescriptor(cast<Decl>(blockDecl->getDeclContext()));
const llvm::StructLayout *blockLayout =
- CGM.getDataLayout().getStructLayout(block.StructureType);
+ CGM.getDataLayout().getStructLayout(block.StructureType);
- SmallVector<llvm::Value*, 16> fields;
+ SmallVector<llvm::Metadata *, 16> fields;
fields.push_back(createFieldType("__isa", C.VoidPtrTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(0),
tunit, tunit));
@@ -2965,16 +3033,16 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
fields.push_back(createFieldType("__reserved", C.IntTy, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(2),
tunit, tunit));
- fields.push_back(createFieldType("__FuncPtr", C.VoidPtrTy, 0, loc, AS_public,
+ auto *FnTy = block.getBlockExpr()->getFunctionType();
+ auto FnPtrType = CGM.getContext().getPointerType(FnTy->desugar());
+ fields.push_back(createFieldType("__FuncPtr", FnPtrType, 0, loc, AS_public,
blockLayout->getElementOffsetInBits(3),
tunit, tunit));
- fields.push_back(createFieldType("__descriptor",
- C.getPointerType(block.NeedsCopyDispose ?
- C.getBlockDescriptorExtendedType() :
- C.getBlockDescriptorType()),
- 0, loc, AS_public,
- blockLayout->getElementOffsetInBits(4),
- tunit, tunit));
+ fields.push_back(createFieldType(
+ "__descriptor", C.getPointerType(block.NeedsCopyDispose
+ ? C.getBlockDescriptorExtendedType()
+ : C.getBlockDescriptorType()),
+ 0, loc, AS_public, blockLayout->getElementOffsetInBits(4), tunit, tunit));
// We want to sort the captures by offset, not because DWARF
// requires this, but because we're paranoid about debuggers.
@@ -2984,7 +3052,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
if (blockDecl->capturesCXXThis()) {
BlockLayoutChunk chunk;
chunk.OffsetInBits =
- blockLayout->getElementOffsetInBits(block.CXXThisIndex);
+ blockLayout->getElementOffsetInBits(block.CXXThisIndex);
chunk.Capture = nullptr;
chunks.push_back(chunk);
}
@@ -3000,7 +3068,7 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
BlockLayoutChunk chunk;
chunk.OffsetInBits =
- blockLayout->getElementOffsetInBits(captureInfo.getIndex());
+ blockLayout->getElementOffsetInBits(captureInfo.getIndex());
chunk.Capture = &capture;
chunks.push_back(chunk);
}
@@ -3008,15 +3076,16 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Sort by offset.
llvm::array_pod_sort(chunks.begin(), chunks.end());
- for (SmallVectorImpl<BlockLayoutChunk>::iterator
- i = chunks.begin(), e = chunks.end(); i != e; ++i) {
+ for (SmallVectorImpl<BlockLayoutChunk>::iterator i = chunks.begin(),
+ e = chunks.end();
+ i != e; ++i) {
uint64_t offsetInBits = i->OffsetInBits;
const BlockDecl::Capture *capture = i->Capture;
// If we have a null capture, this must be the C++ 'this' capture.
if (!capture) {
const CXXMethodDecl *method =
- cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
+ cast<CXXMethodDecl>(blockDecl->getNonClosureContext());
QualType type = method->getThisType(C);
fields.push_back(createFieldType("this", type, 0, loc, AS_public,
@@ -3029,33 +3098,33 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIType fieldType;
if (capture->isByRef()) {
- std::pair<uint64_t,unsigned> ptrInfo = C.getTypeInfo(C.VoidPtrTy);
+ TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
// FIXME: this creates a second copy of this type!
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
- fieldType = DBuilder.createPointerType(fieldType, ptrInfo.first);
- fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
- ptrInfo.first, ptrInfo.second,
- offsetInBits, 0, fieldType);
+ fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
+ fieldType =
+ DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
+ PtrInfo.Align, offsetInBits, 0, fieldType);
} else {
- fieldType = createFieldType(name, variable->getType(), 0,
- loc, AS_public, offsetInBits, tunit, tunit);
+ fieldType = createFieldType(name, variable->getType(), 0, loc, AS_public,
+ offsetInBits, tunit, tunit);
}
fields.push_back(fieldType);
}
SmallString<36> typeName;
- llvm::raw_svector_ostream(typeName)
- << "__block_literal_" << CGM.getUniqueBlockCount();
+ llvm::raw_svector_ostream(typeName) << "__block_literal_"
+ << CGM.getUniqueBlockCount();
llvm::DIArray fieldsArray = DBuilder.getOrCreateArray(fields);
llvm::DIType type =
- DBuilder.createStructType(tunit, typeName.str(), tunit, line,
- CGM.getContext().toBits(block.BlockSize),
- CGM.getContext().toBits(block.BlockAlign),
- 0, llvm::DIType(), fieldsArray);
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize),
+ CGM.getContext().toBits(block.BlockAlign), 0,
+ llvm::DIType(), fieldsArray);
type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
// Get overall information about the block.
@@ -3063,24 +3132,22 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::MDNode *scope = LexicalBlockStack.back();
// Create the descriptor for the parameter.
- llvm::DIVariable debugVar =
- DBuilder.createLocalVariable(llvm::dwarf::DW_TAG_arg_variable,
- llvm::DIDescriptor(scope),
- Arg->getName(), tunit, line, type,
- CGM.getLangOpts().Optimize, flags,
- cast<llvm::Argument>(Arg)->getArgNo() + 1);
+ llvm::DIVariable debugVar = DBuilder.createLocalVariable(
+ llvm::dwarf::DW_TAG_arg_variable, llvm::DIDescriptor(scope),
+ Arg->getName(), tunit, line, type, CGM.getLangOpts().Optimize, flags,
+ ArgNo);
if (LocalAddr) {
// Insert an llvm.dbg.value into the current block.
- llvm::Instruction *DbgVal =
- DBuilder.insertDbgValueIntrinsic(LocalAddr, 0, debugVar,
- Builder.GetInsertBlock());
+ llvm::Instruction *DbgVal = DBuilder.insertDbgValueIntrinsic(
+ LocalAddr, 0, debugVar, DBuilder.createExpression(),
+ Builder.GetInsertBlock());
DbgVal->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
// Insert an llvm.dbg.declare into the current block.
- llvm::Instruction *DbgDecl =
- DBuilder.insertDeclare(Arg, debugVar, Builder.GetInsertBlock());
+ llvm::Instruction *DbgDecl = DBuilder.insertDeclare(
+ Arg, debugVar, DBuilder.createExpression(), Builder.GetInsertBlock());
DbgDecl->setDebugLoc(llvm::DebugLoc::get(line, column, scope));
}
@@ -3090,8 +3157,7 @@ llvm::DIDerivedType
CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
if (!D->isStaticDataMember())
return llvm::DIDerivedType();
- llvm::DenseMap<const Decl *, llvm::WeakVH>::iterator MI =
- StaticDataMemberCache.find(D->getCanonicalDecl());
+ auto MI = StaticDataMemberCache.find(D->getCanonicalDecl());
if (MI != StaticDataMemberCache.end()) {
assert(MI->second && "Static data member declaration should still exist");
return llvm::DIDerivedType(cast<llvm::MDNode>(MI->second));
@@ -3099,10 +3165,9 @@ CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
// If the member wasn't found in the cache, lazily construct and add it to the
// type (used when a limited form of the type is emitted).
- llvm::DICompositeType Ctxt(
- getContextDescriptor(cast<Decl>(D->getDeclContext())));
- llvm::DIDerivedType T = CreateRecordStaticField(D, Ctxt);
- return T;
+ auto DC = D->getDeclContext();
+ llvm::DICompositeType Ctxt(getContextDescriptor(cast<Decl>(DC)));
+ return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}
/// Recursively collect all of the member fields of a global anonymous decl and
@@ -3128,10 +3193,9 @@ CGDebugInfo::CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile Unit,
continue;
}
// Use VarDecl's Tag, Scope and Line number.
- GV = DBuilder.createStaticVariable(DContext, FieldName, LinkageName, Unit,
- LineNo, FieldTy,
- Var->hasInternalLinkage(), Var,
- llvm::DIDerivedType());
+ GV = DBuilder.createGlobalVariable(
+ DContext, FieldName, LinkageName, Unit, LineNo, FieldTy,
+ Var->hasInternalLinkage(), Var, llvm::DIDerivedType());
}
return GV;
}
@@ -3141,32 +3205,12 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const VarDecl *D) {
assert(DebugKind >= CodeGenOptions::LimitedDebugInfo);
// Create global variable debug descriptor.
- llvm::DIFile Unit = getOrCreateFile(D->getLocation());
- unsigned LineNo = getLineNumber(D->getLocation());
-
- setLocation(D->getLocation());
-
- QualType T = D->getType();
- if (T->isIncompleteArrayType()) {
-
- // CodeGen turns int[] into int[1] so we'll do the same here.
- llvm::APInt ConstVal(32, 1);
- QualType ET = CGM.getContext().getAsArrayType(T)->getElementType();
-
- T = CGM.getContext().getConstantArrayType(ET, ConstVal,
- ArrayType::Normal, 0);
- }
-
- StringRef DeclName = D->getName();
- StringRef LinkageName;
- if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()) &&
- !isa<ObjCMethodDecl>(D->getDeclContext()))
- LinkageName = Var->getName();
- if (LinkageName == DeclName)
- LinkageName = StringRef();
-
- llvm::DIDescriptor DContext =
- getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()));
+ llvm::DIFile Unit;
+ llvm::DIDescriptor DContext;
+ unsigned LineNo;
+ StringRef DeclName, LinkageName;
+ QualType T;
+ collectVarDeclProps(D, Unit, LineNo, T, DeclName, LinkageName, DContext);
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
@@ -3177,15 +3221,16 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// to find the name of any field in the union.
if (T->isUnionType() && DeclName.empty()) {
const RecordDecl *RD = cast<RecordType>(T)->getDecl();
- assert(RD->isAnonymousStructOrUnion() && "unnamed non-anonymous struct or union?");
+ assert(RD->isAnonymousStructOrUnion() &&
+ "unnamed non-anonymous struct or union?");
GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
} else {
- GV = DBuilder.createStaticVariable(
+ GV = DBuilder.createGlobalVariable(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
Var->hasInternalLinkage(), Var,
getOrCreateStaticDataMemberDeclarationOrNull(D));
}
- DeclCache.insert(std::make_pair(D->getCanonicalDecl(), llvm::WeakVH(GV)));
+ DeclCache[D->getCanonicalDecl()].reset(static_cast<llvm::Metadata *>(GV));
}
/// EmitGlobalVariable - Emit global variable's debug info.
@@ -3208,16 +3253,25 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
if (isa<FunctionDecl>(VD->getDeclContext()))
return;
VD = cast<ValueDecl>(VD->getCanonicalDecl());
- auto pair = DeclCache.insert(std::make_pair(VD, llvm::WeakVH()));
- if (!pair.second)
+ auto *VarD = cast<VarDecl>(VD);
+ if (VarD->isStaticDataMember()) {
+ auto *RD = cast<RecordDecl>(VarD->getDeclContext());
+ getContextDescriptor(RD);
+ // Ensure that the type is retained even though it's otherwise unreferenced.
+ RetainedTypes.push_back(
+ CGM.getContext().getRecordType(RD).getAsOpaquePtr());
return;
+ }
+
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(VD->getDeclContext()));
- llvm::DIGlobalVariable GV = DBuilder.createStaticVariable(
+
+ auto &GV = DeclCache[VD];
+ if (GV)
+ return;
+ GV.reset(DBuilder.createGlobalVariable(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
- true, Init,
- getOrCreateStaticDataMemberDeclarationOrNull(cast<VarDecl>(VD)));
- pair.first->second = llvm::WeakVH(GV);
+ true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
}
llvm::DIScope CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
@@ -3243,7 +3297,7 @@ void CGDebugInfo::EmitUsingDecl(const UsingDecl &UD) {
// Emitting one decl is sufficient - debuggers can detect that this is an
// overloaded name & provide lookup for all the overloads.
const UsingShadowDecl &USD = **UD.shadow_begin();
- if (llvm::DIScope Target =
+ if (llvm::DIDescriptor Target =
getDeclarationOrDefinition(USD.getUnderlyingDecl()))
DBuilder.createImportedDeclaration(
getCurrentContextDescriptor(cast<Decl>(USD.getDeclContext())), Target,
@@ -3254,7 +3308,7 @@ llvm::DIImportedEntity
CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
return llvm::DIImportedEntity(nullptr);
- llvm::WeakVH &VH = NamespaceAliasCache[&NA];
+ auto &VH = NamespaceAliasCache[&NA];
if (VH)
return llvm::DIImportedEntity(cast<llvm::MDNode>(VH));
llvm::DIImportedEntity R(nullptr);
@@ -3270,7 +3324,7 @@ CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
getCurrentContextDescriptor(cast<Decl>(NA.getDeclContext())),
getOrCreateNameSpace(cast<NamespaceDecl>(NA.getAliasedNamespace())),
getLineNumber(NA.getLocation()), NA.getName());
- VH = R;
+ VH.reset(R);
return R;
}
@@ -3279,8 +3333,7 @@ CGDebugInfo::EmitNamespaceAlias(const NamespaceAliasDecl &NA) {
llvm::DINameSpace
CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
NSDecl = NSDecl->getCanonicalDecl();
- llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH>::iterator I =
- NameSpaceCache.find(NSDecl);
+ auto I = NameSpaceCache.find(NSDecl);
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
@@ -3290,7 +3343,7 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()));
llvm::DINameSpace NS =
DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
- NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
+ NameSpaceCache[NSDecl].reset(NS);
return NS;
}
@@ -3318,6 +3371,24 @@ void CGDebugInfo::finalize() {
Ty.replaceAllUsesWith(CGM.getLLVMContext(), RepTy);
}
+ for (const auto &p : FwdDeclReplaceMap) {
+ assert(p.second);
+ llvm::DIDescriptor FwdDecl(cast<llvm::MDNode>(p.second));
+ llvm::Metadata *Repl;
+
+ auto it = DeclCache.find(p.first);
+ // If there has been no definition for the declaration, call RAUW
+ // with ourselves, that will destroy the temporary MDNode and
+ // replace it with a standard one, avoiding leaking memory.
+ if (it == DeclCache.end())
+ Repl = p.second;
+ else
+ Repl = it->second;
+
+ FwdDecl.replaceAllUsesWith(CGM.getLLVMContext(),
+ llvm::DIDescriptor(cast<llvm::MDNode>(Repl)));
+ }
+
// We keep our own list of retained types, because we need to look
// up the final type in the type cache.
for (std::vector<void *>::const_iterator RI = RetainedTypes.begin(),
@@ -3326,3 +3397,11 @@ void CGDebugInfo::finalize() {
DBuilder.finalize();
}
+
+void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
+ if (CGM.getCodeGenOpts().getDebugInfo() < CodeGenOptions::LimitedDebugInfo)
+ return;
+ llvm::DIType DieTy = getOrCreateType(Ty, getOrCreateMainFile());
+ // Don't ignore in case of explicit cast where it is referenced indirectly.
+ DBuilder.retainType(DieTy);
+}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index fc3f434991fa..0be032c1d790 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGDEBUGINFO_H
-#define CLANG_CODEGEN_CGDEBUGINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#include "CGBuilder.h"
#include "clang/AST/Expr.h"
@@ -53,7 +53,7 @@ class CGDebugInfo {
const CodeGenOptions::DebugInfoKind DebugKind;
llvm::DIBuilder DBuilder;
llvm::DICompileUnit TheCU;
- SourceLocation CurLoc, PrevLoc;
+ SourceLocation CurLoc;
llvm::DIType VTablePtrType;
llvm::DIType ClassTy;
llvm::DICompositeType ObjTy;
@@ -65,7 +65,7 @@ class CGDebugInfo {
llvm::DIType BlockLiteralGeneric;
/// TypeCache - Cache of previously constructed Types.
- llvm::DenseMap<const void *, llvm::WeakVH> TypeCache;
+ llvm::DenseMap<const void *, llvm::TrackingMDRef> TypeCache;
struct ObjCInterfaceCacheEntry {
const ObjCInterfaceType *Type;
@@ -85,11 +85,16 @@ class CGDebugInfo {
/// ReplaceMap - Cache of forward declared types to RAUW at the end of
/// compilation.
- std::vector<std::pair<const TagType *, llvm::WeakVH>> ReplaceMap;
+ std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
+
+  /// \brief Cache of replaceable forward declarations (functions and
+ /// variables) to RAUW at the end of compilation.
+ std::vector<std::pair<const DeclaratorDecl *, llvm::TrackingMDRef>>
+ FwdDeclReplaceMap;
// LexicalBlockStack - Keep track of our current nested lexical block.
- std::vector<llvm::TrackingVH<llvm::MDNode> > LexicalBlockStack;
- llvm::DenseMap<const Decl *, llvm::WeakVH> RegionMap;
+ std::vector<llvm::TrackingMDNodeRef> LexicalBlockStack;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> RegionMap;
// FnBeginRegionCount - Keep track of LexicalBlockStack counter at the
// beginning of a function. This is used to pop unbalanced regions at
// the end of a function.
@@ -100,14 +105,15 @@ class CGDebugInfo {
llvm::BumpPtrAllocator DebugInfoNames;
StringRef CWDName;
- llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
- llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
+ llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
+ llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// \brief Cache declarations relevant to DW_TAG_imported_declarations (C++
/// using declarations) that aren't covered by other more specific caches.
- llvm::DenseMap<const Decl *, llvm::WeakVH> DeclCache;
- llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
- llvm::DenseMap<const NamespaceAliasDecl *, llvm::WeakVH> NamespaceAliasCache;
- llvm::DenseMap<const Decl *, llvm::WeakVH> StaticDataMemberCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
+ llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NameSpaceCache;
+ llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
+ NamespaceAliasCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> StaticDataMemberCache;
/// Helper functions for getOrCreateType.
unsigned Checksum(const ObjCInterfaceDecl *InterfaceDecl);
@@ -158,14 +164,12 @@ class CGDebugInfo {
llvm::DIFile F,
llvm::DIType RecordTy);
- void CollectCXXMemberFunctions(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
+ void CollectCXXMemberFunctions(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType T);
- void CollectCXXBases(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &EltTys,
+ void CollectCXXBases(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType RecordTy);
llvm::DIArray
@@ -180,27 +184,29 @@ class CGDebugInfo {
llvm::DIType createFieldType(StringRef name, QualType type,
uint64_t sizeInBitsOverride, SourceLocation loc,
- AccessSpecifier AS, uint64_t offsetInBits,
+ AccessSpecifier AS,
+ uint64_t offsetInBits,
llvm::DIFile tunit,
- llvm::DIScope scope);
+ llvm::DIScope scope,
+ const RecordDecl* RD = nullptr);
// Helpers for collecting fields of a record.
void CollectRecordLambdaFields(const CXXRecordDecl *CXXDecl,
- SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType RecordTy);
llvm::DIDerivedType CreateRecordStaticField(const VarDecl *Var,
- llvm::DIType RecordTy);
+ llvm::DIType RecordTy,
+ const RecordDecl* RD);
void CollectRecordNormalField(const FieldDecl *Field, uint64_t OffsetInBits,
llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
- llvm::DIType RecordTy);
+ SmallVectorImpl<llvm::Metadata *> &E,
+ llvm::DIType RecordTy, const RecordDecl *RD);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &E,
+ SmallVectorImpl<llvm::Metadata *> &E,
llvm::DICompositeType RecordTy);
- void CollectVTableInfo(const CXXRecordDecl *Decl,
- llvm::DIFile F,
- SmallVectorImpl<llvm::Value *> &EltTys);
+ void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile F,
+ SmallVectorImpl<llvm::Metadata *> &EltTys);
// CreateLexicalBlock - Create a new lexical block node and push it on
// the stack.
@@ -255,7 +261,8 @@ public:
void EmitDeclareOfBlockDeclRefVariable(const VarDecl *variable,
llvm::Value *storage,
CGBuilderTy &Builder,
- const CGBlockInfo &blockInfo);
+ const CGBlockInfo &blockInfo,
+ llvm::Instruction *InsertPoint = 0);
/// EmitDeclareOfArgVariable - Emit call to llvm.dbg.declare for an argument
/// variable declaration.
@@ -266,7 +273,7 @@ public:
/// llvm.dbg.declare for the block-literal argument to a block
/// invocation function.
void EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
- llvm::Value *Arg,
+ llvm::Value *Arg, unsigned ArgNo,
llvm::Value *LocalAddr,
CGBuilderTy &Builder);
@@ -279,6 +286,9 @@ public:
/// \brief - Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
+ /// EmitExplicitCastType - Emit the type explicitly casted to.
+ void EmitExplicitCastType(QualType Ty);
+
/// \brief - Emit C++ using declaration.
void EmitUsingDecl(const UsingDecl &UD);
@@ -356,9 +366,9 @@ private:
llvm::DIType CreateMemberType(llvm::DIFile Unit, QualType FType,
StringRef Name, uint64_t *Offset);
- /// \brief Retrieve the DIScope, if any, for the canonical form of this
+ /// \brief Retrieve the DIDescriptor, if any, for the canonical form of this
/// declaration.
- llvm::DIScope getDeclarationOrDefinition(const Decl *D);
+ llvm::DIDescriptor getDeclarationOrDefinition(const Decl *D);
/// getFunctionDeclaration - Return debug info descriptor to describe method
/// declaration for the given method definition.
@@ -369,6 +379,14 @@ private:
llvm::DIDerivedType
getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
+ /// \brief Create a DISubprogram describing the forward
+ /// decalration represented in the given FunctionDecl.
+ llvm::DISubprogram getFunctionForwardDeclaration(const FunctionDecl *FD);
+
+ /// \brief Create a DIGlobalVariable describing the forward
+ /// decalration represented in the given VarDecl.
+ llvm::DIGlobalVariable getGlobalVariableForwardDeclaration(const VarDecl *VD);
+
/// Return a global variable that represents one of the collection of
/// global variables created for an anonmyous union.
llvm::DIGlobalVariable
@@ -404,6 +422,21 @@ private:
/// \param Force Assume DebugColumnInfo option is true.
unsigned getColumnNumber(SourceLocation Loc, bool Force=false);
+ /// \brief Collect various properties of a FunctionDecl.
+ /// \param GD A GlobalDecl whose getDecl() must return a FunctionDecl.
+ void collectFunctionDeclProps(GlobalDecl GD,
+ llvm::DIFile Unit,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &FDContext,
+ llvm::DIArray &TParamsArray,
+ unsigned &Flags);
+
+ /// \brief Collect various properties of a VarDecl.
+ void collectVarDeclProps(const VarDecl *VD, llvm::DIFile &Unit,
+ unsigned &LineNo, QualType &T,
+ StringRef &Name, StringRef &LinkageName,
+ llvm::DIDescriptor &VDContext);
+
/// internString - Allocate a copy of \p A using the DebugInfoNames allocator
/// and return a reference to it. If multiple arguments are given the strings
/// are concatenated.
@@ -415,27 +448,17 @@ private:
}
};
-/// SaveAndRestoreLocation - An RAII object saves the current location
-/// and automatically restores it to the original value.
-class SaveAndRestoreLocation {
+class ApplyDebugLocation {
protected:
- SourceLocation SavedLoc;
- CGDebugInfo *DI;
- CGBuilderTy &Builder;
-public:
- SaveAndRestoreLocation(CodeGenFunction &CGF, CGBuilderTy &B);
- /// Autorestore everything back to normal.
- ~SaveAndRestoreLocation();
-};
+ llvm::DebugLoc OriginalLocation;
+ CodeGenFunction &CGF;
-/// NoLocation - An RAII object that temporarily disables debug
-/// locations. This is useful for emitting instructions that should be
-/// counted towards the function prologue.
-class NoLocation : public SaveAndRestoreLocation {
public:
- NoLocation(CodeGenFunction &CGF, CGBuilderTy &B);
- /// Autorestore everything back to normal.
- ~NoLocation();
+ ApplyDebugLocation(CodeGenFunction &CGF,
+ SourceLocation TemporaryLocation = SourceLocation(),
+ bool ForceColumnInfo = false);
+ ApplyDebugLocation(CodeGenFunction &CGF, llvm::DebugLoc Loc);
+ ~ApplyDebugLocation();
};
/// ArtificialLocation - An RAII object that temporarily switches to
@@ -449,16 +472,9 @@ public:
/// This is necessary because passing an empty SourceLocation to
/// CGDebugInfo::setLocation() will result in the last valid location
/// being reused.
-class ArtificialLocation : public SaveAndRestoreLocation {
+class ArtificialLocation : public ApplyDebugLocation {
public:
- ArtificialLocation(CodeGenFunction &CGF, CGBuilderTy &B);
-
- /// Set the current location to line 0, but within the current scope
- /// (= the top of the LexicalBlockStack).
- void Emit();
-
- /// Autorestore everything back to normal.
- ~ArtificialLocation();
+ ArtificialLocation(CodeGenFunction &CGF);
};
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 91f804193049..766d2aa6ffb8 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -146,60 +146,71 @@ void CodeGenFunction::EmitVarDecl(const VarDecl &D) {
return EmitAutoVarDecl(D);
}
-static std::string GetStaticDeclName(CodeGenFunction &CGF, const VarDecl &D,
- const char *Separator) {
- CodeGenModule &CGM = CGF.CGM;
-
- if (CGF.getLangOpts().CPlusPlus)
+static std::string getStaticDeclName(CodeGenModule &CGM, const VarDecl &D) {
+ if (CGM.getLangOpts().CPlusPlus)
return CGM.getMangledName(&D).str();
- StringRef ContextName;
- if (!CGF.CurFuncDecl) {
- // Better be in a block declared in global scope.
- const NamedDecl *ND = cast<NamedDecl>(&D);
- const DeclContext *DC = ND->getDeclContext();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(DC))
- ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
- else
- llvm_unreachable("Unknown context for block static var decl");
- } else if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CGF.CurFuncDecl))
+ // If this isn't C++, we don't need a mangled name, just a pretty one.
+ assert(!D.isExternallyVisible() && "name shouldn't matter");
+ std::string ContextName;
+ const DeclContext *DC = D.getDeclContext();
+ if (const auto *FD = dyn_cast<FunctionDecl>(DC))
ContextName = CGM.getMangledName(FD);
- else if (isa<ObjCMethodDecl>(CGF.CurFuncDecl))
- ContextName = CGF.CurFn->getName();
+ else if (const auto *BD = dyn_cast<BlockDecl>(DC))
+ ContextName = CGM.getBlockMangledName(GlobalDecl(), BD);
+ else if (const auto *OMD = dyn_cast<ObjCMethodDecl>(DC))
+ ContextName = OMD->getSelector().getAsString();
else
llvm_unreachable("Unknown context for static var decl");
- return ContextName.str() + Separator + D.getNameAsString();
+ ContextName += "." + D.getNameAsString();
+ return ContextName;
}
-llvm::Constant *
-CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
- const char *Separator,
- llvm::GlobalValue::LinkageTypes Linkage) {
+llvm::Constant *CodeGenModule::getOrCreateStaticVarDecl(
+ const VarDecl &D, llvm::GlobalValue::LinkageTypes Linkage) {
+ // In general, we don't always emit static var decls once before we reference
+ // them. It is possible to reference them before emitting the function that
+ // contains them, and it is possible to emit the containing function multiple
+ // times.
+ if (llvm::Constant *ExistingGV = StaticLocalDeclMap[&D])
+ return ExistingGV;
+
QualType Ty = D.getType();
assert(Ty->isConstantSizeType() && "VLAs can't be static");
// Use the label if the variable is renamed with the asm-label extension.
std::string Name;
if (D.hasAttr<AsmLabelAttr>())
- Name = CGM.getMangledName(&D);
+ Name = getMangledName(&D);
else
- Name = GetStaticDeclName(*this, D, Separator);
+ Name = getStaticDeclName(*this, D);
- llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(Ty);
+ llvm::Type *LTy = getTypes().ConvertTypeForMem(Ty);
unsigned AddrSpace =
- CGM.GetGlobalVarAddressSpace(&D, CGM.getContext().getTargetAddressSpace(Ty));
+ GetGlobalVarAddressSpace(&D, getContext().getTargetAddressSpace(Ty));
+
+ // Local address space cannot have an initializer.
+ llvm::Constant *Init = nullptr;
+ if (Ty.getAddressSpace() != LangAS::opencl_local)
+ Init = EmitNullConstant(Ty);
+ else
+ Init = llvm::UndefValue::get(LTy);
+
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), LTy,
+ new llvm::GlobalVariable(getModule(), LTy,
Ty.isConstant(getContext()), Linkage,
- CGM.EmitNullConstant(D.getType()), Name, nullptr,
+ Init, Name, nullptr,
llvm::GlobalVariable::NotThreadLocal,
AddrSpace);
GV->setAlignment(getContext().getDeclAlign(&D).getQuantity());
- CGM.setGlobalVisibility(GV, &D);
+ setGlobalVisibility(GV, &D);
+
+ if (supportsCOMDAT() && GV->isWeakForLinker())
+ GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
if (D.getTLSKind())
- CGM.setTLSMode(GV, D);
+ setTLSMode(GV, D);
if (D.isExternallyVisible()) {
if (D.hasAttr<DLLImportAttr>())
@@ -209,13 +220,44 @@ CodeGenFunction::CreateStaticVarDecl(const VarDecl &D,
}
// Make sure the result is of the correct type.
- unsigned ExpectedAddrSpace = CGM.getContext().getTargetAddressSpace(Ty);
+ unsigned ExpectedAddrSpace = getContext().getTargetAddressSpace(Ty);
+ llvm::Constant *Addr = GV;
if (AddrSpace != ExpectedAddrSpace) {
llvm::PointerType *PTy = llvm::PointerType::get(LTy, ExpectedAddrSpace);
- return llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
+ Addr = llvm::ConstantExpr::getAddrSpaceCast(GV, PTy);
}
- return GV;
+ setStaticLocalDeclAddress(&D, Addr);
+
+ // Ensure that the static local gets initialized by making sure the parent
+ // function gets emitted eventually.
+ const Decl *DC = cast<Decl>(D.getDeclContext());
+
+ // We can't name blocks or captured statements directly, so try to emit their
+ // parents.
+ if (isa<BlockDecl>(DC) || isa<CapturedDecl>(DC)) {
+ DC = DC->getNonClosureContext();
+ // FIXME: Ensure that global blocks get emitted.
+ if (!DC)
+ return Addr;
+ }
+
+ GlobalDecl GD;
+ if (const auto *CD = dyn_cast<CXXConstructorDecl>(DC))
+ GD = GlobalDecl(CD, Ctor_Base);
+ else if (const auto *DD = dyn_cast<CXXDestructorDecl>(DC))
+ GD = GlobalDecl(DD, Dtor_Base);
+ else if (const auto *FD = dyn_cast<FunctionDecl>(DC))
+ GD = GlobalDecl(FD);
+ else {
+ // Don't do anything for Obj-C method decls or global closures. We should
+ // never defer them.
+ assert(isa<ObjCMethodDecl>(DC) && "unexpected parent code decl");
+ }
+ if (GD.getDecl())
+ (void)GetAddrOfGlobal(GD);
+
+ return Addr;
}
/// hasNontrivialDestruction - Determine whether a type's destruction is
@@ -298,16 +340,11 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
// Check to see if we already have a global variable for this
// declaration. This can happen when double-emitting function
// bodies, e.g. with complete and base constructors.
- llvm::Constant *addr =
- CGM.getStaticLocalDeclAddress(&D);
-
- if (!addr)
- addr = CreateStaticVarDecl(D, ".", Linkage);
+ llvm::Constant *addr = CGM.getOrCreateStaticVarDecl(D, Linkage);
// Store into LocalDeclMap before generating initializer to handle
// circular references.
DMEntry = addr;
- CGM.setStaticLocalDeclAddress(&D, addr);
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -345,7 +382,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
DMEntry = castedAddr;
CGM.setStaticLocalDeclAddress(&D, castedAddr);
- CGM.reportGlobalToASan(var, D);
+ CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
@@ -562,10 +599,8 @@ static void drillIntoBlockVariable(CodeGenFunction &CGF,
lvalue.setAddress(CGF.BuildBlockByrefAddress(lvalue.getAddress(), var));
}
-void CodeGenFunction::EmitScalarInit(const Expr *init,
- const ValueDecl *D,
- LValue lvalue,
- bool capturedByInit) {
+void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
if (!lifetime) {
llvm::Value *value = EmitScalarExpr(init);
@@ -1035,7 +1070,7 @@ static bool isCapturedBy(const VarDecl &var, const Expr *e) {
/// \brief Determine whether the given initializer is trivial in the sense
/// that it requires no code to be generated.
-static bool isTrivialInitializer(const Expr *Init) {
+bool CodeGenFunction::isTrivialInitializer(const Expr *Init) {
if (!Init)
return true;
@@ -1055,6 +1090,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
if (emission.wasEmittedAsGlobal()) return;
const VarDecl &D = *emission.Variable;
+ ApplyDebugLocation DL(*this, D.getLocation());
QualType type = D.getType();
// If this local has an initializer, emit it now.
@@ -1129,7 +1165,7 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
} else {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
- std::string Name = GetStaticDeclName(*this, D, ".");
+ std::string Name = getStaticDeclName(CGM, D);
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
@@ -1158,10 +1194,8 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
/// \param alignment the alignment of the address
/// \param capturedByInit true if the variable is a __block variable
/// whose address is potentially changed by the initializer
-void CodeGenFunction::EmitExprAsInit(const Expr *init,
- const ValueDecl *D,
- LValue lvalue,
- bool capturedByInit) {
+void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
+ LValue lvalue, bool capturedByInit) {
QualType type = D->getType();
if (type->isReferenceType()) {
@@ -1636,7 +1670,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
if (CGM.getCodeGenOpts().getDebugInfo()
>= CodeGenOptions::LimitedDebugInfo) {
DI->setLocation(D.getLocation());
- DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, LocalAddr, Builder);
+ DI->EmitDeclareOfBlockLiteralArgVariable(*BlockInfo, Arg, ArgNo,
+ LocalAddr, Builder);
}
}
@@ -1656,7 +1691,9 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, llvm::Value *Arg,
DeclPtr = Arg->getType() == IRTy ? Arg : Builder.CreateBitCast(Arg, IRTy,
D.getName());
// Push a destructor cleanup for this parameter if the ABI requires it.
- if (!IsScalar &&
+ // Don't push a cleanup in a thunk for a method that will also emit a
+ // cleanup.
+ if (!IsScalar && !CurFuncIsThunk &&
getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
const CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
if (RD && RD->hasNonTrivialDestructor())
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 94cfe211601f..3b379b7d258b 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Intrinsics.h"
@@ -96,7 +97,7 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
assert(!record->hasTrivialDestructor());
CXXDestructorDecl *dtor = record->getDestructor();
- function = CGM.GetAddrOfCXXDestructor(dtor, Dtor_Complete);
+ function = CGM.getAddrOfCXXStructor(dtor, StructorType::Complete);
argument = llvm::ConstantExpr::getBitCast(
addr, CGF.getTypes().ConvertType(type)->getPointerTo());
@@ -139,6 +140,10 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
QualType T = D.getType();
if (!T->isReferenceType()) {
+ if (getLangOpts().OpenMP && D.hasAttr<OMPThreadPrivateDeclAttr>())
+ (void)CGM.getOpenMPRuntime().EmitOMPThreadPrivateVarDefinition(
+ &D, DeclPtr, D.getAttr<OMPThreadPrivateDeclAttr>()->getLocation(),
+ PerformInit, this);
if (PerformInit)
EmitDeclInit(*this, D, DeclPtr);
if (CGM.isTypeConstant(D.getType(), true))
@@ -155,17 +160,11 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
EmitStoreOfScalar(RV.getScalarVal(), DeclPtr, false, Alignment, T);
}
-static llvm::Function *
-CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- llvm::FunctionType *ty,
- const Twine &name,
- bool TLS = false);
-
/// Create a stub function, suitable for being passed to atexit,
/// which passes the given address to the given destructor function.
-static llvm::Constant *createAtExitStub(CodeGenModule &CGM, const VarDecl &VD,
- llvm::Constant *dtor,
- llvm::Constant *addr) {
+llvm::Constant *CodeGenFunction::createAtExitStub(const VarDecl &VD,
+ llvm::Constant *dtor,
+ llvm::Constant *addr) {
// Get the destructor function type, void(*)(void).
llvm::FunctionType *ty = llvm::FunctionType::get(CGM.VoidTy, false);
SmallString<256> FnName;
@@ -173,8 +172,8 @@ static llvm::Constant *createAtExitStub(CodeGenModule &CGM, const VarDecl &VD,
llvm::raw_svector_ostream Out(FnName);
CGM.getCXXABI().getMangleContext().mangleDynamicAtExitDestructor(&VD, Out);
}
- llvm::Function *fn =
- CreateGlobalInitOrDestructFunction(CGM, ty, FnName.str());
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(ty, FnName.str(),
+ VD.getLocation());
CodeGenFunction CGF(CGM);
@@ -198,7 +197,7 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::Constant *dtor,
llvm::Constant *addr) {
// Create a function which calls the destructor.
- llvm::Constant *dtorStub = createAtExitStub(CGM, VD, dtor, addr);
+ llvm::Constant *dtorStub = createAtExitStub(VD, dtor, addr);
// extern "C" int atexit(void (*f)(void));
llvm::FunctionType *atexitTy =
@@ -226,31 +225,28 @@ void CodeGenFunction::EmitCXXGuardedInit(const VarDecl &D,
CGM.getCXXABI().EmitGuardedInit(*this, D, DeclPtr, PerformInit);
}
-static llvm::Function *
-CreateGlobalInitOrDestructFunction(CodeGenModule &CGM,
- llvm::FunctionType *FTy,
- const Twine &Name, bool TLS) {
+llvm::Function *CodeGenModule::CreateGlobalInitOrDestructFunction(
+ llvm::FunctionType *FTy, const Twine &Name, SourceLocation Loc, bool TLS) {
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
- Name, &CGM.getModule());
- if (!CGM.getLangOpts().AppleKext && !TLS) {
+ Name, &getModule());
+ if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
- if (const char *Section =
- CGM.getTarget().getStaticInitSectionSpecifier())
+ if (const char *Section = getTarget().getStaticInitSectionSpecifier())
Fn->setSection(Section);
}
- Fn->setCallingConv(CGM.getRuntimeCC());
+ Fn->setCallingConv(getRuntimeCC());
- if (!CGM.getLangOpts().Exceptions)
+ if (!getLangOpts().Exceptions)
Fn->setDoesNotThrow();
- if (!CGM.getSanitizerBlacklist().isIn(*Fn)) {
- if (CGM.getLangOpts().Sanitize.Address)
+ if (!isInSanitizerBlacklist(Fn, Loc)) {
+ if (getLangOpts().Sanitize.has(SanitizerKind::Address))
Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
- if (CGM.getLangOpts().Sanitize.Thread)
+ if (getLangOpts().Sanitize.has(SanitizerKind::Thread))
Fn->addFnAttr(llvm::Attribute::SanitizeThread);
- if (CGM.getLangOpts().Sanitize.Memory)
+ if (getLangOpts().Sanitize.has(SanitizerKind::Memory))
Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
}
@@ -271,15 +267,7 @@ void CodeGenModule::EmitPointerToInitFunc(const VarDecl *D,
addUsedGlobal(PtrArray);
// If the GV is already in a comdat group, then we have to join it.
- llvm::Comdat *C = GV->getComdat();
-
- // LinkOnce and Weak linkage are lowered down to a single-member comdat group.
- // Make an explicit group so we can join it.
- if (!C && (GV->hasWeakLinkage() || GV->hasLinkOnceLinkage())) {
- C = TheModule.getOrInsertComdat(GV->getName());
- GV->setComdat(C);
- }
- if (C)
+ if (llvm::Comdat *C = GV->getComdat())
PtrArray->setComdat(C);
}
@@ -296,11 +284,15 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// Create a variable initialization function.
llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, FnName.str());
+ CreateGlobalInitOrDestructFunction(FTy, FnName.str(), D->getLocation());
auto *ISA = D->getAttr<InitSegAttr>();
CodeGenFunction(*this).GenerateCXXGlobalVarDeclInitFunc(Fn, D, Addr,
PerformInit);
+
+ llvm::GlobalVariable *COMDATKey =
+ supportsCOMDAT() && D->isExternallyVisible() ? Addr : nullptr;
+
if (D->getTLSKind()) {
// FIXME: Should we support init_priority for thread_local?
// FIXME: Ideally, initialization of instantiated thread_local static data
@@ -309,6 +301,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
// FIXME: We only need to register one __cxa_thread_atexit function for the
// entire TU.
CXXThreadLocalInits.push_back(Fn);
+ CXXThreadLocalInitVars.push_back(Addr);
} else if (PerformInit && ISA) {
EmitPointerToInitFunc(D, Addr, Fn, ISA);
DelayedCXXInitPosition.erase(D);
@@ -316,8 +309,7 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
OrderGlobalInits Key(IPA->getPriority(), PrioritizedCXXGlobalInits.size());
PrioritizedCXXGlobalInits.push_back(std::make_pair(Key, Fn));
DelayedCXXInitPosition.erase(D);
- } else if (D->getTemplateSpecializationKind() != TSK_ExplicitSpecialization &&
- D->getTemplateSpecializationKind() != TSK_Undeclared) {
+ } else if (isTemplateInstantiation(D->getTemplateSpecializationKind())) {
// C++ [basic.start.init]p2:
// Definitions of explicitly specialized class template static data
// members have ordered initialization. Other class template static data
@@ -326,11 +318,17 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
//
// As a consequence, we can put them into their own llvm.global_ctors entry.
//
- // In addition, put the initializer into a COMDAT group with the global
- // being initialized. On most platforms, this is a minor startup time
- // optimization. In the MS C++ ABI, there are no guard variables, so this
- // COMDAT key is required for correctness.
- AddGlobalCtor(Fn, 65535, Addr);
+ // If the global is externally visible, put the initializer into a COMDAT
+ // group with the global being initialized. On most platforms, this is a
+ // minor startup time optimization. In the MS C++ ABI, there are no guard
+ // variables, so this COMDAT key is required for correctness.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
+ DelayedCXXInitPosition.erase(D);
+ } else if (D->hasAttr<SelectAnyAttr>()) {
+ // SelectAny globals will be comdat-folded. Put the initializer into a
+ // COMDAT group associated with the global, so the initializers get folded
+ // too.
+ AddGlobalCtor(Fn, 65535, COMDATKey);
DelayedCXXInitPosition.erase(D);
} else {
llvm::DenseMap<const Decl *, unsigned>::iterator I =
@@ -346,23 +344,11 @@ CodeGenModule::EmitCXXGlobalVarDeclInitFunc(const VarDecl *D,
}
void CodeGenModule::EmitCXXThreadLocalInitFunc() {
- llvm::Function *InitFn = nullptr;
- if (!CXXThreadLocalInits.empty()) {
- // Generate a guarded initialization function.
- llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
- InitFn = CreateGlobalInitOrDestructFunction(*this, FTy, "__tls_init",
- /*TLS*/ true);
- llvm::GlobalVariable *Guard = new llvm::GlobalVariable(
- getModule(), Int8Ty, false, llvm::GlobalVariable::InternalLinkage,
- llvm::ConstantInt::get(Int8Ty, 0), "__tls_guard");
- Guard->setThreadLocal(true);
- CodeGenFunction(*this)
- .GenerateCXXGlobalInitFunc(InitFn, CXXThreadLocalInits, Guard);
- }
-
- getCXXABI().EmitThreadLocalInitFuncs(CXXThreadLocals, InitFn);
+ getCXXABI().EmitThreadLocalInitFuncs(
+ *this, CXXThreadLocals, CXXThreadLocalInits, CXXThreadLocalInitVars);
CXXThreadLocalInits.clear();
+ CXXThreadLocalInitVars.clear();
CXXThreadLocals.clear();
}
@@ -379,7 +365,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// Create our global initialization function.
if (!PrioritizedCXXGlobalInits.empty()) {
- SmallVector<llvm::Constant*, 8> LocalCXXGlobalInits;
+ SmallVector<llvm::Function *, 8> LocalCXXGlobalInits;
llvm::array_pod_sort(PrioritizedCXXGlobalInits.begin(),
PrioritizedCXXGlobalInits.end());
// Iterate over "chunks" of ctors with same priority and emit each chunk
@@ -398,10 +384,9 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
std::string PrioritySuffix = llvm::utostr(Priority);
// Priority is always <= 65535 (enforced by sema).
PrioritySuffix = std::string(6-PrioritySuffix.size(), '0')+PrioritySuffix;
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy,
- "_GLOBAL__I_" + PrioritySuffix);
-
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
+ FTy, "_GLOBAL__I_" + PrioritySuffix);
+
for (; I < PrioE; ++I)
LocalCXXGlobalInits.push_back(I->second);
@@ -409,21 +394,27 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
AddGlobalCtor(Fn, Priority);
}
}
-
- // Include the filename in the symbol name. Including "sub_" matches gcc and
- // makes sure these symbols appear lexicographically behind the symbols with
- // priority emitted above.
+
+ SmallString<128> FileName;
SourceManager &SM = Context.getSourceManager();
- SmallString<128> FileName(llvm::sys::path::filename(
- SM.getFileEntryForID(SM.getMainFileID())->getName()));
+ if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
+ // Include the filename in the symbol name. Including "sub_" matches gcc and
+ // makes sure these symbols appear lexicographically behind the symbols with
+ // priority emitted above.
+ FileName = llvm::sys::path::filename(MainFile->getName());
+ } else {
+ FileName = SmallString<128>("<null>");
+ }
+
for (size_t i = 0; i < FileName.size(); ++i) {
// Replace everything that's not [a-zA-Z0-9._] with a _. This set happens
// to be the set of C preprocessing numbers.
if (!isPreprocessingNumberBody(FileName[i]))
FileName[i] = '_';
}
+
llvm::Function *Fn = CreateGlobalInitOrDestructFunction(
- *this, FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
+ FTy, llvm::Twine("_GLOBAL__sub_I_", FileName));
CodeGenFunction(*this).GenerateCXXGlobalInitFunc(Fn, CXXGlobalInits);
AddGlobalCtor(Fn);
@@ -439,8 +430,7 @@ void CodeGenModule::EmitCXXGlobalDtorFunc() {
llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
// Create our global destructor function.
- llvm::Function *Fn =
- CreateGlobalInitOrDestructFunction(*this, FTy, "_GLOBAL__D_a");
+ llvm::Function *Fn = CreateGlobalInitOrDestructFunction(FTy, "_GLOBAL__D_a");
CodeGenFunction(*this).GenerateCXXGlobalDtorsFunc(Fn, CXXGlobalDtors);
AddGlobalDtor(Fn);
@@ -455,6 +445,8 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
if (D->hasAttr<NoDebugAttr>())
DebugInfo = nullptr; // disable debug info indefinitely for this function
+ CurEHLocation = D->getLocStart();
+
StartFunction(GlobalDecl(D), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(),
FunctionArgList(), D->getLocation(),
@@ -474,14 +466,14 @@ void CodeGenFunction::GenerateCXXGlobalVarDeclInitFunc(llvm::Function *Fn,
void
CodeGenFunction::GenerateCXXGlobalInitFunc(llvm::Function *Fn,
- ArrayRef<llvm::Constant *> Decls,
+ ArrayRef<llvm::Function *> Decls,
llvm::GlobalVariable *Guard) {
{
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
- AL.Emit();
+ ArtificialLocation AL(*this);
llvm::BasicBlock *ExitBlock = nullptr;
if (Guard) {
@@ -528,11 +520,11 @@ void CodeGenFunction::GenerateCXXGlobalDtorsFunc(llvm::Function *Fn,
const std::vector<std::pair<llvm::WeakVH, llvm::Constant*> >
&DtorsAndObjects) {
{
- ArtificialLocation AL(*this, Builder);
+ ApplyDebugLocation NL(*this);
StartFunction(GlobalDecl(), getContext().VoidTy, Fn,
getTypes().arrangeNullaryFunction(), FunctionArgList());
// Emit an artificial location for this function.
- AL.Emit();
+ ArtificialLocation AL(*this);
// Emit the dtors, in reverse order from construction.
for (unsigned i = 0, e = DtorsAndObjects.size(); i != e; ++i) {
@@ -561,8 +553,10 @@ llvm::Function *CodeGenFunction::generateDestroyHelper(
const CGFunctionInfo &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
getContext().VoidTy, args, FunctionType::ExtInfo(), /*variadic=*/false);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
- llvm::Function *fn =
- CreateGlobalInitOrDestructFunction(CGM, FTy, "__cxx_global_array_dtor");
+ llvm::Function *fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, "__cxx_global_array_dtor", VD->getLocation());
+
+ CurEHLocation = VD->getLocStart();
StartFunction(VD, getContext().VoidTy, fn, FI, args);
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 1bbda5cbf09c..cb8eb8fa490c 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGObjCRuntime.h"
#include "TargetInfo.h"
@@ -52,15 +53,6 @@ static llvm::Constant *getThrowFn(CodeGenModule &CGM) {
return CGM.CreateRuntimeFunction(FTy, "__cxa_throw");
}
-static llvm::Constant *getReThrowFn(CodeGenModule &CGM) {
- // void __cxa_rethrow();
-
- llvm::FunctionType *FTy =
- llvm::FunctionType::get(CGM.VoidTy, /*IsVarArgs=*/false);
-
- return CGM.CreateRuntimeFunction(FTy, "__cxa_rethrow");
-}
-
static llvm::Constant *getGetExceptionPtrFn(CodeGenModule &CGM) {
// void *__cxa_get_exception_ptr(void*);
@@ -134,15 +126,17 @@ namespace {
// This function must have prototype void(void*).
const char *CatchallRethrowFn;
- static const EHPersonality &get(const LangOptions &Lang);
+ static const EHPersonality &get(CodeGenModule &CGM);
static const EHPersonality GNU_C;
static const EHPersonality GNU_C_SJLJ;
+ static const EHPersonality GNU_C_SEH;
static const EHPersonality GNU_ObjC;
static const EHPersonality GNUstep_ObjC;
static const EHPersonality GNU_ObjCXX;
static const EHPersonality NeXT_ObjC;
static const EHPersonality GNU_CPlusPlus;
static const EHPersonality GNU_CPlusPlus_SJLJ;
+ static const EHPersonality GNU_CPlusPlus_SEH;
};
}
@@ -150,28 +144,42 @@ const EHPersonality EHPersonality::GNU_C = { "__gcc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_C_SJLJ = { "__gcc_personality_sj0", nullptr };
const EHPersonality
+EHPersonality::GNU_C_SEH = { "__gcc_personality_seh0", nullptr };
+const EHPersonality
EHPersonality::NeXT_ObjC = { "__objc_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus = { "__gxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNU_CPlusPlus_SJLJ = { "__gxx_personality_sj0", nullptr };
const EHPersonality
+EHPersonality::GNU_CPlusPlus_SEH = { "__gxx_personality_seh0", nullptr };
+const EHPersonality
EHPersonality::GNU_ObjC = {"__gnu_objc_personality_v0", "objc_exception_throw"};
const EHPersonality
EHPersonality::GNU_ObjCXX = { "__gnustep_objcxx_personality_v0", nullptr };
const EHPersonality
EHPersonality::GNUstep_ObjC = { "__gnustep_objc_personality_v0", nullptr };
-static const EHPersonality &getCPersonality(const LangOptions &L) {
+/// On Win64, use libgcc's SEH personality function. We fall back to dwarf on
+/// other platforms, unless the user asked for SjLj exceptions.
+static bool useLibGCCSEHPersonality(const llvm::Triple &T) {
+ return T.isOSWindows() && T.getArch() == llvm::Triple::x86_64;
+}
+
+static const EHPersonality &getCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_C_SJLJ;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_C_SEH;
return EHPersonality::GNU_C;
}
-static const EHPersonality &getObjCPersonality(const LangOptions &L) {
+static const EHPersonality &getObjCPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
case ObjCRuntime::FragileMacOSX:
- return getCPersonality(L);
+ return getCPersonality(T, L);
case ObjCRuntime::MacOSX:
case ObjCRuntime::iOS:
return EHPersonality::NeXT_ObjC;
@@ -186,16 +194,19 @@ static const EHPersonality &getObjCPersonality(const LangOptions &L) {
llvm_unreachable("bad runtime kind");
}
-static const EHPersonality &getCXXPersonality(const LangOptions &L) {
+static const EHPersonality &getCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
if (L.SjLjExceptions)
return EHPersonality::GNU_CPlusPlus_SJLJ;
- else
- return EHPersonality::GNU_CPlusPlus;
+ else if (useLibGCCSEHPersonality(T))
+ return EHPersonality::GNU_CPlusPlus_SEH;
+ return EHPersonality::GNU_CPlusPlus;
}
/// Determines the personality function to use when both C++
/// and Objective-C exceptions are being caught.
-static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
+static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
+ const LangOptions &L) {
switch (L.ObjCRuntime.getKind()) {
// The ObjC personality defers to the C++ personality for non-ObjC
// handlers. Unlike the C++ case, we use the same personality
@@ -207,7 +218,7 @@ static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
// In the fragile ABI, just use C++ exception handling and hope
// they're not doing crazy exception mixing.
case ObjCRuntime::FragileMacOSX:
- return getCXXPersonality(L);
+ return getCXXPersonality(T, L);
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
@@ -220,15 +231,17 @@ static const EHPersonality &getObjCXXPersonality(const LangOptions &L) {
llvm_unreachable("bad runtime kind");
}
-const EHPersonality &EHPersonality::get(const LangOptions &L) {
+const EHPersonality &EHPersonality::get(CodeGenModule &CGM) {
+ const llvm::Triple &T = CGM.getTarget().getTriple();
+ const LangOptions &L = CGM.getLangOpts();
if (L.CPlusPlus && L.ObjC1)
- return getObjCXXPersonality(L);
+ return getObjCXXPersonality(T, L);
else if (L.CPlusPlus)
- return getCXXPersonality(L);
+ return getCXXPersonality(T, L);
else if (L.ObjC1)
- return getObjCPersonality(L);
+ return getObjCPersonality(T, L);
else
- return getCPersonality(L);
+ return getCPersonality(T, L);
}
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
@@ -305,8 +318,9 @@ void CodeGenModule::SimplifyPersonality() {
if (!LangOpts.ObjCRuntime.isNeXTFamily())
return;
- const EHPersonality &ObjCXX = EHPersonality::get(LangOpts);
- const EHPersonality &CXX = getCXXPersonality(LangOpts);
+ const EHPersonality &ObjCXX = EHPersonality::get(*this);
+ const EHPersonality &CXX =
+ getCXXPersonality(getTarget().getTriple(), LangOpts);
if (&ObjCXX == &CXX)
return;
@@ -403,14 +417,8 @@ llvm::Value *CodeGenFunction::getSelectorFromSlot() {
void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
bool KeepInsertionPoint) {
- if (CGM.getTarget().getTriple().isWindowsMSVCEnvironment()) {
- ErrorUnsupported(E, "throw expression");
- return;
- }
-
if (!E->getSubExpr()) {
- EmitNoreturnRuntimeCallOrInvoke(getReThrowFn(CGM),
- ArrayRef<llvm::Value*>());
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/true);
// throw is an expression, and the expression emitters expect us
// to leave ourselves at a valid insertion point.
@@ -420,6 +428,11 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
return;
}
+ if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) {
+ ErrorUnsupported(E, "throw expression");
+ return;
+ }
+
QualType ThrowType = E->getSubExpr()->getType();
if (ThrowType->isObjCObjectPointerType()) {
@@ -457,7 +470,7 @@ void CodeGenFunction::EmitCXXThrowExpr(const CXXThrowExpr *E,
CXXRecordDecl *Record = cast<CXXRecordDecl>(RecordTy->getDecl());
if (!Record->hasTrivialDestructor()) {
CXXDestructorDecl *DtorD = Record->getDestructor();
- Dtor = CGM.GetAddrOfCXXDestructor(DtorD, Dtor_Complete);
+ Dtor = CGM.getAddrOfCXXStructor(DtorD, StructorType::Complete);
Dtor = llvm::ConstantExpr::getBitCast(Dtor, Int8PtrTy);
}
}
@@ -576,7 +589,7 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
- if (CGM.getTarget().getTriple().isWindowsMSVCEnvironment()) {
+ if (CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment()) {
ErrorUnsupported(&S, "try statement");
return;
}
@@ -601,8 +614,9 @@ void CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// existing compilers do, and it's not clear that the standard
// personality routine is capable of doing this right. See C++ DR 388:
// http://www.open-std.org/jtc1/sc22/wg21/docs/cwg_active.html#388
- QualType CaughtType = C->getCaughtType();
- CaughtType = CaughtType.getNonReferenceType().getUnqualifiedType();
+ Qualifiers CaughtTypeQuals;
+ QualType CaughtType = CGM.getContext().getUnqualifiedArrayType(
+ C->getCaughtType().getNonReferenceType(), CaughtTypeQuals);
llvm::Constant *TypeInfo = nullptr;
if (CaughtType->isObjCObjectPointerType())
@@ -720,18 +734,16 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
// Save the current IR generation state.
CGBuilderTy::InsertPoint savedIP = Builder.saveAndClearIP();
- SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
- if (CGDebugInfo *DI = getDebugInfo())
- DI->EmitLocation(Builder, CurEHLocation);
+ ApplyDebugLocation AutoRestoreLocation(*this, CurEHLocation);
- const EHPersonality &personality = EHPersonality::get(getLangOpts());
+ const EHPersonality &personality = EHPersonality::get(CGM);
// Create and configure the landing pad.
llvm::BasicBlock *lpad = createBasicBlock("lpad");
EmitBlock(lpad);
llvm::LandingPadInst *LPadInst =
- Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr),
getOpaquePersonalityFn(CGM, personality), 0);
llvm::Value *LPadExn = Builder.CreateExtractValue(LPadInst, 0);
@@ -795,7 +807,7 @@ llvm::BasicBlock *CodeGenFunction::EmitLandingPad() {
}
// Check whether we already have a handler for this type.
- if (catchTypes.insert(handler.Type))
+ if (catchTypes.insert(handler.Type).second)
// If not, add it directly to the landingpad.
LPadInst->addClause(handler.Type);
}
@@ -1259,7 +1271,7 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S, bool IsFnTryBlock) {
// constructor function-try-block's catch handler (p14), so this
// really only applies to destructors.
if (doImplicitRethrow && HaveInsertPoint()) {
- EmitRuntimeCallOrInvoke(getReThrowFn(CGM));
+ CGM.getCXXABI().emitRethrow(*this, /*isNoReturn*/false);
Builder.CreateUnreachable();
Builder.ClearInsertionPoint();
}
@@ -1541,9 +1553,9 @@ llvm::BasicBlock *CodeGenFunction::getTerminateLandingPad() {
Builder.SetInsertPoint(TerminateLandingPad);
// Tell the backend that this is a landing pad.
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ const EHPersonality &Personality = EHPersonality::get(CGM);
llvm::LandingPadInst *LPadInst =
- Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, NULL),
+ Builder.CreateLandingPad(llvm::StructType::get(Int8PtrTy, Int32Ty, nullptr),
getOpaquePersonalityFn(CGM, Personality), 0);
LPadInst->addClause(getCatchAllValue(*this));
@@ -1600,7 +1612,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
EHResumeBlock = createBasicBlock("eh.resume");
Builder.SetInsertPoint(EHResumeBlock);
- const EHPersonality &Personality = EHPersonality::get(CGM.getLangOpts());
+ const EHPersonality &Personality = EHPersonality::get(CGM);
// This can always be a call because we necessarily didn't find
// anything on the EH stack which needs our help.
@@ -1619,7 +1631,7 @@ llvm::BasicBlock *CodeGenFunction::getEHResumeBlock(bool isCleanup) {
llvm::Value *Sel = getSelectorFromSlot();
llvm::Type *LPadType = llvm::StructType::get(Exn->getType(),
- Sel->getType(), NULL);
+ Sel->getType(), nullptr);
llvm::Value *LPadVal = llvm::UndefValue::get(LPadType);
LPadVal = Builder.CreateInsertValue(LPadVal, Exn, 0, "lpad.val");
LPadVal = Builder.CreateInsertValue(LPadVal, Sel, 1, "lpad.val");
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 512b323ba109..ce7679c836e4 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -16,14 +16,16 @@
#include "CGCall.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
+#include "CGOpenMPRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
-#include "clang/AST/DeclObjC.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
@@ -209,7 +211,6 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
case SD_Automatic:
case SD_FullExpression:
- assert(!ObjCARCReferenceLifetimeType->isArrayType());
CodeGenFunction::Destroyer *Destroy;
CleanupKind CleanupKind;
if (Lifetime == Qualifiers::OCL_Strong) {
@@ -267,8 +268,8 @@ pushTemporaryCleanup(CodeGenFunction &CGF, const MaterializeTemporaryExpr *M,
dyn_cast_or_null<VarDecl>(M->getExtendingDecl()));
CleanupArg = llvm::Constant::getNullValue(CGF.Int8PtrTy);
} else {
- CleanupFn =
- CGF.CGM.GetAddrOfCXXDestructor(ReferenceTemporaryDtor, Dtor_Complete);
+ CleanupFn = CGF.CGM.getAddrOfCXXStructor(ReferenceTemporaryDtor,
+ StructorType::Complete);
CleanupArg = cast<llvm::Constant>(ReferenceTemporary);
}
CGF.CGM.getCXXABI().registerGlobalDtor(
@@ -312,15 +313,16 @@ createReferenceTemporary(CodeGenFunction &CGF,
llvm_unreachable("unknown storage duration");
}
-LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
- const MaterializeTemporaryExpr *M) {
+LValue CodeGenFunction::
+EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
const Expr *E = M->GetTemporaryExpr();
+ // FIXME: ideally this would use EmitAnyExprToMem, however, we cannot do so
+ // as that will cause the lifetime adjustment to be lost for ARC
if (getLangOpts().ObjCAutoRefCount &&
M->getType()->isObjCLifetimeType() &&
M->getType().getObjCLifetime() != Qualifiers::OCL_None &&
M->getType().getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
- // FIXME: Fold this into the general case below.
llvm::Value *Object = createReferenceTemporary(*this, M, E);
LValue RefTempDst = MakeAddrLValue(Object, M->getType());
@@ -331,7 +333,21 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
Var->setInitializer(CGM.EmitNullConstant(E->getType()));
}
- EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+ switch (getEvaluationKind(E->getType())) {
+ default: llvm_unreachable("expected scalar or aggregate expression");
+ case TEK_Scalar:
+ EmitScalarInit(E, M->getExtendingDecl(), RefTempDst, false);
+ break;
+ case TEK_Aggregate: {
+ CharUnits Alignment = getContext().getTypeAlignInChars(E->getType());
+ EmitAggExpr(E, AggValueSlot::forAddr(Object, Alignment,
+ E->getType().getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased));
+ break;
+ }
+ }
pushTemporaryCleanup(*this, M, E, Object);
return RefTempDst;
@@ -341,8 +357,8 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
SmallVector<SubobjectAdjustment, 2> Adjustments;
E = E->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
- for (unsigned I = 0, N = CommaLHSs.size(); I != N; ++I)
- EmitIgnoredExpr(CommaLHSs[I]);
+ for (const auto &Ignored : CommaLHSs)
+ EmitIgnoredExpr(Ignored);
if (const auto *opaque = dyn_cast<OpaqueValueExpr>(E)) {
if (opaque->getType()->isRecordType()) {
@@ -376,7 +392,7 @@ LValue CodeGenFunction::EmitMaterializeTemporaryExpr(
GetAddressOfBaseClass(Object, Adjustment.DerivedToBase.DerivedClass,
Adjustment.DerivedToBase.BasePath->path_begin(),
Adjustment.DerivedToBase.BasePath->path_end(),
- /*NullCheckValue=*/ false);
+ /*NullCheckValue=*/ false, E->getExprLoc());
break;
case SubobjectAdjustment::FieldAdjustment: {
@@ -442,13 +458,15 @@ static llvm::Value *emitHash16Bytes(CGBuilderTy &Builder, llvm::Value *Low,
}
bool CodeGenFunction::sanitizePerformTypeCheck() const {
- return SanOpts->Null | SanOpts->Alignment | SanOpts->ObjectSize |
- SanOpts->Vptr;
+ return SanOpts.has(SanitizerKind::Null) |
+ SanOpts.has(SanitizerKind::Alignment) |
+ SanOpts.has(SanitizerKind::ObjectSize) |
+ SanOpts.has(SanitizerKind::Vptr);
}
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
- llvm::Value *Address,
- QualType Ty, CharUnits Alignment) {
+ llvm::Value *Address, QualType Ty,
+ CharUnits Alignment, bool SkipNullCheck) {
if (!sanitizePerformTypeCheck())
return;
@@ -460,26 +478,30 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
SanitizerScope SanScope(this);
- llvm::Value *Cond = nullptr;
+ SmallVector<std::pair<llvm::Value *, SanitizerKind>, 3> Checks;
llvm::BasicBlock *Done = nullptr;
- if (SanOpts->Null || TCK == TCK_DowncastPointer) {
+ bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
+ TCK == TCK_UpcastToVirtualBase;
+ if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
+ !SkipNullCheck) {
// The glvalue must not be an empty glvalue.
- Cond = Builder.CreateICmpNE(
+ llvm::Value *IsNonNull = Builder.CreateICmpNE(
Address, llvm::Constant::getNullValue(Address->getType()));
- if (TCK == TCK_DowncastPointer) {
- // When performing a pointer downcast, it's OK if the value is null.
+ if (AllowNullPointers) {
+ // When performing pointer casts, it's OK if the value is null.
// Skip the remaining checks in that case.
Done = createBasicBlock("null");
llvm::BasicBlock *Rest = createBasicBlock("not.null");
- Builder.CreateCondBr(Cond, Rest, Done);
+ Builder.CreateCondBr(IsNonNull, Rest, Done);
EmitBlock(Rest);
- Cond = nullptr;
+ } else {
+ Checks.push_back(std::make_pair(IsNonNull, SanitizerKind::Null));
}
}
- if (SanOpts->ObjectSize && !Ty->isIncompleteType()) {
+ if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
// The glvalue must refer to a large enough storage region.
@@ -493,12 +515,12 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *LargeEnough =
Builder.CreateICmpUGE(Builder.CreateCall2(F, CastAddr, Min),
llvm::ConstantInt::get(IntPtrTy, Size));
- Cond = Cond ? Builder.CreateAnd(Cond, LargeEnough) : LargeEnough;
+ Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
}
uint64_t AlignVal = 0;
- if (SanOpts->Alignment) {
+ if (SanOpts.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
@@ -510,18 +532,18 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
- Cond = Cond ? Builder.CreateAnd(Cond, Aligned) : Aligned;
+ Checks.push_back(std::make_pair(Aligned, SanitizerKind::Alignment));
}
}
- if (Cond) {
+ if (Checks.size() > 0) {
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty),
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Cond, "type_mismatch", StaticData, Address, CRK_Recoverable);
+ EmitCheck(Checks, "type_mismatch", StaticData, Address);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -533,9 +555,10 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// -- the [pointer or glvalue] is used to access a non-static data member
// or call a non-static member function
CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
- if (SanOpts->Vptr &&
+ if (SanOpts.has(SanitizerKind::Vptr) &&
(TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
- TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference) &&
+ TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
+ TCK == TCK_UpcastToVirtualBase) &&
RD && RD->hasDefinition() && RD->isDynamicClass()) {
// Compute a hash of the mangled name of the type.
//
@@ -548,7 +571,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
Out);
// Blacklist based on the mangled type.
- if (!CGM.getSanitizerBlacklist().isBlacklistedType(Out.str())) {
+ if (!CGM.getContext().getSanitizerBlacklist().isBlacklistedType(
+ Out.str())) {
llvm::hash_code TypeHash = hash_value(Out.str());
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
@@ -577,6 +601,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// hard work of checking whether the vptr is for an object of the right
// type. This will either fill in the cache and return, or produce a
// diagnostic.
+ llvm::Value *EqualHash = Builder.CreateICmpEQ(CacheVal, Hash);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty),
@@ -584,9 +609,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(Int8Ty, TCK)
};
llvm::Value *DynamicData[] = { Address, Hash };
- EmitCheck(Builder.CreateICmpEQ(CacheVal, Hash),
- "dynamic_type_cache_miss", StaticData, DynamicData,
- CRK_AlwaysRecoverable);
+ EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
+ "dynamic_type_cache_miss", StaticData, DynamicData);
}
}
@@ -654,7 +678,7 @@ static llvm::Value *getArrayIndexingBound(
void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
llvm::Value *Index, QualType IndexType,
bool Accessed) {
- assert(SanOpts->ArrayBounds &&
+ assert(SanOpts.has(SanitizerKind::ArrayBounds) &&
"should not be called unless adding bounds checks");
SanitizerScope SanScope(this);
@@ -674,7 +698,8 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
};
llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
: Builder.CreateICmpULE(IndexVal, BoundVal);
- EmitCheck(Check, "out_of_bounds", StaticData, Index, CRK_Recoverable);
+ EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
+ StaticData, Index);
}
@@ -711,7 +736,6 @@ EmitComplexPrePostIncDec(const UnaryOperator *E, LValue LV,
return isPre ? IncVal : InVal;
}
-
//===----------------------------------------------------------------------===//
// LValue Expression Emission
//===----------------------------------------------------------------------===//
@@ -757,7 +781,7 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV;
- if (SanOpts->ArrayBounds && isa<ArraySubscriptExpr>(E))
+ if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
else
LV = EmitLValue(E);
@@ -1130,8 +1154,11 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
CGM.DecorateInstruction(Load, TBAAPath, false/*ConvertTypeToTag*/);
}
- if ((SanOpts->Bool && hasBooleanRepresentation(Ty)) ||
- (SanOpts->Enum && Ty->getAs<EnumType>())) {
+ bool NeedsBoolCheck =
+ SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
+ bool NeedsEnumCheck =
+ SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
+ if (NeedsBoolCheck || NeedsEnumCheck) {
SanitizerScope SanScope(this);
llvm::APInt Min, End;
if (getRangeForType(*this, Ty, Min, End, true)) {
@@ -1151,8 +1178,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(llvm::Value *Addr, bool Volatile,
EmitCheckSourceLocation(Loc),
EmitCheckTypeDescriptor(Ty)
};
- EmitCheck(Check, "load_invalid_value", StaticArgs, EmitCheckValue(Load),
- CRK_Recoverable);
+ SanitizerKind Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
+ EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
+ EmitCheckValue(Load));
}
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
@@ -1361,12 +1389,34 @@ RValue CodeGenFunction::EmitLoadOfExtVectorElementLValue(LValue LV) {
return RValue::get(Vec);
}
+/// @brief Generates lvalue for partial ext_vector access.
+llvm::Value *CodeGenFunction::EmitExtVectorElementLValue(LValue LV) {
+ llvm::Value *VectorAddress = LV.getExtVectorAddr();
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ QualType EQT = ExprVT->getElementType();
+ llvm::Type *VectorElementTy = CGM.getTypes().ConvertType(EQT);
+ llvm::Type *VectorElementPtrToTy = VectorElementTy->getPointerTo();
+
+ llvm::Value *CastToPointerElement =
+ Builder.CreateBitCast(VectorAddress,
+ VectorElementPtrToTy, "conv.ptr.element");
+
+ const llvm::Constant *Elts = LV.getExtVectorElts();
+ unsigned ix = getAccessedFieldNo(0, Elts);
+
+ llvm::Value *VectorBasePtrPlusIx =
+ Builder.CreateInBoundsGEP(CastToPointerElement,
+ llvm::ConstantInt::get(SizeTy, ix), "add.ptr");
+
+ return VectorBasePtrPlusIx;
+}
+
/// @brief Load of global named registers are always calls to intrinsics.
RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
assert((LV.getType()->isIntegerType() || LV.getType()->isPointerType()) &&
"Bad type for register variable");
- llvm::MDNode *RegName = dyn_cast<llvm::MDNode>(LV.getGlobalReg());
- assert(RegName && "Register LValue is not metadata");
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(LV.getGlobalReg())->getMetadata());
// We accept integer and pointer types only
llvm::Type *OrigTy = CGM.getTypes().ConvertType(LV.getType());
@@ -1376,7 +1426,8 @@ RValue CodeGenFunction::EmitLoadOfGlobalRegLValue(LValue LV) {
llvm::Type *Types[] = { Ty };
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
- llvm::Value *Call = Builder.CreateCall(F, RegName);
+ llvm::Value *Call = Builder.CreateCall(
+ F, llvm::MetadataAsValue::get(Ty->getContext(), RegName));
if (OrigTy->isPointerTy())
Call = Builder.CreateIntToPtr(Call, OrigTy);
return RValue::get(Call);
@@ -1626,7 +1677,8 @@ void CodeGenFunction::EmitStoreThroughExtVectorComponentLValue(RValue Src,
void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
assert((Dst.getType()->isIntegerType() || Dst.getType()->isPointerType()) &&
"Bad type for register variable");
- llvm::MDNode *RegName = dyn_cast<llvm::MDNode>(Dst.getGlobalReg());
+ llvm::MDNode *RegName = cast<llvm::MDNode>(
+ cast<llvm::MetadataAsValue>(Dst.getGlobalReg())->getMetadata());
assert(RegName && "Register LValue is not metadata");
// We accept integer and pointer types only
@@ -1640,7 +1692,8 @@ void CodeGenFunction::EmitStoreThroughGlobalRegLValue(RValue Src, LValue Dst) {
llvm::Value *Value = Src.getScalarVal();
if (OrigTy->isPointerTy())
Value = Builder.CreatePtrToInt(Value, Ty);
- Builder.CreateCall2(F, RegName, Value);
+ Builder.CreateCall2(F, llvm::MetadataAsValue::get(Ty->getContext(), RegName),
+ Value);
}
// setObjCGCLValueClass - sets class of the lvalue for the purpose of
@@ -1751,12 +1804,21 @@ EmitBitCastOfLValueToProperType(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(V, IRType->getPointerTo(AS), Name);
}
+static LValue EmitThreadPrivateVarDeclLValue(
+ CodeGenFunction &CGF, const VarDecl *VD, QualType T, llvm::Value *V,
+ llvm::Type *RealVarTy, CharUnits Alignment, SourceLocation Loc) {
+ V = CGF.CGM.getOpenMPRuntime().getOMPAddrOfThreadPrivate(CGF, VD, V, Loc);
+ V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
+ return CGF.MakeAddrLValue(V, T, Alignment);
+}
+
static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
const Expr *E, const VarDecl *VD) {
QualType T = E->getType();
// If it's thread_local, emit a call to its wrapper function instead.
- if (VD->getTLSKind() == VarDecl::TLS_Dynamic)
+ if (VD->getTLSKind() == VarDecl::TLS_Dynamic &&
+ CGF.CGM.getCXXABI().usesThreadWrapperFunction())
return CGF.CGM.getCXXABI().EmitThreadLocalVarDeclLValue(CGF, VD, T);
llvm::Value *V = CGF.CGM.GetAddrOfGlobalVar(VD);
@@ -1764,6 +1826,11 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
V = EmitBitCastOfLValueToProperType(CGF, V, RealVarTy);
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
LValue LV;
+ // Emit reference to the private copy of the variable if it is an OpenMP
+ // threadprivate variable.
+ if (CGF.getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ return EmitThreadPrivateVarDeclLValue(CGF, VD, T, V, RealVarTy, Alignment,
+ E->getExprLoc());
if (VD->getType()->isReferenceType()) {
llvm::LoadInst *LI = CGF.Builder.CreateLoad(V);
LI->setAlignment(Alignment.getQuantity());
@@ -1821,10 +1888,12 @@ static LValue EmitGlobalNamedRegister(const VarDecl *VD,
if (M->getNumOperands() == 0) {
llvm::MDString *Str = llvm::MDString::get(CGM.getLLVMContext(),
Asm->getLabel());
- llvm::Value *Ops[] = { Str };
+ llvm::Metadata *Ops[] = {Str};
M->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
- return LValue::MakeGlobalReg(M->getOperand(0), VD->getType(), Alignment);
+ return LValue::MakeGlobalReg(
+ llvm::MetadataAsValue::get(CGM.getLLVMContext(), M->getOperand(0)),
+ VD->getType(), Alignment);
}
LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
@@ -1850,6 +1919,22 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
// FIXME: Eventually we will want to emit vector element references.
return MakeAddrLValue(Val, T, Alignment);
}
+
+ // Check for captured variables.
+ if (E->refersToEnclosingVariableOrCapture()) {
+ if (auto *FD = LambdaCaptureFields.lookup(VD))
+ return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
+ else if (CapturedStmtInfo) {
+ if (auto *V = LocalDeclMap.lookup(VD))
+ return MakeAddrLValue(V, T, Alignment);
+ else
+ return EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
+ CapturedStmtInfo->getContextValue());
+ }
+ assert(isa<BlockDecl>(CurCodeDecl));
+ return MakeAddrLValue(GetAddrOfBlockDecl(VD, VD->hasAttr<BlocksAttr>()),
+ T, Alignment);
+ }
}
// FIXME: We should be able to assert this for FunctionDecls as well!
@@ -1874,22 +1959,14 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
llvm::Value *V = LocalDeclMap.lookup(VD);
if (!V && VD->isStaticLocal())
- V = CGM.getStaticLocalDeclAddress(VD);
-
- // Use special handling for lambdas.
- if (!V) {
- if (FieldDecl *FD = LambdaCaptureFields.lookup(VD)) {
- return EmitCapturedFieldLValue(*this, FD, CXXABIThisValue);
- } else if (CapturedStmtInfo) {
- if (const FieldDecl *FD = CapturedStmtInfo->lookup(VD))
- return EmitCapturedFieldLValue(*this, FD,
- CapturedStmtInfo->getContextValue());
- }
+ V = CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
- assert(isa<BlockDecl>(CurCodeDecl) && E->refersToEnclosingLocal());
- return MakeAddrLValue(GetAddrOfBlockDecl(VD, isBlockVariable),
- T, Alignment);
- }
+ // Check if variable is threadprivate.
+ if (V && getLangOpts().OpenMP && VD->hasAttr<OMPThreadPrivateDeclAttr>())
+ return EmitThreadPrivateVarDeclLValue(
+ *this, VD, T, V, getTypes().ConvertTypeForMem(VD->getType()),
+ Alignment, E->getExprLoc());
assert(V && "DeclRefExpr not entered in LocalDeclMap?");
@@ -2001,86 +2078,21 @@ LValue CodeGenFunction::EmitObjCEncodeExprLValue(const ObjCEncodeExpr *E) {
E->getType());
}
-static void ConvertUTF8ToWideString(unsigned CharByteWidth, StringRef Source,
- SmallString<32>& Target) {
- Target.resize(CharByteWidth * (Source.size() + 1));
- char *ResultPtr = &Target[0];
- const UTF8 *ErrorPtr;
- bool success = ConvertUTF8toWide(CharByteWidth, Source, ResultPtr, ErrorPtr);
- (void)success;
- assert(success);
- Target.resize(ResultPtr - &Target[0]);
-}
-
LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
- switch (E->getIdentType()) {
- default:
- return EmitUnsupportedLValue(E, "predefined expression");
-
- case PredefinedExpr::Func:
- case PredefinedExpr::Function:
- case PredefinedExpr::LFunction:
- case PredefinedExpr::FuncDName:
- case PredefinedExpr::FuncSig:
- case PredefinedExpr::PrettyFunction: {
- PredefinedExpr::IdentType IdentType = E->getIdentType();
- std::string GVName;
-
- // FIXME: We should use the string literal mangling for the Microsoft C++
- // ABI so that strings get merged.
- switch (IdentType) {
- default: llvm_unreachable("Invalid type");
- case PredefinedExpr::Func: GVName = "__func__."; break;
- case PredefinedExpr::Function: GVName = "__FUNCTION__."; break;
- case PredefinedExpr::FuncDName: GVName = "__FUNCDNAME__."; break;
- case PredefinedExpr::FuncSig: GVName = "__FUNCSIG__."; break;
- case PredefinedExpr::LFunction: GVName = "L__FUNCTION__."; break;
- case PredefinedExpr::PrettyFunction: GVName = "__PRETTY_FUNCTION__."; break;
- }
-
- StringRef FnName = CurFn->getName();
- if (FnName.startswith("\01"))
- FnName = FnName.substr(1);
- GVName += FnName;
-
- // If this is outside of a function use the top level decl.
- const Decl *CurDecl = CurCodeDecl;
- if (!CurDecl || isa<VarDecl>(CurDecl))
- CurDecl = getContext().getTranslationUnitDecl();
-
- const Type *ElemType = E->getType()->getArrayElementTypeNoTypeQual();
- std::string FunctionName;
- if (isa<BlockDecl>(CurDecl)) {
- // Blocks use the mangled function name.
- // FIXME: ComputeName should handle blocks.
- FunctionName = FnName.str();
- } else if (isa<CapturedDecl>(CurDecl)) {
- // For a captured statement, the function name is its enclosing
- // function name not the one compiler generated.
- FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
- } else {
- FunctionName = PredefinedExpr::ComputeName(IdentType, CurDecl);
- assert(cast<ConstantArrayType>(E->getType())->getSize() - 1 ==
- FunctionName.size() &&
- "Computed __func__ length differs from type!");
- }
-
- llvm::Constant *C;
- if (ElemType->isWideCharType()) {
- SmallString<32> RawChars;
- ConvertUTF8ToWideString(
- getContext().getTypeSizeInChars(ElemType).getQuantity(), FunctionName,
- RawChars);
- StringLiteral *SL = StringLiteral::Create(
- getContext(), RawChars, StringLiteral::Wide,
- /*Pascal = */ false, E->getType(), E->getLocation());
- C = CGM.GetAddrOfConstantStringFromLiteral(SL);
- } else {
- C = CGM.GetAddrOfConstantCString(FunctionName, GVName.c_str(), 1);
- }
+ auto SL = E->getFunctionName();
+ assert(SL != nullptr && "No StringLiteral name in PredefinedExpr");
+ StringRef FnName = CurFn->getName();
+ if (FnName.startswith("\01"))
+ FnName = FnName.substr(1);
+ StringRef NameItems[] = {
+ PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
+ std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
+ if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str(), 1);
return MakeAddrLValue(C, E->getType());
}
- }
+ auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
+ return MakeAddrLValue(C, E->getType());
}
/// Emit a type description suitable for use by a runtime sanitizer library. The
@@ -2115,7 +2127,7 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
CGM.getDiags().ConvertArgToString(DiagnosticsEngine::ak_qualtype,
(intptr_t)T.getAsOpaquePtr(),
StringRef(), StringRef(), None, Buffer,
- ArrayRef<intptr_t>());
+ None);
llvm::Constant *Components[] = {
Builder.getInt16(TypeKind), Builder.getInt16(TypeInfo),
@@ -2127,7 +2139,7 @@ llvm::Constant *CodeGenFunction::EmitCheckTypeDescriptor(QualType T) {
CGM.getModule(), Descriptor->getType(),
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage, Descriptor);
GV->setUnnamedAddr(true);
- CGM.disableSanitizerForGlobal(GV);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(GV);
// Remember the descriptor for this type.
CGM.setTypeDescriptorInMap(T, GV);
@@ -2177,7 +2189,7 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
PresumedLoc PLoc = getContext().getSourceManager().getPresumedLoc(Loc);
if (PLoc.isValid()) {
auto FilenameGV = CGM.GetAddrOfConstantCString(PLoc.getFilename(), ".src");
- CGM.disableSanitizerForGlobal(FilenameGV);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(FilenameGV);
Filename = FilenameGV;
Line = PLoc.getLine();
Column = PLoc.getColumn();
@@ -2192,39 +2204,126 @@ llvm::Constant *CodeGenFunction::EmitCheckSourceLocation(SourceLocation Loc) {
return llvm::ConstantStruct::getAnon(Data);
}
-void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
- ArrayRef<llvm::Constant *> StaticArgs,
- ArrayRef<llvm::Value *> DynamicArgs,
- CheckRecoverableKind RecoverKind) {
- assert(SanOpts != &SanitizerOptions::Disabled);
- assert(IsSanitizerScope);
+namespace {
+/// \brief Specify under what conditions this check can be recovered
+enum class CheckRecoverableKind {
+ /// Always terminate program execution if this check fails.
+ Unrecoverable,
+ /// Check supports recovering, runtime has both fatal (noreturn) and
+ /// non-fatal handlers for this check.
+ Recoverable,
+ /// Runtime conditionally aborts, always need to support recovery.
+ AlwaysRecoverable
+};
+}
- if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
- assert (RecoverKind != CRK_AlwaysRecoverable &&
- "Runtime call required for AlwaysRecoverable kind!");
- return EmitTrapCheck(Checked);
+static CheckRecoverableKind getRecoverableKind(SanitizerKind Kind) {
+ switch (Kind) {
+ case SanitizerKind::Vptr:
+ return CheckRecoverableKind::AlwaysRecoverable;
+ case SanitizerKind::Return:
+ case SanitizerKind::Unreachable:
+ return CheckRecoverableKind::Unrecoverable;
+ default:
+ return CheckRecoverableKind::Recoverable;
}
+}
- llvm::BasicBlock *Cont = createBasicBlock("cont");
+static void emitCheckHandlerCall(CodeGenFunction &CGF,
+ llvm::FunctionType *FnType,
+ ArrayRef<llvm::Value *> FnArgs,
+ StringRef CheckName,
+ CheckRecoverableKind RecoverKind, bool IsFatal,
+ llvm::BasicBlock *ContBB) {
+ assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
+ bool NeedsAbortSuffix =
+ IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
+ std::string FnName = ("__ubsan_handle_" + CheckName +
+ (NeedsAbortSuffix ? "_abort" : "")).str();
+ bool MayReturn =
+ !IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
- llvm::BasicBlock *Handler = createBasicBlock("handler." + CheckName);
+ llvm::AttrBuilder B;
+ if (!MayReturn) {
+ B.addAttribute(llvm::Attribute::NoReturn)
+ .addAttribute(llvm::Attribute::NoUnwind);
+ }
+ B.addAttribute(llvm::Attribute::UWTable);
- llvm::Instruction *Branch = Builder.CreateCondBr(Checked, Cont, Handler);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
+ FnType, FnName,
+ llvm::AttributeSet::get(CGF.getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex, B));
+ llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
+ if (!MayReturn) {
+ HandlerCall->setDoesNotReturn();
+ CGF.Builder.CreateUnreachable();
+ } else {
+ CGF.Builder.CreateBr(ContBB);
+ }
+}
+void CodeGenFunction::EmitCheck(
+ ArrayRef<std::pair<llvm::Value *, SanitizerKind>> Checked,
+ StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ ArrayRef<llvm::Value *> DynamicArgs) {
+ assert(IsSanitizerScope);
+ assert(Checked.size() > 0);
+
+ llvm::Value *FatalCond = nullptr;
+ llvm::Value *RecoverableCond = nullptr;
+ for (int i = 0, n = Checked.size(); i < n; ++i) {
+ llvm::Value *Check = Checked[i].first;
+ llvm::Value *&Cond =
+ CGM.getCodeGenOpts().SanitizeRecover.has(Checked[i].second)
+ ? RecoverableCond
+ : FatalCond;
+ Cond = Cond ? Builder.CreateAnd(Cond, Check) : Check;
+ }
+
+ llvm::Value *JointCond;
+ if (FatalCond && RecoverableCond)
+ JointCond = Builder.CreateAnd(FatalCond, RecoverableCond);
+ else
+ JointCond = FatalCond ? FatalCond : RecoverableCond;
+ assert(JointCond);
+
+ CheckRecoverableKind RecoverKind = getRecoverableKind(Checked[0].second);
+ assert(SanOpts.has(Checked[0].second));
+#ifndef NDEBUG
+ for (int i = 1, n = Checked.size(); i < n; ++i) {
+ assert(RecoverKind == getRecoverableKind(Checked[i].second) &&
+ "All recoverable kinds in a single check must be same!");
+ assert(SanOpts.has(Checked[i].second));
+ }
+#endif
+
+ if (CGM.getCodeGenOpts().SanitizeUndefinedTrapOnError) {
+ assert(RecoverKind != CheckRecoverableKind::AlwaysRecoverable &&
+ "Runtime call required for AlwaysRecoverable kind!");
+ // Assume that -fsanitize-undefined-trap-on-error overrides
+ // -fsanitize-recover= options, as we can only print meaningful error
+ // message and recover if we have a runtime support.
+ return EmitTrapCheck(JointCond);
+ }
+
+ llvm::BasicBlock *Cont = createBasicBlock("cont");
+ llvm::BasicBlock *Handlers = createBasicBlock("handler." + CheckName);
+ llvm::Instruction *Branch = Builder.CreateCondBr(JointCond, Cont, Handlers);
// Give hint that we very much don't expect to execute the handler
// Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
llvm::MDBuilder MDHelper(getLLVMContext());
llvm::MDNode *Node = MDHelper.createBranchWeights((1U << 20) - 1, 1);
Branch->setMetadata(llvm::LLVMContext::MD_prof, Node);
+ EmitBlock(Handlers);
- EmitBlock(Handler);
-
+ // Emit handler arguments and create handler function type.
llvm::Constant *Info = llvm::ConstantStruct::getAnon(StaticArgs);
auto *InfoPtr =
new llvm::GlobalVariable(CGM.getModule(), Info->getType(), false,
llvm::GlobalVariable::PrivateLinkage, Info);
InfoPtr->setUnnamedAddr(true);
- CGM.disableSanitizerForGlobal(InfoPtr);
+ CGM.getSanitizerMetadata()->disableSanitizerForGlobal(InfoPtr);
SmallVector<llvm::Value *, 4> Args;
SmallVector<llvm::Type *, 4> ArgTypes;
@@ -2241,34 +2340,27 @@ void CodeGenFunction::EmitCheck(llvm::Value *Checked, StringRef CheckName,
ArgTypes.push_back(IntPtrTy);
}
- bool Recover = RecoverKind == CRK_AlwaysRecoverable ||
- (RecoverKind == CRK_Recoverable &&
- CGM.getCodeGenOpts().SanitizeRecover);
-
llvm::FunctionType *FnType =
llvm::FunctionType::get(CGM.VoidTy, ArgTypes, false);
- llvm::AttrBuilder B;
- if (!Recover) {
- B.addAttribute(llvm::Attribute::NoReturn)
- .addAttribute(llvm::Attribute::NoUnwind);
- }
- B.addAttribute(llvm::Attribute::UWTable);
- // Checks that have two variants use a suffix to differentiate them
- bool NeedsAbortSuffix = RecoverKind != CRK_Unrecoverable &&
- !CGM.getCodeGenOpts().SanitizeRecover;
- std::string FunctionName = ("__ubsan_handle_" + CheckName +
- (NeedsAbortSuffix? "_abort" : "")).str();
- llvm::Value *Fn = CGM.CreateRuntimeFunction(
- FnType, FunctionName,
- llvm::AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::FunctionIndex, B));
- llvm::CallInst *HandlerCall = EmitNounwindRuntimeCall(Fn, Args);
- if (Recover) {
- Builder.CreateBr(Cont);
+ if (!FatalCond || !RecoverableCond) {
+ // Simple case: we need to generate a single handler call, either
+ // fatal, or non-fatal.
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
+ (FatalCond != nullptr), Cont);
} else {
- HandlerCall->setDoesNotReturn();
- Builder.CreateUnreachable();
+ // Emit two handler calls: first one for set of unrecoverable checks,
+ // another one for recoverable.
+ llvm::BasicBlock *NonFatalHandlerBB =
+ createBasicBlock("non_fatal." + CheckName);
+ llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
+ Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
+ EmitBlock(FatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
+ NonFatalHandlerBB);
+ EmitBlock(NonFatalHandlerBB);
+ emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
+ Cont);
}
EmitBlock(Cont);
@@ -2318,12 +2410,13 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
QualType IdxTy = E->getIdx()->getType();
bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
- if (SanOpts->ArrayBounds)
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
- if (E->getBase()->getType()->isVectorType()) {
+ if (E->getBase()->getType()->isVectorType() &&
+ !isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
@@ -2339,8 +2432,17 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// size is a VLA or Objective-C interface.
llvm::Value *Address = nullptr;
CharUnits ArrayAlignment;
- if (const VariableArrayType *vla =
- getContext().getAsVariableArrayType(E->getType())) {
+ if (isa<ExtVectorElementExpr>(E->getBase())) {
+ LValue LV = EmitLValue(E->getBase());
+ Address = EmitExtVectorElementLValue(LV);
+ Address = Builder.CreateInBoundsGEP(Address, Idx, "arrayidx");
+ const VectorType *ExprVT = LV.getType()->getAs<VectorType>();
+ QualType EQT = ExprVT->getElementType();
+ return MakeAddrLValue(Address, EQT,
+ getContext().getTypeAlignInChars(EQT));
+ }
+ else if (const VariableArrayType *vla =
+ getContext().getAsVariableArrayType(E->getType())) {
// The base must be a pointer, which is not an aggregate. Emit
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
@@ -2879,10 +2981,9 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
llvm::Value *This = LV.getAddress();
// Perform the derived-to-base conversion
- llvm::Value *Base =
- GetAddressOfBaseClass(This, DerivedClassDecl,
- E->path_begin(), E->path_end(),
- /*NullCheckValue=*/false);
+ llvm::Value *Base = GetAddressOfBaseClass(
+ This, DerivedClassDecl, E->path_begin(), E->path_end(),
+ /*NullCheckValue=*/false, E->getExprLoc());
return MakeAddrLValue(Base, E->getType());
}
@@ -2958,18 +3059,15 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
ReturnValueSlot ReturnValue) {
- if (CGDebugInfo *DI = getDebugInfo()) {
- SourceLocation Loc = E->getLocStart();
- // Force column info to be generated so we can differentiate
- // multiple call sites on the same line in the debug info.
- // FIXME: This is insufficient. Two calls coming from the same macro
- // expansion will still get the same line/column and break debug info. It's
- // possible that LLVM can be fixed to not rely on this uniqueness, at which
- // point this workaround can be removed.
- const FunctionDecl* Callee = E->getDirectCallee();
- bool ForceColumnInfo = Callee && Callee->isInlineSpecified();
- DI->EmitLocation(Builder, Loc, ForceColumnInfo);
- }
+ // Force column info to be generated so we can differentiate
+ // multiple call sites on the same line in the debug info.
+ // FIXME: This is insufficient. Two calls coming from the same macro
+ // expansion will still get the same line/column and break debug info. It's
+ // possible that LLVM can be fixed to not rely on this uniqueness, at which
+ // point this workaround can be removed.
+ ApplyDebugLocation DL(*this, E->getLocStart(),
+ E->getDirectCallee() &&
+ E->getDirectCallee()->isInlineSpecified());
// Builtins never have block type.
if (E->getCallee()->getType()->isBlockPointerType())
@@ -2984,7 +3082,7 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
const Decl *TargetDecl = E->getCalleeDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
if (unsigned builtinID = FD->getBuiltinID())
- return EmitBuiltinExpr(FD, builtinID, E);
+ return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
}
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
@@ -3046,8 +3144,8 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
}
llvm::Value *Callee = EmitScalarExpr(E->getCallee());
- return EmitCall(E->getCallee()->getType(), Callee, E->getLocStart(),
- ReturnValue, E->arg_begin(), E->arg_end(), TargetDecl);
+ return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
+ TargetDecl);
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
@@ -3218,11 +3316,8 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
}
RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
- SourceLocation CallLoc,
- ReturnValueSlot ReturnValue,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd,
- const Decl *TargetDecl) {
+ const CallExpr *E, ReturnValueSlot ReturnValue,
+ const Decl *TargetDecl, llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
@@ -3243,7 +3338,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (const FunctionDecl* FD = dyn_cast_or_null<const FunctionDecl>(TargetDecl))
ForceColumnInfo = FD->isInlineSpecified();
- if (getLangOpts().CPlusPlus && SanOpts->Function &&
+ if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
if (llvm::Constant *PrefixSig =
CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
@@ -3275,14 +3370,11 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::Value *CalleeRTTIMatch =
Builder.CreateICmpEQ(CalleeRTTI, FTRTTIConst);
llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(CallLoc),
+ EmitCheckSourceLocation(E->getLocStart()),
EmitCheckTypeDescriptor(CalleeType)
};
- EmitCheck(CalleeRTTIMatch,
- "function_type_mismatch",
- StaticData,
- Callee,
- CRK_Recoverable);
+ EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
+ "function_type_mismatch", StaticData, Callee);
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -3290,11 +3382,15 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
}
CallArgList Args;
- EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), ArgBeg, ArgEnd,
+ if (Chain)
+ Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
+ CGM.getContext().VoidPtrTy);
+ EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arg_begin(),
+ E->arg_end(), E->getDirectCallee(), /*ParamsToSkip*/ 0,
ForceColumnInfo);
- const CGFunctionInfo &FnInfo =
- CGM.getTypes().arrangeFreeFunctionCall(Args, FnType);
+ const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
+ Args, FnType, /*isChainCall=*/Chain);
// C99 6.5.2.2p6:
// If the expression that denotes the called function has a type
@@ -3313,7 +3409,10 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
// through an unprototyped function type works like a *non-variadic*
// call. The way we make this work is to cast to the exact type
// of the promoted arguments.
- if (isa<FunctionNoProtoType>(FnType)) {
+ //
+ // Chain calls use this same code path to add the invisible chain parameter
+ // to the function type.
+ if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 7aacee4d6ba1..6d63b3ae9cf8 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -24,29 +24,28 @@
using namespace clang;
using namespace CodeGen;
-RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
- SourceLocation CallLoc,
- llvm::Value *Callee,
- ReturnValueSlot ReturnValue,
- llvm::Value *This,
- llvm::Value *ImplicitParam,
- QualType ImplicitParamTy,
- CallExpr::const_arg_iterator ArgBeg,
- CallExpr::const_arg_iterator ArgEnd) {
+static RequiredArgs commonEmitCXXMemberOrOperatorCall(
+ CodeGenFunction &CGF, const CXXMethodDecl *MD, llvm::Value *Callee,
+ ReturnValueSlot ReturnValue, llvm::Value *This, llvm::Value *ImplicitParam,
+ QualType ImplicitParamTy, const CallExpr *CE, CallArgList &Args) {
+ assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
+ isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
- "Trying to emit a member call expr on a static method!");
+ "Trying to emit a member or operator call expr on a static method!");
// C++11 [class.mfct.non-static]p2:
// If a non-static member function of a class X is called for an object that
// is not of type X, or of a type derived from X, the behavior is undefined.
- EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
- : TCK_MemberCall,
- CallLoc, This, getContext().getRecordType(MD->getParent()));
-
- CallArgList Args;
+ SourceLocation CallLoc;
+ if (CE)
+ CallLoc = CE->getExprLoc();
+ CGF.EmitTypeCheck(
+ isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
// Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(getContext()));
+ Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -55,14 +54,45 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size());
-
+
// And the rest of the call args.
- EmitCallArgs(Args, FPT, ArgBeg, ArgEnd);
+ if (CE) {
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ CGF.EmitCallArgs(Args, FPT, CE->arg_begin() + ArgsToSkip, CE->arg_end(),
+ CE->getDirectCallee());
+ } else {
+ assert(
+ FPT->getNumParams() == 0 &&
+ "No CallExpr specified for function with non-zero number of arguments");
+ }
+ return required;
+}
+RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE) {
+ const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
+ CallArgList Args;
+ RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
+ *this, MD, Callee, ReturnValue, This, ImplicitParam, ImplicitParamTy, CE,
+ Args);
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args, MD);
}
+RValue CodeGenFunction::EmitCXXStructorCall(
+ const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
+ const CallExpr *CE, StructorType Type) {
+ CallArgList Args;
+ commonEmitCXXMemberOrOperatorCall(*this, MD, Callee, ReturnValue, This,
+ ImplicitParam, ImplicitParamTy, CE, Args);
+ return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(MD, Type),
+ Callee, ReturnValue, Args, MD);
+}
+
static CXXRecordDecl *getCXXRecord(const Expr *E) {
QualType T = E->getType();
if (const PointerType *PTy = T->getAs<PointerType>())
@@ -86,14 +116,27 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee,
- CE->getLocStart(), ReturnValue, CE->arg_begin(),
- CE->arg_end());
+ return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ ReturnValue);
}
- // Compute the object pointer.
+ bool HasQualifier = ME->hasQualifier();
+ NestedNameSpecifier *Qualifier = HasQualifier ? ME->getQualifier() : nullptr;
+ bool IsArrow = ME->isArrow();
const Expr *Base = ME->getBase();
- bool CanUseVirtualCall = MD->isVirtual() && !ME->hasQualifier();
+
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ CE, MD, ReturnValue, HasQualifier, Qualifier, IsArrow, Base);
+}
+
+RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
+ const CallExpr *CE, const CXXMethodDecl *MD, ReturnValueSlot ReturnValue,
+ bool HasQualifier, NestedNameSpecifier *Qualifier, bool IsArrow,
+ const Expr *Base) {
+ assert(isa<CXXMemberCallExpr>(CE) || isa<CXXOperatorCallExpr>(CE));
+
+ // Compute the object pointer.
+ bool CanUseVirtualCall = MD->isVirtual() && !HasQualifier;
const CXXMethodDecl *DevirtualizedMethod = nullptr;
if (CanUseVirtualCall && CanDevirtualizeMemberFunctionCall(Base, MD)) {
@@ -102,7 +145,15 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
assert(DevirtualizedMethod);
const CXXRecordDecl *DevirtualizedClass = DevirtualizedMethod->getParent();
const Expr *Inner = Base->ignoreParenBaseCasts();
- if (getCXXRecord(Inner) == DevirtualizedClass)
+ if (DevirtualizedMethod->getReturnType().getCanonicalType() !=
+ MD->getReturnType().getCanonicalType())
+ // If the return types are not the same, this might be a case where more
+ // code needs to run to compensate for it. For example, the derived
+ // method might return a type that inherits form from the return
+ // type of MD and has a prefix.
+ // For now we just avoid devirtualizing these covariant cases.
+ DevirtualizedMethod = nullptr;
+ else if (getCXXRecord(Inner) == DevirtualizedClass)
// If the class of the Inner expression is where the dynamic method
// is defined, build the this pointer from it.
Base = Inner;
@@ -113,19 +164,10 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
// we don't have support for that yet, so do a virtual call.
DevirtualizedMethod = nullptr;
}
- // If the return types are not the same, this might be a case where more
- // code needs to run to compensate for it. For example, the derived
- // method might return a type that inherits form from the return
- // type of MD and has a prefix.
- // For now we just avoid devirtualizing these covariant cases.
- if (DevirtualizedMethod &&
- DevirtualizedMethod->getReturnType().getCanonicalType() !=
- MD->getReturnType().getCanonicalType())
- DevirtualizedMethod = nullptr;
}
llvm::Value *This;
- if (ME->isArrow())
+ if (IsArrow)
This = EmitScalarExpr(Base);
else
This = EmitLValue(Base).getAddress();
@@ -137,34 +179,40 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
cast<CXXConstructorDecl>(MD)->isDefaultConstructor())
return RValue::get(nullptr);
- if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
- // We don't like to generate the trivial copy/move assignment operator
- // when it isn't necessary; just produce the proper effect here.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitAggregateAssign(This, RHS, CE->getType());
- return RValue::get(This);
- }
-
- if (isa<CXXConstructorDecl>(MD) &&
- cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
- // Trivial move and copy ctor are the same.
- llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
- EmitSynthesizedCXXCopyCtorCall(cast<CXXConstructorDecl>(MD), This, RHS,
- CE->arg_begin(), CE->arg_end());
- return RValue::get(This);
+ if (!MD->getParent()->mayInsertExtraPadding()) {
+ if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
+ // We don't like to generate the trivial copy/move assignment operator
+ // when it isn't necessary; just produce the proper effect here.
+ // Special case: skip first argument of CXXOperatorCall (it is "this").
+ unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
+ llvm::Value *RHS =
+ EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
+ EmitAggregateAssign(This, RHS, CE->getType());
+ return RValue::get(This);
+ }
+
+ if (isa<CXXConstructorDecl>(MD) &&
+ cast<CXXConstructorDecl>(MD)->isCopyOrMoveConstructor()) {
+ // Trivial move and copy ctor are the same.
+ assert(CE->getNumArgs() == 1 && "unexpected argcount for trivial ctor");
+ llvm::Value *RHS = EmitLValue(*CE->arg_begin()).getAddress();
+ EmitAggregateCopy(This, RHS, CE->arg_begin()->getType());
+ return RValue::get(This);
+ }
+ llvm_unreachable("unknown trivial member function");
}
- llvm_unreachable("unknown trivial member function");
}
// Compute the function type we're calling.
- const CXXMethodDecl *CalleeDecl = DevirtualizedMethod ? DevirtualizedMethod : MD;
+ const CXXMethodDecl *CalleeDecl =
+ DevirtualizedMethod ? DevirtualizedMethod : MD;
const CGFunctionInfo *FInfo = nullptr;
- if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
- FInfo = &CGM.getTypes().arrangeCXXDestructor(Dtor,
- Dtor_Complete);
- else if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
- FInfo = &CGM.getTypes().arrangeCXXConstructorDeclaration(Ctor,
- Ctor_Complete);
+ if (const auto *Dtor = dyn_cast<CXXDestructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Dtor, StructorType::Complete);
+ else if (const auto *Ctor = dyn_cast<CXXConstructorDecl>(CalleeDecl))
+ FInfo = &CGM.getTypes().arrangeCXXStructorDeclaration(
+ Ctor, StructorType::Complete);
else
FInfo = &CGM.getTypes().arrangeCXXMethodDeclaration(CalleeDecl);
@@ -184,22 +232,21 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
"Destructor shouldn't have explicit parameters");
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
- CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
- CE->getExprLoc(), This);
+ CGM.getCXXABI().EmitVirtualDestructorCall(
+ *this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
- if (getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete, FInfo, Ty);
+ Callee =
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
}
- EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(), nullptr,nullptr);
+ EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ /*ImplicitParam=*/nullptr, QualType(), CE);
}
return RValue::get(nullptr);
}
@@ -209,10 +256,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty);
} else {
- if (getLangOpts().AppleKext &&
- MD->isVirtual() &&
- ME->hasQualifier())
- Callee = BuildAppleKextVirtualCall(MD, ME->getQualifier(), Ty);
+ if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
+ Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
Callee = CGM.GetAddrOfFunction(MD, Ty);
else {
@@ -225,9 +270,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
*this, MD, This, UseVirtualCall);
}
- return EmitCXXMemberCall(MD, CE->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(),
- CE->arg_begin(), CE->arg_end());
+ return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This,
+ /*ImplicitParam=*/nullptr, QualType(), CE);
}
RValue
@@ -275,7 +319,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, 1);
// And the rest of the call args
- EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end());
+ EmitCallArgs(Args, FPT, E->arg_begin(), E->arg_end(), E->getDirectCallee());
return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
Callee, ReturnValue, Args);
}
@@ -286,21 +330,9 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
- LValue LV = EmitLValue(E->getArg(0));
- llvm::Value *This = LV.getAddress();
-
- if ((MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) &&
- MD->isTrivial()) {
- llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
- QualType Ty = E->getType();
- EmitAggregateAssign(This, Src, Ty);
- return RValue::get(This);
- }
-
- llvm::Value *Callee = EmitCXXOperatorMemberCallee(E, MD, This);
- return EmitCXXMemberCall(MD, E->getExprLoc(), Callee, ReturnValue, This,
- /*ImplicitParam=*/nullptr, QualType(),
- E->arg_begin() + 1, E->arg_end());
+ return EmitCXXMemberOrOperatorMemberCallExpr(
+ E, MD, ReturnValue, /*HasQualifier=*/false, /*Qualifier=*/nullptr,
+ /*IsArrow=*/false, E->getArg(0));
}
RValue CodeGenFunction::EmitCUDAKernelCallExpr(const CUDAKernelCallExpr *E,
@@ -392,8 +424,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
if (const ConstantArrayType *arrayType
= getContext().getAsConstantArrayType(E->getType())) {
- EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
+ EmitCXXAggrConstructorCall(CD, arrayType, Dest.getAddr(), E);
} else {
CXXCtorType Type = Ctor_Complete;
bool ForVirtualBase = false;
@@ -420,7 +451,7 @@ CodeGenFunction::EmitCXXConstructExpr(const CXXConstructExpr *E,
// Call the constructor.
EmitCXXConstructorCall(CD, Type, ForVirtualBase, Delegating, Dest.getAddr(),
- E->arg_begin(), E->arg_end());
+ E);
}
}
@@ -445,7 +476,7 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtor(llvm::Value *Dest,
assert(!getContext().getAsConstantArrayType(E->getType())
&& "EmitSynthesizedCXXCopyCtor - Copied-in Array");
- EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E->arg_begin(), E->arg_end());
+ EmitSynthesizedCXXCopyCtorCall(CD, Dest, Src, E);
}
static CharUnits CalculateCookiePadding(CodeGenFunction &CGF,
@@ -726,9 +757,8 @@ static void StoreAnyExprIntoOneUnit(CodeGenFunction &CGF, const Expr *Init,
CharUnits Alignment = CGF.getContext().getTypeAlignInChars(AllocType);
switch (CGF.getEvaluationKind(AllocType)) {
case TEK_Scalar:
- CGF.EmitScalarInit(Init, nullptr, CGF.MakeAddrLValue(NewPtr, AllocType,
- Alignment),
- false);
+ CGF.EmitScalarInit(Init, nullptr,
+ CGF.MakeAddrLValue(NewPtr, AllocType, Alignment), false);
return;
case TEK_Complex:
CGF.EmitComplexExprIntoLValue(Init, CGF.MakeAddrLValue(NewPtr, AllocType,
@@ -895,8 +925,7 @@ CodeGenFunction::EmitNewArrayInitializer(const CXXNewExpr *E,
NumElements = Builder.CreateSub(
NumElements,
llvm::ConstantInt::get(NumElements->getType(), InitListElements));
- EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr,
- CCE->arg_begin(), CCE->arg_end(),
+ EmitCXXAggrConstructorCall(Ctor, NumElements, CurPtr, CCE,
CCE->requiresZeroInitialization());
return;
}
@@ -987,6 +1016,7 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
llvm::Value *NewPtr,
llvm::Value *NumElements,
llvm::Value *AllocSizeWithoutCookie) {
+ ApplyDebugLocation DL(CGF, E->getStartLoc());
if (E->isArray())
CGF.EmitNewArrayInitializer(E, ElementType, NewPtr, NumElements,
AllocSizeWithoutCookie);
@@ -1003,9 +1033,9 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
llvm::Instruction *CallOrInvoke;
llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
RValue RV =
- CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(Args, CalleeType),
- CalleeAddr, ReturnValueSlot(), Args,
- Callee, &CallOrInvoke);
+ CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Args, CalleeType, /*chainCall=*/false),
+ CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
@@ -1226,15 +1256,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
-
+
allocatorArgs.add(RValue::get(allocSize), sizeType);
// We start at 1 here because the first argument (the allocation size)
// has already been emitted.
- EmitCallArgs(allocatorArgs, allocatorType->isVariadic(),
- allocatorType->param_type_begin() + 1,
- allocatorType->param_type_end(), E->placement_arg_begin(),
- E->placement_arg_end());
+ EmitCallArgs(allocatorArgs, allocatorType, E->placement_arg_begin(),
+ E->placement_arg_end(), /* CalleeDecl */ nullptr,
+ /*ParamsToSkip*/ 1);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1386,12 +1415,19 @@ namespace {
};
}
+void
+CodeGenFunction::pushCallObjectDeleteCleanup(const FunctionDecl *OperatorDelete,
+ llvm::Value *CompletePtr,
+ QualType ElementType) {
+ EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup, CompletePtr,
+ OperatorDelete, ElementType);
+}
+
/// Emit the code for deleting a single object.
static void EmitObjectDelete(CodeGenFunction &CGF,
- const FunctionDecl *OperatorDelete,
+ const CXXDeleteExpr *DE,
llvm::Value *Ptr,
- QualType ElementType,
- bool UseGlobalDelete) {
+ QualType ElementType) {
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
@@ -1401,29 +1437,8 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
Dtor = RD->getDestructor();
if (Dtor->isVirtual()) {
- if (UseGlobalDelete) {
- // If we're supposed to call the global delete, make sure we do so
- // even if the destructor throws.
-
- // Derive the complete-object pointer, which is what we need
- // to pass to the deallocation function.
- llvm::Value *completePtr =
- CGF.CGM.getCXXABI().adjustToCompleteObject(CGF, Ptr, ElementType);
-
- CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
- completePtr, OperatorDelete,
- ElementType);
- }
-
- // FIXME: Provide a source location here.
- CXXDtorType DtorType = UseGlobalDelete ? Dtor_Complete : Dtor_Deleting;
- CGF.CGM.getCXXABI().EmitVirtualDestructorCall(CGF, Dtor, DtorType,
- SourceLocation(), Ptr);
-
- if (UseGlobalDelete) {
- CGF.PopCleanupBlock();
- }
-
+ CGF.CGM.getCXXABI().emitVirtualObjectDelete(CGF, DE, Ptr, ElementType,
+ Dtor);
return;
}
}
@@ -1432,6 +1447,7 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
// Make sure that we call delete even if the dtor throws.
// This doesn't have to a conditional cleanup because we're going
// to pop it off in a second.
+ const FunctionDecl *OperatorDelete = DE->getOperatorDelete();
CGF.EHStack.pushCleanup<CallObjectDelete>(NormalAndEHCleanup,
Ptr, OperatorDelete, ElementType);
@@ -1608,8 +1624,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
if (E->isArrayForm()) {
EmitArrayDelete(*this, E, Ptr, DeleteTy);
} else {
- EmitObjectDelete(*this, E->getOperatorDelete(), Ptr, DeleteTy,
- E->isGlobalDelete());
+ EmitObjectDelete(*this, E, Ptr, DeleteTy);
}
EmitBlock(DeleteEnd);
@@ -1800,19 +1815,23 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *Value,
void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
RunCleanupsScope Scope(*this);
- LValue SlotLV = MakeAddrLValue(Slot.getAddr(), E->getType(),
- Slot.getAlignment());
+ LValue SlotLV =
+ MakeAddrLValue(Slot.getAddr(), E->getType(), Slot.getAlignment());
CXXRecordDecl::field_iterator CurField = E->getLambdaClass()->field_begin();
for (LambdaExpr::capture_init_iterator i = E->capture_init_begin(),
e = E->capture_init_end();
i != e; ++i, ++CurField) {
// Emit initialization
-
LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
- ArrayRef<VarDecl *> ArrayIndexes;
- if (CurField->getType()->isArrayType())
- ArrayIndexes = E->getCaptureInitIndexVars(i);
- EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ if (CurField->hasCapturedVLAType()) {
+ auto VAT = CurField->getCapturedVLAType();
+ EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
+ } else {
+ ArrayRef<VarDecl *> ArrayIndexes;
+ if (CurField->getType()->isArrayType())
+ ArrayIndexes = E->getCaptureInitIndexVars(i);
+ EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ }
}
}
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 7244b9e4d1eb..1580bbe6a294 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -15,9 +15,13 @@
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
+#include "llvm/IR/Instructions.h"
+#include "llvm/IR/MDBuilder.h"
+#include "llvm/IR/Metadata.h"
#include <algorithm>
using namespace clang;
using namespace CodeGen;
@@ -142,7 +146,7 @@ public:
// FIXME: CompoundLiteralExpr
- ComplexPairTy EmitCast(CastExpr::CastKind CK, Expr *Op, QualType DestTy);
+ ComplexPairTy EmitCast(CastKind CK, Expr *Op, QualType DestTy);
ComplexPairTy VisitImplicitCastExpr(ImplicitCastExpr *E) {
// Unlike for scalars, we don't have to worry about function->ptr demotion
// here.
@@ -230,6 +234,9 @@ public:
ComplexPairTy EmitBinMul(const BinOpInfo &Op);
ComplexPairTy EmitBinDiv(const BinOpInfo &Op);
+ ComplexPairTy EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op);
+
ComplexPairTy VisitBinAdd(const BinaryOperator *E) {
return EmitBinAdd(EmitBinOps(E));
}
@@ -326,8 +333,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
/// EmitStoreOfComplex - Store the specified real/imag parts into the
/// specified value pointer.
-void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val,
- LValue lvalue,
+void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
bool isInit) {
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
@@ -410,7 +416,7 @@ ComplexPairTy ComplexExprEmitter::EmitScalarToComplexCast(llvm::Value *Val,
return ComplexPairTy(Val, llvm::Constant::getNullValue(Val->getType()));
}
-ComplexPairTy ComplexExprEmitter::EmitCast(CastExpr::CastKind CK, Expr *Op,
+ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
QualType DestTy) {
switch (CK) {
case CK_Dependent: llvm_unreachable("dependent cast kind in IR gen!");
@@ -528,9 +534,15 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
if (Op.LHS.first->getType()->isFloatingPointTy()) {
ResR = Builder.CreateFAdd(Op.LHS.first, Op.RHS.first, "add.r");
- ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFAdd(Op.LHS.second, Op.RHS.second, "add.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second : Op.RHS.second;
+ assert(ResI && "Only one operand may be real!");
} else {
ResR = Builder.CreateAdd(Op.LHS.first, Op.RHS.first, "add.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
ResI = Builder.CreateAdd(Op.LHS.second, Op.RHS.second, "add.i");
}
return ComplexPairTy(ResR, ResI);
@@ -539,63 +551,222 @@ ComplexPairTy ComplexExprEmitter::EmitBinAdd(const BinOpInfo &Op) {
ComplexPairTy ComplexExprEmitter::EmitBinSub(const BinOpInfo &Op) {
llvm::Value *ResR, *ResI;
if (Op.LHS.first->getType()->isFloatingPointTy()) {
- ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
- ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ ResR = Builder.CreateFSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ if (Op.LHS.second && Op.RHS.second)
+ ResI = Builder.CreateFSub(Op.LHS.second, Op.RHS.second, "sub.i");
+ else
+ ResI = Op.LHS.second ? Op.LHS.second
+ : Builder.CreateFNeg(Op.RHS.second, "sub.i");
+ assert(ResI && "Only one operand may be real!");
} else {
- ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ ResR = Builder.CreateSub(Op.LHS.first, Op.RHS.first, "sub.r");
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
ResI = Builder.CreateSub(Op.LHS.second, Op.RHS.second, "sub.i");
}
return ComplexPairTy(ResR, ResI);
}
+/// \brief Emit a libcall for a binary operation on complex types.
+ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
+ const BinOpInfo &Op) {
+ CallArgList Args;
+ Args.add(RValue::get(Op.LHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.LHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.first),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+ Args.add(RValue::get(Op.RHS.second),
+ Op.Ty->castAs<ComplexType>()->getElementType());
+
+ // We *must* use the full CG function call building logic here because the
+ // complex type has special ABI handling. We also should not forget about
+ // special calling convention which may be used for compiler builtins.
+ const CGFunctionInfo &FuncInfo =
+ CGF.CGM.getTypes().arrangeFreeFunctionCall(
+ Op.Ty, Args, FunctionType::ExtInfo(/* No CC here - will be added later */),
+ RequiredArgs::All);
+ llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
+ llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
+ llvm::Instruction *Call;
+
+ RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
+ nullptr, &Call);
+ cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
+ cast<llvm::CallInst>(Call)->setDoesNotThrow();
+
+ return Res.getComplexVal();
+}
+
+/// \brief Lookup the libcall name for a given floating point type complex
+/// multiply.
+static StringRef getComplexMultiplyLibCallName(llvm::Type *Ty) {
+ switch (Ty->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return "__mulhc3";
+ case llvm::Type::FloatTyID:
+ return "__mulsc3";
+ case llvm::Type::DoubleTyID:
+ return "__muldc3";
+ case llvm::Type::PPC_FP128TyID:
+ return "__multc3";
+ case llvm::Type::X86_FP80TyID:
+ return "__mulxc3";
+ case llvm::Type::FP128TyID:
+ return "__multc3";
+ }
+}
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinMul(const BinOpInfo &Op) {
using llvm::Value;
Value *ResR, *ResI;
+ llvm::MDBuilder MDHelper(CGF.getLLVMContext());
if (Op.LHS.first->getType()->isFloatingPointTy()) {
- Value *ResRl = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResRr = Builder.CreateFMul(Op.LHS.second, Op.RHS.second,"mul.rr");
- ResR = Builder.CreateFSub(ResRl, ResRr, "mul.r");
+ // The general formulation is:
+ // (a + ib) * (c + id) = (a * c - b * d) + i(a * d + b * c)
+ //
+ // But we can fold away components which would be zero due to a real
+ // operand according to C11 Annex G.5.1p2.
+ // FIXME: C11 also provides for imaginary types which would allow folding
+ // still more of this within the type system.
+
+ if (Op.LHS.second && Op.RHS.second) {
+ // If both operands are complex, emit the core math directly, and then
+ // test for NaNs. If we find NaNs in the result, we delegate to a libcall
+ // to carefully re-compute the correct infinity representation if
+ // possible. The expectation is that the presence of NaNs here is
+ // *extremely* rare, and so the cost of the libcall is almost irrelevant.
+ // This is good, because the libcall re-computes the core multiplication
+ // exactly the same as we do here and re-tests for NaNs in order to be
+ // a generic complex*complex libcall.
+
+ // First compute the four products.
+ Value *AC = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul_ac");
+ Value *BD = Builder.CreateFMul(Op.LHS.second, Op.RHS.second, "mul_bd");
+ Value *AD = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul_ad");
+ Value *BC = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul_bc");
+
+ // The real part is the difference of the first two, the imaginary part is
+ // the sum of the second.
+ ResR = Builder.CreateFSub(AC, BD, "mul_r");
+ ResI = Builder.CreateFAdd(AD, BC, "mul_i");
+
+ // Emit the test for the real part becoming NaN and create a branch to
+ // handle it. We test for NaN by comparing the number to itself.
+ Value *IsRNaN = Builder.CreateFCmpUNO(ResR, ResR, "isnan_cmp");
+ llvm::BasicBlock *ContBB = CGF.createBasicBlock("complex_mul_cont");
+ llvm::BasicBlock *INaNBB = CGF.createBasicBlock("complex_mul_imag_nan");
+ llvm::Instruction *Branch = Builder.CreateCondBr(IsRNaN, INaNBB, ContBB);
+ llvm::BasicBlock *OrigBB = Branch->getParent();
+
+ // Give hint that we very much don't expect to see NaNs.
+ // Value chosen to match UR_NONTAKEN_WEIGHT, see BranchProbabilityInfo.cpp
+ llvm::MDNode *BrWeight = MDHelper.createBranchWeights(1, (1U << 20) - 1);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now test the imaginary part and create its branch.
+ CGF.EmitBlock(INaNBB);
+ Value *IsINaN = Builder.CreateFCmpUNO(ResI, ResI, "isnan_cmp");
+ llvm::BasicBlock *LibCallBB = CGF.createBasicBlock("complex_mul_libcall");
+ Branch = Builder.CreateCondBr(IsINaN, LibCallBB, ContBB);
+ Branch->setMetadata(llvm::LLVMContext::MD_prof, BrWeight);
+
+ // Now emit the libcall on this slowest of the slow paths.
+ CGF.EmitBlock(LibCallBB);
+ Value *LibCallR, *LibCallI;
+ std::tie(LibCallR, LibCallI) = EmitComplexBinOpLibCall(
+ getComplexMultiplyLibCallName(Op.LHS.first->getType()), Op);
+ Builder.CreateBr(ContBB);
+
+ // Finally continue execution by phi-ing together the different
+ // computation paths.
+ CGF.EmitBlock(ContBB);
+ llvm::PHINode *RealPHI = Builder.CreatePHI(ResR->getType(), 3, "real_mul_phi");
+ RealPHI->addIncoming(ResR, OrigBB);
+ RealPHI->addIncoming(ResR, INaNBB);
+ RealPHI->addIncoming(LibCallR, LibCallBB);
+ llvm::PHINode *ImagPHI = Builder.CreatePHI(ResI->getType(), 3, "imag_mul_phi");
+ ImagPHI->addIncoming(ResI, OrigBB);
+ ImagPHI->addIncoming(ResI, INaNBB);
+ ImagPHI->addIncoming(LibCallI, LibCallBB);
+ return ComplexPairTy(RealPHI, ImagPHI);
+ }
+ assert((Op.LHS.second || Op.RHS.second) &&
+ "At least one operand must be complex!");
+
+ // If either of the operands is a real rather than a complex, the
+ // imaginary component is ignored when computing the real component of the
+ // result.
+ ResR = Builder.CreateFMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResIl = Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il");
- Value *ResIr = Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
- ResI = Builder.CreateFAdd(ResIl, ResIr, "mul.i");
+ ResI = Op.LHS.second
+ ? Builder.CreateFMul(Op.LHS.second, Op.RHS.first, "mul.il")
+ : Builder.CreateFMul(Op.LHS.first, Op.RHS.second, "mul.ir");
} else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
Value *ResRl = Builder.CreateMul(Op.LHS.first, Op.RHS.first, "mul.rl");
- Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second,"mul.rr");
- ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
+ Value *ResRr = Builder.CreateMul(Op.LHS.second, Op.RHS.second, "mul.rr");
+ ResR = Builder.CreateSub(ResRl, ResRr, "mul.r");
Value *ResIl = Builder.CreateMul(Op.LHS.second, Op.RHS.first, "mul.il");
Value *ResIr = Builder.CreateMul(Op.LHS.first, Op.RHS.second, "mul.ir");
- ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
+ ResI = Builder.CreateAdd(ResIl, ResIr, "mul.i");
}
return ComplexPairTy(ResR, ResI);
}
+// See C11 Annex G.5.1 for the semantics of multiplicative operators on complex
+// typed values.
ComplexPairTy ComplexExprEmitter::EmitBinDiv(const BinOpInfo &Op) {
llvm::Value *LHSr = Op.LHS.first, *LHSi = Op.LHS.second;
llvm::Value *RHSr = Op.RHS.first, *RHSi = Op.RHS.second;
llvm::Value *DSTr, *DSTi;
- if (Op.LHS.first->getType()->isFloatingPointTy()) {
- // (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
- llvm::Value *Tmp1 = Builder.CreateFMul(LHSr, RHSr); // a*c
- llvm::Value *Tmp2 = Builder.CreateFMul(LHSi, RHSi); // b*d
- llvm::Value *Tmp3 = Builder.CreateFAdd(Tmp1, Tmp2); // ac+bd
-
- llvm::Value *Tmp4 = Builder.CreateFMul(RHSr, RHSr); // c*c
- llvm::Value *Tmp5 = Builder.CreateFMul(RHSi, RHSi); // d*d
- llvm::Value *Tmp6 = Builder.CreateFAdd(Tmp4, Tmp5); // cc+dd
-
- llvm::Value *Tmp7 = Builder.CreateFMul(LHSi, RHSr); // b*c
- llvm::Value *Tmp8 = Builder.CreateFMul(LHSr, RHSi); // a*d
- llvm::Value *Tmp9 = Builder.CreateFSub(Tmp7, Tmp8); // bc-ad
+ if (LHSr->getType()->isFloatingPointTy()) {
+ // If we have a complex operand on the RHS, we delegate to a libcall to
+ // handle all of the complexities and minimize underflow/overflow cases.
+ //
+ // FIXME: We would be able to avoid the libcall in many places if we
+ // supported imaginary types in addition to complex types.
+ if (RHSi) {
+ BinOpInfo LibCallOp = Op;
+ // If LHS was a real, supply a null imaginary part.
+ if (!LHSi)
+ LibCallOp.LHS.second = llvm::Constant::getNullValue(LHSr->getType());
+
+ StringRef LibCallName;
+ switch (LHSr->getType()->getTypeID()) {
+ default:
+ llvm_unreachable("Unsupported floating point type!");
+ case llvm::Type::HalfTyID:
+ return EmitComplexBinOpLibCall("__divhc3", LibCallOp);
+ case llvm::Type::FloatTyID:
+ return EmitComplexBinOpLibCall("__divsc3", LibCallOp);
+ case llvm::Type::DoubleTyID:
+ return EmitComplexBinOpLibCall("__divdc3", LibCallOp);
+ case llvm::Type::PPC_FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ case llvm::Type::X86_FP80TyID:
+ return EmitComplexBinOpLibCall("__divxc3", LibCallOp);
+ case llvm::Type::FP128TyID:
+ return EmitComplexBinOpLibCall("__divtc3", LibCallOp);
+ }
+ }
+ assert(LHSi && "Can have at most one non-complex operand!");
- DSTr = Builder.CreateFDiv(Tmp3, Tmp6);
- DSTi = Builder.CreateFDiv(Tmp9, Tmp6);
+ DSTr = Builder.CreateFDiv(LHSr, RHSr);
+ DSTi = Builder.CreateFDiv(LHSi, RHSr);
} else {
+ assert(Op.LHS.second && Op.RHS.second &&
+ "Both operands of integer complex operators must be complex!");
// (a+ib) / (c+id) = ((ac+bd)/(cc+dd)) + i((bc-ad)/(cc+dd))
llvm::Value *Tmp1 = Builder.CreateMul(LHSr, RHSr); // a*c
llvm::Value *Tmp2 = Builder.CreateMul(LHSi, RHSi); // b*d
@@ -626,8 +797,15 @@ ComplexExprEmitter::EmitBinOps(const BinaryOperator *E) {
TestAndClearIgnoreReal();
TestAndClearIgnoreImag();
BinOpInfo Ops;
- Ops.LHS = Visit(E->getLHS());
- Ops.RHS = Visit(E->getRHS());
+ if (E->getLHS()->getType()->isRealFloatingType())
+ Ops.LHS = ComplexPairTy(CGF.EmitScalarExpr(E->getLHS()), nullptr);
+ else
+ Ops.LHS = Visit(E->getLHS());
+ if (E->getRHS()->getType()->isRealFloatingType())
+ Ops.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ else
+ Ops.RHS = Visit(E->getRHS());
+
Ops.Ty = E->getType();
return Ops;
}
@@ -647,12 +825,19 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
// __block variables need to have the rhs evaluated first, plus this should
// improve codegen a little.
OpInfo.Ty = E->getComputationResultType();
+ QualType ComplexElementTy = cast<ComplexType>(OpInfo.Ty)->getElementType();
// The RHS should have been converted to the computation type.
- assert(OpInfo.Ty->isAnyComplexType());
- assert(CGF.getContext().hasSameUnqualifiedType(OpInfo.Ty,
- E->getRHS()->getType()));
- OpInfo.RHS = Visit(E->getRHS());
+ if (E->getRHS()->getType()->isRealFloatingType()) {
+ assert(
+ CGF.getContext()
+ .hasSameUnqualifiedType(ComplexElementTy, E->getRHS()->getType()));
+ OpInfo.RHS = ComplexPairTy(CGF.EmitScalarExpr(E->getRHS()), nullptr);
+ } else {
+ assert(CGF.getContext()
+ .hasSameUnqualifiedType(OpInfo.Ty, E->getRHS()->getType()));
+ OpInfo.RHS = Visit(E->getRHS());
+ }
LValue LHS = CGF.EmitLValue(E->getLHS());
@@ -662,7 +847,15 @@ EmitCompoundAssignLValue(const CompoundAssignOperator *E,
OpInfo.LHS = EmitComplexToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
} else {
llvm::Value *LHSVal = CGF.EmitLoadOfScalar(LHS, E->getExprLoc());
- OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ // For floating point real operands we can directly pass the scalar form
+ // to the binary operator emission and potentially get more efficient code.
+ if (LHSTy->isRealFloatingType()) {
+ if (!CGF.getContext().hasSameUnqualifiedType(ComplexElementTy, LHSTy))
+ LHSVal = CGF.EmitScalarConversion(LHSVal, LHSTy, ComplexElementTy);
+ OpInfo.LHS = ComplexPairTy(LHSVal, nullptr);
+ } else {
+ OpInfo.LHS = EmitScalarToComplexCast(LHSVal, LHSTy, OpInfo.Ty);
+ }
}
// Expand the binary operator.
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index b508dcb446fb..54f7eee6791e 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -104,16 +104,7 @@ AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
// Round up the field offset to the alignment of the field type.
CharUnits AlignedNextFieldOffsetInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
-
- if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
- assert(!Packed && "Alignment is wrong even with a packed struct!");
-
- // Convert the struct to a packed struct.
- ConvertStructToPacked();
-
- AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
- }
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
if (AlignedNextFieldOffsetInChars < FieldOffsetInChars) {
// We need to append padding.
@@ -122,6 +113,24 @@ AppendBytes(CharUnits FieldOffsetInChars, llvm::Constant *InitCst) {
assert(NextFieldOffsetInChars == FieldOffsetInChars &&
"Did not add enough padding!");
+ AlignedNextFieldOffsetInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(FieldAlignment);
+ }
+
+ if (AlignedNextFieldOffsetInChars > FieldOffsetInChars) {
+ assert(!Packed && "Alignment is wrong even with a packed struct!");
+
+ // Convert the struct to a packed struct.
+ ConvertStructToPacked();
+
+ // After we pack the struct, we may need to insert padding.
+ if (NextFieldOffsetInChars < FieldOffsetInChars) {
+ // We need to append padding.
+ AppendPadding(FieldOffsetInChars - NextFieldOffsetInChars);
+
+ assert(NextFieldOffsetInChars == FieldOffsetInChars &&
+ "Did not add enough padding!");
+ }
AlignedNextFieldOffsetInChars = NextFieldOffsetInChars;
}
@@ -486,10 +495,14 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
// No tail padding is necessary.
} else {
// Append tail padding if necessary.
- AppendTailPadding(LayoutSizeInChars);
-
CharUnits LLVMSizeInChars =
- NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ if (LLVMSizeInChars != LayoutSizeInChars)
+ AppendTailPadding(LayoutSizeInChars);
+
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
// Check if we need to convert the struct to a packed struct.
if (NextFieldOffsetInChars <= LayoutSizeInChars &&
@@ -501,7 +514,10 @@ llvm::Constant *ConstStructBuilder::Finalize(QualType Ty) {
"Converting to packed did not help!");
}
- assert(LayoutSizeInChars == NextFieldOffsetInChars &&
+ LLVMSizeInChars =
+ NextFieldOffsetInChars.RoundUpToAlignment(LLVMStructAlignment);
+
+ assert(LayoutSizeInChars == LLVMSizeInChars &&
"Tail padding mismatch!");
}
@@ -734,6 +750,20 @@ public:
// initialise any elements that have not been initialised explicitly
unsigned NumInitableElts = std::min(NumInitElements, NumElements);
+ // Initialize remaining array elements.
+ // FIXME: This doesn't handle member pointers correctly!
+ llvm::Constant *fillC;
+ if (Expr *filler = ILE->getArrayFiller())
+ fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
+ else
+ fillC = llvm::Constant::getNullValue(ElemTy);
+ if (!fillC)
+ return nullptr;
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (fillC->isNullValue() && !NumInitableElts)
+ return llvm::ConstantAggregateZero::get(AType);
+
// Copy initializer elements.
std::vector<llvm::Constant*> Elts;
Elts.reserve(NumInitableElts + NumElements);
@@ -748,15 +778,6 @@ public:
Elts.push_back(C);
}
- // Initialize remaining array elements.
- // FIXME: This doesn't handle member pointers correctly!
- llvm::Constant *fillC;
- if (Expr *filler = ILE->getArrayFiller())
- fillC = CGM.EmitConstantExpr(filler, filler->getType(), CGF);
- else
- fillC = llvm::Constant::getNullValue(ElemTy);
- if (!fillC)
- return nullptr;
RewriteType |= (fillC->getType() != ElemTy);
Elts.resize(NumElements, fillC);
@@ -869,7 +890,8 @@ public:
if (VD->isFileVarDecl() || VD->hasExternalStorage())
return CGM.GetAddrOfGlobalVar(VD);
else if (VD->isLocalVarDecl())
- return CGM.getStaticLocalDeclAddress(VD);
+ return CGM.getOrCreateStaticVarDecl(
+ *VD, CGM.getLLVMLinkageVarDefinition(VD, /*isConstant=*/false));
}
}
return nullptr;
@@ -1126,13 +1148,14 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// FIXME: the target may want to specify that this is packed.
llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
Complex[1]->getType(),
- NULL);
+ nullptr);
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Float: {
const llvm::APFloat &Init = Value.getFloat();
if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
- !Context.getLangOpts().NativeHalfType)
+ !Context.getLangOpts().NativeHalfType &&
+ !Context.getLangOpts().HalfArgsAndReturns)
return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
else
return llvm::ConstantFP::get(VMContext, Init);
@@ -1148,7 +1171,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// FIXME: the target may want to specify that this is packed.
llvm::StructType *STy = llvm::StructType::get(Complex[0]->getType(),
Complex[1]->getType(),
- NULL);
+ nullptr);
return llvm::ConstantStruct::get(STy, Complex);
}
case APValue::Vector: {
@@ -1189,9 +1212,6 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
unsigned NumElements = Value.getArraySize();
unsigned NumInitElts = Value.getArrayInitializedElts();
- std::vector<llvm::Constant*> Elts;
- Elts.reserve(NumElements);
-
// Emit array filler, if there is one.
llvm::Constant *Filler = nullptr;
if (Value.hasArrayFiller())
@@ -1199,7 +1219,18 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
CAT->getElementType(), CGF);
// Emit initializer elements.
- llvm::Type *CommonElementType = nullptr;
+ llvm::Type *CommonElementType =
+ getTypes().ConvertType(CAT->getElementType());
+
+ // Try to use a ConstantAggregateZero if we can.
+ if (Filler && Filler->isNullValue() && !NumInitElts) {
+ llvm::ArrayType *AType =
+ llvm::ArrayType::get(CommonElementType, NumElements);
+ return llvm::ConstantAggregateZero::get(AType);
+ }
+
+ std::vector<llvm::Constant*> Elts;
+ Elts.reserve(NumElements);
for (unsigned I = 0; I < NumElements; ++I) {
llvm::Constant *C = Filler;
if (I < NumInitElts)
@@ -1268,83 +1299,6 @@ CodeGenModule::getMemberPointerConstant(const UnaryOperator *uo) {
return getCXXABI().EmitMemberDataPointer(type, chars);
}
-static void
-FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
- SmallVectorImpl<llvm::Constant *> &Elements,
- uint64_t StartOffset) {
- assert(StartOffset % CGM.getContext().getCharWidth() == 0 &&
- "StartOffset not byte aligned!");
-
- if (CGM.getTypes().isZeroInitializable(T))
- return;
-
- if (const ConstantArrayType *CAT =
- CGM.getContext().getAsConstantArrayType(T)) {
- QualType ElementTy = CAT->getElementType();
- uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
-
- for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
- FillInNullDataMemberPointers(CGM, ElementTy, Elements,
- StartOffset + I * ElementSize);
- }
- } else if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
-
- // Go through all bases and fill in any null pointer to data members.
- for (const auto &I : RD->bases()) {
- if (I.isVirtual()) {
- // Ignore virtual bases.
- continue;
- }
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
-
- // Ignore empty bases.
- if (BaseDecl->isEmpty())
- continue;
-
- // Ignore bases that don't have any pointer to data members.
- if (CGM.getTypes().isZeroInitializable(BaseDecl))
- continue;
-
- uint64_t BaseOffset =
- CGM.getContext().toBits(Layout.getBaseClassOffset(BaseDecl));
- FillInNullDataMemberPointers(CGM, I.getType(),
- Elements, StartOffset + BaseOffset);
- }
-
- // Visit all fields.
- unsigned FieldNo = 0;
- for (RecordDecl::field_iterator I = RD->field_begin(),
- E = RD->field_end(); I != E; ++I, ++FieldNo) {
- QualType FieldType = I->getType();
-
- if (CGM.getTypes().isZeroInitializable(FieldType))
- continue;
-
- uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
- FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
- }
- } else {
- assert(T->isMemberPointerType() && "Should only see member pointers here!");
- assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
- "Should only see pointers to data members here!");
-
- CharUnits StartIndex = CGM.getContext().toCharUnitsFromBits(StartOffset);
- CharUnits EndIndex = StartIndex + CGM.getContext().getTypeSizeInChars(T);
-
- // FIXME: hardcodes Itanium member pointer representation!
- llvm::Constant *NegativeOne =
- llvm::ConstantInt::get(CGM.Int8Ty, -1ULL, /*isSigned*/true);
-
- // Fill in the null data member pointer.
- for (CharUnits I = StartIndex; I != EndIndex; ++I)
- Elements[I.getQuantity()] = NegativeOne;
- }
-}
-
static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
llvm::Type *baseType,
const CXXRecordDecl *base);
@@ -1433,32 +1387,8 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
if (baseLayout.isZeroInitializableAsBase())
return llvm::Constant::getNullValue(baseType);
- // If the base type is a struct, we can just use its null constant.
- if (isa<llvm::StructType>(baseType)) {
- return EmitNullConstant(CGM, base, /*complete*/ false);
- }
-
- // Otherwise, some bases are represented as arrays of i8 if the size
- // of the base is smaller than its corresponding LLVM type. Figure
- // out how many elements this base array has.
- llvm::ArrayType *baseArrayType = cast<llvm::ArrayType>(baseType);
- unsigned numBaseElements = baseArrayType->getNumElements();
-
- // Fill in null data member pointers.
- SmallVector<llvm::Constant *, 16> baseElements(numBaseElements);
- FillInNullDataMemberPointers(CGM, CGM.getContext().getTypeDeclType(base),
- baseElements, 0);
-
- // Now go through all other elements and zero them out.
- if (numBaseElements) {
- llvm::Constant *i8_zero = llvm::Constant::getNullValue(CGM.Int8Ty);
- for (unsigned i = 0; i != numBaseElements; ++i) {
- if (!baseElements[i])
- baseElements[i] = i8_zero;
- }
- }
-
- return llvm::ConstantArray::get(baseArrayType, baseElements);
+ // Otherwise, we can just use its null constant.
+ return EmitNullConstant(CGM, base, /*asCompleteObject=*/false);
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
@@ -1489,9 +1419,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
assert(T->isMemberPointerType() && "Should only see member pointers here!");
assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
"Should only see pointers to data members here!");
-
- // Itanium C++ ABI 2.3:
- // A NULL pointer is represented as -1.
+
return getCXXABI().EmitNullMemberPointer(T->castAs<MemberPointerType>());
}
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 140e9aa3445f..a9cbf05da104 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -85,18 +85,54 @@ public:
return CGF.EmitCheckedLValue(E, TCK);
}
- void EmitBinOpCheck(Value *Check, const BinOpInfo &Info);
+ void EmitBinOpCheck(ArrayRef<std::pair<Value *, SanitizerKind>> Checks,
+ const BinOpInfo &Info);
Value *EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
return CGF.EmitLoadOfLValue(LV, Loc).getScalarVal();
}
+ void EmitLValueAlignmentAssumption(const Expr *E, Value *V) {
+ const AlignValueAttr *AVAttr = nullptr;
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(E)) {
+ const ValueDecl *VD = DRE->getDecl();
+
+ if (VD->getType()->isReferenceType()) {
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(VD->getType().getNonReferenceType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+ } else {
+ // Assumptions for function parameters are emitted at the start of the
+ // function, so there is no need to repeat that here.
+ if (isa<ParmVarDecl>(VD))
+ return;
+
+ AVAttr = VD->getAttr<AlignValueAttr>();
+ }
+ }
+
+ if (!AVAttr)
+ if (const auto *TTy =
+ dyn_cast<TypedefType>(E->getType()))
+ AVAttr = TTy->getDecl()->getAttr<AlignValueAttr>();
+
+ if (!AVAttr)
+ return;
+
+ Value *AlignmentValue = CGF.EmitScalarExpr(AVAttr->getAlignment());
+ llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(AlignmentValue);
+ CGF.EmitAlignmentAssumption(V, AlignmentCI->getZExtValue());
+ }
+
/// EmitLoadOfLValue - Given an expression with complex type that represents a
/// value l-value, this method emits the address of the l-value, then loads
/// and returns the result.
Value *EmitLoadOfLValue(const Expr *E) {
- return EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
- E->getExprLoc());
+ Value *V = EmitLoadOfLValue(EmitCheckedLValue(E, CodeGenFunction::TCK_Load),
+ E->getExprLoc());
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
/// EmitConversionToBool - Convert the specified expression value to a
@@ -160,6 +196,7 @@ public:
//===--------------------------------------------------------------------===//
Value *Visit(Expr *E) {
+ ApplyDebugLocation DL(CGF, E->getLocStart());
return StmtVisitor<ScalarExprEmitter, Value*>::Visit(E);
}
@@ -274,6 +311,10 @@ public:
Value *VisitExplicitCastExpr(ExplicitCastExpr *E) {
if (E->getType()->isVariablyModifiedType())
CGF.EmitVariablyModifiedType(E->getType());
+
+ if (CGDebugInfo *DI = CGF.getDebugInfo())
+ DI->EmitExplicitCastType(E->getType());
+
return VisitCastExpr(E);
}
Value *VisitCastExpr(CastExpr *E);
@@ -282,7 +323,10 @@ public:
if (E->getCallReturnType()->isReferenceType())
return EmitLoadOfLValue(E);
- return CGF.EmitCallExpr(E).getScalarVal();
+ Value *V = CGF.EmitCallExpr(E).getScalarVal();
+
+ EmitLValueAlignmentAssumption(E, V);
+ return V;
}
Value *VisitStmtExpr(const StmtExpr *E);
@@ -410,7 +454,7 @@ public:
case LangOptions::SOB_Defined:
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -418,7 +462,8 @@ public:
}
}
- if (Ops.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (Ops.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(Ops);
if (Ops.LHS->getType()->isFPOrFPVectorTy())
@@ -682,8 +727,8 @@ void ScalarExprEmitter::EmitFloatConversionCheck(Value *OrigSrc,
CGF.EmitCheckTypeDescriptor(OrigSrcType),
CGF.EmitCheckTypeDescriptor(DstType)
};
- CGF.EmitCheck(Check, "float_cast_overflow", StaticArgs, OrigSrc,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
+ "float_cast_overflow", StaticArgs, OrigSrc);
}
/// EmitScalarConversion - Emit a conversion from the specified type to the
@@ -701,7 +746,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
llvm::Type *SrcTy = Src->getType();
// If casting to/from storage-only half FP, use special intrinsics.
- if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (SrcType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
Src = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
CGF.CGM.FloatTy),
@@ -767,13 +813,14 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// An overflowing conversion has undefined behavior if either the source type
// or the destination type is a floating-point type.
- if (CGF.SanOpts->FloatCastOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::FloatCastOverflow) &&
(OrigSrcType->isFloatingType() || DstType->isFloatingType()))
EmitFloatConversionCheck(OrigSrc, OrigSrcType, Src, SrcType, DstType,
DstTy);
// Cast to half via float
- if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (DstType->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
DstTy = CGF.FloatTy;
if (isa<llvm::IntegerType>(SrcTy)) {
@@ -839,8 +886,10 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
/// \brief Emit a sanitization check for the given "binary" operation (which
/// might actually be a unary increment which has been lowered to a binary
-/// operation). The check passes if \p Check, which is an \c i1, is \c true.
-void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
+/// operation). The check passes if all values in \p Checks (which are \c i1),
+/// are \c true.
+void ScalarExprEmitter::EmitBinOpCheck(
+ ArrayRef<std::pair<Value *, SanitizerKind>> Checks, const BinOpInfo &Info) {
assert(CGF.IsSanitizerScope);
StringRef CheckName;
SmallVector<llvm::Constant *, 4> StaticData;
@@ -870,7 +919,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
CheckName = "divrem_overflow";
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
- // Signed arithmetic overflow (+, -, *).
+ // Arithmetic overflow (+, -, *).
switch (Opcode) {
case BO_Add: CheckName = "add_overflow"; break;
case BO_Sub: CheckName = "sub_overflow"; break;
@@ -883,8 +932,7 @@ void ScalarExprEmitter::EmitBinOpCheck(Value *Check, const BinOpInfo &Info) {
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Check, CheckName, StaticData, DynamicData,
- CodeGenFunction::CRK_Recoverable);
+ CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
@@ -1076,7 +1124,7 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
Value *Idx = Visit(E->getIdx());
QualType IdxTy = E->getIdx()->getType();
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, /*Accessed*/true);
return Builder.CreateExtractElement(Base, Idx, "vecext");
@@ -1304,8 +1352,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
llvm::Type *DstTy = ConvertType(DestTy);
if (SrcTy->isPtrOrPtrVectorTy() && DstTy->isPtrOrPtrVectorTy() &&
SrcTy->getPointerAddressSpace() != DstTy->getPointerAddressSpace()) {
- llvm::Type *MidTy = CGF.CGM.getDataLayout().getIntPtrType(SrcTy);
- return Builder.CreateIntToPtr(Builder.CreatePtrToInt(Src, MidTy), DstTy);
+ llvm_unreachable("wrong cast for pointers in different address spaces"
+ "(must be an address space cast)!");
}
return Builder.CreateBitCast(Src, DstTy);
}
@@ -1344,9 +1392,9 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
E->getType()->getPointeeCXXRecordDecl();
assert(DerivedClassDecl && "DerivedToBase arg isn't a C++ object pointer!");
- return CGF.GetAddressOfBaseClass(Visit(E), DerivedClassDecl,
- CE->path_begin(), CE->path_end(),
- ShouldNullCheckClassCastValue(CE));
+ return CGF.GetAddressOfBaseClass(
+ Visit(E), DerivedClassDecl, CE->path_begin(), CE->path_end(),
+ ShouldNullCheckClassCastValue(CE), CE->getExprLoc());
}
case CK_Dynamic: {
Value *V = Visit(const_cast<Expr*>(E));
@@ -1364,8 +1412,11 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// anything here.
if (!E->getType()->isVariableArrayType()) {
assert(isa<llvm::PointerType>(V->getType()) && "Expected pointer");
- assert(isa<llvm::ArrayType>(cast<llvm::PointerType>(V->getType())
- ->getElementType()) &&
+ V = CGF.Builder.CreatePointerCast(
+ V, ConvertType(E->getType())->getPointerTo(
+ V->getType()->getPointerAddressSpace()));
+
+ assert(isa<llvm::ArrayType>(V->getType()->getPointerElementType()) &&
"Expected pointer to array");
V = Builder.CreateStructGEP(V, 0, "arraydecay");
}
@@ -1528,7 +1579,7 @@ EmitAddConsiderOverflowBehavior(const UnaryOperator *E,
case LangOptions::SOB_Defined:
return Builder.CreateAdd(InVal, NextVal, IsInc ? "inc" : "dec");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(InVal, NextVal, IsInc ? "inc" : "dec");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -1576,9 +1627,9 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// checking, and fall into the slow path with the atomic cmpxchg loop.
if (!type->isBooleanType() && type->isIntegerType() &&
!(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = isInc ? llvm::AtomicRMWInst::Add :
llvm::AtomicRMWInst::Sub;
llvm::Instruction::BinaryOps op = isInc ? llvm::Instruction::Add :
@@ -1627,7 +1678,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (CanOverflow && type->isSignedIntegerOrEnumerationType()) {
value = EmitAddConsiderOverflowBehavior(E, value, amt, isInc);
} else if (CanOverflow && type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) {
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) {
BinOpInfo BinOp;
BinOp.LHS = value;
BinOp.RHS = llvm::ConstantInt::get(value->getType(), 1, false);
@@ -1691,7 +1742,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
// Add the inc/dec to the real part.
llvm::Value *amt;
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType) {
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns) {
// Another special case: half FP increment should be done via float
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_from_fp16,
@@ -1714,7 +1766,8 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
}
value = Builder.CreateFAdd(value, amt, isInc ? "inc" : "dec");
- if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType)
+ if (type->isHalfType() && !CGF.getContext().getLangOpts().NativeHalfType &&
+ !CGF.getContext().getLangOpts().HalfArgsAndReturns)
value = Builder.CreateCall(
CGF.CGM.getIntrinsic(llvm::Intrinsic::convert_to_fp16,
CGF.CGM.FloatTy),
@@ -1740,11 +1793,11 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LV.getAddress(), atomicPHI, CGF.EmitToMemory(value, type),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LV, RValue::get(atomicPHI), RValue::get(CGF.EmitToMemory(value, type)),
+ E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2019,10 +2072,10 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (const AtomicType *atomicTy = LHSTy->getAs<AtomicType>()) {
QualType type = atomicTy->getValueType();
if (!type->isBooleanType() && type->isIntegerType() &&
- !(type->isUnsignedIntegerType() &&
- CGF.SanOpts->UnsignedIntegerOverflow) &&
- CGF.getLangOpts().getSignedOverflowBehavior() !=
- LangOptions::SOB_Trapping) {
+ !(type->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow)) &&
+ CGF.getLangOpts().getSignedOverflowBehavior() !=
+ LangOptions::SOB_Trapping) {
llvm::AtomicRMWInst::BinOp aop = llvm::AtomicRMWInst::BAD_BINOP;
switch (OpInfo.Opcode) {
// We don't have atomicrmw operands for *, %, /, <<, >>
@@ -2084,11 +2137,11 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
if (atomicPHI) {
llvm::BasicBlock *opBB = Builder.GetInsertBlock();
llvm::BasicBlock *contBB = CGF.createBasicBlock("atomic_cont", CGF.CurFn);
- llvm::Value *pair = Builder.CreateAtomicCmpXchg(
- LHSLV.getAddress(), atomicPHI, CGF.EmitToMemory(Result, LHSTy),
- llvm::SequentiallyConsistent, llvm::SequentiallyConsistent);
- llvm::Value *old = Builder.CreateExtractValue(pair, 0);
- llvm::Value *success = Builder.CreateExtractValue(pair, 1);
+ auto Pair = CGF.EmitAtomicCompareExchange(
+ LHSLV, RValue::get(atomicPHI),
+ RValue::get(CGF.EmitToMemory(Result, LHSTy)), E->getExprLoc());
+ llvm::Value *old = Pair.first.getScalarVal();
+ llvm::Value *success = Pair.second.getScalarVal();
atomicPHI->addIncoming(old, opBB);
Builder.CreateCondBr(success, contBB, opBB);
Builder.SetInsertPoint(contBB);
@@ -2131,12 +2184,14 @@ Value *ScalarExprEmitter::EmitCompoundAssign(const CompoundAssignOperator *E,
void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
const BinOpInfo &Ops, llvm::Value *Zero, bool isDiv) {
- llvm::Value *Cond = nullptr;
+ SmallVector<std::pair<llvm::Value *, SanitizerKind>, 2> Checks;
- if (CGF.SanOpts->IntegerDivideByZero)
- Cond = Builder.CreateICmpNE(Ops.RHS, Zero);
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ Checks.push_back(std::make_pair(Builder.CreateICmpNE(Ops.RHS, Zero),
+ SanitizerKind::IntegerDivideByZero));
+ }
- if (CGF.SanOpts->SignedIntegerOverflow &&
+ if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
Ops.Ty->hasSignedIntegerRepresentation()) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
@@ -2146,26 +2201,29 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
llvm::Value *LHSCmp = Builder.CreateICmpNE(Ops.LHS, IntMin);
llvm::Value *RHSCmp = Builder.CreateICmpNE(Ops.RHS, NegOne);
- llvm::Value *Overflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
- Cond = Cond ? Builder.CreateAnd(Cond, Overflow, "and") : Overflow;
+ llvm::Value *NotOverflow = Builder.CreateOr(LHSCmp, RHSCmp, "or");
+ Checks.push_back(
+ std::make_pair(NotOverflow, SanitizerKind::SignedIntegerOverflow));
}
- if (Cond)
- EmitBinOpCheck(Cond, Ops);
+ if (Checks.size() > 0)
+ EmitBinOpCheck(Checks, Ops);
}
Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
{
CodeGenFunction::SanitizerScope SanScope(&CGF);
- if ((CGF.SanOpts->IntegerDivideByZero ||
- CGF.SanOpts->SignedIntegerOverflow) &&
+ if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+ CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
Ops.Ty->isIntegerType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, true);
- } else if (CGF.SanOpts->FloatDivideByZero &&
+ } else if (CGF.SanOpts.has(SanitizerKind::FloatDivideByZero) &&
Ops.Ty->isRealFloatingType()) {
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
- EmitBinOpCheck(Builder.CreateFCmpUNE(Ops.RHS, Zero), Ops);
+ llvm::Value *NonZero = Builder.CreateFCmpUNE(Ops.RHS, Zero);
+ EmitBinOpCheck(std::make_pair(NonZero, SanitizerKind::FloatDivideByZero),
+ Ops);
}
}
@@ -2189,7 +2247,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (CGF.SanOpts->IntegerDivideByZero) {
+ if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
@@ -2248,9 +2306,12 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
if (handlerName->empty()) {
// If the signed-integer-overflow sanitizer is enabled, emit a call to its
// runtime. Otherwise, this is a -ftrapv check, so just emit a trap.
- if (!isSigned || CGF.SanOpts->SignedIntegerOverflow) {
+ if (!isSigned || CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateNot(overflow), Ops);
+ llvm::Value *NotOverflow = Builder.CreateNot(overflow);
+ SanitizerKind Kind = isSigned ? SanitizerKind::SignedIntegerOverflow
+ : SanitizerKind::UnsignedIntegerOverflow;
+ EmitBinOpCheck(std::make_pair(NotOverflow, Kind), Ops);
} else
CGF.EmitTrapCheck(Builder.CreateNot(overflow));
return result;
@@ -2336,7 +2397,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
if (isSubtraction)
index = CGF.Builder.CreateNeg(index, "idx.neg");
- if (CGF.SanOpts->ArrayBounds)
+ if (CGF.SanOpts.has(SanitizerKind::ArrayBounds))
CGF.EmitBoundsCheck(op.E, pointerOperand, index, indexOperand->getType(),
/*Accessed*/ false);
@@ -2476,7 +2537,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateAdd(op.LHS, op.RHS, "add");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2484,7 +2545,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2506,7 +2568,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
case LangOptions::SOB_Defined:
return Builder.CreateSub(op.LHS, op.RHS, "sub");
case LangOptions::SOB_Undefined:
- if (!CGF.SanOpts->SignedIntegerOverflow)
+ if (!CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow))
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
// Fall through.
case LangOptions::SOB_Trapping:
@@ -2514,7 +2576,8 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
}
- if (op.Ty->isUnsignedIntegerType() && CGF.SanOpts->UnsignedIntegerOverflow)
+ if (op.Ty->isUnsignedIntegerType() &&
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2601,7 +2664,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
@@ -2638,7 +2701,7 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
Valid = P;
}
- EmitBinOpCheck(Valid, Ops);
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
if (CGF.getLangOpts().OpenCL)
@@ -2654,10 +2717,12 @@ Value *ScalarExprEmitter::EmitShr(const BinOpInfo &Ops) {
if (Ops.LHS->getType() != RHS->getType())
RHS = Builder.CreateIntCast(RHS, Ops.LHS->getType(), false, "sh_prom");
- if (CGF.SanOpts->Shift && !CGF.getLangOpts().OpenCL &&
+ if (CGF.SanOpts.has(SanitizerKind::Shift) && !CGF.getLangOpts().OpenCL &&
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
- EmitBinOpCheck(Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS)), Ops);
+ llvm::Value *Valid =
+ Builder.CreateICmpULE(RHS, GetWidthMinusOneValue(Ops.LHS, RHS));
+ EmitBinOpCheck(std::make_pair(Valid, SanitizerKind::Shift), Ops);
}
// OpenCL 6.3j: shift values are effectively % word size of LHS.
@@ -2708,6 +2773,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
TestAndClearIgnoreResultAssign();
Value *Result;
QualType LHSTy = E->getLHS()->getType();
+ QualType RHSTy = E->getRHS()->getType();
if (const MemberPointerType *MPT = LHSTy->getAs<MemberPointerType>()) {
assert(E->getOpcode() == BO_EQ ||
E->getOpcode() == BO_NE);
@@ -2715,7 +2781,7 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
Value *RHS = CGF.EmitScalarExpr(E->getRHS());
Result = CGF.CGM.getCXXABI().EmitMemberPointerComparison(
CGF, LHS, RHS, MPT, E->getOpcode() == BO_NE);
- } else if (!LHSTy->isAnyComplexType()) {
+ } else if (!LHSTy->isAnyComplexType() && !RHSTy->isAnyComplexType()) {
Value *LHS = Visit(E->getLHS());
Value *RHS = Visit(E->getRHS());
@@ -2803,10 +2869,28 @@ Value *ScalarExprEmitter::EmitCompare(const BinaryOperator *E,unsigned UICmpOpc,
} else {
// Complex Comparison: can only be an equality comparison.
- CodeGenFunction::ComplexPairTy LHS = CGF.EmitComplexExpr(E->getLHS());
- CodeGenFunction::ComplexPairTy RHS = CGF.EmitComplexExpr(E->getRHS());
-
- QualType CETy = LHSTy->getAs<ComplexType>()->getElementType();
+ CodeGenFunction::ComplexPairTy LHS, RHS;
+ QualType CETy;
+ if (auto *CTy = LHSTy->getAs<ComplexType>()) {
+ LHS = CGF.EmitComplexExpr(E->getLHS());
+ CETy = CTy->getElementType();
+ } else {
+ LHS.first = Visit(E->getLHS());
+ LHS.second = llvm::Constant::getNullValue(LHS.first->getType());
+ CETy = LHSTy;
+ }
+ if (auto *CTy = RHSTy->getAs<ComplexType>()) {
+ RHS = CGF.EmitComplexExpr(E->getRHS());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy,
+ CTy->getElementType()) &&
+ "The element types must always match.");
+ (void)CTy;
+ } else {
+ RHS.first = Visit(E->getRHS());
+ RHS.second = llvm::Constant::getNullValue(RHS.first->getType());
+ assert(CGF.getContext().hasSameUnqualifiedType(CETy, RHSTy) &&
+ "The element types must always match.");
+ }
Value *ResultR, *ResultI;
if (CETy->isRealFloatingType()) {
@@ -2959,7 +3043,7 @@ Value *ScalarExprEmitter::VisitBinLAnd(const BinaryOperator *E) {
// Emit an unconditional branch from this block to ContBlock.
{
// There is no need to emit line number for unconditional branch.
- SuppressDebugLocation S(Builder);
+ ApplyDebugLocation DL(CGF);
CGF.EmitBlock(ContBlock);
}
// Insert an entry into the phi node for the edge with the value of RHSCond.
@@ -3232,8 +3316,12 @@ Value *ScalarExprEmitter::VisitVAArgExpr(VAArgExpr *VE) {
llvm::Value *Val = Builder.CreateLoad(ArgPtr);
// If EmitVAArg promoted the type, we must truncate it.
- if (ArgTy != Val->getType())
- Val = Builder.CreateTrunc(Val, ArgTy);
+ if (ArgTy != Val->getType()) {
+ if (ArgTy->isPointerTy() && !Val->getType()->isPointerTy())
+ Val = Builder.CreateIntToPtr(Val, ArgTy);
+ else
+ Val = Builder.CreateTrunc(Val, ArgTy);
+ }
return Val;
}
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index a273f1d4dda8..89f43c281590 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -24,40 +24,39 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs) {
Attrs.VectorizerEnable == LoopAttributes::VecUnspecified)
return nullptr;
- SmallVector<Value *, 4> Args;
+ SmallVector<Metadata *, 4> Args;
// Reserve operand 0 for loop id self reference.
MDNode *TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode);
// Setting vectorizer.width
if (Attrs.VectorizerWidth > 0) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.vectorize.width"),
- ConstantInt::get(Type::getInt32Ty(Ctx),
- Attrs.VectorizerWidth) };
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.vectorize.width"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.VectorizerWidth))};
Args.push_back(MDNode::get(Ctx, Vals));
}
// Setting vectorizer.unroll
if (Attrs.VectorizerUnroll > 0) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.interleave.count"),
- ConstantInt::get(Type::getInt32Ty(Ctx),
- Attrs.VectorizerUnroll) };
+ Metadata *Vals[] = {MDString::get(Ctx, "llvm.loop.interleave.count"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt32Ty(Ctx), Attrs.VectorizerUnroll))};
Args.push_back(MDNode::get(Ctx, Vals));
}
// Setting vectorizer.enable
if (Attrs.VectorizerEnable != LoopAttributes::VecUnspecified) {
- Value *Vals[] = { MDString::get(Ctx, "llvm.loop.vectorize.enable"),
- ConstantInt::get(Type::getInt1Ty(Ctx),
- (Attrs.VectorizerEnable ==
- LoopAttributes::VecEnable)) };
+ Metadata *Vals[] = {
+ MDString::get(Ctx, "llvm.loop.vectorize.enable"),
+ ConstantAsMetadata::get(ConstantInt::get(
+ Type::getInt1Ty(Ctx),
+ (Attrs.VectorizerEnable == LoopAttributes::VecEnable)))};
Args.push_back(MDNode::get(Ctx, Vals));
}
- MDNode *LoopID = MDNode::get(Ctx, Args);
- assert(LoopID->use_empty() && "LoopID should not be used");
-
// Set the first operand to itself.
+ MDNode *LoopID = MDNode::get(Ctx, Args);
LoopID->replaceOperandWith(0, LoopID);
MDNode::deleteTemporary(TempNode);
return LoopID;
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index 2f6f172e047a..b1693996507e 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -12,8 +12,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_CGLOOPINFO_H
-#define CLANG_CODEGEN_CGLOOPINFO_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
+#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
@@ -133,4 +133,4 @@ private:
} // end namespace CodeGen
} // end namespace clang
-#endif // CLANG_CODEGEN_CGLOOPINFO_H
+#endif
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 8ca80808e007..34c6d94f8817 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -60,7 +60,6 @@ llvm::Value *CodeGenFunction::EmitObjCStringLiteral(const ObjCStringLiteral *E)
llvm::Value *
CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// Generate the correct selector for this literal's concrete type.
- const Expr *SubExpr = E->getSubExpr();
// Get the method.
const ObjCMethodDecl *BoxingMethod = E->getBoxingMethod();
assert(BoxingMethod && "BoxingMethod is null");
@@ -73,12 +72,9 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
CGObjCRuntime &Runtime = CGM.getObjCRuntime();
const ObjCInterfaceDecl *ClassDecl = BoxingMethod->getClassInterface();
llvm::Value *Receiver = Runtime.GetClass(*this, ClassDecl);
-
- const ParmVarDecl *argDecl = *BoxingMethod->param_begin();
- QualType ArgQT = argDecl->getType().getUnqualifiedType();
- RValue RV = EmitAnyExpr(SubExpr);
+
CallArgList Args;
- Args.add(RV, ArgQT);
+ EmitCallArgs(Args, BoxingMethod, E->arg_begin(), E->arg_end());
RValue result = Runtime.GenerateMessageSend(
*this, ReturnValueSlot(), BoxingMethod->getReturnType(), Sel, Receiver,
@@ -461,8 +457,8 @@ struct FinishARCDealloc : EHScopeStack::Cleanup {
/// the LLVM function and sets the other context used by
/// CodeGenFunction.
void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
- const ObjCContainerDecl *CD,
- SourceLocation StartLoc) {
+ const ObjCContainerDecl *CD) {
+ SourceLocation StartLoc = OMD->getLocStart();
FunctionArgList args;
// Check if we should generate debug info for this method.
if (OMD->hasAttr<NoDebugAttr>())
@@ -480,6 +476,7 @@ void CodeGenFunction::StartObjCMethod(const ObjCMethodDecl *OMD,
args.push_back(PI);
CurGD = OMD;
+ CurEHLocation = OMD->getLocEnd();
StartFunction(OMD, OMD->getReturnType(), Fn, FI, args,
OMD->getLocation(), StartLoc);
@@ -501,15 +498,13 @@ static llvm::Value *emitARCRetainLoadOfScalar(CodeGenFunction &CGF,
/// Generate an Objective-C method. An Objective-C method is a C function with
/// its pointer, name, and types registered in the class struture.
void CodeGenFunction::GenerateObjCMethod(const ObjCMethodDecl *OMD) {
- StartObjCMethod(OMD, OMD->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, OMD->getClassInterface());
PGO.assignRegionCounters(OMD, CurFn);
assert(isa<CompoundStmt>(OMD->getBody()));
RegionCounter Cnt = getPGORegionCounter(OMD->getBody());
Cnt.beginRegion(Builder);
EmitCompoundStmtWithoutScope(*cast<CompoundStmt>(OMD->getBody()));
FinishFunction(OMD->getBodyRBrace());
- PGO.emitInstrumentationData();
- PGO.destroyRegionCounters();
}
/// emitStructGetterCall - Call the runtime function to load a property
@@ -744,12 +739,12 @@ PropertyImplStrategy::PropertyImplStrategy(CodeGenModule &CGM,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- llvm::Constant *AtomicHelperFn =
- GenerateObjCAtomicGetterCopyHelperFunction(PID);
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicGetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCGetterBody(IMP, PID, OMD, AtomicHelperFn);
@@ -1273,12 +1268,12 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
/// is illegal within a category.
void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyImplDecl *PID) {
- llvm::Constant *AtomicHelperFn =
- GenerateObjCAtomicSetterCopyHelperFunction(PID);
+ llvm::Constant *AtomicHelperFn =
+ CodeGenFunction(CGM).GenerateObjCAtomicSetterCopyHelperFunction(PID);
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- StartObjCMethod(OMD, IMP->getClassInterface(), OMD->getLocStart());
+ StartObjCMethod(OMD, IMP->getClassInterface());
generateObjCSetterBody(IMP, PID, AtomicHelperFn);
@@ -1356,7 +1351,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
ObjCMethodDecl *MD,
bool ctor) {
MD->createImplicitParams(CGM.getContext(), IMP->getClassInterface());
- StartObjCMethod(MD, IMP->getClassInterface(), MD->getLocStart());
+ StartObjCMethod(MD, IMP->getClassInterface());
// Emit .cxx_construct.
if (ctor) {
@@ -1757,7 +1752,7 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
llvm::Constant *&fn = CGM.getARCEntrypoints().clang_arc_use;
if (!fn) {
llvm::FunctionType *fnType =
- llvm::FunctionType::get(CGM.VoidTy, ArrayRef<llvm::Type*>(), true);
+ llvm::FunctionType::get(CGM.VoidTy, None, true);
fn = CGM.CreateRuntimeFunction(fnType, "clang.arc.use");
}
@@ -1940,9 +1935,8 @@ llvm::Value *CodeGenFunction::EmitARCRetainBlock(llvm::Value *value,
= cast<llvm::CallInst>(result->stripPointerCasts());
assert(call->getCalledValue() == CGM.getARCEntrypoints().objc_retainBlock);
- SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.arc.copy_on_escape",
- llvm::MDNode::get(Builder.getContext(), args));
+ llvm::MDNode::get(Builder.getContext(), None));
}
return result;
@@ -1984,8 +1978,8 @@ CodeGenFunction::EmitARCRetainAutoreleasedReturnValue(llvm::Value *value) {
"clang.arc.retainAutoreleasedReturnValueMarker");
assert(metadata->getNumOperands() <= 1);
if (metadata->getNumOperands() == 0) {
- llvm::Value *string = llvm::MDString::get(getLLVMContext(), assembly);
- metadata->addOperand(llvm::MDNode::get(getLLVMContext(), string));
+ metadata->addOperand(llvm::MDNode::get(
+ getLLVMContext(), llvm::MDString::get(getLLVMContext(), assembly)));
}
}
}
@@ -2018,9 +2012,8 @@ void CodeGenFunction::EmitARCRelease(llvm::Value *value,
llvm::CallInst *call = EmitNounwindRuntimeCall(fn, value);
if (precise == ARCImpreciseLifetime) {
- SmallVector<llvm::Value*,1> args;
call->setMetadata("clang.imprecise_release",
- llvm::MDNode::get(Builder.getContext(), args));
+ llvm::MDNode::get(Builder.getContext(), None));
}
}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 619a66ab4a69..c0dc3b8002d8 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -58,7 +58,7 @@ class LazyRuntimeFunction {
/// Initialises the lazy function with the name, return type, and the types
/// of the arguments.
- END_WITH_NULL
+ LLVM_END_WITH_NULL
void init(CodeGenModule *Mod, const char *name,
llvm::Type *RetTy, ...) {
CGM =Mod;
@@ -391,8 +391,8 @@ private:
///
/// This structure is used by both classes and categories, and contains a next
/// pointer allowing them to be chained together in a linked list.
- llvm::Constant *GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
+ llvm::Constant *GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList);
@@ -875,8 +875,8 @@ void CGObjCGNU::EmitClassRef(const std::string &className) {
llvm::GlobalValue::WeakAnyLinkage, ClassSymbol, symbolRef);
}
-static std::string SymbolNameForMethod(const StringRef &ClassName,
- const StringRef &CategoryName, const Selector MethodName,
+static std::string SymbolNameForMethod( StringRef ClassName,
+ StringRef CategoryName, const Selector MethodName,
bool isClassMethod) {
std::string MethodNameColonStripped = MethodName.getAsString();
std::replace(MethodNameColonStripped.begin(), MethodNameColonStripped.end(),
@@ -1296,11 +1296,11 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
imp = EnforceType(Builder, imp, MSI.MessengerType);
- llvm::Value *impMD[] = {
+ llvm::Metadata *impMD[] = {
llvm::MDString::get(VMContext, Sel.getAsString()),
llvm::MDString::get(VMContext, Class->getSuperClass()->getNameAsString()),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsClassMessage)
- };
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
llvm::Instruction *call;
@@ -1371,12 +1371,11 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
cmd = EnforceType(Builder, cmd, SelectorTy);
Receiver = EnforceType(Builder, Receiver, IdTy);
- llvm::Value *impMD[] = {
- llvm::MDString::get(VMContext, Sel.getAsString()),
- llvm::MDString::get(VMContext, Class ? Class->getNameAsString() :""),
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext),
- Class!=nullptr)
- };
+ llvm::Metadata *impMD[] = {
+ llvm::MDString::get(VMContext, Sel.getAsString()),
+ llvm::MDString::get(VMContext, Class ? Class->getNameAsString() : ""),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt1Ty(VMContext), Class != nullptr))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
CallArgList ActualArgs;
@@ -1463,8 +1462,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
/// Generates a MethodList. Used in construction of a objc_class and
/// objc_category structures.
llvm::Constant *CGObjCGNU::
-GenerateMethodList(const StringRef &ClassName,
- const StringRef &CategoryName,
+GenerateMethodList(StringRef ClassName,
+ StringRef CategoryName,
ArrayRef<Selector> MethodSels,
ArrayRef<llvm::Constant *> MethodTypes,
bool isClassMethodList) {
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 6f0979d06c53..f91e8e15b039 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -106,7 +106,7 @@ private:
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
llvm::Type *longDoubleType = llvm::Type::getX86_FP80Ty(VMContext);
llvm::Type *resultType =
- llvm::StructType::get(longDoubleType, longDoubleType, NULL);
+ llvm::StructType::get(longDoubleType, longDoubleType, nullptr);
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(resultType,
params, true),
@@ -244,9 +244,9 @@ public:
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(IdType, false, Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ IdType, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -264,10 +264,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
@@ -291,10 +290,9 @@ public:
Params.push_back(IdType);
Params.push_back(Ctx.getPointerDiffType()->getCanonicalTypeUnqualified());
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
const char *name;
if (atomic && copy)
name = "objc_setProperty_atomic_copy";
@@ -319,10 +317,9 @@ public:
Params.push_back(Ctx.BoolTy);
Params.push_back(Ctx.BoolTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_copyStruct");
}
@@ -339,7 +336,7 @@ public:
Params.push_back(Ctx.VoidPtrTy);
Params.push_back(Ctx.VoidPtrTy);
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false, false,
Params,
FunctionType::ExtInfo(),
RequiredArgs::All));
@@ -353,10 +350,9 @@ public:
SmallVector<CanQualType,1> Params;
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
llvm::FunctionType *FTy =
- Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(Ctx.VoidTy, false,
- Params,
- FunctionType::ExtInfo(),
- RequiredArgs::All));
+ Types.GetFunctionType(Types.arrangeLLVMFunctionInfo(
+ Ctx.VoidTy, false, false, Params, FunctionType::ExtInfo(),
+ RequiredArgs::All));
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
@@ -2446,11 +2442,11 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
printf("\n");
}
}
-
- llvm::GlobalVariable * Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap,false),
- "__TEXT,__objc_classname,cstring_literals", 1, true);
+
+ llvm::GlobalVariable *Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ "__TEXT,__objc_classname,cstring_literals", 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2553,14 +2549,6 @@ llvm::Constant *CGObjCCommonMac::GetProtocolRef(const ObjCProtocolDecl *PD) {
return GetOrEmitProtocolRef(PD);
}
-static void assertPrivateName(const llvm::GlobalValue *GV) {
- StringRef NameRef = GV->getName();
- (void)NameRef;
- assert(NameRef[0] == '\01' && (NameRef[1] == 'L' || NameRef[1] == 'l'));
- assert(GV->getVisibility() == llvm::GlobalValue::DefaultVisibility);
- assert(GV->hasPrivateLinkage());
-}
-
/*
// Objective-C 1.0 extensions
struct _objc_protocol {
@@ -2624,19 +2612,17 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
OptMethodTypesExt.begin(), OptMethodTypesExt.end());
llvm::Constant *Values[] = {
- EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
- MethodTypesExt),
- GetClassName(PD->getObjCRuntimeNameAsString()),
- EmitProtocolList("\01L_OBJC_PROTOCOL_REFS_" + PD->getName(),
- PD->protocol_begin(),
- PD->protocol_end()),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods)
- };
+ EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
+ MethodTypesExt),
+ GetClassName(PD->getObjCRuntimeNameAsString()),
+ EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(), PD->protocol_end()),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods)};
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values);
@@ -2645,18 +2631,15 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
assert(Entry->hasPrivateLinkage());
Entry->setInitializer(Init);
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Init, "OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
Protocols[PD->getIdentifier()] = Entry;
}
- assertPrivateName(Entry);
CGM.addCompilerUsedGlobal(Entry);
return Entry;
@@ -2669,16 +2652,13 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
// We use the initializer as a marker of whether this is a forward
// reference or not. At module finalization we add the empty
// contents for protocols which were referenced but never defined.
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy, false,
- llvm::GlobalValue::PrivateLinkage,
- nullptr,
- "\01L_OBJC_PROTOCOL_" + PD->getName());
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ nullptr, "OBJC_PROTOCOL_" + PD->getName());
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
// FIXME: Is this necessary? Why only for protocol?
Entry->setAlignment(4);
}
- assertPrivateName(Entry);
return Entry;
}
@@ -2700,19 +2680,17 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
uint64_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_INSTANCE_METHODS_OPT_"
- + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods),
- EmitMethodDescList("\01L_OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods),
- EmitPropertyList("\01L_OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr,
- PD, ObjCTypes),
- EmitProtocolMethodTypes("\01L_OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
- MethodTypesExt, ObjCTypes)
- };
+ llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
+ EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ OptInstanceMethods),
+ EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ OptClassMethods),
+ EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
+ ObjCTypes),
+ EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ MethodTypesExt, ObjCTypes)};
// Return null if no extension bits are used.
if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
@@ -2776,7 +2754,7 @@ PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
for (const auto *P : Proto->protocols())
PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes);
for (const auto *PD : Proto->properties()) {
- if (!PropertySet.insert(PD->getIdentifier()))
+ if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
llvm::Constant *Prop[] = {
GetPropertyName(PD->getIdentifier()),
@@ -2941,19 +2919,16 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Values[0] = GetClassName(OCD->getName());
Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString());
LazySymbols.insert(Interface->getIdentifier());
- Values[2] =
- EmitMethodList("\01L_OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[3] =
- EmitMethodList("\01L_OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+ Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_inst_meth,regular,no_dead_strip",
+ InstanceMethods);
+ Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
+ "__OBJC,__cat_cls_meth,regular,no_dead_strip",
+ ClassMethods);
if (Category) {
Values[4] =
- EmitProtocolList("\01L_OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
- Category->protocol_begin(),
- Category->protocol_end());
+ EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
+ Category->protocol_begin(), Category->protocol_end());
} else {
Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
}
@@ -2971,9 +2946,8 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
Values);
llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_CATEGORY_" + ExtName.str(), Init,
- "__OBJC,__category,regular,no_dead_strip",
- 4, true);
+ CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
+ "__OBJC,__category,regular,no_dead_strip", 4, true);
DefinedCategories.push_back(GV);
DefinedCategoryNames.insert(ExtName.str());
// method definition entries must be clear for next implementation.
@@ -3040,9 +3014,9 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
ObjCInterfaceDecl *Interface =
const_cast<ObjCInterfaceDecl*>(ID->getClassInterface());
llvm::Constant *Protocols =
- EmitProtocolList("\01L_OBJC_CLASS_PROTOCOLS_" + ID->getName(),
- Interface->all_referenced_protocol_begin(),
- Interface->all_referenced_protocol_end());
+ EmitProtocolList("OBJC_CLASS_PROTOCOLS_" + ID->getName(),
+ Interface->all_referenced_protocol_begin(),
+ Interface->all_referenced_protocol_end());
unsigned Flags = FragileABI_Class_Factory;
if (ID->hasNonZeroConstructors() || ID->hasDestructors())
Flags |= FragileABI_Class_HasCXXStructors;
@@ -3093,10 +3067,9 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
Values[ 6] = EmitIvarList(ID, false);
- Values[ 7] =
- EmitMethodList("\01L_OBJC_INSTANCE_METHODS_" + ID->getName(),
- "__OBJC,__inst_meth,regular,no_dead_strip",
- InstanceMethods);
+ Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
+ "__OBJC,__inst_meth,regular,no_dead_strip",
+ InstanceMethods);
// cache is always NULL.
Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
Values[ 9] = Protocols;
@@ -3104,7 +3077,7 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
Values[11] = EmitClassExtension(ID);
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
Values);
- std::string Name("\01L_OBJC_CLASS_");
+ std::string Name("OBJC_CLASS_");
Name += ClassName;
const char *Section = "__OBJC,__class,regular,no_dead_strip";
// Check for a forward reference.
@@ -3118,7 +3091,6 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
CGM.addCompilerUsedGlobal(GV);
} else
GV = CreateMetadataVar(Name, Init, Section, 4, true);
- assertPrivateName(GV);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
// method definition entries must be clear for next implementation.
@@ -3158,10 +3130,9 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
Values[ 6] = EmitIvarList(ID, true);
- Values[ 7] =
- EmitMethodList("\01L_OBJC_CLASS_METHODS_" + ID->getNameAsString(),
- "__OBJC,__cls_meth,regular,no_dead_strip",
- Methods);
+ Values[7] =
+ EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(),
+ "__OBJC,__cls_meth,regular,no_dead_strip", Methods);
// cache is always NULL.
Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
Values[ 9] = Protocols;
@@ -3172,7 +3143,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
Values);
- std::string Name("\01L_OBJC_METACLASS_");
+ std::string Name("OBJC_METACLASS_");
Name += ID->getName();
// Check for a forward reference.
@@ -3186,7 +3157,6 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::GlobalValue::PrivateLinkage,
Init, Name);
}
- assertPrivateName(GV);
GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
GV->setAlignment(4);
CGM.addCompilerUsedGlobal(GV);
@@ -3195,7 +3165,7 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
}
llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
- std::string Name = "\01L_OBJC_METACLASS_" + ID->getNameAsString();
+ std::string Name = "OBJC_METACLASS_" + ID->getNameAsString();
// FIXME: Should we look these up somewhere other than the module. Its a bit
// silly since we only generate these while processing an implementation, so
@@ -3213,12 +3183,11 @@ llvm::Constant *CGObjCMac::EmitMetaClassRef(const ObjCInterfaceDecl *ID) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- assertPrivateName(GV);
return GV;
}
llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
- std::string Name = "\01L_OBJC_CLASS_" + ID->getNameAsString();
+ std::string Name = "OBJC_CLASS_" + ID->getNameAsString();
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (!GV)
@@ -3228,7 +3197,6 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward class metadata reference has incorrect type.");
- assertPrivateName(GV);
return GV;
}
@@ -3256,9 +3224,8 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID) {
llvm::Constant *Init =
llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
- return CreateMetadataVar("\01L_OBJC_CLASSEXT_" + ID->getName(),
- Init, "__OBJC,__class_ext,regular,no_dead_strip",
- 4, true);
+ return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
+ "__OBJC,__class_ext,regular,no_dead_strip", 4, true);
}
/*
@@ -3314,13 +3281,13 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
llvm::GlobalVariable *GV;
if (ForClass)
- GV = CreateMetadataVar("\01L_OBJC_CLASS_VARIABLES_" + ID->getName(),
- Init, "__OBJC,__class_vars,regular,no_dead_strip",
- 4, true);
+ GV =
+ CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__class_vars,regular,no_dead_strip", 4, true);
else
- GV = CreateMetadataVar("\01L_OBJC_INSTANCE_VARIABLES_" + ID->getName(),
- Init, "__OBJC,__instance_vars,regular,no_dead_strip",
- 4, true);
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
+ "__OBJC,__instance_vars,regular,no_dead_strip", 4,
+ true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
@@ -3401,7 +3368,6 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Ty, false,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- assertPrivateName(GV);
if (!Section.empty())
GV->setSection(Section);
if (Align)
@@ -4308,11 +4274,10 @@ void CGObjCCommonMac::EmitImageInfo() {
eImageInfo_GCOnly);
// Require that GC be specified and set to eImageInfo_GarbageCollected.
- llvm::Value *Ops[2] = {
- llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
- llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext),
- eImageInfo_GarbageCollected)
- };
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(VMContext, "Objective-C Garbage Collection"),
+ llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(VMContext), eImageInfo_GarbageCollected))};
Mod.addModuleFlag(llvm::Module::Require, "Objective-C GC Only",
llvm::MDNode::get(VMContext, Ops));
}
@@ -4347,10 +4312,9 @@ void CGObjCMac::EmitModuleInfo() {
GetClassName(StringRef("")),
EmitModuleSymbols()
};
- CreateMetadataVar("\01L_OBJC_MODULES",
+ CreateMetadataVar("OBJC_MODULES",
llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
- "__OBJC,__module_info,regular,no_dead_strip",
- 4, true);
+ "__OBJC,__module_info,regular,no_dead_strip", 4, true);
}
llvm::Constant *CGObjCMac::EmitModuleSymbols() {
@@ -4393,10 +4357,8 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- llvm::GlobalVariable *GV =
- CreateMetadataVar("\01L_OBJC_SYMBOLS", Init,
- "__OBJC,__symbols,regular,no_dead_strip",
- 4, true);
+ llvm::GlobalVariable *GV = CreateMetadataVar(
+ "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip", 4, true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
@@ -4410,10 +4372,9 @@ llvm::Value *CGObjCMac::EmitClassRefFromId(CodeGenFunction &CGF,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetClassName(II->getName()),
ObjCTypes.ClassPtrTy);
- Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_REFERENCES_", Casted,
- "__OBJC,__cls_refs,literal_pointers,no_dead_strip",
- 4, true);
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_REFERENCES_", Casted,
+ "__OBJC,__cls_refs,literal_pointers,no_dead_strip", 4, true);
}
return CGF.Builder.CreateLoad(Entry);
@@ -4437,10 +4398,9 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
- Entry =
- CreateMetadataVar("\01L_OBJC_SELECTOR_REFERENCES_", Casted,
- "__OBJC,__message_refs,literal_pointers,no_dead_strip",
- 4, true);
+ Entry = CreateMetadataVar(
+ "OBJC_SELECTOR_REFERENCES_", Casted,
+ "__OBJC,__message_refs,literal_pointers,no_dead_strip", 4, true);
Entry->setExternallyInitialized(true);
}
@@ -4452,13 +4412,12 @@ llvm::Value *CGObjCMac::EmitSelector(CodeGenFunction &CGF, Selector Sel,
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::GlobalVariable *&Entry = ClassNames[RuntimeName];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext,
- RuntimeName),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_classname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, RuntimeName),
+ ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4772,14 +4731,13 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayoutBitmap(std::string &BitMap) {
// null terminate string.
unsigned char zero = 0;
BitMap += zero;
-
- llvm::GlobalVariable * Entry =
- CreateMetadataVar("\01L_OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap,false),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_classname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+
+ llvm::GlobalVariable *Entry = CreateMetadataVar(
+ "OBJC_CLASS_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, BitMap, false),
+ ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4864,12 +4822,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
// FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_NAME_",
- llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methname,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_NAME_",
+ llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4886,12 +4844,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methtype,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4905,12 +4863,12 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
if (!Entry)
- Entry = CreateMetadataVar("\01L_OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ?
- "__TEXT,__objc_methtype,cstring_literals" :
- "__TEXT,__cstring,cstring_literals"),
- 1, true);
+ Entry = CreateMetadataVar(
+ "OBJC_METH_VAR_TYPE_",
+ llvm::ConstantDataArray::getString(VMContext, TypeStr),
+ ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals"),
+ 1, true);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4921,7 +4879,7 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
if (!Entry)
Entry = CreateMetadataVar(
- "\01L_OBJC_PROP_NAME_ATTR_",
+ "OBJC_PROP_NAME_ATTR_",
llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
"__TEXT,__cstring,cstring_literals", 1, true);
@@ -4967,7 +4925,6 @@ void CGObjCMac::FinishModule() {
Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
Values[3] = Values[4] =
llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- assertPrivateName(I->second);
I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
Values));
CGM.addCompilerUsedGlobal(I->second);
@@ -5027,8 +4984,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// arm64 targets use "int" ivar offset variables. All others,
// including OS X x86_64 and Windows x86_64, use "long" ivar offsets.
- if (CGM.getTarget().getTriple().getArch() == llvm::Triple::arm64 ||
- CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
+ if (CGM.getTarget().getTriple().getArch() == llvm::Triple::aarch64)
IvarOffsetVarTy = IntTy;
else
IvarOffsetVarTy = LongTy;
@@ -5072,7 +5028,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// char *attributes;
// }
PropertyTy = llvm::StructType::create("struct._prop_t",
- Int8PtrTy, Int8PtrTy, NULL);
+ Int8PtrTy, Int8PtrTy, nullptr);
// struct _prop_list_t {
// uint32_t entsize; // sizeof(struct _prop_t)
@@ -5081,7 +5037,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// }
PropertyListTy =
llvm::StructType::create("struct._prop_list_t", IntTy, IntTy,
- llvm::ArrayType::get(PropertyTy, 0), NULL);
+ llvm::ArrayType::get(PropertyTy, 0), nullptr);
// struct _prop_list_t *
PropertyListPtrTy = llvm::PointerType::getUnqual(PropertyListTy);
@@ -5092,7 +5048,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// }
MethodTy = llvm::StructType::create("struct._objc_method",
SelectorPtrTy, Int8PtrTy, Int8PtrTy,
- NULL);
+ nullptr);
// struct _objc_cache *
CacheTy = llvm::StructType::create(VMContext, "struct._objc_cache");
@@ -5108,16 +5064,15 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// }
MethodDescriptionTy =
llvm::StructType::create("struct._objc_method_description",
- SelectorPtrTy, Int8PtrTy, NULL);
+ SelectorPtrTy, Int8PtrTy, nullptr);
// struct _objc_method_description_list {
// int count;
// struct _objc_method_description[1];
// }
- MethodDescriptionListTy =
- llvm::StructType::create("struct._objc_method_description_list",
- IntTy,
- llvm::ArrayType::get(MethodDescriptionTy, 0),NULL);
+ MethodDescriptionListTy = llvm::StructType::create(
+ "struct._objc_method_description_list", IntTy,
+ llvm::ArrayType::get(MethodDescriptionTy, 0), nullptr);
// struct _objc_method_description_list *
MethodDescriptionListPtrTy =
@@ -5136,7 +5091,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::StructType::create("struct._objc_protocol_extension",
IntTy, MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy, PropertyListPtrTy,
- Int8PtrPtrTy, NULL);
+ Int8PtrPtrTy, nullptr);
// struct _objc_protocol_extension *
ProtocolExtensionPtrTy = llvm::PointerType::getUnqual(ProtocolExtensionTy);
@@ -5151,7 +5106,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ProtocolListTy->setBody(llvm::PointerType::getUnqual(ProtocolListTy),
LongTy,
llvm::ArrayType::get(ProtocolTy, 0),
- NULL);
+ nullptr);
// struct _objc_protocol {
// struct _objc_protocol_extension *isa;
@@ -5164,7 +5119,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::PointerType::getUnqual(ProtocolListTy),
MethodDescriptionListPtrTy,
MethodDescriptionListPtrTy,
- NULL);
+ nullptr);
// struct _objc_protocol_list *
ProtocolListPtrTy = llvm::PointerType::getUnqual(ProtocolListTy);
@@ -5179,7 +5134,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// int ivar_offset;
// }
IvarTy = llvm::StructType::create("struct._objc_ivar",
- Int8PtrTy, Int8PtrTy, IntTy, NULL);
+ Int8PtrTy, Int8PtrTy, IntTy, nullptr);
// struct _objc_ivar_list *
IvarListTy =
@@ -5194,7 +5149,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// struct _objc_class_extension *
ClassExtensionTy =
llvm::StructType::create("struct._objc_class_extension",
- IntTy, Int8PtrTy, PropertyListPtrTy, NULL);
+ IntTy, Int8PtrTy, PropertyListPtrTy, nullptr);
ClassExtensionPtrTy = llvm::PointerType::getUnqual(ClassExtensionTy);
ClassTy = llvm::StructType::create(VMContext, "struct._objc_class");
@@ -5225,7 +5180,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ProtocolListPtrTy,
Int8PtrTy,
ClassExtensionPtrTy,
- NULL);
+ nullptr);
ClassPtrTy = llvm::PointerType::getUnqual(ClassTy);
@@ -5241,7 +5196,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
llvm::StructType::create("struct._objc_category",
Int8PtrTy, Int8PtrTy, MethodListPtrTy,
MethodListPtrTy, ProtocolListPtrTy,
- IntTy, PropertyListPtrTy, NULL);
+ IntTy, PropertyListPtrTy, nullptr);
// Global metadata structures
@@ -5255,7 +5210,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
SymtabTy =
llvm::StructType::create("struct._objc_symtab",
LongTy, SelectorPtrTy, ShortTy, ShortTy,
- llvm::ArrayType::get(Int8PtrTy, 0), NULL);
+ llvm::ArrayType::get(Int8PtrTy, 0), nullptr);
SymtabPtrTy = llvm::PointerType::getUnqual(SymtabTy);
// struct _objc_module {
@@ -5266,7 +5221,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
// }
ModuleTy =
llvm::StructType::create("struct._objc_module",
- LongTy, LongTy, Int8PtrTy, SymtabPtrTy, NULL);
+ LongTy, LongTy, Int8PtrTy, SymtabPtrTy, nullptr);
// FIXME: This is the size of the setjmp buffer and should be target
@@ -5279,7 +5234,7 @@ ObjCTypesHelper::ObjCTypesHelper(CodeGen::CodeGenModule &cgm)
ExceptionDataTy =
llvm::StructType::create("struct._objc_exception_data",
llvm::ArrayType::get(CGM.Int32Ty,SetJmpBufferSize),
- StackPtrTy, NULL);
+ StackPtrTy, nullptr);
}
@@ -5292,7 +5247,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
MethodListnfABITy =
llvm::StructType::create("struct.__method_list_t", IntTy, IntTy,
- llvm::ArrayType::get(MethodTy, 0), NULL);
+ llvm::ArrayType::get(MethodTy, 0), nullptr);
// struct method_list_t *
MethodListnfABIPtrTy = llvm::PointerType::getUnqual(MethodListnfABITy);
@@ -5320,7 +5275,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
MethodListnfABIPtrTy, MethodListnfABIPtrTy,
PropertyListPtrTy, IntTy, IntTy, Int8PtrPtrTy,
- NULL);
+ nullptr);
// struct _protocol_t*
ProtocolnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolnfABITy);
@@ -5331,7 +5286,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
ProtocolListnfABITy->setBody(LongTy,
llvm::ArrayType::get(ProtocolnfABIPtrTy, 0),
- NULL);
+ nullptr);
// struct _objc_protocol_list*
ProtocolListnfABIPtrTy = llvm::PointerType::getUnqual(ProtocolListnfABITy);
@@ -5345,7 +5300,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
IvarnfABITy = llvm::StructType::create(
"struct._ivar_t", llvm::PointerType::getUnqual(IvarOffsetVarTy),
- Int8PtrTy, Int8PtrTy, IntTy, IntTy, NULL);
+ Int8PtrTy, Int8PtrTy, IntTy, IntTy, nullptr);
// struct _ivar_list_t {
// uint32 entsize; // sizeof(struct _ivar_t)
@@ -5354,7 +5309,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// }
IvarListnfABITy =
llvm::StructType::create("struct._ivar_list_t", IntTy, IntTy,
- llvm::ArrayType::get(IvarnfABITy, 0), NULL);
+ llvm::ArrayType::get(IvarnfABITy, 0), nullptr);
IvarListnfABIPtrTy = llvm::PointerType::getUnqual(IvarListnfABITy);
@@ -5378,7 +5333,8 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
Int8PtrTy, MethodListnfABIPtrTy,
ProtocolListnfABIPtrTy,
IvarListnfABIPtrTy,
- Int8PtrTy, PropertyListPtrTy, NULL);
+ Int8PtrTy, PropertyListPtrTy,
+ nullptr);
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
llvm::Type *params[] = { ObjectPtrTy, SelectorPtrTy };
@@ -5399,7 +5355,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
CachePtrTy,
llvm::PointerType::getUnqual(ImpnfABITy),
llvm::PointerType::getUnqual(ClassRonfABITy),
- NULL);
+ nullptr);
// LLVM for struct _class_t *
ClassnfABIPtrTy = llvm::PointerType::getUnqual(ClassnfABITy);
@@ -5418,7 +5374,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
MethodListnfABIPtrTy,
ProtocolListnfABIPtrTy,
PropertyListPtrTy,
- NULL);
+ nullptr);
// New types for nonfragile abi messaging.
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -5457,7 +5413,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// };
SuperMessageRefTy =
llvm::StructType::create("struct._super_message_ref_t",
- ImpnfABITy, SelectorPtrTy, NULL);
+ ImpnfABITy, SelectorPtrTy, nullptr);
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
SuperMessageRefPtrTy = llvm::PointerType::getUnqual(SuperMessageRefTy);
@@ -5471,7 +5427,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
EHTypeTy =
llvm::StructType::create("struct._objc_typeinfo",
llvm::PointerType::getUnqual(Int8PtrTy),
- Int8PtrTy, ClassnfABIPtrTy, NULL);
+ Int8PtrTy, ClassnfABIPtrTy, nullptr);
EHTypePtrTy = llvm::PointerType::getUnqual(EHTypeTy);
}
@@ -5504,7 +5460,6 @@ AddModuleClassList(ArrayRef<llvm::GlobalValue*> Container,
llvm::GlobalValue::PrivateLinkage,
Init,
SymbolName);
- assertPrivateName(GV);
GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(SectionName);
CGM.addCompilerUsedGlobal(GV);
@@ -5526,22 +5481,18 @@ void CGObjCNonFragileABIMac::FinishNonFragileABIModule() {
DefinedMetaClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
}
}
-
- AddModuleClassList(DefinedClasses,
- "\01L_OBJC_LABEL_CLASS_$",
+
+ AddModuleClassList(DefinedClasses, "OBJC_LABEL_CLASS_$",
"__DATA, __objc_classlist, regular, no_dead_strip");
- AddModuleClassList(DefinedNonLazyClasses,
- "\01L_OBJC_LABEL_NONLAZY_CLASS_$",
+ AddModuleClassList(DefinedNonLazyClasses, "OBJC_LABEL_NONLAZY_CLASS_$",
"__DATA, __objc_nlclslist, regular, no_dead_strip");
// Build list of all implemented category addresses in array
// L_OBJC_LABEL_CATEGORY_$.
- AddModuleClassList(DefinedCategories,
- "\01L_OBJC_LABEL_CATEGORY_$",
+ AddModuleClassList(DefinedCategories, "OBJC_LABEL_CATEGORY_$",
"__DATA, __objc_catlist, regular, no_dead_strip");
- AddModuleClassList(DefinedNonLazyCategories,
- "\01L_OBJC_LABEL_NONLAZY_CATEGORY_$",
+ AddModuleClassList(DefinedNonLazyCategories, "OBJC_LABEL_NONLAZY_CATEGORY_$",
"__DATA, __objc_nlcatlist, regular, no_dead_strip");
EmitImageInfo();
@@ -5701,7 +5652,6 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
(flags & NonFragileABI_Class_Meta) ?
std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
- assertPrivateName(CLASS_RO_GV);
CLASS_RO_GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
CLASS_RO_GV->setSection("__DATA, __objc_const");
@@ -6038,7 +5988,6 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::GlobalValue::PrivateLinkage,
Init,
ExtCatName.str());
- assertPrivateName(GCATV);
GCATV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
GCATV->setSection("__DATA, __objc_const");
@@ -6099,7 +6048,6 @@ CGObjCNonFragileABIMac::EmitMethodList(Twine Name,
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::PrivateLinkage, Init, Name);
- assertPrivateName(GV);
GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection(Section);
CGM.addCompilerUsedGlobal(GV);
@@ -6218,7 +6166,6 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
llvm::GlobalValue::PrivateLinkage,
Init,
Prefix + OID->getObjCRuntimeNameAsString());
- assertPrivateName(GV);
GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(Init->getType()));
GV->setSection("__DATA, __objc_const");
@@ -6237,7 +6184,7 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
// contents for protocols which were referenced but never defined.
Entry =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::WeakAnyLinkage,
+ false, llvm::GlobalValue::ExternalLinkage,
nullptr,
"\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
Entry->setSection("__DATA,__datacoal_nt,coalesced");
@@ -6348,8 +6295,8 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
Values);
if (Entry) {
- // Already created, update the initializer.
- assert(Entry->hasWeakAnyLinkage());
+ // Already created, fix the linkage and update the initializer.
+ Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
Entry->setInitializer(Init);
} else {
Entry =
@@ -6424,7 +6371,6 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
llvm::GlobalValue::PrivateLinkage,
Init, Name);
- assertPrivateName(GV);
GV->setSection("__DATA, __objc_const");
GV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(Init->getType()));
@@ -6482,7 +6428,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
if (IsIvarOffsetKnownIdempotent(CGF, Ivar))
cast<llvm::LoadInst>(IvarOffsetValue)
->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext, ArrayRef<llvm::Value *>()));
+ llvm::MDNode::get(VMContext, None));
// This could be 32bit int or 64bit integer depending on the architecture.
// Cast it to 64bit integer value, if it is a 32bit integer ivar offset value
@@ -6673,18 +6619,15 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
getClassSymbolPrefix() +
(ID ? ID->getObjCRuntimeNameAsString() : II->getName()).str());
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_REFERENCES_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_classrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6709,18 +6652,15 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
ClassName += ID->getObjCRuntimeNameAsString();
llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(),
ID->isWeakImported());
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
- false, llvm::GlobalValue::PrivateLinkage,
- ClassGV,
- "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(
ObjCTypes.ClassnfABIPtrTy));
Entry->setSection("__DATA, __objc_superrefs, regular, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6736,11 +6676,10 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
MetaClassName += ID->getObjCRuntimeNameAsString();
llvm::GlobalVariable *MetaClassGV =
GetClassGlobal(MetaClassName.str(), Weak);
-
+
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
- MetaClassGV,
- "\01L_OBJC_CLASSLIST_SUP_REFS_$_");
+ MetaClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
Entry->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABIPtrTy));
@@ -6748,7 +6687,6 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
return CGF.Builder.CreateLoad(Entry);
}
@@ -6795,8 +6733,7 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
// If this is a class message the metaclass is passed as the target.
llvm::Value *Target;
if (IsClassMessage)
- Target = EmitMetaClassRef(CGF, Class,
- (isCategoryImpl && Class->isWeakImported()));
+ Target = EmitMetaClassRef(CGF, Class, Class->isWeakImported());
else
Target = EmitSuperClassRef(CGF, Class);
@@ -6826,23 +6763,20 @@ llvm::Value *CGObjCNonFragileABIMac::EmitSelector(CodeGenFunction &CGF,
llvm::Constant *Casted =
llvm::ConstantExpr::getBitCast(GetMethodVarName(Sel),
ObjCTypes.SelectorPtrTy);
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Casted, "\01L_OBJC_SELECTOR_REFERENCES_");
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.SelectorPtrTy,
+ false, llvm::GlobalValue::PrivateLinkage,
+ Casted, "OBJC_SELECTOR_REFERENCES_");
Entry->setExternallyInitialized(true);
Entry->setSection("__DATA, __objc_selrefs, literal_pointers, no_dead_strip");
CGM.addCompilerUsedGlobal(Entry);
}
- assertPrivateName(Entry);
if (lval)
return Entry;
llvm::LoadInst* LI = CGF.Builder.CreateLoad(Entry);
LI->setMetadata(CGM.getModule().getMDKindID("invariant.load"),
- llvm::MDNode::get(VMContext,
- ArrayRef<llvm::Value*>()));
+ llvm::MDNode::get(VMContext, None));
return LI;
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index fc6bee3fabed..475254649866 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OBCJRUNTIME_H
-#define CLANG_CODEGEN_OBCJRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOBJCRUNTIME_H
#include "CGBuilder.h"
#include "CGCall.h"
#include "CGValue.h"
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index 7b675c3bc1e7..0c50b92914b8 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -13,8 +13,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OPENCLRUNTIME_H
-#define CLANG_CODEGEN_OPENCLRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENCLRUNTIME_H
#include "clang/AST/Type.h"
#include "llvm/IR/Type.h"
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 12a3a7790eec..22ee00f2c7ae 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -14,7 +14,9 @@
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
@@ -24,16 +26,81 @@
using namespace clang;
using namespace CodeGen;
+namespace {
+/// \brief API for captured statement code generation in OpenMP constructs.
+class CGOpenMPRegionInfo : public CodeGenFunction::CGCapturedStmtInfo {
+public:
+ CGOpenMPRegionInfo(const OMPExecutableDirective &D, const CapturedStmt &CS,
+ const VarDecl *ThreadIDVar)
+ : CGCapturedStmtInfo(CS, CR_OpenMP), ThreadIDVar(ThreadIDVar),
+ Directive(D) {
+ assert(ThreadIDVar != nullptr && "No ThreadID in OpenMP region.");
+ }
+
+ /// \brief Gets a variable or parameter for storing global thread id
+ /// inside OpenMP construct.
+ const VarDecl *getThreadIDVariable() const { return ThreadIDVar; }
+
+ /// \brief Gets an LValue for the current ThreadID variable.
+ LValue getThreadIDVariableLValue(CodeGenFunction &CGF);
+
+ static bool classof(const CGCapturedStmtInfo *Info) {
+ return Info->getKind() == CR_OpenMP;
+ }
+
+ /// \brief Emit the captured statement body.
+ void EmitBody(CodeGenFunction &CGF, Stmt *S) override;
+
+ /// \brief Get the name of the capture helper.
+ StringRef getHelperName() const override { return ".omp_outlined."; }
+
+private:
+ /// \brief A variable or parameter storing global thread id for OpenMP
+ /// constructs.
+ const VarDecl *ThreadIDVar;
+ /// \brief OpenMP executable directive associated with the region.
+ const OMPExecutableDirective &Directive;
+};
+} // namespace
+
+LValue CGOpenMPRegionInfo::getThreadIDVariableLValue(CodeGenFunction &CGF) {
+ return CGF.MakeNaturalAlignAddrLValue(
+ CGF.GetAddrOfLocalVar(ThreadIDVar),
+ CGF.getContext().getPointerType(ThreadIDVar->getType()));
+}
+
+void CGOpenMPRegionInfo::EmitBody(CodeGenFunction &CGF, Stmt *S) {
+ CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
+ CGF.EmitOMPPrivateClause(Directive, PrivateScope);
+ CGF.EmitOMPFirstprivateClause(Directive, PrivateScope);
+ if (PrivateScope.Privatize())
+ // Emit implicit barrier to synchronize threads and avoid data races.
+ CGF.CGM.getOpenMPRuntime().EmitOMPBarrierCall(CGF, Directive.getLocStart(),
+ /*IsExplicit=*/false);
+ CGCapturedStmtInfo::EmitBody(CGF, S);
+}
+
CGOpenMPRuntime::CGOpenMPRuntime(CodeGenModule &CGM)
: CGM(CGM), DefaultOpenMPPSource(nullptr) {
IdentTy = llvm::StructType::create(
"ident_t", CGM.Int32Ty /* reserved_1 */, CGM.Int32Ty /* flags */,
CGM.Int32Ty /* reserved_2 */, CGM.Int32Ty /* reserved_3 */,
- CGM.Int8PtrTy /* psource */, NULL);
+ CGM.Int8PtrTy /* psource */, nullptr);
// Build void (*kmpc_micro)(kmp_int32 *global_tid, kmp_int32 *bound_tid,...)
llvm::Type *MicroParams[] = {llvm::PointerType::getUnqual(CGM.Int32Ty),
llvm::PointerType::getUnqual(CGM.Int32Ty)};
Kmpc_MicroTy = llvm::FunctionType::get(CGM.VoidTy, MicroParams, true);
+ KmpCriticalNameTy = llvm::ArrayType::get(CGM.Int32Ty, /*NumElements*/ 8);
+}
+
+llvm::Value *
+CGOpenMPRuntime::EmitOpenMPOutlinedFunction(const OMPExecutableDirective &D,
+ const VarDecl *ThreadIDVar) {
+ const CapturedStmt *CS = cast<CapturedStmt>(D.getAssociatedStmt());
+ CodeGenFunction CGF(CGM, true);
+ CGOpenMPRegionInfo CGInfo(D, *CS, ThreadIDVar);
+ CGF.CapturedStmtInfo = &CGInfo;
+ return CGF.GenerateCapturedStmtFunction(*CS);
}
llvm::Value *
@@ -50,11 +117,10 @@ CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) {
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- llvm::GlobalVariable *DefaultOpenMPLocation = cast<llvm::GlobalVariable>(
- CGM.CreateRuntimeVariable(IdentTy, ".kmpc_default_loc.addr"));
+ auto DefaultOpenMPLocation = new llvm::GlobalVariable(
+ CGM.getModule(), IdentTy, /*isConstant*/ true,
+ llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
DefaultOpenMPLocation->setUnnamedAddr(true);
- DefaultOpenMPLocation->setConstant(true);
- DefaultOpenMPLocation->setLinkage(llvm::GlobalValue::PrivateLinkage);
llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
llvm::Constant *Values[] = {Zero,
@@ -62,6 +128,7 @@ CGOpenMPRuntime::GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags) {
Zero, Zero, DefaultOpenMPPSource};
llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
DefaultOpenMPLocation->setInitializer(Init);
+ OpenMPDefaultLocMap[Flags] = DefaultOpenMPLocation;
return DefaultOpenMPLocation;
}
return Entry;
@@ -77,14 +144,17 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
assert(CGF.CurFn && "No function in current CodeGenFunction.");
llvm::Value *LocValue = nullptr;
- OpenMPLocMapTy::iterator I = OpenMPLocMap.find(CGF.CurFn);
- if (I != OpenMPLocMap.end()) {
- LocValue = I->second;
- } else {
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end())
+ LocValue = I->second.DebugLoc;
+ // OpenMPLocThreadIDMap may have null DebugLoc and non-null ThreadID, if
+ // GetOpenMPThreadID was called before this routine.
+ if (LocValue == nullptr) {
// Generate "ident_t .kmpc_loc.addr;"
llvm::AllocaInst *AI = CGF.CreateTempAlloca(IdentTy, ".kmpc_loc.addr");
AI->setAlignment(CGM.getDataLayout().getPrefTypeAlignment(IdentTy));
- OpenMPLocMap[CGF.CurFn] = AI;
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.DebugLoc = AI;
LocValue = AI;
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
@@ -95,7 +165,7 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
}
// char **psource = &.kmpc_loc_<flags>.addr.psource;
- llvm::Value *PSource =
+ auto *PSource =
CGF.Builder.CreateConstInBoundsGEP2_32(LocValue, 0, IdentField_PSource);
auto OMPDebugLoc = OpenMPDebugLocMap.lookup(Loc.getRawEncoding());
@@ -119,32 +189,54 @@ llvm::Value *CGOpenMPRuntime::EmitOpenMPUpdateLocation(
return LocValue;
}
-llvm::Value *CGOpenMPRuntime::GetOpenMPGlobalThreadNum(CodeGenFunction &CGF,
- SourceLocation Loc) {
+llvm::Value *CGOpenMPRuntime::GetOpenMPThreadID(CodeGenFunction &CGF,
+ SourceLocation Loc) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- llvm::Value *GTid = nullptr;
- OpenMPGtidMapTy::iterator I = OpenMPGtidMap.find(CGF.CurFn);
- if (I != OpenMPGtidMap.end()) {
- GTid = I->second;
+ llvm::Value *ThreadID = nullptr;
+ // Check whether we've already cached a load of the thread id in this
+ // function.
+ auto I = OpenMPLocThreadIDMap.find(CGF.CurFn);
+ if (I != OpenMPLocThreadIDMap.end()) {
+ ThreadID = I->second.ThreadID;
+ if (ThreadID != nullptr)
+ return ThreadID;
+ }
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo)) {
+ // Check if this an outlined function with thread id passed as argument.
+ auto ThreadIDVar = OMPRegionInfo->getThreadIDVariable();
+ auto LVal = OMPRegionInfo->getThreadIDVariableLValue(CGF);
+ auto RVal = CGF.EmitLoadOfLValue(LVal, Loc);
+ LVal = CGF.MakeNaturalAlignAddrLValue(RVal.getScalarVal(),
+ ThreadIDVar->getType());
+ ThreadID = CGF.EmitLoadOfLValue(LVal, Loc).getScalarVal();
+ // If value loaded in entry block, cache it and use it everywhere in
+ // function.
+ if (CGF.Builder.GetInsertBlock() == CGF.AllocaInsertPt->getParent()) {
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
+ }
} else {
- // Generate "int32 .kmpc_global_thread_num.addr;"
+ // This is not an outlined function region - need to call __kmpc_int32
+ // kmpc_global_thread_num(ident_t *loc).
+ // Generate thread id value and cache this value for use across the
+ // function.
CGBuilderTy::InsertPointGuard IPG(CGF.Builder);
CGF.Builder.SetInsertPoint(CGF.AllocaInsertPt);
llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc)};
- GTid = CGF.EmitRuntimeCall(
+ ThreadID = CGF.EmitRuntimeCall(
CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num), Args);
- OpenMPGtidMap[CGF.CurFn] = GTid;
+ auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
+ Elem.second.ThreadID = ThreadID;
}
- return GTid;
+ return ThreadID;
}
void CGOpenMPRuntime::FunctionFinished(CodeGenFunction &CGF) {
assert(CGF.CurFn && "No function in current CodeGenFunction.");
- if (OpenMPGtidMap.count(CGF.CurFn))
- OpenMPGtidMap.erase(CGF.CurFn);
- if (OpenMPLocMap.count(CGF.CurFn))
- OpenMPLocMap.erase(CGF.CurFn);
+ if (OpenMPLocThreadIDMap.count(CGF.CurFn))
+ OpenMPLocThreadIDMap.erase(CGF.CurFn);
}
llvm::Type *CGOpenMPRuntime::getIdentTyPointerTy() {
@@ -165,7 +257,7 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) {
llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
getKmpc_MicroPointerTy()};
llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.VoidTy, TypeParams, true);
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_fork_call");
break;
}
@@ -173,10 +265,658 @@ CGOpenMPRuntime::CreateRuntimeFunction(OpenMPRTLFunction Function) {
// Build kmp_int32 __kmpc_global_thread_num(ident_t *loc);
llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
llvm::FunctionType *FnTy =
- llvm::FunctionType::get(CGM.Int32Ty, TypeParams, false);
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_global_thread_num");
break;
}
+ case OMPRTL__kmpc_threadprivate_cached: {
+ // Build void *__kmpc_threadprivate_cached(ident_t *loc,
+ // kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.VoidPtrTy, CGM.SizeTy,
+ CGM.VoidPtrTy->getPointerTo()->getPointerTo()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_cached");
+ break;
+ }
+ case OMPRTL__kmpc_critical: {
+ // Build void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_critical");
+ break;
+ }
+ case OMPRTL__kmpc_threadprivate_register: {
+ // Build void __kmpc_threadprivate_register(ident_t *, void *data,
+ // kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ // typedef void *(*kmpc_ctor)(void *);
+ auto KmpcCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void *(*kmpc_cctor)(void *, void *);
+ llvm::Type *KmpcCopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto KmpcCopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, KmpcCopyCtorTyArgs,
+ /*isVarArg*/ false)->getPointerTo();
+ // typedef void (*kmpc_dtor)(void *);
+ auto KmpcDtorTy =
+ llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy, /*isVarArg*/ false)
+ ->getPointerTo();
+ llvm::Type *FnTyArgs[] = {getIdentTyPointerTy(), CGM.VoidPtrTy, KmpcCtorTy,
+ KmpcCopyCtorTy, KmpcDtorTy};
+ auto FnTy = llvm::FunctionType::get(CGM.VoidTy, FnTyArgs,
+ /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_threadprivate_register");
+ break;
+ }
+ case OMPRTL__kmpc_end_critical: {
+ // Build void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), CGM.Int32Ty,
+ llvm::PointerType::getUnqual(KmpCriticalNameTy)};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_critical");
+ break;
+ }
+ case OMPRTL__kmpc_cancel_barrier: {
+ // Build kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name*/ "__kmpc_cancel_barrier");
+ break;
+ }
+ // Build __kmpc_for_static_init*(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ case OMPRTL__kmpc_for_static_init_4: {
+ auto ITy = CGM.Int32Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_4u: {
+ auto ITy = CGM.Int32Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_4u");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_8: {
+ auto ITy = CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_init_8u: {
+ auto ITy = CGM.Int64Ty;
+ auto PtrTy = llvm::PointerType::getUnqual(ITy);
+ llvm::Type *TypeParams[] = {
+ getIdentTyPointerTy(), // loc
+ CGM.Int32Ty, // tid
+ CGM.Int32Ty, // schedtype
+ llvm::PointerType::getUnqual(CGM.Int32Ty), // p_lastiter
+ PtrTy, // p_lower
+ PtrTy, // p_upper
+ PtrTy, // p_stride
+ ITy, // incr
+ ITy // chunk
+ };
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_init_8u");
+ break;
+ }
+ case OMPRTL__kmpc_for_static_fini: {
+ // Build void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_for_static_fini");
+ break;
+ }
+ case OMPRTL__kmpc_push_num_threads: {
+ // Build void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads)
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty,
+ CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_push_num_threads");
+ break;
+ }
+ case OMPRTL__kmpc_serialized_parallel: {
+ // Build void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_end_serialized_parallel: {
+ // Build void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_end_serialized_parallel");
+ break;
+ }
+ case OMPRTL__kmpc_flush: {
+ // Build void __kmpc_flush(ident_t *loc, ...);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy()};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg*/ true);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, "__kmpc_flush");
+ break;
+ }
+ case OMPRTL__kmpc_master: {
+ // Build kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.Int32Ty, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_master");
+ break;
+ }
+ case OMPRTL__kmpc_end_master: {
+ // Build void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid);
+ llvm::Type *TypeParams[] = {getIdentTyPointerTy(), CGM.Int32Ty};
+ llvm::FunctionType *FnTy =
+ llvm::FunctionType::get(CGM.VoidTy, TypeParams, /*isVarArg=*/false);
+ RTLFn = CGM.CreateRuntimeFunction(FnTy, /*Name=*/"__kmpc_end_master");
+ break;
+ }
}
return RTLFn;
}
+
+llvm::Constant *
+CGOpenMPRuntime::getOrCreateThreadPrivateCache(const VarDecl *VD) {
+ // Lookup the entry, lazily creating it if necessary.
+ return GetOrCreateInternalVariable(CGM.Int8PtrPtrTy,
+ Twine(CGM.getMangledName(VD)) + ".cache.");
+}
+
+llvm::Value *CGOpenMPRuntime::getOMPAddrOfThreadPrivate(CodeGenFunction &CGF,
+ const VarDecl *VD,
+ llvm::Value *VDAddr,
+ SourceLocation Loc) {
+ auto VarTy = VDAddr->getType()->getPointerElementType();
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.CreatePointerCast(VDAddr, CGM.Int8PtrTy),
+ CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
+ getOrCreateThreadPrivateCache(VD)};
+ return CGF.EmitRuntimeCall(
+ CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_cached), Args);
+}
+
+void CGOpenMPRuntime::EmitOMPThreadPrivateVarInit(
+ CodeGenFunction &CGF, llvm::Value *VDAddr, llvm::Value *Ctor,
+ llvm::Value *CopyCtor, llvm::Value *Dtor, SourceLocation Loc) {
+ // Call kmp_int32 __kmpc_global_thread_num(&loc) to init OpenMP runtime
+ // library.
+ auto OMPLoc = EmitOpenMPUpdateLocation(CGF, Loc);
+ CGF.EmitRuntimeCall(CreateRuntimeFunction(OMPRTL__kmpc_global_thread_num),
+ OMPLoc);
+ // Call __kmpc_threadprivate_register(&loc, &var, ctor, cctor/*NULL*/, dtor)
+ // to register constructor/destructor for variable.
+ llvm::Value *Args[] = {OMPLoc,
+ CGF.Builder.CreatePointerCast(VDAddr, CGM.VoidPtrTy),
+ Ctor, CopyCtor, Dtor};
+ CGF.EmitRuntimeCall(
+ CreateRuntimeFunction(OMPRTL__kmpc_threadprivate_register), Args);
+}
+
+llvm::Function *CGOpenMPRuntime::EmitOMPThreadPrivateVarDefinition(
+ const VarDecl *VD, llvm::Value *VDAddr, SourceLocation Loc,
+ bool PerformInit, CodeGenFunction *CGF) {
+ VD = VD->getDefinition(CGM.getContext());
+ if (VD && ThreadPrivateWithDefinition.count(VD) == 0) {
+ ThreadPrivateWithDefinition.insert(VD);
+ QualType ASTTy = VD->getType();
+
+ llvm::Value *Ctor = nullptr, *CopyCtor = nullptr, *Dtor = nullptr;
+ auto Init = VD->getAnyInitializer();
+ if (CGM.getLangOpts().CPlusPlus && PerformInit) {
+ // Generate function that re-emits the declaration's initializer into the
+ // threadprivate copy of the variable VD
+ CodeGenFunction CtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidPtrTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_ctor_.", Loc);
+ CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidPtrTy, Fn, FI,
+ Args, SourceLocation());
+ auto ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ auto Arg = CtorCGF.Builder.CreatePointerCast(
+ ArgVal,
+ CtorCGF.ConvertTypeForMem(CGM.getContext().getPointerType(ASTTy)));
+ CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
+ /*IsInitializer=*/true);
+ ArgVal = CtorCGF.EmitLoadOfScalar(
+ CtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ CtorCGF.Builder.CreateStore(ArgVal, CtorCGF.ReturnValue);
+ CtorCGF.FinishFunction();
+ Ctor = Fn;
+ }
+ if (VD->getType().isDestructedType() != QualType::DK_none) {
+ // Generate function that emits destructor call for the threadprivate copy
+ // of the variable VD
+ CodeGenFunction DtorCGF(CGM);
+ FunctionArgList Args;
+ ImplicitParamDecl Dst(CGM.getContext(), /*DC=*/nullptr, SourceLocation(),
+ /*Id=*/nullptr, CGM.getContext().VoidPtrTy);
+ Args.push_back(&Dst);
+
+ auto &FI = CGM.getTypes().arrangeFreeFunctionDeclaration(
+ CGM.getContext().VoidTy, Args, FunctionType::ExtInfo(),
+ /*isVariadic=*/false);
+ auto FTy = CGM.getTypes().GetFunctionType(FI);
+ auto Fn = CGM.CreateGlobalInitOrDestructFunction(
+ FTy, ".__kmpc_global_dtor_.", Loc);
+ DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI, Args,
+ SourceLocation());
+ auto ArgVal = DtorCGF.EmitLoadOfScalar(
+ DtorCGF.GetAddrOfLocalVar(&Dst),
+ /*Volatile=*/false, CGM.PointerAlignInBytes,
+ CGM.getContext().VoidPtrTy, Dst.getLocation());
+ DtorCGF.emitDestroy(ArgVal, ASTTy,
+ DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.FinishFunction();
+ Dtor = Fn;
+ }
+ // Do not emit init function if it is not required.
+ if (!Ctor && !Dtor)
+ return nullptr;
+
+ llvm::Type *CopyCtorTyArgs[] = {CGM.VoidPtrTy, CGM.VoidPtrTy};
+ auto CopyCtorTy =
+ llvm::FunctionType::get(CGM.VoidPtrTy, CopyCtorTyArgs,
+ /*isVarArg=*/false)->getPointerTo();
+ // Copying constructor for the threadprivate variable.
+  // Must be NULL - the parameter is reserved by the runtime, which currently
+  // requires it to always be NULL; otherwise the runtime fires an assertion.
+ CopyCtor = llvm::Constant::getNullValue(CopyCtorTy);
+ if (Ctor == nullptr) {
+ auto CtorTy = llvm::FunctionType::get(CGM.VoidPtrTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Ctor = llvm::Constant::getNullValue(CtorTy);
+ }
+ if (Dtor == nullptr) {
+ auto DtorTy = llvm::FunctionType::get(CGM.VoidTy, CGM.VoidPtrTy,
+ /*isVarArg=*/false)->getPointerTo();
+ Dtor = llvm::Constant::getNullValue(DtorTy);
+ }
+ if (!CGF) {
+ auto InitFunctionTy =
+ llvm::FunctionType::get(CGM.VoidTy, /*isVarArg*/ false);
+ auto InitFunction = CGM.CreateGlobalInitOrDestructFunction(
+ InitFunctionTy, ".__omp_threadprivate_init_.");
+ CodeGenFunction InitCGF(CGM);
+ FunctionArgList ArgList;
+ InitCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, InitFunction,
+ CGM.getTypes().arrangeNullaryFunction(), ArgList,
+ Loc);
+ EmitOMPThreadPrivateVarInit(InitCGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ InitCGF.FinishFunction();
+ return InitFunction;
+ }
+ EmitOMPThreadPrivateVarInit(*CGF, VDAddr, Ctor, CopyCtor, Dtor, Loc);
+ }
+ return nullptr;
+}
+
+void CGOpenMPRuntime::EmitOMPParallelCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct) {
+ // Build call __kmpc_fork_call(loc, 1, microtask, captured_struct/*context*/)
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc),
+ CGF.Builder.getInt32(1), // Number of arguments after 'microtask' argument
+ // (there is only one additional argument - 'context')
+ CGF.Builder.CreateBitCast(OutlinedFn, getKmpc_MicroPointerTy()),
+ CGF.EmitCastToVoidPtr(CapturedStruct)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_fork_call);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPSerialCall(CodeGenFunction &CGF,
+ SourceLocation Loc,
+ llvm::Value *OutlinedFn,
+ llvm::Value *CapturedStruct) {
+ auto ThreadID = GetOpenMPThreadID(CGF, Loc);
+ // Build calls:
+ // __kmpc_serialized_parallel(&Loc, GTid);
+ llvm::Value *SerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_serialized_parallel);
+ CGF.EmitRuntimeCall(RTLFn, SerArgs);
+
+ // OutlinedFn(&GTid, &zero, CapturedStruct);
+ auto ThreadIDAddr = EmitThreadIDAddress(CGF, Loc);
+ auto Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
+ auto ZeroAddr = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".zero.addr");
+ CGF.InitTempAlloca(ZeroAddr, CGF.Builder.getInt32(/*C*/ 0));
+ llvm::Value *OutlinedFnArgs[] = {ThreadIDAddr, ZeroAddr, CapturedStruct};
+ CGF.EmitCallOrInvoke(OutlinedFn, OutlinedFnArgs);
+
+ // __kmpc_end_serialized_parallel(&Loc, GTid);
+ llvm::Value *EndSerArgs[] = {EmitOpenMPUpdateLocation(CGF, Loc), ThreadID};
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_serialized_parallel);
+ CGF.EmitRuntimeCall(RTLFn, EndSerArgs);
+}
+
+// If we're inside an (outlined) parallel region, use the region info's
+// thread-ID variable (it is passed as the first argument of the outlined
+// function as "kmp_int32 *gtid"). Otherwise, if we're not inside a parallel
+// regular serial code region, get thread ID by calling kmp_int32
+// kmpc_global_thread_num(ident_t *loc), stash this thread ID in a temporary and
+// return the address of that temp.
+llvm::Value *CGOpenMPRuntime::EmitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc) {
+ if (auto OMPRegionInfo =
+ dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
+ return CGF.EmitLoadOfLValue(OMPRegionInfo->getThreadIDVariableLValue(CGF),
+ SourceLocation()).getScalarVal();
+ auto ThreadID = GetOpenMPThreadID(CGF, Loc);
+ auto Int32Ty =
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth*/ 32, /*Signed*/ true);
+ auto ThreadIDTemp = CGF.CreateMemTemp(Int32Ty, /*Name*/ ".threadid_temp.");
+ CGF.EmitStoreOfScalar(ThreadID,
+ CGF.MakeNaturalAlignAddrLValue(ThreadIDTemp, Int32Ty));
+
+ return ThreadIDTemp;
+}
+
+llvm::Constant *
+CGOpenMPRuntime::GetOrCreateInternalVariable(llvm::Type *Ty,
+ const llvm::Twine &Name) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ Out << Name;
+ auto RuntimeName = Out.str();
+ auto &Elem = *InternalVars.insert(std::make_pair(RuntimeName, nullptr)).first;
+ if (Elem.second) {
+ assert(Elem.second->getType()->getPointerElementType() == Ty &&
+ "OMP internal variable has different type than requested");
+ return &*Elem.second;
+ }
+
+ return Elem.second = new llvm::GlobalVariable(
+ CGM.getModule(), Ty, /*IsConstant*/ false,
+ llvm::GlobalValue::CommonLinkage, llvm::Constant::getNullValue(Ty),
+ Elem.first());
+}
+
+llvm::Value *CGOpenMPRuntime::GetCriticalRegionLock(StringRef CriticalName) {
+ llvm::Twine Name(".gomp_critical_user_", CriticalName);
+ return GetOrCreateInternalVariable(KmpCriticalNameTy, Name.concat(".var"));
+}
+
+void CGOpenMPRuntime::EmitOMPCriticalRegion(
+ CodeGenFunction &CGF, StringRef CriticalName,
+ const std::function<void()> &CriticalOpGen, SourceLocation Loc) {
+ auto RegionLock = GetCriticalRegionLock(CriticalName);
+ // __kmpc_critical(ident_t *, gtid, Lock);
+ // CriticalOpGen();
+ // __kmpc_end_critical(ident_t *, gtid, Lock);
+ // Prepare arguments and build a call to __kmpc_critical
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc), RegionLock};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_critical);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ CriticalOpGen();
+ // Build a call to __kmpc_end_critical
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_critical);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+static void EmitOMPIfStmt(CodeGenFunction &CGF, llvm::Value *IfCond,
+ const std::function<void()> &BodyOpGen) {
+ llvm::Value *CallBool = CGF.EmitScalarConversion(
+ IfCond,
+ CGF.getContext().getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true),
+ CGF.getContext().BoolTy);
+
+ auto *ThenBlock = CGF.createBasicBlock("omp_if.then");
+ auto *ContBlock = CGF.createBasicBlock("omp_if.end");
+ // Generate the branch (If-stmt)
+ CGF.Builder.CreateCondBr(CallBool, ThenBlock, ContBlock);
+ CGF.EmitBlock(ThenBlock);
+ BodyOpGen();
+ // Emit the rest of bblocks/branches
+ CGF.EmitBranch(ContBlock);
+ CGF.EmitBlock(ContBlock, true);
+}
+
+void CGOpenMPRuntime::EmitOMPMasterRegion(
+ CodeGenFunction &CGF, const std::function<void()> &MasterOpGen,
+ SourceLocation Loc) {
+ // if(__kmpc_master(ident_t *, gtid)) {
+ // MasterOpGen();
+ // __kmpc_end_master(ident_t *, gtid);
+ // }
+ // Prepare arguments and build a call to __kmpc_master
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_master);
+ auto *IsMaster = CGF.EmitRuntimeCall(RTLFn, Args);
+ EmitOMPIfStmt(CGF, IsMaster, [&]() -> void {
+ MasterOpGen();
+ // Build a call to __kmpc_end_master.
+ // OpenMP [1.2.2 OpenMP Language Terminology]
+ // For C/C++, an executable statement, possibly compound, with a single
+ // entry at the top and a single exit at the bottom, or an OpenMP construct.
+ // * Access to the structured block must not be the result of a branch.
+ // * The point of exit cannot be a branch out of the structured block.
+ // * The point of entry must not be a call to setjmp().
+ // * longjmp() and throw() must not violate the entry/exit criteria.
+ // * An expression statement, iteration statement, selection statement, or
+ // try block is considered to be a structured block if the corresponding
+ // compound statement obtained by enclosing it in { and } would be a
+ // structured block.
+ // It is analyzed in Sema, so we can just call __kmpc_end_master() on
+ // fallthrough rather than pushing a normal cleanup for it.
+ RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_end_master);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+ });
+}
+
+void CGOpenMPRuntime::EmitOMPBarrierCall(CodeGenFunction &CGF,
+ SourceLocation Loc, bool IsExplicit) {
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ auto Flags = static_cast<OpenMPLocationFlags>(
+ OMP_IDENT_KMPC |
+ (IsExplicit ? OMP_IDENT_BARRIER_EXPL : OMP_IDENT_BARRIER_IMPL));
+ // Build call __kmpc_cancel_barrier(loc, thread_id);
+ // Replace __kmpc_barrier() function by __kmpc_cancel_barrier() because this
+ // one provides the same functionality and adds initial support for
+ // cancellation constructs introduced in OpenMP 4.0. __kmpc_cancel_barrier()
+  // is provided by default by the runtime library, so it is safe to make such
+  // a replacement.
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, Flags),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_cancel_barrier);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+/// \brief Schedule types for 'omp for' loops (these enumerators are taken from
+/// the enum sched_type in kmp.h).
+enum OpenMPSchedType {
+ /// \brief Lower bound for default (unordered) versions.
+ OMP_sch_lower = 32,
+ OMP_sch_static_chunked = 33,
+ OMP_sch_static = 34,
+ OMP_sch_dynamic_chunked = 35,
+ OMP_sch_guided_chunked = 36,
+ OMP_sch_runtime = 37,
+ OMP_sch_auto = 38,
+ /// \brief Lower bound for 'ordered' versions.
+ OMP_ord_lower = 64,
+ /// \brief Lower bound for 'nomerge' versions.
+ OMP_nm_lower = 160,
+};
+
+/// \brief Map the OpenMP loop schedule to the runtime enumeration.
+static OpenMPSchedType getRuntimeSchedule(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) {
+ switch (ScheduleKind) {
+ case OMPC_SCHEDULE_static:
+ return Chunked ? OMP_sch_static_chunked : OMP_sch_static;
+ case OMPC_SCHEDULE_dynamic:
+ return OMP_sch_dynamic_chunked;
+ case OMPC_SCHEDULE_guided:
+ return OMP_sch_guided_chunked;
+ case OMPC_SCHEDULE_auto:
+ return OMP_sch_auto;
+ case OMPC_SCHEDULE_runtime:
+ return OMP_sch_runtime;
+ case OMPC_SCHEDULE_unknown:
+ assert(!Chunked && "chunk was specified but schedule kind not known");
+ return OMP_sch_static;
+ }
+ llvm_unreachable("Unexpected runtime schedule");
+}
+
+bool CGOpenMPRuntime::isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
+ bool Chunked) const {
+ auto Schedule = getRuntimeSchedule(ScheduleKind, Chunked);
+ return Schedule == OMP_sch_static;
+}
+
+void CGOpenMPRuntime::EmitOMPForInit(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind,
+ unsigned IVSize, bool IVSigned,
+ llvm::Value *IL, llvm::Value *LB,
+ llvm::Value *UB, llvm::Value *ST,
+ llvm::Value *Chunk) {
+ OpenMPSchedType Schedule = getRuntimeSchedule(ScheduleKind, Chunk != nullptr);
+ // Call __kmpc_for_static_init(
+ // ident_t *loc, kmp_int32 tid, kmp_int32 schedtype,
+ // kmp_int32 *p_lastiter, kmp_int[32|64] *p_lower,
+ // kmp_int[32|64] *p_upper, kmp_int[32|64] *p_stride,
+ // kmp_int[32|64] incr, kmp_int[32|64] chunk);
+ // TODO: Implement dynamic schedule.
+
+ // If the Chunk was not specified in the clause - use default value 1.
+ if (Chunk == nullptr)
+ Chunk = CGF.Builder.getIntN(IVSize, /*C*/ 1);
+
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.getInt32(Schedule), // Schedule type
+ IL, // &isLastIter
+ LB, // &LB
+ UB, // &UB
+ ST, // &Stride
+ CGF.Builder.getIntN(IVSize, 1), // Incr
+ Chunk // Chunk
+ };
+ assert((IVSize == 32 || IVSize == 64) &&
+ "Index size is not compatible with the omp runtime");
+ auto F = IVSize == 32 ? (IVSigned ? OMPRTL__kmpc_for_static_init_4
+ : OMPRTL__kmpc_for_static_init_4u)
+ : (IVSigned ? OMPRTL__kmpc_for_static_init_8
+ : OMPRTL__kmpc_for_static_init_8u);
+ auto RTLFn = CreateRuntimeFunction(F);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPForFinish(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPScheduleClauseKind ScheduleKind) {
+ assert((ScheduleKind == OMPC_SCHEDULE_static ||
+ ScheduleKind == OMPC_SCHEDULE_unknown) &&
+ "Non-static schedule kinds are not yet implemented");
+ // Call __kmpc_for_static_fini(ident_t *loc, kmp_int32 tid);
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc, OMP_IDENT_KMPC),
+ GetOpenMPThreadID(CGF, Loc)};
+ auto RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_for_static_fini);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPNumThreadsClause(CodeGenFunction &CGF,
+ llvm::Value *NumThreads,
+ SourceLocation Loc) {
+ // Build call __kmpc_push_num_threads(&loc, global_tid, num_threads)
+ llvm::Value *Args[] = {
+ EmitOpenMPUpdateLocation(CGF, Loc), GetOpenMPThreadID(CGF, Loc),
+ CGF.Builder.CreateIntCast(NumThreads, CGF.Int32Ty, /*isSigned*/ true)};
+ llvm::Constant *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_push_num_threads);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
+
+void CGOpenMPRuntime::EmitOMPFlush(CodeGenFunction &CGF, ArrayRef<const Expr *>,
+ SourceLocation Loc) {
+ // Build call void __kmpc_flush(ident_t *loc, ...)
+ // FIXME: List of variables is ignored by libiomp5 runtime, no need to
+ // generate it, just request full memory fence.
+ llvm::Value *Args[] = {EmitOpenMPUpdateLocation(CGF, Loc),
+ llvm::ConstantInt::get(CGM.Int32Ty, 0)};
+ auto *RTLFn = CreateRuntimeFunction(OMPRTL__kmpc_flush);
+ CGF.EmitRuntimeCall(RTLFn, Args);
+}
diff --git a/lib/CodeGen/CGOpenMPRuntime.h b/lib/CodeGen/CGOpenMPRuntime.h
index 862e8a148c56..6daf8179c14e 100644
--- a/lib/CodeGen/CGOpenMPRuntime.h
+++ b/lib/CodeGen/CGOpenMPRuntime.h
@@ -11,29 +11,31 @@
//
//===----------------------------------------------------------------------===//
-#ifndef CLANG_CODEGEN_OPENMPRUNTIME_H
-#define CLANG_CODEGEN_OPENMPRUNTIME_H
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
-#include "clang/AST/Type.h"
+#include "clang/Basic/OpenMPKinds.h"
+#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/IR/Type.h"
-#include "llvm/IR/Value.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/IR/ValueHandle.h"
namespace llvm {
-class AllocaInst;
-class CallInst;
-class GlobalVariable;
+class ArrayType;
class Constant;
class Function;
-class Module;
-class StructLayout;
class FunctionType;
+class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
+class Expr;
+class OMPExecutableDirective;
+class VarDecl;
namespace CodeGen {
@@ -42,6 +44,52 @@ class CodeGenModule;
class CGOpenMPRuntime {
public:
+
+private:
+ enum OpenMPRTLFunction {
+ /// \brief Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc,
+ /// kmpc_micro microtask, ...);
+ OMPRTL__kmpc_fork_call,
+ /// \brief Call to void *__kmpc_threadprivate_cached(ident_t *loc,
+ /// kmp_int32 global_tid, void *data, size_t size, void ***cache);
+ OMPRTL__kmpc_threadprivate_cached,
+ /// \brief Call to void __kmpc_threadprivate_register( ident_t *,
+ /// void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor);
+ OMPRTL__kmpc_threadprivate_register,
+ // Call to __kmpc_int32 kmpc_global_thread_num(ident_t *loc);
+ OMPRTL__kmpc_global_thread_num,
+ // Call to void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_critical,
+ // Call to void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
+ // kmp_critical_name *crit);
+ OMPRTL__kmpc_end_critical,
+ // Call to kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_cancel_barrier,
+ // Calls for static scheduling 'omp for' loops.
+ OMPRTL__kmpc_for_static_init_4,
+ OMPRTL__kmpc_for_static_init_4u,
+ OMPRTL__kmpc_for_static_init_8,
+ OMPRTL__kmpc_for_static_init_8u,
+ OMPRTL__kmpc_for_static_fini,
+ // Call to void __kmpc_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_serialized_parallel,
+ // Call to void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32
+ // global_tid);
+ OMPRTL__kmpc_end_serialized_parallel,
+ // Call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
+ // kmp_int32 num_threads);
+ OMPRTL__kmpc_push_num_threads,
+ // Call to void __kmpc_flush(ident_t *loc, ...);
+ OMPRTL__kmpc_flush,
+ // Call to kmp_int32 __kmpc_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_master,
+ // Call to void __kmpc_end_master(ident_t *, kmp_int32 global_tid);
+ OMPRTL__kmpc_end_master,
+ };
+
/// \brief Values for bit flags used in the ident_t to describe the fields.
/// All enumeric elements are named and described in accordance with the code
/// from http://llvm.org/svn/llvm-project/openmp/trunk/runtime/src/kmp.h
@@ -63,20 +111,11 @@ public:
/// \brief Implicit barrier in 'single' directive.
OMP_IDENT_BARRIER_IMPL_SINGLE = 0x140
};
- enum OpenMPRTLFunction {
- // Call to void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro
- // microtask, ...);
- OMPRTL__kmpc_fork_call,
- // Call to kmp_int32 kmpc_global_thread_num(ident_t *loc);
- OMPRTL__kmpc_global_thread_num
- };
-
-private:
CodeGenModule &CGM;
/// \brief Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource;
- /// \brief Map of flags and corrsponding default locations.
+ /// \brief Map of flags and corresponding default locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDefaultLocMapTy;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
llvm::Value *GetOrCreateDefaultOpenMPLocation(OpenMPLocationFlags Flags);
@@ -121,55 +160,241 @@ private:
IdentField_PSource
};
llvm::StructType *IdentTy;
- /// \brief Map for Sourcelocation and OpenMP runtime library debug locations.
+ /// \brief Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// \brief The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy;
- /// \brief Map of local debug location and functions.
- typedef llvm::DenseMap<llvm::Function *, llvm::Value *> OpenMPLocMapTy;
- OpenMPLocMapTy OpenMPLocMap;
- /// \brief Map of local gtid and functions.
- typedef llvm::DenseMap<llvm::Function *, llvm::Value *> OpenMPGtidMapTy;
- OpenMPGtidMapTy OpenMPGtidMap;
+ /// \brief Stores debug location and ThreadID for the function.
+ struct DebugLocThreadIdTy {
+ llvm::Value *DebugLoc;
+ llvm::Value *ThreadID;
+ };
+ /// \brief Map of local debug location, ThreadId and functions.
+ typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
+ OpenMPLocThreadIDMapTy;
+ OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
+ /// \brief Type kmp_critical_name, originally defined as typedef kmp_int32
+ /// kmp_critical_name[8];
+ llvm::ArrayType *KmpCriticalNameTy;
+ /// \brief An ordered map of auto-generated variables to their unique names.
+ /// It stores variables with the following names: 1) ".gomp_critical_user_" +
+ /// <critical_section_name> + ".var" for "omp critical" directives; 2)
+ /// <mangled_name_for_global_var> + ".cache." for cache for threadprivate
+ /// variables.
+ llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
+ InternalVars;
+
+ /// \brief Emits object of ident_t type with info for source location.
+ /// \param Flags Flags for OpenMP location.
+ ///
+ llvm::Value *
+ EmitOpenMPUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
+ OpenMPLocationFlags Flags = OMP_IDENT_KMPC);
+
+ /// \brief Returns pointer to ident_t type.
+ llvm::Type *getIdentTyPointerTy();
+
+ /// \brief Returns pointer to kmpc_micro type.
+ llvm::Type *getKmpc_MicroPointerTy();
+
+ /// \brief Returns specified OpenMP runtime function.
+ /// \param Function OpenMP runtime function.
+ /// \return Specified function.
+ llvm::Constant *CreateRuntimeFunction(OpenMPRTLFunction Function);
+
+ /// \brief If the specified mangled name is not in the module, create and
+ /// return threadprivate cache object. This object is a pointer's worth of
+ /// storage that's reserved for use by the OpenMP runtime.
+ /// \param VD Threadprivate variable.
+ /// \return Cache variable for the specified threadprivate.
+ llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
+
+ /// \brief Emits address of the word in a memory where current thread id is
+ /// stored.
+ virtual llvm::Value *EmitThreadIDAddress(CodeGenFunction &CGF,
+ SourceLocation Loc);
+
+ /// \brief Gets thread id value for the current thread.
+ ///
+ llv