author     Dimitry Andric <dim@FreeBSD.org>    2017-01-02 19:18:08 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-01-02 19:18:08 +0000
commit     bab175ec4b075c8076ba14c762900392533f6ee4 (patch)
tree       01f4f29419a2cb10abe13c1e63cd2a66068b0137 /lib/CodeGen
parent     8b7a8012d223fac5d17d16a66bb39168a9a1dfc0 (diff)
download   src-bab175ec4b075c8076ba14c762900392533f6ee4.tar.gz
           src-bab175ec4b075c8076ba14c762900392533f6ee4.zip
Vendor import of clang trunk r290819 (tag: vendor/clang/clang-trunk-r290819)
Notes:
    svn path=/vendor/clang/dist/; revision=311118
    svn path=/vendor/clang/clang-trunk-r290819/; revision=311119; tag=vendor/clang/clang-trunk-r290819
Diffstat (limited to 'lib/CodeGen')
-rw-r--r--  lib/CodeGen/ABIInfo.h | 2
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 390
-rw-r--r--  lib/CodeGen/CGAtomic.cpp | 6
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 152
-rw-r--r--  lib/CodeGen/CGBlocks.h | 17
-rw-r--r--  lib/CodeGen/CGBuilder.h | 18
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 1093
-rw-r--r--  lib/CodeGen/CGCUDABuiltin.cpp | 6
-rw-r--r--  lib/CodeGen/CGCUDANV.cpp | 92
-rw-r--r--  lib/CodeGen/CGCUDARuntime.cpp | 11
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 67
-rw-r--r--  lib/CodeGen/CGCXXABI.cpp | 5
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 13
-rw-r--r--  lib/CodeGen/CGCall.cpp | 352
-rw-r--r--  lib/CodeGen/CGCall.h | 143
-rw-r--r--  lib/CodeGen/CGClass.cpp | 246
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 2
-rw-r--r--  lib/CodeGen/CGCleanup.h | 3
-rw-r--r--  lib/CodeGen/CGCoroutine.cpp | 116
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 593
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 54
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 122
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 9
-rw-r--r--  lib/CodeGen/CGException.cpp | 14
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 383
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 115
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 684
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 11
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 99
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 149
-rw-r--r--  lib/CodeGen/CGLoopInfo.cpp | 31
-rw-r--r--  lib/CodeGen/CGLoopInfo.h | 11
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 49
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 1059
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 2207
-rw-r--r--  lib/CodeGen/CGObjCRuntime.cpp | 27
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 3
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.cpp | 38
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.h | 14
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp | 613
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.h | 50
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 5
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 7
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 64
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 256
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 16
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 328
-rw-r--r--  lib/CodeGen/CGVTables.h | 33
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 7
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 133
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 143
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 177
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 607
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 97
-rw-r--r--  lib/CodeGen/CodeGenPGO.cpp | 4
-rw-r--r--  lib/CodeGen/CodeGenPGO.h | 2
-rw-r--r--  lib/CodeGen/CodeGenTypeCache.h | 5
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 24
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 10
-rw-r--r--  lib/CodeGen/ConstantBuilder.h | 444
-rw-r--r--  lib/CodeGen/CoverageMappingGen.cpp | 73
-rw-r--r--  lib/CodeGen/CoverageMappingGen.h | 1
-rw-r--r--  lib/CodeGen/EHScopeStack.h | 6
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 293
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 314
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 2
-rw-r--r--  lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 37
-rw-r--r--  lib/CodeGen/SanitizerMetadata.cpp | 8
-rw-r--r--  lib/CodeGen/SwiftCallingConv.cpp | 9
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 488
-rw-r--r--  lib/CodeGen/TargetInfo.h | 16
-rw-r--r--  lib/CodeGen/VarBypassDetector.cpp | 168
-rw-r--r--  lib/CodeGen/VarBypassDetector.h | 70
73 files changed, 8572 insertions, 4344 deletions
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 530a7ef560c5..ac31dfdaf3e4 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -142,6 +142,8 @@ namespace swiftcall {
llvm::Type *eltTy,
unsigned elts) const;
+ virtual bool isSwiftErrorInRegister() const = 0;
+
static bool classof(const ABIInfo *info) {
return info->supportsSwift();
}
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index 165b6dd55c9b..164e52d7de27 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -14,24 +14,29 @@
#include "clang/Frontend/CodeGenOptions.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/Utils.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/Bitcode/BitcodeReader.h"
+#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/Bitcode/BitcodeWriterPass.h"
-#include "llvm/Bitcode/ReaderWriter.h"
#include "llvm/CodeGen/RegAllocRegistry.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/IR/DataLayout.h"
-#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/IRPrintingPasses.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Verifier.h"
+#include "llvm/LTO/LTOBackend.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/Object/ModuleSummaryIndexObjectFile.h"
+#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/Timer.h"
@@ -39,7 +44,9 @@
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetSubtargetInfo.h"
+#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/IPO.h"
+#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/ObjCARC.h"
@@ -74,8 +81,7 @@ private:
/// Set LLVM command line options passed through -backend-option.
void setCommandLineOpts();
- void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM,
- ModuleSummaryIndex *ModuleSummary);
+ void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM);
/// Generates the TargetMachine.
/// Leaves TM unchanged if it is unable to create the target machine.
@@ -98,7 +104,7 @@ public:
const clang::TargetOptions &TOpts,
const LangOptions &LOpts, Module *M)
: Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts), LangOpts(LOpts),
- TheModule(M), CodeGenerationTime("Code Generation Time") {}
+ TheModule(M), CodeGenerationTime("codegen", "Code Generation Time") {}
~EmitAssemblyHelper() {
if (CodeGenOpts.DisableFree)
@@ -109,6 +115,9 @@ public:
void EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS);
+
+ void EmitAssemblyWithNewPassManager(BackendAction Action,
+ std::unique_ptr<raw_pwrite_stream> OS);
};
// We need this wrapper to access LangOpts and CGOpts from extension functions
@@ -147,17 +156,6 @@ static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
PM.add(createAddDiscriminatorsPass());
}
-static void addCleanupPassesForSampleProfiler(
- const PassManagerBuilder &Builder, legacy::PassManagerBase &PM) {
- // instcombine is needed before sample profile annotation because it converts
- // certain function calls to be inlinable. simplifycfg and sroa are needed
- // before instcombine for necessary preparation. E.g. load store is eliminated
- // properly so that instcombine will not introduce unecessary liverange.
- PM.add(createCFGSimplificationPass());
- PM.add(createSROAPass());
- PM.add(createInstructionCombiningPass());
-}
-
static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
legacy::PassManagerBase &PM) {
PM.add(createBoundsCheckingPass());
@@ -174,8 +172,11 @@ static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
Opts.IndirectCalls = CGOpts.SanitizeCoverageIndirectCalls;
Opts.TraceBB = CGOpts.SanitizeCoverageTraceBB;
Opts.TraceCmp = CGOpts.SanitizeCoverageTraceCmp;
+ Opts.TraceDiv = CGOpts.SanitizeCoverageTraceDiv;
+ Opts.TraceGep = CGOpts.SanitizeCoverageTraceGep;
Opts.Use8bitCounters = CGOpts.SanitizeCoverage8bitCounters;
Opts.TracePC = CGOpts.SanitizeCoverageTracePC;
+ Opts.TracePCGuard = CGOpts.SanitizeCoverageTracePCGuard;
PM.add(createSanitizerCoverageModulePass(Opts));
}
@@ -205,7 +206,9 @@ static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
const PassManagerBuilderWrapper &BuilderWrapper =
static_cast<const PassManagerBuilderWrapper&>(Builder);
const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createMemorySanitizerPass(CGOpts.SanitizeMemoryTrackOrigins));
+ int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
+ bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
+ PM.add(createMemorySanitizerPass(TrackOrigins, Recover));
// MemorySanitizer inserts complex instrumentation that mostly follows
// the logic of the original code, but operates on "shadow" values.
@@ -263,6 +266,9 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
case CodeGenOptions::Accelerate:
TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::Accelerate);
break;
+ case CodeGenOptions::SVML:
+ TLII->addVectorizableFunctionsFromVecLib(TargetLibraryInfoImpl::SVML);
+ break;
default:
break;
}
@@ -281,47 +287,33 @@ static void addSymbolRewriterPass(const CodeGenOptions &Opts,
}
void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
- legacy::FunctionPassManager &FPM,
- ModuleSummaryIndex *ModuleSummary) {
+ legacy::FunctionPassManager &FPM) {
+ // Handle disabling of all LLVM passes, where we want to preserve the
+ // internal module before any optimization.
if (CodeGenOpts.DisableLLVMPasses)
return;
- unsigned OptLevel = CodeGenOpts.OptimizationLevel;
- CodeGenOptions::InliningMethod Inlining = CodeGenOpts.getInlining();
-
- // Handle disabling of LLVM optimization, where we want to preserve the
- // internal module before any optimization.
- if (CodeGenOpts.DisableLLVMOpts) {
- OptLevel = 0;
- Inlining = CodeGenOpts.NoInlining;
- }
-
PassManagerBuilderWrapper PMBuilder(CodeGenOpts, LangOpts);
- // Figure out TargetLibraryInfo.
+ // Figure out TargetLibraryInfo. This needs to be added to MPM and FPM
+ // manually (and not via PMBuilder), since some passes (eg. InstrProfiling)
+ // are inserted before PMBuilder ones - they'd get the default-constructed
+ // TLI with an unknown target otherwise.
Triple TargetTriple(TheModule->getTargetTriple());
- PMBuilder.LibraryInfo = createTLII(TargetTriple, CodeGenOpts);
+ std::unique_ptr<TargetLibraryInfoImpl> TLII(
+ createTLII(TargetTriple, CodeGenOpts));
- switch (Inlining) {
- case CodeGenOptions::NoInlining:
- break;
- case CodeGenOptions::NormalInlining:
- case CodeGenOptions::OnlyHintInlining: {
- PMBuilder.Inliner =
- createFunctionInliningPass(OptLevel, CodeGenOpts.OptimizeSize);
- break;
- }
- case CodeGenOptions::OnlyAlwaysInlining:
- // Respect always_inline.
- if (OptLevel == 0)
- // Do not insert lifetime intrinsics at -O0.
- PMBuilder.Inliner = createAlwaysInlinerPass(false);
- else
- PMBuilder.Inliner = createAlwaysInlinerPass();
- break;
+ // At O0 and O1 we only run the always inliner which is more efficient. At
+ // higher optimization levels we run the normal inliner.
+ if (CodeGenOpts.OptimizationLevel <= 1) {
+ bool InsertLifetimeIntrinsics = CodeGenOpts.OptimizationLevel != 0;
+ PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
+ } else {
+ PMBuilder.Inliner = createFunctionInliningPass(
+ CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize);
}
- PMBuilder.OptLevel = OptLevel;
+ PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
PMBuilder.BBVectorize = CodeGenOpts.VectorizeBB;
PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
@@ -333,13 +325,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
- // If we are performing a ThinLTO importing compile, invoke the LTO
- // pipeline and pass down the in-memory module summary index.
- if (ModuleSummary) {
- PMBuilder.ModuleSummary = ModuleSummary;
- PMBuilder.populateThinLTOPassManager(MPM);
- return;
- }
+ MPM.add(new TargetLibraryInfoWrapperPass(*TLII));
// Add target-specific passes that need to run as early as possible.
if (TM)
@@ -413,6 +399,9 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
addDataFlowSanitizerPass);
}
+ if (LangOpts.CoroutinesTS)
+ addCoroutinePassesToExtensionPoints(PMBuilder);
+
if (LangOpts.Sanitize.hasOneOf(SanitizerKind::Efficiency)) {
PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
addEfficiencySanitizerPass);
@@ -421,6 +410,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
}
// Set up the per-function pass manager.
+ FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
if (CodeGenOpts.VerifyModule)
FPM.add(createVerifierPass());
@@ -453,20 +443,17 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
MPM.add(createInstrProfilingLegacyPass(Options));
}
if (CodeGenOpts.hasProfileIRInstr()) {
+ PMBuilder.EnablePGOInstrGen = true;
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = "default.profraw";
+ PMBuilder.PGOInstrGen = "default_%m.profraw";
}
if (CodeGenOpts.hasProfileIRUse())
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
- if (!CodeGenOpts.SampleProfileFile.empty()) {
- MPM.add(createPruneEHPass());
- MPM.add(createSampleProfileLoaderPass(CodeGenOpts.SampleProfileFile));
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addCleanupPassesForSampleProfiler);
- }
+ if (!CodeGenOpts.SampleProfileFile.empty())
+ PMBuilder.PGOSampleUse = CodeGenOpts.SampleProfileFile;
PMBuilder.populateFunctionPassManager(FPM);
PMBuilder.populateModulePassManager(MPM);
@@ -517,15 +504,14 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
// Keep this synced with the equivalent code in tools/driver/cc1as_main.cpp.
llvm::Optional<llvm::Reloc::Model> RM;
- if (CodeGenOpts.RelocationModel == "static") {
- RM = llvm::Reloc::Static;
- } else if (CodeGenOpts.RelocationModel == "pic") {
- RM = llvm::Reloc::PIC_;
- } else {
- assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
- "Invalid PIC model!");
- RM = llvm::Reloc::DynamicNoPIC;
- }
+ RM = llvm::StringSwitch<llvm::Reloc::Model>(CodeGenOpts.RelocationModel)
+ .Case("static", llvm::Reloc::Static)
+ .Case("pic", llvm::Reloc::PIC_)
+ .Case("ropi", llvm::Reloc::ROPI)
+ .Case("rwpi", llvm::Reloc::RWPI)
+ .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
+ .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC);
+ assert(RM.hasValue() && "invalid PIC model!");
CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
switch (CodeGenOpts.OptimizationLevel) {
@@ -536,9 +522,6 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
llvm::TargetOptions Options;
- if (!TargetOpts.Reciprocals.empty())
- Options.Reciprocals = TargetRecip(TargetOpts.Reciprocals);
-
Options.ThreadModel =
llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
.Case("posix", llvm::ThreadModel::POSIX)
@@ -601,8 +584,11 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
Options.MCOptions.MCIncrementalLinkerCompatible =
CodeGenOpts.IncrementalLinkerCompatible;
+ Options.MCOptions.MCPIECopyRelocations =
+ CodeGenOpts.PIECopyRelocations;
Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
Options.MCOptions.ABIName = TargetOpts.ABI;
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
@@ -659,26 +645,6 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
if (TM)
TheModule->setDataLayout(TM->createDataLayout());
- // If we are performing a ThinLTO importing compile, load the function
- // index into memory and pass it into CreatePasses, which will add it
- // to the PassManagerBuilder and invoke LTO passes.
- std::unique_ptr<ModuleSummaryIndex> ModuleSummary;
- if (!CodeGenOpts.ThinLTOIndexFile.empty()) {
- ErrorOr<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
- llvm::getModuleSummaryIndexForFile(
- CodeGenOpts.ThinLTOIndexFile, [&](const DiagnosticInfo &DI) {
- TheModule->getContext().diagnose(DI);
- });
- if (std::error_code EC = IndexOrErr.getError()) {
- std::string Error = EC.message();
- errs() << "Error loading index file '" << CodeGenOpts.ThinLTOIndexFile
- << "': " << Error << "\n";
- return;
- }
- ModuleSummary = std::move(IndexOrErr.get());
- assert(ModuleSummary && "Expected non-empty module summary index");
- }
-
legacy::PassManager PerModulePasses;
PerModulePasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
@@ -687,7 +653,7 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
PerFunctionPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
- CreatePasses(PerModulePasses, PerFunctionPasses, ModuleSummary.get());
+ CreatePasses(PerModulePasses, PerFunctionPasses);
legacy::PassManager CodeGenPasses;
CodeGenPasses.add(
@@ -740,15 +706,245 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
}
}
+static PassBuilder::OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
+ switch (Opts.OptimizationLevel) {
+ default:
+ llvm_unreachable("Invalid optimization level!");
+
+ case 1:
+ return PassBuilder::O1;
+
+ case 2:
+ switch (Opts.OptimizeSize) {
+ default:
+ llvm_unreachable("Invalide optimization level for size!");
+
+ case 0:
+ return PassBuilder::O2;
+
+ case 1:
+ return PassBuilder::Os;
+
+ case 2:
+ return PassBuilder::Oz;
+ }
+
+ case 3:
+ return PassBuilder::O3;
+ }
+}
+
+/// A clean version of `EmitAssembly` that uses the new pass manager.
+///
+/// Not all features are currently supported in this system, but where
+/// necessary it falls back to the legacy pass manager to at least provide
+/// basic functionality.
+///
+/// This API is planned to have its functionality finished and then to replace
+/// `EmitAssembly` at some point in the future when the default switches.
+void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
+ BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : nullptr);
+ setCommandLineOpts();
+
+ // The new pass manager always makes a target machine available to passes
+ // during construction.
+ CreateTargetMachine(/*MustCreateTM*/ true);
+ if (!TM)
+ // This will already be diagnosed, just bail.
+ return;
+ TheModule->setDataLayout(TM->createDataLayout());
+
+ PassBuilder PB(TM.get());
+
+ LoopAnalysisManager LAM;
+ FunctionAnalysisManager FAM;
+ CGSCCAnalysisManager CGAM;
+ ModuleAnalysisManager MAM;
+
+ // Register the AA manager first so that our version is the one used.
+ FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });
+
+ // Register all the basic analyses with the managers.
+ PB.registerModuleAnalyses(MAM);
+ PB.registerCGSCCAnalyses(CGAM);
+ PB.registerFunctionAnalyses(FAM);
+ PB.registerLoopAnalyses(LAM);
+ PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
+
+ ModulePassManager MPM;
+
+ if (!CodeGenOpts.DisableLLVMPasses) {
+ if (CodeGenOpts.OptimizationLevel == 0) {
+ // Build a minimal pipeline based on the semantics required by Clang,
+ // which is just that always inlining occurs.
+ MPM.addPass(AlwaysInlinerPass());
+ } else {
+ // Otherwise, use the default pass pipeline. We also have to map our
+ // optimization levels into one of the distinct levels used to configure
+ // the pipeline.
+ PassBuilder::OptimizationLevel Level = mapToLevel(CodeGenOpts);
+
+ MPM = PB.buildPerModuleDefaultPipeline(Level);
+ }
+ }
+
+ // FIXME: We still use the legacy pass manager to do code generation. We
+ // create that pass manager here and use it as needed below.
+ legacy::PassManager CodeGenPasses;
+ bool NeedCodeGen = false;
+
+ // Append any output we need to the pass manager.
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ MPM.addPass(BitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists,
+ CodeGenOpts.EmitSummaryIndex,
+ CodeGenOpts.EmitSummaryIndex));
+ break;
+
+ case Backend_EmitLL:
+ MPM.addPass(PrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
+ break;
+
+ case Backend_EmitAssembly:
+ case Backend_EmitMCNull:
+ case Backend_EmitObj:
+ NeedCodeGen = true;
+ CodeGenPasses.add(
+ createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ if (!AddEmitPasses(CodeGenPasses, Action, *OS))
+ // FIXME: Should we handle this error differently?
+ return;
+ break;
+ }
+
+ // Before executing passes, print the final values of the LLVM options.
+ cl::PrintOptionValues();
+
+ // Now that we have all of the passes ready, run them.
+ {
+ PrettyStackTraceString CrashInfo("Optimizer");
+ MPM.run(*TheModule, MAM);
+ }
+
+ // Now if needed, run the legacy PM for codegen.
+ if (NeedCodeGen) {
+ PrettyStackTraceString CrashInfo("Code generation");
+ CodeGenPasses.run(*TheModule);
+ }
+}
+
+static void runThinLTOBackend(const CodeGenOptions &CGOpts, Module *M,
+ std::unique_ptr<raw_pwrite_stream> OS) {
+ // If we are performing a ThinLTO importing compile, load the function index
+ // into memory and pass it into thinBackend, which will run the function
+ // importer and invoke LTO passes.
+ Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
+ llvm::getModuleSummaryIndexForFile(CGOpts.ThinLTOIndexFile);
+ if (!IndexOrErr) {
+ logAllUnhandledErrors(IndexOrErr.takeError(), errs(),
+ "Error loading index file '" +
+ CGOpts.ThinLTOIndexFile + "': ");
+ return;
+ }
+ std::unique_ptr<ModuleSummaryIndex> CombinedIndex = std::move(*IndexOrErr);
+
+ StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
+ ModuleToDefinedGVSummaries;
+ CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
+
+ // We can simply import the values mentioned in the combined index, since
+ // we should only invoke this using the individual indexes written out
+ // via a WriteIndexesThinBackend.
+ FunctionImporter::ImportMapTy ImportList;
+ for (auto &GlobalList : *CombinedIndex) {
+ auto GUID = GlobalList.first;
+ assert(GlobalList.second.size() == 1 &&
+ "Expected individual combined index to have one summary per GUID");
+ auto &Summary = GlobalList.second[0];
+ // Skip the summaries for the importing module. These are included to
+ // e.g. record required linkage changes.
+ if (Summary->modulePath() == M->getModuleIdentifier())
+ continue;
+ // Doesn't matter what value we plug in to the map, just needs an entry
+ // to provoke importing by thinBackend.
+ ImportList[Summary->modulePath()][GUID] = 1;
+ }
+
+ std::vector<std::unique_ptr<llvm::MemoryBuffer>> OwnedImports;
+ MapVector<llvm::StringRef, llvm::BitcodeModule> ModuleMap;
+
+ for (auto &I : ImportList) {
+ ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> MBOrErr =
+ llvm::MemoryBuffer::getFile(I.first());
+ if (!MBOrErr) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << MBOrErr.getError().message() << "\n";
+ return;
+ }
+
+ Expected<std::vector<BitcodeModule>> BMsOrErr =
+ getBitcodeModuleList(**MBOrErr);
+ if (!BMsOrErr) {
+ handleAllErrors(BMsOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': " << EIB.message() << '\n';
+ });
+ return;
+ }
+
+ // The bitcode file may contain multiple modules, we want the one with a
+ // summary.
+ bool FoundModule = false;
+ for (BitcodeModule &BM : *BMsOrErr) {
+ Expected<bool> HasSummary = BM.hasSummary();
+ if (HasSummary && *HasSummary) {
+ ModuleMap.insert({I.first(), BM});
+ FoundModule = true;
+ break;
+ }
+ }
+ if (!FoundModule) {
+ errs() << "Error loading imported file '" << I.first()
+ << "': Could not find module summary\n";
+ return;
+ }
+
+ OwnedImports.push_back(std::move(*MBOrErr));
+ }
+ auto AddStream = [&](size_t Task) {
+ return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
+ };
+ lto::Config Conf;
+ if (Error E = thinBackend(
+ Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
+ ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
+ handleAllErrors(std::move(E), [&](ErrorInfoBase &EIB) {
+ errs() << "Error running ThinLTO backend: " << EIB.message() << '\n';
+ });
+ }
+}
+
void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
const CodeGenOptions &CGOpts,
const clang::TargetOptions &TOpts,
const LangOptions &LOpts, const llvm::DataLayout &TDesc,
Module *M, BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
+ if (!CGOpts.ThinLTOIndexFile.empty()) {
+ runThinLTOBackend(CGOpts, M, std::move(OS));
+ return;
+ }
+
EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, LOpts, M);
- AsmHelper.EmitAssembly(Action, std::move(OS));
+ if (CGOpts.ExperimentalNewPassManager)
+ AsmHelper.EmitAssemblyWithNewPassManager(Action, std::move(OS));
+ else
+ AsmHelper.EmitAssembly(Action, std::move(OS));
// Verify clang's TargetInfo DataLayout against the LLVM TargetMachine's
// DataLayout.
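
The BackendUtil.cpp hunks above wire Clang's new EmitAssemblyWithNewPassManager path through LLVM's PassBuilder. Condensed into a standalone form, the setup they add looks roughly like the sketch below; it assumes LLVM headers of this vintage, the module and target machine are supplied by the caller, and runDefaultPipeline is an illustrative name, not part of the patch.

    #include "llvm/IR/Module.h"
    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Target/TargetMachine.h"

    // Minimal sketch of the new-PM wiring introduced in the hunk above.
    static void runDefaultPipeline(llvm::Module &M, llvm::TargetMachine *TM) {
      llvm::PassBuilder PB(TM);

      llvm::LoopAnalysisManager LAM;
      llvm::FunctionAnalysisManager FAM;
      llvm::CGSCCAnalysisManager CGAM;
      llvm::ModuleAnalysisManager MAM;

      // Register the default alias-analysis pipeline before the other analyses
      // so this version is the one the pipeline uses.
      FAM.registerPass([&] { return PB.buildDefaultAAPipeline(); });

      // Register all basic analyses and cross-wire the proxies between managers.
      PB.registerModuleAnalyses(MAM);
      PB.registerCGSCCAnalyses(CGAM);
      PB.registerFunctionAnalyses(FAM);
      PB.registerLoopAnalyses(LAM);
      PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

      // Build and run the default -O2 module pipeline.
      llvm::ModulePassManager MPM =
          PB.buildPerModuleDefaultPipeline(llvm::PassBuilder::O2);
      MPM.run(M, MAM);
    }

As the FIXME in the hunk notes, code generation itself still runs through the legacy pass manager in this patch; only the optimization pipeline moves to the new infrastructure.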
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 7b747c138303..9287e46127bd 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -11,13 +11,12 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCall.h"
#include "CGRecordLayout.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
#include "clang/CodeGen/CGFunctionInfo.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
@@ -308,7 +307,8 @@ static RValue emitAtomicLibcall(CodeGenFunction &CGF,
CGF.CGM.getTypes().arrangeBuiltinFunctionCall(resultType, args);
llvm::FunctionType *fnTy = CGF.CGM.getTypes().GetFunctionType(fnInfo);
llvm::Constant *fn = CGF.CGM.CreateRuntimeFunction(fnTy, fnName);
- return CGF.EmitCall(fnInfo, fn, ReturnValueSlot(), args);
+ auto callee = CGCallee::forDirect(fn);
+ return CGF.EmitCall(fnInfo, callee, ReturnValueSlot(), args);
}
/// Does a store of the given IR type modify the full expected width?
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index e3658ab9b762..b250b9a32b18 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/CallSite.h"
@@ -77,63 +78,63 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
const CGBlockInfo &blockInfo) {
ASTContext &C = CGM.getContext();
- llvm::Type *ulong = CGM.getTypes().ConvertType(C.UnsignedLongTy);
- llvm::Type *i8p = nullptr;
+ llvm::IntegerType *ulong =
+ cast<llvm::IntegerType>(CGM.getTypes().ConvertType(C.UnsignedLongTy));
+ llvm::PointerType *i8p = nullptr;
if (CGM.getLangOpts().OpenCL)
i8p =
llvm::Type::getInt8PtrTy(
CGM.getLLVMContext(), C.getTargetAddressSpace(LangAS::opencl_constant));
else
- i8p = CGM.getTypes().ConvertType(C.VoidPtrTy);
+ i8p = CGM.VoidPtrTy;
- SmallVector<llvm::Constant*, 6> elements;
+ ConstantInitBuilder builder(CGM);
+ auto elements = builder.beginStruct();
// reserved
- elements.push_back(llvm::ConstantInt::get(ulong, 0));
+ elements.addInt(ulong, 0);
// Size
// FIXME: What is the right way to say this doesn't fit? We should give
// a user diagnostic in that case. Better fix would be to change the
// API to size_t.
- elements.push_back(llvm::ConstantInt::get(ulong,
- blockInfo.BlockSize.getQuantity()));
+ elements.addInt(ulong, blockInfo.BlockSize.getQuantity());
// Optional copy/dispose helpers.
if (blockInfo.NeedsCopyDispose) {
// copy_func_helper_decl
- elements.push_back(buildCopyHelper(CGM, blockInfo));
+ elements.add(buildCopyHelper(CGM, blockInfo));
// destroy_func_decl
- elements.push_back(buildDisposeHelper(CGM, blockInfo));
+ elements.add(buildDisposeHelper(CGM, blockInfo));
}
// Signature. Mandatory ObjC-style method descriptor @encode sequence.
std::string typeAtEncoding =
CGM.getContext().getObjCEncodingForBlock(blockInfo.getBlockExpr());
- elements.push_back(llvm::ConstantExpr::getBitCast(
+ elements.add(llvm::ConstantExpr::getBitCast(
CGM.GetAddrOfConstantCString(typeAtEncoding).getPointer(), i8p));
// GC layout.
if (C.getLangOpts().ObjC1) {
if (CGM.getLangOpts().getGC() != LangOptions::NonGC)
- elements.push_back(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
+ elements.add(CGM.getObjCRuntime().BuildGCBlockLayout(CGM, blockInfo));
else
- elements.push_back(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
+ elements.add(CGM.getObjCRuntime().BuildRCBlockLayout(CGM, blockInfo));
}
else
- elements.push_back(llvm::Constant::getNullValue(i8p));
-
- llvm::Constant *init = llvm::ConstantStruct::getAnon(elements);
+ elements.addNullPointer(i8p);
unsigned AddrSpace = 0;
if (C.getLangOpts().OpenCL)
AddrSpace = C.getTargetAddressSpace(LangAS::opencl_constant);
+
llvm::GlobalVariable *global =
- new llvm::GlobalVariable(CGM.getModule(), init->getType(), true,
- llvm::GlobalValue::InternalLinkage,
- init, "__block_descriptor_tmp", nullptr,
- llvm::GlobalValue::NotThreadLocal,
- AddrSpace);
+ elements.finishAndCreateGlobal("__block_descriptor_tmp",
+ CGM.getPointerAlign(),
+ /*constant*/ true,
+ llvm::GlobalValue::InternalLinkage,
+ AddrSpace);
return llvm::ConstantExpr::getBitCast(global, CGM.getBlockDescriptorType());
}
@@ -188,9 +189,6 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
};
*/
-/// The number of fields in a block header.
-const unsigned BlockHeaderSize = 5;
-
namespace {
/// A chunk of data that we actually have to capture in the block.
struct BlockLayoutChunk {
@@ -199,13 +197,14 @@ namespace {
Qualifiers::ObjCLifetime Lifetime;
const BlockDecl::Capture *Capture; // null for 'this'
llvm::Type *Type;
+ QualType FieldType;
BlockLayoutChunk(CharUnits align, CharUnits size,
Qualifiers::ObjCLifetime lifetime,
const BlockDecl::Capture *capture,
- llvm::Type *type)
+ llvm::Type *type, QualType fieldType)
: Alignment(align), Size(size), Lifetime(lifetime),
- Capture(capture), Type(type) {}
+ Capture(capture), Type(type), FieldType(fieldType) {}
/// Tell the block info that this chunk has the given field index.
void setIndex(CGBlockInfo &info, unsigned index, CharUnits offset) {
@@ -213,8 +212,8 @@ namespace {
info.CXXThisIndex = index;
info.CXXThisOffset = offset;
} else {
- info.Captures.insert({Capture->getVariable(),
- CGBlockInfo::Capture::makeIndex(index, offset)});
+ auto C = CGBlockInfo::Capture::makeIndex(index, offset, FieldType);
+ info.Captures.insert({Capture->getVariable(), C});
}
}
};
@@ -317,8 +316,6 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
elementTypes.push_back(CGM.IntTy);
elementTypes.push_back(CGM.VoidPtrTy);
elementTypes.push_back(CGM.getBlockDescriptorType());
-
- assert(elementTypes.size() == BlockHeaderSize);
}
/// Compute the layout of the given block. Attempts to lay the block
@@ -363,7 +360,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
layout.push_back(BlockLayoutChunk(tinfo.second, tinfo.first,
Qualifiers::OCL_None,
- nullptr, llvmType));
+ nullptr, llvmType, thisType));
}
// Next, all the block captures.
@@ -380,7 +377,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
layout.push_back(BlockLayoutChunk(align, CGM.getPointerSize(),
Qualifiers::OCL_None, &CI,
- CGM.VoidPtrTy));
+ CGM.VoidPtrTy, variable->getType()));
continue;
}
@@ -436,6 +433,14 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
QualType VT = variable->getType();
+
+ // If the variable is captured by an enclosing block or lambda expression,
+ // use the type of the capture field.
+ if (CGF->BlockInfo && CI.isNested())
+ VT = CGF->BlockInfo->getCapture(variable).fieldType();
+ else if (auto *FD = CGF->LambdaCaptureFields.lookup(variable))
+ VT = FD->getType();
+
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -444,7 +449,8 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
llvm::Type *llvmType =
CGM.getTypes().ConvertTypeForMem(VT);
- layout.push_back(BlockLayoutChunk(align, size, lifetime, &CI, llvmType));
+ layout.push_back(
+ BlockLayoutChunk(align, size, lifetime, &CI, llvmType, VT));
}
// If that was everything, we're done here.
@@ -680,6 +686,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const BlockExpr *blockExpr) {
// If the block has no captures, we won't have a pre-computed
// layout for it.
if (!blockExpr->getBlockDecl()->hasCaptures()) {
+ if (llvm::Constant *Block = CGM.getAddrOfGlobalBlockIfEmitted(blockExpr))
+ return Block;
CGBlockInfo blockInfo(blockExpr->getBlockDecl(), CurFn->getName());
computeBlockInfo(CGM, this, blockInfo);
blockInfo.BlockExpression = blockExpr;
@@ -775,7 +783,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Ignore constant captures.
if (capture.isConstant()) continue;
- QualType type = variable->getType();
+ QualType type = capture.fieldType();
// This will be a [[type]]*, except that a byref entry will just be
// an i8**.
@@ -965,25 +973,24 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
const BlockPointerType *BPT =
E->getCallee()->getType()->getAs<BlockPointerType>();
- llvm::Value *Callee = EmitScalarExpr(E->getCallee());
+ llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
// Get a pointer to the generic block literal.
llvm::Type *BlockLiteralTy =
llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
// Bitcast the callee to a block literal.
- llvm::Value *BlockLiteral =
- Builder.CreateBitCast(Callee, BlockLiteralTy, "block.literal");
+ BlockPtr = Builder.CreateBitCast(BlockPtr, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
llvm::Value *FuncPtr =
- Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockLiteral, 3);
+ Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
- BlockLiteral = Builder.CreateBitCast(BlockLiteral, VoidPtrTy);
+ BlockPtr = Builder.CreateBitCast(BlockPtr, VoidPtrTy);
// Add the block literal.
CallArgList Args;
- Args.add(RValue::get(BlockLiteral), getContext().VoidPtrTy);
+ Args.add(RValue::get(BlockPtr), getContext().VoidPtrTy);
QualType FnType = BPT->getPointeeType();
@@ -1003,8 +1010,11 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
llvm::Type *BlockFTyPtr = llvm::PointerType::getUnqual(BlockFTy);
Func = Builder.CreateBitCast(Func, BlockFTyPtr);
+ // Prepare the callee.
+ CGCallee Callee(CGCalleeInfo(), Func);
+
// And call the block.
- return EmitCall(FnInfo, Func, ReturnValue, Args);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
@@ -1033,18 +1043,27 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable,
variable->getName());
}
- if (auto refType = variable->getType()->getAs<ReferenceType>()) {
+ if (auto refType = capture.fieldType()->getAs<ReferenceType>())
addr = EmitLoadOfReference(addr, refType);
- }
return addr;
}
+void CodeGenModule::setAddrOfGlobalBlock(const BlockExpr *BE,
+ llvm::Constant *Addr) {
+ bool Ok = EmittedGlobalBlocks.insert(std::make_pair(BE, Addr)).second;
+ (void)Ok;
+ assert(Ok && "Trying to replace an already-existing global block!");
+}
+
llvm::Constant *
-CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *blockExpr,
- const char *name) {
- CGBlockInfo blockInfo(blockExpr->getBlockDecl(), name);
- blockInfo.BlockExpression = blockExpr;
+CodeGenModule::GetAddrOfGlobalBlock(const BlockExpr *BE,
+ StringRef Name) {
+ if (llvm::Constant *Block = getAddrOfGlobalBlockIfEmitted(BE))
+ return Block;
+
+ CGBlockInfo blockInfo(BE->getBlockDecl(), Name);
+ blockInfo.BlockExpression = BE;
// Compute information about the layout, etc., of this block.
computeBlockInfo(*this, nullptr, blockInfo);
@@ -1067,43 +1086,46 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
const CGBlockInfo &blockInfo,
llvm::Constant *blockFn) {
assert(blockInfo.CanBeGlobal);
+ // Callers should detect this case on their own: calling this function
+ // generally requires computing layout information, which is a waste of time
+ // if we've already emitted this block.
+ assert(!CGM.getAddrOfGlobalBlockIfEmitted(blockInfo.BlockExpression) &&
+ "Refusing to re-emit a global block.");
// Generate the constants for the block literal initializer.
- llvm::Constant *fields[BlockHeaderSize];
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
// isa
- fields[0] = CGM.getNSConcreteGlobalBlock();
+ fields.add(CGM.getNSConcreteGlobalBlock());
// __flags
BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
if (blockInfo.UsesStret) flags |= BLOCK_USE_STRET;
- fields[1] = llvm::ConstantInt::get(CGM.IntTy, flags.getBitMask());
+ fields.addInt(CGM.IntTy, flags.getBitMask());
// Reserved
- fields[2] = llvm::Constant::getNullValue(CGM.IntTy);
+ fields.addInt(CGM.IntTy, 0);
// Function
- fields[3] = blockFn;
+ fields.add(blockFn);
// Descriptor
- fields[4] = buildBlockDescriptor(CGM, blockInfo);
-
- llvm::Constant *init = llvm::ConstantStruct::getAnon(fields);
+ fields.add(buildBlockDescriptor(CGM, blockInfo));
- llvm::GlobalVariable *literal =
- new llvm::GlobalVariable(CGM.getModule(),
- init->getType(),
- /*constant*/ true,
- llvm::GlobalVariable::InternalLinkage,
- init,
- "__block_literal_global");
- literal->setAlignment(blockInfo.BlockAlign.getQuantity());
+ llvm::Constant *literal =
+ fields.finishAndCreateGlobal("__block_literal_global",
+ blockInfo.BlockAlign,
+ /*constant*/ true);
// Return a constant of the appropriately-casted type.
- llvm::Type *requiredType =
+ llvm::Type *RequiredType =
CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
- return llvm::ConstantExpr::getBitCast(literal, requiredType);
+ llvm::Constant *Result =
+ llvm::ConstantExpr::getBitCast(literal, RequiredType);
+ CGM.setAddrOfGlobalBlock(blockInfo.BlockExpression, Result);
+ return Result;
}
void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
@@ -1939,7 +1961,7 @@ static T *buildByrefHelpers(CodeGenModule &CGM, const BlockByrefInfo &byrefInfo,
generator.CopyHelper = buildByrefCopyHelper(CGM, byrefInfo, generator);
generator.DisposeHelper = buildByrefDisposeHelper(CGM, byrefInfo, generator);
- T *copy = new (CGM.getContext()) T(std::move(generator));
+ T *copy = new (CGM.getContext()) T(std::forward<T>(generator));
CGM.ByrefHelpersCache.InsertNode(copy, insertPos);
return copy;
}
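
The CGBlocks.cpp hunks above replace hand-assembled llvm::ConstantStruct::getAnon initializers and explicit llvm::GlobalVariable construction with the ConstantInitBuilder helper added in lib/CodeGen/ConstantBuilder.h by this import. Reduced to a minimal sketch of the usage pattern shown in those hunks (CGM is a CodeGenModule in scope; someConstant and the global's name are placeholders):

    // Sketch of the ConstantInitBuilder pattern used above.
    ConstantInitBuilder builder(CGM);
    auto fields = builder.beginStruct();
    fields.addInt(CGM.IntTy, 0);             // integer field, e.g. flags or a reserved slot
    fields.add(someConstant);                // any llvm::Constant* computed elsewhere (placeholder)
    fields.addNullPointer(CGM.VoidPtrTy);    // typed null-pointer field
    llvm::GlobalVariable *global =
        fields.finishAndCreateGlobal("__example_global", CGM.getPointerAlign(),
                                     /*constant*/ true);

The builder tracks the struct type and alignment as fields are appended, so the separate anonymous-struct constant and the manual GlobalVariable constructor in the old code fall away.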
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 1edabef4ec74..80e255f75417 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -25,10 +25,8 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/Type.h"
#include "clang/Basic/TargetInfo.h"
-#include "llvm/IR/Module.h"
namespace llvm {
-class Module;
class Constant;
class Function;
class GlobalValue;
@@ -40,10 +38,8 @@ class LLVMContext;
}
namespace clang {
-
namespace CodeGen {
-class CodeGenModule;
class CGBlockInfo;
// Flags stored in __block variables.
@@ -163,6 +159,11 @@ public:
EHScopeStack::stable_iterator Cleanup;
CharUnits::QuantityType Offset;
+ /// Type of the capture field. Normally, this is identical to the type of
+ /// the capture's VarDecl, but can be different if there is an enclosing
+ /// lambda.
+ QualType FieldType;
+
public:
bool isIndex() const { return (Data & 1) != 0; }
bool isConstant() const { return !isIndex(); }
@@ -189,10 +190,16 @@ public:
return reinterpret_cast<llvm::Value*>(Data);
}
- static Capture makeIndex(unsigned index, CharUnits offset) {
+ QualType fieldType() const {
+ return FieldType;
+ }
+
+ static Capture makeIndex(unsigned index, CharUnits offset,
+ QualType FieldType) {
Capture v;
v.Data = (index << 1) | 1;
v.Offset = offset.getQuantity();
+ v.FieldType = FieldType;
return v;
}
diff --git a/lib/CodeGen/CGBuilder.h b/lib/CodeGen/CGBuilder.h
index 027435d7c599..42f9a428bb3a 100644
--- a/lib/CodeGen/CGBuilder.h
+++ b/lib/CodeGen/CGBuilder.h
@@ -102,11 +102,6 @@ public:
assert(Addr->getType()->getPointerElementType() == Ty);
return CreateAlignedLoad(Addr, Align.getQuantity(), Name);
}
- llvm::LoadInst *CreateAlignedLoad(llvm::Value *Addr, CharUnits Align,
- bool IsVolatile,
- const llvm::Twine &Name = "") {
- return CreateAlignedLoad(Addr, Align.getQuantity(), IsVolatile, Name);
- }
// Note that we intentionally hide the CreateStore APIs that don't
// take an alignment.
@@ -124,19 +119,6 @@ public:
// FIXME: these "default-aligned" APIs should be removed,
// but I don't feel like fixing all the builtin code right now.
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
- const llvm::Twine &Name = "") {
- return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
- }
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr,
- const char *Name) {
- return CGBuilderBaseTy::CreateLoad(Addr, false, Name);
- }
- llvm::LoadInst *CreateDefaultAlignedLoad(llvm::Value *Addr, bool IsVolatile,
- const llvm::Twine &Name = "") {
- return CGBuilderBaseTy::CreateLoad(Addr, IsVolatile, Name);
- }
-
llvm::StoreInst *CreateDefaultAlignedStore(llvm::Value *Val,
llvm::Value *Addr,
bool IsVolatile = false) {
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index a5fc53113bdc..43ca74761fbd 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -11,13 +11,15 @@
//
//===----------------------------------------------------------------------===//
-#include "CodeGenFunction.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGOpenCLRuntime.h"
+#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/Analysis/Analyses/OSLog.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
@@ -35,8 +37,8 @@ using namespace llvm;
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
-llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
- unsigned BuiltinID) {
+llvm::Constant *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
+ unsigned BuiltinID) {
assert(Context.BuiltinInfo.isLibFunction(BuiltinID));
// Get the name, skip over the __builtin_ prefix (if necessary).
@@ -302,10 +304,10 @@ static Value *EmitSignBit(CodeGenFunction &CGF, Value *V) {
return CGF.Builder.CreateICmpSLT(V, Zero);
}
-static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
- const CallExpr *E, llvm::Value *calleeValue) {
- return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
- ReturnValueSlot(), Fn);
+static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *FD,
+ const CallExpr *E, llvm::Constant *calleeValue) {
+ CGCallee callee = CGCallee::forDirect(calleeValue, FD);
+ return CGF.EmitCall(E->getCallee()->getType(), callee, E, ReturnValueSlot());
}
/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
@@ -462,6 +464,119 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
}
+// Many of MSVC builtins are on both x64 and ARM; to avoid repeating code, we
+// handle them here.
+enum class CodeGenFunction::MSVCIntrin {
+ _BitScanForward,
+ _BitScanReverse,
+ _InterlockedAnd,
+ _InterlockedDecrement,
+ _InterlockedExchange,
+ _InterlockedExchangeAdd,
+ _InterlockedExchangeSub,
+ _InterlockedIncrement,
+ _InterlockedOr,
+ _InterlockedXor,
+};
+
+Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
+ const CallExpr *E) {
+ switch (BuiltinID) {
+ case MSVCIntrin::_BitScanForward:
+ case MSVCIntrin::_BitScanReverse: {
+ Value *ArgValue = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = ArgValue->getType();
+ llvm::Type *IndexType =
+ EmitScalarExpr(E->getArg(0))->getType()->getPointerElementType();
+ llvm::Type *ResultType = ConvertType(E->getType());
+
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+ Value *ResZero = llvm::Constant::getNullValue(ResultType);
+ Value *ResOne = llvm::ConstantInt::get(ResultType, 1);
+
+ BasicBlock *Begin = Builder.GetInsertBlock();
+ BasicBlock *End = createBasicBlock("bitscan_end", this->CurFn);
+ Builder.SetInsertPoint(End);
+ PHINode *Result = Builder.CreatePHI(ResultType, 2, "bitscan_result");
+
+ Builder.SetInsertPoint(Begin);
+ Value *IsZero = Builder.CreateICmpEQ(ArgValue, ArgZero);
+ BasicBlock *NotZero = createBasicBlock("bitscan_not_zero", this->CurFn);
+ Builder.CreateCondBr(IsZero, End, NotZero);
+ Result->addIncoming(ResZero, Begin);
+
+ Builder.SetInsertPoint(NotZero);
+ Address IndexAddress = EmitPointerWithAlignment(E->getArg(0));
+
+ if (BuiltinID == MSVCIntrin::_BitScanForward) {
+ Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);
+ Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+ ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+ Builder.CreateStore(ZeroCount, IndexAddress, false);
+ } else {
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeLastIndex = llvm::ConstantInt::get(IndexType, ArgWidth - 1);
+
+ Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);
+ Value *ZeroCount = Builder.CreateCall(F, {ArgValue, Builder.getTrue()});
+ ZeroCount = Builder.CreateIntCast(ZeroCount, IndexType, false);
+ Value *Index = Builder.CreateNSWSub(ArgTypeLastIndex, ZeroCount);
+ Builder.CreateStore(Index, IndexAddress, false);
+ }
+ Builder.CreateBr(End);
+ Result->addIncoming(ResOne, NotZero);
+
+ Builder.SetInsertPoint(End);
+ return Result;
+ }
+ case MSVCIntrin::_InterlockedAnd:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::And, E);
+ case MSVCIntrin::_InterlockedExchange:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xchg, E);
+ case MSVCIntrin::_InterlockedExchangeAdd:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Add, E);
+ case MSVCIntrin::_InterlockedExchangeSub:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Sub, E);
+ case MSVCIntrin::_InterlockedOr:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Or, E);
+ case MSVCIntrin::_InterlockedXor:
+ return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
+
+ case MSVCIntrin::_InterlockedDecrement: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Sub,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1));
+ }
+ case MSVCIntrin::_InterlockedIncrement: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Add,
+ EmitScalarExpr(E->getArg(0)),
+ ConstantInt::get(IntTy, 1),
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
+ }
+ }
+ llvm_unreachable("Incorrect MSVC intrinsic!");
+}
+
+namespace {
+// ARC cleanup for __builtin_os_log_format
+struct CallObjCArcUse final : EHScopeStack::Cleanup {
+ CallObjCArcUse(llvm::Value *object) : object(object) {}
+ llvm::Value *object;
+
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ CGF.EmitARCIntrinsicUse(object);
+ }
+};
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E,
ReturnValueSlot ReturnValue) {
@@ -681,6 +796,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI__popcnt16:
+ case Builtin::BI__popcnt:
+ case Builtin::BI__popcnt64:
case Builtin::BI__builtin_popcount:
case Builtin::BI__builtin_popcountl:
case Builtin::BI__builtin_popcountll: {
@@ -696,6 +814,58 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
"cast");
return RValue::get(Result);
}
+ case Builtin::BI_rotr8:
+ case Builtin::BI_rotr16:
+ case Builtin::BI_rotr:
+ case Builtin::BI_lrotr:
+ case Builtin::BI_rotr64: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Value *Shift = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = Val->getType();
+ Shift = Builder.CreateIntCast(Shift, ArgType, false);
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+
+ Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
+ Shift = Builder.CreateAnd(Shift, Mask);
+ Value *LeftShift = Builder.CreateSub(ArgTypeSize, Shift);
+
+ Value *RightShifted = Builder.CreateLShr(Val, Shift);
+ Value *LeftShifted = Builder.CreateShl(Val, LeftShift);
+ Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
+
+ Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
+ Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ return RValue::get(Result);
+ }
+ case Builtin::BI_rotl8:
+ case Builtin::BI_rotl16:
+ case Builtin::BI_rotl:
+ case Builtin::BI_lrotl:
+ case Builtin::BI_rotl64: {
+ Value *Val = EmitScalarExpr(E->getArg(0));
+ Value *Shift = EmitScalarExpr(E->getArg(1));
+
+ llvm::Type *ArgType = Val->getType();
+ Shift = Builder.CreateIntCast(Shift, ArgType, false);
+ unsigned ArgWidth = cast<llvm::IntegerType>(ArgType)->getBitWidth();
+ Value *ArgTypeSize = llvm::ConstantInt::get(ArgType, ArgWidth);
+ Value *ArgZero = llvm::Constant::getNullValue(ArgType);
+
+ Value *Mask = llvm::ConstantInt::get(ArgType, ArgWidth - 1);
+ Shift = Builder.CreateAnd(Shift, Mask);
+ Value *RightShift = Builder.CreateSub(ArgTypeSize, Shift);
+
+ Value *LeftShifted = Builder.CreateShl(Val, Shift);
+ Value *RightShifted = Builder.CreateLShr(Val, RightShift);
+ Value *Rotated = Builder.CreateOr(LeftShifted, RightShifted);
+
+ Value *ShiftIsZero = Builder.CreateICmpEQ(Shift, ArgZero);
+ Value *Result = Builder.CreateSelect(ShiftIsZero, Val, Rotated);
+ return RValue::get(Result);
+ }
case Builtin::BI__builtin_unpredictable: {
// Always return the argument of __builtin_unpredictable. LLVM does not
// handle this builtin. Metadata for this builtin should be added directly
@@ -789,8 +959,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
SanitizerScope SanScope(this);
EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
SanitizerKind::Unreachable),
- "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
- None);
+ SanitizerHandler::BuiltinUnreachable,
+ EmitCheckSourceLocation(E->getExprLoc()), None);
} else
Builder.CreateUnreachable();
@@ -851,6 +1021,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
}
+ case Builtin::BIfinite:
+ case Builtin::BI__finite:
+ case Builtin::BIfinitef:
+ case Builtin::BI__finitef:
+ case Builtin::BIfinitel:
+ case Builtin::BI__finitel:
case Builtin::BI__builtin_isinf:
case Builtin::BI__builtin_isfinite: {
// isinf(x) --> fabs(x) == infinity
@@ -963,8 +1139,29 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_alloca:
case Builtin::BI__builtin_alloca: {
Value *Size = EmitScalarExpr(E->getArg(0));
- return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
+ const TargetInfo &TI = getContext().getTargetInfo();
+ // The alignment of the alloca should correspond to __BIGGEST_ALIGNMENT__.
+ unsigned SuitableAlignmentInBytes =
+ CGM.getContext()
+ .toCharUnitsFromBits(TI.getSuitableAlign())
+ .getQuantity();
+ AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
+ AI->setAlignment(SuitableAlignmentInBytes);
+ return RValue::get(AI);
+ }
+
+ case Builtin::BI__builtin_alloca_with_align: {
+ Value *Size = EmitScalarExpr(E->getArg(0));
+ Value *AlignmentInBitsValue = EmitScalarExpr(E->getArg(1));
+ auto *AlignmentInBitsCI = cast<ConstantInt>(AlignmentInBitsValue);
+ unsigned AlignmentInBits = AlignmentInBitsCI->getZExtValue();
+ unsigned AlignmentInBytes =
+ CGM.getContext().toCharUnitsFromBits(AlignmentInBits).getQuantity();
+ AllocaInst *AI = Builder.CreateAlloca(Builder.getInt8Ty(), Size);
+ AI->setAlignment(AlignmentInBytes);
+ return RValue::get(AI);
}
+
case Builtin::BIbzero:
case Builtin::BI__builtin_bzero: {
Address Dest = EmitPointerWithAlignment(E->getArg(0));
@@ -1085,6 +1282,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
return RValue::get(Builder.CreateCall(F, Depth));
}
+ case Builtin::BI_ReturnAddress: {
+ Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
+ return RValue::get(Builder.CreateCall(F, Builder.getInt32(0)));
+ }
case Builtin::BI__builtin_frame_address: {
Value *Depth =
CGM.EmitConstantExpr(E->getArg(0), getContext().UnsignedIntTy, this);
@@ -1390,7 +1591,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CGM.getTypes().arrangeBuiltinFunctionCall(E->getType(), Args);
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
- return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
+ return EmitCall(FuncInfo, CGCallee::forDirect(Func),
+ ReturnValueSlot(), Args);
}
case Builtin::BI__atomic_test_and_set: {
@@ -1905,12 +2107,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
const CallExpr *Call = cast<CallExpr>(E->getArg(0));
const Expr *Chain = E->getArg(1);
return EmitCall(Call->getCallee()->getType(),
- EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
- Call->getCalleeDecl(), EmitScalarExpr(Chain));
+ EmitCallee(Call->getCallee()), Call, ReturnValue,
+ EmitScalarExpr(Chain));
}
+ case Builtin::BI_InterlockedExchange8:
+ case Builtin::BI_InterlockedExchange16:
case Builtin::BI_InterlockedExchange:
case Builtin::BI_InterlockedExchangePointer:
- return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E));
case Builtin::BI_InterlockedCompareExchangePointer: {
llvm::Type *RTy;
llvm::IntegerType *IntType =
@@ -1938,7 +2143,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
0),
RTy));
}
- case Builtin::BI_InterlockedCompareExchange: {
+ case Builtin::BI_InterlockedCompareExchange8:
+ case Builtin::BI_InterlockedCompareExchange16:
+ case Builtin::BI_InterlockedCompareExchange:
+ case Builtin::BI_InterlockedCompareExchange64: {
AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
EmitScalarExpr(E->getArg(0)),
EmitScalarExpr(E->getArg(2)),
@@ -1948,42 +2156,44 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
CXI->setVolatile(true);
return RValue::get(Builder.CreateExtractValue(CXI, 0));
}
- case Builtin::BI_InterlockedIncrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1)));
- }
- case Builtin::BI_InterlockedDecrement: {
- llvm::Type *IntTy = ConvertType(E->getType());
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Sub,
- EmitScalarExpr(E->getArg(0)),
- ConstantInt::get(IntTy, 1),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(IntTy, 1)));
- }
- case Builtin::BI_InterlockedExchangeAdd: {
- AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
- AtomicRMWInst::Add,
- EmitScalarExpr(E->getArg(0)),
- EmitScalarExpr(E->getArg(1)),
- llvm::AtomicOrdering::SequentiallyConsistent);
- RMWI->setVolatile(true);
- return RValue::get(RMWI);
- }
+ case Builtin::BI_InterlockedIncrement16:
+ case Builtin::BI_InterlockedIncrement:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E));
+ case Builtin::BI_InterlockedDecrement16:
+ case Builtin::BI_InterlockedDecrement:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E));
+ case Builtin::BI_InterlockedAnd8:
+ case Builtin::BI_InterlockedAnd16:
+ case Builtin::BI_InterlockedAnd:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E));
+ case Builtin::BI_InterlockedExchangeAdd8:
+ case Builtin::BI_InterlockedExchangeAdd16:
+ case Builtin::BI_InterlockedExchangeAdd:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E));
+ case Builtin::BI_InterlockedExchangeSub8:
+ case Builtin::BI_InterlockedExchangeSub16:
+ case Builtin::BI_InterlockedExchangeSub:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E));
+ case Builtin::BI_InterlockedOr8:
+ case Builtin::BI_InterlockedOr16:
+ case Builtin::BI_InterlockedOr:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E));
+ case Builtin::BI_InterlockedXor8:
+ case Builtin::BI_InterlockedXor16:
+ case Builtin::BI_InterlockedXor:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
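The whole _Interlocked* family above is now funneled through EmitMSVCBuiltinExpr, which emits the corresponding volatile atomicrmw instruction. A rough source-level sketch of the semantics involved, using std::atomic as a stand-in (illustrative only, not the codegen path; the function names are hypothetical):

#include <atomic>

// _InterlockedIncrement returns the *new* value, hence the +1.
long interlockedIncrementSketch(std::atomic<long> &Value) {
  return Value.fetch_add(1, std::memory_order_seq_cst) + 1;
}

// _InterlockedExchangeAdd returns the *old* value.
long interlockedExchangeAddSketch(std::atomic<long> &Value, long Addend) {
  return Value.fetch_add(Addend, std::memory_order_seq_cst);
}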
case Builtin::BI__readfsdword: {
llvm::Type *IntTy = ConvertType(E->getType());
Value *IntToPtr =
Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
llvm::PointerType::get(IntTy, 257));
- LoadInst *Load =
- Builder.CreateDefaultAlignedLoad(IntToPtr, /*isVolatile=*/true);
+ LoadInst *Load = Builder.CreateAlignedLoad(
+ IntTy, IntToPtr, getContext().getTypeAlignInChars(E->getType()));
+ Load->setVolatile(true);
return RValue::get(Load);
}
@@ -2004,7 +2214,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Attribute::ReturnsTwice);
llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmpex", ReturnsTwiceAttr);
+ "_setjmpex", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Buf = Builder.CreateBitOrPointerCast(
EmitScalarExpr(E->getArg(0)), Int8PtrTy);
llvm::Value *FrameAddr =
@@ -2029,7 +2239,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgTypes[] = {Int8PtrTy, IntTy};
llvm::Constant *SetJmp3 = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/true),
- "_setjmp3", ReturnsTwiceAttr);
+ "_setjmp3", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *Count = ConstantInt::get(IntTy, 0);
llvm::Value *Args[] = {Buf, Count};
CS = EmitRuntimeCallOrInvoke(SetJmp3, Args);
@@ -2037,7 +2247,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
llvm::Constant *SetJmp = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
- "_setjmp", ReturnsTwiceAttr);
+ "_setjmp", ReturnsTwiceAttr, /*Local=*/true);
llvm::Value *FrameAddr =
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
ConstantInt::get(Int32Ty, 0));
@@ -2057,11 +2267,47 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
}
+ case Builtin::BI__builtin_coro_size: {
+ auto & Context = getContext();
+ auto SizeTy = Context.getSizeType();
+ auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ Value *F = CGM.getIntrinsic(Intrinsic::coro_size, T);
+ return RValue::get(Builder.CreateCall(F));
+ }
+
+ case Builtin::BI__builtin_coro_id:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_id);
+ case Builtin::BI__builtin_coro_promise:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_promise);
+ case Builtin::BI__builtin_coro_resume:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_resume);
+ case Builtin::BI__builtin_coro_frame:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_frame);
+ case Builtin::BI__builtin_coro_free:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_free);
+ case Builtin::BI__builtin_coro_destroy:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_destroy);
+ case Builtin::BI__builtin_coro_done:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_done);
+ case Builtin::BI__builtin_coro_alloc:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_alloc);
+ case Builtin::BI__builtin_coro_begin:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_begin);
+ case Builtin::BI__builtin_coro_end:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_end);
+ case Builtin::BI__builtin_coro_suspend:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_suspend);
+ case Builtin::BI__builtin_coro_param:
+ return EmitCoroutineIntrinsic(E, Intrinsic::coro_param);
+
// OpenCL v2.0 s6.13.16.2, Built-in pipe read and write functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe: {
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Type of the generic packet parameter.
unsigned GenericAS =
@@ -2075,19 +2321,21 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
: "__write_pipe_2";
// Creating a generic function type to be able to call with any builtin or
// user defined type.
- llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy};
+ llvm::Type *ArgTys[] = {Arg0->getType(), I8PTy, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
Value *BCast = Builder.CreatePointerCast(Arg1, I8PTy);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name), {Arg0, BCast}));
+ return RValue::get(
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, BCast, PacketSize, PacketAlign}));
} else {
assert(4 == E->getNumArgs() &&
"Illegal number of parameters to pipe function");
const char *Name = (BuiltinID == Builtin::BIread_pipe) ? "__read_pipe_4"
: "__write_pipe_4";
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, I8PTy,
+ Int32Ty, Int32Ty};
Value *Arg2 = EmitScalarExpr(E->getArg(2)),
*Arg3 = EmitScalarExpr(E->getArg(3));
llvm::FunctionType *FTy = llvm::FunctionType::get(
@@ -2098,7 +2346,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (Arg2->getType() != Int32Ty)
Arg2 = Builder.CreateZExtOrTrunc(Arg2, Int32Ty);
return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1, Arg2, BCast}));
+ CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, Arg2, BCast, PacketSize, PacketAlign}));
}
}
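Both pipe paths now pass the packet size and alignment explicitly, so a hypothetical C-level view of the runtime entry points built here, read off the ArgTys arrays above with pipe_t standing in for the target-specific pipe handle type, looks roughly as follows (the authoritative prototypes belong to the OpenCL runtime):

// Assumed shapes only, derived from the ArgTys arrays above.
using pipe_t = void *;
extern "C" int __read_pipe_2(pipe_t Pipe, void *Packet, int Size, int Align);
extern "C" int __write_pipe_2(pipe_t Pipe, void *Packet, int Size, int Align);
extern "C" int __read_pipe_4(pipe_t Pipe, void *ReserveId, int Index,
                             void *Packet, int Size, int Align);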
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe reserve read and write
@@ -2127,9 +2376,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
llvm::Type *ReservedIDTy = ConvertType(getContext().OCLReserveIDTy);
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
ReservedIDTy, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
// We know the second argument is an integer type, but we may need to cast
@@ -2137,7 +2389,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (Arg1->getType() != Int32Ty)
Arg1 = Builder.CreateZExtOrTrunc(Arg1, Int32Ty);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1}));
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16, s9.17.3.5 - Built-in pipe commit read and write
// functions
@@ -2163,15 +2416,19 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
Value *Arg0 = EmitScalarExpr(E->getArg(0)),
*Arg1 = EmitScalarExpr(E->getArg(1));
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
// Building the generic function prototype.
- llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType()};
+ llvm::Type *ArgTys[] = {Arg0->getType(), Arg1->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()),
llvm::ArrayRef<llvm::Type *>(ArgTys), false);
return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0, Arg1}));
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, Arg1, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.16.4 Built-in pipe query functions
case Builtin::BIget_pipe_num_packets:
@@ -2184,12 +2441,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Building the generic function prototype.
Value *Arg0 = EmitScalarExpr(E->getArg(0));
- llvm::Type *ArgTys[] = {Arg0->getType()};
+ CGOpenCLRuntime OpenCLRT(CGM);
+ Value *PacketSize = OpenCLRT.getPipeElemSize(E->getArg(0));
+ Value *PacketAlign = OpenCLRT.getPipeElemAlign(E->getArg(0));
+ llvm::Type *ArgTys[] = {Arg0->getType(), Int32Ty, Int32Ty};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), false);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name), {Arg0}));
+ return RValue::get(Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name),
+ {Arg0, PacketSize, PacketAlign}));
}
// OpenCL v2.0 s6.13.9 - Address space qualifier functions.
@@ -2258,17 +2518,11 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
std::vector<llvm::Type *> ArgTys = {QueueTy, IntTy, RangeTy, Int8PtrTy,
IntTy};
- // Add the variadics.
- for (unsigned I = 4; I < NumArgs; ++I) {
- llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I));
- unsigned TypeSizeInBytes =
- getContext()
- .getTypeSizeInChars(E->getArg(I)->getType())
- .getQuantity();
- Args.push_back(TypeSizeInBytes < 4
- ? Builder.CreateZExt(ArgSize, Int32Ty)
- : ArgSize);
- }
+ // Each of the following arguments specifies the size of the corresponding
+ // argument passed to the enqueued block.
+ for (unsigned I = 4 /*Position of the first size arg*/; I < NumArgs; ++I)
+ Args.push_back(
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy));
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), true);
@@ -2279,29 +2533,26 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Any calls now have event arguments passed.
if (NumArgs >= 7) {
llvm::Type *EventTy = ConvertType(getContext().OCLClkEventTy);
- unsigned AS4 =
- E->getArg(4)->getType()->isArrayType()
- ? E->getArg(4)->getType().getAddressSpace()
- : E->getArg(4)->getType()->getPointeeType().getAddressSpace();
- llvm::Type *EventPtrAS4Ty =
- EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS4));
- unsigned AS5 =
- E->getArg(5)->getType()->getPointeeType().getAddressSpace();
- llvm::Type *EventPtrAS5Ty =
- EventTy->getPointerTo(CGM.getContext().getTargetAddressSpace(AS5));
-
- llvm::Value *NumEvents = EmitScalarExpr(E->getArg(3));
+ llvm::Type *EventPtrTy = EventTy->getPointerTo(
+ CGM.getContext().getTargetAddressSpace(LangAS::opencl_generic));
+
+ llvm::Value *NumEvents =
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(3)), Int32Ty);
llvm::Value *EventList =
E->getArg(4)->getType()->isArrayType()
? EmitArrayToPointerDecay(E->getArg(4)).getPointer()
: EmitScalarExpr(E->getArg(4));
llvm::Value *ClkEvent = EmitScalarExpr(E->getArg(5));
+ // Convert to generic address space.
+ EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
+ ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
llvm::Value *Block =
Builder.CreateBitCast(EmitScalarExpr(E->getArg(6)), Int8PtrTy);
- std::vector<llvm::Type *> ArgTys = {
- QueueTy, Int32Ty, RangeTy, Int32Ty,
- EventPtrAS4Ty, EventPtrAS5Ty, Int8PtrTy};
+ std::vector<llvm::Type *> ArgTys = {QueueTy, Int32Ty, RangeTy,
+ Int32Ty, EventPtrTy, EventPtrTy,
+ Int8PtrTy};
+
std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents,
EventList, ClkEvent, Block};
@@ -2320,17 +2571,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
ArgTys.push_back(Int32Ty);
Name = "__enqueue_kernel_events_vaargs";
- // Add the variadics.
- for (unsigned I = 7; I < NumArgs; ++I) {
- llvm::Value *ArgSize = EmitScalarExpr(E->getArg(I));
- unsigned TypeSizeInBytes =
- getContext()
- .getTypeSizeInChars(E->getArg(I)->getType())
- .getQuantity();
- Args.push_back(TypeSizeInBytes < 4
- ? Builder.CreateZExt(ArgSize, Int32Ty)
- : ArgSize);
- }
+ // Each of the following arguments specifies the size of the corresponding
+ // argument passed to the enqueued block.
+ for (unsigned I = 7 /*Position of the first size arg*/; I < NumArgs; ++I)
+ Args.push_back(
+ Builder.CreateZExtOrTrunc(EmitScalarExpr(E->getArg(I)), SizeTy));
+
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys), true);
return RValue::get(
@@ -2373,6 +2619,76 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Fall through - it's already mapped to the intrinsic by GCCBuiltin.
break;
}
+ case Builtin::BI__builtin_os_log_format: {
+ assert(E->getNumArgs() >= 2 &&
+ "__builtin_os_log_format takes at least 2 arguments");
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
+ Address BufAddr = EmitPointerWithAlignment(E->getArg(0));
+ // Ignore argument 1, the format string. It is not currently used.
+ CharUnits Offset;
+ Builder.CreateStore(
+ Builder.getInt8(Layout.getSummaryByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
+ Builder.CreateStore(
+ Builder.getInt8(Layout.getNumArgsByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "numArgs"));
+
+ llvm::SmallVector<llvm::Value *, 4> RetainableOperands;
+ for (const auto &Item : Layout.Items) {
+ Builder.CreateStore(
+ Builder.getInt8(Item.getDescriptorByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "argDescriptor"));
+ Builder.CreateStore(
+ Builder.getInt8(Item.getSizeByte()),
+ Builder.CreateConstByteGEP(BufAddr, Offset++, "argSize"));
+ Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset);
+ if (const Expr *TheExpr = Item.getExpr()) {
+ Addr = Builder.CreateElementBitCast(
+ Addr, ConvertTypeForMem(TheExpr->getType()));
+ // Check if this is a retainable type.
+ if (TheExpr->getType()->isObjCRetainableType()) {
+ assert(getEvaluationKind(TheExpr->getType()) == TEK_Scalar &&
+ "Only scalar can be a ObjC retainable type");
+ llvm::Value *SV = EmitScalarExpr(TheExpr, /*Ignore*/ false);
+ RValue RV = RValue::get(SV);
+ LValue LV = MakeAddrLValue(Addr, TheExpr->getType());
+ EmitStoreThroughLValue(RV, LV);
+ // Check if the object is constant, if not, save it in
+ // RetainableOperands.
+ if (!isa<Constant>(SV))
+ RetainableOperands.push_back(SV);
+ } else {
+ EmitAnyExprToMem(TheExpr, Addr, Qualifiers(), /*isInit*/ true);
+ }
+ } else {
+ Addr = Builder.CreateElementBitCast(Addr, Int32Ty);
+ Builder.CreateStore(
+ Builder.getInt32(Item.getConstValue().getQuantity()), Addr);
+ }
+ Offset += Item.size();
+ }
+
+ // Push a clang.arc.use cleanup for each object in RetainableOperands. The
+ // cleanup will cause the use to appear after the final log call, keeping
+ // the object valid while it's held in the log buffer. Note that if there's
+ // a release cleanup on the object, it will already be active; since
+ // cleanups are emitted in reverse order, the use will occur before the
+ // object is released.
+ if (!RetainableOperands.empty() && getLangOpts().ObjCAutoRefCount &&
+ CGM.getCodeGenOpts().OptimizationLevel != 0)
+ for (llvm::Value *object : RetainableOperands)
+ pushFullExprCleanup<CallObjCArcUse>(getARCCleanupKind(), object);
+
+ return RValue::get(BufAddr.getPointer());
+ }
+
+ case Builtin::BI__builtin_os_log_format_buffer_size: {
+ analyze_os_log::OSLogBufferLayout Layout;
+ analyze_os_log::computeOSLogBufferLayout(CGM.getContext(), E, Layout);
+ return RValue::get(ConstantInt::get(ConvertType(E->getType()),
+ Layout.size().getQuantity()));
+ }
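So the buffer laid out by __builtin_os_log_format is a two-byte header (summary, argument count) followed by one (descriptor, size, payload) record per argument, and the buffer-size builtin simply returns that total. A standalone sketch of the same arithmetic, mirroring the Offset accounting above (illustrative only):

#include <cstddef>
#include <vector>

// Two header bytes, then per item: descriptor byte + size byte + payload.
size_t osLogBufferSizeSketch(const std::vector<size_t> &ItemPayloadSizes) {
  size_t Size = 2;
  for (size_t Payload : ItemPayloadSizes)
    Size += 2 + Payload;
  return Size;
}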
}
// If this is an alias for a lib function (e.g. __builtin_sin), emit
@@ -2385,7 +2701,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// If this is a predefined lib function (e.g. malloc), emit the call
// using exactly the normal call path.
if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
- return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));
+ return emitLibraryCall(*this, FD, E,
+ cast<llvm::Constant>(EmitScalarExpr(E->getCallee())));
// Check that a call to a target specific builtin has the correct target
// features.
@@ -2397,14 +2714,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// See if we have a target specific intrinsic.
const char *Name = getContext().BuiltinInfo.getName(BuiltinID);
Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
- if (const char *Prefix =
- llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
- IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
+ StringRef Prefix =
+ llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
+ if (!Prefix.empty()) {
+ IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
// NOTE: we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
// MS builtins via ALL_MS_LANGUAGES, so they have already been filtered earlier.
if (IntrinsicID == Intrinsic::not_intrinsic)
- IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
+ IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix.data(), Name);
}
if (IntrinsicID != Intrinsic::not_intrinsic) {
@@ -3871,7 +4189,7 @@ static Value *EmitSpecialRegisterBuiltin(CodeGenFunction &CGF,
if (SysReg.empty()) {
const Expr *SysRegStrExpr = E->getArg(0)->IgnoreParenCasts();
- SysReg = cast<StringLiteral>(SysRegStrExpr)->getString();
+ SysReg = cast<clang::StringLiteral>(SysRegStrExpr)->getString();
}
llvm::Metadata *Ops[] = { llvm::MDString::get(Context, SysReg) };
@@ -4120,19 +4438,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+ llvm::Type *PtrTy = llvm::IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
? Intrinsic::arm_ldaex
: Intrinsic::arm_ldrex,
- LoadAddr->getType());
+ PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
+ llvm::Type *IntResTy = llvm::IntegerType::get(
+ getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
@@ -4173,7 +4493,10 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
else {
- StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ llvm::Type *IntTy = llvm::IntegerType::get(
+ getLLVMContext(),
+ CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
+ StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
@@ -4184,6 +4507,41 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
}
+ switch (BuiltinID) {
+ case ARM::BI__iso_volatile_load8:
+ case ARM::BI__iso_volatile_load16:
+ case ARM::BI__iso_volatile_load32:
+ case ARM::BI__iso_volatile_load64: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits LoadSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ LoadSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::LoadInst *Load =
+ Builder.CreateAlignedLoad(Ptr, LoadSize);
+ Load->setVolatile(true);
+ return Load;
+ }
+ case ARM::BI__iso_volatile_store8:
+ case ARM::BI__iso_volatile_store16:
+ case ARM::BI__iso_volatile_store32:
+ case ARM::BI__iso_volatile_store64: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ Value *Value = EmitScalarExpr(E->getArg(1));
+ QualType ElTy = E->getArg(0)->getType()->getPointeeType();
+ CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
+ llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
+ StoreSize.getQuantity() * 8);
+ Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
+ llvm::StoreInst *Store =
+ Builder.CreateAlignedStore(Value, Ptr,
+ StoreSize);
+ Store->setVolatile(true);
+ return Store;
+ }
+ }
+
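The __iso_volatile_* builtins ask for a plain volatile access of the stated width with no implied barrier, which is exactly what the volatile aligned load/store emitted above provides. A minimal source-level sketch of that contract (illustrative; the real builtins are compiler-provided):

#include <cstdint>

// Volatile 32-bit access, no fence implied.
int32_t isoVolatileLoad32Sketch(const volatile int32_t *Ptr) { return *Ptr; }
void isoVolatileStore32Sketch(volatile int32_t *Ptr, int32_t V) { *Ptr = V; }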
if (BuiltinID == ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
@@ -4397,6 +4755,29 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
+ case ARM::BI_BitScanForward:
+ case ARM::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case ARM::BI_BitScanReverse:
+ case ARM::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+ case ARM::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case ARM::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case ARM::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case ARM::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case ARM::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case ARM::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case ARM::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case ARM::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
}
// Get the last argument, which specifies the vector type.
@@ -4889,19 +5270,21 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
- getContext().getTypeSize(Ty));
- LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
+ llvm::Type *PtrTy = llvm::IntegerType::get(
+ getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
? Intrinsic::aarch64_ldaxr
: Intrinsic::aarch64_ldxr,
- LoadAddr->getType());
+ PtrTy);
Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
+ llvm::Type *IntResTy = llvm::IntegerType::get(
+ getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
return Builder.CreateBitCast(Val, RealResTy);
}
@@ -4940,7 +5323,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
if (StoreVal->getType()->isPointerTy())
StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
else {
- StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
+ llvm::Type *IntTy = llvm::IntegerType::get(
+ getLLVMContext(),
+ CGM.getDataLayout().getTypeSizeInBits(StoreVal->getType()));
+ StoreVal = Builder.CreateBitCast(StoreVal, IntTy);
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
@@ -5065,9 +5451,11 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
switch (BuiltinID) {
default: break;
case NEON::BI__builtin_neon_vldrq_p128: {
- llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
+ llvm::Type *Int128Ty = llvm::Type::getIntNTy(getLLVMContext(), 128);
+ llvm::Type *Int128PTy = llvm::PointerType::get(Int128Ty, 0);
Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
- return Builder.CreateDefaultAlignedLoad(Ptr);
+ return Builder.CreateAlignedLoad(Int128Ty, Ptr,
+ CharUnits::fromQuantity(16));
}
case NEON::BI__builtin_neon_vstrq_p128: {
llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
@@ -6240,27 +6628,37 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "");
}
case NEON::BI__builtin_neon_vld1_v:
- case NEON::BI__builtin_neon_vld1q_v:
+ case NEON::BI__builtin_neon_vld1q_v: {
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
- return Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_v ? 8 : 16);
+ return Builder.CreateAlignedLoad(VTy, Ops[0], Alignment);
+ }
case NEON::BI__builtin_neon_vst1_v:
case NEON::BI__builtin_neon_vst1q_v:
Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
return Builder.CreateDefaultAlignedStore(Ops[1], Ops[0]);
case NEON::BI__builtin_neon_vld1_lane_v:
- case NEON::BI__builtin_neon_vld1q_lane_v:
+ case NEON::BI__builtin_neon_vld1q_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_lane_v ? 8 : 16);
+ Ops[0] =
+ Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
+ }
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
Ty = llvm::PointerType::getUnqual(VTy->getElementType());
Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
- Ops[0] = Builder.CreateDefaultAlignedLoad(Ops[0]);
+ auto Alignment = CharUnits::fromQuantity(
+ BuiltinID == NEON::BI__builtin_neon_vld1_dup_v ? 8 : 16);
+ Ops[0] =
+ Builder.CreateAlignedLoad(VTy->getElementType(), Ops[0], Alignment);
llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
return EmitNeonSplat(Ops[0], CI);
@@ -6620,6 +7018,26 @@ static Value *EmitX86MaskedLoad(CodeGenFunction &CGF,
return CGF.Builder.CreateMaskedLoad(Ops[0], Align, MaskVec, Ops[1]);
}
+static Value *EmitX86SubVectorBroadcast(CodeGenFunction &CGF,
+ SmallVectorImpl<Value *> &Ops,
+ llvm::Type *DstTy,
+ unsigned SrcSizeInBits,
+ unsigned Align) {
+ // Load the subvector.
+ Ops[0] = CGF.Builder.CreateAlignedLoad(Ops[0], Align);
+
+ // Create broadcast mask.
+ unsigned NumDstElts = DstTy->getVectorNumElements();
+ unsigned NumSrcElts = SrcSizeInBits / DstTy->getScalarSizeInBits();
+
+ SmallVector<uint32_t, 8> Mask;
+ for (unsigned i = 0; i != NumDstElts; i += NumSrcElts)
+ for (unsigned j = 0; j != NumSrcElts; ++j)
+ Mask.push_back(j);
+
+ return CGF.Builder.CreateShuffleVector(Ops[0], Ops[0], Mask, "subvecbcst");
+}
+
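The shuffle mask built by EmitX86SubVectorBroadcast just repeats the source lane indices until the destination is full; for example, a 2-element subvector broadcast into an 8-element destination uses the mask {0,1,0,1,0,1,0,1}. A standalone sketch of the index computation (illustrative only):

#include <cstdint>
#include <vector>

std::vector<uint32_t> subVectorBroadcastMaskSketch(unsigned NumDstElts,
                                                   unsigned NumSrcElts) {
  std::vector<uint32_t> Mask;
  for (unsigned I = 0; I != NumDstElts; I += NumSrcElts)
    for (unsigned J = 0; J != NumSrcElts; ++J)
      Mask.push_back(J);
  return Mask;
}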
static Value *EmitX86Select(CodeGenFunction &CGF,
Value *Mask, Value *Op0, Value *Op1) {
@@ -6676,6 +7094,18 @@ static Value *EmitX86MaskedCompare(CodeGenFunction &CGF, unsigned CC,
std::max(NumElts, 8U)));
}
+static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
+ ArrayRef<Value *> Ops) {
+ Value *Cmp = CGF.Builder.CreateICmp(Pred, Ops[0], Ops[1]);
+ Value *Res = CGF.Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
+
+ if (Ops.size() == 2)
+ return Res;
+
+ assert(Ops.size() == 4);
+ return EmitX86Select(CGF, Ops[3], Res, Ops[2]);
+}
+
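EmitX86MinMax folds the per-width cases below into one compare-and-select, blending against a pass-through operand when an AVX-512 write mask is supplied. A scalar analogue of the pattern, for illustration only:

// Compare, select the winner, then blend with the pass-through when masked.
int signedMaxSketch(int A, int B, bool MaskBit, int PassThru) {
  int Res = (A > B) ? A : B;        // icmp sgt + select
  return MaskBit ? Res : PassThru;  // EmitX86Select when a mask is present
}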
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_ms_va_start ||
@@ -6860,6 +7290,25 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
return Builder.CreateCall(F, {Address, RW, Locality, Data});
}
+ case X86::BI_mm_clflush: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_clflush),
+ Ops[0]);
+ }
+ case X86::BI_mm_lfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_lfence));
+ }
+ case X86::BI_mm_mfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_mfence));
+ }
+ case X86::BI_mm_sfence: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_sfence));
+ }
+ case X86::BI_mm_pause: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse2_pause));
+ }
+ case X86::BI__rdtsc: {
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_rdtsc));
+ }
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
@@ -6872,12 +7321,14 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_vec_ext_v2si:
return Builder.CreateExtractElement(Ops[0],
llvm::ConstantInt::get(Ops[1]->getType(), 0));
+ case X86::BI_mm_setcsr:
case X86::BI__builtin_ia32_ldmxcsr: {
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Builder.CreateStore(Ops[0], Tmp);
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
Builder.CreateBitCast(Tmp.getPointer(), Int8PtrTy));
}
+ case X86::BI_mm_getcsr:
case X86::BI__builtin_ia32_stmxcsr: {
Address Tmp = CreateMemTemp(E->getType());
Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
@@ -6944,6 +7395,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storeups512_mask:
return EmitX86MaskedStore(*this, Ops, 1);
+ case X86::BI__builtin_ia32_storess128_mask:
+ case X86::BI__builtin_ia32_storesd128_mask: {
+ return EmitX86MaskedStore(*this, Ops, 16);
+ }
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
@@ -6980,6 +7435,10 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_loaddqudi512_mask:
return EmitX86MaskedLoad(*this, Ops, 1);
+ case X86::BI__builtin_ia32_loadss128_mask:
+ case X86::BI__builtin_ia32_loadsd128_mask:
+ return EmitX86MaskedLoad(*this, Ops, 16);
+
case X86::BI__builtin_ia32_loadaps128_mask:
case X86::BI__builtin_ia32_loadaps256_mask:
case X86::BI__builtin_ia32_loadaps512_mask:
@@ -6996,6 +7455,13 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
getContext().getTypeAlignInChars(E->getArg(1)->getType()).getQuantity();
return EmitX86MaskedLoad(*this, Ops, Align);
}
+
+ case X86::BI__builtin_ia32_vbroadcastf128_pd256:
+ case X86::BI__builtin_ia32_vbroadcastf128_ps256: {
+ llvm::Type *DstTy = ConvertType(E->getType());
+ return EmitX86SubVectorBroadcast(*this, Ops, DstTy, 128, 1);
+ }
+
case X86::BI__builtin_ia32_storehps:
case X86::BI__builtin_ia32_storelps: {
llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
@@ -7015,8 +7481,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_palignr128:
case X86::BI__builtin_ia32_palignr256:
- case X86::BI__builtin_ia32_palignr128_mask:
- case X86::BI__builtin_ia32_palignr256_mask:
case X86::BI__builtin_ia32_palignr512_mask: {
unsigned ShiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
@@ -7059,36 +7523,26 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
}
case X86::BI__builtin_ia32_movnti:
- case X86::BI__builtin_ia32_movnti64: {
- llvm::MDNode *Node = llvm::MDNode::get(
- getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
-
- // Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Ops[1]->getType()),
- "cast");
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Ops[1], BC);
- SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
-
- // No alignment for scalar intrinsic store.
- SI->setAlignment(1);
- return SI;
- }
+ case X86::BI__builtin_ia32_movnti64:
case X86::BI__builtin_ia32_movntsd:
case X86::BI__builtin_ia32_movntss: {
llvm::MDNode *Node = llvm::MDNode::get(
getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
+ Value *Ptr = Ops[0];
+ Value *Src = Ops[1];
+
// Extract the 0'th element of the source vector.
- Value *Scl = Builder.CreateExtractElement(Ops[1], (uint64_t)0, "extract");
+ if (BuiltinID == X86::BI__builtin_ia32_movntsd ||
+ BuiltinID == X86::BI__builtin_ia32_movntss)
+ Src = Builder.CreateExtractElement(Src, (uint64_t)0, "extract");
// Convert the type of the pointer to a pointer to the stored type.
- Value *BC = Builder.CreateBitCast(Ops[0],
- llvm::PointerType::getUnqual(Scl->getType()),
- "cast");
+ Value *BC = Builder.CreateBitCast(
+ Ptr, llvm::PointerType::getUnqual(Src->getType()), "cast");
// Unaligned nontemporal store of the scalar value.
- StoreInst *SI = Builder.CreateDefaultAlignedStore(Scl, BC);
+ StoreInst *SI = Builder.CreateDefaultAlignedStore(Src, BC);
SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
SI->setAlignment(1);
return SI;
@@ -7182,43 +7636,58 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Ops[1]);
}
- // TODO: Handle 64/512-bit vector widths of min/max.
case X86::BI__builtin_ia32_pmaxsb128:
case X86::BI__builtin_ia32_pmaxsw128:
case X86::BI__builtin_ia32_pmaxsd128:
+ case X86::BI__builtin_ia32_pmaxsq128_mask:
case X86::BI__builtin_ia32_pmaxsb256:
case X86::BI__builtin_ia32_pmaxsw256:
- case X86::BI__builtin_ia32_pmaxsd256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SGT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pmaxsd256:
+ case X86::BI__builtin_ia32_pmaxsq256_mask:
+ case X86::BI__builtin_ia32_pmaxsb512_mask:
+ case X86::BI__builtin_ia32_pmaxsw512_mask:
+ case X86::BI__builtin_ia32_pmaxsd512_mask:
+ case X86::BI__builtin_ia32_pmaxsq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_SGT, Ops);
case X86::BI__builtin_ia32_pmaxub128:
case X86::BI__builtin_ia32_pmaxuw128:
case X86::BI__builtin_ia32_pmaxud128:
+ case X86::BI__builtin_ia32_pmaxuq128_mask:
case X86::BI__builtin_ia32_pmaxub256:
case X86::BI__builtin_ia32_pmaxuw256:
- case X86::BI__builtin_ia32_pmaxud256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_UGT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pmaxud256:
+ case X86::BI__builtin_ia32_pmaxuq256_mask:
+ case X86::BI__builtin_ia32_pmaxub512_mask:
+ case X86::BI__builtin_ia32_pmaxuw512_mask:
+ case X86::BI__builtin_ia32_pmaxud512_mask:
+ case X86::BI__builtin_ia32_pmaxuq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_UGT, Ops);
case X86::BI__builtin_ia32_pminsb128:
case X86::BI__builtin_ia32_pminsw128:
case X86::BI__builtin_ia32_pminsd128:
+ case X86::BI__builtin_ia32_pminsq128_mask:
case X86::BI__builtin_ia32_pminsb256:
case X86::BI__builtin_ia32_pminsw256:
- case X86::BI__builtin_ia32_pminsd256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_SLT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pminsd256:
+ case X86::BI__builtin_ia32_pminsq256_mask:
+ case X86::BI__builtin_ia32_pminsb512_mask:
+ case X86::BI__builtin_ia32_pminsw512_mask:
+ case X86::BI__builtin_ia32_pminsd512_mask:
+ case X86::BI__builtin_ia32_pminsq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_SLT, Ops);
case X86::BI__builtin_ia32_pminub128:
case X86::BI__builtin_ia32_pminuw128:
case X86::BI__builtin_ia32_pminud128:
+ case X86::BI__builtin_ia32_pminuq128_mask:
case X86::BI__builtin_ia32_pminub256:
case X86::BI__builtin_ia32_pminuw256:
- case X86::BI__builtin_ia32_pminud256: {
- Value *Cmp = Builder.CreateICmp(ICmpInst::ICMP_ULT, Ops[0], Ops[1]);
- return Builder.CreateSelect(Cmp, Ops[0], Ops[1]);
- }
+ case X86::BI__builtin_ia32_pminud256:
+ case X86::BI__builtin_ia32_pminuq256_mask:
+ case X86::BI__builtin_ia32_pminub512_mask:
+ case X86::BI__builtin_ia32_pminuw512_mask:
+ case X86::BI__builtin_ia32_pminud512_mask:
+ case X86::BI__builtin_ia32_pminuq512_mask:
+ return EmitX86MinMax(*this, ICmpInst::ICMP_ULT, Ops);
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
@@ -7363,6 +7832,87 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 6);
case X86::BI__builtin_ia32_cmpordsd:
return getCmpIntrinsicCall(Intrinsic::x86_sse2_cmp_sd, 7);
+
+ case X86::BI__emul:
+ case X86::BI__emulu: {
+ llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
+ bool isSigned = (BuiltinID == X86::BI__emul);
+ Value *LHS = Builder.CreateIntCast(Ops[0], Int64Ty, isSigned);
+ Value *RHS = Builder.CreateIntCast(Ops[1], Int64Ty, isSigned);
+ return Builder.CreateMul(LHS, RHS, "", !isSigned, isSigned);
+ }
+ case X86::BI__mulh:
+ case X86::BI__umulh:
+ case X86::BI_mul128:
+ case X86::BI_umul128: {
+ llvm::Type *ResType = ConvertType(E->getType());
+ llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
+
+ bool IsSigned = (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI_mul128);
+ Value *LHS = Builder.CreateIntCast(Ops[0], Int128Ty, IsSigned);
+ Value *RHS = Builder.CreateIntCast(Ops[1], Int128Ty, IsSigned);
+
+ Value *MulResult, *HigherBits;
+ if (IsSigned) {
+ MulResult = Builder.CreateNSWMul(LHS, RHS);
+ HigherBits = Builder.CreateAShr(MulResult, 64);
+ } else {
+ MulResult = Builder.CreateNUWMul(LHS, RHS);
+ HigherBits = Builder.CreateLShr(MulResult, 64);
+ }
+ HigherBits = Builder.CreateIntCast(HigherBits, ResType, IsSigned);
+
+ if (BuiltinID == X86::BI__mulh || BuiltinID == X86::BI__umulh)
+ return HigherBits;
+
+ Address HighBitsAddress = EmitPointerWithAlignment(E->getArg(2));
+ Builder.CreateStore(HigherBits, HighBitsAddress);
+ return Builder.CreateIntCast(MulResult, ResType, IsSigned);
+ }
+
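_umul128 and __umulh are therefore a widening multiply at 128-bit precision followed by a shift to recover the high half. A source-level sketch using the compiler's __int128 extension (illustrative; MSVC exposes these only as intrinsics):

#include <cstdint>

// Writes the high 64 bits through HighProduct and returns the low 64 bits.
uint64_t umul128Sketch(uint64_t A, uint64_t B, uint64_t *HighProduct) {
  unsigned __int128 Product = static_cast<unsigned __int128>(A) * B;
  *HighProduct = static_cast<uint64_t>(Product >> 64);
  return static_cast<uint64_t>(Product);
}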
+ case X86::BI__faststorefence: {
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::CrossThread);
+ }
+ case X86::BI_ReadWriteBarrier:
+ case X86::BI_ReadBarrier:
+ case X86::BI_WriteBarrier: {
+ return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
+ llvm::SingleThread);
+ }
+ case X86::BI_BitScanForward:
+ case X86::BI_BitScanForward64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanForward, E);
+ case X86::BI_BitScanReverse:
+ case X86::BI_BitScanReverse64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_BitScanReverse, E);
+
+ case X86::BI_InterlockedAnd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedAnd, E);
+ case X86::BI_InterlockedExchange64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchange, E);
+ case X86::BI_InterlockedExchangeAdd64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeAdd, E);
+ case X86::BI_InterlockedExchangeSub64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedExchangeSub, E);
+ case X86::BI_InterlockedOr64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedOr, E);
+ case X86::BI_InterlockedXor64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E);
+ case X86::BI_InterlockedDecrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedDecrement, E);
+ case X86::BI_InterlockedIncrement64:
+ return EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedIncrement, E);
+
+ case X86::BI_AddressOfReturnAddress: {
+ Value *F = CGM.getIntrinsic(Intrinsic::addressofreturnaddress);
+ return Builder.CreateCall(F);
+ }
+ case X86::BI__stosb: {
+ // We treat __stosb as a volatile memset - it may not generate a "rep stosb"
+ // instruction, but it will create a memset that won't be optimized away.
+ return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
+ }
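A plain C++ picture of the __stosb contract described in the comment above, i.e. a byte fill the optimizer must not elide (illustrative; the builtin itself becomes a volatile llvm.memset):

#include <cstddef>

void stosbSketch(volatile unsigned char *Dest, unsigned char Data,
                 size_t Count) {
  for (size_t I = 0; I != Count; ++I)
    Dest[I] = Data;
}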
}
}
@@ -7384,7 +7934,7 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_ppc_get_timebase:
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::readcyclecounter));
- // vec_ld, vec_lvsl, vec_lvsr
+ // vec_ld, vec_xl_be, vec_lvsl, vec_lvsr
case PPC::BI__builtin_altivec_lvx:
case PPC::BI__builtin_altivec_lvxl:
case PPC::BI__builtin_altivec_lvebx:
@@ -7394,11 +7944,19 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_lvsr:
case PPC::BI__builtin_vsx_lxvd2x:
case PPC::BI__builtin_vsx_lxvw4x:
+ case PPC::BI__builtin_vsx_lxvd2x_be:
+ case PPC::BI__builtin_vsx_lxvw4x_be:
+ case PPC::BI__builtin_vsx_lxvl:
+ case PPC::BI__builtin_vsx_lxvll:
{
- Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
-
- Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
- Ops.pop_back();
+ if (BuiltinID == PPC::BI__builtin_vsx_lxvl ||
+ BuiltinID == PPC::BI__builtin_vsx_lxvll) {
+ Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
+ } else {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+ Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
+ Ops.pop_back();
+ }
switch (BuiltinID) {
default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
@@ -7429,12 +7987,24 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_lxvw4x:
ID = Intrinsic::ppc_vsx_lxvw4x;
break;
+ case PPC::BI__builtin_vsx_lxvd2x_be:
+ ID = Intrinsic::ppc_vsx_lxvd2x_be;
+ break;
+ case PPC::BI__builtin_vsx_lxvw4x_be:
+ ID = Intrinsic::ppc_vsx_lxvw4x_be;
+ break;
+ case PPC::BI__builtin_vsx_lxvl:
+ ID = Intrinsic::ppc_vsx_lxvl;
+ break;
+ case PPC::BI__builtin_vsx_lxvll:
+ ID = Intrinsic::ppc_vsx_lxvll;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
}
- // vec_st
+ // vec_st, vec_xst_be
case PPC::BI__builtin_altivec_stvx:
case PPC::BI__builtin_altivec_stvxl:
case PPC::BI__builtin_altivec_stvebx:
@@ -7442,10 +8012,19 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_altivec_stvewx:
case PPC::BI__builtin_vsx_stxvd2x:
case PPC::BI__builtin_vsx_stxvw4x:
+ case PPC::BI__builtin_vsx_stxvd2x_be:
+ case PPC::BI__builtin_vsx_stxvw4x_be:
+ case PPC::BI__builtin_vsx_stxvl:
+ case PPC::BI__builtin_vsx_stxvll:
{
- Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
- Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
- Ops.pop_back();
+ if (BuiltinID == PPC::BI__builtin_vsx_stxvl ||
+ BuiltinID == PPC::BI__builtin_vsx_stxvll) {
+ Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
+ } else {
+ Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
+ Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
+ Ops.pop_back();
+ }
switch (BuiltinID) {
default: llvm_unreachable("Unsupported st intrinsic!");
@@ -7470,6 +8049,18 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_stxvw4x:
ID = Intrinsic::ppc_vsx_stxvw4x;
break;
+ case PPC::BI__builtin_vsx_stxvd2x_be:
+ ID = Intrinsic::ppc_vsx_stxvd2x_be;
+ break;
+ case PPC::BI__builtin_vsx_stxvw4x_be:
+ ID = Intrinsic::ppc_vsx_stxvw4x_be;
+ break;
+ case PPC::BI__builtin_vsx_stxvl:
+ ID = Intrinsic::ppc_vsx_stxvl;
+ break;
+ case PPC::BI__builtin_vsx_stxvll:
+ ID = Intrinsic::ppc_vsx_stxvll;
+ break;
}
llvm::Function *F = CGM.getIntrinsic(ID);
return Builder.CreateCall(F, Ops, "");
@@ -7494,6 +8085,25 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::ctlz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
+ case PPC::BI__builtin_altivec_vctzb:
+ case PPC::BI__builtin_altivec_vctzh:
+ case PPC::BI__builtin_altivec_vctzw:
+ case PPC::BI__builtin_altivec_vctzd: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ Value *Undef = ConstantInt::get(Builder.getInt1Ty(), false);
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
+ return Builder.CreateCall(F, {X, Undef});
+ }
+ case PPC::BI__builtin_altivec_vpopcntb:
+ case PPC::BI__builtin_altivec_vpopcnth:
+ case PPC::BI__builtin_altivec_vpopcntw:
+ case PPC::BI__builtin_altivec_vpopcntd: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *X = EmitScalarExpr(E->getArg(0));
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ResultType);
+ return Builder.CreateCall(F, X);
+ }
// Copy sign
case PPC::BI__builtin_vsx_xvcpsgnsp:
case PPC::BI__builtin_vsx_xvcpsgndp: {
@@ -7625,45 +8235,73 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
return Builder.CreateCall(F, {Src0, Src1, Src2, Src3ToBool});
}
+
+ case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
+ return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
+ case AMDGPU::BI__builtin_amdgcn_div_fixuph:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_div_fixup);
case AMDGPU::BI__builtin_amdgcn_trig_preop:
case AMDGPU::BI__builtin_amdgcn_trig_preopf:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_trig_preop);
case AMDGPU::BI__builtin_amdgcn_rcp:
case AMDGPU::BI__builtin_amdgcn_rcpf:
+ case AMDGPU::BI__builtin_amdgcn_rcph:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rcp);
case AMDGPU::BI__builtin_amdgcn_rsq:
case AMDGPU::BI__builtin_amdgcn_rsqf:
+ case AMDGPU::BI__builtin_amdgcn_rsqh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq);
case AMDGPU::BI__builtin_amdgcn_rsq_clamp:
case AMDGPU::BI__builtin_amdgcn_rsq_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_rsq_clamp);
case AMDGPU::BI__builtin_amdgcn_sinf:
+ case AMDGPU::BI__builtin_amdgcn_sinh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_sin);
case AMDGPU::BI__builtin_amdgcn_cosf:
+ case AMDGPU::BI__builtin_amdgcn_cosh:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_cos);
case AMDGPU::BI__builtin_amdgcn_log_clampf:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_log_clamp);
case AMDGPU::BI__builtin_amdgcn_ldexp:
case AMDGPU::BI__builtin_amdgcn_ldexpf:
+ case AMDGPU::BI__builtin_amdgcn_ldexph:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_ldexp);
case AMDGPU::BI__builtin_amdgcn_frexp_mant:
- case AMDGPU::BI__builtin_amdgcn_frexp_mantf: {
+ case AMDGPU::BI__builtin_amdgcn_frexp_mantf:
+ case AMDGPU::BI__builtin_amdgcn_frexp_manth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_mant);
- }
case AMDGPU::BI__builtin_amdgcn_frexp_exp:
case AMDGPU::BI__builtin_amdgcn_frexp_expf: {
- return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_frexp_exp);
+ Value *Src0 = EmitScalarExpr(E->getArg(0));
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ { Builder.getInt32Ty(), Src0->getType() });
+ return Builder.CreateCall(F, Src0);
+ }
+ case AMDGPU::BI__builtin_amdgcn_frexp_exph: {
+ Value *Src0 = EmitScalarExpr(E->getArg(0));
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_frexp_exp,
+ { Builder.getInt16Ty(), Src0->getType() });
+ return Builder.CreateCall(F, Src0);
}
case AMDGPU::BI__builtin_amdgcn_fract:
case AMDGPU::BI__builtin_amdgcn_fractf:
+ case AMDGPU::BI__builtin_amdgcn_fracth:
return emitUnaryBuiltin(*this, E, Intrinsic::amdgcn_fract);
case AMDGPU::BI__builtin_amdgcn_lerp:
return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_lerp);
+ case AMDGPU::BI__builtin_amdgcn_uicmp:
+ case AMDGPU::BI__builtin_amdgcn_uicmpl:
+ case AMDGPU::BI__builtin_amdgcn_sicmp:
+ case AMDGPU::BI__builtin_amdgcn_sicmpl:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_icmp);
+ case AMDGPU::BI__builtin_amdgcn_fcmp:
+ case AMDGPU::BI__builtin_amdgcn_fcmpf:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fcmp);
case AMDGPU::BI__builtin_amdgcn_class:
case AMDGPU::BI__builtin_amdgcn_classf:
+ case AMDGPU::BI__builtin_amdgcn_classh:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
case AMDGPU::BI__builtin_amdgcn_read_exec: {
@@ -7951,7 +8589,13 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
Ptr->getType()}),
{Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
-
+ auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
+ Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1))});
+ };
switch (BuiltinID) {
case NVPTX::BI__nvvm_atom_add_gen_i:
case NVPTX::BI__nvvm_atom_add_gen_l:
@@ -8070,6 +8714,109 @@ Value *CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID,
case NVPTX::BI__nvvm_ldg_d:
case NVPTX::BI__nvvm_ldg_d2:
return MakeLdg(Intrinsic::nvvm_ldg_global_f);
+
+ case NVPTX::BI__nvvm_atom_cta_add_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_add_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_add_gen_f:
+ case NVPTX::BI__nvvm_atom_cta_add_gen_d:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_cta);
+ case NVPTX::BI__nvvm_atom_sys_add_gen_f:
+ case NVPTX::BI__nvvm_atom_sys_add_gen_d:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_add_gen_f_sys);
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_xchg_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_xchg_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_exch_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_max_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ui:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ul:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ll:
+ case NVPTX::BI__nvvm_atom_cta_max_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_max_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ui:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ul:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ll:
+ case NVPTX::BI__nvvm_atom_sys_max_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_max_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_min_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ui:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ul:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ll:
+ case NVPTX::BI__nvvm_atom_cta_min_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_min_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ui:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ul:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ll:
+ case NVPTX::BI__nvvm_atom_sys_min_gen_ull:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_min_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_inc_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_cta_dec_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_inc_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_inc_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_sys_dec_gen_ui:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_dec_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_and_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_and_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_and_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_and_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_and_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_and_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_and_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_or_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_or_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_or_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_or_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_or_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_or_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_or_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_xor_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_cta);
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_xor_gen_ll:
+ return MakeScopedAtomic(Intrinsic::nvvm_atomic_xor_gen_i_sys);
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_i:
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
+ case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(
+ Intrinsic::nvvm_atomic_cas_gen_i_cta,
+ {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
+ }
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
+ case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ return Builder.CreateCall(
+ CGM.getIntrinsic(
+ Intrinsic::nvvm_atomic_cas_gen_i_sys,
+ {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ {Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
+ }
default:
return nullptr;
}
diff --git a/lib/CodeGen/CGCUDABuiltin.cpp b/lib/CodeGen/CGCUDABuiltin.cpp
index ea3b888635c3..44dd003757ad 100644
--- a/lib/CodeGen/CGCUDABuiltin.cpp
+++ b/lib/CodeGen/CGCUDABuiltin.cpp
@@ -99,6 +99,12 @@ CodeGenFunction::EmitCUDADevicePrintfCallExpr(const CallExpr *E,
llvm::SmallVector<llvm::Type *, 8> ArgTypes;
for (unsigned I = 1, NumArgs = Args.size(); I < NumArgs; ++I)
ArgTypes.push_back(Args[I].RV.getScalarVal()->getType());
+
+ // Using llvm::StructType is correct only because printf doesn't accept
+ // aggregates. If we had to handle aggregates here, we'd have to manually
+ // compute the offsets within the alloca -- we wouldn't be able to assume
+ // that the alignment of the llvm type was the same as the alignment of the
+ // clang type.
llvm::Type *AllocaTy = llvm::StructType::create(ArgTypes, "printf_args");
llvm::Value *Alloca = CreateTempAlloca(AllocaTy);
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 6a04d4eea784..83febcb4af8c 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -15,6 +15,7 @@
#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/Decl.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
@@ -29,7 +30,8 @@ namespace {
class CGNVCUDARuntime : public CGCUDARuntime {
private:
- llvm::Type *IntTy, *SizeTy, *VoidTy;
+ llvm::IntegerType *IntTy, *SizeTy;
+ llvm::Type *VoidTy;
llvm::PointerType *CharPtrTy, *VoidPtrTy, *VoidPtrPtrTy;
/// Convenience reference to LLVM Context
@@ -55,10 +57,18 @@ private:
/// where the C code specifies const char*.
llvm::Constant *makeConstantString(const std::string &Str,
const std::string &Name = "",
+ const std::string &SectionName = "",
unsigned Alignment = 0) {
llvm::Constant *Zeros[] = {llvm::ConstantInt::get(SizeTy, 0),
llvm::ConstantInt::get(SizeTy, 0)};
auto ConstStr = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>(ConstStr.getPointer());
+ if (!SectionName.empty())
+ GV->setSection(SectionName);
+ if (Alignment)
+ GV->setAlignment(Alignment);
+
return llvm::ConstantExpr::getGetElementPtr(ConstStr.getElementType(),
ConstStr.getPointer(), Zeros);
}
@@ -87,9 +97,9 @@ CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
- IntTy = Types.ConvertType(Ctx.IntTy);
- SizeTy = Types.ConvertType(Ctx.getSizeType());
- VoidTy = llvm::Type::getVoidTy(Context);
+ IntTy = CGM.IntTy;
+ SizeTy = CGM.SizeTy;
+ VoidTy = CGM.VoidTy;
CharPtrTy = llvm::PointerType::getUnqual(Types.ConvertType(Ctx.CharTy));
VoidPtrTy = cast<llvm::PointerType>(Types.ConvertType(Ctx.VoidPtrTy));
@@ -118,37 +128,28 @@ void CGNVCUDARuntime::emitDeviceStub(CodeGenFunction &CGF,
void CGNVCUDARuntime::emitDeviceStubBody(CodeGenFunction &CGF,
FunctionArgList &Args) {
- // Build the argument value list and the argument stack struct type.
- SmallVector<llvm::Value *, 16> ArgValues;
- std::vector<llvm::Type *> ArgTypes;
- for (FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
- I != E; ++I) {
- llvm::Value *V = CGF.GetAddrOfLocalVar(*I).getPointer();
- ArgValues.push_back(V);
- assert(isa<llvm::PointerType>(V->getType()) && "Arg type not PointerType");
- ArgTypes.push_back(cast<llvm::PointerType>(V->getType())->getElementType());
- }
- llvm::StructType *ArgStackTy = llvm::StructType::get(Context, ArgTypes);
-
- llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
-
- // Emit the calls to cudaSetupArgument
+ // Emit a call to cudaSetupArgument for each arg in Args.
llvm::Constant *cudaSetupArgFn = getSetupArgumentFn();
- for (unsigned I = 0, E = Args.size(); I != E; ++I) {
- llvm::Value *Args[3];
- llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
- Args[0] = CGF.Builder.CreatePointerCast(ArgValues[I], VoidPtrTy);
- Args[1] = CGF.Builder.CreateIntCast(
- llvm::ConstantExpr::getSizeOf(ArgTypes[I]),
- SizeTy, false);
- Args[2] = CGF.Builder.CreateIntCast(
- llvm::ConstantExpr::getOffsetOf(ArgStackTy, I),
- SizeTy, false);
+ llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
+ CharUnits Offset = CharUnits::Zero();
+ for (const VarDecl *A : Args) {
+ CharUnits TyWidth, TyAlign;
+ std::tie(TyWidth, TyAlign) =
+ CGM.getContext().getTypeInfoInChars(A->getType());
+ Offset = Offset.alignTo(TyAlign);
+ llvm::Value *Args[] = {
+ CGF.Builder.CreatePointerCast(CGF.GetAddrOfLocalVar(A).getPointer(),
+ VoidPtrTy),
+ llvm::ConstantInt::get(SizeTy, TyWidth.getQuantity()),
+ llvm::ConstantInt::get(SizeTy, Offset.getQuantity()),
+ };
llvm::CallSite CS = CGF.EmitRuntimeCallOrInvoke(cudaSetupArgFn, Args);
llvm::Constant *Zero = llvm::ConstantInt::get(IntTy, 0);
llvm::Value *CSZero = CGF.Builder.CreateICmpEQ(CS.getInstruction(), Zero);
+ llvm::BasicBlock *NextBlock = CGF.createBasicBlock("setup.next");
CGF.Builder.CreateCondBr(CSZero, NextBlock, EndBlock);
CGF.EmitBlock(NextBlock);
+ Offset += TyWidth;
}
// Emit the call to cudaLaunch
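[Editor's note] The loop above replaces ConstantExpr::getOffsetOf on a synthesized struct with explicit offset arithmetic: each argument lands at the previous end offset rounded up to that argument's own alignment. A small standalone sketch of the same rule (hypothetical helper, plain integers instead of CharUnits):

#include <cstdint>
#include <utility>
#include <vector>

// Returns the byte offset of each (width, align) argument, mirroring
// Offset = Offset.alignTo(TyAlign); ...; Offset += TyWidth; above.
std::vector<uint64_t> layoutOffsets(
    const std::vector<std::pair<uint64_t, uint64_t>> &WidthAlign) {
  std::vector<uint64_t> Offsets;
  uint64_t Offset = 0;
  for (const auto &WA : WidthAlign) {
    uint64_t Width = WA.first, Align = WA.second;
    Offset = (Offset + Align - 1) / Align * Align; // align up
    Offsets.push_back(Offset);
    Offset += Width;
  }
  return Offsets;
}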
@@ -290,18 +291,29 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
continue;
}
- // Create initialized wrapper structure that points to the loaded GPU binary
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(IntTy, 0x466243b1), // Fatbin wrapper magic.
- llvm::ConstantInt::get(IntTy, 1), // Fatbin version.
- makeConstantString(GpuBinaryOrErr.get()->getBuffer(), "", 16), // Data.
- llvm::ConstantPointerNull::get(VoidPtrTy)}; // Unused in fatbin v1.
- llvm::GlobalVariable *FatbinWrapper = new llvm::GlobalVariable(
- TheModule, FatbinWrapperTy, true, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantStruct::get(FatbinWrapperTy, Values),
- "__cuda_fatbin_wrapper");
+ const char *FatbinConstantName =
+ CGM.getTriple().isMacOSX() ? "__NV_CUDA,__nv_fatbin" : ".nv_fatbin";
// NVIDIA's cuobjdump looks for fatbins in this section.
- FatbinWrapper->setSection(".nvFatBinSegment");
+ const char *FatbinSectionName =
+ CGM.getTriple().isMacOSX() ? "__NV_CUDA,__fatbin" : ".nvFatBinSegment";
+
+ // Create initialized wrapper structure that points to the loaded GPU binary
+ ConstantInitBuilder Builder(CGM);
+ auto Values = Builder.beginStruct(FatbinWrapperTy);
+ // Fatbin wrapper magic.
+ Values.addInt(IntTy, 0x466243b1);
+ // Fatbin version.
+ Values.addInt(IntTy, 1);
+ // Data.
+ Values.add(makeConstantString(GpuBinaryOrErr.get()->getBuffer(),
+ "", FatbinConstantName, 8));
+ // Unused in fatbin v1.
+ Values.add(llvm::ConstantPointerNull::get(VoidPtrTy));
+ llvm::GlobalVariable *FatbinWrapper =
+ Values.finishAndCreateGlobal("__cuda_fatbin_wrapper",
+ CGM.getPointerAlign(),
+ /*constant*/ true);
+ FatbinWrapper->setSection(FatbinSectionName);
// GpuBinaryHandle = __cudaRegisterFatBinary(&FatbinWrapper);
llvm::CallInst *RegisterFatbinCall = CtorBuilder.CreateCall(
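[Editor's note] A sketch of the ConstantInitBuilder pattern this hunk introduces, using only the calls visible above (beginStruct, addInt, add, finishAndCreateGlobal); the free function emitWrapper and its parameters are hypothetical. The point is that fields are appended in order and the global is created in one step, instead of hand-assembling a ConstantStruct and a GlobalVariable.

void emitWrapper(CodeGen::CodeGenModule &CGM, llvm::StructType *WrapperTy,
                 llvm::Constant *Data, llvm::PointerType *VoidPtrTy) {
  ConstantInitBuilder Builder(CGM);
  auto Fields = Builder.beginStruct(WrapperTy);
  Fields.addInt(CGM.IntTy, 0x466243b1);                  // fatbin wrapper magic
  Fields.addInt(CGM.IntTy, 1);                           // fatbin version
  Fields.add(Data);                                      // device binary
  Fields.add(llvm::ConstantPointerNull::get(VoidPtrTy)); // unused in v1
  llvm::GlobalVariable *GV = Fields.finishAndCreateGlobal(
      "__cuda_fatbin_wrapper", CGM.getPointerAlign(), /*constant*/ true);
  GV->setSection(".nvFatBinSegment");
}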
diff --git a/lib/CodeGen/CGCUDARuntime.cpp b/lib/CodeGen/CGCUDARuntime.cpp
index 014a5dbd46d6..1936f9f13692 100644
--- a/lib/CodeGen/CGCUDARuntime.cpp
+++ b/lib/CodeGen/CGCUDARuntime.cpp
@@ -36,16 +36,7 @@ RValue CGCUDARuntime::EmitCUDAKernelCallExpr(CodeGenFunction &CGF,
eval.begin(CGF);
CGF.EmitBlock(ConfigOKBlock);
-
- const Decl *TargetDecl = nullptr;
- if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E->getCallee())) {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
- TargetDecl = DRE->getDecl();
- }
- }
-
- llvm::Value *Callee = CGF.EmitScalarExpr(E->getCallee());
- CGF.EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue, TargetDecl);
+ CGF.EmitSimpleCallExpr(E, ReturnValue);
CGF.EmitBranch(ContBlock);
CGF.EmitBlock(ContBlock);
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 40f1bc426ff7..59010f4407c2 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -134,6 +134,11 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
llvm::GlobalValue::LinkageTypes TargetLinkage =
getFunctionLinkage(TargetDecl);
+ // available_externally definitions aren't real definitions, so we cannot
+ // create an alias to one.
+ if (TargetLinkage == llvm::GlobalValue::AvailableExternallyLinkage)
+ return true;
+
// Check if we have it already.
StringRef MangledName = getMangledName(AliasDecl);
llvm::GlobalValue *Entry = GetGlobalValue(MangledName);
@@ -156,14 +161,7 @@ bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
// Instead of creating as alias to a linkonce_odr, replace all of the uses
// of the aliasee.
- if (llvm::GlobalValue::isDiscardableIfUnused(Linkage) &&
- (TargetLinkage != llvm::GlobalValue::AvailableExternallyLinkage ||
- !TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
- // FIXME: An extern template instantiation will create functions with
- // linkage "AvailableExternally". In libc++, some classes also define
- // members with attribute "AlwaysInline" and expect no reference to
- // be generated. It is desirable to reenable this optimisation after
- // corresponding LLVM changes.
+ if (llvm::GlobalValue::isDiscardableIfUnused(Linkage)) {
addReplacement(MangledName, Aliasee);
return false;
}
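[Editor's note] A hedged source-level sketch of the situation the new early return guards against, not taken from the patch: an explicit instantiation declaration leaves the destructor definitions available_externally, and, as the removed FIXME notes, an alias to such a definition is unsafe because the definition may never be emitted in this translation unit.

template <typename T> struct Box { ~Box() {} T t; };
extern template struct Box<int>;        // instantiation declaration only
void drop(Box<int> *b) { delete b; }    // ~Box<int> is available_externally here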
@@ -220,7 +218,7 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
getTypes().arrangeCXXStructorDeclaration(MD, Type);
auto *Fn = cast<llvm::Function>(
getAddrOfCXXStructor(MD, Type, &FnInfo, /*FnType=*/nullptr,
- /*DontDefer=*/true, /*IsForDefinition=*/true));
+ /*DontDefer=*/true, ForDefinition));
GlobalDecl GD;
if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
@@ -241,7 +239,8 @@ llvm::Function *CodeGenModule::codegenCXXStructor(const CXXMethodDecl *MD,
llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
const CXXMethodDecl *MD, StructorType Type, const CGFunctionInfo *FnInfo,
- llvm::FunctionType *FnType, bool DontDefer, bool IsForDefinition) {
+ llvm::FunctionType *FnType, bool DontDefer,
+ ForDefinition_t IsForDefinition) {
GlobalDecl GD;
if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
GD = GlobalDecl(CD, toCXXCtorType(Type));
@@ -260,10 +259,10 @@ llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
/*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeSet(), IsForDefinition);
}
-static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
- GlobalDecl GD,
- llvm::Type *Ty,
- const CXXRecordDecl *RD) {
+static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ llvm::Type *Ty,
+ const CXXRecordDecl *RD) {
assert(!CGF.CGM.getTarget().getCXXABI().isMicrosoft() &&
"No kext in Microsoft ABI");
GD = GD.getCanonicalDecl();
@@ -273,22 +272,26 @@ static llvm::Value *BuildAppleKextVirtualCall(CodeGenFunction &CGF,
VTable = CGF.Builder.CreateBitCast(VTable, Ty);
assert(VTable && "BuildVirtualCall = kext vtbl pointer is null");
uint64_t VTableIndex = CGM.getItaniumVTableContext().getMethodVTableIndex(GD);
- uint64_t AddressPoint =
- CGM.getItaniumVTableContext().getVTableLayout(RD)
- .getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
- VTableIndex += AddressPoint;
+ const VTableLayout &VTLayout = CGM.getItaniumVTableContext().getVTableLayout(RD);
+ VTableLayout::AddressPointLocation AddressPoint =
+ VTLayout.getAddressPoint(BaseSubobject(RD, CharUnits::Zero()));
+ VTableIndex += VTLayout.getVTableOffset(AddressPoint.VTableIndex) +
+ AddressPoint.AddressPointIndex;
llvm::Value *VFuncPtr =
CGF.Builder.CreateConstInBoundsGEP1_64(VTable, VTableIndex, "vfnkxt");
- return CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ llvm::Value *VFunc =
+ CGF.Builder.CreateAlignedLoad(VFuncPtr, CGF.PointerAlignInBytes);
+ CGCallee Callee(GD.getDecl(), VFunc);
+ return Callee;
}
/// BuildAppleKextVirtualCall - This routine is to support gcc's kext ABI making
/// indirect call to virtual functions. It makes the call through indexing
/// into the vtable.
-llvm::Value *
+CGCallee
CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
- NestedNameSpecifier *Qual,
- llvm::Type *Ty) {
+ NestedNameSpecifier *Qual,
+ llvm::Type *Ty) {
assert((Qual->getKind() == NestedNameSpecifier::TypeSpec) &&
"BuildAppleKextVirtualCall - bad Qual kind");
@@ -306,21 +309,15 @@ CodeGenFunction::BuildAppleKextVirtualCall(const CXXMethodDecl *MD,
/// BuildVirtualCall - This routine makes indirect vtable call for
/// call to virtual destructors. It returns 0 if it could not do it.
-llvm::Value *
+CGCallee
CodeGenFunction::BuildAppleKextVirtualDestructorCall(
const CXXDestructorDecl *DD,
CXXDtorType Type,
const CXXRecordDecl *RD) {
- const auto *MD = cast<CXXMethodDecl>(DD);
- // FIXME. Dtor_Base dtor is always direct!!
- // It need be somehow inline expanded into the caller.
- // -O does that. But need to support -O0 as well.
- if (MD->isVirtual() && Type != Dtor_Base) {
- // Compute the function type we're calling.
- const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
- DD, StructorType::Complete);
- llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
- return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
- }
- return nullptr;
+ assert(DD->isVirtual() && Type != Dtor_Base);
+ // Compute the function type we're calling.
+ const CGFunctionInfo &FInfo = CGM.getTypes().arrangeCXXStructorDeclaration(
+ DD, StructorType::Complete);
+ llvm::Type *Ty = CGM.getTypes().GetFunctionType(FInfo);
+ return ::BuildAppleKextVirtualCall(*this, GlobalDecl(DD, Type), Ty, RD);
}
diff --git a/lib/CodeGen/CGCXXABI.cpp b/lib/CodeGen/CGCXXABI.cpp
index e4da447eddc7..df75a7d7ffde 100644
--- a/lib/CodeGen/CGCXXABI.cpp
+++ b/lib/CodeGen/CGCXXABI.cpp
@@ -73,7 +73,7 @@ CGCXXABI::ConvertMemberPointerType(const MemberPointerType *MPT) {
return CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
}
-llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
+CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall,
llvm::Value *MemPtr, const MemberPointerType *MPT) {
@@ -86,7 +86,8 @@ llvm::Value *CGCXXABI::EmitLoadOfMemberFunctionPointer(
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
CGM.getTypes().arrangeCXXMethodType(RD, FPT, /*FD=*/nullptr));
- return llvm::Constant::getNullValue(FTy->getPointerTo());
+ llvm::Constant *FnPtr = llvm::Constant::getNullValue(FTy->getPointerTo());
+ return CGCallee::forDirect(FnPtr, FPT);
}
llvm::Value *
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index 9e10ec068e09..d53fd4cb63b2 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -35,6 +35,7 @@ class FieldDecl;
class MangleContext;
namespace CodeGen {
+class CGCallee;
class CodeGenFunction;
class CodeGenModule;
struct CatchTypeInfo;
@@ -154,7 +155,7 @@ public:
/// Load a member function from an object and a member function
/// pointer. Apply the this-adjustment and set 'This' to the
/// adjusted value.
- virtual llvm::Value *EmitLoadOfMemberFunctionPointer(
+ virtual CGCallee EmitLoadOfMemberFunctionPointer(
CodeGenFunction &CGF, const Expr *E, Address This,
llvm::Value *&ThisPtrForCall, llvm::Value *MemPtr,
const MemberPointerType *MPT);
@@ -403,11 +404,11 @@ public:
CharUnits VPtrOffset) = 0;
/// Build a virtual function pointer in the ABI-specific way.
- virtual llvm::Value *getVirtualFunctionPointer(CodeGenFunction &CGF,
- GlobalDecl GD,
- Address This,
- llvm::Type *Ty,
- SourceLocation Loc) = 0;
+ virtual CGCallee getVirtualFunctionPointer(CodeGenFunction &CGF,
+ GlobalDecl GD,
+ Address This,
+ llvm::Type *Ty,
+ SourceLocation Loc) = 0;
/// Emit the ABI-specific virtual destructor call.
virtual llvm::Value *
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 242b5962070a..9b96a59aec38 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -29,6 +29,7 @@
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/CallSite.h"
@@ -47,6 +48,7 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
default: return llvm::CallingConv::C;
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
@@ -172,6 +174,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
if (D->hasAttr<FastCallAttr>())
return CC_X86FastCall;
+ if (D->hasAttr<RegCallAttr>())
+ return CC_X86RegCall;
+
if (D->hasAttr<ThisCallAttr>())
return CC_X86ThisCall;
@@ -1647,6 +1652,8 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
if (TargetDecl->hasAttr<NoDuplicateAttr>())
FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
+ if (TargetDecl->hasAttr<ConvergentAttr>())
+ FuncAttrs.addAttribute(llvm::Attribute::Convergent);
if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
AddAttributesFromFunctionProtoType(
@@ -1676,6 +1683,14 @@ void CodeGenModule::ConstructAttributeList(
HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
+ if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
+ Optional<unsigned> NumElemsParam;
+ // alloc_size args are base-1, 0 means not present.
+ if (unsigned N = AllocSize->getNumElemsParam())
+ NumElemsParam = N - 1;
+ FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam() - 1,
+ NumElemsParam);
+ }
}
// OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
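[Editor's note] Hypothetical declarations illustrating the alloc_size handling above: source-level parameter indices are 1-based, so IRGen subtracts one before forming the LLVM allocsize attribute.

void *my_malloc(unsigned long size)
    __attribute__((alloc_size(1)));       // lowered to allocsize(0)
void *my_calloc(unsigned long n, unsigned long size)
    __attribute__((alloc_size(1, 2)));    // lowered to allocsize(0, 1)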
@@ -1722,6 +1737,16 @@ void CodeGenModule::ConstructAttributeList(
FuncAttrs.addAttribute("less-precise-fpmad",
llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+
+ if (!CodeGenOpts.FPDenormalMode.empty())
+ FuncAttrs.addAttribute("denormal-fp-math",
+ CodeGenOpts.FPDenormalMode);
+
+ FuncAttrs.addAttribute("no-trapping-math",
+ llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+
+ // TODO: Are these all needed?
+ // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
FuncAttrs.addAttribute("no-infs-fp-math",
llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
FuncAttrs.addAttribute("no-nans-fp-math",
@@ -1734,6 +1759,15 @@ void CodeGenModule::ConstructAttributeList(
llvm::utostr(CodeGenOpts.SSPBufferSize));
FuncAttrs.addAttribute("no-signed-zeros-fp-math",
llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+ FuncAttrs.addAttribute(
+ "correctly-rounded-divide-sqrt-fp-math",
+ llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
+
+ // TODO: Reciprocal estimate codegen options should apply to instructions?
+ std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
+ if (!Recips.empty())
+ FuncAttrs.addAttribute("reciprocal-estimates",
+ llvm::join(Recips.begin(), Recips.end(), ","));
if (CodeGenOpts.StackRealignment)
FuncAttrs.addAttribute("stackrealign");
@@ -1794,6 +1828,9 @@ void CodeGenModule::ConstructAttributeList(
// them). LLVM will remove this attribute where it safely can.
FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+ // Exceptions aren't supported in CUDA device code.
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+
// Respect -fcuda-flush-denormals-to-zero.
if (getLangOpts().CUDADeviceFlushDenormalsToZero)
FuncAttrs.addAttribute("nvptx-f32ftz", "true");
@@ -2299,13 +2336,6 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (isPromoted)
V = emitArgumentDemotion(*this, Arg, V);
- if (const CXXMethodDecl *MD =
- dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
- if (MD->isVirtual() && Arg == CXXABIThisDecl)
- V = CGM.getCXXABI().
- adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
- }
-
// Because of merging of function types from multiple decls it is
// possible for the type of an argument to not match the corresponding
// type in the function type. Since we are codegening the callee
@@ -2465,7 +2495,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
// result is in a BasicBlock and is therefore an Instruction.
llvm::Instruction *generator = cast<llvm::Instruction>(result);
- SmallVector<llvm::Instruction*,4> insnsToKill;
+ SmallVector<llvm::Instruction *, 4> InstsToKill;
// Look for:
// %generator = bitcast %type1* %generator2 to %type2*
@@ -2478,7 +2508,7 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
if (generator->getNextNode() != bitcast)
return nullptr;
- insnsToKill.push_back(bitcast);
+ InstsToKill.push_back(bitcast);
}
// Look for:
@@ -2511,27 +2541,26 @@ static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
assert(isa<llvm::CallInst>(prev));
assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
- insnsToKill.push_back(prev);
+ InstsToKill.push_back(prev);
}
} else {
return nullptr;
}
result = call->getArgOperand(0);
- insnsToKill.push_back(call);
+ InstsToKill.push_back(call);
// Keep killing bitcasts, for sanity. Note that we no longer care
// about precise ordering as long as there's exactly one use.
while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
if (!bitcast->hasOneUse()) break;
- insnsToKill.push_back(bitcast);
+ InstsToKill.push_back(bitcast);
result = bitcast->getOperand(0);
}
// Delete all the unnecessary instructions, from latest to earliest.
- for (SmallVectorImpl<llvm::Instruction*>::iterator
- i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
- (*i)->eraseFromParent();
+ for (auto *I : InstsToKill)
+ I->eraseFromParent();
// Do the fused retain/autorelease if we were asked to.
if (doRetainAutorelease)
@@ -2841,7 +2870,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
EmitCheckSourceLocation(RetNNAttr->getLocation()),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
- "nonnull_return", StaticData, None);
+ SanitizerHandler::NonnullReturn, StaticData, None);
}
}
Ret = Builder.CreateRet(RV);
@@ -2863,13 +2892,13 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
// FIXME: Generate IR in one pass, rather than going back and fixing up these
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
- llvm::Value *Placeholder =
- llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
- Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
+ llvm::Type *IRPtrTy = IRTy->getPointerTo();
+ llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
// FIXME: When we generate this IR in one pass, we shouldn't need
// this win32-specific alignment hack.
CharUnits Align = CharUnits::fromQuantity(4);
+ Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
return AggValueSlot::forAddr(Address(Placeholder, Align),
Ty.getQualifiers(),
@@ -2891,22 +2920,36 @@ void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
"cannot emit delegate call arguments for inalloca arguments!");
+ // GetAddrOfLocalVar returns a pointer-to-pointer for references,
+ // but the argument needs to be the original pointer.
+ if (type->isReferenceType()) {
+ args.add(RValue::get(Builder.CreateLoad(local)), type);
+
+ // In ARC, move out of consumed arguments so that the release cleanup
+ // entered by StartFunction doesn't cause an over-release. This isn't
+ // optimal -O0 code generation, but it should get cleaned up when
+ // optimization is enabled. This also assumes that delegate calls are
+ // performed exactly once for a set of arguments, but that should be safe.
+ } else if (getLangOpts().ObjCAutoRefCount &&
+ param->hasAttr<NSConsumedAttr>() &&
+ type->isObjCRetainableType()) {
+ llvm::Value *ptr = Builder.CreateLoad(local);
+ auto null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
+ Builder.CreateStore(null, local);
+ args.add(RValue::get(ptr), type);
+
// For the most part, we just need to load the alloca, except that
// aggregate r-values are actually pointers to temporaries.
- if (type->isReferenceType())
- args.add(RValue::get(Builder.CreateLoad(local)), type);
- else
+ } else {
args.add(convertTempToRValue(local, type, loc), type);
+ }
}
static bool isProvablyNull(llvm::Value *addr) {
return isa<llvm::ConstantPointerNull>(addr);
}
-static bool isProvablyNonNull(llvm::Value *addr) {
- return isa<llvm::AllocaInst>(addr);
-}
-
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
@@ -2919,7 +2962,7 @@ static void emitWriteback(CodeGenFunction &CGF,
// If the argument wasn't provably non-null, we need to null check
// before doing the store.
- bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
if (!provablyNonNull) {
llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
contBB = CGF.createBasicBlock("icr.done");
@@ -3059,7 +3102,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// If the address is *not* known to be non-null, we need to switch.
llvm::Value *finalArgument;
- bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
+ bool provablyNonNull = llvm::isKnownNonNull(srcAddr.getPointer());
if (provablyNonNull) {
finalArgument = temp.getPointer();
} else {
@@ -3130,7 +3173,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
- assert(!StackBase && !StackCleanup.isValid());
+ assert(!StackBase);
// Save the stack.
llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
@@ -3167,13 +3210,14 @@ void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
};
EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
- "nonnull_arg", StaticData, None);
+ SanitizerHandler::NonnullArg, StaticData, None);
}
void CodeGenFunction::EmitCallArgs(
CallArgList &Args, ArrayRef<QualType> ArgTypes,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
+ const FunctionDecl *CalleeDecl, unsigned ParamsToSkip,
+ EvaluationOrder Order) {
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
@@ -3191,10 +3235,18 @@ void CodeGenFunction::EmitCallArgs(
};
// We *have* to evaluate arguments from right to left in the MS C++ ABI,
- // because arguments are destroyed left to right in the callee.
- if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
- // Insert a stack save if we're going to need any inalloca args.
- bool HasInAllocaArgs = false;
+ // because arguments are destroyed left to right in the callee. As a special
+ // case, there are certain language constructs that require left-to-right
+ // evaluation, and in those cases we consider the evaluation order requirement
+ // to trump the "destruction order is reverse construction order" guarantee.
+ bool LeftToRight =
+ CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
+ ? Order == EvaluationOrder::ForceLeftToRight
+ : Order != EvaluationOrder::ForceRightToLeft;
+
+ // Insert a stack save if we're going to need any inalloca args.
+ bool HasInAllocaArgs = false;
+ if (CGM.getTarget().getCXXABI().isMicrosoft()) {
for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
I != E && !HasInAllocaArgs; ++I)
HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
@@ -3202,30 +3254,24 @@ void CodeGenFunction::EmitCallArgs(
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
Args.allocateArgumentMemory(*this);
}
+ }
- // Evaluate each argument.
- size_t CallArgsStart = Args.size();
- for (int I = ArgTypes.size() - 1; I >= 0; --I) {
- CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
- MaybeEmitImplicitObjectSize(I, *Arg);
- EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
- CalleeDecl, ParamsToSkip + I);
- }
+ // Evaluate each argument in the appropriate order.
+ size_t CallArgsStart = Args.size();
+ for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
+ unsigned Idx = LeftToRight ? I : E - I - 1;
+ CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
+ if (!LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ EmitCallArg(Args, *Arg, ArgTypes[Idx]);
+ EmitNonNullArgCheck(Args.back().RV, ArgTypes[Idx], (*Arg)->getExprLoc(),
+ CalleeDecl, ParamsToSkip + Idx);
+ if (LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ }
+ if (!LeftToRight) {
// Un-reverse the arguments we just evaluated so they match up with the LLVM
// IR function.
std::reverse(Args.begin() + CallArgsStart, Args.end());
- return;
- }
-
- for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
- CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
- assert(Arg != ArgRange.end());
- EmitCallArg(Args, *Arg, ArgTypes[I]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
- CalleeDecl, ParamsToSkip + I);
- MaybeEmitImplicitObjectSize(I, *Arg);
}
}
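[Editor's note] A standalone sketch of the direction choice made above (hypothetical helper, same enum values as the patch): the MS C++ ABI prefers right-to-left evaluation so that destruction order in the callee mirrors construction order, but constructs that mandate left-to-right evaluation override that preference.

enum class EvaluationOrder { Default, ForceLeftToRight, ForceRightToLeft };

bool shouldEvaluateLeftToRight(bool ArgsDestroyedLTRInCallee,
                               EvaluationOrder Order) {
  return ArgsDestroyedLTRInCallee
             ? Order == EvaluationOrder::ForceLeftToRight
             : Order != EvaluationOrder::ForceRightToLeft;
}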
@@ -3267,7 +3313,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
if (const ObjCIndirectCopyRestoreExpr *CRE
= dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
assert(getLangOpts().ObjCAutoRefCount);
- assert(getContext().hasSameType(E->getType(), type));
+ assert(getContext().hasSameUnqualifiedType(E->getType(), type));
return emitWritebackArg(*this, args, CRE);
}
@@ -3505,21 +3551,22 @@ void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
- llvm::Value *Callee,
+ const CGCallee &Callee,
ReturnValueSlot ReturnValue,
const CallArgList &CallArgs,
- CGCalleeInfo CalleeInfo,
llvm::Instruction **callOrInvoke) {
// FIXME: We no longer need the types from CallArgs; lift up and simplify.
+ assert(Callee.isOrdinary());
+
// Handle struct-return functions by passing a pointer to the
// location that we would like to return into.
QualType RetTy = CallInfo.getReturnType();
const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
- llvm::FunctionType *IRFuncTy =
- cast<llvm::FunctionType>(
- cast<llvm::PointerType>(Callee->getType())->getElementType());
+ llvm::FunctionType *IRFuncTy = Callee.getFunctionType();
+
+ // 1. Set up the arguments.
// If we're using inalloca, insert the allocation after the stack save.
// FIXME: Do this earlier rather than hacking it in here!
@@ -3579,6 +3626,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address swiftErrorTemp = Address::invalid();
Address swiftErrorArg = Address::invalid();
+ // Translate all of the arguments as necessary to match the IR lowering.
assert(CallInfo.arg_size() == CallArgs.size() &&
"Mismatch between function signature & arguments.");
unsigned ArgNo = 0;
@@ -3826,6 +3874,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+
+ // If we're using inalloca, set up that argument.
if (ArgMemory.isValid()) {
llvm::Value *Arg = ArgMemory.getPointer();
if (CallInfo.isVariadic()) {
@@ -3833,10 +3884,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// end up with a variadic prototype and an inalloca call site. In such
// cases, we can't do any parameter mismatch checks. Give up and bitcast
// the callee.
- unsigned CalleeAS =
- cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
- Callee = Builder.CreateBitCast(
- Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
+ unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
+ auto FnTy = getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS);
+ CalleePtr = Builder.CreateBitCast(CalleePtr, FnTy);
} else {
llvm::Type *LastParamTy =
IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
@@ -3860,39 +3910,57 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
}
- if (!CallArgs.getCleanupsToDeactivate().empty())
- deactivateArgCleanupsBeforeCall(*this, CallArgs);
+ // 2. Prepare the function pointer.
+
+ // If the callee is a bitcast of a non-variadic function to have a
+ // variadic function pointer type, check to see if we can remove the
+ // bitcast. This comes up with unprototyped functions.
+ //
+ // This makes the IR nicer, but more importantly it ensures that we
+ // can inline the function at -O0 if it is marked always_inline.
+ auto simplifyVariadicCallee = [](llvm::Value *Ptr) -> llvm::Value* {
+ llvm::FunctionType *CalleeFT =
+ cast<llvm::FunctionType>(Ptr->getType()->getPointerElementType());
+ if (!CalleeFT->isVarArg())
+ return Ptr;
+
+ llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr);
+ if (!CE || CE->getOpcode() != llvm::Instruction::BitCast)
+ return Ptr;
+
+ llvm::Function *OrigFn = dyn_cast<llvm::Function>(CE->getOperand(0));
+ if (!OrigFn)
+ return Ptr;
+
+ llvm::FunctionType *OrigFT = OrigFn->getFunctionType();
+
+ // If the original type is variadic, or if any of the component types
+ // disagree, we cannot remove the cast.
+ if (OrigFT->isVarArg() ||
+ OrigFT->getNumParams() != CalleeFT->getNumParams() ||
+ OrigFT->getReturnType() != CalleeFT->getReturnType())
+ return Ptr;
+
+ for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
+ if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
+ return Ptr;
+
+ return OrigFn;
+ };
+ CalleePtr = simplifyVariadicCallee(CalleePtr);
- // If the callee is a bitcast of a function to a varargs pointer to function
- // type, check to see if we can remove the bitcast. This handles some cases
- // with unprototyped functions.
- if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
- if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
- llvm::PointerType *CurPT=cast<llvm::PointerType>(Callee->getType());
- llvm::FunctionType *CurFT =
- cast<llvm::FunctionType>(CurPT->getElementType());
- llvm::FunctionType *ActualFT = CalleeF->getFunctionType();
-
- if (CE->getOpcode() == llvm::Instruction::BitCast &&
- ActualFT->getReturnType() == CurFT->getReturnType() &&
- ActualFT->getNumParams() == CurFT->getNumParams() &&
- ActualFT->getNumParams() == IRCallArgs.size() &&
- (CurFT->isVarArg() || !ActualFT->isVarArg())) {
- bool ArgsMatch = true;
- for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
- if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
- ArgsMatch = false;
- break;
- }
+ // 3. Perform the actual call.
- // Strip the cast if we can get away with it. This is a nice cleanup,
- // but also allows us to inline the function at -O0 if it is marked
- // always_inline.
- if (ArgsMatch)
- Callee = CalleeF;
- }
- }
+ // Deactivate any cleanups that we're supposed to do immediately before
+ // the call.
+ if (!CallArgs.getCleanupsToDeactivate().empty())
+ deactivateArgCleanupsBeforeCall(*this, CallArgs);
+ // Assert that the arguments we computed match up. The IR verifier
+ // will catch this, but this is a common enough source of problems
+ // during IRGen changes that it's way better for debugging to catch
+ // it ourselves here.
+#ifndef NDEBUG
assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
// Inalloca argument can have different type.
@@ -3902,75 +3970,106 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (i < IRFuncTy->getNumParams())
assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
}
+#endif
+ // Compute the calling convention and attributes.
unsigned CallingConv;
CodeGen::AttributeListType AttributeList;
- CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
+ CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
+ Callee.getAbstractInfo(),
AttributeList, CallingConv,
/*AttrOnCallSite=*/true);
llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
AttributeList);
+ // Apply some call-site-specific attributes.
+ // TODO: work this into building the attribute set.
+
+ // Apply always_inline to all calls within flatten functions.
+ // FIXME: should this really take priority over __try, below?
+ if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
+ !(Callee.getAbstractInfo().getCalleeDecl() &&
+ Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(),
+ llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::AlwaysInline);
+ }
+
+ // Disable inlining inside SEH __try blocks.
+ if (isSEHTryScope()) {
+ Attrs =
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ llvm::Attribute::NoInline);
+ }
+
+ // Decide whether to use a call or an invoke.
bool CannotThrow;
if (currentFunctionUsesSEHTry()) {
- // SEH cares about asynchronous exceptions, everything can "throw."
+ // SEH cares about asynchronous exceptions, so everything can "throw."
CannotThrow = false;
} else if (isCleanupPadScope() &&
EHPersonality::get(*this).isMSVCXXPersonality()) {
// The MSVC++ personality will implicitly terminate the program if an
- // exception is thrown. An unwind edge cannot be reached.
+ // exception is thrown during a cleanup outside of a try/catch.
+ // We don't need to model anything in IR to get this behavior.
CannotThrow = true;
} else {
- // Otherwise, nowunind callsites will never throw.
+ // Otherwise, nounwind call sites will never throw.
CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
llvm::Attribute::NoUnwind);
}
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
SmallVector<llvm::OperandBundleDef, 1> BundleList;
- getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);
+ getBundlesForFunclet(CalleePtr, CurrentFuncletPad, BundleList);
+ // Emit the actual call/invoke instruction.
llvm::CallSite CS;
if (!InvokeDest) {
- CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
+ CS = Builder.CreateCall(CalleePtr, IRCallArgs, BundleList);
} else {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
+ CS = Builder.CreateInvoke(CalleePtr, Cont, InvokeDest, IRCallArgs,
BundleList);
EmitBlock(Cont);
}
+ llvm::Instruction *CI = CS.getInstruction();
if (callOrInvoke)
- *callOrInvoke = CS.getInstruction();
-
- if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
- !CS.hasFnAttr(llvm::Attribute::NoInline))
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::AlwaysInline);
-
- // Disable inlining inside SEH __try blocks.
- if (isSEHTryScope())
- Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::NoInline);
+ *callOrInvoke = CI;
+ // Apply the attributes and calling convention.
CS.setAttributes(Attrs);
CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
+ // Apply various metadata.
+
+ if (!CI->getType()->isVoidTy())
+ CI->setName("call");
+
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
// IPVK_IndirectCallTarget in InstrProfData.inc.
if (!CS.getCalledFunction())
PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
- CS.getInstruction(), Callee);
+ CI, CalleePtr);
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
if (CGM.getLangOpts().ObjCAutoRefCount)
- AddObjCARCExceptionMetadata(CS.getInstruction());
+ AddObjCARCExceptionMetadata(CI);
+
+ // Suppress tail calls if requested.
+ if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
+ if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
+ Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
+ }
+
+ // 4. Finish the call.
// If the call doesn't return, finish the basic block and clear the
- // insertion point; this allows the rest of IRgen to discard
+ // insertion point; this allows the rest of IRGen to discard
// unreachable code.
if (CS.doesNotReturn()) {
if (UnusedReturnSize)
@@ -3989,18 +4088,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
return GetUndefRValue(RetTy);
}
- llvm::Instruction *CI = CS.getInstruction();
- if (!CI->getType()->isVoidTy())
- CI->setName("call");
-
// Perform the swifterror writeback.
if (swiftErrorTemp.isValid()) {
llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
Builder.CreateStore(errorResult, swiftErrorArg);
}
- // Emit any writebacks immediately. Arguably this should happen
- // after any return-value munging.
+ // Emit any call-associated writebacks immediately. Arguably this
+ // should happen after any return-value munging.
if (CallArgs.hasWritebacks())
emitWritebacks(*this, CallArgs);
@@ -4008,12 +4103,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// lexical order, so deactivate it and run it manually here.
CallArgs.freeArgumentMemory(*this);
- if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
- if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
- Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
- }
-
+ // Extract the return value.
RValue Ret = [&] {
switch (RetAI.getKind()) {
case ABIArgInfo::CoerceAndExpand: {
@@ -4110,8 +4200,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm_unreachable("Unhandled ABIArgInfo::Kind");
} ();
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
-
+ // Emit the assume_aligned check on the return value.
+ const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl();
if (Ret.isScalar() && TargetDecl) {
if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
llvm::Value *OffsetValue = nullptr;
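[Editor's note] A hypothetical declaration showing what reaches the AssumeAlignedAttr handling above: the attribute on the callee lets IRGen assert the alignment of the returned pointer at each call site (an optional second argument supplies an offset).

void *make_buffer(unsigned long n) __attribute__((assume_aligned(64)));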
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 2ebd09b9eb57..031ce831cb37 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -19,7 +19,6 @@
#include "EHScopeStack.h"
#include "clang/AST/CanonicalType.h"
#include "clang/AST/Type.h"
-#include "llvm/ADT/FoldingSet.h"
#include "llvm/IR/Value.h"
// FIXME: Restructure so we don't have to expose so much stuff.
@@ -42,6 +41,134 @@ namespace clang {
namespace CodeGen {
typedef SmallVector<llvm::AttributeSet, 8> AttributeListType;
+ /// Abstract information about a function or function prototype.
+ class CGCalleeInfo {
+ /// \brief The function prototype of the callee.
+ const FunctionProtoType *CalleeProtoTy;
+ /// \brief The function declaration of the callee.
+ const Decl *CalleeDecl;
+
+ public:
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const Decl *calleeDecl)
+ : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
+
+ const FunctionProtoType *getCalleeFunctionProtoType() const {
+ return CalleeProtoTy;
+ }
+ const Decl *getCalleeDecl() const { return CalleeDecl; }
+ };
+
+ /// All available information about a concrete callee.
+ class CGCallee {
+ enum class SpecialKind : uintptr_t {
+ Invalid,
+ Builtin,
+ PseudoDestructor,
+
+ Last = PseudoDestructor
+ };
+
+ struct BuiltinInfoStorage {
+ const FunctionDecl *Decl;
+ unsigned ID;
+ };
+ struct PseudoDestructorInfoStorage {
+ const CXXPseudoDestructorExpr *Expr;
+ };
+
+ SpecialKind KindOrFunctionPointer;
+ union {
+ CGCalleeInfo AbstractInfo;
+ BuiltinInfoStorage BuiltinInfo;
+ PseudoDestructorInfoStorage PseudoDestructorInfo;
+ };
+
+ explicit CGCallee(SpecialKind kind) : KindOrFunctionPointer(kind) {}
+
+ CGCallee(const FunctionDecl *builtinDecl, unsigned builtinID)
+ : KindOrFunctionPointer(SpecialKind::Builtin) {
+ BuiltinInfo.Decl = builtinDecl;
+ BuiltinInfo.ID = builtinID;
+ }
+
+ public:
+ CGCallee() : KindOrFunctionPointer(SpecialKind::Invalid) {}
+
+ /// Construct a callee. Call this constructor directly when this
+ /// isn't a direct call.
+ CGCallee(const CGCalleeInfo &abstractInfo, llvm::Value *functionPtr)
+ : KindOrFunctionPointer(SpecialKind(uintptr_t(functionPtr))) {
+ AbstractInfo = abstractInfo;
+ assert(functionPtr && "configuring callee without function pointer");
+ assert(functionPtr->getType()->isPointerTy());
+ assert(functionPtr->getType()->getPointerElementType()->isFunctionTy());
+ }
+
+ static CGCallee forBuiltin(unsigned builtinID,
+ const FunctionDecl *builtinDecl) {
+ CGCallee result(SpecialKind::Builtin);
+ result.BuiltinInfo.Decl = builtinDecl;
+ result.BuiltinInfo.ID = builtinID;
+ return result;
+ }
+
+ static CGCallee forPseudoDestructor(const CXXPseudoDestructorExpr *E) {
+ CGCallee result(SpecialKind::PseudoDestructor);
+ result.PseudoDestructorInfo.Expr = E;
+ return result;
+ }
+
+ static CGCallee forDirect(llvm::Constant *functionPtr,
+ const CGCalleeInfo &abstractInfo = CGCalleeInfo()) {
+ return CGCallee(abstractInfo, functionPtr);
+ }
+
+ bool isBuiltin() const {
+ return KindOrFunctionPointer == SpecialKind::Builtin;
+ }
+ const FunctionDecl *getBuiltinDecl() const {
+ assert(isBuiltin());
+ return BuiltinInfo.Decl;
+ }
+ unsigned getBuiltinID() const {
+ assert(isBuiltin());
+ return BuiltinInfo.ID;
+ }
+
+ bool isPseudoDestructor() const {
+ return KindOrFunctionPointer == SpecialKind::PseudoDestructor;
+ }
+ const CXXPseudoDestructorExpr *getPseudoDestructorExpr() const {
+ assert(isPseudoDestructor());
+ return PseudoDestructorInfo.Expr;
+ }
+
+ bool isOrdinary() const {
+ return uintptr_t(KindOrFunctionPointer) > uintptr_t(SpecialKind::Last);
+ }
+ const CGCalleeInfo &getAbstractInfo() const {
+ assert(isOrdinary());
+ return AbstractInfo;
+ }
+ llvm::Value *getFunctionPointer() const {
+ assert(isOrdinary());
+ return reinterpret_cast<llvm::Value*>(uintptr_t(KindOrFunctionPointer));
+ }
+ llvm::FunctionType *getFunctionType() const {
+ return cast<llvm::FunctionType>(
+ getFunctionPointer()->getType()->getPointerElementType());
+ }
+ void setFunctionPointer(llvm::Value *functionPtr) {
+ assert(isOrdinary());
+ KindOrFunctionPointer = SpecialKind(uintptr_t(functionPtr));
+ }
+ };
+
struct CallArg {
RValue RV;
QualType Ty;
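[Editor's note] A minimal sketch, assuming only the CGCallee/CGCalleeInfo interfaces shown above (the helper names are hypothetical): the same EmitCall path can now accept either a direct callee built from a known constant or an indirect callee built from a loaded function pointer.

static CGCallee directCallee(llvm::Constant *FnAddr, const FunctionDecl *FD) {
  return CGCallee::forDirect(FnAddr, CGCalleeInfo(FD));
}

static CGCallee indirectCallee(llvm::Value *FnPtr,
                               const FunctionProtoType *ProtoTy) {
  // The ordinary constructor asserts FnPtr has pointer-to-function type.
  return CGCallee(CGCalleeInfo(ProtoTy), FnPtr);
}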
@@ -82,10 +209,19 @@ namespace CodeGen {
push_back(CallArg(rvalue, type, needscopy));
}
+ /// Add all the arguments from another CallArgList to this one. After doing
+ /// this, the old CallArgList retains its list of arguments, but must not
+ /// be used to emit a call.
void addFrom(const CallArgList &other) {
insert(end(), other.begin(), other.end());
Writebacks.insert(Writebacks.end(),
other.Writebacks.begin(), other.Writebacks.end());
+ CleanupsToDeactivate.insert(CleanupsToDeactivate.end(),
+ other.CleanupsToDeactivate.begin(),
+ other.CleanupsToDeactivate.end());
+ assert(!(StackBase && other.StackBase) && "can't merge stackbases");
+ if (!StackBase)
+ StackBase = other.StackBase;
}
void addWriteback(LValue srcLV, Address temporary,
@@ -133,11 +269,6 @@ namespace CodeGen {
/// The stacksave call. It dominates all of the argument evaluation.
llvm::CallInst *StackBase;
-
- /// The iterator pointing to the stack restore cleanup. We manually run and
- /// deactivate this cleanup after the call in the unexceptional case because
- /// it doesn't run in the normal order.
- EHScopeStack::stable_iterator StackCleanup;
};
/// FunctionArgList - Type for representing both the decl and type
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 7ed891f426aa..05d056739524 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -562,105 +562,6 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
isBaseVirtual);
}
-static void EmitAggMemberInitializer(CodeGenFunction &CGF,
- LValue LHS,
- Expr *Init,
- Address ArrayIndexVar,
- QualType T,
- ArrayRef<VarDecl *> ArrayIndexes,
- unsigned Index) {
- if (Index == ArrayIndexes.size()) {
- LValue LV = LHS;
-
- if (ArrayIndexVar.isValid()) {
- // If we have an array index variable, load it and use it as an offset.
- // Then, increment the value.
- llvm::Value *Dest = LHS.getPointer();
- llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
- Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
- llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
- Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
- CGF.Builder.CreateStore(Next, ArrayIndexVar);
-
- // Update the LValue.
- CharUnits EltSize = CGF.getContext().getTypeSizeInChars(T);
- CharUnits Align = LV.getAlignment().alignmentOfArrayElement(EltSize);
- LV.setAddress(Address(Dest, Align));
- }
-
- switch (CGF.getEvaluationKind(T)) {
- case TEK_Scalar:
- CGF.EmitScalarInit(Init, /*decl*/ nullptr, LV, false);
- break;
- case TEK_Complex:
- CGF.EmitComplexExprIntoLValue(Init, LV, /*isInit*/ true);
- break;
- case TEK_Aggregate: {
- AggValueSlot Slot =
- AggValueSlot::forLValue(LV,
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased);
-
- CGF.EmitAggExpr(Init, Slot);
- break;
- }
- }
-
- return;
- }
-
- const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
- assert(Array && "Array initialization without the array type?");
- Address IndexVar = CGF.GetAddrOfLocalVar(ArrayIndexes[Index]);
-
- // Initialize this index variable to zero.
- llvm::Value* Zero
- = llvm::Constant::getNullValue(IndexVar.getElementType());
- CGF.Builder.CreateStore(Zero, IndexVar);
-
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
-
- CGF.EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
- // Generate: if (loop-index < number-of-elements) fall to the loop body,
- // otherwise, go to the block after the for-loop.
- uint64_t NumElements = Array->getSize().getZExtValue();
- llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
- llvm::Value *NumElementsPtr =
- llvm::ConstantInt::get(Counter->getType(), NumElements);
- llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
- "isless");
-
- // If the condition is true, execute the body.
- CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- CGF.EmitBlock(ForBody);
- llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
-
- // Inside the loop body recurse to emit the inner loop or, eventually, the
- // constructor call.
- EmitAggMemberInitializer(CGF, LHS, Init, ArrayIndexVar,
- Array->getElementType(), ArrayIndexes, Index + 1);
-
- CGF.EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
- Counter = CGF.Builder.CreateLoad(IndexVar);
- NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
- CGF.Builder.CreateStore(NextVal, IndexVar);
-
- // Finally, branch back up to the condition for the next iteration.
- CGF.EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- CGF.EmitBlock(AfterFor, true);
-}
-
static bool isMemcpyEquivalentSpecialMember(const CXXMethodDecl *D) {
auto *CD = dyn_cast<CXXConstructorDecl>(D);
if (!(CD && CD->isCopyOrMoveConstructor()) &&
@@ -744,14 +645,11 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
}
}
- ArrayRef<VarDecl *> ArrayIndexes;
- if (MemberInit->getNumArrayIndices())
- ArrayIndexes = MemberInit->getArrayIndices();
- CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit(), ArrayIndexes);
+ CGF.EmitInitializerForField(Field, LHS, MemberInit->getInit());
}
void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
- Expr *Init, ArrayRef<VarDecl *> ArrayIndexes) {
+ Expr *Init) {
QualType FieldType = Field->getType();
switch (getEvaluationKind(FieldType)) {
case TEK_Scalar:
@@ -766,30 +664,13 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
EmitComplexExprIntoLValue(Init, LHS, /*isInit*/ true);
break;
case TEK_Aggregate: {
- Address ArrayIndexVar = Address::invalid();
- if (ArrayIndexes.size()) {
- // The LHS is a pointer to the first object we'll be constructing, as
- // a flat array.
- QualType BaseElementTy = getContext().getBaseElementType(FieldType);
- llvm::Type *BasePtr = ConvertType(BaseElementTy);
- BasePtr = llvm::PointerType::getUnqual(BasePtr);
- Address BaseAddrPtr = Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- LHS = MakeAddrLValue(BaseAddrPtr, BaseElementTy);
-
- // Create an array index that will be used to walk over all of the
- // objects we're constructing.
- ArrayIndexVar = CreateMemTemp(getContext().getSizeType(), "object.index");
- llvm::Value *Zero =
- llvm::Constant::getNullValue(ArrayIndexVar.getElementType());
- Builder.CreateStore(Zero, ArrayIndexVar);
-
- // Emit the block variables for the array indices, if any.
- for (unsigned I = 0, N = ArrayIndexes.size(); I != N; ++I)
- EmitAutoVarDecl(*ArrayIndexes[I]);
- }
-
- EmitAggMemberInitializer(*this, LHS, Init, ArrayIndexVar, FieldType,
- ArrayIndexes, 0);
+ AggValueSlot Slot =
+ AggValueSlot::forLValue(LHS,
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(Init, Slot);
+ break;
}
}
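[Editor's note] A source-level example, not from the patch, of the TEK_Aggregate path above: a constructor member initializer whose initializer is an aggregate expression now goes straight through EmitAggExpr into a slot for the field, with no hand-rolled index loop.

struct Payload { int data[4]; };
struct Holder {
  Payload p;
  explicit Holder(const Payload &src) : p(src) {}  // aggregate field init
};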
@@ -2146,10 +2027,12 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
*this, D, Type, ForVirtualBase, Delegating, Args);
// Emit the call.
- llvm::Value *Callee = CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
+ llvm::Constant *CalleePtr =
+ CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
const CGFunctionInfo &Info =
- CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
- EmitCall(Info, Callee, ReturnValueSlot(), Args, D);
+ CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, D);
+ EmitCall(Info, Callee, ReturnValueSlot(), Args);
// Generate vtable assumptions if we're constructing a complete object
// with a vtable. We don't do this for base subobjects for two reasons:
@@ -2765,8 +2648,8 @@ void CodeGenFunction::EmitVTablePtrCheck(const CXXRecordDecl *RD,
llvm::MDString::get(CGM.getLLVMContext(), "all-vtables"));
llvm::Value *ValidVtable = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedVTable, AllVtables});
- EmitCheck(std::make_pair(TypeTest, M), "cfi_check_fail", StaticData,
- {CastedVTable, ValidVtable});
+ EmitCheck(std::make_pair(TypeTest, M), SanitizerHandler::CFICheckFail,
+ StaticData, {CastedVTable, ValidVtable});
}
bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
@@ -2798,38 +2681,13 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
llvm::Value *CheckResult = Builder.CreateExtractValue(CheckedLoad, 1);
EmitCheck(std::make_pair(CheckResult, SanitizerKind::CFIVCall),
- "cfi_check_fail", nullptr, nullptr);
+ SanitizerHandler::CFICheckFail, nullptr, nullptr);
return Builder.CreateBitCast(
Builder.CreateExtractValue(CheckedLoad, 0),
cast<llvm::PointerType>(VTable->getType())->getElementType());
}
-// FIXME: Ideally Expr::IgnoreParenNoopCasts should do this, but it doesn't do
-// quite what we want.
-static const Expr *skipNoOpCastsAndParens(const Expr *E) {
- while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
- E = PE->getSubExpr();
- continue;
- }
-
- if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
- if (CE->getCastKind() == CK_NoOp) {
- E = CE->getSubExpr();
- continue;
- }
- }
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
- if (UO->getOpcode() == UO_Extension) {
- E = UO->getSubExpr();
- continue;
- }
- }
- return E;
- }
-}
-
bool
CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
const CXXMethodDecl *MD) {
@@ -2838,31 +2696,41 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
if (getLangOpts().AppleKext)
return false;
- // If the most derived class is marked final, we know that no subclass can
- // override this member function and so we can devirtualize it. For example:
- //
- // struct A { virtual void f(); }
- // struct B final : A { };
- //
- // void f(B *b) {
- // b->f();
- // }
- //
- const CXXRecordDecl *MostDerivedClassDecl = Base->getBestDynamicClassType();
- if (MostDerivedClassDecl->hasAttr<FinalAttr>())
- return true;
-
// If the member function is marked 'final', we know that it can't be
- // overridden and can therefore devirtualize it.
+ // overridden and can therefore devirtualize it unless it's pure virtual.
if (MD->hasAttr<FinalAttr>())
+ return !MD->isPure();
+
+ // If the base expression (after skipping derived-to-base conversions) is a
+ // class prvalue, then we can devirtualize.
+ Base = Base->getBestDynamicClassTypeExpr();
+ if (Base->isRValue() && Base->getType()->isRecordType())
+ return true;
+
+ // If we don't even know what we would call, we can't devirtualize.
+ const CXXRecordDecl *BestDynamicDecl = Base->getBestDynamicClassType();
+ if (!BestDynamicDecl)
+ return false;
+
+ // There may be a method corresponding to MD in a derived class.
+ const CXXMethodDecl *DevirtualizedMethod =
+ MD->getCorrespondingMethodInClass(BestDynamicDecl);
+
+ // If that method is pure virtual, we can't devirtualize. If this code is
+ // reached, the result would be UB, not a direct call to the derived class
+ // function, and we can't assume the derived class function is defined.
+ if (DevirtualizedMethod->isPure())
+ return false;
+
+ // If that method is marked final, we can devirtualize it.
+ if (DevirtualizedMethod->hasAttr<FinalAttr>())
return true;
// Similarly, if the class itself is marked 'final' it can't be overridden
// and we can therefore devirtualize the member function call.
- if (MD->getParent()->hasAttr<FinalAttr>())
+ if (BestDynamicDecl->hasAttr<FinalAttr>())
return true;
- Base = skipNoOpCastsAndParens(Base);
if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Base)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
// This is a record decl. We know the type and can devirtualize it.
@@ -2879,17 +2747,15 @@ CodeGenFunction::CanDevirtualizeMemberFunctionCall(const Expr *Base,
if (const ValueDecl *VD = dyn_cast<ValueDecl>(ME->getMemberDecl()))
return VD->getType()->isRecordType();
- // We can always devirtualize calls on temporary object expressions.
- if (isa<CXXConstructExpr>(Base))
- return true;
-
- // And calls on bound temporaries.
- if (isa<CXXBindTemporaryExpr>(Base))
- return true;
-
- // Check if this is a call expr that returns a record type.
- if (const CallExpr *CE = dyn_cast<CallExpr>(Base))
- return CE->getCallReturnType(getContext())->isRecordType();
+ // Likewise for calls on an object accessed by a (non-reference) pointer to
+ // member access.
+ if (auto *BO = dyn_cast<BinaryOperator>(Base)) {
+ if (BO->isPtrMemOp()) {
+ auto *MPT = BO->getRHS()->getType()->castAs<MemberPointerType>();
+ if (MPT->getPointeeType()->isRecordType())
+ return true;
+ }
+ }
// We can't devirtualize the call.
return false;
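[Editor's note] A source-level illustration of the devirtualization rules above; this snippet is a sketch written for this note, not part of the patch.

struct A { virtual void f() {} };
struct B final : A { void f() override {} };
struct C : A { void f() override {} virtual void g() final {} };

void calls(B *b, C *c) {
  b->f();   // devirtualized: the best dynamic type B is marked final
  c->g();   // devirtualized: g() itself is final (and not pure virtual)
  c->f();   // stays virtual: a class derived from C could still override f()
  C().f();  // devirtualized: the base expression is a class prvalue
}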
@@ -2901,7 +2767,7 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// Get the address of the call operator.
const CGFunctionInfo &calleeFnInfo =
CGM.getTypes().arrangeCXXMethodDeclaration(callOperator);
- llvm::Value *callee =
+ llvm::Constant *calleePtr =
CGM.GetAddrOfFunction(GlobalDecl(callOperator),
CGM.getTypes().GetFunctionType(calleeFnInfo));
@@ -2920,8 +2786,8 @@ void CodeGenFunction::EmitForwardingCallToLambda(
// variadic arguments.
// Now emit our call.
- RValue RV = EmitCall(calleeFnInfo, callee, returnSlot,
- callArgs, callOperator);
+ auto callee = CGCallee::forDirect(calleePtr, callOperator);
+ RValue RV = EmitCall(calleeFnInfo, callee, returnSlot, callArgs);
// If necessary, copy the returned value into the slot.
if (!resultType->isVoidType() && returnSlot.isNull())
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index b3278b3b4fef..3666858e63d2 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -445,7 +445,7 @@ CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
for (size_t I = OldLifetimeExtendedSize,
E = LifetimeExtendedCleanupStack.size(); I != E; /**/) {
// Alignment should be guaranteed by the vptrs in the individual cleanups.
- assert((I % llvm::alignOf<LifetimeExtendedCleanupHeader>() == 0) &&
+ assert((I % alignof(LifetimeExtendedCleanupHeader) == 0) &&
"misaligned cleanup stack entry");
LifetimeExtendedCleanupHeader &Header =
diff --git a/lib/CodeGen/CGCleanup.h b/lib/CodeGen/CGCleanup.h
index 98d01b1326c9..2166490ec1fd 100644
--- a/lib/CodeGen/CGCleanup.h
+++ b/lib/CodeGen/CGCleanup.h
@@ -427,8 +427,7 @@ public:
// EHCleanupScope ought to have alignment equal to that -- not more
// (would be misaligned by the stack allocator), and not less (would
// break the appended classes).
-static_assert(llvm::AlignOf<EHCleanupScope>::Alignment ==
- EHScopeStack::ScopeStackAlignment,
+static_assert(alignof(EHCleanupScope) == EHScopeStack::ScopeStackAlignment,
"EHCleanupScope expected alignment");
/// An exceptions scope which filters exceptions thrown through it.
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
new file mode 100644
index 000000000000..2fdb1279ece9
--- /dev/null
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -0,0 +1,116 @@
+//===----- CGCoroutine.cpp - Emit LLVM Code for C++ coroutines ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This contains code dealing with C++ code generation of coroutines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CodeGenFunction.h"
+#include "clang/AST/StmtCXX.h"
+
+using namespace clang;
+using namespace CodeGen;
+
+namespace clang {
+namespace CodeGen {
+
+struct CGCoroData {
+ // Stores the llvm.coro.id emitted in the function so that we can supply it
+ // as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
+ // Note: llvm.coro.id returns a token that cannot be directly expressed in a
+ // builtin.
+ llvm::CallInst *CoroId = nullptr;
+ // If coro.id came from the builtin, remember the expression to give a better
+ // diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
+ // EmitCoroutineBody.
+ CallExpr const *CoroIdExpr = nullptr;
+};
+}
+}
+
+clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
+CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
+
+static void createCoroData(CodeGenFunction &CGF,
+ CodeGenFunction::CGCoroInfo &CurCoro,
+ llvm::CallInst *CoroId,
+ CallExpr const *CoroIdExpr = nullptr) {
+ if (CurCoro.Data) {
+ if (CurCoro.Data->CoroIdExpr)
+ CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ "only one __builtin_coro_id can be used in a function");
+ else if (CoroIdExpr)
+ CGF.CGM.Error(CoroIdExpr->getLocStart(),
+ "__builtin_coro_id shall not be used in a C++ coroutine");
+ else
+ llvm_unreachable("EmitCoroutineBodyStatement called twice?");
+
+ return;
+ }
+
+ CurCoro.Data = std::unique_ptr<CGCoroData>(new CGCoroData);
+ CurCoro.Data->CoroId = CoroId;
+ CurCoro.Data->CoroIdExpr = CoroIdExpr;
+}
+
+void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
+ auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
+ auto &TI = CGM.getContext().getTargetInfo();
+ unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();
+
+ auto *CoroId = Builder.CreateCall(
+ CGM.getIntrinsic(llvm::Intrinsic::coro_id),
+ {Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
+ createCoroData(*this, CurCoro, CoroId);
+
+ EmitScalarExpr(S.getAllocate());
+ // FIXME: Emit the rest of the coroutine.
+ EmitStmt(S.getDeallocate());
+}
+
+// Emit coroutine intrinsic and patch up arguments of the token type.
+RValue CodeGenFunction::EmitCoroutineIntrinsic(const CallExpr *E,
+ unsigned int IID) {
+ SmallVector<llvm::Value *, 8> Args;
+ switch (IID) {
+ default:
+ break;
+ // The following three intrinsics take a token parameter referring to a token
+ // returned by an earlier call to @llvm.coro.id. Since we cannot represent it in
+ // builtins, we patch it up here.
+ case llvm::Intrinsic::coro_alloc:
+ case llvm::Intrinsic::coro_begin:
+ case llvm::Intrinsic::coro_free: {
+ if (CurCoro.Data && CurCoro.Data->CoroId) {
+ Args.push_back(CurCoro.Data->CoroId);
+ break;
+ }
+ CGM.Error(E->getLocStart(), "this builtin expects that __builtin_coro_id has"
+ " been used earlier in this function");
+ // Fallthrough to the next case to add TokenNone as the first argument.
+ }
+ // @llvm.coro.suspend takes a token parameter. Add token 'none' as the first
+ // argument.
+ case llvm::Intrinsic::coro_suspend:
+ Args.push_back(llvm::ConstantTokenNone::get(getLLVMContext()));
+ break;
+ }
+ for (auto &Arg : E->arguments())
+ Args.push_back(EmitScalarExpr(Arg));
+
+ llvm::Value *F = CGM.getIntrinsic(IID);
+ llvm::CallInst *Call = Builder.CreateCall(F, Args);
+
+ // If we see @llvm.coro.id remember it in the CoroData. We will update
+ // coro.alloc, coro.begin and coro.free intrinsics to refer to it.
+ if (IID == llvm::Intrinsic::coro_id) {
+ createCoroData(*this, CurCoro, Call, E);
+ }
+ return RValue::get(Call);
+}
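A rough usage sketch (not part of this patch) of the token patch-up above; the builtin argument lists are assumptions for illustration, and only the ordering requirement comes from the code:

// Illustrative only; the __builtin_coro_* signatures are assumed here.
void bad() {
  __builtin_coro_alloc();  // error: __builtin_coro_id has not been used yet
}
void good() {
  __builtin_coro_id(16, (void*)0, (void*)0, (void*)0);  // token remembered in CGCoroData
  __builtin_coro_alloc();  // lowered with the remembered coro.id token prepended
}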
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 0607a5157a6f..12a68036b09c 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -13,9 +13,9 @@
#include "CGDebugInfo.h"
#include "CGBlocks.h"
-#include "CGRecordLayout.h"
#include "CGCXXABI.h"
#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/AST/ASTContext.h"
@@ -31,6 +31,7 @@
#include "clang/Lex/HeaderSearchOptions.h"
#include "clang/Lex/ModuleMap.h"
#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Constants.h"
@@ -40,10 +41,24 @@
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MD5.h"
#include "llvm/Support/Path.h"
using namespace clang;
using namespace clang::CodeGen;
+static uint32_t getTypeAlignIfRequired(const Type *Ty, const ASTContext &Ctx) {
+ auto TI = Ctx.getTypeInfo(Ty);
+ return TI.AlignIsRequired ? TI.Align : 0;
+}
+
+static uint32_t getTypeAlignIfRequired(QualType Ty, const ASTContext &Ctx) {
+ return getTypeAlignIfRequired(Ty.getTypePtr(), Ctx);
+}
+
+static uint32_t getDeclAlignIfRequired(const Decl *D, const ASTContext &Ctx) {
+ return D->hasAttr<AlignedAttr>() ? D->getMaxAlignment() : 0;
+}
+
CGDebugInfo::CGDebugInfo(CodeGenModule &CGM)
: CGM(CGM), DebugKind(CGM.getCodeGenOpts().getDebugInfo()),
DebugTypeExtRefs(CGM.getCodeGenOpts().DebugTypeExtRefs),
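A small sketch (not part of this patch) of what the new alignment helpers distinguish; the bit count assumes getMaxAlignment reports the attribute-specified alignment in bits:

// Illustrative only: explicit alignment is recorded, default alignment is omitted.
struct Plain { int x; };               // getDeclAlignIfRequired -> 0
struct alignas(16) Padded { int x; };  // getDeclAlignIfRequired -> 128 (bits)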
@@ -306,11 +321,36 @@ StringRef CGDebugInfo::getClassName(const RecordDecl *RD) {
return StringRef();
}
+llvm::DIFile::ChecksumKind
+CGDebugInfo::computeChecksum(FileID FID, SmallString<32> &Checksum) const {
+ Checksum.clear();
+
+ if (!CGM.getCodeGenOpts().EmitCodeView)
+ return llvm::DIFile::CSK_None;
+
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ bool Invalid;
+ llvm::MemoryBuffer *MemBuffer = SM.getBuffer(FID, &Invalid);
+ if (Invalid)
+ return llvm::DIFile::CSK_None;
+
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+
+ Hash.update(MemBuffer->getBuffer());
+ Hash.final(Result);
+
+ Hash.stringifyResult(Result, Checksum);
+ return llvm::DIFile::CSK_MD5;
+}
+
llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (!Loc.isValid())
// If Location is not valid then use main input file.
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
@@ -318,7 +358,9 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
if (PLoc.isInvalid() || StringRef(PLoc.getFilename()).empty())
// If the location is not valid then use main input file.
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
// Cache the results.
const char *fname = PLoc.getFilename();
@@ -330,8 +372,13 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
return cast<llvm::DIFile>(V);
}
+ SmallString<32> Checksum;
+ llvm::DIFile::ChecksumKind CSKind =
+ computeChecksum(SM.getFileID(Loc), Checksum);
+
llvm::DIFile *F = DBuilder.createFile(remapDIPath(PLoc.getFilename()),
- remapDIPath(getCurrentDirname()));
+ remapDIPath(getCurrentDirname()),
+ CSKind, Checksum);
DIFileCache[fname].reset(F);
return F;
@@ -339,7 +386,9 @@ llvm::DIFile *CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
llvm::DIFile *CGDebugInfo::getOrCreateMainFile() {
return DBuilder.createFile(remapDIPath(TheCU->getFilename()),
- remapDIPath(TheCU->getDirectory()));
+ remapDIPath(TheCU->getDirectory()),
+ TheCU->getFile()->getChecksumKind(),
+ TheCU->getFile()->getChecksum());
}
std::string CGDebugInfo::remapDIPath(StringRef Path) const {
@@ -382,6 +431,8 @@ StringRef CGDebugInfo::getCurrentDirname() {
}
void CGDebugInfo::CreateCompileUnit() {
+ SmallString<32> Checksum;
+ llvm::DIFile::ChecksumKind CSKind = llvm::DIFile::CSK_None;
// Should we be asking the SourceManager for the main file name, instead of
// accepting it as an argument? This just causes the main file name to
@@ -408,6 +459,7 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::sys::path::append(MainFileDirSS, MainFileName);
MainFileName = MainFileDirSS.str();
}
+ CSKind = computeChecksum(SM.getMainFileID(), Checksum);
}
llvm::dwarf::SourceLanguage LangTag;
@@ -452,9 +504,12 @@ void CGDebugInfo::CreateCompileUnit() {
// Create new compile unit.
// FIXME - Eliminate TheCU.
TheCU = DBuilder.createCompileUnit(
- LangTag, remapDIPath(MainFileName), remapDIPath(getCurrentDirname()),
+ LangTag, DBuilder.createFile(remapDIPath(MainFileName),
+ remapDIPath(getCurrentDirname()), CSKind,
+ Checksum),
Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
- CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */);
+ CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */,
+ CGM.getCodeGenOpts().SplitDwarfInlining);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -494,14 +549,14 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
auto *ISATy = DBuilder.createPointerType(ClassTy, Size);
- ObjTy =
- DBuilder.createStructType(TheCU, "objc_object", getOrCreateMainFile(),
- 0, 0, 0, 0, nullptr, llvm::DINodeArray());
+ ObjTy = DBuilder.createStructType(
+ TheCU, "objc_object", getOrCreateMainFile(), 0, 0, 0,
+ llvm::DINode::FlagZero, nullptr, llvm::DINodeArray());
DBuilder.replaceArrays(
- ObjTy,
- DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
- ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0, 0, ISATy)));
+ ObjTy, DBuilder.getOrCreateArray(&*DBuilder.createMemberType(
+ ObjTy, "isa", getOrCreateMainFile(), 0, Size, 0, 0,
+ llvm::DINode::FlagZero, ISATy)));
return ObjTy;
}
case BuiltinType::ObjCSel: {
@@ -518,9 +573,8 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
SingletonId);
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
- return DBuilder.createBasicType(
- "opencl_sampler_t", CGM.getContext().getTypeSize(BT),
- CGM.getContext().getTypeAlign(BT), llvm::dwarf::DW_ATE_unsigned);
+ return getOrCreateStructPtrType("opencl_sampler_t",
+ OCLSamplerDITy);
case BuiltinType::OCLEvent:
return getOrCreateStructPtrType("opencl_event_t", OCLEventDITy);
case BuiltinType::OCLClkEvent:
@@ -594,21 +648,19 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
BTName = BT->getName(CGM.getLangOpts());
break;
}
- // Bit size, align and offset of the type.
+ // Bit size and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(BT);
- uint64_t Align = CGM.getContext().getTypeAlign(BT);
- return DBuilder.createBasicType(BTName, Size, Align, Encoding);
+ return DBuilder.createBasicType(BTName, Size, Encoding);
}
llvm::DIType *CGDebugInfo::CreateType(const ComplexType *Ty) {
- // Bit size, align and offset of the type.
+ // Bit size and offset of the type.
llvm::dwarf::TypeKind Encoding = llvm::dwarf::DW_ATE_complex_float;
if (Ty->isComplexIntegerType())
Encoding = llvm::dwarf::DW_ATE_lo_user;
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
- return DBuilder.createBasicType("complex", Size, Align, Encoding);
+ return DBuilder.createBasicType("complex", Size, Encoding);
}
llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
@@ -721,13 +773,7 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
StringRef RDName = getClassName(RD);
uint64_t Size = 0;
- uint64_t Align = 0;
-
- const RecordDecl *D = RD->getDefinition();
- if (D && D->isCompleteDefinition()) {
- Size = CGM.getContext().getTypeSize(Ty);
- Align = CGM.getContext().getTypeAlign(Ty);
- }
+ uint32_t Align = 0;
// Create the type.
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -749,7 +795,7 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
// because that does not return the correct value for references.
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
@@ -776,7 +822,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
SmallVector<llvm::Metadata *, 8> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
- unsigned FieldAlign;
+ uint32_t FieldAlign;
llvm::DINodeArray Elements;
FieldOffset = 0;
@@ -787,7 +833,7 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
Elements = DBuilder.getOrCreateArray(EltTys);
EltTys.clear();
- unsigned Flags = llvm::DINode::FlagAppleBlock;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagAppleBlock;
unsigned LineNo = 0;
auto *EltTy =
@@ -811,9 +857,9 @@ llvm::DIType *CGDebugInfo::CreateType(const BlockPointerType *Ty,
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
FieldSize = CGM.getContext().getTypeSize(Ty);
FieldAlign = CGM.getContext().getTypeAlign(Ty);
- EltTys.push_back(DBuilder.createMemberType(Unit, "__descriptor", nullptr, LineNo,
- FieldSize, FieldAlign, FieldOffset,
- 0, DescTy));
+ EltTys.push_back(DBuilder.createMemberType(
+ Unit, "__descriptor", nullptr, LineNo, FieldSize, FieldAlign, FieldOffset,
+ llvm::DINode::FlagZero, DescTy));
FieldOffset += FieldSize;
Elements = DBuilder.getOrCreateArray(EltTys);
@@ -893,6 +939,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_Swift:
case CC_PreserveMost:
case CC_PreserveAll:
+ case CC_X86RegCall:
return 0;
}
return 0;
@@ -917,14 +964,15 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
}
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(EltTypeArray, 0,
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
getDwarfCC(Ty->getCallConv()));
}
/// Convert an AccessSpecifier into the corresponding DINode flag.
/// As an optimization, return 0 if the access specifier equals the
/// default for the containing type.
-static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
+static llvm::DINode::DIFlags getAccessFlag(AccessSpecifier Access,
+ const RecordDecl *RD) {
AccessSpecifier Default = clang::AS_none;
if (RD && RD->isClass())
Default = clang::AS_private;
@@ -932,7 +980,7 @@ static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
Default = clang::AS_public;
if (Access == Default)
- return 0;
+ return llvm::DINode::FlagZero;
switch (Access) {
case clang::AS_private:
@@ -942,7 +990,7 @@ static unsigned getAccessFlag(AccessSpecifier Access, const RecordDecl *RD) {
case clang::AS_public:
return llvm::DINode::FlagPublic;
case clang::AS_none:
- return 0;
+ return llvm::DINode::FlagZero;
}
llvm_unreachable("unexpected access enumerator");
}
@@ -964,21 +1012,20 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
CGM.getTypes().getCGRecordLayout(RD).getBitFieldInfo(BitFieldDecl);
uint64_t SizeInBits = BitFieldInfo.Size;
assert(SizeInBits > 0 && "found named 0-width bitfield");
- unsigned AlignInBits = CGM.getContext().getTypeAlign(Ty);
uint64_t StorageOffsetInBits =
CGM.getContext().toBits(BitFieldInfo.StorageOffset);
uint64_t OffsetInBits = StorageOffsetInBits + BitFieldInfo.Offset;
- unsigned Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
+ llvm::DINode::DIFlags Flags = getAccessFlag(BitFieldDecl->getAccess(), RD);
return DBuilder.createBitFieldMemberType(
- RecordTy, Name, File, Line, SizeInBits, AlignInBits, OffsetInBits,
- StorageOffsetInBits, Flags, DebugType);
+ RecordTy, Name, File, Line, SizeInBits, OffsetInBits, StorageOffsetInBits,
+ Flags, DebugType);
}
llvm::DIType *
CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
AccessSpecifier AS, uint64_t offsetInBits,
- llvm::DIFile *tunit, llvm::DIScope *scope,
- const RecordDecl *RD) {
+ uint32_t AlignInBits, llvm::DIFile *tunit,
+ llvm::DIScope *scope, const RecordDecl *RD) {
llvm::DIType *debugType = getOrCreateType(type, tunit);
// Get the location for the field.
@@ -986,16 +1033,17 @@ CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
unsigned line = getLineNumber(loc);
uint64_t SizeInBits = 0;
- unsigned AlignInBits = 0;
+ auto Align = AlignInBits;
if (!type->isIncompleteArrayType()) {
TypeInfo TI = CGM.getContext().getTypeInfo(type);
SizeInBits = TI.Width;
- AlignInBits = TI.Align;
+ if (!Align)
+ Align = getTypeAlignIfRequired(type, CGM.getContext());
}
- unsigned flags = getAccessFlag(AS, RD);
+ llvm::DINode::DIFlags flags = getAccessFlag(AS, RD);
return DBuilder.createMemberType(scope, name, file, line, SizeInBits,
- AlignInBits, offsetInBits, flags, debugType);
+ Align, offsetInBits, flags, debugType);
}
void CGDebugInfo::CollectRecordLambdaFields(
@@ -1017,9 +1065,10 @@ void CGDebugInfo::CollectRecordLambdaFields(
VarDecl *V = C.getCapturedVar();
StringRef VName = V->getName();
llvm::DIFile *VUnit = getOrCreateFile(Loc);
+ auto Align = getDeclAlignIfRequired(V, CGM.getContext());
llvm::DIType *FieldType = createFieldType(
VName, Field->getType(), Loc, Field->getAccess(),
- layout.getFieldOffset(fieldno), VUnit, RecordTy, CXXDecl);
+ layout.getFieldOffset(fieldno), Align, VUnit, RecordTy, CXXDecl);
elements.push_back(FieldType);
} else if (C.capturesThis()) {
// TODO: Need to handle 'this' in some way by probably renaming the
@@ -1060,9 +1109,10 @@ CGDebugInfo::CreateRecordStaticField(const VarDecl *Var, llvm::DIType *RecordTy,
}
}
- unsigned Flags = getAccessFlag(Var->getAccess(), RD);
+ llvm::DINode::DIFlags Flags = getAccessFlag(Var->getAccess(), RD);
+ auto Align = getDeclAlignIfRequired(Var, CGM.getContext());
llvm::DIDerivedType *GV = DBuilder.createStaticMemberType(
- RecordTy, VName, VUnit, LineNumber, VTy, Flags, C);
+ RecordTy, VName, VUnit, LineNumber, VTy, Flags, C, Align);
StaticDataMemberCache[Var->getCanonicalDecl()].reset(GV);
return GV;
}
@@ -1082,14 +1132,26 @@ void CGDebugInfo::CollectRecordNormalField(
if (field->isBitField()) {
FieldType = createBitFieldType(field, RecordTy, RD);
} else {
+ auto Align = getDeclAlignIfRequired(field, CGM.getContext());
FieldType =
createFieldType(name, type, field->getLocation(), field->getAccess(),
- OffsetInBits, tunit, RecordTy, RD);
+ OffsetInBits, Align, tunit, RecordTy, RD);
}
elements.push_back(FieldType);
}
+void CGDebugInfo::CollectRecordNestedRecord(
+ const RecordDecl *RD, SmallVectorImpl<llvm::Metadata *> &elements) {
+ QualType Ty = CGM.getContext().getTypeDeclType(RD);
+ // Injected class names are not considered nested records.
+ if (isa<InjectedClassNameType>(Ty))
+ return;
+ SourceLocation Loc = RD->getLocation();
+ llvm::DIType *nestedType = getOrCreateType(Ty, getOrCreateFile(Loc));
+ elements.push_back(nestedType);
+}
+
void CGDebugInfo::CollectRecordFields(
const RecordDecl *record, llvm::DIFile *tunit,
SmallVectorImpl<llvm::Metadata *> &elements,
@@ -1101,6 +1163,10 @@ void CGDebugInfo::CollectRecordFields(
else {
const ASTRecordLayout &layout = CGM.getContext().getASTRecordLayout(record);
+ // Debug info for nested records is included in the member list only for
+ // CodeView.
+ bool IncludeNestedRecords = CGM.getCodeGenOpts().EmitCodeView;
+
// Field number for non-static fields.
unsigned fieldNo = 0;
@@ -1126,7 +1192,10 @@ void CGDebugInfo::CollectRecordFields(
// Bump field number for next field.
++fieldNo;
- }
+ } else if (const auto *nestedRec = dyn_cast<CXXRecordDecl>(I))
+ if (IncludeNestedRecords && !nestedRec->isImplicit() &&
+ nestedRec->getDeclContext() == record)
+ CollectRecordNestedRecord(nestedRec, elements);
}
}
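A minimal illustration (not part of this patch) of the nested-record handling above; only CodeView output lists the nested type among the enclosing record's members:

// Illustrative only: 'Nested' is added to Outer's member list under CodeView.
struct Outer {
  struct Nested { int n; };
  int m;
};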
@@ -1162,7 +1231,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
QualType PointeeTy = ThisPtrTy->getPointeeType();
unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
uint64_t Size = CGM.getTarget().getPointerWidth(AS);
- uint64_t Align = CGM.getContext().getTypeAlign(ThisPtrTy);
+ auto Align = getTypeAlignIfRequired(ThisPtrTy, CGM.getContext());
llvm::DIType *PointeeType = getOrCreateType(PointeeTy, Unit);
llvm::DIType *ThisPtrType =
DBuilder.createPointerType(PointeeType, Size, Align);
@@ -1185,7 +1254,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateInstanceMethodType(
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (Func->getExtProtoInfo().RefQualifier == RQ_LValue)
Flags |= llvm::DINode::FlagLValueReference;
if (Func->getExtProtoInfo().RefQualifier == RQ_RValue)
@@ -1236,7 +1305,7 @@ llvm::DISubprogram *CGDebugInfo::CreateCXXMemberFunction(
llvm::DIType *ContainingType = nullptr;
unsigned Virtuality = 0;
unsigned VIndex = 0;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
int ThisAdjustment = 0;
if (Method->isVirtual()) {
@@ -1347,13 +1416,33 @@ void CGDebugInfo::CollectCXXMemberFunctions(
void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType *RecordTy) {
- const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
- for (const auto &BI : RD->bases()) {
- unsigned BFlags = 0;
- uint64_t BaseOffset;
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> SeenTypes;
+ CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->bases(), SeenTypes,
+ llvm::DINode::FlagZero);
+
+ // If we are generating CodeView debug info, we also need to emit records for
+ // indirect virtual base classes.
+ if (CGM.getCodeGenOpts().EmitCodeView) {
+ CollectCXXBasesAux(RD, Unit, EltTys, RecordTy, RD->vbases(), SeenTypes,
+ llvm::DINode::FlagIndirectVirtualBase);
+ }
+}
+void CGDebugInfo::CollectCXXBasesAux(
+ const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys, llvm::DIType *RecordTy,
+ const CXXRecordDecl::base_class_const_range &Bases,
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
+ llvm::DINode::DIFlags StartingFlags) {
+ const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ for (const auto &BI : Bases) {
const auto *Base =
cast<CXXRecordDecl>(BI.getType()->getAs<RecordType>()->getDecl());
+ if (!SeenTypes.insert(Base).second)
+ continue;
+ auto *BaseTy = getOrCreateType(BI.getType(), Unit);
+ llvm::DINode::DIFlags BFlags = StartingFlags;
+ uint64_t BaseOffset;
if (BI.isVirtual()) {
if (CGM.getTarget().getCXXABI().isItaniumFamily()) {
@@ -1368,15 +1457,15 @@ void CGDebugInfo::CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile *Unit,
BaseOffset =
4 * CGM.getMicrosoftVTableContext().getVBTableIndex(RD, Base);
}
- BFlags = llvm::DINode::FlagVirtual;
+ BFlags |= llvm::DINode::FlagVirtual;
} else
BaseOffset = CGM.getContext().toBits(RL.getBaseClassOffset(Base));
// FIXME: Inconsistent units for BaseOffset. It is in bytes when
// BI->isVirtual() and bits when not.
BFlags |= getAccessFlag(BI.getAccessSpecifier(), RD);
- llvm::DIType *DTy = DBuilder.createInheritance(
- RecordTy, getOrCreateType(BI.getType(), Unit), BaseOffset, BFlags);
+ llvm::DIType *DTy =
+ DBuilder.createInheritance(RecordTy, BaseTy, BaseOffset, BFlags);
EltTys.push_back(DTy);
}
}
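A short sketch (not part of this patch) of the indirect virtual base case that CollectCXXBasesAux now covers for CodeView; the type names are illustrative:

// Illustrative only: for C, CodeView also records A (an indirect virtual base)
// with FlagIndirectVirtualBase, in addition to the direct base B.
struct A { virtual ~A(); };
struct B : virtual A {};
struct C : B {};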
@@ -1531,22 +1620,56 @@ StringRef CGDebugInfo::getVTableName(const CXXRecordDecl *RD) {
}
void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
- SmallVectorImpl<llvm::Metadata *> &EltTys) {
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DICompositeType *RecordTy) {
+ // If this class is not dynamic then there is not any vtable info to collect.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Don't emit any vtable shape or vptr info if this class doesn't have an
+ // extendable vfptr. This can happen if the class doesn't have virtual
+ // methods, or in the MS ABI if those virtual methods only come from virtually
+ // inherited bases.
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
+ if (!RL.hasExtendableVFPtr())
+ return;
- // If there is a primary base then it will hold vtable info.
+ // CodeView needs to know how large the vtable of every dynamic class is, so
+ // emit a special named pointer type into the element list. The vptr type
+ // points to this type as well.
+ llvm::DIType *VPtrTy = nullptr;
+ bool NeedVTableShape = CGM.getCodeGenOpts().EmitCodeView &&
+ CGM.getTarget().getCXXABI().isMicrosoft();
+ if (NeedVTableShape) {
+ uint64_t PtrWidth =
+ CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
+ const VTableLayout &VFTLayout =
+ CGM.getMicrosoftVTableContext().getVFTableLayout(RD, CharUnits::Zero());
+ unsigned VSlotCount =
+ VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData;
+ unsigned VTableWidth = PtrWidth * VSlotCount;
+
+ // Create a very wide void* type and insert it directly in the element list.
+ llvm::DIType *VTableType =
+ DBuilder.createPointerType(nullptr, VTableWidth, 0, "__vtbl_ptr_type");
+ EltTys.push_back(VTableType);
+
+ // The vptr is a pointer to this special vtable type.
+ VPtrTy = DBuilder.createPointerType(VTableType, PtrWidth);
+ }
+
+ // If there is a primary base then the artificial vptr member lives there.
if (RL.getPrimaryBase())
return;
- // If this class is not dynamic then there is not any vtable info to collect.
- if (!RD->isDynamicClass())
- return;
+ if (!VPtrTy)
+ VPtrTy = getOrCreateVTablePtrType(Unit);
unsigned Size = CGM.getContext().getTypeSize(CGM.getContext().VoidPtrTy);
- llvm::DIType *VPTR = DBuilder.createMemberType(
+ llvm::DIType *VPtrMember = DBuilder.createMemberType(
Unit, getVTableName(RD), Unit, 0, Size, 0, 0,
- llvm::DINode::FlagArtificial, getOrCreateVTablePtrType(Unit));
- EltTys.push_back(VPTR);
+ llvm::DINode::FlagArtificial, VPtrTy);
+ EltTys.push_back(VPtrMember);
}
llvm::DIType *CGDebugInfo::getOrCreateRecordType(QualType RTy,
@@ -1591,23 +1714,6 @@ void CGDebugInfo::completeType(const RecordDecl *RD) {
completeRequiredType(RD);
}
-void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
- if (DebugKind <= codegenoptions::DebugLineTablesOnly)
- return;
-
- if (const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD))
- if (CXXDecl->isDynamicClass())
- return;
-
- if (DebugTypeExtRefs && RD->isFromASTFile())
- return;
-
- QualType Ty = CGM.getContext().getRecordType(RD);
- llvm::DIType *T = getTypeOrNull(Ty);
- if (T && T->isForwardDecl())
- completeClassData(RD);
-}
-
void CGDebugInfo::completeClassData(const RecordDecl *RD) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
@@ -1633,21 +1739,37 @@ static bool hasExplicitMemberDefinition(CXXRecordDecl::method_iterator I,
/// Does a type definition exist in an imported clang module?
static bool isDefinedInClangModule(const RecordDecl *RD) {
+ // Only definitions that were imported from an AST file come from a module.
if (!RD || !RD->isFromASTFile())
return false;
+ // Anonymous entities cannot be addressed. Treat them as not from a module.
if (!RD->isExternallyVisible() && RD->getName().empty())
return false;
if (auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD)) {
- assert(CXXDecl->isCompleteDefinition() && "incomplete record definition");
- if (CXXDecl->getTemplateSpecializationKind() != TSK_Undeclared)
- // Make sure the instantiation is actually in a module.
- if (CXXDecl->field_begin() != CXXDecl->field_end())
- return CXXDecl->field_begin()->isFromASTFile();
+ if (!CXXDecl->isCompleteDefinition())
+ return false;
+ auto TemplateKind = CXXDecl->getTemplateSpecializationKind();
+ if (TemplateKind != TSK_Undeclared) {
+ // This is a template, check the origin of the first member.
+ if (CXXDecl->field_begin() == CXXDecl->field_end())
+ return TemplateKind == TSK_ExplicitInstantiationDeclaration;
+ if (!CXXDecl->field_begin()->isFromASTFile())
+ return false;
+ }
}
-
return true;
}
+/// Return true if the class or any of its methods are marked dllimport.
+static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<DLLImportAttr>())
+ return true;
+ for (const CXXMethodDecl *MD : RD->methods())
+ if (MD->hasAttr<DLLImportAttr>())
+ return true;
+ return false;
+}
+
static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
@@ -1668,7 +1790,14 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
if (!CXXDecl)
return false;
- if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass())
+ // Only emit complete debug info for a dynamic class when its vtable is
+ // emitted. However, Microsoft debuggers don't resolve type information
+ // across DLL boundaries, so skip this optimization if the class or any of its
+ // methods are marked dllimport. This isn't a complete solution, since objects
+ // without any dllimport methods can be used in one DLL and constructed in
+ // another, but it is the current behavior of LimitedDebugInfo.
+ if (CXXDecl->hasDefinition() && CXXDecl->isDynamicClass() &&
+ !isClassOrMethodDLLImport(CXXDecl))
return true;
TemplateSpecializationKind Spec = TSK_Undeclared;
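An example (not part of this patch) of the dllimport exception described above, assuming the MS ABI; such a class keeps its full debug-info definition even though its vtable is emitted in another DLL:

// Illustrative only: dllimport disables the vtable-based definition omission.
struct __declspec(dllimport) Imported {
  virtual void f();
};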
@@ -1683,6 +1812,16 @@ static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
return false;
}
+void CGDebugInfo::completeRequiredType(const RecordDecl *RD) {
+ if (shouldOmitDefinition(DebugKind, DebugTypeExtRefs, RD, CGM.getLangOpts()))
+ return;
+
+ QualType Ty = CGM.getContext().getRecordType(RD);
+ llvm::DIType *T = getTypeOrNull(Ty);
+ if (T && T->isForwardDecl())
+ completeClassData(RD);
+}
+
llvm::DIType *CGDebugInfo::CreateType(const RecordType *Ty) {
RecordDecl *RD = Ty->getDecl();
llvm::DIType *T = cast_or_null<llvm::DIType>(getTypeOrNull(QualType(Ty, 0)));
@@ -1732,7 +1871,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const RecordType *Ty) {
const auto *CXXDecl = dyn_cast<CXXRecordDecl>(RD);
if (CXXDecl) {
CollectCXXBases(CXXDecl, DefUnit, EltTys, FwdDecl);
- CollectVTableInfo(CXXDecl, DefUnit, EltTys);
+ CollectVTableInfo(CXXDecl, DefUnit, EltTys, FwdDecl);
}
// Collect data fields (including static variables and any initializers).
@@ -1760,6 +1899,18 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectType *Ty,
return getOrCreateType(Ty->getBaseType(), Unit);
}
+llvm::DIType *CGDebugInfo::CreateType(const ObjCTypeParamType *Ty,
+ llvm::DIFile *Unit) {
+ // Ignore protocols.
+ SourceLocation Loc = Ty->getDecl()->getLocation();
+
+ // Use Typedefs to represent ObjCTypeParamType.
+ return DBuilder.createTypedef(
+ getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit),
+ Ty->getDecl()->getName(), getOrCreateFile(Loc), getLineNumber(Loc),
+ getDeclContextDescriptor(Ty->getDecl()));
+}
+
/// \return true if Getter has the default name for the property PD.
static bool hasDefaultGetterName(const ObjCPropertyDecl *PD,
const ObjCMethodDecl *Getter) {
@@ -1860,10 +2011,11 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
uint64_t Signature = Mod.getSignature() ? Mod.getSignature() : ~1ULL;
llvm::DIBuilder DIB(CGM.getModule());
- DIB.createCompileUnit(TheCU->getSourceLanguage(), Mod.getModuleName(),
- Mod.getPath(), TheCU->getProducer(), true,
- StringRef(), 0, Mod.getASTFile(),
- llvm::DICompileUnit::FullDebug, Signature);
+ DIB.createCompileUnit(TheCU->getSourceLanguage(),
+ DIB.createFile(Mod.getModuleName(), Mod.getPath()),
+ TheCU->getProducer(), true, StringRef(), 0,
+ Mod.getASTFile(), llvm::DICompileUnit::FullDebug,
+ Signature);
DIB.finalize();
}
llvm::DIModule *Parent =
@@ -1887,9 +2039,9 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (ID->getImplementation())
Flags |= llvm::DINode::FlagObjcClassComplete;
@@ -1915,7 +2067,8 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
if (!SClassTy)
return nullptr;
- llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0, 0);
+ llvm::DIType *InhTag = DBuilder.createInheritance(RealDecl, SClassTy, 0,
+ llvm::DINode::FlagZero);
EltTys.push_back(InhTag);
}
@@ -1970,7 +2123,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
unsigned FieldLine = getLineNumber(Field->getLocation());
QualType FType = Field->getType();
uint64_t FieldSize = 0;
- unsigned FieldAlign = 0;
+ uint32_t FieldAlign = 0;
if (!FType->isIncompleteArrayType()) {
@@ -1978,7 +2131,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
FieldSize = Field->isBitField()
? Field->getBitWidthValue(CGM.getContext())
: CGM.getContext().getTypeSize(FType);
- FieldAlign = CGM.getContext().getTypeAlign(FType);
+ FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext());
}
uint64_t FieldOffset;
@@ -1997,7 +2150,7 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
FieldOffset = RL.getFieldOffset(FieldNo);
}
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (Field->getAccessControl() == ObjCIvarDecl::Protected)
Flags = llvm::DINode::FlagProtected;
else if (Field->getAccessControl() == ObjCIvarDecl::Private)
@@ -2052,33 +2205,33 @@ llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DINodeArray SubscriptArray = DBuilder.getOrCreateArray(Subscript);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
return DBuilder.createVectorType(Size, Align, ElementTy, SubscriptArray);
}
llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
uint64_t Size;
- uint64_t Align;
+ uint32_t Align;
// FIXME: make getTypeAlign() aware of VLAs and incomplete array types
if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
Size = 0;
- Align =
- CGM.getContext().getTypeAlign(CGM.getContext().getBaseElementType(VAT));
+ Align = getTypeAlignIfRequired(CGM.getContext().getBaseElementType(VAT),
+ CGM.getContext());
} else if (Ty->isIncompleteArrayType()) {
Size = 0;
if (Ty->getElementType()->isIncompleteType())
Align = 0;
else
- Align = CGM.getContext().getTypeAlign(Ty->getElementType());
+ Align = getTypeAlignIfRequired(Ty->getElementType(), CGM.getContext());
} else if (Ty->isIncompleteType()) {
Size = 0;
Align = 0;
} else {
// Size and align of the whole array, not the element type.
Size = CGM.getContext().getTypeSize(Ty);
- Align = CGM.getContext().getTypeAlign(Ty);
+ Align = getTypeAlignIfRequired(Ty, CGM.getContext());
}
// Add the dimensions of the array. FIXME: This loses CV qualifiers from
@@ -2097,6 +2250,13 @@ llvm::DIType *CGDebugInfo::CreateType(const ArrayType *Ty, llvm::DIFile *Unit) {
int64_t Count = -1; // Count == -1 is an unbounded array.
if (const auto *CAT = dyn_cast<ConstantArrayType>(Ty))
Count = CAT->getSize().getZExtValue();
+ else if (const auto *VAT = dyn_cast<VariableArrayType>(Ty)) {
+ if (Expr *Size = VAT->getSizeExpr()) {
+ llvm::APSInt V;
+ if (Size->EvaluateAsInt(V, CGM.getContext()))
+ Count = V.getExtValue();
+ }
+ }
// FIXME: Verify this is right for VLAs.
Subscripts.push_back(DBuilder.getOrCreateSubrange(0, Count));
@@ -2123,7 +2283,7 @@ llvm::DIType *CGDebugInfo::CreateType(const RValueReferenceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
llvm::DIFile *U) {
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
uint64_t Size = 0;
if (!Ty->isIncompleteType()) {
@@ -2163,9 +2323,8 @@ llvm::DIType *CGDebugInfo::CreateType(const MemberPointerType *Ty,
}
llvm::DIType *CGDebugInfo::CreateType(const AtomicType *Ty, llvm::DIFile *U) {
- // Ignore the atomic wrapping
- // FIXME: What is the correct representation?
- return getOrCreateType(Ty->getValueType(), U);
+ auto *FromTy = getOrCreateType(Ty->getValueType(), U);
+ return DBuilder.createQualifiedType(llvm::dwarf::DW_TAG_atomic_type, FromTy);
}
llvm::DIType* CGDebugInfo::CreateType(const PipeType *Ty,
@@ -2177,10 +2336,10 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
const EnumDecl *ED = Ty->getDecl();
uint64_t Size = 0;
- uint64_t Align = 0;
+ uint32_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -2220,10 +2379,10 @@ llvm::DIType *CGDebugInfo::CreateEnumType(const EnumType *Ty) {
llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
const EnumDecl *ED = Ty->getDecl();
uint64_t Size = 0;
- uint64_t Align = 0;
+ uint32_t Align = 0;
if (!ED->getTypeForDecl()->isIncompleteType()) {
Size = CGM.getContext().getTypeSize(ED->getTypeForDecl());
- Align = CGM.getContext().getTypeAlign(ED->getTypeForDecl());
+ Align = getDeclAlignIfRequired(ED, CGM.getContext());
}
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
@@ -2292,12 +2451,18 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
- case Type::Auto:
+ case Type::Auto: {
QualType DT = cast<AutoType>(T)->getDeducedType();
assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
T = DT;
break;
}
+ case Type::Adjusted:
+ case Type::Decayed:
+ // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
+ T = cast<AdjustedType>(T)->getAdjustedType();
+ break;
+ }
assert(T != LastT && "Type unwrapping failed to unwrap!");
(void)LastT;
@@ -2406,6 +2571,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
case Type::ObjCObject:
return CreateType(cast<ObjCObjectType>(Ty), Unit);
+ case Type::ObjCTypeParam:
+ return CreateType(cast<ObjCTypeParamType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
case Type::Builtin:
@@ -2414,11 +2581,6 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
return CreateType(cast<ComplexType>(Ty));
case Type::Pointer:
return CreateType(cast<PointerType>(Ty), Unit);
- case Type::Adjusted:
- case Type::Decayed:
- // Decayed and adjusted types use the adjusted type in LLVM and DWARF.
- return CreateType(
- cast<PointerType>(cast<AdjustedType>(Ty)->getAdjustedType()), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef:
@@ -2454,6 +2616,8 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Auto:
case Type::Attributed:
+ case Type::Adjusted:
+ case Type::Decayed:
case Type::Elaborated:
case Type::Paren:
case Type::SubstTemplateTypeParm:
@@ -2518,13 +2682,13 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
return getOrCreateRecordFwdDecl(Ty, RDContext);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+ auto Align = getDeclAlignIfRequired(D, CGM.getContext());
SmallString<256> FullName = getUniqueTagTypeName(Ty, CGM, TheCU);
llvm::DICompositeType *RealDecl = DBuilder.createReplaceableCompositeType(
- getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align, 0,
- FullName);
+ getTagForRecord(RD), RDName, RDContext, DefUnit, Line, 0, Size, Align,
+ llvm::DINode::FlagZero, FullName);
// Elements of composite types usually have back references to the type,
// creating uniquing cycles. Distinct nodes are more efficient.
@@ -2587,9 +2751,10 @@ llvm::DIType *CGDebugInfo::CreateMemberType(llvm::DIFile *Unit, QualType FType,
StringRef Name, uint64_t *Offset) {
llvm::DIType *FieldTy = CGDebugInfo::getOrCreateType(FType, Unit);
uint64_t FieldSize = CGM.getContext().getTypeSize(FType);
- unsigned FieldAlign = CGM.getContext().getTypeAlign(FType);
- llvm::DIType *Ty = DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize,
- FieldAlign, *Offset, 0, FieldTy);
+ auto FieldAlign = getTypeAlignIfRequired(FType, CGM.getContext());
+ llvm::DIType *Ty =
+ DBuilder.createMemberType(Unit, Name, Unit, 0, FieldSize, FieldAlign,
+ *Offset, llvm::DINode::FlagZero, FieldTy);
*Offset += FieldSize;
return Ty;
}
@@ -2599,7 +2764,7 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
StringRef &LinkageName,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
- unsigned &Flags) {
+ llvm::DINode::DIFlags &Flags) {
const auto *FD = cast<FunctionDecl>(GD.getDecl());
Name = getFunctionName(FD);
// Use mangled name as linkage name for C/C++ functions.
@@ -2624,6 +2789,9 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
llvm::DIScope *Mod = getParentModuleOrNull(RDecl);
FDContext = getContextDescriptor(RDecl, Mod ? Mod : TheCU);
}
+ // Check if it is a noreturn-marked function
+ if (FD->isNoReturn())
+ Flags |= llvm::DINode::FlagNoReturn;
// Collect template parameters.
TParamsArray = CollectFunctionTemplateParams(FD, Unit);
}
@@ -2680,7 +2848,7 @@ llvm::DISubprogram *
CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
llvm::DINodeArray TParamsArray;
StringRef Name, LinkageName;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
SourceLocation Loc = FD->getLocation();
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
@@ -2717,9 +2885,10 @@ CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
unsigned Line = getLineNumber(Loc);
collectVarDeclProps(VD, Unit, Line, T, Name, LinkageName, DContext);
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *GV = DBuilder.createTempGlobalVariableFwdDecl(
DContext, Name, LinkageName, Unit, Line, getOrCreateType(T, Unit),
- !VD->isExternallyVisible(), nullptr, nullptr);
+ !VD->isExternallyVisible(), nullptr, Align);
FwdDeclReplaceMap.emplace_back(
std::piecewise_construct,
std::make_tuple(cast<VarDecl>(VD->getCanonicalDecl())),
@@ -2737,8 +2906,12 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
getOrCreateFile(TD->getLocation()));
auto I = DeclCache.find(D->getCanonicalDecl());
- if (I != DeclCache.end())
- return dyn_cast_or_null<llvm::DINode>(I->second);
+ if (I != DeclCache.end()) {
+ auto N = I->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(N))
+ return GVE->getVariable();
+ return dyn_cast_or_null<llvm::DINode>(N);
+ }
// No definition for now. Emit a forward definition that might be
// merged with a potential upcoming definition.
@@ -2834,7 +3007,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
Elts.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(Elts);
- return DBuilder.createSubroutineType(EltTypeArray, 0, getDwarfCC(CC));
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
+ getDwarfCC(CC));
}
// Handle variadic function types; they need an additional
@@ -2848,7 +3022,8 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
EltTys.push_back(getOrCreateType(ParamType, F));
EltTys.push_back(DBuilder.createUnspecifiedParameter());
llvm::DITypeRefArray EltTypeArray = DBuilder.getOrCreateTypeArray(EltTys);
- return DBuilder.createSubroutineType(EltTypeArray, 0, getDwarfCC(CC));
+ return DBuilder.createSubroutineType(EltTypeArray, llvm::DINode::FlagZero,
+ getDwarfCC(CC));
}
return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
@@ -2866,7 +3041,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
const Decl *D = GD.getDecl();
bool HasDecl = (D != nullptr);
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = Unit;
llvm::DINodeArray TParamsArray;
@@ -2899,9 +3074,8 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, SourceLocation Loc,
if (!HasDecl || D->isImplicit()) {
Flags |= llvm::DINode::FlagArtificial;
- // Artificial functions without a location should not silently reuse CurLoc.
- if (Loc.isInvalid())
- CurLoc = SourceLocation();
+ // Artificial functions should not silently reuse CurLoc.
+ CurLoc = SourceLocation();
}
unsigned LineNo = getLineNumber(Loc);
unsigned ScopeLine = getLineNumber(ScopeLoc);
@@ -2939,7 +3113,7 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
if (!D)
return;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *FDContext = getDeclContextDescriptor(D);
llvm::DINodeArray TParamsArray;
@@ -3042,7 +3216,7 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
SmallVector<llvm::Metadata *, 5> EltTys;
QualType FType;
uint64_t FieldSize, FieldOffset;
- unsigned FieldAlign;
+ uint32_t FieldAlign;
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
QualType Type = VD->getType();
@@ -3096,13 +3270,14 @@ llvm::DIType *CGDebugInfo::EmitTypeForVarWithBlocksAttr(const VarDecl *VD,
*XOffset = FieldOffset;
FieldTy = DBuilder.createMemberType(Unit, VD->getName(), Unit, 0, FieldSize,
- FieldAlign, FieldOffset, 0, FieldTy);
+ FieldAlign, FieldOffset,
+ llvm::DINode::FlagZero, FieldTy);
EltTys.push_back(FieldTy);
FieldOffset += FieldSize;
llvm::DINodeArray Elements = DBuilder.getOrCreateArray(EltTys);
- unsigned Flags = llvm::DINode::FlagBlockByrefStruct;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagBlockByrefStruct;
return DBuilder.createStructType(Unit, "", Unit, 0, FieldOffset, 0, Flags,
nullptr, Elements);
@@ -3142,9 +3317,12 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
Column = getColumnNumber(VD->getLocation());
}
SmallVector<int64_t, 9> Expr;
- unsigned Flags = 0;
+ llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
+
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
+
// If this is the first argument and it is implicit then
// give it an object pointer flag.
// FIXME: There has to be a better way to do this, but for static
@@ -3179,7 +3357,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
? DBuilder.createParameterVariable(Scope, VD->getName(),
*ArgNo, Unit, Line, Ty)
: DBuilder.createAutoVariable(Scope, VD->getName(), Unit,
- Line, Ty);
+ Line, Ty, Align);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3209,9 +3387,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
continue;
// Use VarDecl's Tag, Scope and Line number.
+ auto FieldAlign = getDeclAlignIfRequired(Field, CGM.getContext());
auto *D = DBuilder.createAutoVariable(
Scope, FieldName, Unit, Line, FieldTy, CGM.getLangOpts().Optimize,
- Flags | llvm::DINode::FlagArtificial);
+ Flags | llvm::DINode::FlagArtificial, FieldAlign);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3222,13 +3401,13 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
}
// Create the descriptor for the variable.
- auto *D =
- ArgNo
- ? DBuilder.createParameterVariable(Scope, Name, *ArgNo, Unit, Line,
- Ty, CGM.getLangOpts().Optimize,
- Flags)
- : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
- CGM.getLangOpts().Optimize, Flags);
+ auto *D = ArgNo
+ ? DBuilder.createParameterVariable(
+ Scope, Name, *ArgNo, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags)
+ : DBuilder.createAutoVariable(Scope, Name, Unit, Line, Ty,
+ CGM.getLangOpts().Optimize, Flags,
+ Align);
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
@@ -3307,9 +3486,10 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
}
// Create the descriptor for the variable.
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
auto *D = DBuilder.createAutoVariable(
cast<llvm::DILocalScope>(LexicalBlockStack.back()), VD->getName(), Unit,
- Line, Ty);
+ Line, Ty, false, llvm::DINode::FlagZero, Align);
// Insert an llvm.dbg.declare into the current block.
auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back());
@@ -3438,17 +3618,19 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DIType *fieldType;
if (capture->isByRef()) {
TypeInfo PtrInfo = C.getTypeInfo(C.VoidPtrTy);
+ auto Align = PtrInfo.AlignIsRequired ? PtrInfo.Align : 0;
// FIXME: this creates a second copy of this type!
uint64_t xoffset;
fieldType = EmitTypeForVarWithBlocksAttr(variable, &xoffset);
fieldType = DBuilder.createPointerType(fieldType, PtrInfo.Width);
- fieldType =
- DBuilder.createMemberType(tunit, name, tunit, line, PtrInfo.Width,
- PtrInfo.Align, offsetInBits, 0, fieldType);
+ fieldType = DBuilder.createMemberType(tunit, name, tunit, line,
+ PtrInfo.Width, Align, offsetInBits,
+ llvm::DINode::FlagZero, fieldType);
} else {
+ auto Align = getDeclAlignIfRequired(variable, CGM.getContext());
fieldType = createFieldType(name, variable->getType(), loc, AS_public,
- offsetInBits, tunit, tunit);
+ offsetInBits, Align, tunit, tunit);
}
fields.push_back(fieldType);
}
@@ -3459,14 +3641,14 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
llvm::DINodeArray fieldsArray = DBuilder.getOrCreateArray(fields);
- llvm::DIType *type = DBuilder.createStructType(
- tunit, typeName.str(), tunit, line,
- CGM.getContext().toBits(block.BlockSize),
- CGM.getContext().toBits(block.BlockAlign), 0, nullptr, fieldsArray);
+ llvm::DIType *type =
+ DBuilder.createStructType(tunit, typeName.str(), tunit, line,
+ CGM.getContext().toBits(block.BlockSize), 0,
+ llvm::DINode::FlagZero, nullptr, fieldsArray);
type = DBuilder.createPointerType(type, CGM.PointerWidthInBits);
// Get overall information about the block.
- unsigned flags = llvm::DINode::FlagArtificial;
+ llvm::DINode::DIFlags flags = llvm::DINode::FlagArtificial;
auto *scope = cast<llvm::DILocalScope>(LexicalBlockStack.back());
// Create the descriptor for the parameter.
@@ -3505,10 +3687,10 @@ CGDebugInfo::getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D) {
return CreateRecordStaticField(D, Ctxt, cast<RecordDecl>(DC));
}
-llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
+llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
const RecordDecl *RD, llvm::DIFile *Unit, unsigned LineNo,
StringRef LinkageName, llvm::GlobalVariable *Var, llvm::DIScope *DContext) {
- llvm::DIGlobalVariable *GV = nullptr;
+ llvm::DIGlobalVariableExpression *GVE = nullptr;
for (const auto *Field : RD->fields()) {
llvm::DIType *FieldTy = getOrCreateType(Field->getType(), Unit);
@@ -3517,16 +3699,17 @@ llvm::DIGlobalVariable *CGDebugInfo::CollectAnonRecordDecls(
// Ignore unnamed fields, but recurse into anonymous records.
if (FieldName.empty()) {
if (const auto *RT = dyn_cast<RecordType>(Field->getType()))
- GV = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
+ GVE = CollectAnonRecordDecls(RT->getDecl(), Unit, LineNo, LinkageName,
Var, DContext);
continue;
}
// Use VarDecl's Tag, Scope and Line number.
- GV = DBuilder.createGlobalVariable(DContext, FieldName, LinkageName, Unit,
- LineNo, FieldTy,
- Var->hasLocalLinkage(), Var, nullptr);
+ GVE = DBuilder.createGlobalVariableExpression(
+ DContext, FieldName, LinkageName, Unit, LineNo, FieldTy,
+ Var->hasLocalLinkage());
+ Var->addDebugInfo(GVE);
}
- return GV;
+ return GVE;
}
void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
@@ -3534,6 +3717,14 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
if (D->hasAttr<NoDebugAttr>())
return;
+
+ // If we already created a DIGlobalVariable for this declaration, just attach
+ // it to the llvm::GlobalVariable.
+ auto Cached = DeclCache.find(D->getCanonicalDecl());
+ if (Cached != DeclCache.end())
+ return Var->addDebugInfo(
+ cast<llvm::DIGlobalVariableExpression>(Cached->second));
+
// Create global variable debug descriptor.
llvm::DIFile *Unit = nullptr;
llvm::DIScope *DContext = nullptr;
@@ -3544,7 +3735,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// Attempt to store one global variable for the declaration - even if we
// emit a lot of fields.
- llvm::DIGlobalVariable *GV = nullptr;
+ llvm::DIGlobalVariableExpression *GVE = nullptr;
// If this is an anonymous union then we'll want to emit a global
// variable for each member of the anonymous union so that it's possible
@@ -3553,21 +3744,23 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
const RecordDecl *RD = T->castAs<RecordType>()->getDecl();
assert(RD->isAnonymousStructOrUnion() &&
"unnamed non-anonymous struct or union?");
- GV = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
+ GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
} else {
- GV = DBuilder.createGlobalVariable(
+ auto Align = getDeclAlignIfRequired(D, CGM.getContext());
+ GVE = DBuilder.createGlobalVariableExpression(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasLocalLinkage(), Var,
- getOrCreateStaticDataMemberDeclarationOrNull(D));
+ Var->hasLocalLinkage(), /*Expr=*/nullptr,
+ getOrCreateStaticDataMemberDeclarationOrNull(D), Align);
+ Var->addDebugInfo(GVE);
}
- DeclCache[D->getCanonicalDecl()].reset(GV);
+ DeclCache[D->getCanonicalDecl()].reset(GVE);
}
-void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
- llvm::Constant *Init) {
+void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD, const APValue &Init) {
assert(DebugKind >= codegenoptions::LimitedDebugInfo);
if (VD->hasAttr<NoDebugAttr>())
return;
+ auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
// Create the descriptor for the variable.
llvm::DIFile *Unit = getOrCreateFile(VD->getLocation());
StringRef Name = VD->getName();
@@ -3604,9 +3797,20 @@ void CGDebugInfo::EmitGlobalVariable(const ValueDecl *VD,
auto &GV = DeclCache[VD];
if (GV)
return;
- GV.reset(DBuilder.createGlobalVariable(
+ llvm::DIExpression *InitExpr = nullptr;
+ if (CGM.getContext().getTypeSize(VD->getType()) <= 64) {
+ // FIXME: Add a representation for integer constants wider than 64 bits.
+ if (Init.isInt())
+ InitExpr =
+ DBuilder.createConstantValueExpression(Init.getInt().getExtValue());
+ else if (Init.isFloat())
+ InitExpr = DBuilder.createConstantValueExpression(
+ Init.getFloat().bitcastToAPInt().getZExtValue());
+ }
+ GV.reset(DBuilder.createGlobalVariableExpression(
DContext, Name, StringRef(), Unit, getLineNumber(VD->getLocation()), Ty,
- true, Init, getOrCreateStaticDataMemberDeclarationOrNull(VarD)));
+ true, InitExpr, getOrCreateStaticDataMemberDeclarationOrNull(VarD),
+ Align));
}
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
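A brief illustration (not part of this patch) of the constant-initializer path above; the assumption here is that this code is reached for a constant that is never emitted as storage, so its value (when it fits in 64 bits) is recorded via createConstantValueExpression:

// Illustrative only: the value 42 can be described directly in the debug info
// even if no object named Tag is ever emitted.
struct S { static const int Tag = 42; };
int use() { return S::Tag; }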
@@ -3620,8 +3824,8 @@ void CGDebugInfo::EmitUsingDirective(const UsingDirectiveDecl &UD) {
if (CGM.getCodeGenOpts().getDebugInfo() < codegenoptions::LimitedDebugInfo)
return;
const NamespaceDecl *NSDecl = UD.getNominatedNamespace();
- if (!NSDecl->isAnonymousNamespace() ||
- CGM.getCodeGenOpts().DebugExplicitImport) {
+ if (!NSDecl->isAnonymousNamespace() ||
+ CGM.getCodeGenOpts().DebugExplicitImport) {
DBuilder.createImportedModule(
getCurrentContextDescriptor(cast<Decl>(UD.getDeclContext())),
getOrCreateNameSpace(NSDecl),
@@ -3700,8 +3904,8 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl) {
unsigned LineNo = getLineNumber(NSDecl->getLocation());
llvm::DIFile *FileD = getOrCreateFile(NSDecl->getLocation());
llvm::DIScope *Context = getDeclContextDescriptor(NSDecl);
- llvm::DINamespace *NS =
- DBuilder.createNameSpace(Context, NSDecl->getName(), FileD, LineNo);
+ llvm::DINamespace *NS = DBuilder.createNameSpace(
+ Context, NSDecl->getName(), FileD, LineNo, NSDecl->isInline());
NameSpaceCache[NSDecl].reset(NS);
return NS;
}
@@ -3750,6 +3954,8 @@ void CGDebugInfo::finalize() {
else
Repl = it->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(Repl))
+ Repl = GVE->getVariable();
DBuilder.replaceTemporary(std::move(FwdDecl), cast<llvm::MDNode>(Repl));
}
@@ -3770,3 +3976,12 @@ void CGDebugInfo::EmitExplicitCastType(QualType Ty) {
// Don't ignore in case of explicit cast where it is referenced indirectly.
DBuilder.retainType(DieTy);
}
+
+llvm::DebugLoc CGDebugInfo::SourceLocToDebugLoc(SourceLocation Loc) {
+ if (LexicalBlockStack.empty())
+ return llvm::DebugLoc();
+
+ llvm::MDNode *Scope = LexicalBlockStack.back();
+ return llvm::DebugLoc::get(
+ getLineNumber(Loc), getColumnNumber(Loc), Scope);
+}
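For reference, a minimal source-level sketch (illustrative names, not taken from this change) of a constant that the new APValue-based EmitGlobalVariable can describe by value, provided the value fits in 64 bits; floating-point constants are handled the same way via their bit pattern:

    struct Limits {
      static const int Max = 512;   // integer constant, fits in 64 bits
    };
    // Used only by value, so no out-of-line definition (and no storage) is
    // needed; the debug entry can carry the constant 512 directly.
    int clamp(int v) { return v > Limits::Max ? Limits::Max : v; }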
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 366dd81ac812..ac2e8dd2e0a4 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -15,12 +15,14 @@
#define LLVM_CLANG_LIB_CODEGEN_CGDEBUGINFO_H
#include "CGBuilder.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/Type.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DebugInfo.h"
@@ -32,7 +34,6 @@ class MDNode;
}
namespace clang {
-class CXXMethodDecl;
class ClassTemplateSpecializationDecl;
class GlobalDecl;
class ModuleMap;
@@ -67,6 +68,7 @@ class CGDebugInfo {
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
llvm::DIType *SingletonId = nullptr;
#include "clang/Basic/OpenCLImageTypes.def"
+ llvm::DIType *OCLSamplerDITy = nullptr;
llvm::DIType *OCLEventDITy = nullptr;
llvm::DIType *OCLClkEventDITy = nullptr;
llvm::DIType *OCLQueueDITy = nullptr;
@@ -96,8 +98,7 @@ class CGDebugInfo {
/// List of interfaces we want to keep even if orphaned.
std::vector<void *> RetainedTypes;
- /// Cache of forward declared types to RAUW at the end of
- /// compilation.
+ /// Cache of forward declared types to RAUW at the end of compilation.
std::vector<std::pair<const TagType *, llvm::TrackingMDRef>> ReplaceMap;
/// Cache of replaceable forward declarations (functions and
@@ -155,6 +156,8 @@ class CGDebugInfo {
llvm::DIFile *F);
/// Get Objective-C object type.
llvm::DIType *CreateType(const ObjCObjectType *Ty, llvm::DIFile *F);
+ llvm::DIType *CreateType(const ObjCTypeParamType *Ty, llvm::DIFile *Unit);
+
llvm::DIType *CreateType(const VectorType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const ArrayType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const LValueReferenceType *Ty, llvm::DIFile *F);
@@ -216,6 +219,15 @@ class CGDebugInfo {
SmallVectorImpl<llvm::Metadata *> &EltTys,
llvm::DIType *RecordTy);
+ /// Helper function for CollectCXXBases.
+ /// Adds debug info entries for types in Bases that are not in SeenTypes.
+ void CollectCXXBasesAux(const CXXRecordDecl *RD, llvm::DIFile *Unit,
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DIType *RecordTy,
+ const CXXRecordDecl::base_class_const_range &Bases,
+ llvm::DenseSet<CanonicalDeclPtr<const CXXRecordDecl>> &SeenTypes,
+ llvm::DINode::DIFlags StartingFlags);
+
/// A helper function to collect template parameters.
llvm::DINodeArray CollectTemplateParams(const TemplateParameterList *TPList,
ArrayRef<TemplateArgument> TAList,
@@ -233,9 +245,19 @@ class CGDebugInfo {
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
+ uint64_t offsetInBits,
+ uint32_t AlignInBits,
+ llvm::DIFile *tunit, llvm::DIScope *scope,
+ const RecordDecl *RD = nullptr);
+
+ llvm::DIType *createFieldType(StringRef name, QualType type,
+ SourceLocation loc, AccessSpecifier AS,
uint64_t offsetInBits, llvm::DIFile *tunit,
llvm::DIScope *scope,
- const RecordDecl *RD = nullptr);
+ const RecordDecl *RD = nullptr) {
+ return createFieldType(name, type, loc, AS, offsetInBits, 0, tunit, scope,
+ RD);
+ }
/// Create new bit field member.
llvm::DIType *createBitFieldType(const FieldDecl *BitFieldDecl,
@@ -254,6 +276,8 @@ class CGDebugInfo {
llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DIType *RecordTy, const RecordDecl *RD);
+ void CollectRecordNestedRecord(const RecordDecl *RD,
+ SmallVectorImpl<llvm::Metadata *> &E);
void CollectRecordFields(const RecordDecl *Decl, llvm::DIFile *F,
SmallVectorImpl<llvm::Metadata *> &E,
llvm::DICompositeType *RecordTy);
@@ -261,7 +285,8 @@ class CGDebugInfo {
/// If the C++ class has vtable info then insert appropriate debug
/// info entry in EltTys vector.
void CollectVTableInfo(const CXXRecordDecl *Decl, llvm::DIFile *F,
- SmallVectorImpl<llvm::Metadata *> &EltTys);
+ SmallVectorImpl<llvm::Metadata *> &EltTys,
+ llvm::DICompositeType *RecordTy);
/// @}
/// Create a new lexical block node and push it on the stack.
@@ -295,6 +320,9 @@ public:
/// ignored.
void setLocation(SourceLocation Loc);
+ /// Converts a SourceLocation to a DebugLoc.
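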
+ llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Loc);
+
/// Emit metadata to indicate a change in line/column information in
/// the source file. If the location is invalid, the previous
/// location will be reused.
@@ -350,8 +378,8 @@ public:
/// Emit information about a global variable.
void EmitGlobalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
- /// Emit global variable's debug info.
- void EmitGlobalVariable(const ValueDecl *VD, llvm::Constant *Init);
+ /// Emit a constant global variable's debug info.
+ void EmitGlobalVariable(const ValueDecl *VD, const APValue &Init);
/// Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
@@ -414,6 +442,10 @@ private:
/// Remap a given path with the current debug prefix map
std::string remapDIPath(StringRef) const;
+ /// Compute the file checksum debug info for input file ID.
+ llvm::DIFile::ChecksumKind computeChecksum(FileID FID,
+ SmallString<32> &Checksum) const;
+
/// Get the file debug info descriptor for the input location.
llvm::DIFile *getOrCreateFile(SourceLocation Loc);
@@ -468,14 +500,14 @@ private:
llvm::DIGlobalVariable *
getGlobalVariableForwardDeclaration(const VarDecl *VD);
- /// \brief Return a global variable that represents one of the
- /// collection of global variables created for an anonmyous union.
+ /// Return a global variable that represents one of the collection of global
+ /// variables created for an anonymous union.
///
/// Recursively collect all of the member fields of a global
/// anonymous decl and create static variables for them. The first
/// time this is called it needs to be on a union and then from
/// there we can have additional unnamed fields.
- llvm::DIGlobalVariable *
+ llvm::DIGlobalVariableExpression *
CollectAnonRecordDecls(const RecordDecl *RD, llvm::DIFile *Unit,
unsigned LineNo, StringRef LinkageName,
llvm::GlobalVariable *Var, llvm::DIScope *DContext);
@@ -514,7 +546,7 @@ private:
StringRef &Name, StringRef &LinkageName,
llvm::DIScope *&FDContext,
llvm::DINodeArray &TParamsArray,
- unsigned &Flags);
+ llvm::DINode::DIFlags &Flags);
/// Collect various properties of a VarDecl.
void collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
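A small hierarchy of the kind the new CollectCXXBasesAux/SeenTypes bookkeeping appears to target (illustrative, not taken from this change): a virtual base reachable through more than one path should be collected only once:

    struct Base { int b; };
    struct Left : virtual Base {};
    struct Right : virtual Base {};
    // Base is an indirect virtual base of Derived via both Left and Right;
    // the SeenTypes set keeps it from being recorded twice.
    struct Derived : Left, Right {};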
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 89407cd70c3d..d76136380160 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -13,6 +13,7 @@
#include "CodeGenFunction.h"
#include "CGBlocks.h"
+#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGDebugInfo.h"
#include "CGOpenCLRuntime.h"
@@ -77,6 +78,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::PragmaDetectMismatch:
case Decl::AccessSpec:
case Decl::LinkageSpec:
+ case Decl::Export:
case Decl::ObjCPropertyImpl:
case Decl::FileScopeAsm:
case Decl::Friend:
@@ -87,6 +89,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::UsingShadow:
case Decl::ConstructorUsingShadow:
case Decl::ObjCTypeParam:
+ case Decl::Binding:
llvm_unreachable("Declaration should not be in declstmts!");
case Decl::Function: // void X();
case Decl::Record: // struct/union/class X;
@@ -110,15 +113,25 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDecl(cast<UsingDecl>(D));
return;
+ case Decl::UsingPack:
+ for (auto *Using : cast<UsingPackDecl>(D).expansions())
+ EmitDecl(*Using);
+ return;
case Decl::UsingDirective: // using namespace X; [C++]
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitUsingDirective(cast<UsingDirectiveDecl>(D));
return;
- case Decl::Var: {
+ case Decl::Var:
+ case Decl::Decomposition: {
const VarDecl &VD = cast<VarDecl>(D);
assert(VD.isLocalVarDecl() &&
"Should not see file-scope variables inside a function!");
- return EmitVarDecl(VD);
+ EmitVarDecl(VD);
+ if (auto *DD = dyn_cast<DecompositionDecl>(&VD))
+ for (auto *B : DD->bindings())
+ if (auto *HD = B->getHoldingVar())
+ EmitVarDecl(*HD);
+ return;
}
case Decl::OMPDeclareReduction:
@@ -526,7 +539,8 @@ namespace {
CallArgList Args;
Args.add(RValue::get(Arg),
CGF.getContext().getPointerType(Var.getType()));
- CGF.EmitCall(FnInfo, CleanupFn, ReturnValueSlot(), Args);
+ auto Callee = CGCallee::forDirect(CleanupFn);
+ CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
}
};
} // end anonymous namespace
@@ -698,7 +712,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
}
auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
- llvm::Value *zero = llvm::ConstantPointerNull::get(ty);
+ llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
@@ -765,37 +779,6 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
EmitStoreOfScalar(value, lvalue, /* isInitialization */ true);
}
-/// EmitScalarInit - Initialize the given lvalue with the given object.
-void CodeGenFunction::EmitScalarInit(llvm::Value *init, LValue lvalue) {
- Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
- if (!lifetime)
- return EmitStoreThroughLValue(RValue::get(init), lvalue, true);
-
- switch (lifetime) {
- case Qualifiers::OCL_None:
- llvm_unreachable("present but none");
-
- case Qualifiers::OCL_ExplicitNone:
- // nothing to do
- break;
-
- case Qualifiers::OCL_Strong:
- init = EmitARCRetain(lvalue.getType(), init);
- break;
-
- case Qualifiers::OCL_Weak:
- // Initialize and then skip the primitive store.
- EmitARCInitWeak(lvalue.getAddress(), init);
- return;
-
- case Qualifiers::OCL_Autoreleasing:
- init = EmitARCRetainAutorelease(lvalue.getType(), init);
- break;
- }
-
- EmitStoreOfScalar(init, lvalue, /* isInitialization */ true);
-}
-
/// canEmitInitWithFewStoresAfterMemset - Decide whether we can emit the
/// non-zero parts of the specified initializer with equal or fewer than
/// NumStores scalar stores.
@@ -907,29 +890,12 @@ void CodeGenFunction::EmitAutoVarDecl(const VarDecl &D) {
EmitAutoVarCleanups(emission);
}
-/// shouldEmitLifetimeMarkers - Decide whether we need emit the life-time
-/// markers.
-static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
- const LangOptions &LangOpts) {
- // Asan uses markers for use-after-scope checks.
- if (CGOpts.SanitizeAddressUseAfterScope)
- return true;
-
- // Disable lifetime markers in msan builds.
- // FIXME: Remove this when msan works with lifetime markers.
- if (LangOpts.Sanitize.has(SanitizerKind::Memory))
- return false;
-
- // For now, only in optimized builds.
- return CGOpts.OptimizationLevel != 0;
-}
-
/// Emit a lifetime.begin marker if some criteria are satisfied.
/// \return a pointer to the temporary size Value if a marker was emitted, null
/// otherwise
llvm::Value *CodeGenFunction::EmitLifetimeStart(uint64_t Size,
llvm::Value *Addr) {
- if (!shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), getLangOpts()))
+ if (!ShouldEmitLifetimeMarkers)
return nullptr;
llvm::Value *SizeV = llvm::ConstantInt::get(Int64Ty, Size);
@@ -986,8 +952,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// If the variable's a const type, and it's neither an NRVO
// candidate nor a __block variable and has no mutable members,
// emit it as a global instead.
- if (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
- CGM.isTypeConstant(Ty, true)) {
+ // The exception is a variable located in a non-constant address space
+ // in OpenCL.
+ if ((!getLangOpts().OpenCL ||
+ Ty.getAddressSpace() == LangAS::opencl_constant) &&
+ (CGM.getCodeGenOpts().MergeAllConstants && !NRVO && !isByRef &&
+ CGM.isTypeConstant(Ty, true))) {
EmitStaticVarDecl(D, llvm::GlobalValue::InternalLinkage);
// Signal this condition to later callbacks.
@@ -1049,12 +1019,18 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
bool IsMSCatchParam =
D.isExceptionVariable() && getTarget().getCXXABI().isMicrosoft();
- // Emit a lifetime intrinsic if meaningful. There's no point
- // in doing this if we don't have a valid insertion point (?).
+ // Emit a lifetime intrinsic if meaningful. There's no point in doing this
+ // if we don't have a valid insertion point (?).
if (HaveInsertPoint() && !IsMSCatchParam) {
- uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
- emission.SizeForLifetimeMarkers =
- EmitLifetimeStart(size, address.getPointer());
+ // goto or switch-case statements can break a variable's lifetime into
+ // several regions, which takes more effort to handle correctly (PR28267).
+ // This is a rare case, so it is better to omit the intrinsics than to
+ // have them placed incorrectly.
+ if (!Bypasses.IsBypassed(&D)) {
+ uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
+ emission.SizeForLifetimeMarkers =
+ EmitLifetimeStart(size, address.getPointer());
+ }
} else {
assert(!emission.useLifetimeMarkers());
}
@@ -1257,10 +1233,16 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
// Otherwise, create a temporary global with the initializer then
// memcpy from the global to the alloca.
std::string Name = getStaticDeclName(CGM, D);
+ unsigned AS = 0;
+ if (getLangOpts().OpenCL) {
+ AS = CGM.getContext().getTargetAddressSpace(LangAS::opencl_constant);
+ BP = llvm::PointerType::getInt8PtrTy(getLLVMContext(), AS);
+ }
llvm::GlobalVariable *GV =
new llvm::GlobalVariable(CGM.getModule(), constant->getType(), true,
llvm::GlobalValue::PrivateLinkage,
- constant, Name);
+ constant, Name, nullptr,
+ llvm::GlobalValue::NotThreadLocal, AS);
GV->setAlignment(Loc.getAlignment().getQuantity());
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
@@ -1762,6 +1744,24 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
setBlockContextParameter(IPD, ArgNo, Arg.getDirectValue());
return;
}
+
+ // Apply any prologue 'this' adjustments required by the ABI. Be careful to
+ // handle the case where 'this' is passed indirectly as part of an inalloca
+ // struct.
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
+ if (MD->isVirtual() && IPD == CXXABIThisDecl) {
+ llvm::Value *This = Arg.isIndirect()
+ ? Builder.CreateLoad(Arg.getIndirectAddress())
+ : Arg.getDirectValue();
+ This = CGM.getCXXABI().adjustThisParameterInVirtualFunctionPrologue(
+ *this, CurGD, This);
+ if (Arg.isIndirect())
+ Builder.CreateStore(This, Arg.getIndirectAddress());
+ else
+ Arg = ParamValue::forDirect(This);
+ }
+ }
}
Address DeclPtr = Address::invalid();
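A sketch of the C++17 construct that the new Decl::Decomposition and Decl::Binding handling in EmitDecl covers (illustrative names):

    #include <utility>

    std::pair<int, int> getPoint();

    void use() {
      auto [x, y] = getPoint();   // one DecompositionDecl, two BindingDecls
      (void)x;                    // any binding with a holding variable is
      (void)y;                    // emitted right after the decomposition
    }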
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 89d142e44b49..8d9d0b21bfe1 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -121,13 +121,15 @@ static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
/// constant from this point onwards.
static void EmitDeclInvariant(CodeGenFunction &CGF, const VarDecl &D,
llvm::Constant *Addr) {
- // Don't emit the intrinsic if we're not optimizing.
+ // Do not emit the intrinsic if we're not optimizing.
if (!CGF.CGM.getCodeGenOpts().OptimizationLevel)
return;
// Grab the llvm.invariant.start intrinsic.
llvm::Intrinsic::ID InvStartID = llvm::Intrinsic::invariant_start;
- llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID);
+ // Overloaded address space type.
+ llvm::Type *ObjectPtr[1] = {CGF.Int8PtrTy};
+ llvm::Constant *InvariantStart = CGF.CGM.getIntrinsic(InvStartID, ObjectPtr);
// Emit a call with the size in bytes of the object.
CharUnits WidthChars = CGF.getContext().getTypeSizeInChars(D.getType());
@@ -235,7 +237,8 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
- CGM.CreateRuntimeFunction(atexitTy, "atexit");
+ CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeSet(),
+ /*Local=*/true);
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
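A sketch of a declaration that exercises the updated llvm.invariant.start emission (illustrative; the intrinsic is only emitted when optimizing): a const object with a dynamic initializer whose storage becomes invariant once its constructor has run:

    struct Config {
      Config();     // dynamic initializer
      int limit;
    };
    // Once Config::Config() has run, the object's bytes are treated as
    // invariant; the intrinsic is now requested with an explicit i8* overload.
    const Config GlobalConfig;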
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index 4a7dc4205e09..7b7880e07a95 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -221,10 +221,9 @@ const EHPersonality &EHPersonality::get(CodeGenFunction &CGF) {
static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
- llvm::Constant *Fn =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
- Personality.PersonalityFn);
- return Fn;
+ return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
+ Personality.PersonalityFn,
+ llvm::AttributeSet(), /*Local=*/true);
}
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
@@ -698,6 +697,10 @@ llvm::BasicBlock *CodeGenFunction::getInvokeDestImpl() {
return nullptr;
}
+ // CUDA device code doesn't have exceptions.
+ if (LO.CUDA && LO.CUDAIsDevice)
+ return nullptr;
+
// Check the innermost scope for a cached landing pad. If this is
// a non-EH cleanup, we'll check enclosing scopes in EmitLandingPad.
llvm::BasicBlock *LP = EHStack.begin()->getCachedLandingPad();
@@ -1429,7 +1432,8 @@ struct PerformSEHFinally final : EHScopeStack::Cleanup {
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, Args);
- CGF.EmitCall(FnInfo, OutlinedFinally, ReturnValueSlot(), Args);
+ auto Callee = CGCallee::forDirect(OutlinedFinally);
+ CGF.EmitCall(FnInfo, Callee, ReturnValueSlot(), Args);
}
};
} // end anonymous namespace
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 5f3b290d8eb1..183201c78e36 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/NSAPI.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringExtras.h"
@@ -36,6 +37,8 @@
#include "llvm/Support/Path.h"
#include "llvm/Transforms/Utils/SanitizerStats.h"
+#include <string>
+
using namespace clang;
using namespace CodeGen;
@@ -607,7 +610,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::ConstantInt::get(SizeTy, AlignVal),
llvm::ConstantInt::get(Int8Ty, TCK)
};
- EmitCheck(Checks, "type_mismatch", StaticData, Ptr);
+ EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData, Ptr);
}
// If possible, check that the vptr indicates that there is a subobject of
@@ -675,7 +678,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
};
llvm::Value *DynamicData[] = { Ptr, Hash };
EmitCheck(std::make_pair(EqualHash, SanitizerKind::Vptr),
- "dynamic_type_cache_miss", StaticData, DynamicData);
+ SanitizerHandler::DynamicTypeCacheMiss, StaticData,
+ DynamicData);
}
}
@@ -708,6 +712,8 @@ static bool isFlexibleArrayMemberExpr(const Expr *E) {
DeclContext::decl_iterator(const_cast<FieldDecl *>(FD)));
return ++FI == FD->getParent()->field_end();
}
+ } else if (const auto *IRE = dyn_cast<ObjCIvarRefExpr>(E)) {
+ return IRE->getDecl()->getNextIvar() == nullptr;
}
return false;
@@ -763,8 +769,8 @@ void CodeGenFunction::EmitBoundsCheck(const Expr *E, const Expr *Base,
};
llvm::Value *Check = Accessed ? Builder.CreateICmpULT(IndexVal, BoundVal)
: Builder.CreateICmpULE(IndexVal, BoundVal);
- EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds), "out_of_bounds",
- StaticData, Index);
+ EmitCheck(std::make_pair(Check, SanitizerKind::ArrayBounds),
+ SanitizerHandler::OutOfBounds, StaticData, Index);
}
@@ -1180,10 +1186,10 @@ CodeGenFunction::tryEmitAsConstant(DeclRefExpr *refExpr) {
// This should probably fire even for
if (isa<VarDecl>(value)) {
if (!getContext().DeclMustBeEmitted(cast<VarDecl>(value)))
- EmitDeclRefExprDbgValue(refExpr, C);
+ EmitDeclRefExprDbgValue(refExpr, result.Val);
} else {
assert(isa<EnumConstantDecl>(value));
- EmitDeclRefExprDbgValue(refExpr, C);
+ EmitDeclRefExprDbgValue(refExpr, result.Val);
}
// If we emitted a reference constant, we need to dereference that.
@@ -1217,11 +1223,10 @@ static bool hasBooleanRepresentation(QualType Ty) {
static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::APInt &Min, llvm::APInt &End,
- bool StrictEnums) {
+ bool StrictEnums, bool IsBool) {
const EnumType *ET = Ty->getAs<EnumType>();
bool IsRegularCPlusPlusEnum = CGF.getLangOpts().CPlusPlus && StrictEnums &&
ET && !ET->getDecl()->isFixed();
- bool IsBool = hasBooleanRepresentation(Ty);
if (!IsBool && !IsRegularCPlusPlusEnum)
return false;
@@ -1251,8 +1256,8 @@ static bool getRangeForType(CodeGenFunction &CGF, QualType Ty,
llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
llvm::APInt Min, End;
- if (!getRangeForType(*this, Ty, Min, End,
- CGM.getCodeGenOpts().StrictEnums))
+ if (!getRangeForType(*this, Ty, Min, End, CGM.getCodeGenOpts().StrictEnums,
+ hasBooleanRepresentation(Ty)))
return nullptr;
llvm::MDBuilder MDHelper(getLLVMContext());
@@ -1311,14 +1316,15 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
false /*ConvertTypeToTag*/);
}
- bool NeedsBoolCheck =
- SanOpts.has(SanitizerKind::Bool) && hasBooleanRepresentation(Ty);
+ bool IsBool = hasBooleanRepresentation(Ty) ||
+ NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
+ bool NeedsBoolCheck = SanOpts.has(SanitizerKind::Bool) && IsBool;
bool NeedsEnumCheck =
SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
if (NeedsBoolCheck || NeedsEnumCheck) {
SanitizerScope SanScope(this);
llvm::APInt Min, End;
- if (getRangeForType(*this, Ty, Min, End, true)) {
+ if (getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) {
--End;
llvm::Value *Check;
if (!Min)
@@ -1336,8 +1342,8 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
EmitCheckTypeDescriptor(Ty)
};
SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
- EmitCheck(std::make_pair(Check, Kind), "load_invalid_value", StaticArgs,
- EmitCheckValue(Load));
+ EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
+ StaticArgs, EmitCheckValue(Load));
}
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
@@ -1627,11 +1633,19 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
break;
case Qualifiers::OCL_Strong:
+ if (isInit) {
+ Src = RValue::get(EmitARCRetain(Dst.getType(), Src.getScalarVal()));
+ break;
+ }
EmitARCStoreStrong(Dst, Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Weak:
- EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
+ if (isInit)
+ // Initialize and then skip the primitive store.
+ EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
+ else
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(), /*ignore*/ true);
return;
case Qualifiers::OCL_Autoreleasing:
@@ -2015,9 +2029,14 @@ static LValue EmitGlobalVarDeclLValue(CodeGenFunction &CGF,
return LV;
}
-static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
- const Expr *E, const FunctionDecl *FD) {
- llvm::Value *V = CGF.CGM.GetAddrOfFunction(FD);
+static llvm::Constant *EmitFunctionDeclPointer(CodeGenModule &CGM,
+ const FunctionDecl *FD) {
+ if (FD->hasAttr<WeakRefAttr>()) {
+ ConstantAddress aliasee = CGM.GetWeakRefReference(FD);
+ return aliasee.getPointer();
+ }
+
+ llvm::Constant *V = CGM.GetAddrOfFunction(FD);
if (!FD->hasPrototype()) {
if (const FunctionProtoType *Proto =
FD->getType()->getAs<FunctionProtoType>()) {
@@ -2025,11 +2044,18 @@ static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
// isn't the same as the type of a use. Correct for this with a
// bitcast.
QualType NoProtoType =
- CGF.getContext().getFunctionNoProtoType(Proto->getReturnType());
- NoProtoType = CGF.getContext().getPointerType(NoProtoType);
- V = CGF.Builder.CreateBitCast(V, CGF.ConvertType(NoProtoType));
+ CGM.getContext().getFunctionNoProtoType(Proto->getReturnType());
+ NoProtoType = CGM.getContext().getPointerType(NoProtoType);
+ V = llvm::ConstantExpr::getBitCast(V,
+ CGM.getTypes().ConvertType(NoProtoType));
}
}
+ return V;
+}
+
+static LValue EmitFunctionDeclLValue(CodeGenFunction &CGF,
+ const Expr *E, const FunctionDecl *FD) {
+ llvm::Value *V = EmitFunctionDeclPointer(CGF.CGM, FD);
CharUnits Alignment = CGF.getContext().getDeclAlign(FD);
return CGF.MakeAddrLValue(V, E->getType(), Alignment, AlignmentSource::Decl);
}
@@ -2205,6 +2231,12 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
if (const auto *FD = dyn_cast<FunctionDecl>(ND))
return EmitFunctionDeclLValue(*this, E, FD);
+ // FIXME: While we're emitting a binding from an enclosing scope, all other
+ // DeclRefExprs we see should be implicitly treated as if they also refer to
+ // an enclosing scope.
+ if (const auto *BD = dyn_cast<BindingDecl>(ND))
+ return EmitLValue(BD->getBinding());
+
llvm_unreachable("Unhandled DeclRefExpr");
}
@@ -2291,9 +2323,19 @@ LValue CodeGenFunction::EmitPredefinedLValue(const PredefinedExpr *E) {
StringRef NameItems[] = {
PredefinedExpr::getIdentTypeName(E->getIdentType()), FnName};
std::string GVName = llvm::join(NameItems, NameItems + 2, ".");
- if (CurCodeDecl && isa<BlockDecl>(CurCodeDecl)) {
- auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
- return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ if (auto *BD = dyn_cast<BlockDecl>(CurCodeDecl)) {
+ std::string Name = SL->getString();
+ if (!Name.empty()) {
+ unsigned Discriminator =
+ CGM.getCXXABI().getMangleContext().getBlockId(BD, true);
+ if (Discriminator)
+ Name += "_" + Twine(Discriminator + 1).str();
+ auto C = CGM.GetAddrOfConstantCString(Name, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ } else {
+ auto C = CGM.GetAddrOfConstantCString(FnName, GVName.c_str());
+ return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
+ }
}
auto C = CGM.GetAddrOfConstantStringFromLiteral(SL, GVName);
return MakeAddrLValue(C, E->getType(), AlignmentSource::Decl);
@@ -2461,17 +2503,35 @@ static CheckRecoverableKind getRecoverableKind(SanitizerMask Kind) {
}
}
+namespace {
+struct SanitizerHandlerInfo {
+ char const *const Name;
+ unsigned Version;
+};
+}
+
+const SanitizerHandlerInfo SanitizerHandlers[] = {
+#define SANITIZER_CHECK(Enum, Name, Version) {#Name, Version},
+ LIST_SANITIZER_CHECKS
+#undef SANITIZER_CHECK
+};
+
static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::FunctionType *FnType,
ArrayRef<llvm::Value *> FnArgs,
- StringRef CheckName,
+ SanitizerHandler CheckHandler,
CheckRecoverableKind RecoverKind, bool IsFatal,
llvm::BasicBlock *ContBB) {
assert(IsFatal || RecoverKind != CheckRecoverableKind::Unrecoverable);
bool NeedsAbortSuffix =
IsFatal && RecoverKind != CheckRecoverableKind::Unrecoverable;
- std::string FnName = ("__ubsan_handle_" + CheckName +
- (NeedsAbortSuffix ? "_abort" : "")).str();
+ const SanitizerHandlerInfo &CheckInfo = SanitizerHandlers[CheckHandler];
+ const StringRef CheckName = CheckInfo.Name;
+ std::string FnName =
+ ("__ubsan_handle_" + CheckName +
+ (CheckInfo.Version ? "_v" + llvm::utostr(CheckInfo.Version) : "") +
+ (NeedsAbortSuffix ? "_abort" : ""))
+ .str();
bool MayReturn =
!IsFatal || RecoverKind == CheckRecoverableKind::AlwaysRecoverable;
@@ -2485,7 +2545,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
llvm::AttributeSet::get(CGF.getLLVMContext(),
- llvm::AttributeSet::FunctionIndex, B));
+ llvm::AttributeSet::FunctionIndex, B),
+ /*Local=*/true);
llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
if (!MayReturn) {
HandlerCall->setDoesNotReturn();
@@ -2497,10 +2558,13 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
void CodeGenFunction::EmitCheck(
ArrayRef<std::pair<llvm::Value *, SanitizerMask>> Checked,
- StringRef CheckName, ArrayRef<llvm::Constant *> StaticArgs,
+ SanitizerHandler CheckHandler, ArrayRef<llvm::Constant *> StaticArgs,
ArrayRef<llvm::Value *> DynamicArgs) {
assert(IsSanitizerScope);
assert(Checked.size() > 0);
+ assert(CheckHandler >= 0 &&
+ CheckHandler < sizeof(SanitizerHandlers) / sizeof(*SanitizerHandlers));
+ const StringRef CheckName = SanitizerHandlers[CheckHandler].Name;
llvm::Value *FatalCond = nullptr;
llvm::Value *RecoverableCond = nullptr;
@@ -2580,7 +2644,7 @@ void CodeGenFunction::EmitCheck(
if (!FatalCond || !RecoverableCond) {
// Simple case: we need to generate a single handler call, either
// fatal, or non-fatal.
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind,
(FatalCond != nullptr), Cont);
} else {
// Emit two handler calls: first one for set of unrecoverable checks,
@@ -2590,10 +2654,10 @@ void CodeGenFunction::EmitCheck(
llvm::BasicBlock *FatalHandlerBB = createBasicBlock("fatal." + CheckName);
Builder.CreateCondBr(FatalCond, NonFatalHandlerBB, FatalHandlerBB);
EmitBlock(FatalHandlerBB);
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, true,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, true,
NonFatalHandlerBB);
EmitBlock(NonFatalHandlerBB);
- emitCheckHandlerCall(*this, FnType, Args, CheckName, RecoverKind, false,
+ emitCheckHandlerCall(*this, FnType, Args, CheckHandler, RecoverKind, false,
Cont);
}
@@ -2718,7 +2782,7 @@ void CodeGenFunction::EmitCfiCheckFail() {
llvm::Value *Cond =
Builder.CreateICmpNE(CheckKind, llvm::ConstantInt::get(Int8Ty, Kind));
if (CGM.getLangOpts().Sanitize.has(Mask))
- EmitCheck(std::make_pair(Cond, Mask), "cfi_check_fail", {},
+ EmitCheck(std::make_pair(Cond, Mask), SanitizerHandler::CFICheckFail, {},
{Data, Addr, ValidVtable});
else
EmitTrapCheck(Cond);
@@ -2753,10 +2817,11 @@ void CodeGenFunction::EmitTrapCheck(llvm::Value *Checked) {
llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
llvm::CallInst *TrapCall = Builder.CreateCall(CGM.getIntrinsic(IntrID));
- if (!CGM.getCodeGenOpts().TrapFuncName.empty())
- TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex,
- "trap-func-name",
- CGM.getCodeGenOpts().TrapFuncName);
+ if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
+ auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
+ CGM.getCodeGenOpts().TrapFuncName);
+ TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex, A);
+ }
return TrapCall;
}
@@ -2869,13 +2934,30 @@ static Address emitArraySubscriptGEP(CodeGenFunction &CGF, Address addr,
LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
bool Accessed) {
- // The index must always be an integer, which is not an aggregate. Emit it.
- llvm::Value *Idx = EmitScalarExpr(E->getIdx());
- QualType IdxTy = E->getIdx()->getType();
- bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
+ // The index must always be an integer, which is not an aggregate. Emit it
+ // in lexical order (this complexity is, sadly, required by C++17).
+ llvm::Value *IdxPre =
+ (E->getLHS() == E->getIdx()) ? EmitScalarExpr(E->getIdx()) : nullptr;
+ auto EmitIdxAfterBase = [&, IdxPre](bool Promote) -> llvm::Value * {
+ auto *Idx = IdxPre;
+ if (E->getLHS() != E->getIdx()) {
+ assert(E->getRHS() == E->getIdx() && "index was neither LHS nor RHS");
+ Idx = EmitScalarExpr(E->getIdx());
+ }
+
+ QualType IdxTy = E->getIdx()->getType();
+ bool IdxSigned = IdxTy->isSignedIntegerOrEnumerationType();
- if (SanOpts.has(SanitizerKind::ArrayBounds))
- EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
+ if (SanOpts.has(SanitizerKind::ArrayBounds))
+ EmitBoundsCheck(E, E->getBase(), Idx, IdxTy, Accessed);
+
+ // Extend or truncate the index type to 32 or 64-bits.
+ if (Promote && Idx->getType() != IntPtrTy)
+ Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
+
+ return Idx;
+ };
+ IdxPre = nullptr;
// If the base is a vector type, then we are forming a vector element lvalue
// with this subscript.
@@ -2883,6 +2965,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
!isa<ExtVectorElementExpr>(E->getBase())) {
// Emit the vector as an lvalue to get its address.
LValue LHS = EmitLValue(E->getBase());
+ auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
return LValue::MakeVectorElt(LHS.getAddress(), Idx,
E->getBase()->getType(),
@@ -2891,13 +2974,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// All the other cases basically behave like simple offsetting.
- // Extend or truncate the index type to 32 or 64-bits.
- if (Idx->getType() != IntPtrTy)
- Idx = Builder.CreateIntCast(Idx, IntPtrTy, IdxSigned, "idxprom");
-
// Handle the extvector case we ignored above.
if (isa<ExtVectorElementExpr>(E->getBase())) {
LValue LV = EmitLValue(E->getBase());
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Address Addr = EmitExtVectorElementLValue(LV);
QualType EltType = LV.getType()->castAs<VectorType>()->getElementType();
@@ -2913,6 +2993,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// it. It needs to be emitted first in case it's what captures
// the VLA bounds.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// The element count here is the total number of non-VLA elements.
llvm::Value *numElements = getVLASize(vla).first;
@@ -2932,14 +3013,16 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
} else if (const ObjCObjectType *OIT = E->getType()->getAs<ObjCObjectType>()){
// Indexing over an interface, as in "NSString *P; P[4];"
- CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
- llvm::Value *InterfaceSizeVal =
- llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());;
-
- llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
// Emit the base pointer.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
+
+ CharUnits InterfaceSize = getContext().getTypeSizeInChars(OIT);
+ llvm::Value *InterfaceSizeVal =
+ llvm::ConstantInt::get(Idx->getType(), InterfaceSize.getQuantity());
+
+ llvm::Value *ScaledIdx = Builder.CreateMul(Idx, InterfaceSizeVal);
// We don't necessarily build correct LLVM struct types for ObjC
// interfaces, so we can't rely on GEP to do this scaling
@@ -2971,6 +3054,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
ArrayLV = EmitArraySubscriptExpr(ASE, /*Accessed*/ true);
else
ArrayLV = EmitLValue(Array);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
// Propagate the alignment from the array itself to the result.
Addr = emitArraySubscriptGEP(*this, ArrayLV.getAddress(),
@@ -2981,6 +3065,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
} else {
// The base must be a pointer; emit it with an estimate of its alignment.
Addr = EmitPointerWithAlignment(E->getBase(), &AlignSource);
+ auto *Idx = EmitIdxAfterBase(/*Promote*/true);
Addr = emitArraySubscriptGEP(*this, Addr, Idx, E->getType(),
!getLangOpts().isSignedOverflowDefined());
}
@@ -3463,7 +3548,7 @@ LValue CodeGenFunction::EmitInitListLValue(const InitListExpr *E) {
return EmitAggExprToLValue(E);
// An lvalue initializer list must be initializing a reference.
- assert(E->getNumInits() == 1 && "reference init with multiple values");
+ assert(E->isTransparent() && "non-transparent glvalue init list");
return EmitLValue(E->getInit(0));
}
@@ -3602,6 +3687,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_ARCExtendBlockObject:
case CK_CopyAndAutoreleaseBlockObject:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
return EmitUnsupportedLValue(E, "unexpected cast lvalue");
case CK_Dependent:
@@ -3695,6 +3781,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getAlignmentSource());
}
+ case CK_ZeroToOCLQueue:
+ llvm_unreachable("NULL to OpenCL queue lvalue cast is not valid");
case CK_ZeroToOCLEvent:
llvm_unreachable("NULL to OpenCL event lvalue cast is not valid");
}
@@ -3743,70 +3831,86 @@ RValue CodeGenFunction::EmitCallExpr(const CallExpr *E,
if (const auto *CE = dyn_cast<CUDAKernelCallExpr>(E))
return EmitCUDAKernelCallExpr(CE, ReturnValue);
- const Decl *TargetDecl = E->getCalleeDecl();
- if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
- if (unsigned builtinID = FD->getBuiltinID())
- return EmitBuiltinExpr(FD, builtinID, E, ReturnValue);
- }
-
if (const auto *CE = dyn_cast<CXXOperatorCallExpr>(E))
- if (const CXXMethodDecl *MD = dyn_cast_or_null<CXXMethodDecl>(TargetDecl))
+ if (const CXXMethodDecl *MD =
+ dyn_cast_or_null<CXXMethodDecl>(CE->getCalleeDecl()))
return EmitCXXOperatorMemberCallExpr(CE, MD, ReturnValue);
- if (const auto *PseudoDtor =
- dyn_cast<CXXPseudoDestructorExpr>(E->getCallee()->IgnoreParens())) {
- QualType DestroyedType = PseudoDtor->getDestroyedType();
- if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
- // Automatic Reference Counting:
- // If the pseudo-expression names a retainable object with weak or
- // strong lifetime, the object shall be released.
- Expr *BaseExpr = PseudoDtor->getBase();
- Address BaseValue = Address::invalid();
- Qualifiers BaseQuals;
-
- // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
- if (PseudoDtor->isArrow()) {
- BaseValue = EmitPointerWithAlignment(BaseExpr);
- const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
- BaseQuals = PTy->getPointeeType().getQualifiers();
- } else {
- LValue BaseLV = EmitLValue(BaseExpr);
- BaseValue = BaseLV.getAddress();
- QualType BaseTy = BaseExpr->getType();
- BaseQuals = BaseTy.getQualifiers();
- }
+ CGCallee callee = EmitCallee(E->getCallee());
- switch (DestroyedType.getObjCLifetime()) {
- case Qualifiers::OCL_None:
- case Qualifiers::OCL_ExplicitNone:
- case Qualifiers::OCL_Autoreleasing:
- break;
+ if (callee.isBuiltin()) {
+ return EmitBuiltinExpr(callee.getBuiltinDecl(), callee.getBuiltinID(),
+ E, ReturnValue);
+ }
- case Qualifiers::OCL_Strong:
- EmitARCRelease(Builder.CreateLoad(BaseValue,
- PseudoDtor->getDestroyedType().isVolatileQualified()),
- ARCPreciseLifetime);
- break;
+ if (callee.isPseudoDestructor()) {
+ return EmitCXXPseudoDestructorExpr(callee.getPseudoDestructorExpr());
+ }
- case Qualifiers::OCL_Weak:
- EmitARCDestroyWeak(BaseValue);
- break;
- }
- } else {
- // C++ [expr.pseudo]p1:
- // The result shall only be used as the operand for the function call
- // operator (), and the result of such a call has type void. The only
- // effect is the evaluation of the postfix-expression before the dot or
- // arrow.
- EmitScalarExpr(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), callee, E, ReturnValue);
+}
+
+/// Emit a CallExpr without considering whether it might be a subclass.
+RValue CodeGenFunction::EmitSimpleCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ CGCallee Callee = EmitCallee(E->getCallee());
+ return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue);
+}
+
+static CGCallee EmitDirectCallee(CodeGenFunction &CGF, const FunctionDecl *FD) {
+ if (auto builtinID = FD->getBuiltinID()) {
+ return CGCallee::forBuiltin(builtinID, FD);
+ }
+
+ llvm::Constant *calleePtr = EmitFunctionDeclPointer(CGF.CGM, FD);
+ return CGCallee::forDirect(calleePtr, FD);
+}
+
+CGCallee CodeGenFunction::EmitCallee(const Expr *E) {
+ E = E->IgnoreParens();
+
+ // Look through function-to-pointer decay.
+ if (auto ICE = dyn_cast<ImplicitCastExpr>(E)) {
+ if (ICE->getCastKind() == CK_FunctionToPointerDecay ||
+ ICE->getCastKind() == CK_BuiltinFnToFnPtr) {
+ return EmitCallee(ICE->getSubExpr());
}
- return RValue::get(nullptr);
+ // Resolve direct calls.
+ } else if (auto DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (auto FD = dyn_cast<FunctionDecl>(DRE->getDecl())) {
+ return EmitDirectCallee(*this, FD);
+ }
+ } else if (auto ME = dyn_cast<MemberExpr>(E)) {
+ if (auto FD = dyn_cast<FunctionDecl>(ME->getMemberDecl())) {
+ EmitIgnoredExpr(ME->getBase());
+ return EmitDirectCallee(*this, FD);
+ }
+
+ // Look through template substitutions.
+ } else if (auto NTTP = dyn_cast<SubstNonTypeTemplateParmExpr>(E)) {
+ return EmitCallee(NTTP->getReplacement());
+
+ // Treat pseudo-destructor calls differently.
+ } else if (auto PDE = dyn_cast<CXXPseudoDestructorExpr>(E)) {
+ return CGCallee::forPseudoDestructor(PDE);
}
- llvm::Value *Callee = EmitScalarExpr(E->getCallee());
- return EmitCall(E->getCallee()->getType(), Callee, E, ReturnValue,
- TargetDecl);
+ // Otherwise, we have an indirect reference.
+ llvm::Value *calleePtr;
+ QualType functionType;
+ if (auto ptrType = E->getType()->getAs<PointerType>()) {
+ calleePtr = EmitScalarExpr(E);
+ functionType = ptrType->getPointeeType();
+ } else {
+ functionType = E->getType();
+ calleePtr = EmitLValue(E).getPointer();
+ }
+ assert(functionType->isFunctionType());
+ CGCalleeInfo calleeInfo(functionType->getAs<FunctionProtoType>(),
+ E->getReferencedDeclOfCallee());
+ CGCallee callee(calleeInfo, calleePtr);
+ return callee;
}
LValue CodeGenFunction::EmitBinaryOperatorLValue(const BinaryOperator *E) {
@@ -3982,22 +4086,15 @@ LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
AlignmentSource::Decl);
}
-RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
+RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee,
const CallExpr *E, ReturnValueSlot ReturnValue,
- CGCalleeInfo CalleeInfo, llvm::Value *Chain) {
+ llvm::Value *Chain) {
// Get the actual function type. The callee type will always be a pointer to
// function type or a block pointer type.
assert(CalleeType->isFunctionPointerType() &&
"Call must have function pointer type!");
- // Preserve the non-canonical function type because things like exception
- // specifications disappear in the canonical type. That information is useful
- // to drive the generation of more accurate code for this call later on.
- const FunctionProtoType *NonCanonicalFTP = CalleeType->getAs<PointerType>()
- ->getPointeeType()
- ->getAs<FunctionProtoType>();
-
- const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
+ const Decl *TargetDecl = OrigCallee.getAbstractInfo().getCalleeDecl();
if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl))
// We can only guarantee that a function is called from the correct
@@ -4015,6 +4112,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
const auto *FnType =
cast<FunctionType>(cast<PointerType>(CalleeType)->getPointeeType());
+ CGCallee Callee = OrigCallee;
+
if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function) &&
(!TargetDecl || !isa<FunctionDecl>(TargetDecl))) {
if (llvm::Constant *PrefixSig =
@@ -4029,8 +4128,10 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::StructType *PrefixStructTy = llvm::StructType::get(
CGM.getLLVMContext(), PrefixStructTyElems, /*isPacked=*/true);
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+
llvm::Value *CalleePrefixStruct = Builder.CreateBitCast(
- Callee, llvm::PointerType::getUnqual(PrefixStructTy));
+ CalleePtr, llvm::PointerType::getUnqual(PrefixStructTy));
llvm::Value *CalleeSigPtr =
Builder.CreateConstGEP2_32(PrefixStructTy, CalleePrefixStruct, 0, 0);
llvm::Value *CalleeSig =
@@ -4053,7 +4154,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
EmitCheckTypeDescriptor(CalleeType)
};
EmitCheck(std::make_pair(CalleeRTTIMatch, SanitizerKind::Function),
- "function_type_mismatch", StaticData, Callee);
+ SanitizerHandler::FunctionTypeMismatch, StaticData, CalleePtr);
Builder.CreateBr(Cont);
EmitBlock(Cont);
@@ -4070,7 +4171,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
llvm::Metadata *MD = CGM.CreateMetadataIdentifierForType(QualType(FnType, 0));
llvm::Value *TypeId = llvm::MetadataAsValue::get(getLLVMContext(), MD);
- llvm::Value *CastedCallee = Builder.CreateBitCast(Callee, Int8PtrTy);
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ llvm::Value *CastedCallee = Builder.CreateBitCast(CalleePtr, Int8PtrTy);
llvm::Value *TypeTest = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::type_test), {CastedCallee, TypeId});
@@ -4085,7 +4187,7 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
CastedCallee, StaticData);
} else {
EmitCheck(std::make_pair(TypeTest, SanitizerKind::CFIICall),
- "cfi_check_fail", StaticData,
+ SanitizerHandler::CFICheckFail, StaticData,
{CastedCallee, llvm::UndefValue::get(IntPtrTy)});
}
}
@@ -4094,8 +4196,35 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (Chain)
Args.add(RValue::get(Builder.CreateBitCast(Chain, CGM.VoidPtrTy)),
CGM.getContext().VoidPtrTy);
+
+ // C++17 requires that we evaluate arguments to a call using assignment syntax
+ // right-to-left, and that we evaluate arguments to certain other operators
+ // left-to-right. Note that we allow this to override the order dictated by
+ // the calling convention on the MS ABI, which means that parameter
+ // destruction order is not necessarily reverse construction order.
+ // FIXME: Revisit this based on C++ committee response to unimplementability.
+ EvaluationOrder Order = EvaluationOrder::Default;
+ if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(E)) {
+ if (OCE->isAssignmentOp())
+ Order = EvaluationOrder::ForceRightToLeft;
+ else {
+ switch (OCE->getOperator()) {
+ case OO_LessLess:
+ case OO_GreaterGreater:
+ case OO_AmpAmp:
+ case OO_PipePipe:
+ case OO_Comma:
+ case OO_ArrowStar:
+ Order = EvaluationOrder::ForceLeftToRight;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
EmitCallArgs(Args, dyn_cast<FunctionProtoType>(FnType), E->arguments(),
- E->getDirectCallee(), /*ParamsToSkip*/ 0);
+ E->getDirectCallee(), /*ParamsToSkip*/ 0, Order);
const CGFunctionInfo &FnInfo = CGM.getTypes().arrangeFreeFunctionCall(
Args, FnType, /*isChainCall=*/Chain);
@@ -4123,11 +4252,13 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, llvm::Value *Callee,
if (isa<FunctionNoProtoType>(FnType) || Chain) {
llvm::Type *CalleeTy = getTypes().GetFunctionType(FnInfo);
CalleeTy = CalleeTy->getPointerTo();
- Callee = Builder.CreateBitCast(Callee, CalleeTy, "callee.knr.cast");
+
+ llvm::Value *CalleePtr = Callee.getFunctionPointer();
+ CalleePtr = Builder.CreateBitCast(CalleePtr, CalleeTy, "callee.knr.cast");
+ Callee.setFunctionPointer(CalleePtr);
}
- return EmitCall(FnInfo, Callee, ReturnValue, Args,
- CGCalleeInfo(NonCanonicalFTP, TargetDecl));
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
LValue CodeGenFunction::
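A sketch of the evaluation-order rule that the new EvaluationOrder logic in EmitCall enforces (illustrative names): for a call to an overloaded assignment operator, C++17 requires the right-hand side to be evaluated before the left-hand side, while overloaded <<, >>, &&, ||, comma and ->* calls are forced left-to-right:

    #include <map>
    #include <string>

    std::string nextName();

    void record(std::map<int, std::string> &m, int key) {
      // std::string::operator= is a CXXOperatorCallExpr, so nextName() is
      // evaluated before m[key], overriding the argument order the calling
      // convention would otherwise prefer.
      m[key] = nextName();
    }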
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 6d18843591f3..009244784e50 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -164,6 +164,8 @@ public:
void VisitAbstractConditionalOperator(const AbstractConditionalOperator *CO);
void VisitChooseExpr(const ChooseExpr *CE);
void VisitInitListExpr(InitListExpr *E);
+ void VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
+ llvm::Value *outerBegin = nullptr);
void VisitImplicitValueInitExpr(ImplicitValueInitExpr *E);
void VisitNoInitExpr(NoInitExpr *E) { } // Do nothing.
void VisitCXXDefaultArgExpr(CXXDefaultArgExpr *DAE) {
@@ -749,7 +751,9 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("cast kind invalid for aggregate types");
}
}
@@ -1049,7 +1053,8 @@ static bool isSimpleZero(const Expr *E, CodeGenFunction &CGF) {
return true;
// (int*)0 - Null pointer expressions.
if (const CastExpr *ICE = dyn_cast<CastExpr>(E))
- return ICE->getCastKind() == CK_NullToPointer;
+ return ICE->getCastKind() == CK_NullToPointer &&
+ CGF.getTypes().isPointerZeroInitializable(E->getType());
// '\0'
if (const CharacterLiteral *CL = dyn_cast<CharacterLiteral>(E))
return CL->getValue() == 0;
@@ -1144,15 +1149,15 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
if (E->hadArrayRangeDesignator())
CGF.ErrorUnsupported(E, "GNU array range designator extension");
+ if (E->isTransparent())
+ return Visit(E->getInit(0));
+
AggValueSlot Dest = EnsureSlot(E->getType());
LValue DestLV = CGF.MakeAddrLValue(Dest.getAddress(), E->getType());
// Handle initialization of an array.
if (E->getType()->isArrayType()) {
- if (E->isStringLiteralInit())
- return Visit(E->getInit(0));
-
QualType elementType =
CGF.getContext().getAsArrayType(E->getType())->getElementType();
@@ -1161,16 +1166,6 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
return;
}
- if (E->getType()->isAtomicType()) {
- // An _Atomic(T) object can be list-initialized from an expression
- // of the same type.
- assert(E->getNumInits() == 1 &&
- CGF.getContext().hasSameUnqualifiedType(E->getInit(0)->getType(),
- E->getType()) &&
- "unexpected list initialization for atomic object");
- return Visit(E->getInit(0));
- }
-
assert(E->getType()->isRecordType() && "Only support structs/unions here!");
// Do struct initialization; this code just sets each individual member
@@ -1316,6 +1311,98 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
cleanupDominator->eraseFromParent();
}
+void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
+ llvm::Value *outerBegin) {
+ // Emit the common subexpression.
+ CodeGenFunction::OpaqueValueMapping binding(CGF, E->getCommonExpr());
+
+ Address destPtr = EnsureSlot(E->getType()).getAddress();
+ uint64_t numElements = E->getArraySize().getZExtValue();
+
+ if (!numElements)
+ return;
+
+ // destPtr is an array*. Construct an elementType* by drilling down a level.
+ llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ llvm::Value *indices[] = {zero, zero};
+ llvm::Value *begin = Builder.CreateInBoundsGEP(destPtr.getPointer(), indices,
+ "arrayinit.begin");
+
+ // Prepare to special-case multidimensional array initialization: we avoid
+ // emitting multiple destructor loops in that case.
+ if (!outerBegin)
+ outerBegin = begin;
+ ArrayInitLoopExpr *InnerLoop = dyn_cast<ArrayInitLoopExpr>(E->getSubExpr());
+
+ QualType elementType =
+ CGF.getContext().getAsArrayType(E->getType())->getElementType();
+ CharUnits elementSize = CGF.getContext().getTypeSizeInChars(elementType);
+ CharUnits elementAlign =
+ destPtr.getAlignment().alignmentOfArrayElement(elementSize);
+
+ llvm::BasicBlock *entryBB = Builder.GetInsertBlock();
+ llvm::BasicBlock *bodyBB = CGF.createBasicBlock("arrayinit.body");
+
+ // Jump into the body.
+ CGF.EmitBlock(bodyBB);
+ llvm::PHINode *index =
+ Builder.CreatePHI(zero->getType(), 2, "arrayinit.index");
+ index->addIncoming(zero, entryBB);
+ llvm::Value *element = Builder.CreateInBoundsGEP(begin, index);
+
+ // Prepare for a cleanup.
+ QualType::DestructionKind dtorKind = elementType.isDestructedType();
+ EHScopeStack::stable_iterator cleanup;
+ if (CGF.needsEHCleanup(dtorKind) && !InnerLoop) {
+ if (outerBegin->getType() != element->getType())
+ outerBegin = Builder.CreateBitCast(outerBegin, element->getType());
+ CGF.pushRegularPartialArrayCleanup(outerBegin, element, elementType,
+ elementAlign,
+ CGF.getDestroyer(dtorKind));
+ cleanup = CGF.EHStack.stable_begin();
+ } else {
+ dtorKind = QualType::DK_none;
+ }
+
+ // Emit the actual filler expression.
+ {
+ // Temporaries created in an array initialization loop are destroyed
+ // at the end of each iteration.
+ CodeGenFunction::RunCleanupsScope CleanupsScope(CGF);
+ CodeGenFunction::ArrayInitLoopExprScope Scope(CGF, index);
+ LValue elementLV =
+ CGF.MakeAddrLValue(Address(element, elementAlign), elementType);
+
+ if (InnerLoop) {
+ // If the subexpression is an ArrayInitLoopExpr, share its cleanup.
+ auto elementSlot = AggValueSlot::forLValue(
+ elementLV, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased);
+ AggExprEmitter(CGF, elementSlot, false)
+ .VisitArrayInitLoopExpr(InnerLoop, outerBegin);
+ } else
+ EmitInitializationToLValue(E->getSubExpr(), elementLV);
+ }
+
+ // Move on to the next element.
+ llvm::Value *nextIndex = Builder.CreateNUWAdd(
+ index, llvm::ConstantInt::get(CGF.SizeTy, 1), "arrayinit.next");
+ index->addIncoming(nextIndex, Builder.GetInsertBlock());
+
+ // Leave the loop if we're done.
+ llvm::Value *done = Builder.CreateICmpEQ(
+ nextIndex, llvm::ConstantInt::get(CGF.SizeTy, numElements),
+ "arrayinit.done");
+ llvm::BasicBlock *endBB = CGF.createBasicBlock("arrayinit.end");
+ Builder.CreateCondBr(done, endBB, bodyBB);
+
+ CGF.EmitBlock(endBB);
+
+ // Leave the partial-array cleanup if we entered one.
+ if (dtorKind)
+ CGF.DeactivateCleanupBlock(cleanup, index);
+}
+
void AggExprEmitter::VisitDesignatedInitUpdateExpr(DesignatedInitUpdateExpr *E) {
AggValueSlot Dest = EnsureSlot(E->getType());
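A sketch of a construct that produces an ArrayInitLoopExpr, which the new VisitArrayInitLoopExpr lowers to an explicit element-by-element loop guarded by a partial-array cleanup (illustrative names):

    struct Widget {
      Widget();
      Widget(const Widget &);
      ~Widget();
    };

    void capture() {
      Widget arr[8];
      // Capturing the array by copy initializes the captured copy element by
      // element; if a copy constructor throws partway through, only the
      // elements already constructed are destroyed.
      auto keep = [arr] {};
      (void)keep;
    }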
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index eec2aceb88a2..71c8fb8b7ae3 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -28,25 +28,18 @@ static RequiredArgs
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
- CallArgList &Args) {
+ CallArgList &Args, CallArgList *RtlArgs) {
assert(CE == nullptr || isa<CXXMemberCallExpr>(CE) ||
isa<CXXOperatorCallExpr>(CE));
assert(MD->isInstance() &&
"Trying to emit a member or operator call expr on a static method!");
-
- // C++11 [class.mfct.non-static]p2:
- // If a non-static member function of a class X is called for an object that
- // is not of type X, or of a type derived from X, the behavior is undefined.
- SourceLocation CallLoc;
- if (CE)
- CallLoc = CE->getExprLoc();
- CGF.EmitTypeCheck(
- isa<CXXConstructorDecl>(MD) ? CodeGenFunction::TCK_ConstructorCall
- : CodeGenFunction::TCK_MemberCall,
- CallLoc, This, CGF.getContext().getRecordType(MD->getParent()));
+ ASTContext &C = CGF.getContext();
// Push the this ptr.
- Args.add(RValue::get(This), MD->getThisType(CGF.getContext()));
+ const CXXRecordDecl *RD =
+ CGF.CGM.getCXXABI().getThisArgumentTypeForMethod(MD);
+ Args.add(RValue::get(This),
+ RD ? C.getPointerType(C.getTypeDeclType(RD)) : C.VoidPtrTy);
// If there is an implicit parameter (e.g. VTT), emit it.
if (ImplicitParam) {
@@ -57,7 +50,12 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
// And the rest of the call args.
- if (CE) {
+ if (RtlArgs) {
+ // Special case: if the caller emitted the arguments right-to-left already
+ // (prior to emitting the *this argument), we're done. This happens for
+ // assignment operators.
+ Args.addFrom(*RtlArgs);
+ } else if (CE) {
// Special case: skip first argument of CXXOperatorCall (it is "this").
unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
CGF.EmitCallArgs(Args, FPT, drop_begin(CE->arguments(), ArgsToSkip),
@@ -71,26 +69,78 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
- const CXXMethodDecl *MD, llvm::Value *Callee, ReturnValueSlot ReturnValue,
+ const CXXMethodDecl *MD, const CGCallee &Callee,
+ ReturnValueSlot ReturnValue,
llvm::Value *This, llvm::Value *ImplicitParam, QualType ImplicitParamTy,
- const CallExpr *CE) {
+ const CallExpr *CE, CallArgList *RtlArgs) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
CallArgList Args;
RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
- *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args);
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
- Callee, ReturnValue, Args, MD);
+ *this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
+ auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required);
+ return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
RValue CodeGenFunction::EmitCXXDestructorCall(
- const CXXDestructorDecl *DD, llvm::Value *Callee, llvm::Value *This,
+ const CXXDestructorDecl *DD, const CGCallee &Callee, llvm::Value *This,
llvm::Value *ImplicitParam, QualType ImplicitParamTy, const CallExpr *CE,
StructorType Type) {
CallArgList Args;
commonEmitCXXMemberOrOperatorCall(*this, DD, This, ImplicitParam,
- ImplicitParamTy, CE, Args);
+ ImplicitParamTy, CE, Args, nullptr);
return EmitCall(CGM.getTypes().arrangeCXXStructorDeclaration(DD, Type),
- Callee, ReturnValueSlot(), Args, DD);
+ Callee, ReturnValueSlot(), Args);
+}
+
+RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
+ const CXXPseudoDestructorExpr *E) {
+ QualType DestroyedType = E->getDestroyedType();
+ if (DestroyedType.hasStrongOrWeakObjCLifetime()) {
+ // Automatic Reference Counting:
+ // If the pseudo-expression names a retainable object with weak or
+ // strong lifetime, the object shall be released.
+ Expr *BaseExpr = E->getBase();
+ Address BaseValue = Address::invalid();
+ Qualifiers BaseQuals;
+
+ // If this is s.x, emit s as an lvalue. If it is s->x, emit s as a scalar.
+ if (E->isArrow()) {
+ BaseValue = EmitPointerWithAlignment(BaseExpr);
+ const PointerType *PTy = BaseExpr->getType()->getAs<PointerType>();
+ BaseQuals = PTy->getPointeeType().getQualifiers();
+ } else {
+ LValue BaseLV = EmitLValue(BaseExpr);
+ BaseValue = BaseLV.getAddress();
+ QualType BaseTy = BaseExpr->getType();
+ BaseQuals = BaseTy.getQualifiers();
+ }
+
+ switch (DestroyedType.getObjCLifetime()) {
+ case Qualifiers::OCL_None:
+ case Qualifiers::OCL_ExplicitNone:
+ case Qualifiers::OCL_Autoreleasing:
+ break;
+
+ case Qualifiers::OCL_Strong:
+ EmitARCRelease(Builder.CreateLoad(BaseValue,
+ DestroyedType.isVolatileQualified()),
+ ARCPreciseLifetime);
+ break;
+
+ case Qualifiers::OCL_Weak:
+ EmitARCDestroyWeak(BaseValue);
+ break;
+ }
+ } else {
+ // C++ [expr.pseudo]p1:
+ // The result shall only be used as the operand for the function call
+ // operator (), and the result of such a call has type void. The only
+ // effect is the evaluation of the postfix-expression before the dot or
+ // arrow.
+ EmitIgnoredExpr(E->getBase());
+ }
+
+ return RValue::get(nullptr);
}
static CXXRecordDecl *getCXXRecord(const Expr *E) {
@@ -115,8 +165,8 @@ RValue CodeGenFunction::EmitCXXMemberCallExpr(const CXXMemberCallExpr *CE,
if (MD->isStatic()) {
// The method is static, emit it as we would a regular call.
- llvm::Value *Callee = CGM.GetAddrOfFunction(MD);
- return EmitCall(getContext().getPointerType(MD->getType()), Callee, CE,
+ CGCallee callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD), MD);
+ return EmitCall(getContext().getPointerType(MD->getType()), callee, CE,
ReturnValue);
}
@@ -166,6 +216,19 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
}
}
+ // C++17 demands that we evaluate the RHS of a (possibly-compound) assignment
+ // operator before the LHS.
+ CallArgList RtlArgStorage;
+ CallArgList *RtlArgs = nullptr;
+ if (auto *OCE = dyn_cast<CXXOperatorCallExpr>(CE)) {
+ if (OCE->isAssignmentOp()) {
+ RtlArgs = &RtlArgStorage;
+ EmitCallArgs(*RtlArgs, MD->getType()->castAs<FunctionProtoType>(),
+ drop_begin(CE->arguments(), 1), CE->getDirectCallee(),
+ /*ParamsToSkip*/0, EvaluationOrder::ForceRightToLeft);
+ }
+ }
+
Address This = Address::invalid();
if (IsArrow)
This = EmitPointerWithAlignment(Base);
@@ -183,10 +246,12 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isCopyAssignmentOperator() || MD->isMoveAssignmentOperator()) {
// We don't like to generate the trivial copy/move assignment operator
// when it isn't necessary; just produce the proper effect here.
- // Special case: skip first argument of CXXOperatorCall (it is "this").
- unsigned ArgsToSkip = isa<CXXOperatorCallExpr>(CE) ? 1 : 0;
- Address RHS = EmitLValue(*(CE->arg_begin() + ArgsToSkip)).getAddress();
- EmitAggregateAssign(This, RHS, CE->getType());
+ LValue RHS = isa<CXXOperatorCallExpr>(CE)
+ ? MakeNaturalAlignAddrLValue(
+ (*RtlArgs)[0].RV.getScalarVal(),
+ (*(CE->arg_begin() + 1))->getType())
+ : EmitLValue(*CE->arg_begin());
+ EmitAggregateAssign(This, RHS.getAddress(), CE->getType());
return RValue::get(This.getPointer());
}
@@ -217,6 +282,22 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
llvm::FunctionType *Ty = CGM.getTypes().GetFunctionType(*FInfo);
+ // C++11 [class.mfct.non-static]p2:
+ // If a non-static member function of a class X is called for an object that
+ // is not of type X, or of a type derived from X, the behavior is undefined.
+ SourceLocation CallLoc;
+ ASTContext &C = getContext();
+ if (CE)
+ CallLoc = CE->getExprLoc();
+
+ EmitTypeCheck(isa<CXXConstructorDecl>(CalleeDecl)
+ ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()));
+
+ // FIXME: Uses of 'MD' past this point need to be audited. We may need to use
+ // 'CalleeDecl' instead.
+
// C++ [class.virtual]p12:
// Explicit qualification with the scope operator (5.1) suppresses the
// virtual call mechanism.
@@ -224,8 +305,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
// We also don't emit a virtual call if the base expression has a record type
// because then we know what the type is.
bool UseVirtualCall = CanUseVirtualCall && !DevirtualizedMethod;
- llvm::Value *Callee;
-
+
if (const CXXDestructorDecl *Dtor = dyn_cast<CXXDestructorDecl>(MD)) {
assert(CE->arg_begin() == CE->arg_end() &&
"Destructor shouldn't have explicit parameters");
@@ -234,24 +314,32 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGM.getCXXABI().EmitVirtualDestructorCall(
*this, Dtor, Dtor_Complete, This, cast<CXXMemberCallExpr>(CE));
} else {
+ CGCallee Callee;
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee =
- CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.getAddrOfCXXStructor(Dtor, StructorType::Complete, FInfo, Ty),
+ Dtor);
else {
const CXXDestructorDecl *DDtor =
cast<CXXDestructorDecl>(DevirtualizedMethod);
- Callee = CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(DDtor, Dtor_Complete), Ty),
+ DDtor);
}
- EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, nullptr);
}
return RValue::get(nullptr);
}
+ CGCallee Callee;
if (const CXXConstructorDecl *Ctor = dyn_cast<CXXConstructorDecl>(MD)) {
- Callee = CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(GlobalDecl(Ctor, Ctor_Complete), Ty),
+ Ctor);
} else if (UseVirtualCall) {
Callee = CGM.getCXXABI().getVirtualFunctionPointer(*this, MD, This, Ty,
CE->getLocStart());
@@ -266,9 +354,11 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (getLangOpts().AppleKext && MD->isVirtual() && HasQualifier)
Callee = BuildAppleKextVirtualCall(MD, Qualifier, Ty);
else if (!DevirtualizedMethod)
- Callee = CGM.GetAddrOfFunction(MD, Ty);
+ Callee = CGCallee::forDirect(CGM.GetAddrOfFunction(MD, Ty), MD);
else {
- Callee = CGM.GetAddrOfFunction(DevirtualizedMethod, Ty);
+ Callee = CGCallee::forDirect(
+ CGM.GetAddrOfFunction(DevirtualizedMethod, Ty),
+ DevirtualizedMethod);
}
}
@@ -277,8 +367,9 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
*this, CalleeDecl, This, UseVirtualCall);
}
- return EmitCXXMemberOrOperatorCall(MD, Callee, ReturnValue, This.getPointer(),
- /*ImplicitParam=*/nullptr, QualType(), CE);
+ return EmitCXXMemberOrOperatorCall(
+ CalleeDecl, Callee, ReturnValue, This.getPointer(),
+ /*ImplicitParam=*/nullptr, QualType(), CE, RtlArgs);
}
RValue
@@ -297,9 +388,6 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
const CXXRecordDecl *RD =
cast<CXXRecordDecl>(MPT->getClass()->getAs<RecordType>()->getDecl());
- // Get the member function pointer.
- llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
-
// Emit the 'this' pointer.
Address This = Address::invalid();
if (BO->getOpcode() == BO_PtrMemI)
@@ -310,9 +398,12 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.getPointer(),
QualType(MPT->getClass(), 0));
+ // Get the member function pointer.
+ llvm::Value *MemFnPtr = EmitScalarExpr(MemFnExpr);
+
// Ask the ABI to load the callee. Note that This is modified.
llvm::Value *ThisPtrForCall = nullptr;
- llvm::Value *Callee =
+ CGCallee Callee =
CGM.getCXXABI().EmitLoadOfMemberFunctionPointer(*this, BO, This,
ThisPtrForCall, MemFnPtr, MPT);
@@ -851,8 +942,68 @@ void CodeGenFunction::EmitNewArrayInitializer(
CharUnits ElementAlign =
BeginPtr.getAlignment().alignmentOfArrayElement(ElementSize);
+ // Attempt to perform zero-initialization using memset.
+ auto TryMemsetInitialization = [&]() -> bool {
+ // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
+ // we can initialize with a memset to -1.
+ if (!CGM.getTypes().isZeroInitializable(ElementType))
+ return false;
+
+ // Optimization: since zero initialization will just set the memory
+ // to all zeroes, generate a single memset to do it in one shot.
+
+ // Subtract out the size of any elements we've already initialized.
+ auto *RemainingSize = AllocSizeWithoutCookie;
+ if (InitListElements) {
+ // We know this can't overflow; we check this when doing the allocation.
+ auto *InitializedSize = llvm::ConstantInt::get(
+ RemainingSize->getType(),
+ getContext().getTypeSizeInChars(ElementType).getQuantity() *
+ InitListElements);
+ RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
+ }
+
+ // Create the memset.
+ Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
+ return true;
+ };
+
// If the initializer is an initializer list, first do the explicit elements.
if (const InitListExpr *ILE = dyn_cast<InitListExpr>(Init)) {
+ // Initializing from a (braced) string literal is a special case; the init
+ // list element does not initialize a (single) array element.
+ if (ILE->isStringLiteralInit()) {
+ // Initialize the initial portion of length equal to that of the string
+ // literal. The allocation must be for at least this much; we emitted a
+ // check for that earlier.
+ AggValueSlot Slot =
+ AggValueSlot::forAddr(CurPtr, ElementType.getQualifiers(),
+ AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased);
+ EmitAggExpr(ILE->getInit(0), Slot);
+
+ // Move past these elements.
+ InitListElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ CurPtr =
+ Address(Builder.CreateInBoundsGEP(CurPtr.getPointer(),
+ Builder.getSize(InitListElements),
+ "string.init.end"),
+ CurPtr.getAlignment().alignmentAtOffset(InitListElements *
+ ElementSize));
+
+ // Zero out the rest, if any remain.
+ llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
+ if (!ConstNum || !ConstNum->equalsInt(InitListElements)) {
+ bool OK = TryMemsetInitialization();
+ (void)OK;
+ assert(OK && "couldn't memset character type?");
+ }
+ return;
+ }
+
InitListElements = ILE->getNumInits();
// If this is a multi-dimensional array new, we will initialize multiple
@@ -919,32 +1070,6 @@ void CodeGenFunction::EmitNewArrayInitializer(
CurPtr = Builder.CreateBitCast(CurPtr, BeginPtr.getType());
}
- // Attempt to perform zero-initialization using memset.
- auto TryMemsetInitialization = [&]() -> bool {
- // FIXME: If the type is a pointer-to-data-member under the Itanium ABI,
- // we can initialize with a memset to -1.
- if (!CGM.getTypes().isZeroInitializable(ElementType))
- return false;
-
- // Optimization: since zero initialization will just set the memory
- // to all zeroes, generate a single memset to do it in one shot.
-
- // Subtract out the size of any elements we've already initialized.
- auto *RemainingSize = AllocSizeWithoutCookie;
- if (InitListElements) {
- // We know this can't overflow; we check this when doing the allocation.
- auto *InitializedSize = llvm::ConstantInt::get(
- RemainingSize->getType(),
- getContext().getTypeSizeInChars(ElementType).getQuantity() *
- InitListElements);
- RemainingSize = Builder.CreateSub(RemainingSize, InitializedSize);
- }
-
- // Create the memset.
- Builder.CreateMemSet(CurPtr, Builder.getInt8(0), RemainingSize, false);
- return true;
- };
-
// If all elements have already been initialized, skip any further
// initialization.
llvm::ConstantInt *ConstNum = dyn_cast<llvm::ConstantInt>(NumElements);
@@ -1110,23 +1235,24 @@ static void EmitNewInitializer(CodeGenFunction &CGF, const CXXNewExpr *E,
/// Emit a call to an operator new or operator delete function, as implicitly
/// created by new-expressions and delete-expressions.
static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
- const FunctionDecl *Callee,
+ const FunctionDecl *CalleeDecl,
const FunctionProtoType *CalleeType,
const CallArgList &Args) {
llvm::Instruction *CallOrInvoke;
- llvm::Value *CalleeAddr = CGF.CGM.GetAddrOfFunction(Callee);
+ llvm::Constant *CalleePtr = CGF.CGM.GetAddrOfFunction(CalleeDecl);
+ CGCallee Callee = CGCallee::forDirect(CalleePtr, CalleeDecl);
RValue RV =
CGF.EmitCall(CGF.CGM.getTypes().arrangeFreeFunctionCall(
Args, CalleeType, /*chainCall=*/false),
- CalleeAddr, ReturnValueSlot(), Args, Callee, &CallOrInvoke);
+ Callee, ReturnValueSlot(), Args, &CallOrInvoke);
/// C++1y [expr.new]p10:
/// [In a new-expression,] an implementation is allowed to omit a call
/// to a replaceable global allocation function.
///
/// We model such elidable calls with the 'builtin' attribute.
- llvm::Function *Fn = dyn_cast<llvm::Function>(CalleeAddr);
- if (Callee->isReplaceableGlobalAllocationFunction() &&
+ llvm::Function *Fn = dyn_cast<llvm::Function>(CalleePtr);
+ if (CalleeDecl->isReplaceableGlobalAllocationFunction() &&
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
// FIXME: Add addAttribute to CallSite.
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
@@ -1159,111 +1285,116 @@ RValue CodeGenFunction::EmitBuiltinNewDeleteCall(const FunctionProtoType *Type,
llvm_unreachable("predeclared global operator new/delete is missing");
}
-namespace {
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression.
- class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
- const FunctionDecl *OperatorDelete;
- llvm::Value *Ptr;
- llvm::Value *AllocSize;
-
- RValue *getPlacementArgs() { return reinterpret_cast<RValue*>(this+1); }
+static std::pair<bool, bool>
+shouldPassSizeAndAlignToUsualDelete(const FunctionProtoType *FPT) {
+ auto AI = FPT->param_type_begin(), AE = FPT->param_type_end();
- public:
- static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(RValue);
- }
+ // The first argument is always a void*.
+ ++AI;
- CallDeleteDuringNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- llvm::Value *Ptr,
- llvm::Value *AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, RValue Arg) {
- assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
- }
-
- void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
- CallArgList DeleteArgs;
-
- // The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(RValue::get(Ptr), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2)
- DeleteArgs.add(RValue::get(AllocSize), *AI++);
+ if (AI != AE && (*AI)->isIntegerType()) {
+ PassSize = true;
+ ++AI;
+ }
- // Pass the rest of the arguments, which must match exactly.
- for (unsigned I = 0; I != NumPlacementArgs; ++I)
- DeleteArgs.add(getPlacementArgs()[I], *AI++);
+ if (AI != AE && (*AI)->isAlignValT()) {
+ PassAlignment = true;
+ ++AI;
+ }
- // Call 'operator delete'.
- EmitNewDeleteCall(CGF, OperatorDelete, FPT, DeleteArgs);
- }
- };
+ assert(AI == AE && "unexpected usual deallocation function parameter");
+ return {PassSize, PassAlignment};
+}
- /// A cleanup to call the given 'operator delete' function upon
- /// abnormal exit from a new expression when the new expression is
- /// conditional.
- class CallDeleteDuringConditionalNew final : public EHScopeStack::Cleanup {
- size_t NumPlacementArgs;
+namespace {
+ /// A cleanup to call the given 'operator delete' function upon abnormal
+ /// exit from a new expression. Templated on a traits type that deals with
+ /// ensuring that the arguments dominate the cleanup if necessary.
+ template<typename Traits>
+ class CallDeleteDuringNew final : public EHScopeStack::Cleanup {
+ /// Type used to hold llvm::Value*s.
+ typedef typename Traits::ValueTy ValueTy;
+ /// Type used to hold RValues.
+ typedef typename Traits::RValueTy RValueTy;
+ struct PlacementArg {
+ RValueTy ArgValue;
+ QualType ArgType;
+ };
+
+ unsigned NumPlacementArgs : 31;
+ unsigned PassAlignmentToPlacementDelete : 1;
const FunctionDecl *OperatorDelete;
- DominatingValue<RValue>::saved_type Ptr;
- DominatingValue<RValue>::saved_type AllocSize;
+ ValueTy Ptr;
+ ValueTy AllocSize;
+ CharUnits AllocAlign;
- DominatingValue<RValue>::saved_type *getPlacementArgs() {
- return reinterpret_cast<DominatingValue<RValue>::saved_type*>(this+1);
+ PlacementArg *getPlacementArgs() {
+ return reinterpret_cast<PlacementArg *>(this + 1);
}
public:
static size_t getExtraSize(size_t NumPlacementArgs) {
- return NumPlacementArgs * sizeof(DominatingValue<RValue>::saved_type);
+ return NumPlacementArgs * sizeof(PlacementArg);
}
- CallDeleteDuringConditionalNew(size_t NumPlacementArgs,
- const FunctionDecl *OperatorDelete,
- DominatingValue<RValue>::saved_type Ptr,
- DominatingValue<RValue>::saved_type AllocSize)
- : NumPlacementArgs(NumPlacementArgs), OperatorDelete(OperatorDelete),
- Ptr(Ptr), AllocSize(AllocSize) {}
-
- void setPlacementArg(unsigned I, DominatingValue<RValue>::saved_type Arg) {
+ CallDeleteDuringNew(size_t NumPlacementArgs,
+ const FunctionDecl *OperatorDelete, ValueTy Ptr,
+ ValueTy AllocSize, bool PassAlignmentToPlacementDelete,
+ CharUnits AllocAlign)
+ : NumPlacementArgs(NumPlacementArgs),
+ PassAlignmentToPlacementDelete(PassAlignmentToPlacementDelete),
+ OperatorDelete(OperatorDelete), Ptr(Ptr), AllocSize(AllocSize),
+ AllocAlign(AllocAlign) {}
+
+ void setPlacementArg(unsigned I, RValueTy Arg, QualType Type) {
assert(I < NumPlacementArgs && "index out of range");
- getPlacementArgs()[I] = Arg;
+ getPlacementArgs()[I] = {Arg, Type};
}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *FPT
- = OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(FPT->getNumParams() == NumPlacementArgs + 1 ||
- (FPT->getNumParams() == 2 && NumPlacementArgs == 0));
-
+ const FunctionProtoType *FPT =
+ OperatorDelete->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
// The first argument is always a void*.
- FunctionProtoType::param_type_iterator AI = FPT->param_type_begin();
- DeleteArgs.add(Ptr.restore(CGF), *AI++);
-
- // A member 'operator delete' can take an extra 'size_t' argument.
- if (FPT->getNumParams() == NumPlacementArgs + 2) {
- RValue RV = AllocSize.restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ DeleteArgs.add(Traits::get(CGF, Ptr), FPT->getParamType(0));
+
+ // Figure out what other parameters we should be implicitly passing.
+ bool PassSize = false;
+ bool PassAlignment = false;
+ if (NumPlacementArgs) {
+ // A placement deallocation function is implicitly passed an alignment
+ // if the placement allocation function was, but is never passed a size.
+ PassAlignment = PassAlignmentToPlacementDelete;
+ } else {
+ // For a non-placement new-expression, 'operator delete' can take a
+ // size and/or an alignment if it has the right parameters.
+ std::tie(PassSize, PassAlignment) =
+ shouldPassSizeAndAlignToUsualDelete(FPT);
}
+ // The second argument can be a std::size_t (for non-placement delete).
+ if (PassSize)
+ DeleteArgs.add(Traits::get(CGF, AllocSize),
+ CGF.getContext().getSizeType());
+
+ // The next (second or third) argument can be a std::align_val_t, which
+ // is an enum whose underlying type is std::size_t.
+ // FIXME: Use the right type as the parameter type. Note that in a call
+ // to operator delete(size_t, ...), we may not have it available.
+ if (PassAlignment)
+ DeleteArgs.add(RValue::get(llvm::ConstantInt::get(
+ CGF.SizeTy, AllocAlign.getQuantity())),
+ CGF.getContext().getSizeType());
+
// Pass the rest of the arguments, which must match exactly.
for (unsigned I = 0; I != NumPlacementArgs; ++I) {
- RValue RV = getPlacementArgs()[I].restore(CGF);
- DeleteArgs.add(RV, *AI++);
+ auto Arg = getPlacementArgs()[I];
+ DeleteArgs.add(Traits::get(CGF, Arg.ArgValue), Arg.ArgType);
}
// Call 'operator delete'.
@@ -1278,18 +1409,34 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
const CXXNewExpr *E,
Address NewPtr,
llvm::Value *AllocSize,
+ CharUnits AllocAlign,
const CallArgList &NewArgs) {
+ unsigned NumNonPlacementArgs = E->passAlignment() ? 2 : 1;
+
// If we're not inside a conditional branch, then the cleanup will
// dominate and we can do the easier (and more efficient) thing.
if (!CGF.isInConditionalBranch()) {
- CallDeleteDuringNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- NewPtr.getPointer(),
- AllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I, NewArgs[I+1].RV);
+ struct DirectCleanupTraits {
+ typedef llvm::Value *ValueTy;
+ typedef RValue RValueTy;
+ static RValue get(CodeGenFunction &, ValueTy V) { return RValue::get(V); }
+ static RValue get(CodeGenFunction &, RValueTy V) { return V; }
+ };
+
+ typedef CallDeleteDuringNew<DirectCleanupTraits> DirectCleanup;
+
+ DirectCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<DirectCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ NewPtr.getPointer(),
+ AllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, Arg.RV, Arg.Ty);
+ }
return;
}
@@ -1300,15 +1447,28 @@ static void EnterNewDeleteCleanup(CodeGenFunction &CGF,
DominatingValue<RValue>::saved_type SavedAllocSize =
DominatingValue<RValue>::save(CGF, RValue::get(AllocSize));
- CallDeleteDuringConditionalNew *Cleanup = CGF.EHStack
- .pushCleanupWithExtra<CallDeleteDuringConditionalNew>(EHCleanup,
- E->getNumPlacementArgs(),
- E->getOperatorDelete(),
- SavedNewPtr,
- SavedAllocSize);
- for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I)
- Cleanup->setPlacementArg(I,
- DominatingValue<RValue>::save(CGF, NewArgs[I+1].RV));
+ struct ConditionalCleanupTraits {
+ typedef DominatingValue<RValue>::saved_type ValueTy;
+ typedef DominatingValue<RValue>::saved_type RValueTy;
+ static RValue get(CodeGenFunction &CGF, ValueTy V) {
+ return V.restore(CGF);
+ }
+ };
+ typedef CallDeleteDuringNew<ConditionalCleanupTraits> ConditionalCleanup;
+
+ ConditionalCleanup *Cleanup = CGF.EHStack
+ .pushCleanupWithExtra<ConditionalCleanup>(EHCleanup,
+ E->getNumPlacementArgs(),
+ E->getOperatorDelete(),
+ SavedNewPtr,
+ SavedAllocSize,
+ E->passAlignment(),
+ AllocAlign);
+ for (unsigned I = 0, N = E->getNumPlacementArgs(); I != N; ++I) {
+ auto &Arg = NewArgs[I + NumNonPlacementArgs];
+ Cleanup->setPlacementArg(I, DominatingValue<RValue>::save(CGF, Arg.RV),
+ Arg.Ty);
+ }
CGF.initFullExprCleanup();
}
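
As a reader's aid, here is what shouldPassSizeAndAlignToUsualDelete distinguishes, shown on the C++17 usual deallocation signatures it inspects. This is an illustration, not part of the patch; the {PassSize, PassAlignment} pairs in the comments mirror the function's return value.

    #include <cstddef>
    #include <new>

    // Usual 'operator delete' forms and the implicit arguments they receive:
    void operator delete(void *p) noexcept;                         // {false, false}
    void operator delete(void *p, std::size_t sz) noexcept;         // {true,  false}
    void operator delete(void *p, std::align_val_t al) noexcept;    // {false, true}
    void operator delete(void *p, std::size_t sz,
                         std::align_val_t al) noexcept;             // {true,  true}

Placement deallocation functions are handled differently by the cleanup above: they are never passed a size, and they receive an alignment only if the matching placement allocation function did.
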
@@ -1323,7 +1483,12 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// If there is a brace-initializer, cannot allocate fewer elements than inits.
unsigned minElements = 0;
if (E->isArray() && E->hasInitializer()) {
- if (const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer()))
+ const InitListExpr *ILE = dyn_cast<InitListExpr>(E->getInitializer());
+ if (ILE && ILE->isStringLiteralInit())
+ minElements =
+ cast<ConstantArrayType>(ILE->getType()->getAsArrayTypeUnsafe())
+ ->getSize().getZExtValue();
+ else if (ILE)
minElements = ILE->getNumInits();
}
@@ -1332,6 +1497,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Value *allocSize =
EmitCXXNewAllocSize(*this, E, minElements, numElements,
allocSizeWithoutCookie);
+ CharUnits allocAlign = getContext().getTypeAlignInChars(allocType);
// Emit the allocation call. If the allocator is a global placement
// operator, just "inline" it directly.
@@ -1347,10 +1513,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// The pointer expression will, in many cases, be an opaque void*.
// In these cases, discard the computed alignment and use the
// formal alignment of the allocated type.
- if (alignSource != AlignmentSource::Decl) {
- allocation = Address(allocation.getPointer(),
- getContext().getTypeAlignInChars(allocType));
- }
+ if (alignSource != AlignmentSource::Decl)
+ allocation = Address(allocation.getPointer(), allocAlign);
// Set up allocatorArgs for the call to operator delete if it's not
// the reserved global operator.
@@ -1363,28 +1527,55 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
} else {
const FunctionProtoType *allocatorType =
allocator->getType()->castAs<FunctionProtoType>();
+ unsigned ParamsToSkip = 0;
// The allocation size is the first argument.
QualType sizeType = getContext().getSizeType();
allocatorArgs.add(RValue::get(allocSize), sizeType);
+ ++ParamsToSkip;
+
+ if (allocSize != allocSizeWithoutCookie) {
+ CharUnits cookieAlign = getSizeAlign(); // FIXME: Ask the ABI.
+ allocAlign = std::max(allocAlign, cookieAlign);
+ }
+
+ // The allocation alignment may be passed as the second argument.
+ if (E->passAlignment()) {
+ QualType AlignValT = sizeType;
+ if (allocatorType->getNumParams() > 1) {
+ AlignValT = allocatorType->getParamType(1);
+ assert(getContext().hasSameUnqualifiedType(
+ AlignValT->castAs<EnumType>()->getDecl()->getIntegerType(),
+ sizeType) &&
+ "wrong type for alignment parameter");
+ ++ParamsToSkip;
+ } else {
+ // Corner case, passing alignment to 'operator new(size_t, ...)'.
+ assert(allocator->isVariadic() && "can't pass alignment to allocator");
+ }
+ allocatorArgs.add(
+ RValue::get(llvm::ConstantInt::get(SizeTy, allocAlign.getQuantity())),
+ AlignValT);
+ }
- // We start at 1 here because the first argument (the allocation size)
- // has already been emitted.
+ // FIXME: Why do we not pass a CalleeDecl here?
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
- /* CalleeDecl */ nullptr,
- /*ParamsToSkip*/ 1);
+ /*CalleeDecl*/nullptr, /*ParamsToSkip*/ParamsToSkip);
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
- // For now, only assume that the allocation function returns
- // something satisfactorily aligned for the element type, plus
- // the cookie if we have one.
- CharUnits allocationAlign =
- getContext().getTypeAlignInChars(allocType);
- if (allocSize != allocSizeWithoutCookie) {
- CharUnits cookieAlign = getSizeAlign(); // FIXME?
- allocationAlign = std::max(allocationAlign, cookieAlign);
+ // If this was a call to a global replaceable allocation function that does
+ // not take an alignment argument, the allocator is known to produce
+ // storage that's suitably aligned for any object that fits, up to a known
+ // threshold. Otherwise assume it's suitably aligned for the allocated type.
+ CharUnits allocationAlign = allocAlign;
+ if (!E->passAlignment() &&
+ allocator->isReplaceableGlobalAllocationFunction()) {
+ unsigned AllocatorAlign = llvm::PowerOf2Floor(std::min<uint64_t>(
+ Target.getNewAlign(), getContext().getTypeSize(allocType)));
+ allocationAlign = std::max(
+ allocationAlign, getContext().toCharUnitsFromBits(AllocatorAlign));
}
allocation = Address(RV.getScalarVal(), allocationAlign);
@@ -1423,7 +1614,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
llvm::Instruction *cleanupDominator = nullptr;
if (E->getOperatorDelete() &&
!E->getOperatorDelete()->isReservedGlobalPlacementOperator()) {
- EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocatorArgs);
+ EnterNewDeleteCleanup(*this, E, allocation, allocSize, allocAlign,
+ allocatorArgs);
operatorDeleteCleanup = EHStack.stable_begin();
cleanupDominator = Builder.CreateUnreachable();
}
@@ -1485,31 +1677,58 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
}
void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
- llvm::Value *Ptr,
- QualType DeleteTy) {
- assert(DeleteFD->getOverloadedOperator() == OO_Delete);
+ llvm::Value *Ptr, QualType DeleteTy,
+ llvm::Value *NumElements,
+ CharUnits CookieSize) {
+ assert((!NumElements && CookieSize.isZero()) ||
+ DeleteFD->getOverloadedOperator() == OO_Array_Delete);
const FunctionProtoType *DeleteFTy =
DeleteFD->getType()->getAs<FunctionProtoType>();
CallArgList DeleteArgs;
- // Check if we need to pass the size to the delete operator.
- llvm::Value *Size = nullptr;
- QualType SizeTy;
- if (DeleteFTy->getNumParams() == 2) {
- SizeTy = DeleteFTy->getParamType(1);
- CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
- Size = llvm::ConstantInt::get(ConvertType(SizeTy),
- DeleteTypeSize.getQuantity());
- }
+ std::pair<bool, bool> PassSizeAndAlign =
+ shouldPassSizeAndAlignToUsualDelete(DeleteFTy);
+
+ auto ParamTypeIt = DeleteFTy->param_type_begin();
- QualType ArgTy = DeleteFTy->getParamType(0);
+ // Pass the pointer itself.
+ QualType ArgTy = *ParamTypeIt++;
llvm::Value *DeletePtr = Builder.CreateBitCast(Ptr, ConvertType(ArgTy));
DeleteArgs.add(RValue::get(DeletePtr), ArgTy);
- if (Size)
- DeleteArgs.add(RValue::get(Size), SizeTy);
+ // Pass the size if the delete function has a size_t parameter.
+ if (PassSizeAndAlign.first) {
+ QualType SizeType = *ParamTypeIt++;
+ CharUnits DeleteTypeSize = getContext().getTypeSizeInChars(DeleteTy);
+ llvm::Value *Size = llvm::ConstantInt::get(ConvertType(SizeType),
+ DeleteTypeSize.getQuantity());
+
+ // For array new, multiply by the number of elements.
+ if (NumElements)
+ Size = Builder.CreateMul(Size, NumElements);
+
+ // If there is a cookie, add the cookie size.
+ if (!CookieSize.isZero())
+ Size = Builder.CreateAdd(
+ Size, llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity()));
+
+ DeleteArgs.add(RValue::get(Size), SizeType);
+ }
+
+ // Pass the alignment if the delete function has an align_val_t parameter.
+ if (PassSizeAndAlign.second) {
+ QualType AlignValType = *ParamTypeIt++;
+ CharUnits DeleteTypeAlign = getContext().toCharUnitsFromBits(
+ getContext().getTypeAlignIfKnown(DeleteTy));
+ llvm::Value *Align = llvm::ConstantInt::get(ConvertType(AlignValType),
+ DeleteTypeAlign.getQuantity());
+ DeleteArgs.add(RValue::get(Align), AlignValType);
+ }
+
+ assert(ParamTypeIt == DeleteFTy->param_type_end() &&
+ "unknown parameter to usual delete function");
// Emit the call to delete.
EmitNewDeleteCall(*this, DeleteFD, DeleteFTy, DeleteArgs);
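
An editor's illustration, under the assumption of a C++17 target with aligned allocation enabled, of the arguments EmitDeleteCall now computes: an over-aligned element type makes the delete-expression pass an alignment, and a sized 'operator delete[]' receives the element count times the element size plus any array cookie, matching the Size and Align code above.

    struct alignas(64) Overaligned {
      char payload[64];
      ~Overaligned();   // declaration only; a sketch, not a complete program
    };

    void destroy(Overaligned *one, Overaligned *many) {
      delete one;      // may pass std::align_val_t(64), and a size, if the
                       // selected 'operator delete' has those parameters
      delete[] many;   // size argument, if passed, is NumElements * 64 + cookie
    }
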
@@ -1546,6 +1765,15 @@ static void EmitObjectDelete(CodeGenFunction &CGF,
const CXXDeleteExpr *DE,
Address Ptr,
QualType ElementType) {
+ // C++11 [expr.delete]p3:
+ // If the static type of the object to be deleted is different from its
+ // dynamic type, the static type shall be a base class of the dynamic type
+ // of the object to be deleted and the static type shall have a virtual
+ // destructor or the behavior is undefined.
+ CGF.EmitTypeCheck(CodeGenFunction::TCK_MemberCall,
+ DE->getExprLoc(), Ptr.getPointer(),
+ ElementType);
+
// Find the destructor for the type, if applicable. If the
// destructor is virtual, we'll just emit the vcall and return.
const CXXDestructorDecl *Dtor = nullptr;
@@ -1613,45 +1841,8 @@ namespace {
ElementType(ElementType), CookieSize(CookieSize) {}
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const FunctionProtoType *DeleteFTy =
- OperatorDelete->getType()->getAs<FunctionProtoType>();
- assert(DeleteFTy->getNumParams() == 1 || DeleteFTy->getNumParams() == 2);
-
- CallArgList Args;
-
- // Pass the pointer as the first argument.
- QualType VoidPtrTy = DeleteFTy->getParamType(0);
- llvm::Value *DeletePtr
- = CGF.Builder.CreateBitCast(Ptr, CGF.ConvertType(VoidPtrTy));
- Args.add(RValue::get(DeletePtr), VoidPtrTy);
-
- // Pass the original requested size as the second argument.
- if (DeleteFTy->getNumParams() == 2) {
- QualType size_t = DeleteFTy->getParamType(1);
- llvm::IntegerType *SizeTy
- = cast<llvm::IntegerType>(CGF.ConvertType(size_t));
-
- CharUnits ElementTypeSize =
- CGF.CGM.getContext().getTypeSizeInChars(ElementType);
-
- // The size of an element, multiplied by the number of elements.
- llvm::Value *Size
- = llvm::ConstantInt::get(SizeTy, ElementTypeSize.getQuantity());
- if (NumElements)
- Size = CGF.Builder.CreateMul(Size, NumElements);
-
- // Plus the size of the cookie if applicable.
- if (!CookieSize.isZero()) {
- llvm::Value *CookieSizeV
- = llvm::ConstantInt::get(SizeTy, CookieSize.getQuantity());
- Size = CGF.Builder.CreateAdd(Size, CookieSizeV);
- }
-
- Args.add(RValue::get(Size), size_t);
- }
-
- // Emit the call to delete.
- EmitNewDeleteCall(CGF, OperatorDelete, DeleteFTy, Args);
+ CGF.EmitDeleteCall(OperatorDelete, Ptr, ElementType, NumElements,
+ CookieSize);
}
};
}
@@ -1949,10 +2140,7 @@ void CodeGenFunction::EmitLambdaExpr(const LambdaExpr *E, AggValueSlot Slot) {
auto VAT = CurField->getCapturedVLAType();
EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
} else {
- ArrayRef<VarDecl *> ArrayIndexes;
- if (CurField->getType()->isArrayType())
- ArrayIndexes = E->getCaptureInitIndexVars(i);
- EmitInitializerForField(*CurField, LV, *i, ArrayIndexes);
+ EmitInitializerForField(*CurField, LV, *i);
}
}
}
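
Before moving on to the next file: a short example (editor's, not from the patch) of the construct served by the isStringLiteralInit() path added to EmitNewArrayInitializer earlier in this file's diff. The braced string literal initializes the leading elements, and the remainder is zeroed through the hoisted TryMemsetInitialization() helper; the EmitCXXNewExpr change also raises minElements to the literal's length.

    // "hello" provides six elements (including the terminator); the other
    // ten bytes are cleared by the memset path.
    char *greeting = new char[16]{"hello"};
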
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 22910d931ded..59bc9cdbc056 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -13,12 +13,9 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SmallString.h"
#include "llvm/IR/Constants.h"
-#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Metadata.h"
@@ -483,7 +480,9 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_CopyAndAutoreleaseBlockObject:
case CK_BuiltinFnToFnPtr:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
case CK_AddressSpaceConversion:
+ case CK_IntToOCLSampler:
llvm_unreachable("invalid cast kind for complex value");
case CK_FloatingRealToComplex:
@@ -600,10 +599,10 @@ ComplexPairTy ComplexExprEmitter::EmitComplexBinOpLibCall(StringRef LibCallName,
llvm::FunctionType *FTy = CGF.CGM.getTypes().GetFunctionType(FuncInfo);
llvm::Constant *Func = CGF.CGM.CreateBuiltinFunction(FTy, LibCallName);
- llvm::Instruction *Call;
+ CGCallee Callee = CGCallee::forDirect(Func, FQTy->getAs<FunctionProtoType>());
- RValue Res = CGF.EmitCall(FuncInfo, Func, ReturnValueSlot(), Args,
- FQTy->getAs<FunctionProtoType>(), &Call);
+ llvm::Instruction *Call;
+ RValue Res = CGF.EmitCall(FuncInfo, Callee, ReturnValueSlot(), Args, &Call);
cast<llvm::CallInst>(Call)->setCallingConv(CGF.CGM.getBuiltinCC());
return Res.getComplexVal();
}
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 803b39907dd7..3db15c646f43 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -16,6 +16,7 @@
#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenModule.h"
+#include "TargetInfo.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
@@ -690,6 +691,9 @@ public:
case CK_ConstructorConversion:
return C;
+ case CK_IntToOCLSampler:
+ llvm_unreachable("global sampler variables are not generated");
+
case CK_Dependent: llvm_unreachable("saw dependent cast!");
case CK_BuiltinFnToFnPtr:
@@ -749,6 +753,7 @@ public:
case CK_FloatingToBoolean:
case CK_FloatingCast:
case CK_ZeroToOCLEvent:
+ case CK_ZeroToOCLQueue:
return nullptr;
}
llvm_unreachable("Invalid CastKind");
@@ -775,9 +780,6 @@ public:
}
llvm::Constant *EmitArrayInitialization(InitListExpr *ILE) {
- if (ILE->isStringLiteralInit())
- return Visit(ILE->getInit(0));
-
llvm::ArrayType *AType =
cast<llvm::ArrayType>(ConvertType(ILE->getType()));
llvm::Type *ElemTy = AType->getElementType();
@@ -842,6 +844,9 @@ public:
}
llvm::Constant *VisitInitListExpr(InitListExpr *ILE) {
+ if (ILE->isTransparent())
+ return Visit(ILE->getInit(0));
+
if (ILE->getType()->isArrayType())
return EmitArrayInitialization(ILE);
@@ -1018,16 +1023,17 @@ public:
switch (E->getStmtClass()) {
default: break;
case Expr::CompoundLiteralExprClass: {
- // Note that due to the nature of compound literals, this is guaranteed
- // to be the only use of the variable, so we just generate it here.
CompoundLiteralExpr *CLE = cast<CompoundLiteralExpr>(E);
+ CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
+ if (llvm::GlobalVariable *Addr =
+ CGM.getAddrOfConstantCompoundLiteralIfEmitted(CLE))
+ return ConstantAddress(Addr, Align);
+
llvm::Constant* C = CGM.EmitConstantExpr(CLE->getInitializer(),
CLE->getType(), CGF);
// FIXME: "Leaked" on failure.
if (!C) return ConstantAddress::invalid();
- CharUnits Align = CGM.getContext().getTypeAlignInChars(E->getType());
-
auto GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(),
E->getType().isConstant(CGM.getContext()),
llvm::GlobalValue::InternalLinkage,
@@ -1035,6 +1041,7 @@ public:
llvm::GlobalVariable::NotThreadLocal,
CGM.getContext().getTargetAddressSpace(E->getType()));
GV->setAlignment(Align.getQuantity());
+ CGM.setAddrOfConstantCompoundLiteral(CLE, GV);
return ConstantAddress(GV, Align);
}
case Expr::StringLiteralClass:
@@ -1083,7 +1090,7 @@ public:
return CGM.GetAddrOfConstantCFString(Literal);
}
case Expr::BlockExprClass: {
- std::string FunctionName;
+ StringRef FunctionName;
if (CGF)
FunctionName = CGF->CurFn->getName();
else
@@ -1091,7 +1098,7 @@ public:
// This is not really an l-value.
llvm::Constant *Ptr =
- CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName.c_str());
+ CGM.GetAddrOfGlobalBlock(cast<BlockExpr>(E), FunctionName);
return ConstantAddress(Ptr, CGM.getPointerAlign());
}
case Expr::CXXTypeidExprClass: {
@@ -1259,6 +1266,10 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return C;
}
+llvm::Constant *CodeGenModule::getNullPointer(llvm::PointerType *T, QualType QT) {
+ return getTargetCodeGenInfo().getNullPointer(*this, T, QT);
+}
+
llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
QualType DestType,
CodeGenFunction *CGF) {
@@ -1290,6 +1301,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
llvm::ConstantInt::get(Int64Ty, Value.getLValueOffset().getQuantity());
llvm::Constant *C = nullptr;
+
if (APValue::LValueBase LVBase = Value.getLValueBase()) {
// An array can be represented as an lvalue referring to the base.
if (isa<llvm::ArrayType>(DestTy)) {
@@ -1320,7 +1332,9 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
// Convert to the appropriate type; this could be an lvalue for
// an integer.
- if (isa<llvm::PointerType>(DestTy)) {
+ if (auto PT = dyn_cast<llvm::PointerType>(DestTy)) {
+ if (Value.isNullPointer())
+ return getNullPointer(PT, DestType);
// Convert the integer to a pointer-sized integer before converting it
// to a pointer.
C = llvm::ConstantExpr::getIntegerCast(
@@ -1354,7 +1368,7 @@ llvm::Constant *CodeGenModule::EmitConstantValue(const APValue &Value,
}
case APValue::Float: {
const llvm::APFloat &Init = Value.getFloat();
- if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf &&
+ if (&Init.getSemantics() == &llvm::APFloat::IEEEhalf() &&
!Context.getLangOpts().NativeHalfType &&
!Context.getLangOpts().HalfArgsAndReturns)
return llvm::ConstantInt::get(VMContext, Init.bitcastToAPInt());
@@ -1480,6 +1494,18 @@ CodeGenModule::EmitConstantValueForMemory(const APValue &Value,
return C;
}
+llvm::GlobalVariable *CodeGenModule::getAddrOfConstantCompoundLiteralIfEmitted(
+ const CompoundLiteralExpr *E) {
+ return EmittedCompoundLiterals.lookup(E);
+}
+
+void CodeGenModule::setAddrOfConstantCompoundLiteral(
+ const CompoundLiteralExpr *CLE, llvm::GlobalVariable *GV) {
+ bool Ok = EmittedCompoundLiterals.insert(std::make_pair(CLE, GV)).second;
+ (void)Ok;
+ assert(Ok && "CLE has already been emitted!");
+}
+
ConstantAddress
CodeGenModule::GetAddrOfConstantCompoundLiteral(const CompoundLiteralExpr *E) {
assert(E->isFileScope() && "not a file-scope compound literal expr");
@@ -1507,7 +1533,7 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
const CXXRecordDecl *base);
static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
- const CXXRecordDecl *record,
+ const RecordDecl *record,
bool asCompleteObject) {
const CGRecordLayout &layout = CGM.getTypes().getCGRecordLayout(record);
llvm::StructType *structure =
@@ -1517,24 +1543,29 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
unsigned numElements = structure->getNumElements();
std::vector<llvm::Constant *> elements(numElements);
+ auto CXXR = dyn_cast<CXXRecordDecl>(record);
// Fill in all the bases.
- for (const auto &I : record->bases()) {
- if (I.isVirtual()) {
- // Ignore virtual bases; if we're laying out for a complete
- // object, we'll lay these out later.
- continue;
- }
+ if (CXXR) {
+ for (const auto &I : CXXR->bases()) {
+ if (I.isVirtual()) {
+ // Ignore virtual bases; if we're laying out for a complete
+ // object, we'll lay these out later.
+ continue;
+ }
- const CXXRecordDecl *base =
- cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
+ const CXXRecordDecl *base =
+ cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
- // Ignore empty bases.
- if (base->isEmpty())
- continue;
-
- unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
- llvm::Type *baseType = structure->getElementType(fieldIndex);
- elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ // Ignore empty bases.
+ if (base->isEmpty() ||
+ CGM.getContext().getASTRecordLayout(base).getNonVirtualSize()
+ .isZero())
+ continue;
+
+ unsigned fieldIndex = layout.getNonVirtualBaseLLVMFieldNo(base);
+ llvm::Type *baseType = structure->getElementType(fieldIndex);
+ elements[fieldIndex] = EmitNullConstantForBase(CGM, baseType, base);
+ }
}
// Fill in all the fields.
@@ -1558,8 +1589,8 @@ static llvm::Constant *EmitNullConstant(CodeGenModule &CGM,
}
// Fill in the virtual bases, if we're working with the complete object.
- if (asCompleteObject) {
- for (const auto &I : record->vbases()) {
+ if (CXXR && asCompleteObject) {
+ for (const auto &I : CXXR->vbases()) {
const CXXRecordDecl *base =
cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
@@ -1601,6 +1632,10 @@ static llvm::Constant *EmitNullConstantForBase(CodeGenModule &CGM,
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
+ if (T->getAs<PointerType>())
+ return getNullPointer(
+ cast<llvm::PointerType>(getTypes().ConvertTypeForMem(T)), T);
+
if (getTypes().isZeroInitializable(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
@@ -1616,10 +1651,8 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::ConstantArray::get(ATy, Array);
}
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- return ::EmitNullConstant(*this, RD, /*complete object*/ true);
- }
+ if (const RecordType *RT = T->getAs<RecordType>())
+ return ::EmitNullConstant(*this, RT->getDecl(), /*complete object*/ true);
assert(T->isMemberDataPointerType() &&
"Should only see pointers to data members here!");
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 120dacfbb011..1b85c45cd4be 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -19,6 +19,7 @@
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
@@ -171,9 +172,9 @@ public:
}
/// EmitPointerToBoolConversion - Perform a pointer to boolean conversion.
- Value *EmitPointerToBoolConversion(Value *V) {
- Value *Zero = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(V->getType()));
+ Value *EmitPointerToBoolConversion(Value *V, QualType QT) {
+ Value *Zero = CGF.CGM.getNullPointer(cast<llvm::PointerType>(V->getType()), QT);
+
return Builder.CreateICmpNE(V, Zero, "tobool");
}
@@ -310,6 +311,12 @@ public:
Value *VisitInitListExpr(InitListExpr *E);
+ Value *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E) {
+ assert(CGF.getArrayInitIndex() &&
+ "ArrayInitIndexExpr not inside an ArrayInitLoopExpr?");
+ return CGF.getArrayInitIndex();
+ }
+
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
return EmitNullValue(E->getType());
}
@@ -591,7 +598,7 @@ Value *ScalarExprEmitter::EmitConversionToBool(Value *Src, QualType SrcType) {
return EmitIntToBoolConversion(Src);
assert(isa<llvm::PointerType>(Src->getType()));
- return EmitPointerToBoolConversion(Src);
+ return EmitPointerToBoolConversion(Src, SrcType);
}
void ScalarExprEmitter::EmitFloatConversionCheck(
@@ -724,7 +731,7 @@ void ScalarExprEmitter::EmitFloatConversionCheck(
CGF.EmitCheckTypeDescriptor(OrigSrcType),
CGF.EmitCheckTypeDescriptor(DstType)};
CGF.EmitCheck(std::make_pair(Check, SanitizerKind::FloatCastOverflow),
- "float_cast_overflow", StaticArgs, OrigSrc);
+ SanitizerHandler::FloatCastOverflow, StaticArgs, OrigSrc);
}
/// Emit a conversion from the specified type to the specified destination type,
@@ -787,7 +794,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
// Handle pointer conversions next: pointers can only be converted to/from
// other pointers and integers. Check for pointer types in terms of LLVM, as
// some native types (like Obj-C id) may map to a pointer type.
- if (isa<llvm::PointerType>(DstTy)) {
+ if (auto DstPT = dyn_cast<llvm::PointerType>(DstTy)) {
// The source value may be an integer, or a pointer.
if (isa<llvm::PointerType>(SrcTy))
return Builder.CreateBitCast(Src, DstTy, "conv");
@@ -795,7 +802,7 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
assert(SrcType->isIntegerType() && "Not ptr->ptr or int->ptr conversion?");
// First, convert to the correct width so that we control the kind of
// extension.
- llvm::Type *MiddleTy = CGF.IntPtrTy;
+ llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DstPT);
bool InputSigned = SrcType->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
@@ -927,7 +934,7 @@ Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
void ScalarExprEmitter::EmitBinOpCheck(
ArrayRef<std::pair<Value *, SanitizerMask>> Checks, const BinOpInfo &Info) {
assert(CGF.IsSanitizerScope);
- StringRef CheckName;
+ SanitizerHandler Check;
SmallVector<llvm::Constant *, 4> StaticData;
SmallVector<llvm::Value *, 2> DynamicData;
@@ -938,13 +945,13 @@ void ScalarExprEmitter::EmitBinOpCheck(
StaticData.push_back(CGF.EmitCheckSourceLocation(Info.E->getExprLoc()));
const UnaryOperator *UO = dyn_cast<UnaryOperator>(Info.E);
if (UO && UO->getOpcode() == UO_Minus) {
- CheckName = "negate_overflow";
+ Check = SanitizerHandler::NegateOverflow;
StaticData.push_back(CGF.EmitCheckTypeDescriptor(UO->getType()));
DynamicData.push_back(Info.RHS);
} else {
if (BinaryOperator::isShiftOp(Opcode)) {
// Shift LHS negative or too large, or RHS out of bounds.
- CheckName = "shift_out_of_bounds";
+ Check = SanitizerHandler::ShiftOutOfBounds;
const BinaryOperator *BO = cast<BinaryOperator>(Info.E);
StaticData.push_back(
CGF.EmitCheckTypeDescriptor(BO->getLHS()->getType()));
@@ -952,14 +959,14 @@ void ScalarExprEmitter::EmitBinOpCheck(
CGF.EmitCheckTypeDescriptor(BO->getRHS()->getType()));
} else if (Opcode == BO_Div || Opcode == BO_Rem) {
// Divide or modulo by zero, or signed overflow (eg INT_MAX / -1).
- CheckName = "divrem_overflow";
+ Check = SanitizerHandler::DivremOverflow;
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
} else {
// Arithmetic overflow (+, -, *).
switch (Opcode) {
- case BO_Add: CheckName = "add_overflow"; break;
- case BO_Sub: CheckName = "sub_overflow"; break;
- case BO_Mul: CheckName = "mul_overflow"; break;
+ case BO_Add: Check = SanitizerHandler::AddOverflow; break;
+ case BO_Sub: Check = SanitizerHandler::SubOverflow; break;
+ case BO_Mul: Check = SanitizerHandler::MulOverflow; break;
default: llvm_unreachable("unexpected opcode for bin op check");
}
StaticData.push_back(CGF.EmitCheckTypeDescriptor(Info.Ty));
@@ -968,7 +975,7 @@ void ScalarExprEmitter::EmitBinOpCheck(
DynamicData.push_back(Info.RHS);
}
- CGF.EmitCheck(Checks, CheckName, StaticData, DynamicData);
+ CGF.EmitCheck(Checks, Check, StaticData, DynamicData);
}
//===----------------------------------------------------------------------===//
@@ -1394,11 +1401,23 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return Builder.CreateBitCast(Src, DstTy);
}
case CK_AddressSpaceConversion: {
- Value *Src = Visit(const_cast<Expr*>(E));
+ Expr::EvalResult Result;
+ if (E->EvaluateAsRValue(Result, CGF.getContext()) &&
+ Result.Val.isNullPointer()) {
+ // If E has side effect, it is emitted even if its final result is a
+ // null pointer. In that case, a DCE pass should be able to
+ // eliminate the useless instructions emitted during translating E.
+ if (Result.HasSideEffects)
+ Visit(E);
+ return CGF.CGM.getNullPointer(cast<llvm::PointerType>(
+ ConvertType(DestTy)), DestTy);
+ }
// Since target may map different address spaces in AST to the same address
// space, an address space conversion may end up as a bitcast.
- return Builder.CreatePointerBitCastOrAddrSpaceCast(Src,
- ConvertType(DestTy));
+ auto *Src = Visit(E);
+ return CGF.CGM.getTargetCodeGenInfo().performAddrSpaceCast(CGF, Src,
+ E->getType(),
+ DestTy);
}
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
@@ -1453,8 +1472,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
if (MustVisitNullValue(E))
(void) Visit(E);
- return llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(ConvertType(DestTy)));
+ return CGF.CGM.getNullPointer(cast<llvm::PointerType>(ConvertType(DestTy)),
+ DestTy);
case CK_NullToMemberPointer: {
if (MustVisitNullValue(E))
@@ -1510,12 +1529,13 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
// First, convert to the correct width so that we control the kind of
// extension.
- llvm::Type *MiddleTy = CGF.IntPtrTy;
+ auto DestLLVMTy = ConvertType(DestTy);
+ llvm::Type *MiddleTy = CGF.CGM.getDataLayout().getIntPtrType(DestLLVMTy);
bool InputSigned = E->getType()->isSignedIntegerOrEnumerationType();
llvm::Value* IntResult =
Builder.CreateIntCast(Src, MiddleTy, InputSigned, "conv");
- return Builder.CreateIntToPtr(IntResult, ConvertType(DestTy));
+ return Builder.CreateIntToPtr(IntResult, DestLLVMTy);
}
case CK_PointerToIntegral:
assert(!DestTy->isBooleanType() && "bool should use PointerToBool");
@@ -1546,7 +1566,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_IntegralToBoolean:
return EmitIntToBoolConversion(Visit(E));
case CK_PointerToBoolean:
- return EmitPointerToBoolConversion(Visit(E));
+ return EmitPointerToBoolConversion(Visit(E), E->getType());
case CK_FloatingToBoolean:
return EmitFloatToBoolConversion(Visit(E));
case CK_MemberPointerToBoolean: {
@@ -1573,8 +1593,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
return llvm::Constant::getNullValue(ConvertType(DestTy));
}
+ case CK_ZeroToOCLQueue: {
+ assert(DestTy->isQueueT() && "CK_ZeroToOCLQueue cast on non queue_t type");
+ return llvm::Constant::getNullValue(ConvertType(DestTy));
}
+ case CK_IntToOCLSampler:
+ return CGF.CGM.createOpenCLIntToSamplerConversion(E, CGF);
+
+ } // end of switch
+
llvm_unreachable("unknown scalar cast");
}
@@ -2273,8 +2301,13 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
llvm::Value *Val = Builder.CreateFDiv(Ops.LHS, Ops.RHS, "div");
- if (CGF.getLangOpts().OpenCL) {
- // OpenCL 1.1 7.4: minimum accuracy of single precision / is 2.5ulp
+ if (CGF.getLangOpts().OpenCL &&
+ !CGF.CGM.getCodeGenOpts().CorrectlyRoundedDivSqrt) {
+ // OpenCL v1.1 s7.4: minimum accuracy of single precision / is 2.5ulp
+ // OpenCL v1.2 s5.6.4.2: The -cl-fp32-correctly-rounded-divide-sqrt
+ // build option allows an application to specify that single precision
+ // floating-point divide (x/y and 1/x) and sqrt used in the program
+ // source are correctly rounded.
llvm::Type *ValTy = Val->getType();
if (ValTy->isFloatTy() ||
(isa<llvm::VectorType>(ValTy) &&
@@ -2363,9 +2396,8 @@ Value *ScalarExprEmitter::EmitOverflowCheckedBinOp(const BinOpInfo &Ops) {
// Branch in case of overflow.
llvm::BasicBlock *initialBB = Builder.GetInsertBlock();
- llvm::Function::iterator insertPt = initialBB->getIterator();
- llvm::BasicBlock *continueBB = CGF.createBasicBlock("nooverflow", CGF.CurFn,
- &*std::next(insertPt));
+ llvm::BasicBlock *continueBB =
+ CGF.createBasicBlock("nooverflow", CGF.CurFn, initialBB->getNextNode());
llvm::BasicBlock *overflowBB = CGF.createBasicBlock("overflow", CGF.CurFn);
Builder.CreateCondBr(overflow, overflowBB, continueBB);
@@ -2429,11 +2461,13 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
}
unsigned width = cast<llvm::IntegerType>(index->getType())->getBitWidth();
- if (width != CGF.PointerWidthInBits) {
+ auto &DL = CGF.CGM.getDataLayout();
+ auto PtrTy = cast<llvm::PointerType>(pointer->getType());
+ if (width != DL.getTypeSizeInBits(PtrTy)) {
// Zero-extend or sign-extend the pointer value according to
// whether the index is signed or not.
bool isSigned = indexOperand->getType()->isSignedIntegerOrEnumerationType();
- index = CGF.Builder.CreateIntCast(index, CGF.PtrDiffTy, isSigned,
+ index = CGF.Builder.CreateIntCast(index, DL.getIntPtrType(PtrTy), isSigned,
"idx.ext");
}
@@ -3397,6 +3431,52 @@ static Value *ConvertVec3AndVec4(CGBuilderTy &Builder, CodeGenFunction &CGF,
return Builder.CreateShuffleVector(Src, UnV, Mask);
}
+// Create cast instructions for converting LLVM value \p Src to LLVM type \p
+// DstTy. \p Src has the same size as \p DstTy. Both are single value types
+// but could be scalar or vectors of different lengths, and either can be
+// pointer.
+// There are 4 cases:
+// 1. non-pointer -> non-pointer : needs 1 bitcast
+// 2. pointer -> pointer : needs 1 bitcast or addrspacecast
+// 3. pointer -> non-pointer
+// a) pointer -> intptr_t : needs 1 ptrtoint
+// b) pointer -> non-intptr_t : needs 1 ptrtoint then 1 bitcast
+// 4. non-pointer -> pointer
+// a) intptr_t -> pointer : needs 1 inttoptr
+// b) non-intptr_t -> pointer : needs 1 bitcast then 1 inttoptr
+// Note: for cases 3b and 4b two casts are required since LLVM casts do not
+// allow casting directly between pointer types and non-integer non-pointer
+// types.
+static Value *createCastsForTypeOfSameSize(CGBuilderTy &Builder,
+ const llvm::DataLayout &DL,
+ Value *Src, llvm::Type *DstTy,
+ StringRef Name = "") {
+ auto SrcTy = Src->getType();
+
+ // Case 1.
+ if (!SrcTy->isPointerTy() && !DstTy->isPointerTy())
+ return Builder.CreateBitCast(Src, DstTy, Name);
+
+ // Case 2.
+ if (SrcTy->isPointerTy() && DstTy->isPointerTy())
+ return Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy, Name);
+
+ // Case 3.
+ if (SrcTy->isPointerTy() && !DstTy->isPointerTy()) {
+ // Case 3b.
+ if (!DstTy->isIntegerTy())
+ Src = Builder.CreatePtrToInt(Src, DL.getIntPtrType(SrcTy));
+ // Cases 3a and 3b.
+ return Builder.CreateBitOrPointerCast(Src, DstTy, Name);
+ }
+
+ // Case 4b.
+ if (!SrcTy->isIntegerTy())
+ Src = Builder.CreateBitCast(Src, DL.getIntPtrType(DstTy));
+ // Cases 4a and 4b.
+ return Builder.CreateIntToPtr(Src, DstTy, Name);
+}
+
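As a worked instance of case 4b above (outside the patch, names illustrative): converting a same-sized non-integer scalar such as double into a pointer takes two casts, because LLVM has no direct fp-to-pointer cast.

static llvm::Value *fpToSameSizePointer(llvm::IRBuilder<> &B,
                                        const llvm::DataLayout &DL,
                                        llvm::Value *Src,
                                        llvm::PointerType *DstTy) {
  // e.g. double -> i64: a plain bitcast between same-sized non-pointer types.
  llvm::Value *AsInt = B.CreateBitCast(Src, DL.getIntPtrType(DstTy));
  // i64 -> T addrspace(N)*: the only cast LLVM allows into a pointer type.
  return B.CreateIntToPtr(AsInt, DstTy, "astype");
}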
Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
Value *Src = CGF.EmitScalarExpr(E->getSrcExpr());
llvm::Type *DstTy = ConvertType(E->getType());
@@ -3411,7 +3491,8 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// vector to get a vec4, then a bitcast if the target type is different.
if (NumElementsSrc == 3 && NumElementsDst != 3) {
Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
- Src = Builder.CreateBitCast(Src, DstTy);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ DstTy);
Src->setName("astype");
return Src;
}
@@ -3421,13 +3502,15 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
- Src = Builder.CreateBitCast(Src, Vec4Ty);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ Vec4Ty);
Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
Src->setName("astype");
return Src;
}
- return Builder.CreateBitCast(Src, DstTy, "astype");
+ return Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(),
+ Src, DstTy, "astype");
}
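For reference, the vec3-to-vec4 padding used above comes down to a shuffle against an undef vector before the same-size cast; ConvertVec3AndVec4 is the in-tree helper, the function below is only an illustrative sketch of that step.

static llvm::Value *padVec3ToVec4(llvm::IRBuilder<> &B, llvm::Value *Vec3) {
  llvm::Value *Undef = llvm::UndefValue::get(Vec3->getType());
  // Indices 0-2 select from Vec3; index 3 selects an (undef) lane from the
  // second operand, giving a <4 x T> result.
  uint32_t Mask[] = {0, 1, 2, 3};
  return B.CreateShuffleVector(Vec3, Undef, Mask);
}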
Value *ScalarExprEmitter::VisitAtomicExpr(AtomicExpr *E) {
diff --git a/lib/CodeGen/CGLoopInfo.cpp b/lib/CodeGen/CGLoopInfo.cpp
index 51474f16a018..28998ce8db44 100644
--- a/lib/CodeGen/CGLoopInfo.cpp
+++ b/lib/CodeGen/CGLoopInfo.cpp
@@ -20,14 +20,15 @@ using namespace clang::CodeGen;
using namespace llvm;
static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
- llvm::DebugLoc Location) {
+ const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
if (!Attrs.IsParallel && Attrs.VectorizeWidth == 0 &&
Attrs.InterleaveCount == 0 && Attrs.UnrollCount == 0 &&
Attrs.VectorizeEnable == LoopAttributes::Unspecified &&
Attrs.UnrollEnable == LoopAttributes::Unspecified &&
Attrs.DistributeEnable == LoopAttributes::Unspecified &&
- !Location)
+ !StartLoc && !EndLoc)
return nullptr;
SmallVector<Metadata *, 4> Args;
@@ -35,9 +36,14 @@ static MDNode *createMetadata(LLVMContext &Ctx, const LoopAttributes &Attrs,
auto TempNode = MDNode::getTemporary(Ctx, None);
Args.push_back(TempNode.get());
- // If we have a valid debug location for the loop, add it.
- if (Location)
- Args.push_back(Location.getAsMDNode());
+ // If we have a valid start debug location for the loop, add it.
+ if (StartLoc) {
+ Args.push_back(StartLoc.getAsMDNode());
+
+ // If we also have a valid end debug location for the loop, add it.
+ if (EndLoc)
+ Args.push_back(EndLoc.getAsMDNode());
+ }
// Setting vectorize.width
if (Attrs.VectorizeWidth > 0) {
@@ -112,23 +118,26 @@ void LoopAttributes::clear() {
UnrollCount = 0;
VectorizeEnable = LoopAttributes::Unspecified;
UnrollEnable = LoopAttributes::Unspecified;
+ DistributeEnable = LoopAttributes::Unspecified;
}
LoopInfo::LoopInfo(BasicBlock *Header, const LoopAttributes &Attrs,
- llvm::DebugLoc Location)
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc)
: LoopID(nullptr), Header(Header), Attrs(Attrs) {
- LoopID = createMetadata(Header->getContext(), Attrs, Location);
+ LoopID = createMetadata(Header->getContext(), Attrs, StartLoc, EndLoc);
}
-void LoopInfoStack::push(BasicBlock *Header, llvm::DebugLoc Location) {
- Active.push_back(LoopInfo(Header, StagedAttrs, Location));
+void LoopInfoStack::push(BasicBlock *Header, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
+ Active.push_back(LoopInfo(Header, StagedAttrs, StartLoc, EndLoc));
// Clear the attributes so nested loops do not inherit them.
StagedAttrs.clear();
}
void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
ArrayRef<const clang::Attr *> Attrs,
- llvm::DebugLoc Location) {
+ const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc) {
// Identify loop hint attributes from Attrs.
for (const auto *Attr : Attrs) {
@@ -266,7 +275,7 @@ void LoopInfoStack::push(BasicBlock *Header, clang::ASTContext &Ctx,
}
/// Stage the attributes.
- push(Header, Location);
+ push(Header, StartLoc, EndLoc);
}
void LoopInfoStack::pop() {
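In short, the patch threads an optional end location into the llvm.loop ID node alongside the start location. A stripped-down sketch of that construction (illustrative only; the in-tree createMetadata additionally appends the hint operands shown above):

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Metadata.h"

static llvm::MDNode *buildLoopID(llvm::LLVMContext &Ctx,
                                 const llvm::DebugLoc &StartLoc,
                                 const llvm::DebugLoc &EndLoc) {
  llvm::SmallVector<llvm::Metadata *, 4> Args;
  // Operand 0 is reserved for the node itself and patched below.
  auto TempNode = llvm::MDNode::getTemporary(Ctx, llvm::None);
  Args.push_back(TempNode.get());
  if (StartLoc) {
    Args.push_back(StartLoc.getAsMDNode());
    if (EndLoc)
      Args.push_back(EndLoc.getAsMDNode());
  }
  llvm::MDNode *LoopID = llvm::MDNode::get(Ctx, Args);
  // Point operand 0 back at the node, the usual llvm.loop convention.
  LoopID->replaceOperandWith(0, LoopID);
  return LoopID;
}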
diff --git a/lib/CodeGen/CGLoopInfo.h b/lib/CodeGen/CGLoopInfo.h
index a0111edde5de..15608c105dc7 100644
--- a/lib/CodeGen/CGLoopInfo.h
+++ b/lib/CodeGen/CGLoopInfo.h
@@ -16,7 +16,6 @@
#define LLVM_CLANG_LIB_CODEGEN_CGLOOPINFO_H
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Value.h"
@@ -68,7 +67,7 @@ class LoopInfo {
public:
/// \brief Construct a new LoopInfo for the loop with entry Header.
LoopInfo(llvm::BasicBlock *Header, const LoopAttributes &Attrs,
- llvm::DebugLoc Location);
+ const llvm::DebugLoc &StartLoc, const llvm::DebugLoc &EndLoc);
/// \brief Get the loop id metadata for this loop.
llvm::MDNode *getLoopID() const { return LoopID; }
@@ -100,14 +99,14 @@ public:
/// \brief Begin a new structured loop. The set of staged attributes will be
/// applied to the loop and then cleared.
- void push(llvm::BasicBlock *Header,
- llvm::DebugLoc Location = llvm::DebugLoc());
+ void push(llvm::BasicBlock *Header, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc);
/// \brief Begin a new structured loop. Stage attributes from the Attrs list.
/// The staged attributes are applied to the loop and then cleared.
void push(llvm::BasicBlock *Header, clang::ASTContext &Ctx,
- llvm::ArrayRef<const Attr *> Attrs,
- llvm::DebugLoc Location = llvm::DebugLoc());
+ llvm::ArrayRef<const Attr *> Attrs, const llvm::DebugLoc &StartLoc,
+ const llvm::DebugLoc &EndLoc);
/// \brief End the current loop.
void pop();
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index db894ce67470..932b8a129e6e 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -589,9 +589,10 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
args.add(RValue::get(CGF.Builder.getInt1(isAtomic)), Context.BoolTy);
args.add(RValue::get(CGF.Builder.getInt1(hasStrong)), Context.BoolTy);
- llvm::Value *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetGetStructFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(CGF.getTypes().arrangeBuiltinFunctionCall(Context.VoidTy, args),
- fn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
/// Determine whether the given architecture supports unaligned atomic
@@ -852,11 +853,12 @@ static void emitCPPObjectAtomicGetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Value *copyCppAtomicObjectFn =
+ llvm::Constant *copyCppAtomicObjectFn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectGetFunction();
+ CGCallee callee = CGCallee::forDirect(copyCppAtomicObjectFn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyCppAtomicObjectFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
void
@@ -927,12 +929,13 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
}
case PropertyImplStrategy::GetSetProperty: {
- llvm::Value *getPropertyFn =
+ llvm::Constant *getPropertyFn =
CGM.getObjCRuntime().GetPropertyGetFunction();
if (!getPropertyFn) {
CGM.ErrorUnsupported(propImpl, "Obj-C getter requiring atomic copy");
return;
}
+ CGCallee callee = CGCallee::forDirect(getPropertyFn);
// Return (ivar-type) objc_getProperty((id) self, _cmd, offset, true).
// FIXME: Can't this be simpler? This might even be worse than the
@@ -955,8 +958,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
llvm::Instruction *CallInstruction;
RValue RV = EmitCall(
getTypes().arrangeBuiltinFunctionCall(propType, args),
- getPropertyFn, ReturnValueSlot(), args, CGCalleeInfo(),
- &CallInstruction);
+ callee, ReturnValueSlot(), args, &CallInstruction);
if (llvm::CallInst *call = dyn_cast<llvm::CallInst>(CallInstruction))
call->setTailCall();
@@ -1068,10 +1070,11 @@ static void emitStructSetterCall(CodeGenFunction &CGF, ObjCMethodDecl *OMD,
// FIXME: should this really always be false?
args.add(RValue::get(CGF.Builder.getFalse()), CGF.getContext().BoolTy);
- llvm::Value *copyStructFn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ llvm::Constant *fn = CGF.CGM.getObjCRuntime().GetSetStructFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyStructFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
/// emitCPPObjectAtomicSetterCall - Call the runtime function to store
@@ -1103,11 +1106,12 @@ static void emitCPPObjectAtomicSetterCall(CodeGenFunction &CGF,
// Third argument is the helper function.
args.add(RValue::get(AtomicHelperFn), CGF.getContext().VoidPtrTy);
- llvm::Value *copyCppAtomicObjectFn =
+ llvm::Constant *fn =
CGF.CGM.getObjCRuntime().GetCppAtomicObjectSetFunction();
+ CGCallee callee = CGCallee::forDirect(fn);
CGF.EmitCall(
CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
- copyCppAtomicObjectFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
@@ -1197,8 +1201,8 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
case PropertyImplStrategy::GetSetProperty:
case PropertyImplStrategy::SetPropertyAndExpressionGet: {
- llvm::Value *setOptimizedPropertyFn = nullptr;
- llvm::Value *setPropertyFn = nullptr;
+ llvm::Constant *setOptimizedPropertyFn = nullptr;
+ llvm::Constant *setPropertyFn = nullptr;
if (UseOptimizedSetter(CGM)) {
// 10.8 and iOS 6.0 code and GC is off
setOptimizedPropertyFn =
@@ -1236,8 +1240,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
if (setOptimizedPropertyFn) {
args.add(RValue::get(arg), getContext().getObjCIdType());
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
+ CGCallee callee = CGCallee::forDirect(setOptimizedPropertyFn);
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
- setOptimizedPropertyFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
} else {
args.add(RValue::get(ivarOffset), getContext().getPointerDiffType());
args.add(RValue::get(arg), getContext().getObjCIdType());
@@ -1247,8 +1252,9 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
getContext().BoolTy);
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
+ CGCallee callee = CGCallee::forDirect(setPropertyFn);
EmitCall(getTypes().arrangeBuiltinFunctionCall(getContext().VoidTy, args),
- setPropertyFn, ReturnValueSlot(), args);
+ callee, ReturnValueSlot(), args);
}
return;
@@ -1450,13 +1456,14 @@ QualType CodeGenFunction::TypeOfSelfObject() {
}
void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
- llvm::Constant *EnumerationMutationFn =
+ llvm::Constant *EnumerationMutationFnPtr =
CGM.getObjCRuntime().EnumerationMutationFunction();
-
- if (!EnumerationMutationFn) {
+ if (!EnumerationMutationFnPtr) {
CGM.ErrorUnsupported(&S, "Obj-C fast enumeration for this runtime");
return;
}
+ CGCallee EnumerationMutationFn =
+ CGCallee::forDirect(EnumerationMutationFnPtr);
CGDebugInfo *DI = getDebugInfo();
if (DI)
@@ -1662,7 +1669,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
elementLValue = EmitLValue(cast<Expr>(S.getElement()));
EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue);
} else {
- EmitScalarInit(CurrentItem, elementLValue);
+ EmitStoreThroughLValue(RValue::get(CurrentItem), elementLValue,
+ /*isInit*/ true);
}
// If we do have an element variable, this assignment is the end of
@@ -1803,7 +1811,8 @@ static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
- if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC()) {
+ if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
+ !CGM.getTriple().isOSBinFormatCOFF()) {
f->setLinkage(llvm::Function::ExternalWeakLinkage);
} else if (fnName == "objc_retain" || fnName == "objc_release") {
// If we have Native ARC, set nonlazybind attribute for these APIs for
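All of the CGObjC.cpp changes above follow one mechanical pattern: EmitCall now takes a CGCallee rather than a bare llvm::Value*, so direct calls to runtime helpers are wrapped first. A condensed sketch of that pattern, assuming the clang::CodeGen namespace as in CGObjC.cpp itself (argument setup elided, helper name illustrative):

static void emitVoidRuntimeCall(CodeGenFunction &CGF, llvm::Constant *runtimeFn,
                                CallArgList &args) {
  // Wrap the raw function constant so EmitCall knows this is a direct call.
  CGCallee callee = CGCallee::forDirect(runtimeFn);
  CGF.EmitCall(
      CGF.getTypes().arrangeBuiltinFunctionCall(CGF.getContext().VoidTy, args),
      callee, ReturnValueSlot(), args);
}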
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index caafef84c333..fa2b3d81e29a 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -18,6 +18,7 @@
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -163,9 +164,8 @@ protected:
/// Helper function that generates a constant string and returns a pointer to
/// the start of the string. The result of this function can be used anywhere
/// where the C code specifies const char*.
- llvm::Constant *MakeConstantString(const std::string &Str,
- const std::string &Name="") {
- ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name.c_str());
+ llvm::Constant *MakeConstantString(StringRef Str, const char *Name = "") {
+ ConstantAddress Array = CGM.GetAddrOfConstantCString(Str, Name);
return llvm::ConstantExpr::getGetElementPtr(Array.getElementType(),
Array.getPointer(), Zeros);
}
@@ -174,14 +174,14 @@ protected:
/// string value. This allows the linker to combine the strings between
/// different modules. Used for EH typeinfo names, selector strings, and a
/// few other things.
- llvm::Constant *ExportUniqueString(const std::string &Str,
- const std::string prefix) {
- std::string name = prefix + Str;
- auto *ConstStr = TheModule.getGlobalVariable(name);
+ llvm::Constant *ExportUniqueString(const std::string &Str, StringRef Prefix) {
+ std::string Name = Prefix.str() + Str;
+ auto *ConstStr = TheModule.getGlobalVariable(Name);
if (!ConstStr) {
llvm::Constant *value = llvm::ConstantDataArray::getString(VMContext,Str);
ConstStr = new llvm::GlobalVariable(TheModule, value->getType(), true,
- llvm::GlobalValue::LinkOnceODRLinkage, value, prefix + Str);
+ llvm::GlobalValue::LinkOnceODRLinkage,
+ value, Name);
}
return llvm::ConstantExpr::getGetElementPtr(ConstStr->getValueType(),
ConstStr, Zeros);
@@ -190,47 +190,17 @@ protected:
/// Generates a global structure, initialized by the elements in the vector.
/// The element types must match the types of the structure elements in the
/// first argument.
- llvm::GlobalVariable *MakeGlobal(llvm::StructType *Ty,
- ArrayRef<llvm::Constant *> V,
+ llvm::GlobalVariable *MakeGlobal(llvm::Constant *C,
CharUnits Align,
StringRef Name="",
llvm::GlobalValue::LinkageTypes linkage
=llvm::GlobalValue::InternalLinkage) {
- llvm::Constant *C = llvm::ConstantStruct::get(Ty, V);
- auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
+ auto GV = new llvm::GlobalVariable(TheModule, C->getType(), false,
linkage, C, Name);
GV->setAlignment(Align.getQuantity());
return GV;
}
- /// Generates a global array. The vector must contain the same number of
- /// elements that the array type declares, of the type specified as the array
- /// element type.
- llvm::GlobalVariable *MakeGlobal(llvm::ArrayType *Ty,
- ArrayRef<llvm::Constant *> V,
- CharUnits Align,
- StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage
- =llvm::GlobalValue::InternalLinkage) {
- llvm::Constant *C = llvm::ConstantArray::get(Ty, V);
- auto GV = new llvm::GlobalVariable(TheModule, Ty, false,
- linkage, C, Name);
- GV->setAlignment(Align.getQuantity());
- return GV;
- }
-
- /// Generates a global array, inferring the array type from the specified
- /// element type and the size of the initialiser.
- llvm::GlobalVariable *MakeGlobalArray(llvm::Type *Ty,
- ArrayRef<llvm::Constant *> V,
- CharUnits Align,
- StringRef Name="",
- llvm::GlobalValue::LinkageTypes linkage
- =llvm::GlobalValue::InternalLinkage) {
- llvm::ArrayType *ArrayTy = llvm::ArrayType::get(Ty, V.size());
- return MakeGlobal(ArrayTy, V, Align, Name, linkage);
- }
-
/// Returns a property name and encoding string.
llvm::Constant *MakePropertyEncodingString(const ObjCPropertyDecl *PD,
const Decl *Container) {
@@ -238,8 +208,8 @@ protected:
if ((R.getKind() == ObjCRuntime::GNUstep) &&
(R.getVersion() >= VersionTuple(1, 6))) {
std::string NameAndAttributes;
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container);
NameAndAttributes += '\0';
NameAndAttributes += TypeStr.length() + 3;
NameAndAttributes += TypeStr;
@@ -251,7 +221,7 @@ protected:
}
/// Push the property attributes into two structure fields.
- void PushPropertyAttributes(std::vector<llvm::Constant*> &Fields,
+ void PushPropertyAttributes(ConstantStructBuilder &Fields,
ObjCPropertyDecl *property, bool isSynthesized=true, bool
isDynamic=true) {
int attrs = property->getPropertyAttributes();
@@ -263,7 +233,7 @@ protected:
attrs &= ~ObjCPropertyDecl::OBJC_PR_strong;
}
// The first flags field has the same attribute values as clang uses internally
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ Fields.addInt(Int8Ty, attrs & 0xff);
attrs >>= 8;
attrs <<= 2;
// For protocol properties, synthesized and dynamic have no meaning, so we
@@ -273,10 +243,10 @@ protected:
attrs |= isDynamic ? (1<<1) : 0;
// The second field is the next four fields left shifted by two, with the
// low bit set to indicate whether the field is synthesized or dynamic.
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, attrs & 0xff));
+ Fields.addInt(Int8Ty, attrs & 0xff);
// Two padding fields
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
- Fields.push_back(llvm::ConstantInt::get(Int8Ty, 0));
+ Fields.addInt(Int8Ty, 0);
+ Fields.addInt(Int8Ty, 0);
}
/// Ensures that the value has the required type, by inserting a bitcast if
@@ -590,11 +560,6 @@ public:
llvm::Constant *BuildByrefLayout(CodeGenModule &CGM, QualType T) override {
return NULLPtr;
}
-
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override {
- return nullptr;
- }
};
/// Class representing the legacy GCC Objective-C ABI. This is the default when
@@ -1152,8 +1117,7 @@ llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF, Selector Sel) {
llvm::Value *CGObjCGNU::GetSelector(CodeGenFunction &CGF,
const ObjCMethodDecl *Method) {
- std::string SelTypes;
- CGM.getContext().getObjCEncodingForMethodDecl(Method, SelTypes);
+ std::string SelTypes = CGM.getContext().getObjCEncodingForMethodDecl(Method);
return GetSelector(CGF, Method->getSelector(), SelTypes);
}
@@ -1233,14 +1197,15 @@ llvm::Constant *CGObjCGNUstep::GetEHType(QualType T) {
llvm::Constant *typeName =
ExportUniqueString(className, "__objc_eh_typename_");
- std::vector<llvm::Constant*> fields;
- fields.push_back(BVtable);
- fields.push_back(typeName);
- llvm::Constant *TI =
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr),
- fields, CGM.getPointerAlign(),
- "__objc_eh_typeinfo_" + className,
- llvm::GlobalValue::LinkOnceODRLinkage);
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
+ fields.add(BVtable);
+ fields.add(typeName);
+ llvm::Constant *TI =
+ fields.finishAndCreateGlobal("__objc_eh_typeinfo_" + className,
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::LinkOnceODRLinkage);
return llvm::ConstantExpr::getBitCast(TI, PtrToInt8Ty);
}
@@ -1270,13 +1235,13 @@ ConstantAddress CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
else if (isa->getType() != PtrToIdTy)
isa = llvm::ConstantExpr::getBitCast(isa, PtrToIdTy);
- std::vector<llvm::Constant*> Ivars;
- Ivars.push_back(isa);
- Ivars.push_back(MakeConstantString(Str));
- Ivars.push_back(llvm::ConstantInt::get(IntTy, Str.size()));
- llvm::Constant *ObjCStr = MakeGlobal(
- llvm::StructType::get(PtrToIdTy, PtrToInt8Ty, IntTy, nullptr),
- Ivars, Align, ".objc_str");
+ ConstantInitBuilder Builder(CGM);
+ auto Fields = Builder.beginStruct();
+ Fields.add(isa);
+ Fields.add(MakeConstantString(Str));
+ Fields.addInt(IntTy, Str.size());
+ llvm::Constant *ObjCStr =
+ Fields.finishAndCreateGlobal(".objc_str", Align);
ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStr, PtrToInt8Ty);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
@@ -1386,9 +1351,10 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
llvm::Type::getInt1Ty(VMContext), IsClassMessage))};
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD);
+ CGCallee callee(CGCalleeInfo(), imp);
+
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- CGCalleeInfo(), &call);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
}
@@ -1500,8 +1466,8 @@ CGObjCGNU::GenerateMessageSend(CodeGenFunction &CGF,
imp = EnforceType(Builder, imp, MSI.MessengerType);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(MSI.CallInfo, imp, Return, ActualArgs,
- CGCalleeInfo(), &call);
+ CGCallee callee(CGCalleeInfo(), imp);
+ RValue msgRet = CGF.EmitCall(MSI.CallInfo, callee, Return, ActualArgs, &call);
call->setMetadata(msgSendMDKind, node);
@@ -1550,50 +1516,38 @@ GenerateMethodList(StringRef ClassName,
bool isClassMethodList) {
if (MethodSels.empty())
return NULLPtr;
+
+ ConstantInitBuilder Builder(CGM);
+
+ auto MethodList = Builder.beginStruct();
+ MethodList.addNullPointer(CGM.Int8PtrTy);
+ MethodList.addInt(Int32Ty, MethodTypes.size());
+
// Get the method structure type.
- llvm::StructType *ObjCMethodTy = llvm::StructType::get(
- PtrToInt8Ty, // Really a selector, but the runtime creates it us.
- PtrToInt8Ty, // Method types
- IMPTy, //Method pointer
- nullptr);
- std::vector<llvm::Constant*> Methods;
+ llvm::StructType *ObjCMethodTy =
+ llvm::StructType::get(CGM.getLLVMContext(), {
+ PtrToInt8Ty, // Really a selector, but the runtime creates it for us.
+ PtrToInt8Ty, // Method types
+ IMPTy // Method pointer
+ });
+ auto Methods = MethodList.beginArray();
for (unsigned int i = 0, e = MethodTypes.size(); i < e; ++i) {
- llvm::Constant *Method =
+ llvm::Constant *FnPtr =
TheModule.getFunction(SymbolNameForMethod(ClassName, CategoryName,
MethodSels[i],
isClassMethodList));
- assert(Method && "Can't generate metadata for method that doesn't exist");
- llvm::Constant *C = MakeConstantString(MethodSels[i].getAsString());
- Method = llvm::ConstantExpr::getBitCast(Method,
- IMPTy);
- Methods.push_back(
- llvm::ConstantStruct::get(ObjCMethodTy, {C, MethodTypes[i], Method}));
- }
-
- // Array of method structures
- llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodTy,
- Methods.size());
- llvm::Constant *MethodArray = llvm::ConstantArray::get(ObjCMethodArrayTy,
- Methods);
-
- // Structure containing list pointer, array and array count
- llvm::StructType *ObjCMethodListTy = llvm::StructType::create(VMContext);
- llvm::Type *NextPtrTy = llvm::PointerType::getUnqual(ObjCMethodListTy);
- ObjCMethodListTy->setBody(
- NextPtrTy,
- IntTy,
- ObjCMethodArrayTy,
- nullptr);
-
- Methods.clear();
- Methods.push_back(llvm::ConstantPointerNull::get(
- llvm::PointerType::getUnqual(ObjCMethodListTy)));
- Methods.push_back(llvm::ConstantInt::get(Int32Ty, MethodTypes.size()));
- Methods.push_back(MethodArray);
+ assert(FnPtr && "Can't generate metadata for method that doesn't exist");
+ auto Method = Methods.beginStruct(ObjCMethodTy);
+ Method.add(MakeConstantString(MethodSels[i].getAsString()));
+ Method.add(MethodTypes[i]);
+ Method.addBitCast(FnPtr, IMPTy);
+ Method.finishAndAddTo(Methods);
+ }
+ Methods.finishAndAddTo(MethodList);
// Create an instance of the structure
- return MakeGlobal(ObjCMethodListTy, Methods, CGM.getPointerAlign(),
- ".objc_method_list");
+ return MethodList.finishAndCreateGlobal(".objc_method_list",
+ CGM.getPointerAlign());
}
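The rewrite above is representative of how this patch replaces the MakeGlobal/std::vector idiom throughout CGObjCGNU.cpp: a ConstantInitBuilder collects the fields (including a nested array of structs) and emits the global in one step. A reduced sketch of the same shape, with placeholder field values and an invented name:

// Emits a global of the form { i8* null, i32 N, [N x { i8*, i8* }] }.
static llvm::GlobalVariable *
emitExampleList(CodeGenModule &CGM, llvm::PointerType *PtrToInt8Ty,
                llvm::IntegerType *Int32Ty,
                ArrayRef<std::pair<llvm::Constant *, llvm::Constant *>> Items) {
  llvm::StructType *EntryTy = llvm::StructType::get(
      CGM.getLLVMContext(), {PtrToInt8Ty, PtrToInt8Ty});

  ConstantInitBuilder Builder(CGM);
  auto List = Builder.beginStruct();
  List.addNullPointer(PtrToInt8Ty);     // e.g. a "next" pointer
  List.addInt(Int32Ty, Items.size());   // element count
  auto Entries = List.beginArray(EntryTy);
  for (const auto &Item : Items) {
    auto Entry = Entries.beginStruct(EntryTy);
    Entry.add(Item.first);
    Entry.add(Item.second);
    Entry.finishAndAddTo(Entries);
  }
  Entries.finishAndAddTo(List);
  return List.finishAndCreateGlobal(".example_list", CGM.getPointerAlign());
}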
/// Generates an IvarList. Used in construction of a objc_class.
@@ -1601,35 +1555,36 @@ llvm::Constant *CGObjCGNU::
GenerateIvarList(ArrayRef<llvm::Constant *> IvarNames,
ArrayRef<llvm::Constant *> IvarTypes,
ArrayRef<llvm::Constant *> IvarOffsets) {
- if (IvarNames.size() == 0)
+ if (IvarNames.empty())
return NULLPtr;
- // Get the method structure type.
+
+ ConstantInitBuilder Builder(CGM);
+
+ // Structure containing array count followed by array.
+ auto IvarList = Builder.beginStruct();
+ IvarList.addInt(IntTy, (int)IvarNames.size());
+
+ // Get the ivar structure type.
llvm::StructType *ObjCIvarTy = llvm::StructType::get(
PtrToInt8Ty,
PtrToInt8Ty,
IntTy,
nullptr);
- std::vector<llvm::Constant*> Ivars;
+
+ // Array of ivar structures.
+ auto Ivars = IvarList.beginArray(ObjCIvarTy);
for (unsigned int i = 0, e = IvarNames.size() ; i < e ; i++) {
- Ivars.push_back(llvm::ConstantStruct::get(
- ObjCIvarTy, {IvarNames[i], IvarTypes[i], IvarOffsets[i]}));
+ auto Ivar = Ivars.beginStruct(ObjCIvarTy);
+ Ivar.add(IvarNames[i]);
+ Ivar.add(IvarTypes[i]);
+ Ivar.add(IvarOffsets[i]);
+ Ivar.finishAndAddTo(Ivars);
}
-
- // Array of method structures
- llvm::ArrayType *ObjCIvarArrayTy = llvm::ArrayType::get(ObjCIvarTy,
- IvarNames.size());
-
- llvm::Constant *Elements[] = {
- llvm::ConstantInt::get(IntTy, (int)IvarNames.size()),
- llvm::ConstantArray::get(ObjCIvarArrayTy, Ivars)};
- // Structure containing array and array count
- llvm::StructType *ObjCIvarListTy = llvm::StructType::get(IntTy,
- ObjCIvarArrayTy,
- nullptr);
+ Ivars.finishAndAddTo(IvarList);
// Create an instance of the structure
- return MakeGlobal(ObjCIvarListTy, Elements, CGM.getPointerAlign(),
- ".objc_ivar_list");
+ return IvarList.finishAndCreateGlobal(".objc_ivar_list",
+ CGM.getPointerAlign());
}
/// Generate a class structure
@@ -1677,34 +1632,55 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
IntPtrTy, // strong_pointers
IntPtrTy, // weak_pointers
nullptr);
- llvm::Constant *Zero = llvm::ConstantInt::get(LongTy, 0);
+
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct(ClassTy);
+
// Fill in the structure
- std::vector<llvm::Constant*> Elements;
- Elements.push_back(llvm::ConstantExpr::getBitCast(MetaClass, PtrToInt8Ty));
- Elements.push_back(SuperClass);
- Elements.push_back(MakeConstantString(Name, ".class_name"));
- Elements.push_back(Zero);
- Elements.push_back(llvm::ConstantInt::get(LongTy, info));
+
+ // isa
+ Elements.addBitCast(MetaClass, PtrToInt8Ty);
+ // super_class
+ Elements.add(SuperClass);
+ // name
+ Elements.add(MakeConstantString(Name, ".class_name"));
+ // version
+ Elements.addInt(LongTy, 0);
+ // info
+ Elements.addInt(LongTy, info);
+ // instance_size
if (isMeta) {
llvm::DataLayout td(&TheModule);
- Elements.push_back(
- llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ClassTy) /
- CGM.getContext().getCharWidth()));
+ Elements.addInt(LongTy,
+ td.getTypeSizeInBits(ClassTy) /
+ CGM.getContext().getCharWidth());
} else
- Elements.push_back(InstanceSize);
- Elements.push_back(IVars);
- Elements.push_back(Methods);
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantExpr::getBitCast(Protocols, PtrTy));
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantInt::get(LongTy, 1));
- Elements.push_back(IvarOffsets);
- Elements.push_back(Properties);
- Elements.push_back(StrongIvarBitmap);
- Elements.push_back(WeakIvarBitmap);
+ Elements.add(InstanceSize);
+ // ivars
+ Elements.add(IVars);
+ // methods
+ Elements.add(Methods);
+ // These are all filled in by the runtime, so we pretend
+ // dtable
+ Elements.add(NULLPtr);
+ // subclass_list
+ Elements.add(NULLPtr);
+ // sibling_class
+ Elements.add(NULLPtr);
+ // protocols
+ Elements.addBitCast(Protocols, PtrTy);
+ // gc_object_type
+ Elements.add(NULLPtr);
+ // abi_version
+ Elements.addInt(LongTy, 1);
+ // ivar_offsets
+ Elements.add(IvarOffsets);
+ // properties
+ Elements.add(Properties);
+ // strong_pointers
+ Elements.add(StrongIvarBitmap);
+ // weak_pointers
+ Elements.add(WeakIvarBitmap);
// Create an instance of the structure
// This is now an externally visible symbol, so that we can speed up class
// messages in the next ABI. We may already have some weak references to
@@ -1713,13 +1689,13 @@ llvm::Constant *CGObjCGNU::GenerateClassStructure(
std::string(Name));
llvm::GlobalVariable *ClassRef = TheModule.getNamedGlobal(ClassSym);
llvm::Constant *Class =
- MakeGlobal(ClassTy, Elements, CGM.getPointerAlign(), ClassSym,
- llvm::GlobalValue::ExternalLinkage);
+ Elements.finishAndCreateGlobal(ClassSym, CGM.getPointerAlign(), false,
+ llvm::GlobalValue::ExternalLinkage);
if (ClassRef) {
- ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
+ ClassRef->replaceAllUsesWith(llvm::ConstantExpr::getBitCast(Class,
ClassRef->getType()));
- ClassRef->removeFromParent();
- Class->setName(ClassSym);
+ ClassRef->removeFromParent();
+ Class->setName(ClassSym);
}
return Class;
}
@@ -1728,38 +1704,33 @@ llvm::Constant *CGObjCGNU::
GenerateProtocolMethodList(ArrayRef<llvm::Constant *> MethodNames,
ArrayRef<llvm::Constant *> MethodTypes) {
// Get the method structure type.
- llvm::StructType *ObjCMethodDescTy = llvm::StructType::get(
- PtrToInt8Ty, // Really a selector, but the runtime does the casting for us.
- PtrToInt8Ty,
- nullptr);
- std::vector<llvm::Constant*> Methods;
+ llvm::StructType *ObjCMethodDescTy =
+ llvm::StructType::get(CGM.getLLVMContext(), { PtrToInt8Ty, PtrToInt8Ty });
+ ConstantInitBuilder Builder(CGM);
+ auto MethodList = Builder.beginStruct();
+ MethodList.addInt(IntTy, MethodNames.size());
+ auto Methods = MethodList.beginArray(ObjCMethodDescTy);
for (unsigned int i = 0, e = MethodTypes.size() ; i < e ; i++) {
- Methods.push_back(llvm::ConstantStruct::get(
- ObjCMethodDescTy, {MethodNames[i], MethodTypes[i]}));
- }
- llvm::ArrayType *ObjCMethodArrayTy = llvm::ArrayType::get(ObjCMethodDescTy,
- MethodNames.size());
- llvm::Constant *Array = llvm::ConstantArray::get(ObjCMethodArrayTy,
- Methods);
- llvm::StructType *ObjCMethodDescListTy = llvm::StructType::get(
- IntTy, ObjCMethodArrayTy, nullptr);
- Methods.clear();
- Methods.push_back(llvm::ConstantInt::get(IntTy, MethodNames.size()));
- Methods.push_back(Array);
- return MakeGlobal(ObjCMethodDescListTy, Methods, CGM.getPointerAlign(),
- ".objc_method_list");
+ auto Method = Methods.beginStruct(ObjCMethodDescTy);
+ Method.add(MethodNames[i]);
+ Method.add(MethodTypes[i]);
+ Method.finishAndAddTo(Methods);
+ }
+ Methods.finishAndAddTo(MethodList);
+ return MethodList.finishAndCreateGlobal(".objc_method_list",
+ CGM.getPointerAlign());
}
// Create the protocol list structure used in classes, categories and so on
-llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
- llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
- Protocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(
- PtrTy, //Should be a recurisve pointer, but it's always NULL here.
- SizeTy,
- ProtocolArrayTy,
- nullptr);
- std::vector<llvm::Constant*> Elements;
+llvm::Constant *
+CGObjCGNU::GenerateProtocolList(ArrayRef<std::string> Protocols) {
+
+ ConstantInitBuilder Builder(CGM);
+ auto ProtocolList = Builder.beginStruct();
+ ProtocolList.add(NULLPtr);
+ ProtocolList.addInt(LongTy, Protocols.size());
+
+ auto Elements = ProtocolList.beginArray(PtrToInt8Ty);
for (const std::string *iter = Protocols.begin(), *endIter = Protocols.end();
iter != endIter ; iter++) {
llvm::Constant *protocol = nullptr;
@@ -1770,18 +1741,11 @@ llvm::Constant *CGObjCGNU::GenerateProtocolList(ArrayRef<std::string>Protocols){
} else {
protocol = value->getValue();
}
- llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(protocol,
- PtrToInt8Ty);
- Elements.push_back(Ptr);
- }
- llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
- Elements);
- Elements.clear();
- Elements.push_back(NULLPtr);
- Elements.push_back(llvm::ConstantInt::get(LongTy, Protocols.size()));
- Elements.push_back(ProtocolArray);
- return MakeGlobal(ProtocolListTy, Elements, CGM.getPointerAlign(),
- ".objc_protocol_list");
+ Elements.addBitCast(protocol, PtrToInt8Ty);
+ }
+ Elements.finishAndAddTo(ProtocolList);
+ return ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign());
}
llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
@@ -1792,33 +1756,28 @@ llvm::Value *CGObjCGNU::GenerateProtocolRef(CodeGenFunction &CGF,
return CGF.Builder.CreateBitCast(protocol, llvm::PointerType::getUnqual(T));
}
-llvm::Constant *CGObjCGNU::GenerateEmptyProtocol(
- const std::string &ProtocolName) {
- SmallVector<std::string, 0> EmptyStringVector;
- SmallVector<llvm::Constant*, 0> EmptyConstantVector;
-
- llvm::Constant *ProtocolList = GenerateProtocolList(EmptyStringVector);
- llvm::Constant *MethodList =
- GenerateProtocolMethodList(EmptyConstantVector, EmptyConstantVector);
+llvm::Constant *
+CGObjCGNU::GenerateEmptyProtocol(const std::string &ProtocolName) {
+ llvm::Constant *ProtocolList = GenerateProtocolList({});
+ llvm::Constant *MethodList = GenerateProtocolMethodList({}, {});
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
- PtrToInt8Ty,
- ProtocolList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- MethodList->getType(),
- nullptr);
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- llvm::Constant *Elements[] = {
- llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy),
- MakeConstantString(ProtocolName, ".objc_protocol_name"), ProtocolList,
- MethodList, MethodList, MethodList, MethodList};
- return MakeGlobal(ProtocolTy, Elements, CGM.getPointerAlign(),
- ".objc_protocol");
+ Elements.add(llvm::ConstantExpr::getIntToPtr(
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+
+ Elements.add(MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.add(ProtocolList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ Elements.add(MethodList);
+ return Elements.finishAndCreateGlobal(".objc_protocol",
+ CGM.getPointerAlign());
}
void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
@@ -1837,8 +1796,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
SmallVector<llvm::Constant*, 16> OptionalInstanceMethodNames;
SmallVector<llvm::Constant*, 16> OptionalInstanceMethodTypes;
for (const auto *I : PD->instance_methods()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I, TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
OptionalInstanceMethodNames.push_back(
MakeConstantString(I->getSelector().getAsString()));
@@ -1855,8 +1813,7 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
SmallVector<llvm::Constant*, 16> OptionalClassMethodNames;
SmallVector<llvm::Constant*, 16> OptionalClassMethodTypes;
for (const auto *I : PD->class_methods()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
if (I->getImplementationControl() == ObjCMethodDecl::Optional) {
OptionalClassMethodNames.push_back(
MakeConstantString(I->getSelector().getAsString()));
@@ -1885,142 +1842,139 @@ void CGObjCGNU::GenerateProtocol(const ObjCProtocolDecl *PD) {
// The isSynthesized value is always set to 0 in a protocol. It exists to
// simplify the runtime library by allowing it to use the same data
// structures for protocol metadata everywhere.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
- PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
- std::vector<llvm::Constant*> Properties;
- std::vector<llvm::Constant*> OptionalProperties;
- // Add all of the property methods need adding to the method list and to the
- // property metadata list.
- for (auto *property : PD->instance_properties()) {
- std::vector<llvm::Constant*> Fields;
+ llvm::Constant *PropertyList;
+ llvm::Constant *OptionalPropertyList;
+ {
+ llvm::StructType *propertyMetadataTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
+
+ unsigned numReqProperties = 0, numOptProperties = 0;
+ for (auto property : PD->instance_properties()) {
+ if (property->isOptional())
+ numOptProperties++;
+ else
+ numReqProperties++;
+ }
- Fields.push_back(MakePropertyEncodingString(property, nullptr));
- PushPropertyAttributes(Fields, property);
+ ConstantInitBuilder reqPropertyListBuilder(CGM);
+ auto reqPropertiesList = reqPropertyListBuilder.beginStruct();
+ reqPropertiesList.addInt(IntTy, numReqProperties);
+ reqPropertiesList.add(NULLPtr);
+ auto reqPropertiesArray = reqPropertiesList.beginArray(propertyMetadataTy);
+
+ ConstantInitBuilder optPropertyListBuilder(CGM);
+ auto optPropertiesList = optPropertyListBuilder.beginStruct();
+ optPropertiesList.addInt(IntTy, numOptProperties);
+ optPropertiesList.add(NULLPtr);
+ auto optPropertiesArray = optPropertiesList.beginArray(propertyMetadataTy);
+
+ // Add all of the property methods that need adding to the method list and
+ // to the property metadata list.
+ for (auto *property : PD->instance_properties()) {
+ auto &propertiesArray =
+ (property->isOptional() ? optPropertiesArray : reqPropertiesArray);
+ auto fields = propertiesArray.beginStruct(propertyMetadataTy);
+
+ fields.add(MakePropertyEncodingString(property, nullptr));
+ PushPropertyAttributes(fields, property);
+
+ if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
+ std::string typeStr = Context.getObjCEncodingForMethodDecl(getter);
+ llvm::Constant *typeEncoding = MakeConstantString(typeStr);
+ InstanceMethodTypes.push_back(typeEncoding);
+ fields.add(MakeConstantString(getter->getSelector().getAsString()));
+ fields.add(typeEncoding);
+ } else {
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
+ }
+ if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
+ std::string typeStr = Context.getObjCEncodingForMethodDecl(setter);
+ llvm::Constant *typeEncoding = MakeConstantString(typeStr);
+ InstanceMethodTypes.push_back(typeEncoding);
+ fields.add(MakeConstantString(setter->getSelector().getAsString()));
+ fields.add(typeEncoding);
+ } else {
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
+ }
- if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(getter,TypeStr);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
- Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
- } else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
- }
- if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(setter,TypeStr);
- llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
- Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
- } else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
- }
- if (property->getPropertyImplementation() == ObjCPropertyDecl::Optional) {
- OptionalProperties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
- } else {
- Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
+ fields.finishAndAddTo(propertiesArray);
}
+
+ reqPropertiesArray.finishAndAddTo(reqPropertiesList);
+ PropertyList =
+ reqPropertiesList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
+
+ optPropertiesArray.finishAndAddTo(optPropertiesList);
+ OptionalPropertyList =
+ optPropertiesList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
}
- llvm::Constant *PropertyArray = llvm::ConstantArray::get(
- llvm::ArrayType::get(PropertyMetadataTy, Properties.size()), Properties);
- llvm::Constant* PropertyListInitFields[] =
- {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
-
- llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::getAnon(PropertyListInitFields);
- llvm::Constant *PropertyList = new llvm::GlobalVariable(TheModule,
- PropertyListInit->getType(), false, llvm::GlobalValue::InternalLinkage,
- PropertyListInit, ".objc_property_list");
-
- llvm::Constant *OptionalPropertyArray =
- llvm::ConstantArray::get(llvm::ArrayType::get(PropertyMetadataTy,
- OptionalProperties.size()) , OptionalProperties);
- llvm::Constant* OptionalPropertyListInitFields[] = {
- llvm::ConstantInt::get(IntTy, OptionalProperties.size()), NULLPtr,
- OptionalPropertyArray };
-
- llvm::Constant *OptionalPropertyListInit =
- llvm::ConstantStruct::getAnon(OptionalPropertyListInitFields);
- llvm::Constant *OptionalPropertyList = new llvm::GlobalVariable(TheModule,
- OptionalPropertyListInit->getType(), false,
- llvm::GlobalValue::InternalLinkage, OptionalPropertyListInit,
- ".objc_property_list");
// Protocols are objects containing lists of the methods implemented and
// protocols adopted.
- llvm::StructType *ProtocolTy = llvm::StructType::get(IdTy,
- PtrToInt8Ty,
- ProtocolList->getType(),
- InstanceMethodList->getType(),
- ClassMethodList->getType(),
- OptionalInstanceMethodList->getType(),
- OptionalClassMethodList->getType(),
- PropertyList->getType(),
- OptionalPropertyList->getType(),
- nullptr);
// The isa pointer must be set to a magic number so the runtime knows it's
// the correct layout.
- llvm::Constant *Elements[] = {
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+ Elements.add(
llvm::ConstantExpr::getIntToPtr(
- llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy),
- MakeConstantString(ProtocolName, ".objc_protocol_name"), ProtocolList,
- InstanceMethodList, ClassMethodList, OptionalInstanceMethodList,
- OptionalClassMethodList, PropertyList, OptionalPropertyList};
+ llvm::ConstantInt::get(Int32Ty, ProtocolVersion), IdTy));
+ Elements.add(
+ MakeConstantString(ProtocolName, ".objc_protocol_name"));
+ Elements.add(ProtocolList);
+ Elements.add(InstanceMethodList);
+ Elements.add(ClassMethodList);
+ Elements.add(OptionalInstanceMethodList);
+ Elements.add(OptionalClassMethodList);
+ Elements.add(PropertyList);
+ Elements.add(OptionalPropertyList);
ExistingProtocols[ProtocolName] =
- llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolTy, Elements,
- CGM.getPointerAlign(), ".objc_protocol"), IdTy);
+ llvm::ConstantExpr::getBitCast(
+ Elements.finishAndCreateGlobal(".objc_protocol", CGM.getPointerAlign()),
+ IdTy);
}
void CGObjCGNU::GenerateProtocolHolderCategory() {
// Collect information about instance methods
SmallVector<Selector, 1> MethodSels;
SmallVector<llvm::Constant*, 1> MethodTypes;
- std::vector<llvm::Constant*> Elements;
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+
const std::string ClassName = "__ObjC_Protocol_Holder_Ugly_Hack";
const std::string CategoryName = "AnotherHack";
- Elements.push_back(MakeConstantString(CategoryName));
- Elements.push_back(MakeConstantString(ClassName));
+ Elements.add(MakeConstantString(CategoryName));
+ Elements.add(MakeConstantString(ClassName));
// Instance method list
- Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy));
+ Elements.addBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, false), PtrTy);
// Class method list
- Elements.push_back(llvm::ConstantExpr::getBitCast(GenerateMethodList(
- ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy));
+ Elements.addBitCast(GenerateMethodList(
+ ClassName, CategoryName, MethodSels, MethodTypes, true), PtrTy);
+
// Protocol list
- llvm::ArrayType *ProtocolArrayTy = llvm::ArrayType::get(PtrTy,
- ExistingProtocols.size());
- llvm::StructType *ProtocolListTy = llvm::StructType::get(
- PtrTy, //Should be a recurisve pointer, but it's always NULL here.
- SizeTy,
- ProtocolArrayTy,
- nullptr);
- std::vector<llvm::Constant*> ProtocolElements;
- for (llvm::StringMapIterator<llvm::Constant*> iter =
- ExistingProtocols.begin(), endIter = ExistingProtocols.end();
+ ConstantInitBuilder ProtocolListBuilder(CGM);
+ auto ProtocolList = ProtocolListBuilder.beginStruct();
+ ProtocolList.add(NULLPtr);
+ ProtocolList.addInt(LongTy, ExistingProtocols.size());
+ auto ProtocolElements = ProtocolList.beginArray(PtrTy);
+ for (auto iter = ExistingProtocols.begin(), endIter = ExistingProtocols.end();
iter != endIter ; iter++) {
- llvm::Constant *Ptr = llvm::ConstantExpr::getBitCast(iter->getValue(),
- PtrTy);
- ProtocolElements.push_back(Ptr);
- }
- llvm::Constant * ProtocolArray = llvm::ConstantArray::get(ProtocolArrayTy,
- ProtocolElements);
- ProtocolElements.clear();
- ProtocolElements.push_back(NULLPtr);
- ProtocolElements.push_back(llvm::ConstantInt::get(LongTy,
- ExistingProtocols.size()));
- ProtocolElements.push_back(ProtocolArray);
- Elements.push_back(llvm::ConstantExpr::getBitCast(MakeGlobal(ProtocolListTy,
- ProtocolElements, CGM.getPointerAlign(),
- ".objc_protocol_list"), PtrTy));
+ ProtocolElements.addBitCast(iter->getValue(), PtrTy);
+ }
+ ProtocolElements.finishAndAddTo(ProtocolList);
+ Elements.addBitCast(
+ ProtocolList.finishAndCreateGlobal(".objc_protocol_list",
+ CGM.getPointerAlign()),
+ PtrTy);
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
PtrTy));
}
@@ -2055,13 +2009,16 @@ llvm::Constant *CGObjCGNU::MakeBitField(ArrayRef<bool> bits) {
}
values.push_back(llvm::ConstantInt::get(Int32Ty, word));
}
- llvm::ArrayType *arrayTy = llvm::ArrayType::get(Int32Ty, values.size());
- llvm::Constant *array = llvm::ConstantArray::get(arrayTy, values);
- llvm::Constant *fields[2] = {
- llvm::ConstantInt::get(Int32Ty, values.size()),
- array };
- llvm::Constant *GS = MakeGlobal(llvm::StructType::get(Int32Ty, arrayTy,
- nullptr), fields, CharUnits::fromQuantity(4));
+
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct();
+ fields.addInt(Int32Ty, values.size());
+ auto array = fields.beginArray();
+ for (auto v : values) array.add(v);
+ array.finishAndAddTo(fields);
+
+ llvm::Constant *GS =
+ fields.finishAndCreateGlobal("", CharUnits::fromQuantity(4));
llvm::Constant *ptr = llvm::ConstantExpr::getPtrToInt(GS, IntPtrTy);
return ptr;
}
@@ -2074,8 +2031,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (const auto *I : OCD->instance_methods()) {
InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2084,8 +2040,7 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (const auto *I : OCD->class_methods()) {
ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = CGM.getContext().getObjCEncodingForMethodDecl(I);
ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2097,23 +2052,24 @@ void CGObjCGNU::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
E = Protos.end(); I != E; ++I)
Protocols.push_back((*I)->getNameAsString());
- llvm::Constant *Elements[] = {
- MakeConstantString(CategoryName), MakeConstantString(ClassName),
- // Instance method list
- llvm::ConstantExpr::getBitCast(
+ ConstantInitBuilder Builder(CGM);
+ auto Elements = Builder.beginStruct();
+ Elements.add(MakeConstantString(CategoryName));
+ Elements.add(MakeConstantString(ClassName));
+ // Instance method list
+ Elements.addBitCast(
GenerateMethodList(ClassName, CategoryName, InstanceMethodSels,
InstanceMethodTypes, false),
- PtrTy),
- // Class method list
- llvm::ConstantExpr::getBitCast(GenerateMethodList(ClassName, CategoryName,
- ClassMethodSels,
- ClassMethodTypes, true),
- PtrTy),
- // Protocol list
- llvm::ConstantExpr::getBitCast(GenerateProtocolList(Protocols), PtrTy)};
+ PtrTy);
+ // Class method list
+ Elements.addBitCast(
+ GenerateMethodList(ClassName, CategoryName, ClassMethodSels,
+ ClassMethodTypes, true),
+ PtrTy);
+ // Protocol list
+ Elements.addBitCast(GenerateProtocolList(Protocols), PtrTy);
Categories.push_back(llvm::ConstantExpr::getBitCast(
- MakeGlobal(llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty,
- PtrTy, PtrTy, PtrTy, nullptr), Elements, CGM.getPointerAlign()),
+ Elements.finishAndCreateGlobal("", CGM.getPointerAlign()),
PtrTy));
}
@@ -2123,65 +2079,67 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
ASTContext &Context = CGM.getContext();
// Property metadata: name, attributes, attributes2, padding1, padding2,
// setter name, setter types, getter name, getter types.
- llvm::StructType *PropertyMetadataTy = llvm::StructType::get(
- PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
- PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty, nullptr);
- std::vector<llvm::Constant*> Properties;
+ llvm::StructType *propertyMetadataTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, Int8Ty, Int8Ty, Int8Ty, Int8Ty, PtrToInt8Ty,
+ PtrToInt8Ty, PtrToInt8Ty, PtrToInt8Ty });
+
+ unsigned numProperties = 0;
+ for (auto *propertyImpl : OID->property_impls()) {
+ (void) propertyImpl;
+ numProperties++;
+ }
+
+ ConstantInitBuilder builder(CGM);
+ auto propertyList = builder.beginStruct();
+ propertyList.addInt(IntTy, numProperties);
+ propertyList.add(NULLPtr);
+ auto properties = propertyList.beginArray(propertyMetadataTy);
// Add all of the property methods need adding to the method list and to the
// property metadata list.
for (auto *propertyImpl : OID->property_impls()) {
- std::vector<llvm::Constant*> Fields;
+ auto fields = properties.beginStruct(propertyMetadataTy);
ObjCPropertyDecl *property = propertyImpl->getPropertyDecl();
bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Synthesize);
bool isDynamic = (propertyImpl->getPropertyImplementation() ==
ObjCPropertyImplDecl::Dynamic);
- Fields.push_back(MakePropertyEncodingString(property, OID));
- PushPropertyAttributes(Fields, property, isSynthesized, isDynamic);
+ fields.add(MakePropertyEncodingString(property, OID));
+ PushPropertyAttributes(fields, property, isSynthesized, isDynamic);
if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(getter,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(getter);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
if (isSynthesized) {
InstanceMethodTypes.push_back(TypeEncoding);
InstanceMethodSels.push_back(getter->getSelector());
}
- Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
+ fields.add(MakeConstantString(getter->getSelector().getAsString()));
+ fields.add(TypeEncoding);
} else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
}
if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(setter,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(setter);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
if (isSynthesized) {
InstanceMethodTypes.push_back(TypeEncoding);
InstanceMethodSels.push_back(setter->getSelector());
}
- Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
- Fields.push_back(TypeEncoding);
+ fields.add(MakeConstantString(setter->getSelector().getAsString()));
+ fields.add(TypeEncoding);
} else {
- Fields.push_back(NULLPtr);
- Fields.push_back(NULLPtr);
+ fields.add(NULLPtr);
+ fields.add(NULLPtr);
}
- Properties.push_back(llvm::ConstantStruct::get(PropertyMetadataTy, Fields));
- }
- llvm::ArrayType *PropertyArrayTy =
- llvm::ArrayType::get(PropertyMetadataTy, Properties.size());
- llvm::Constant *PropertyArray = llvm::ConstantArray::get(PropertyArrayTy,
- Properties);
- llvm::Constant* PropertyListInitFields[] =
- {llvm::ConstantInt::get(IntTy, Properties.size()), NULLPtr, PropertyArray};
-
- llvm::Constant *PropertyListInit =
- llvm::ConstantStruct::getAnon(PropertyListInitFields);
- return new llvm::GlobalVariable(TheModule, PropertyListInit->getType(), false,
- llvm::GlobalValue::InternalLinkage, PropertyListInit,
- ".objc_property_list");
+ fields.finishAndAddTo(properties);
+ }
+ properties.finishAndAddTo(propertyList);
+
+ return propertyList.finishAndCreateGlobal(".objc_property_list",
+ CGM.getPointerAlign());
}
void CGObjCGNU::RegisterAlias(const ObjCCompatibleAliasDecl *OAD) {
@@ -2230,7 +2188,8 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 16> IvarTypes;
SmallVector<llvm::Constant*, 16> IvarOffsets;
- std::vector<llvm::Constant*> IvarOffsetValues;
+ ConstantInitBuilder IvarOffsetBuilder(CGM);
+ auto IvarOffsetValues = IvarOffsetBuilder.beginArray(PtrToIntTy);
SmallVector<bool, 16> WeakIvars;
SmallVector<bool, 16> StrongIvars;
@@ -2274,7 +2233,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
"__objc_ivar_offset_value_" + ClassName +"." +
IVD->getNameAsString());
IvarOffsets.push_back(OffsetValue);
- IvarOffsetValues.push_back(OffsetVar);
+ IvarOffsetValues.add(OffsetVar);
Qualifiers::ObjCLifetime lt = IVD->getType().getQualifiers().getObjCLifetime();
switch (lt) {
case Qualifiers::OCL_Strong:
@@ -2293,16 +2252,15 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
llvm::Constant *StrongIvarBitmap = MakeBitField(StrongIvars);
llvm::Constant *WeakIvarBitmap = MakeBitField(WeakIvars);
llvm::GlobalVariable *IvarOffsetArray =
- MakeGlobalArray(PtrToIntTy, IvarOffsetValues, CGM.getPointerAlign(),
- ".ivar.offsets");
+ IvarOffsetValues.finishAndCreateGlobal(".ivar.offsets",
+ CGM.getPointerAlign());
// Collect information about instance methods
SmallVector<Selector, 16> InstanceMethodSels;
SmallVector<llvm::Constant*, 16> InstanceMethodTypes;
for (const auto *I : OID->instance_methods()) {
InstanceMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
InstanceMethodTypes.push_back(MakeConstantString(TypeStr));
}
@@ -2314,8 +2272,7 @@ void CGObjCGNU::GenerateClass(const ObjCImplementationDecl *OID) {
SmallVector<llvm::Constant*, 16> ClassMethodTypes;
for (const auto *I : OID->class_methods()) {
ClassMethodSels.push_back(I->getSelector());
- std::string TypeStr;
- Context.getObjCEncodingForMethodDecl(I,TypeStr);
+ std::string TypeStr = Context.getObjCEncodingForMethodDecl(I);
ClassMethodTypes.push_back(MakeConstantString(TypeStr));
}
// Collect the names of referenced protocols
@@ -2439,170 +2396,180 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- llvm::StructType *SelStructTy = dyn_cast<llvm::StructType>(
- SelectorTy->getElementType());
- llvm::Type *SelStructPtrTy = SelectorTy;
- if (!SelStructTy) {
- SelStructTy = llvm::StructType::get(PtrToInt8Ty, PtrToInt8Ty, nullptr);
- SelStructPtrTy = llvm::PointerType::getUnqual(SelStructTy);
+ llvm::StructType *selStructTy =
+ dyn_cast<llvm::StructType>(SelectorTy->getElementType());
+ llvm::Type *selStructPtrTy = SelectorTy;
+ if (!selStructTy) {
+ selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
+ { PtrToInt8Ty, PtrToInt8Ty });
+ selStructPtrTy = llvm::PointerType::getUnqual(selStructTy);
}
- std::vector<llvm::Constant*> Elements;
- llvm::Constant *Statics = NULLPtr;
// Generate statics list:
+ llvm::Constant *statics = NULLPtr;
if (!ConstantStrings.empty()) {
- llvm::ArrayType *StaticsArrayTy = llvm::ArrayType::get(PtrToInt8Ty,
- ConstantStrings.size() + 1);
- ConstantStrings.push_back(NULLPtr);
-
- StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
-
- if (StringClass.empty()) StringClass = "NXConstantString";
-
- Elements.push_back(MakeConstantString(StringClass,
- ".objc_static_class_name"));
- Elements.push_back(llvm::ConstantArray::get(StaticsArrayTy,
- ConstantStrings));
- llvm::StructType *StaticsListTy =
- llvm::StructType::get(PtrToInt8Ty, StaticsArrayTy, nullptr);
- llvm::Type *StaticsListPtrTy =
- llvm::PointerType::getUnqual(StaticsListTy);
- Statics = MakeGlobal(StaticsListTy, Elements, CGM.getPointerAlign(),
- ".objc_statics");
- llvm::ArrayType *StaticsListArrayTy =
- llvm::ArrayType::get(StaticsListPtrTy, 2);
- Elements.clear();
- Elements.push_back(Statics);
- Elements.push_back(llvm::Constant::getNullValue(StaticsListPtrTy));
- Statics = MakeGlobal(StaticsListArrayTy, Elements,
- CGM.getPointerAlign(), ".objc_statics_ptr");
- Statics = llvm::ConstantExpr::getBitCast(Statics, PtrTy);
- }
- // Array of classes, categories, and constant objects
- llvm::ArrayType *ClassListTy = llvm::ArrayType::get(PtrToInt8Ty,
- Classes.size() + Categories.size() + 2);
- llvm::StructType *SymTabTy = llvm::StructType::get(LongTy, SelStructPtrTy,
- llvm::Type::getInt16Ty(VMContext),
- llvm::Type::getInt16Ty(VMContext),
- ClassListTy, nullptr);
-
- Elements.clear();
- // Pointer to an array of selectors used in this module.
- std::vector<llvm::Constant*> Selectors;
- std::vector<llvm::GlobalAlias*> SelectorAliases;
- for (SelectorMap::iterator iter = SelectorTable.begin(),
- iterEnd = SelectorTable.end(); iter != iterEnd ; ++iter) {
- std::string SelNameStr = iter->first.getAsString();
- llvm::Constant *SelName = ExportUniqueString(SelNameStr, ".objc_sel_name");
- SmallVectorImpl<TypedSelector> &Types = iter->second;
- for (SmallVectorImpl<TypedSelector>::iterator i = Types.begin(),
- e = Types.end() ; i!=e ; i++) {
- llvm::Constant *SelectorTypeEncoding = NULLPtr;
- if (!i->first.empty())
- SelectorTypeEncoding = MakeConstantString(i->first, ".objc_sel_types");
- Elements.push_back(SelName);
- Elements.push_back(SelectorTypeEncoding);
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
- // Store the selector alias for later replacement
- SelectorAliases.push_back(i->second);
- }
+ llvm::GlobalVariable *fileStatics = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto staticsStruct = builder.beginStruct();
+ StringRef stringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ if (stringClass.empty()) stringClass = "NXConstantString";
+ staticsStruct.add(MakeConstantString(stringClass,
+ ".objc_static_class_name"));
+ auto array = staticsStruct.beginArray();
+ array.addAll(ConstantStrings);
+ array.add(NULLPtr);
+ array.finishAndAddTo(staticsStruct);
+ return staticsStruct.finishAndCreateGlobal(".objc_statics",
+ CGM.getPointerAlign());
+ }();
+ ConstantInitBuilder builder(CGM);
+ auto allStaticsArray = builder.beginArray(fileStatics->getType());
+ allStaticsArray.add(fileStatics);
+ allStaticsArray.addNullPointer(fileStatics->getType());
+ statics = allStaticsArray.finishAndCreateGlobal(".objc_statics_ptr",
+ CGM.getPointerAlign());
+ statics = llvm::ConstantExpr::getBitCast(statics, PtrTy);
}
- unsigned SelectorCount = Selectors.size();
- // NULL-terminate the selector list. This should not actually be required,
- // because the selector list has a length field. Unfortunately, the GCC
- // runtime decides to ignore the length field and expects a NULL terminator,
- // and GCC cooperates with this by always setting the length to 0.
- Elements.push_back(NULLPtr);
- Elements.push_back(NULLPtr);
- Selectors.push_back(llvm::ConstantStruct::get(SelStructTy, Elements));
- Elements.clear();
-
- // Number of static selectors
- Elements.push_back(llvm::ConstantInt::get(LongTy, SelectorCount));
- llvm::GlobalVariable *SelectorList =
- MakeGlobalArray(SelStructTy, Selectors, CGM.getPointerAlign(),
- ".objc_selector_list");
- Elements.push_back(llvm::ConstantExpr::getBitCast(SelectorList,
- SelStructPtrTy));
- // Now that all of the static selectors exist, create pointers to them.
- for (unsigned int i=0 ; i<SelectorCount ; i++) {
+ // Array of classes, categories, and constant objects.
+
+ SmallVector<llvm::GlobalAlias*, 16> selectorAliases;
+ unsigned selectorCount;
+
+ // Pointer to an array of selectors used in this module.
+ llvm::GlobalVariable *selectorList = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto selectors = builder.beginArray(selStructTy);
+ auto &table = SelectorTable; // MSVC workaround
+ for (auto &entry : table) {
+
+ std::string selNameStr = entry.first.getAsString();
+ llvm::Constant *selName = ExportUniqueString(selNameStr, ".objc_sel_name");
+
+ for (TypedSelector &sel : entry.second) {
+ llvm::Constant *selectorTypeEncoding = NULLPtr;
+ if (!sel.first.empty())
+ selectorTypeEncoding =
+ MakeConstantString(sel.first, ".objc_sel_types");
+
+ auto selStruct = selectors.beginStruct(selStructTy);
+ selStruct.add(selName);
+ selStruct.add(selectorTypeEncoding);
+ selStruct.finishAndAddTo(selectors);
+
+ // Store the selector alias for later replacement
+ selectorAliases.push_back(sel.second);
+ }
+ }
+
+ // Remember the number of entries in the selector table.
+ selectorCount = selectors.size();
- llvm::Constant *Idxs[] = {Zeros[0],
- llvm::ConstantInt::get(Int32Ty, i), Zeros[0]};
+ // NULL-terminate the selector list. This should not actually be required,
+ // because the selector list has a length field. Unfortunately, the GCC
+ // runtime decides to ignore the length field and expects a NULL terminator,
+ // and GCC cooperates with this by always setting the length to 0.
+ auto selStruct = selectors.beginStruct(selStructTy);
+ selStruct.add(NULLPtr);
+ selStruct.add(NULLPtr);
+ selStruct.finishAndAddTo(selectors);
+
+ return selectors.finishAndCreateGlobal(".objc_selector_list",
+ CGM.getPointerAlign());
+ }();
+
+ // Now that all of the static selectors exist, create pointers to them.
+ for (unsigned i = 0; i < selectorCount; ++i) {
+ llvm::Constant *idxs[] = {
+ Zeros[0],
+ llvm::ConstantInt::get(Int32Ty, i)
+ };
// FIXME: We're generating redundant loads and stores here!
- llvm::Constant *SelPtr = llvm::ConstantExpr::getGetElementPtr(
- SelectorList->getValueType(), SelectorList, makeArrayRef(Idxs, 2));
+ llvm::Constant *selPtr = llvm::ConstantExpr::getGetElementPtr(
+ selectorList->getValueType(), selectorList, idxs);
// If selectors are defined as an opaque type, cast the pointer to this
// type.
- SelPtr = llvm::ConstantExpr::getBitCast(SelPtr, SelectorTy);
- SelectorAliases[i]->replaceAllUsesWith(SelPtr);
- SelectorAliases[i]->eraseFromParent();
- }
-
- // Number of classes defined.
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
- Classes.size()));
- // Number of categories defined
- Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
- Categories.size()));
- // Create an array of classes, then categories, then static object instances
- Classes.insert(Classes.end(), Categories.begin(), Categories.end());
- // NULL-terminated list of static object instances (mainly constant strings)
- Classes.push_back(Statics);
- Classes.push_back(NULLPtr);
- llvm::Constant *ClassList = llvm::ConstantArray::get(ClassListTy, Classes);
- Elements.push_back(ClassList);
- // Construct the symbol table
- llvm::Constant *SymTab =
- MakeGlobal(SymTabTy, Elements, CGM.getPointerAlign());
+ selPtr = llvm::ConstantExpr::getBitCast(selPtr, SelectorTy);
+ selectorAliases[i]->replaceAllUsesWith(selPtr);
+ selectorAliases[i]->eraseFromParent();
+ }
+
+ llvm::GlobalVariable *symtab = [&] {
+ ConstantInitBuilder builder(CGM);
+ auto symtab = builder.beginStruct();
+
+ // Number of static selectors
+ symtab.addInt(LongTy, selectorCount);
+
+ symtab.addBitCast(selectorList, selStructPtrTy);
+
+ // Number of classes defined.
+ symtab.addInt(CGM.Int16Ty, Classes.size());
+ // Number of categories defined
+ symtab.addInt(CGM.Int16Ty, Categories.size());
+
+ // Create an array of classes, then categories, then static object instances
+ auto classList = symtab.beginArray(PtrToInt8Ty);
+ classList.addAll(Classes);
+ classList.addAll(Categories);
+ // NULL-terminated list of static object instances (mainly constant strings)
+ classList.add(statics);
+ classList.add(NULLPtr);
+ classList.finishAndAddTo(symtab);
+
+ // Construct the symbol table.
+ return symtab.finishAndCreateGlobal("", CGM.getPointerAlign());
+ }();
// The symbol table is contained in a module which has some version-checking
// constants
- llvm::StructType * ModuleTy = llvm::StructType::get(LongTy, LongTy,
- PtrToInt8Ty, llvm::PointerType::getUnqual(SymTabTy),
- (RuntimeVersion >= 10) ? IntTy : nullptr, nullptr);
- Elements.clear();
- // Runtime version, used for ABI compatibility checking.
- Elements.push_back(llvm::ConstantInt::get(LongTy, RuntimeVersion));
- // sizeof(ModuleTy)
- llvm::DataLayout td(&TheModule);
- Elements.push_back(
- llvm::ConstantInt::get(LongTy,
- td.getTypeSizeInBits(ModuleTy) /
- CGM.getContext().getCharWidth()));
-
- // The path to the source file where this module was declared
- SourceManager &SM = CGM.getContext().getSourceManager();
- const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
- std::string path =
- std::string(mainFile->getDir()->getName()) + '/' + mainFile->getName();
- Elements.push_back(MakeConstantString(path, ".objc_source_file_name"));
- Elements.push_back(SymTab);
-
- if (RuntimeVersion >= 10)
- switch (CGM.getLangOpts().getGC()) {
+ llvm::Constant *module = [&] {
+ llvm::Type *moduleEltTys[] = {
+ LongTy, LongTy, PtrToInt8Ty, symtab->getType(), IntTy
+ };
+ llvm::StructType *moduleTy =
+ llvm::StructType::get(CGM.getLLVMContext(),
+ makeArrayRef(moduleEltTys).drop_back(unsigned(RuntimeVersion < 10)));
+
+ ConstantInitBuilder builder(CGM);
+ auto module = builder.beginStruct(moduleTy);
+ // Runtime version, used for ABI compatibility checking.
+ module.addInt(LongTy, RuntimeVersion);
+ // sizeof(ModuleTy)
+ module.addInt(LongTy, CGM.getDataLayout().getTypeStoreSize(moduleTy));
+
+ // The path to the source file where this module was declared
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ std::string path =
+ (Twine(mainFile->getDir()->getName()) + "/" + mainFile->getName()).str();
+ module.add(MakeConstantString(path, ".objc_source_file_name"));
+ module.add(symtab);
+
+ if (RuntimeVersion >= 10) {
+ switch (CGM.getLangOpts().getGC()) {
case LangOptions::GCOnly:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 2));
+ module.addInt(IntTy, 2);
break;
case LangOptions::NonGC:
if (CGM.getLangOpts().ObjCAutoRefCount)
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ module.addInt(IntTy, 1);
else
- Elements.push_back(llvm::ConstantInt::get(IntTy, 0));
+ module.addInt(IntTy, 0);
break;
case LangOptions::HybridGC:
- Elements.push_back(llvm::ConstantInt::get(IntTy, 1));
+ module.addInt(IntTy, 1);
break;
+ }
}
- llvm::Value *Module = MakeGlobal(ModuleTy, Elements, CGM.getPointerAlign());
+ return module.finishAndCreateGlobal("", CGM.getPointerAlign());
+ }();
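
[Editorial note] One subtlety in the module-structure emission above: the trailing IntTy field (the GC mode) only exists for runtime version 10 and later, and the struct type is built by conditionally dropping it from the element list. Spelled out, with symtabPtrTy standing in for symtab->getType():

    // drop_back(1) removes the GC-mode slot for runtimes older than v10;
    // drop_back(0) keeps all five fields.
    llvm::Type *moduleEltTys[] = { LongTy, LongTy, PtrToInt8Ty, symtabPtrTy, IntTy };
    auto used = llvm::makeArrayRef(moduleEltTys)
                    .drop_back(unsigned(RuntimeVersion < 10));
    // used.size() == 5 when RuntimeVersion >= 10, otherwise 4.
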
// Create the load function calling the runtime entry point with the module
// structure
@@ -2616,10 +2583,9 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
Builder.SetInsertPoint(EntryBB);
llvm::FunctionType *FT =
- llvm::FunctionType::get(Builder.getVoidTy(),
- llvm::PointerType::getUnqual(ModuleTy), true);
+ llvm::FunctionType::get(Builder.getVoidTy(), module->getType(), true);
llvm::Value *Register = CGM.CreateRuntimeFunction(FT, "__objc_exec_class");
- Builder.CreateCall(Register, Module);
+ Builder.CreateCall(Register, module);
if (!ClassAliases.empty()) {
llvm::Type *ArgTypes[2] = {PtrTy, PtrToInt8Ty};
@@ -2646,8 +2612,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
for (std::vector<ClassAliasPair>::iterator iter = ClassAliases.begin();
iter != ClassAliases.end(); ++iter) {
llvm::Constant *TheClass =
- TheModule.getGlobalVariable(("_OBJC_CLASS_" + iter->first).c_str(),
- true);
+ TheModule.getGlobalVariable("_OBJC_CLASS_" + iter->first, true);
if (TheClass) {
TheClass = llvm::ConstantExpr::getBitCast(TheClass, PtrTy);
Builder.CreateCall(RegisterAlias,
@@ -2910,9 +2875,11 @@ llvm::Value *CGObjCGNU::EmitIvarOffset(CodeGenFunction &CGF,
if (RuntimeVersion < 10 ||
CGF.CGM.getTarget().getTriple().isKnownWindowsMSVCEnvironment())
return CGF.Builder.CreateZExtOrBitCast(
- CGF.Builder.CreateDefaultAlignedLoad(CGF.Builder.CreateAlignedLoad(
- ObjCIvarOffsetVariable(Interface, Ivar),
- CGF.getPointerAlign(), "ivar")),
+ CGF.Builder.CreateAlignedLoad(
+ Int32Ty, CGF.Builder.CreateAlignedLoad(
+ ObjCIvarOffsetVariable(Interface, Ivar),
+ CGF.getPointerAlign(), "ivar"),
+ CharUnits::fromQuantity(4)),
PtrDiffTy);
std::string name = "__objc_ivar_offset_value_" +
Interface->getNameAsString() +"." + Ivar->getNameAsString();
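
[Editorial note] The EmitIvarOffset change above makes the double indirection explicit: the first load fetches the pointer stored in the per-ivar offset variable at pointer alignment, the second loads the 32-bit offset it points to at 4-byte alignment, and the result is widened to ptrdiff_t. In rough C terms, with a hypothetical variable name:

    #include <cstddef>
    #include <cstdint>

    // Approximate model of the IR emitted for the fragile/MSVC path above.
    extern int32_t *__objc_ivar_offset_SomeClass_someIvar;  // pointer-sized slot

    ptrdiff_t exampleIvarOffset() {
      int32_t *valuePtr = __objc_ivar_offset_SomeClass_someIvar; // first load
      return (ptrdiff_t)*valuePtr;                               // second load, i32
    }
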
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 5ab9fc4f9710..7219592fffcd 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -11,12 +11,13 @@
//
//===----------------------------------------------------------------------===//
-#include "CGObjCRuntime.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
+#include "CGObjCRuntime.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
+#include "ConstantBuilder.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclObjC.h"
@@ -25,6 +26,7 @@
#include "clang/Basic/LangOptions.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/CachedHashString.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
@@ -172,18 +174,18 @@ protected:
CodeGen::CodeGenModule &CGM;
public:
- llvm::Type *ShortTy, *IntTy, *LongTy, *LongLongTy;
- llvm::Type *Int8PtrTy, *Int8PtrPtrTy;
+ llvm::IntegerType *ShortTy, *IntTy, *LongTy;
+ llvm::PointerType *Int8PtrTy, *Int8PtrPtrTy;
llvm::Type *IvarOffsetVarTy;
/// ObjectPtrTy - LLVM type for object handles (typeof(id))
- llvm::Type *ObjectPtrTy;
+ llvm::PointerType *ObjectPtrTy;
/// PtrObjectPtrTy - LLVM type for id *
- llvm::Type *PtrObjectPtrTy;
+ llvm::PointerType *PtrObjectPtrTy;
/// SelectorPtrTy - LLVM type for selector handles (typeof(SEL))
- llvm::Type *SelectorPtrTy;
+ llvm::PointerType *SelectorPtrTy;
private:
/// ProtocolPtrTy - LLVM type for external protocol handles
@@ -212,7 +214,7 @@ public:
/// SuperTy - LLVM type for struct objc_super.
llvm::StructType *SuperTy;
/// SuperPtrTy - LLVM type for struct objc_super *.
- llvm::Type *SuperPtrTy;
+ llvm::PointerType *SuperPtrTy;
/// PropertyTy - LLVM type for struct objc_property (struct _prop_t
/// in GCC parlance).
@@ -222,7 +224,7 @@ public:
/// (_prop_list_t in GCC parlance).
llvm::StructType *PropertyListTy;
/// PropertyListPtrTy - LLVM type for struct objc_property_list*.
- llvm::Type *PropertyListPtrTy;
+ llvm::PointerType *PropertyListPtrTy;
// MethodTy - LLVM type for struct objc_method.
llvm::StructType *MethodTy;
@@ -230,7 +232,7 @@ public:
/// CacheTy - LLVM type for struct objc_cache.
llvm::Type *CacheTy;
/// CachePtrTy - LLVM type for struct objc_cache *.
- llvm::Type *CachePtrTy;
+ llvm::PointerType *CachePtrTy;
llvm::Constant *getGetPropertyFn() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
@@ -500,20 +502,20 @@ public:
/// SymtabTy - LLVM type for struct objc_symtab.
llvm::StructType *SymtabTy;
/// SymtabPtrTy - LLVM type for struct objc_symtab *.
- llvm::Type *SymtabPtrTy;
+ llvm::PointerType *SymtabPtrTy;
/// ModuleTy - LLVM type for struct objc_module.
llvm::StructType *ModuleTy;
/// ProtocolTy - LLVM type for struct objc_protocol.
llvm::StructType *ProtocolTy;
/// ProtocolPtrTy - LLVM type for struct objc_protocol *.
- llvm::Type *ProtocolPtrTy;
+ llvm::PointerType *ProtocolPtrTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension.
llvm::StructType *ProtocolExtensionTy;
/// ProtocolExtensionTy - LLVM type for struct
/// objc_protocol_extension *.
- llvm::Type *ProtocolExtensionPtrTy;
+ llvm::PointerType *ProtocolExtensionPtrTy;
/// MethodDescriptionTy - LLVM type for struct
/// objc_method_description.
llvm::StructType *MethodDescriptionTy;
@@ -522,34 +524,34 @@ public:
llvm::StructType *MethodDescriptionListTy;
/// MethodDescriptionListPtrTy - LLVM type for struct
/// objc_method_description_list *.
- llvm::Type *MethodDescriptionListPtrTy;
+ llvm::PointerType *MethodDescriptionListPtrTy;
/// ProtocolListTy - LLVM type for struct objc_property_list.
llvm::StructType *ProtocolListTy;
/// ProtocolListPtrTy - LLVM type for struct objc_property_list*.
- llvm::Type *ProtocolListPtrTy;
+ llvm::PointerType *ProtocolListPtrTy;
/// CategoryTy - LLVM type for struct objc_category.
llvm::StructType *CategoryTy;
/// ClassTy - LLVM type for struct objc_class.
llvm::StructType *ClassTy;
/// ClassPtrTy - LLVM type for struct objc_class *.
- llvm::Type *ClassPtrTy;
+ llvm::PointerType *ClassPtrTy;
/// ClassExtensionTy - LLVM type for struct objc_class_ext.
llvm::StructType *ClassExtensionTy;
/// ClassExtensionPtrTy - LLVM type for struct objc_class_ext *.
- llvm::Type *ClassExtensionPtrTy;
+ llvm::PointerType *ClassExtensionPtrTy;
// IvarTy - LLVM type for struct objc_ivar.
llvm::StructType *IvarTy;
/// IvarListTy - LLVM type for struct objc_ivar_list.
- llvm::Type *IvarListTy;
+ llvm::StructType *IvarListTy;
/// IvarListPtrTy - LLVM type for struct objc_ivar_list *.
- llvm::Type *IvarListPtrTy;
+ llvm::PointerType *IvarListPtrTy;
/// MethodListTy - LLVM type for struct objc_method_list.
- llvm::Type *MethodListTy;
+ llvm::StructType *MethodListTy;
/// MethodListPtrTy - LLVM type for struct objc_method_list *.
- llvm::Type *MethodListPtrTy;
+ llvm::PointerType *MethodListPtrTy;
/// ExceptionDataTy - LLVM type for struct _objc_exception_data.
- llvm::Type *ExceptionDataTy;
+ llvm::StructType *ExceptionDataTy;
/// ExceptionTryEnterFn - LLVM objc_exception_try_enter function.
llvm::Constant *getExceptionTryEnterFn() {
@@ -608,25 +610,25 @@ public:
llvm::StructType *MethodListnfABITy;
// MethodListnfABIPtrTy - LLVM for struct _method_list_t*
- llvm::Type *MethodListnfABIPtrTy;
+ llvm::PointerType *MethodListnfABIPtrTy;
// ProtocolnfABITy = LLVM for struct _protocol_t
llvm::StructType *ProtocolnfABITy;
// ProtocolnfABIPtrTy = LLVM for struct _protocol_t*
- llvm::Type *ProtocolnfABIPtrTy;
+ llvm::PointerType *ProtocolnfABIPtrTy;
// ProtocolListnfABITy - LLVM for struct _objc_protocol_list
llvm::StructType *ProtocolListnfABITy;
// ProtocolListnfABIPtrTy - LLVM for struct _objc_protocol_list*
- llvm::Type *ProtocolListnfABIPtrTy;
+ llvm::PointerType *ProtocolListnfABIPtrTy;
// ClassnfABITy - LLVM for struct _class_t
llvm::StructType *ClassnfABITy;
// ClassnfABIPtrTy - LLVM for struct _class_t*
- llvm::Type *ClassnfABIPtrTy;
+ llvm::PointerType *ClassnfABIPtrTy;
// IvarnfABITy - LLVM for struct _ivar_t
llvm::StructType *IvarnfABITy;
@@ -635,13 +637,13 @@ public:
llvm::StructType *IvarListnfABITy;
// IvarListnfABIPtrTy = LLVM for struct _ivar_list_t*
- llvm::Type *IvarListnfABIPtrTy;
+ llvm::PointerType *IvarListnfABIPtrTy;
// ClassRonfABITy - LLVM for struct _class_ro_t
llvm::StructType *ClassRonfABITy;
// ImpnfABITy - LLVM for id (*)(id, SEL, ...)
- llvm::Type *ImpnfABITy;
+ llvm::PointerType *ImpnfABITy;
// CategorynfABITy - LLVM for struct _category_t
llvm::StructType *CategorynfABITy;
@@ -670,7 +672,7 @@ public:
llvm::StructType *SuperMessageRefTy;
// SuperMessageRefPtrTy - LLVM for struct _super_message_ref_t*
- llvm::Type *SuperMessageRefPtrTy;
+ llvm::PointerType *SuperMessageRefPtrTy;
llvm::Constant *getMessageSendFixupFn() {
// id objc_msgSend_fixup(id, struct message_ref_t*, ...)
@@ -733,6 +735,13 @@ public:
ObjCNonFragileABITypesHelper(CodeGen::CodeGenModule &cgm);
};
+enum class ObjCLabelType {
+ ClassName,
+ MethodVarName,
+ MethodVarType,
+ PropertyName,
+};
+
class CGObjCCommonMac : public CodeGen::CGObjCRuntime {
public:
class SKIP_SCAN {
@@ -836,7 +845,7 @@ protected:
llvm::DenseMap<Selector, llvm::GlobalVariable*> MethodVarNames;
/// DefinedCategoryNames - list of category names in form Class_Category.
- llvm::SmallSetVector<std::string, 16> DefinedCategoryNames;
+ llvm::SmallSetVector<llvm::CachedHashString, 16> DefinedCategoryNames;
/// MethodVarTypes - uniqued method type signatures. We have to use
/// a StringMap here because have no other unique reference.
@@ -879,6 +888,15 @@ protected:
/// DefinedNonLazyCategories - List of defined "non-lazy" categories.
SmallVector<llvm::GlobalValue*, 16> DefinedNonLazyCategories;
+ /// Cached reference to the class for constant strings. This value has type
+ /// int * but is actually an Obj-C class pointer.
+ llvm::WeakVH ConstantStringClassRef;
+
+ /// \brief The LLVM type corresponding to NSConstantString.
+ llvm::StructType *NSConstantStringType = nullptr;
+
+ llvm::StringMap<llvm::GlobalVariable *> NSConstantStringMap;
+
/// GetNameForMethod - Return a name for the given method.
/// \param[out] NameOut - The return value.
void GetNameForMethod(const ObjCMethodDecl *OMD,
@@ -979,15 +997,6 @@ protected:
ArrayRef<llvm::Constant*> MethodTypes,
const ObjCCommonTypesHelper &ObjCTypes);
- /// PushProtocolProperties - Push protocol's property on the input stack.
- void PushProtocolProperties(
- llvm::SmallPtrSet<const IdentifierInfo*, 16> &PropertySet,
- SmallVectorImpl<llvm::Constant*> &Properties,
- const Decl *Container,
- const ObjCProtocolDecl *Proto,
- const ObjCCommonTypesHelper &ObjCTypes,
- bool IsClassProperty);
-
/// GetProtocolRef - Return a reference to the internal protocol
/// description, creating an empty one if it has not been
/// defined. The return value has type ProtocolPtrTy.
@@ -1009,15 +1018,25 @@ public:
///
/// \param Name - The variable name.
/// \param Init - The variable initializer; this is also used to
- /// define the type of the variable.
+ /// define the type of the variable.
/// \param Section - The section the variable should go into, or empty.
/// \param Align - The alignment for the variable, or 0.
/// \param AddToUsed - Whether the variable should be added to
- /// "llvm.used".
- llvm::GlobalVariable *CreateMetadataVar(Twine Name, llvm::Constant *Init,
+ /// "llvm.used".
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ ConstantStructBuilder &Init,
+ StringRef Section, CharUnits Align,
+ bool AddToUsed);
+ llvm::GlobalVariable *CreateMetadataVar(Twine Name,
+ llvm::Constant *Init,
StringRef Section, CharUnits Align,
bool AddToUsed);
+ llvm::GlobalVariable *CreateCStringLiteral(StringRef Name,
+ ObjCLabelType LabelType,
+ bool ForceNonFragileABI = false,
+ bool NullTerminate = true);
+
protected:
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
ReturnValueSlot Return,
@@ -1044,6 +1063,7 @@ public:
}
ConstantAddress GenerateConstantString(const StringLiteral *SL) override;
+ ConstantAddress GenerateConstantNSString(const StringLiteral *SL);
llvm::Function *GenerateMethod(const ObjCMethodDecl *OMD,
const ObjCContainerDecl *CD=nullptr) override;
@@ -1060,6 +1080,9 @@ public:
/// forward references will be filled in with empty bodies if no
/// definition is seen. The return value has type ProtocolPtrTy.
virtual llvm::Constant *GetOrEmitProtocolRef(const ObjCProtocolDecl *PD)=0;
+
+ virtual llvm::Constant *getNSConstantStringClassRef() = 0;
+
llvm::Constant *BuildGCBlockLayout(CodeGen::CodeGenModule &CGM,
const CGBlockInfo &blockInfo) override;
llvm::Constant *BuildRCBlockLayout(CodeGen::CodeGenModule &CGM,
@@ -1069,8 +1092,95 @@ public:
QualType T) override;
};
+namespace {
+
+enum class MethodListType {
+ CategoryInstanceMethods,
+ CategoryClassMethods,
+ InstanceMethods,
+ ClassMethods,
+ ProtocolInstanceMethods,
+ ProtocolClassMethods,
+ OptionalProtocolInstanceMethods,
+ OptionalProtocolClassMethods,
+};
+
+/// A convenience class for splitting the methods of a protocol into
+/// the four interesting groups.
+class ProtocolMethodLists {
+public:
+ enum Kind {
+ RequiredInstanceMethods,
+ RequiredClassMethods,
+ OptionalInstanceMethods,
+ OptionalClassMethods
+ };
+ enum {
+ NumProtocolMethodLists = 4
+ };
+
+ static MethodListType getMethodListKind(Kind kind) {
+ switch (kind) {
+ case RequiredInstanceMethods:
+ return MethodListType::ProtocolInstanceMethods;
+ case RequiredClassMethods:
+ return MethodListType::ProtocolClassMethods;
+ case OptionalInstanceMethods:
+ return MethodListType::OptionalProtocolInstanceMethods;
+ case OptionalClassMethods:
+ return MethodListType::OptionalProtocolClassMethods;
+ }
+ llvm_unreachable("bad kind");
+ }
+
+ SmallVector<const ObjCMethodDecl *, 4> Methods[NumProtocolMethodLists];
+
+ static ProtocolMethodLists get(const ObjCProtocolDecl *PD) {
+ ProtocolMethodLists result;
+
+ for (auto MD : PD->methods()) {
+ size_t index = (2 * size_t(MD->isOptional()))
+ + (size_t(MD->isClassMethod()));
+ result.Methods[index].push_back(MD);
+ }
+
+ return result;
+ }
+
+ template <class Self>
+ SmallVector<llvm::Constant*, 8> emitExtendedTypesArray(Self *self) const {
+ // In both ABIs, the method types list is parallel with the
+ // concatenation of the methods arrays in the following order:
+ // instance methods
+ // class methods
+ // optional instance methods
+ // optional class methods
+ SmallVector<llvm::Constant*, 8> result;
+
+ // Methods is already in the correct order for both ABIs.
+ for (auto &list : Methods) {
+ for (auto MD : list) {
+ result.push_back(self->GetMethodVarType(MD, true));
+ }
+ }
+
+ return result;
+ }
+
+ template <class Self>
+ llvm::Constant *emitMethodList(Self *self, const ObjCProtocolDecl *PD,
+ Kind kind) const {
+ return self->emitMethodList(PD->getObjCRuntimeNameAsString(),
+ getMethodListKind(kind), Methods[kind]);
+ }
+};
+
+} // end anonymous namespace
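
[Editorial note] ProtocolMethodLists::get relies on the arithmetic 2*isOptional + isClassMethod lining up with the Kind enumerators declared above (required instance, required class, optional instance, optional class). A quick compile-time check of that correspondence; illustrative only, it would have to sit in the same translation unit as the class:

    // Index = 2*isOptional + isClassMethod must match the Kind order.
    constexpr unsigned bucketIndex(bool isOptional, bool isClassMethod) {
      return 2u * unsigned(isOptional) + unsigned(isClassMethod);
    }
    static_assert(bucketIndex(false, false) == ProtocolMethodLists::RequiredInstanceMethods, "");
    static_assert(bucketIndex(false, true)  == ProtocolMethodLists::RequiredClassMethods, "");
    static_assert(bucketIndex(true,  false) == ProtocolMethodLists::OptionalInstanceMethods, "");
    static_assert(bucketIndex(true,  true)  == ProtocolMethodLists::OptionalClassMethods, "");
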
+
class CGObjCMac : public CGObjCCommonMac {
private:
+ friend ProtocolMethodLists;
+
ObjCTypesHelper ObjCTypes;
/// EmitModuleInfo - Another marker encoding module level
@@ -1091,7 +1201,7 @@ private:
llvm::Constant *EmitClassExtension(const ObjCImplementationDecl *ID,
CharUnits instanceSize,
bool hasMRCWeakIvars,
- bool isClassProperty);
+ bool isMetaclass);
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class.
@@ -1123,30 +1233,18 @@ private:
/// given implementation. The return value has type ClassPtrTy.
llvm::Constant *EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- ArrayRef<llvm::Constant*> Methods);
+ ArrayRef<const ObjCMethodDecl *> Methods);
- llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+ void emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD);
- llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
+ void emitMethodDescriptionConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD);
/// EmitMethodList - Emit the method list for the given
/// implementation. The return value has type MethodListPtrTy.
- llvm::Constant *EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
-
- /// EmitMethodDescList - Emit a method description list for a list of
- /// method declarations.
- /// - TypeName: The name for the type containing the methods.
- /// - IsProtocol: True iff these methods are for a protocol.
- /// - ClassMethds: True iff these are class methods.
- /// - Required: When true, only "required" methods are
- /// listed. Similarly, when false only "optional" methods are
- /// listed. For classes this should always be true.
- /// - begin, end: The method list to output.
- ///
- /// The return value has type MethodDescriptionListPtrTy.
- llvm::Constant *EmitMethodDescList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
+ llvm::Constant *emitMethodList(Twine Name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> Methods);
/// GetOrEmitProtocol - Get the protocol object for the given
/// declaration, emitting it if necessary. The return value has type
@@ -1165,9 +1263,7 @@ private:
/// ProtocolExtensionPtrTy.
llvm::Constant *
EmitProtocolExtension(const ObjCProtocolDecl *PD,
- ArrayRef<llvm::Constant*> OptInstanceMethods,
- ArrayRef<llvm::Constant*> OptClassMethods,
- ArrayRef<llvm::Constant*> MethodTypesExt);
+ const ProtocolMethodLists &methodLists);
/// EmitProtocolList - Generate the list of referenced
/// protocols. The return value has type ProtocolListPtrTy.
@@ -1183,6 +1279,8 @@ private:
public:
CGObjCMac(CodeGen::CodeGenModule &cgm);
+ llvm::Constant *getNSConstantStringClassRef() override;
+
llvm::Function *ModuleInitFunction() override;
CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -1262,20 +1360,14 @@ public:
llvm::Value *EmitIvarOffset(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar) override;
-
- /// GetClassGlobal - Return the global variable for the Objective-C
- /// class of the given name.
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override {
- llvm_unreachable("CGObjCMac::GetClassGlobal");
- }
};
class CGObjCNonFragileABIMac : public CGObjCCommonMac {
private:
+ friend ProtocolMethodLists;
ObjCNonFragileABITypesHelper ObjCTypes;
llvm::GlobalVariable* ObjCEmptyCacheVar;
- llvm::GlobalVariable* ObjCEmptyVtableVar;
+ llvm::Constant* ObjCEmptyVtableVar;
/// SuperClassReferences - uniqued super class references.
llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*> SuperClassReferences;
@@ -1310,21 +1402,22 @@ private:
unsigned InstanceStart,
unsigned InstanceSize,
const ObjCImplementationDecl *ID);
- llvm::GlobalVariable * BuildClassMetaData(const std::string &ClassName,
- llvm::Constant *IsAGV,
- llvm::Constant *SuperClassGV,
- llvm::Constant *ClassRoGV,
- bool HiddenVisibility,
- bool Weak);
-
- llvm::Constant *GetMethodConstant(const ObjCMethodDecl *MD);
+ llvm::GlobalVariable *BuildClassObject(const ObjCInterfaceDecl *CI,
+ bool isMetaclass,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility);
+
+ void emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD,
+ bool forProtocol);
+
+ /// Emit the method list for the given implementation. The return value
+ /// has type MethodListnfABITy.
+ llvm::Constant *emitMethodList(Twine Name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> Methods);
- llvm::Constant *GetMethodDescriptionConstant(const ObjCMethodDecl *MD);
-
- /// EmitMethodList - Emit the method list for the given
- /// implementation. The return value has type MethodListnfABITy.
- llvm::Constant *EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods);
/// EmitIvarList - Emit the ivar list for the given
/// implementation. If ForClass is true the list of class ivars
/// (i.e. metaclass ivars) is emitted, otherwise the list of
@@ -1365,8 +1458,12 @@ private:
/// GetClassGlobal - Return the global variable for the Objective-C
/// class of the given name.
- llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) override;
+ llvm::Constant *GetClassGlobal(StringRef Name,
+ ForDefinition_t IsForDefinition,
+ bool Weak = false, bool DLLImport = false);
+ llvm::Constant *GetClassGlobal(const ObjCInterfaceDecl *ID,
+ bool isMetaclass,
+ ForDefinition_t isForDefinition);
/// EmitClassRef - Return a Value*, of type ObjCTypes.ClassPtrTy,
/// for the given class reference.
@@ -1374,7 +1471,7 @@ private:
const ObjCInterfaceDecl *ID);
llvm::Value *EmitClassRefFromId(CodeGenFunction &CGF,
- IdentifierInfo *II, bool Weak,
+ IdentifierInfo *II,
const ObjCInterfaceDecl *ID);
llvm::Value *EmitNSAutoreleasePoolClassRef(CodeGenFunction &CGF) override;
@@ -1404,7 +1501,7 @@ private:
/// GetInterfaceEHType - Get the cached ehtype for the given Objective-C
/// interface. The return value has type EHTypePtrTy.
llvm::Constant *GetInterfaceEHType(const ObjCInterfaceDecl *ID,
- bool ForDefinition);
+ ForDefinition_t IsForDefinition);
StringRef getMetaclassSymbolPrefix() const { return "OBJC_METACLASS_$_"; }
@@ -1451,7 +1548,9 @@ private:
public:
CGObjCNonFragileABIMac(CodeGen::CodeGenModule &cgm);
- // FIXME. All stubs for now!
+
+ llvm::Constant *getNSConstantStringClassRef() override;
+
llvm::Function *ModuleInitFunction() override;
CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
@@ -1761,11 +1860,115 @@ llvm::Constant *CGObjCMac::GetEHType(QualType T) {
};
*/
-ConstantAddress CGObjCCommonMac::GenerateConstantString(
- const StringLiteral *SL) {
- return (CGM.getLangOpts().NoConstantCFStrings == 0 ?
- CGM.GetAddrOfConstantCFString(SL) :
- CGM.GetAddrOfConstantString(SL));
+ConstantAddress
+CGObjCCommonMac::GenerateConstantString(const StringLiteral *SL) {
+ return (!CGM.getLangOpts().NoConstantCFStrings
+ ? CGM.GetAddrOfConstantCFString(SL)
+ : GenerateConstantNSString(SL));
+}
+
+static llvm::StringMapEntry<llvm::GlobalVariable *> &
+GetConstantStringEntry(llvm::StringMap<llvm::GlobalVariable *> &Map,
+ const StringLiteral *Literal, unsigned &StringLength) {
+ StringRef String = Literal->getString();
+ StringLength = String.size();
+ return *Map.insert(std::make_pair(String, nullptr)).first;
+}
+
+llvm::Constant *CGObjCMac::getNSConstantStringClassRef() {
+ if (llvm::Value *V = ConstantStringClassRef)
+ return cast<llvm::Constant>(V);
+
+ auto &StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ std::string str =
+ StringClass.empty() ? "_NSConstantStringClassReference"
+ : "_" + StringClass + "ClassReference";
+
+ llvm::Type *PTy = llvm::ArrayType::get(CGM.IntTy, 0);
+ auto GV = CGM.CreateRuntimeVariable(PTy, str);
+ auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
+ ConstantStringClassRef = V;
+ return V;
+}
+
+llvm::Constant *CGObjCNonFragileABIMac::getNSConstantStringClassRef() {
+ if (llvm::Value *V = ConstantStringClassRef)
+ return cast<llvm::Constant>(V);
+
+ auto &StringClass = CGM.getLangOpts().ObjCConstantStringClass;
+ std::string str =
+ StringClass.empty() ? "OBJC_CLASS_$_NSConstantString"
+ : "OBJC_CLASS_$_" + StringClass;
+ auto GV = GetClassGlobal(str, NotForDefinition);
+
+ // Make sure the result is of the correct type.
+ auto V = llvm::ConstantExpr::getBitCast(GV, CGM.IntTy->getPointerTo());
+
+ ConstantStringClassRef = V;
+ return V;
+}
+
+ConstantAddress
+CGObjCCommonMac::GenerateConstantNSString(const StringLiteral *Literal) {
+ unsigned StringLength = 0;
+ llvm::StringMapEntry<llvm::GlobalVariable *> &Entry =
+ GetConstantStringEntry(NSConstantStringMap, Literal, StringLength);
+
+ if (auto *C = Entry.second)
+ return ConstantAddress(C, CharUnits::fromQuantity(C->getAlignment()));
+
+ // If we don't already have it, get _NSConstantStringClassReference.
+ llvm::Constant *Class = getNSConstantStringClassRef();
+
+ // If we don't already have it, construct the type for a constant NSString.
+ if (!NSConstantStringType) {
+ NSConstantStringType =
+ llvm::StructType::create({
+ CGM.Int32Ty->getPointerTo(),
+ CGM.Int8PtrTy,
+ CGM.IntTy
+ }, "struct.__builtin_NSString");
+ }
+
+ ConstantInitBuilder Builder(CGM);
+ auto Fields = Builder.beginStruct(NSConstantStringType);
+
+ // Class pointer.
+ Fields.add(Class);
+
+ // String pointer.
+ llvm::Constant *C =
+ llvm::ConstantDataArray::getString(VMContext, Entry.first());
+
+ llvm::GlobalValue::LinkageTypes Linkage = llvm::GlobalValue::PrivateLinkage;
+ bool isConstant = !CGM.getLangOpts().WritableStrings;
+
+ auto *GV = new llvm::GlobalVariable(CGM.getModule(), C->getType(), isConstant,
+ Linkage, C, ".str");
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ // Don't enforce the target's minimum global alignment, since the only use
+ // of the string is via this class initializer.
+ GV->setAlignment(1);
+ Fields.addBitCast(GV, CGM.Int8PtrTy);
+
+ // String length.
+ Fields.addInt(CGM.IntTy, StringLength);
+
+ // The struct.
+ CharUnits Alignment = CGM.getPointerAlign();
+ GV = Fields.finishAndCreateGlobal("_unnamed_nsstring_", Alignment,
+ /*constant*/ true,
+ llvm::GlobalVariable::PrivateLinkage);
+ const char *NSStringSection = "__OBJC,__cstring_object,regular,no_dead_strip";
+ const char *NSStringNonFragileABISection =
+ "__DATA,__objc_stringobj,regular,no_dead_strip";
+ // FIXME. Fix section.
+ GV->setSection(CGM.getLangOpts().ObjCRuntime.isNonFragile()
+ ? NSStringNonFragileABISection
+ : NSStringSection);
+ Entry.second = GV;
+
+ return ConstantAddress(GV, Alignment);
}
enum {
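
[Editorial note] The new GenerateConstantNSString path above assembles each literal as a three-field constant rather than delegating to CodeGenModule. The emitted "_unnamed_nsstring_" global corresponds roughly to the following; field names are invented for illustration:

    // Approximate source-level view of struct.__builtin_NSString as built above.
    struct __builtin_NSString_example {
      const int  *isa;     // class ref from getNSConstantStringClassRef(), as int*
      const char *bytes;   // the ".str" payload; 1-byte aligned, unnamed_addr
      int         length;  // StringLength of the literal
    };
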
@@ -1953,8 +2156,9 @@ CGObjCCommonMac::EmitMessageSend(CodeGen::CodeGenFunction &CGF,
llvm::Instruction *CallSite;
Fn = llvm::ConstantExpr::getBitCast(Fn, MSI.MessengerType);
- RValue rvalue = CGF.EmitCall(MSI.CallInfo, Fn, Return, ActualArgs,
- CGCalleeInfo(), &CallSite);
+ CGCallee Callee = CGCallee::forDirect(Fn);
+ RValue rvalue = CGF.EmitCall(MSI.CallInfo, Callee, Return, ActualArgs,
+ &CallSite);
// Mark the call as noreturn if the method is marked noreturn and the
// receiver cannot be null.
@@ -2576,10 +2780,9 @@ llvm::Constant *CGObjCCommonMac::getBitmapBlockLayout(bool ComputeByrefLayout) {
}
}
- llvm::GlobalVariable *Entry = CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, BitMap, false),
- "__TEXT,__objc_classname,cstring_literals", CharUnits::One(), true);
+ auto *Entry = CreateCStringLiteral(BitMap, ObjCLabelType::ClassName,
+ /*ForceNonFragileABI=*/true,
+ /*NullTerminate=*/false);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -2730,66 +2933,29 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocol(const ObjCProtocolDecl *PD) {
LazySymbols.insert(&CGM.getContext().Idents.get("Protocol"));
// Construct method lists.
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
- std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
- std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
- for (const auto *MD : PD->instance_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptInstanceMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- InstanceMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- for (const auto *MD : PD->class_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptClassMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- ClassMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- MethodTypesExt.insert(MethodTypesExt.end(),
- OptMethodTypesExt.begin(), OptMethodTypesExt.end());
-
- llvm::Constant *Values[] = {
- EmitProtocolExtension(PD, OptInstanceMethods, OptClassMethods,
- MethodTypesExt),
- GetClassName(PD->getObjCRuntimeNameAsString()),
- EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
- PD->protocol_begin(), PD->protocol_end()),
- EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods),
- EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods)};
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
- Values);
+ auto methodLists = ProtocolMethodLists::get(PD);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolTy);
+ values.add(EmitProtocolExtension(PD, methodLists));
+ values.add(GetClassName(PD->getObjCRuntimeNameAsString()));
+ values.add(EmitProtocolList("OBJC_PROTOCOL_REFS_" + PD->getName(),
+ PD->protocol_begin(), PD->protocol_end()));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredClassMethods));
if (Entry) {
// Already created, update the initializer.
assert(Entry->hasPrivateLinkage());
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
} else {
- Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolTy,
- false, llvm::GlobalValue::PrivateLinkage,
- Init, "OBJC_PROTOCOL_" + PD->getName());
+ Entry = values.finishAndCreateGlobal("OBJC_PROTOCOL_" + PD->getName(),
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
Entry->setSection("__OBJC,__protocol,regular,no_dead_strip");
- // FIXME: Is this necessary? Why only for protocol?
- Entry->setAlignment(4);
Protocols[PD->getIdentifier()] = Entry;
}
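
[Editorial note] The branch above also shows the two terminal operations a builder supports: finishAndSetAsInitializer fills in a global that already exists as a forward reference, while finishAndCreateGlobal makes a new one. The same either/or shape recurs for class metadata later in the patch; as a hypothetical helper (not part of the patch) it would look like:

    static llvm::GlobalVariable *
    installOrCreate(ConstantStructBuilder &init, llvm::GlobalVariable *existing,
                    const llvm::Twine &name, CodeGen::CodeGenModule &CGM) {
      if (existing) {
        init.finishAndSetAsInitializer(existing);   // reuse the forward reference
        return existing;
      }
      return init.finishAndCreateGlobal(name, CGM.getPointerAlign(),
                                        /*constant*/ false,
                                        llvm::GlobalValue::PrivateLinkage);
    }
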
@@ -2828,37 +2994,49 @@ llvm::Constant *CGObjCMac::GetOrEmitProtocolRef(const ObjCProtocolDecl *PD) {
*/
llvm::Constant *
CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
- ArrayRef<llvm::Constant*> OptInstanceMethods,
- ArrayRef<llvm::Constant*> OptClassMethods,
- ArrayRef<llvm::Constant*> MethodTypesExt) {
- uint64_t Size =
- CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.IntTy, Size),
- EmitMethodDescList("OBJC_PROTOCOL_INSTANCE_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- OptInstanceMethods),
- EmitMethodDescList("OBJC_PROTOCOL_CLASS_METHODS_OPT_" + PD->getName(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- OptClassMethods),
- EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
- ObjCTypes, false),
- EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
- MethodTypesExt, ObjCTypes),
- EmitPropertyList("OBJC_$_CLASS_PROP_PROTO_LIST_" + PD->getName(), nullptr,
- PD, ObjCTypes, true)};
+ const ProtocolMethodLists &methodLists) {
+ auto optInstanceMethods =
+ methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalInstanceMethods);
+ auto optClassMethods =
+ methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalClassMethods);
+
+ auto extendedMethodTypes =
+ EmitProtocolMethodTypes("OBJC_PROTOCOL_METHOD_TYPES_" + PD->getName(),
+ methodLists.emitExtendedTypesArray(this),
+ ObjCTypes);
+
+ auto instanceProperties =
+ EmitPropertyList("OBJC_$_PROP_PROTO_LIST_" + PD->getName(), nullptr, PD,
+ ObjCTypes, false);
+ auto classProperties =
+ EmitPropertyList("OBJC_$_CLASS_PROP_PROTO_LIST_" + PD->getName(), nullptr,
+ PD, ObjCTypes, true);
// Return null if no extension bits are used.
- if (Values[1]->isNullValue() && Values[2]->isNullValue() &&
- Values[3]->isNullValue() && Values[4]->isNullValue() &&
- Values[5]->isNullValue())
+ if (optInstanceMethods->isNullValue() &&
+ optClassMethods->isNullValue() &&
+ extendedMethodTypes->isNullValue() &&
+ instanceProperties->isNullValue() &&
+ classProperties->isNullValue()) {
return llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
+ }
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.ProtocolExtensionTy, Values);
+ uint64_t size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolExtensionTy);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolExtensionTy);
+ values.addInt(ObjCTypes.IntTy, size);
+ values.add(optInstanceMethods);
+ values.add(optClassMethods);
+ values.add(instanceProperties);
+ values.add(extendedMethodTypes);
+ values.add(classProperties);
// No special section, but goes in llvm.used
- return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), Init,
+ return CreateMetadataVar("\01l_OBJC_PROTOCOLEXT_" + PD->getName(), values,
StringRef(), CGM.getPointerAlign(), true);
}
@@ -2870,59 +3048,57 @@ CGObjCMac::EmitProtocolExtension(const ObjCProtocolDecl *PD,
};
*/
llvm::Constant *
-CGObjCMac::EmitProtocolList(Twine Name,
+CGObjCMac::EmitProtocolList(Twine name,
ObjCProtocolDecl::protocol_iterator begin,
ObjCProtocolDecl::protocol_iterator end) {
- SmallVector<llvm::Constant *, 16> ProtocolRefs;
-
- for (; begin != end; ++begin)
- ProtocolRefs.push_back(GetProtocolRef(*begin));
-
// Just return null for empty protocol lists
- if (ProtocolRefs.empty())
+ if (begin == end)
return llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- // This list is null terminated.
- ProtocolRefs.push_back(llvm::Constant::getNullValue(ObjCTypes.ProtocolPtrTy));
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
- llvm::Constant *Values[3];
// This field is only used by the runtime.
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.LongTy,
- ProtocolRefs.size() - 1);
- Values[2] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
+
+ // Reserve a slot for the count.
+ auto countSlot = values.addPlaceholder();
+
+ auto refsArray = values.beginArray(ObjCTypes.ProtocolPtrTy);
+ for (; begin != end; ++begin) {
+ refsArray.add(GetProtocolRef(*begin));
+ }
+ auto count = refsArray.size();
+
+ // This list is null terminated.
+ refsArray.addNullPointer(ObjCTypes.ProtocolPtrTy);
+
+ refsArray.finishAndAddTo(values);
+ values.fillPlaceholderWithInt(countSlot, ObjCTypes.LongTy, count);
+
+ StringRef section;
+ if (CGM.getTriple().isOSBinFormatMachO())
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- CGM.getPointerAlign(), false);
+ CreateMetadataVar(name, values, section, CGM.getPointerAlign(), false);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListPtrTy);
}
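
[Editorial note] EmitProtocolList above has to write the entry count ahead of the array but only learns it after walking the protocols, hence the placeholder slot that is back-filled with fillPlaceholderWithInt. For a protocol with two references, the resulting global has roughly this shape; note the count excludes the trailing null that the fragile runtime still expects:

    // Illustrative layout only; the real global is an anonymous LLVM struct.
    struct objc_protocol_list_example {
      void *next;      // runtime-only field, emitted as null
      long  count;     // 2 -- filled in via the placeholder
      void *list[3];   // two protocol refs + null terminator
    };
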
-void CGObjCCommonMac::
+static void
PushProtocolProperties(llvm::SmallPtrSet<const IdentifierInfo*,16> &PropertySet,
- SmallVectorImpl<llvm::Constant *> &Properties,
- const Decl *Container,
+ SmallVectorImpl<const ObjCPropertyDecl *> &Properties,
const ObjCProtocolDecl *Proto,
- const ObjCCommonTypesHelper &ObjCTypes,
bool IsClassProperty) {
for (const auto *P : Proto->protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
for (const auto *PD : Proto->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
- llvm::Constant *Prop[] = {
- GetPropertyName(PD->getIdentifier()),
- GetPropertyTypeString(PD, Container)
- };
- Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
+ Properties.push_back(PD);
}
}
@@ -2952,21 +3128,16 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
}
- SmallVector<llvm::Constant *, 16> Properties;
+ SmallVector<const ObjCPropertyDecl *, 16> Properties;
llvm::SmallPtrSet<const IdentifierInfo*, 16> PropertySet;
- auto AddProperty = [&](const ObjCPropertyDecl *PD) {
- llvm::Constant *Prop[] = {GetPropertyName(PD->getIdentifier()),
- GetPropertyTypeString(PD, Container)};
- Properties.push_back(llvm::ConstantStruct::get(ObjCTypes.PropertyTy, Prop));
- };
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD))
for (const ObjCCategoryDecl *ClassExt : OID->known_extensions())
for (auto *PD : ClassExt->properties()) {
if (IsClassProperty != PD->isClassProperty())
continue;
PropertySet.insert(PD->getIdentifier());
- AddProperty(PD);
+ Properties.push_back(PD);
}
for (const auto *PD : OCD->properties()) {
@@ -2976,40 +3147,45 @@ llvm::Constant *CGObjCCommonMac::EmitPropertyList(Twine Name,
// class extension.
if (!PropertySet.insert(PD->getIdentifier()).second)
continue;
- AddProperty(PD);
+ Properties.push_back(PD);
}
if (const ObjCInterfaceDecl *OID = dyn_cast<ObjCInterfaceDecl>(OCD)) {
for (const auto *P : OID->all_referenced_protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
}
else if (const ObjCCategoryDecl *CD = dyn_cast<ObjCCategoryDecl>(OCD)) {
for (const auto *P : CD->protocols())
- PushProtocolProperties(PropertySet, Properties, Container, P, ObjCTypes,
- IsClassProperty);
+ PushProtocolProperties(PropertySet, Properties, P, IsClassProperty);
}
// Return null for empty list.
if (Properties.empty())
return llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- unsigned PropertySize =
+ unsigned propertySize =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.PropertyTy);
- llvm::Constant *Values[3];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, PropertySize);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Properties.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.PropertyTy,
- Properties.size());
- Values[2] = llvm::ConstantArray::get(AT, Properties);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.IntTy, propertySize);
+ values.addInt(ObjCTypes.IntTy, Properties.size());
+ auto propertiesArray = values.beginArray(ObjCTypes.PropertyTy);
+ for (auto PD : Properties) {
+ auto property = propertiesArray.beginStruct(ObjCTypes.PropertyTy);
+ property.add(GetPropertyName(PD->getIdentifier()));
+ property.add(GetPropertyTypeString(PD, Container));
+ property.finishAndAddTo(propertiesArray);
+ }
+ propertiesArray.finishAndAddTo(values);
+
+ StringRef Section;
+ if (CGM.getTriple().isOSBinFormatMachO())
+ Section = (ObjCABI == 2) ? "__DATA, __objc_const"
+ : "__OBJC,__property,regular,no_dead_strip";
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init,
- (ObjCABI == 2) ? "__DATA, __objc_const" :
- "__OBJC,__property,regular,no_dead_strip",
- CGM.getPointerAlign(),
- true);
+ CreateMetadataVar(Name, values, Section, CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.PropertyListPtrTy);
}
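
[Editorial note] EmitPropertyList now nests one small struct per property inside the array instead of pre-building ConstantStructs by hand, while keeping the fragile-ABI layout: an entry size, a count, and then name/type-string pairs. Roughly, with illustrative names:

    // Approximate layout of the emitted property list global.
    struct objc_property_example {
      const char *name;          // GetPropertyName(...)
      const char *type_string;   // GetPropertyTypeString(...)
    };
    struct objc_property_list_example {
      unsigned entsize;                       // sizeof(objc_property_example)
      unsigned count;                         // Properties.size()
      struct objc_property_example list[1];   // really 'count' entries
    };
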
@@ -3025,50 +3201,13 @@ CGObjCCommonMac::EmitProtocolMethodTypes(Twine Name,
MethodTypes.size());
llvm::Constant *Init = llvm::ConstantArray::get(AT, MethodTypes);
- llvm::GlobalVariable *GV = CreateMetadataVar(
- Name, Init, (ObjCABI == 2) ? "__DATA, __objc_const" : StringRef(),
- CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
-}
-
-/*
- struct objc_method_description_list {
- int count;
- struct objc_method_description list[];
- };
-*/
-llvm::Constant *
-CGObjCMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- llvm::Constant *Desc[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD)
- };
- if (!Desc[1])
- return nullptr;
-
- return llvm::ConstantStruct::get(ObjCTypes.MethodDescriptionTy,
- Desc);
-}
-
-llvm::Constant *
-CGObjCMac::EmitMethodDescList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
- // Return null for empty list.
- if (Methods.empty())
- return llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
-
- llvm::Constant *Values[2];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodDescriptionTy,
- Methods.size());
- Values[1] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ StringRef Section;
+ if (CGM.getTriple().isOSBinFormatMachO() && ObjCABI == 2)
+ Section = "__DATA, __objc_const";
llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
- return llvm::ConstantExpr::getBitCast(GV,
- ObjCTypes.MethodDescriptionListPtrTy);
+ CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.Int8PtrPtrTy);
}
/*
@@ -3098,54 +3237,53 @@ void CGObjCMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
llvm::raw_svector_ostream(ExtName) << Interface->getName() << '_'
<< OCD->getName();
- SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
- for (const auto *I : OCD->instance_methods())
- // Instance methods should always be defined.
- InstanceMethods.push_back(GetMethodConstant(I));
+ ConstantInitBuilder Builder(CGM);
+ auto Values = Builder.beginStruct(ObjCTypes.CategoryTy);
- for (const auto *I : OCD->class_methods())
- // Class methods should always be defined.
- ClassMethods.push_back(GetMethodConstant(I));
+ enum {
+ InstanceMethods,
+ ClassMethods,
+ NumMethodLists
+ };
+ SmallVector<const ObjCMethodDecl *, 16> Methods[NumMethodLists];
+ for (const auto *MD : OCD->methods()) {
+ Methods[unsigned(MD->isClassMethod())].push_back(MD);
+ }
- llvm::Constant *Values[8];
- Values[0] = GetClassName(OCD->getName());
- Values[1] = GetClassName(Interface->getObjCRuntimeNameAsString());
+ Values.add(GetClassName(OCD->getName()));
+ Values.add(GetClassName(Interface->getObjCRuntimeNameAsString()));
LazySymbols.insert(Interface->getIdentifier());
- Values[2] = EmitMethodList("OBJC_CATEGORY_INSTANCE_METHODS_" + ExtName.str(),
- "__OBJC,__cat_inst_meth,regular,no_dead_strip",
- InstanceMethods);
- Values[3] = EmitMethodList("OBJC_CATEGORY_CLASS_METHODS_" + ExtName.str(),
- "__OBJC,__cat_cls_meth,regular,no_dead_strip",
- ClassMethods);
+
+ Values.add(emitMethodList(ExtName, MethodListType::CategoryInstanceMethods,
+ Methods[InstanceMethods]));
+ Values.add(emitMethodList(ExtName, MethodListType::CategoryClassMethods,
+ Methods[ClassMethods]));
if (Category) {
- Values[4] =
+ Values.add(
EmitProtocolList("OBJC_CATEGORY_PROTOCOLS_" + ExtName.str(),
- Category->protocol_begin(), Category->protocol_end());
+ Category->protocol_begin(), Category->protocol_end()));
} else {
- Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
+ Values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
}
- Values[5] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ Values.addInt(ObjCTypes.IntTy, Size);
// If there is no category @interface then there can be no properties.
if (Category) {
- Values[6] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, false);
- Values[7] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, true);
+ Values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, false));
+ Values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, true));
} else {
- Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- Values[7] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ Values.addNullPointer(ObjCTypes.PropertyListPtrTy);
+ Values.addNullPointer(ObjCTypes.PropertyListPtrTy);
}
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.CategoryTy,
- Values);
-
llvm::GlobalVariable *GV =
- CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Init,
+ CreateMetadataVar("OBJC_CATEGORY_" + ExtName.str(), Values,
"__OBJC,__category,regular,no_dead_strip",
CGM.getPointerAlign(), true);
DefinedCategories.push_back(GV);
- DefinedCategoryNames.insert(ExtName.str());
+ DefinedCategoryNames.insert(llvm::CachedHashString(ExtName));
// method definition entries must be clear for next implementation.
MethodDefinitions.clear();
}
@@ -3282,57 +3420,56 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= FragileABI_Class_Hidden;
- SmallVector<llvm::Constant *, 16> InstanceMethods, ClassMethods;
- for (const auto *I : ID->instance_methods())
- // Instance methods should always be defined.
- InstanceMethods.push_back(GetMethodConstant(I));
-
- for (const auto *I : ID->class_methods())
- // Class methods should always be defined.
- ClassMethods.push_back(GetMethodConstant(I));
+ enum {
+ InstanceMethods,
+ ClassMethods,
+ NumMethodLists
+ };
+ SmallVector<const ObjCMethodDecl *, 16> Methods[NumMethodLists];
+ for (const auto *MD : ID->methods()) {
+ Methods[unsigned(MD->isClassMethod())].push_back(MD);
+ }
for (const auto *PID : ID->property_impls()) {
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize) {
ObjCPropertyDecl *PD = PID->getPropertyDecl();
if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- InstanceMethods.push_back(C);
+ if (GetMethodDefinition(MD))
+ Methods[InstanceMethods].push_back(MD);
if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- InstanceMethods.push_back(C);
+ if (GetMethodDefinition(MD))
+ Methods[InstanceMethods].push_back(MD);
}
}
- llvm::Constant *Values[12];
- Values[ 0] = EmitMetaClass(ID, Protocols, ClassMethods);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassTy);
+ values.add(EmitMetaClass(ID, Protocols, Methods[ClassMethods]));
if (ObjCInterfaceDecl *Super = Interface->getSuperClass()) {
// Record a reference to the super class.
LazySymbols.insert(Super->getIdentifier());
- Values[ 1] =
- llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
} else {
- Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ values.addNullPointer(ObjCTypes.ClassPtrTy);
}
- Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
// Version is always 0.
- Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
- Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size.getQuantity());
- Values[ 6] = EmitIvarList(ID, false);
- Values[7] = EmitMethodList("OBJC_INSTANCE_METHODS_" + ID->getName(),
- "__OBJC,__inst_meth,regular,no_dead_strip",
- InstanceMethods);
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addInt(ObjCTypes.LongTy, Flags);
+ values.addInt(ObjCTypes.LongTy, Size.getQuantity());
+ values.add(EmitIvarList(ID, false));
+ values.add(emitMethodList(ID->getName(), MethodListType::InstanceMethods,
+ Methods[InstanceMethods]));
// cache is always NULL.
- Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
- Values[ 9] = Protocols;
- Values[10] = BuildStrongIvarLayout(ID, CharUnits::Zero(), Size);
- Values[11] = EmitClassExtension(ID, Size, hasMRCWeak,
- false/*isClassProperty*/);
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
- Values);
+ values.addNullPointer(ObjCTypes.CachePtrTy);
+ values.add(Protocols);
+ values.add(BuildStrongIvarLayout(ID, CharUnits::Zero(), Size));
+ values.add(EmitClassExtension(ID, Size, hasMRCWeak,
+ /*isMetaclass*/ false));
+
std::string Name("OBJC_CLASS_");
Name += ClassName;
const char *Section = "__OBJC,__class,regular,no_dead_strip";
@@ -3341,12 +3478,12 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
if (GV) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- GV->setInitializer(Init);
+ values.finishAndSetAsInitializer(GV);
GV->setSection(Section);
GV->setAlignment(CGM.getPointerAlign().getQuantity());
CGM.addCompilerUsedGlobal(GV);
} else
- GV = CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ GV = CreateMetadataVar(Name, values, Section, CGM.getPointerAlign(), true);
DefinedClasses.push_back(GV);
ImplementedClasses.push_back(Interface);
// method definition entries must be clear for next implementation.
@@ -3355,50 +3492,46 @@ void CGObjCMac::GenerateClass(const ObjCImplementationDecl *ID) {
llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
llvm::Constant *Protocols,
- ArrayRef<llvm::Constant*> Methods) {
+ ArrayRef<const ObjCMethodDecl*> Methods) {
unsigned Flags = FragileABI_Class_Meta;
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassTy);
if (ID->getClassInterface()->getVisibility() == HiddenVisibility)
Flags |= FragileABI_Class_Hidden;
- llvm::Constant *Values[12];
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassTy);
// The isa for the metaclass is the root of the hierarchy.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
while (const ObjCInterfaceDecl *Super = Root->getSuperClass())
Root = Super;
- Values[ 0] =
- llvm::ConstantExpr::getBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Root->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
// The super class for the metaclass is emitted as the name of the
// super class. The runtime fixes this up to point to the
// *metaclass* for the super class.
if (ObjCInterfaceDecl *Super = ID->getClassInterface()->getSuperClass()) {
- Values[ 1] =
- llvm::ConstantExpr::getBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
- ObjCTypes.ClassPtrTy);
+ values.addBitCast(GetClassName(Super->getObjCRuntimeNameAsString()),
+ ObjCTypes.ClassPtrTy);
} else {
- Values[ 1] = llvm::Constant::getNullValue(ObjCTypes.ClassPtrTy);
+ values.addNullPointer(ObjCTypes.ClassPtrTy);
}
- Values[ 2] = GetClassName(ID->getObjCRuntimeNameAsString());
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
// Version is always 0.
- Values[ 3] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[ 4] = llvm::ConstantInt::get(ObjCTypes.LongTy, Flags);
- Values[ 5] = llvm::ConstantInt::get(ObjCTypes.LongTy, Size);
- Values[ 6] = EmitIvarList(ID, true);
- Values[7] =
- EmitMethodList("OBJC_CLASS_METHODS_" + ID->getNameAsString(),
- "__OBJC,__cls_meth,regular,no_dead_strip", Methods);
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addInt(ObjCTypes.LongTy, Flags);
+ values.addInt(ObjCTypes.LongTy, Size);
+ values.add(EmitIvarList(ID, true));
+ values.add(emitMethodList(ID->getName(), MethodListType::ClassMethods,
+ Methods));
// cache is always NULL.
- Values[ 8] = llvm::Constant::getNullValue(ObjCTypes.CachePtrTy);
- Values[ 9] = Protocols;
+ values.addNullPointer(ObjCTypes.CachePtrTy);
+ values.add(Protocols);
// ivar_layout for metaclass is always NULL.
- Values[10] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
// The class extension is used to store class properties for metaclasses.
- Values[11] = EmitClassExtension(ID, CharUnits::Zero(), false/*hasMRCWeak*/,
- true/*isClassProperty*/);
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassTy,
- Values);
+ values.add(EmitClassExtension(ID, CharUnits::Zero(), false/*hasMRCWeak*/,
+ /*isMetaclass*/true));
std::string Name("OBJC_METACLASS_");
Name += ID->getName();
@@ -3408,14 +3541,13 @@ llvm::Constant *CGObjCMac::EmitMetaClass(const ObjCImplementationDecl *ID,
if (GV) {
assert(GV->getType()->getElementType() == ObjCTypes.ClassTy &&
"Forward metaclass reference has incorrect type.");
- GV->setInitializer(Init);
+ values.finishAndSetAsInitializer(GV);
} else {
- GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassTy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init, Name);
+ GV = values.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
}
GV->setSection("__OBJC,__meta_class,regular,no_dead_strip");
- GV->setAlignment(4);
CGM.addCompilerUsedGlobal(GV);
return GV;
@@ -3471,32 +3603,38 @@ llvm::Value *CGObjCMac::EmitSuperClassRef(const ObjCInterfaceDecl *ID) {
llvm::Constant *
CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
CharUnits InstanceSize, bool hasMRCWeakIvars,
- bool isClassProperty) {
- uint64_t Size =
- CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+ bool isMetaclass) {
+ // Weak ivar layout.
+ llvm::Constant *layout;
+ if (isMetaclass) {
+ layout = llvm::ConstantPointerNull::get(CGM.Int8PtrTy);
+ } else {
+ layout = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
+ hasMRCWeakIvars);
+ }
- llvm::Constant *Values[3];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- if (isClassProperty) {
- llvm::Type *PtrTy = CGM.Int8PtrTy;
- Values[1] = llvm::Constant::getNullValue(PtrTy);
- } else
- Values[1] = BuildWeakIvarLayout(ID, CharUnits::Zero(), InstanceSize,
- hasMRCWeakIvars);
- if (isClassProperty)
- Values[2] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ID->getName(),
- ID, ID->getClassInterface(), ObjCTypes, true);
- else
- Values[2] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ID->getName(),
- ID, ID->getClassInterface(), ObjCTypes, false);
+ // Properties.
+ llvm::Constant *propertyList =
+ EmitPropertyList((isMetaclass ? Twine("\01l_OBJC_$_CLASS_PROP_LIST_")
+ : Twine("\01l_OBJC_$_PROP_LIST_"))
+ + ID->getName(),
+ ID, ID->getClassInterface(), ObjCTypes, isMetaclass);
// Return null if no extension bits are used.
- if ((!Values[1] || Values[1]->isNullValue()) && Values[2]->isNullValue())
+ if (layout->isNullValue() && propertyList->isNullValue()) {
return llvm::Constant::getNullValue(ObjCTypes.ClassExtensionPtrTy);
+ }
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.ClassExtensionTy, Values);
- return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), Init,
+ uint64_t size =
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ClassExtensionTy);
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassExtensionTy);
+ values.addInt(ObjCTypes.IntTy, size);
+ values.add(layout);
+ values.add(propertyList);
+
+ return CreateMetadataVar("OBJC_CLASSEXT_" + ID->getName(), values,
"__OBJC,__class_ext,regular,no_dead_strip",
CGM.getPointerAlign(), true);
}
@@ -3515,8 +3653,6 @@ CGObjCMac::EmitClassExtension(const ObjCImplementationDecl *ID,
*/
llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
bool ForClass) {
- std::vector<llvm::Constant*> Ivars;
-
// When emitting the root class GCC emits ivar entries for the
// actual class structure. It is not clear if we need to follow this
// behavior; for now lets try and get away with not doing it. If so,
@@ -3527,91 +3663,181 @@ llvm::Constant *CGObjCMac::EmitIvarList(const ObjCImplementationDecl *ID,
const ObjCInterfaceDecl *OID = ID->getClassInterface();
+ ConstantInitBuilder builder(CGM);
+ auto ivarList = builder.beginStruct();
+ auto countSlot = ivarList.addPlaceholder();
+ auto ivars = ivarList.beginArray(ObjCTypes.IvarTy);
+
for (const ObjCIvarDecl *IVD = OID->all_declared_ivar_begin();
IVD; IVD = IVD->getNextIvar()) {
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- llvm::Constant *Ivar[] = {
- GetMethodVarName(IVD->getIdentifier()),
- GetMethodVarType(IVD),
- llvm::ConstantInt::get(ObjCTypes.IntTy,
- ComputeIvarBaseOffset(CGM, OID, IVD))
- };
- Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarTy, Ivar));
+
+ auto ivar = ivars.beginStruct(ObjCTypes.IvarTy);
+ ivar.add(GetMethodVarName(IVD->getIdentifier()));
+ ivar.add(GetMethodVarType(IVD));
+ ivar.addInt(ObjCTypes.IntTy, ComputeIvarBaseOffset(CGM, OID, IVD));
+ ivar.finishAndAddTo(ivars);
}
// Return null for empty list.
- if (Ivars.empty())
+ auto count = ivars.size();
+ if (count == 0) {
+ ivars.abandon();
+ ivarList.abandon();
return llvm::Constant::getNullValue(ObjCTypes.IvarListPtrTy);
+ }
- llvm::Constant *Values[2];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarTy,
- Ivars.size());
- Values[1] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ ivars.finishAndAddTo(ivarList);
+ ivarList.fillPlaceholderWithInt(countSlot, ObjCTypes.IntTy, count);
llvm::GlobalVariable *GV;
if (ForClass)
GV =
- CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), Init,
+ CreateMetadataVar("OBJC_CLASS_VARIABLES_" + ID->getName(), ivarList,
"__OBJC,__class_vars,regular,no_dead_strip",
CGM.getPointerAlign(), true);
else
- GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), Init,
+ GV = CreateMetadataVar("OBJC_INSTANCE_VARIABLES_" + ID->getName(), ivarList,
"__OBJC,__instance_vars,regular,no_dead_strip",
CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListPtrTy);
}
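
For orientation, the constant this helper now streams out has the same shape the removed code built by hand: an anonymous { i32 count, [N x struct objc_ivar] } aggregate, with the count slot filled in through the placeholder once the array is complete. The standalone sketch below shows that shape with the plain LLVM C++ API; the name/type fields are left as null i8* and the offset is invented, so it only illustrates the layout, not clang's actual emission path.

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("ivarlist-demo", Ctx);

  auto *I8Ptr = llvm::Type::getInt8PtrTy(Ctx);
  auto *I32 = llvm::Type::getInt32Ty(Ctx);

  // struct objc_ivar { char *ivar_name; char *ivar_type; int ivar_offset; }
  auto *IvarTy =
      llvm::StructType::create(Ctx, {I8Ptr, I8Ptr, I32}, "struct.objc_ivar");

  // One ivar entry with the strings elided and a made-up offset of 8.
  llvm::Constant *Ivars[] = {llvm::ConstantStruct::get(
      IvarTy, {llvm::ConstantPointerNull::get(I8Ptr),
               llvm::ConstantPointerNull::get(I8Ptr),
               llvm::ConstantInt::get(I32, 8)})};

  // { i32 ivar_count, [1 x %struct.objc_ivar] } -- the count slot is what
  // the builder fills in via fillPlaceholderWithInt above.
  llvm::Constant *Fields[] = {
      llvm::ConstantInt::get(I32, 1),
      llvm::ConstantArray::get(llvm::ArrayType::get(IvarTy, 1), Ivars)};
  llvm::Constant *Init = llvm::ConstantStruct::getAnon(Fields);

  new llvm::GlobalVariable(M, Init->getType(), /*isConstant=*/false,
                           llvm::GlobalValue::PrivateLinkage, Init,
                           "OBJC_INSTANCE_VARIABLES_Demo");
  M.print(llvm::outs(), nullptr);
  return 0;
}
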
-/*
- struct objc_method {
- SEL method_name;
- char *method_types;
- void *method;
- };
+/// Build a struct objc_method_description constant for the given method.
+///
+/// struct objc_method_description {
+/// SEL method_name;
+/// char *method_types;
+/// };
+void CGObjCMac::emitMethodDescriptionConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD) {
+ auto description = builder.beginStruct(ObjCTypes.MethodDescriptionTy);
+ description.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ description.add(GetMethodVarType(MD));
+ description.finishAndAddTo(builder);
+}
- struct objc_method_list {
- struct objc_method_list *obsolete;
- int count;
- struct objc_method methods_list[count];
- };
-*/
+/// Build a struct objc_method constant for the given method.
+///
+/// struct objc_method {
+/// SEL method_name;
+/// char *method_types;
+/// void *method;
+/// };
+void CGObjCMac::emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD) {
+ llvm::Function *fn = GetMethodDefinition(MD);
+ assert(fn && "no definition registered for method");
-/// GetMethodConstant - Return a struct objc_method constant for the
-/// given method if it has been defined. The result is null if the
-/// method has not been defined. The return value has type MethodPtrTy.
-llvm::Constant *CGObjCMac::GetMethodConstant(const ObjCMethodDecl *MD) {
- llvm::Function *Fn = GetMethodDefinition(MD);
- if (!Fn)
- return nullptr;
-
- llvm::Constant *Method[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD),
- llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
- };
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+ auto method = builder.beginStruct(ObjCTypes.MethodTy);
+ method.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarType(MD));
+ method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ method.finishAndAddTo(builder);
}
-llvm::Constant *CGObjCMac::EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
+/// Build a struct objc_method_list or struct objc_method_description_list,
+/// as appropriate.
+///
+/// struct objc_method_list {
+/// struct objc_method_list *obsolete;
+/// int count;
+/// struct objc_method methods_list[count];
+/// };
+///
+/// struct objc_method_description_list {
+/// int count;
+/// struct objc_method_description list[count];
+/// };
+llvm::Constant *CGObjCMac::emitMethodList(Twine name, MethodListType MLT,
+ ArrayRef<const ObjCMethodDecl *> methods) {
+ StringRef prefix;
+ StringRef section;
+ bool forProtocol = false;
+ switch (MLT) {
+ case MethodListType::CategoryInstanceMethods:
+ prefix = "OBJC_CATEGORY_INSTANCE_METHODS_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::CategoryClassMethods:
+ prefix = "OBJC_CATEGORY_CLASS_METHODS_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::InstanceMethods:
+ prefix = "OBJC_INSTANCE_METHODS_";
+ section = "__OBJC,__inst_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::ClassMethods:
+ prefix = "OBJC_CLASS_METHODS_";
+ section = "__OBJC,__cls_meth,regular,no_dead_strip";
+ forProtocol = false;
+ break;
+ case MethodListType::ProtocolInstanceMethods:
+ prefix = "OBJC_PROTOCOL_INSTANCE_METHODS_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::ProtocolClassMethods:
+ prefix = "OBJC_PROTOCOL_CLASS_METHODS_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolInstanceMethods:
+ prefix = "OBJC_PROTOCOL_INSTANCE_METHODS_OPT_";
+ section = "__OBJC,__cat_inst_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolClassMethods:
+ prefix = "OBJC_PROTOCOL_CLASS_METHODS_OPT_";
+ section = "__OBJC,__cat_cls_meth,regular,no_dead_strip";
+ forProtocol = true;
+ break;
+ }
+
// Return null for empty list.
- if (Methods.empty())
- return llvm::Constant::getNullValue(ObjCTypes.MethodListPtrTy);
+ if (methods.empty())
+ return llvm::Constant::getNullValue(forProtocol
+ ? ObjCTypes.MethodDescriptionListPtrTy
+ : ObjCTypes.MethodListPtrTy);
+
+ // For protocols, this is an objc_method_description_list, which has
+ // a slightly different structure.
+ if (forProtocol) {
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodDescriptionTy);
+ for (auto MD : methods) {
+ emitMethodDescriptionConstant(methodArray, MD);
+ }
+ methodArray.finishAndAddTo(values);
+
+ llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
+ CGM.getPointerAlign(), true);
+ return llvm::ConstantExpr::getBitCast(GV,
+ ObjCTypes.MethodDescriptionListPtrTy);
+ }
- llvm::Constant *Values[3];
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
- Methods.size());
- Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ // Otherwise, it's an objc_method_list.
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodTy);
+ for (auto MD : methods) {
+ emitMethodConstant(methodArray, MD);
+ }
+ methodArray.finishAndAddTo(values);
- llvm::GlobalVariable *GV =
- CreateMetadataVar(Name, Init, Section, CGM.getPointerAlign(), true);
+ llvm::GlobalVariable *GV = CreateMetadataVar(prefix + name, values, section,
+ CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListPtrTy);
}
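
The two list shapes described in the comment block above can be mirrored in ordinary C++ for reference. This is an illustrative sketch only: SEL is modelled as a plain string, the flexible array tail as a fixed-size template parameter, and the selectors and type encodings are invented.

#include <cstdio>

using SEL = const char *; // opaque in the real runtime; a string here

struct objc_method {
  SEL method_name;
  const char *method_types; // @encode-style type string
  void *method;             // IMP
};

struct objc_method_description {
  SEL method_name;
  const char *method_types;
};

template <unsigned N> struct objc_method_list_n {
  void *obsolete;              // always emitted as null
  int count;
  objc_method methods_list[N]; // 'count' entries in the real layout
};

template <unsigned N> struct objc_method_description_list_n {
  int count;
  objc_method_description list[N];
};

static void dummyIMP() {}

static objc_method_list_n<2> instanceMethods = {
    nullptr, 2,
    {{"init", "@16@0:8", reinterpret_cast<void *>(&dummyIMP)},
     {"description", "@16@0:8", reinterpret_cast<void *>(&dummyIMP)}}};

static objc_method_description_list_n<1> protocolMethods = {
    1, {{"count", "Q16@0:8"}}};

int main() {
  std::printf("%d methods, first selector %s; %d protocol methods\n",
              instanceMethods.count,
              instanceMethods.methods_list[0].method_name,
              protocolMethods.count);
  return 0;
}
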
@@ -3634,6 +3860,21 @@ llvm::Function *CGObjCCommonMac::GenerateMethod(const ObjCMethodDecl *OMD,
}
llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
+ ConstantStructBuilder &Init,
+ StringRef Section,
+ CharUnits Align,
+ bool AddToUsed) {
+ llvm::GlobalVariable *GV =
+ Init.finishAndCreateGlobal(Name, Align, /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (!Section.empty())
+ GV->setSection(Section);
+ if (AddToUsed)
+ CGM.addCompilerUsedGlobal(GV);
+ return GV;
+}
+
+llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
llvm::Constant *Init,
StringRef Section,
CharUnits Align,
@@ -3650,6 +3891,54 @@ llvm::GlobalVariable *CGObjCCommonMac::CreateMetadataVar(Twine Name,
return GV;
}
+llvm::GlobalVariable *
+CGObjCCommonMac::CreateCStringLiteral(StringRef Name, ObjCLabelType Type,
+ bool ForceNonFragileABI,
+ bool NullTerminate) {
+ StringRef Label;
+ switch (Type) {
+ case ObjCLabelType::ClassName: Label = "OBJC_CLASS_NAME_"; break;
+ case ObjCLabelType::MethodVarName: Label = "OBJC_METH_VAR_NAME_"; break;
+ case ObjCLabelType::MethodVarType: Label = "OBJC_METH_VAR_TYPE_"; break;
+ case ObjCLabelType::PropertyName: Label = "OBJC_PROP_NAME_ATTR_"; break;
+ }
+
+ bool NonFragile = ForceNonFragileABI || isNonFragileABI();
+
+ StringRef Section;
+ switch (Type) {
+ case ObjCLabelType::ClassName:
+ Section = NonFragile ? "__TEXT,__objc_classname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::MethodVarName:
+ Section = NonFragile ? "__TEXT,__objc_methname,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::MethodVarType:
+ Section = NonFragile ? "__TEXT,__objc_methtype,cstring_literals"
+ : "__TEXT,__cstring,cstring_literals";
+ break;
+ case ObjCLabelType::PropertyName:
+ Section = "__TEXT,__cstring,cstring_literals";
+ break;
+ }
+
+ llvm::Constant *Value =
+ llvm::ConstantDataArray::getString(VMContext, Name, NullTerminate);
+ llvm::GlobalVariable *GV =
+ new llvm::GlobalVariable(CGM.getModule(), Value->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Value, Label);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection(Section);
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(CharUnits::One().getQuantity());
+ CGM.addCompilerUsedGlobal(GV);
+
+ return GV;
+}
+
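
CreateCStringLiteral centralizes what the individual CreateMetadataVar call sites used to do: emit a private, unnamed_addr, null-terminated string constant into the appropriate cstring section. A minimal standalone LLVM sketch of that pattern follows; the selector text is invented and only the Mach-O method-name section is shown.

#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("cstring-demo", Ctx);

  // Null-terminated selector name, e.g. what GetMethodVarName interns.
  llvm::Constant *Str =
      llvm::ConstantDataArray::getString(Ctx, "description", /*AddNull=*/true);
  auto *GV = new llvm::GlobalVariable(M, Str->getType(), /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Str,
                                      "OBJC_METH_VAR_NAME_");
  GV->setSection("__TEXT,__objc_methname,cstring_literals");
  GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
  M.print(llvm::outs(), nullptr);
  return 0;
}
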
llvm::Function *CGObjCMac::ModuleInitFunction() {
// Abuse this interface function as a place to finalize.
FinishModule();
@@ -4390,8 +4679,8 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4411,8 +4700,8 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4437,8 +4726,8 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
@@ -4456,8 +4745,8 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
if (!isa<llvm::PointerType>(SrcTy)) {
unsigned Size = CGM.getDataLayout().getTypeAllocSize(SrcTy);
assert(Size <= 8 && "does not support size > 8");
- src = (Size == 4) ? CGF.Builder.CreateBitCast(src, ObjCTypes.IntTy)
- : CGF.Builder.CreateBitCast(src, ObjCTypes.LongLongTy);
+ src = (Size == 4) ? CGF.Builder.CreateBitCast(src, CGM.Int32Ty)
+ : CGF.Builder.CreateBitCast(src, CGM.Int64Ty);
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
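
All four write-barrier helpers above share the same preamble: a non-pointer operand of at most eight bytes is first bit-cast to an integer of matching width and then converted with inttoptr before being treated as an object pointer. Below is a standalone IRBuilder sketch of that laundering for an 8-byte double; the function name and types are invented for the demo and it is not clang's code path.

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/raw_ostream.h"

int main() {
  llvm::LLVMContext Ctx;
  llvm::Module M("cast-demo", Ctx);
  llvm::IRBuilder<> B(Ctx);

  auto *FnTy = llvm::FunctionType::get(llvm::Type::getInt8PtrTy(Ctx),
                                       {llvm::Type::getDoubleTy(Ctx)},
                                       /*isVarArg=*/false);
  auto *Fn = llvm::Function::Create(FnTy, llvm::Function::ExternalLinkage,
                                    "launder", &M);
  B.SetInsertPoint(llvm::BasicBlock::Create(Ctx, "entry", Fn));

  llvm::Value *Src = &*Fn->arg_begin();          // an 8-byte non-pointer value
  Src = B.CreateBitCast(Src, B.getInt64Ty());    // reinterpret as i64
  Src = B.CreateIntToPtr(Src, B.getInt8PtrTy()); // then as i8*
  B.CreateRet(Src);

  llvm::verifyModule(M, &llvm::errs());
  M.print(llvm::outs(), nullptr);
  return 0;
}
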
@@ -4590,15 +4879,14 @@ static const int ModuleVersion = 7;
void CGObjCMac::EmitModuleInfo() {
uint64_t Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ModuleTy);
- llvm::Constant *Values[] = {
- llvm::ConstantInt::get(ObjCTypes.LongTy, ModuleVersion),
- llvm::ConstantInt::get(ObjCTypes.LongTy, Size),
- // This used to be the filename, now it is unused. <rdr://4327263>
- GetClassName(StringRef("")),
- EmitModuleSymbols()
- };
- CreateMetadataVar("OBJC_MODULES",
- llvm::ConstantStruct::get(ObjCTypes.ModuleTy, Values),
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ModuleTy);
+ values.addInt(ObjCTypes.LongTy, ModuleVersion);
+ values.addInt(ObjCTypes.LongTy, Size);
+ // This used to be the filename, now it is unused. <rdr://4327263>
+ values.add(GetClassName(StringRef("")));
+ values.add(EmitModuleSymbols());
+ CreateMetadataVar("OBJC_MODULES", values,
"__OBJC,__module_info,regular,no_dead_strip",
CGM.getPointerAlign(), true);
}
@@ -4611,15 +4899,16 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
if (!NumClasses && !NumCategories)
return llvm::Constant::getNullValue(ObjCTypes.SymtabPtrTy);
- llvm::Constant *Values[5];
- Values[0] = llvm::ConstantInt::get(ObjCTypes.LongTy, 0);
- Values[1] = llvm::Constant::getNullValue(ObjCTypes.SelectorPtrTy);
- Values[2] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumClasses);
- Values[3] = llvm::ConstantInt::get(ObjCTypes.ShortTy, NumCategories);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.addInt(ObjCTypes.LongTy, 0);
+ values.addNullPointer(ObjCTypes.SelectorPtrTy);
+ values.addInt(ObjCTypes.ShortTy, NumClasses);
+ values.addInt(ObjCTypes.ShortTy, NumCategories);
// The runtime expects exactly the list of defined classes followed
// by the list of defined categories, in a single array.
- SmallVector<llvm::Constant*, 8> Symbols(NumClasses + NumCategories);
+ auto array = values.beginArray(ObjCTypes.Int8PtrTy);
for (unsigned i=0; i<NumClasses; i++) {
const ObjCInterfaceDecl *ID = ImplementedClasses[i];
assert(ID);
@@ -4627,24 +4916,16 @@ llvm::Constant *CGObjCMac::EmitModuleSymbols() {
// We are implementing a weak imported interface. Give it external linkage
if (ID->isWeakImported() && !IMP->isWeakImported())
DefinedClasses[i]->setLinkage(llvm::GlobalVariable::ExternalLinkage);
-
- Symbols[i] = llvm::ConstantExpr::getBitCast(DefinedClasses[i],
- ObjCTypes.Int8PtrTy);
+
+ array.addBitCast(DefinedClasses[i], ObjCTypes.Int8PtrTy);
}
for (unsigned i=0; i<NumCategories; i++)
- Symbols[NumClasses + i] =
- llvm::ConstantExpr::getBitCast(DefinedCategories[i],
- ObjCTypes.Int8PtrTy);
+ array.addBitCast(DefinedCategories[i], ObjCTypes.Int8PtrTy);
- Values[4] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.Int8PtrTy,
- Symbols.size()),
- Symbols);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ array.finishAndAddTo(values);
llvm::GlobalVariable *GV = CreateMetadataVar(
- "OBJC_SYMBOLS", Init, "__OBJC,__symbols,regular,no_dead_strip",
+ "OBJC_SYMBOLS", values, "__OBJC,__symbols,regular,no_dead_strip",
CGM.getPointerAlign(), true);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.SymtabPtrTy);
}
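
The module record and symbol table built above follow the classic fragile-runtime layout: a version/size header, an unused name, and a symtab whose defs array holds the class pointers followed by the category pointers. The C++ mirror below is for orientation only; the field names are the traditional runtime ones rather than anything defined in this file, and the trailing array is modelled with a fixed size.

#include <cstdint>

struct objc_symtab_2 {           // symtab with two defs, as a sketch
  unsigned long sel_ref_cnt;     // always emitted as 0
  const void *refs;              // SEL *, always emitted as null
  unsigned short cls_def_cnt;    // number of defined classes
  unsigned short cat_def_cnt;    // number of defined categories
  const void *defs[2];           // classes first, then categories
};

struct objc_module_mirror {
  unsigned long version;         // ModuleVersion (7)
  unsigned long size;            // sizeof(struct objc_module)
  const char *name;              // unused; empty string
  const objc_symtab_2 *symtab;
};

static const char Cls[] = "OBJC_CLASS_Demo";           // stand-in pointers
static const char Cat[] = "OBJC_CATEGORY_Demo_Extra";

static const objc_symtab_2 Symtab = {0, nullptr, 1, 1, {Cls, Cat}};
static const objc_module_mirror Module = {7, sizeof(objc_module_mirror), "",
                                          &Symtab};

int main() { return Module.symtab->cls_def_cnt; }
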
@@ -4707,12 +4988,7 @@ Address CGObjCMac::EmitSelectorAddr(CodeGenFunction &CGF, Selector Sel) {
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
llvm::GlobalVariable *&Entry = ClassNames[RuntimeName];
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::getString(VMContext, RuntimeName),
- ((ObjCABI == 2) ? "__TEXT,__objc_classname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
+ Entry = CreateCStringLiteral(RuntimeName, ObjCLabelType::ClassName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -4960,14 +5236,8 @@ llvm::Constant *IvarLayoutBuilder::buildBitmap(CGObjCCommonMac &CGObjC,
// Null terminate the string.
buffer.push_back(0);
- bool isNonFragileABI = CGObjC.isNonFragileABI();
-
- llvm::GlobalVariable *Entry = CGObjC.CreateMetadataVar(
- "OBJC_CLASS_NAME_",
- llvm::ConstantDataArray::get(CGM.getLLVMContext(), buffer),
- (isNonFragileABI ? "__TEXT,__objc_classname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
+ auto *Entry = CGObjC.CreateCStringLiteral(
+ reinterpret_cast<char *>(buffer.data()), ObjCLabelType::ClassName);
return getConstantGEP(CGM.getLLVMContext(), Entry, 0, 0);
}
@@ -5062,16 +5332,9 @@ CGObjCCommonMac::BuildIvarLayout(const ObjCImplementationDecl *OMD,
llvm::Constant *CGObjCCommonMac::GetMethodVarName(Selector Sel) {
llvm::GlobalVariable *&Entry = MethodVarNames[Sel];
-
// FIXME: Avoid std::string in "Sel.getAsString()"
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_NAME_",
- llvm::ConstantDataArray::getString(VMContext, Sel.getAsString()),
- ((ObjCABI == 2) ? "__TEXT,__objc_methname,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(Sel.getAsString(), ObjCLabelType::MethodVarName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5085,47 +5348,27 @@ llvm::Constant *CGObjCCommonMac::GetMethodVarType(const FieldDecl *Field) {
CGM.getContext().getObjCEncodingForType(Field->getType(), TypeStr, Field);
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(TypeStr, ObjCLabelType::MethodVarType);
return getConstantGEP(VMContext, Entry, 0, 0);
}
llvm::Constant *CGObjCCommonMac::GetMethodVarType(const ObjCMethodDecl *D,
bool Extended) {
- std::string TypeStr;
- if (CGM.getContext().getObjCEncodingForMethodDecl(D, TypeStr, Extended))
- return nullptr;
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForMethodDecl(D, Extended);
llvm::GlobalVariable *&Entry = MethodVarTypes[TypeStr];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_METH_VAR_TYPE_",
- llvm::ConstantDataArray::getString(VMContext, TypeStr),
- ((ObjCABI == 2) ? "__TEXT,__objc_methtype,cstring_literals"
- : "__TEXT,__cstring,cstring_literals"),
- CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(TypeStr, ObjCLabelType::MethodVarType);
return getConstantGEP(VMContext, Entry, 0, 0);
}
// FIXME: Merge into a single cstring creation function.
llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
llvm::GlobalVariable *&Entry = PropertyNames[Ident];
-
if (!Entry)
- Entry = CreateMetadataVar(
- "OBJC_PROP_NAME_ATTR_",
- llvm::ConstantDataArray::getString(VMContext, Ident->getName()),
- "__TEXT,__cstring,cstring_literals", CharUnits::One(), true);
-
+ Entry = CreateCStringLiteral(Ident->getName(), ObjCLabelType::PropertyName);
return getConstantGEP(VMContext, Entry, 0, 0);
}
@@ -5134,8 +5377,8 @@ llvm::Constant *CGObjCCommonMac::GetPropertyName(IdentifierInfo *Ident) {
llvm::Constant *
CGObjCCommonMac::GetPropertyTypeString(const ObjCPropertyDecl *PD,
const Decl *Container) {
- std::string TypeStr;
- CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container, TypeStr);
+ std::string TypeStr =
+ CGM.getContext().getObjCEncodingForPropertyDecl(PD, Container);
return GetPropertyName(&CGM.getContext().Idents.get(TypeStr));
}
@@ -5157,20 +5400,20 @@ void CGObjCMac::FinishModule() {
// Emit the dummy bodies for any protocols which were referenced but
// never defined.
- for (llvm::DenseMap<IdentifierInfo*, llvm::GlobalVariable*>::iterator
- I = Protocols.begin(), e = Protocols.end(); I != e; ++I) {
- if (I->second->hasInitializer())
+ for (auto &entry : Protocols) {
+ llvm::GlobalVariable *global = entry.second;
+ if (global->hasInitializer())
continue;
- llvm::Constant *Values[5];
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ProtocolExtensionPtrTy);
- Values[1] = GetClassName(I->first->getName());
- Values[2] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListPtrTy);
- Values[3] = Values[4] =
- llvm::Constant::getNullValue(ObjCTypes.MethodDescriptionListPtrTy);
- I->second->setInitializer(llvm::ConstantStruct::get(ObjCTypes.ProtocolTy,
- Values));
- CGM.addCompilerUsedGlobal(I->second);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolTy);
+ values.addNullPointer(ObjCTypes.ProtocolExtensionPtrTy);
+ values.add(GetClassName(entry.first->getName()));
+ values.addNullPointer(ObjCTypes.ProtocolListPtrTy);
+ values.addNullPointer(ObjCTypes.MethodDescriptionListPtrTy);
+ values.addNullPointer(ObjCTypes.MethodDescriptionListPtrTy);
+ values.finishAndSetAsInitializer(global);
+ CGM.addCompilerUsedGlobal(global);
}
// Add assembler directives to add lazy undefined symbol references
@@ -5178,27 +5421,23 @@ void CGObjCMac::FinishModule() {
// important for correct linker interaction.
//
// FIXME: It would be nice if we had an LLVM construct for this.
- if (!LazySymbols.empty() || !DefinedSymbols.empty()) {
+ if ((!LazySymbols.empty() || !DefinedSymbols.empty()) &&
+ CGM.getTriple().isOSBinFormatMachO()) {
SmallString<256> Asm;
Asm += CGM.getModule().getModuleInlineAsm();
if (!Asm.empty() && Asm.back() != '\n')
Asm += '\n';
llvm::raw_svector_ostream OS(Asm);
- for (llvm::SetVector<IdentifierInfo*>::iterator I = DefinedSymbols.begin(),
- e = DefinedSymbols.end(); I != e; ++I)
- OS << "\t.objc_class_name_" << (*I)->getName() << "=0\n"
- << "\t.globl .objc_class_name_" << (*I)->getName() << "\n";
- for (llvm::SetVector<IdentifierInfo*>::iterator I = LazySymbols.begin(),
- e = LazySymbols.end(); I != e; ++I) {
- OS << "\t.lazy_reference .objc_class_name_" << (*I)->getName() << "\n";
- }
+ for (const auto *Sym : DefinedSymbols)
+ OS << "\t.objc_class_name_" << Sym->getName() << "=0\n"
+ << "\t.globl .objc_class_name_" << Sym->getName() << "\n";
+ for (const auto *Sym : LazySymbols)
+ OS << "\t.lazy_reference .objc_class_name_" << Sym->getName() << "\n";
+ for (const auto &Category : DefinedCategoryNames)
+ OS << "\t.objc_category_name_" << Category << "=0\n"
+ << "\t.globl .objc_category_name_" << Category << "\n";
- for (size_t i = 0, e = DefinedCategoryNames.size(); i < e; ++i) {
- OS << "\t.objc_category_name_" << DefinedCategoryNames[i] << "=0\n"
- << "\t.globl .objc_category_name_" << DefinedCategoryNames[i] << "\n";
- }
-
CGM.getModule().setModuleInlineAsm(OS.str());
}
}
@@ -5217,10 +5456,9 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
- ShortTy = Types.ConvertType(Ctx.ShortTy);
- IntTy = Types.ConvertType(Ctx.IntTy);
- LongTy = Types.ConvertType(Ctx.LongTy);
- LongLongTy = Types.ConvertType(Ctx.LongLongTy);
+ ShortTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.ShortTy));
+ IntTy = CGM.IntTy;
+ LongTy = cast<llvm::IntegerType>(Types.ConvertType(Ctx.LongTy));
Int8PtrTy = CGM.Int8PtrTy;
Int8PtrPtrTy = CGM.Int8PtrPtrTy;
@@ -5231,9 +5469,12 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
else
IvarOffsetVarTy = LongTy;
- ObjectPtrTy = Types.ConvertType(Ctx.getObjCIdType());
- PtrObjectPtrTy = llvm::PointerType::getUnqual(ObjectPtrTy);
- SelectorPtrTy = Types.ConvertType(Ctx.getObjCSelType());
+ ObjectPtrTy =
+ cast<llvm::PointerType>(Types.ConvertType(Ctx.getObjCIdType()));
+ PtrObjectPtrTy =
+ llvm::PointerType::getUnqual(ObjectPtrTy);
+ SelectorPtrTy =
+ cast<llvm::PointerType>(Types.ConvertType(Ctx.getObjCSelType()));
// I'm not sure I like this. The implicit coordination is a bit
// gross. We should solve this in a reasonable fashion because this
@@ -5831,7 +6072,6 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
unsigned InstanceSize,
const ObjCImplementationDecl *ID) {
std::string ClassName = ID->getObjCRuntimeNameAsString();
- llvm::Constant *Values[10]; // 11 for 64bit targets!
CharUnits beginInstance = CharUnits::fromQuantity(InstanceStart);
CharUnits endInstance = CharUnits::fromQuantity(InstanceSize);
@@ -5842,85 +6082,84 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
else if ((hasMRCWeak = hasMRCWeakIvars(CGM, ID)))
flags |= NonFragileABI_Class_HasMRCWeakIvars;
- Values[ 0] = llvm::ConstantInt::get(ObjCTypes.IntTy, flags);
- Values[ 1] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceStart);
- Values[ 2] = llvm::ConstantInt::get(ObjCTypes.IntTy, InstanceSize);
- // FIXME. For 64bit targets add 0 here.
- Values[ 3] = (flags & NonFragileABI_Class_Meta)
- ? GetIvarLayoutName(nullptr, ObjCTypes)
- : BuildStrongIvarLayout(ID, beginInstance, endInstance);
- Values[ 4] = GetClassName(ID->getObjCRuntimeNameAsString());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassRonfABITy);
+
+ values.addInt(ObjCTypes.IntTy, flags);
+ values.addInt(ObjCTypes.IntTy, InstanceStart);
+ values.addInt(ObjCTypes.IntTy, InstanceSize);
+ values.add((flags & NonFragileABI_Class_Meta)
+ ? GetIvarLayoutName(nullptr, ObjCTypes)
+ : BuildStrongIvarLayout(ID, beginInstance, endInstance));
+ values.add(GetClassName(ID->getObjCRuntimeNameAsString()));
+
// const struct _method_list_t * const baseMethods;
- std::vector<llvm::Constant*> Methods;
- std::string MethodListName("\01l_OBJC_$_");
+ SmallVector<const ObjCMethodDecl*, 16> methods;
if (flags & NonFragileABI_Class_Meta) {
- MethodListName += "CLASS_METHODS_";
- MethodListName += ID->getObjCRuntimeNameAsString();
- for (const auto *I : ID->class_methods())
- // Class methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
+ for (const auto *MD : ID->class_methods())
+ methods.push_back(MD);
} else {
- MethodListName += "INSTANCE_METHODS_";
- MethodListName += ID->getObjCRuntimeNameAsString();
- for (const auto *I : ID->instance_methods())
- // Instance methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
+ for (const auto *MD : ID->instance_methods())
+ methods.push_back(MD);
for (const auto *PID : ID->property_impls()) {
if (PID->getPropertyImplementation() == ObjCPropertyImplDecl::Synthesize){
ObjCPropertyDecl *PD = PID->getPropertyDecl();
- if (ObjCMethodDecl *MD = PD->getGetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- Methods.push_back(C);
- if (ObjCMethodDecl *MD = PD->getSetterMethodDecl())
- if (llvm::Constant *C = GetMethodConstant(MD))
- Methods.push_back(C);
+ if (auto MD = PD->getGetterMethodDecl())
+ if (GetMethodDefinition(MD))
+ methods.push_back(MD);
+ if (auto MD = PD->getSetterMethodDecl())
+ if (GetMethodDefinition(MD))
+ methods.push_back(MD);
}
}
}
- Values[ 5] = EmitMethodList(MethodListName,
- "__DATA, __objc_const", Methods);
+
+ values.add(emitMethodList(ID->getObjCRuntimeNameAsString(),
+ (flags & NonFragileABI_Class_Meta)
+ ? MethodListType::ClassMethods
+ : MethodListType::InstanceMethods,
+ methods));
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::BuildClassRoTInitializer");
- Values[ 6] = EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ values.add(EmitProtocolList("\01l_OBJC_CLASS_PROTOCOLS_$_"
+ OID->getObjCRuntimeNameAsString(),
- OID->all_referenced_protocol_begin(),
- OID->all_referenced_protocol_end());
+ OID->all_referenced_protocol_begin(),
+ OID->all_referenced_protocol_end()));
if (flags & NonFragileABI_Class_Meta) {
- Values[ 7] = llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
- Values[ 8] = GetIvarLayoutName(nullptr, ObjCTypes);
- Values[ 9] = EmitPropertyList(
+ values.addNullPointer(ObjCTypes.IvarListnfABIPtrTy);
+ values.add(GetIvarLayoutName(nullptr, ObjCTypes));
+ values.add(EmitPropertyList(
"\01l_OBJC_$_CLASS_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
- ID, ID->getClassInterface(), ObjCTypes, true);
+ ID, ID->getClassInterface(), ObjCTypes, true));
} else {
- Values[ 7] = EmitIvarList(ID);
- Values[ 8] = BuildWeakIvarLayout(ID, beginInstance, endInstance,
- hasMRCWeak);
- Values[ 9] = EmitPropertyList(
+ values.add(EmitIvarList(ID));
+ values.add(BuildWeakIvarLayout(ID, beginInstance, endInstance, hasMRCWeak));
+ values.add(EmitPropertyList(
"\01l_OBJC_$_PROP_LIST_" + ID->getObjCRuntimeNameAsString(),
- ID, ID->getClassInterface(), ObjCTypes, false);
+ ID, ID->getClassInterface(), ObjCTypes, false));
}
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassRonfABITy,
- Values);
+
+ llvm::SmallString<64> roLabel;
+ llvm::raw_svector_ostream(roLabel)
+ << ((flags & NonFragileABI_Class_Meta) ? "\01l_OBJC_METACLASS_RO_$_"
+ : "\01l_OBJC_CLASS_RO_$_")
+ << ClassName;
+
llvm::GlobalVariable *CLASS_RO_GV =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassRonfABITy, false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- (flags & NonFragileABI_Class_Meta) ?
- std::string("\01l_OBJC_METACLASS_RO_$_")+ClassName :
- std::string("\01l_OBJC_CLASS_RO_$_")+ClassName);
- CLASS_RO_GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassRonfABITy));
- CLASS_RO_GV->setSection("__DATA, __objc_const");
+ values.finishAndCreateGlobal(roLabel, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ CLASS_RO_GV->setSection("__DATA, __objc_const");
return CLASS_RO_GV;
-
}
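
The ten values streamed into the builder correspond, in order, to the non-fragile class_ro_t record: flags, the instance extents, the strong ivar layout, the name, and then the method, protocol, ivar, weak-layout and property references. The sketch below mirrors that layout in C++ for orientation; the field names are conventional rather than taken from this file, and the extra reserved word that the removed "// 11 for 64bit targets!" comment alludes to is omitted.

#include <cstdint>

struct class_ro_t_mirror {
  uint32_t flags;                // NonFragileABI_Class_* bits
  uint32_t instanceStart;
  uint32_t instanceSize;
  const uint8_t *ivarLayout;     // strong layout; layout name for metaclasses
  const char *name;
  const void *baseMethods;       // method_list_t *
  const void *baseProtocols;     // protocol_list_t *
  const void *ivars;             // ivar_list_t *, null for metaclasses
  const uint8_t *weakIvarLayout; // null for metaclasses
  const void *baseProperties;    // property_list_t *
};

// A minimal record with every optional list left null.
static const class_ro_t_mirror DemoRO = {
    0, 8, 16, nullptr, "Demo", nullptr, nullptr, nullptr, nullptr, nullptr};

int main() { return DemoRO.instanceSize == 16 ? 0 : 1; }
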
-/// BuildClassMetaData - This routine defines that to-level meta-data
-/// for the given ClassName for:
+/// Build the metaclass object for a class.
+///
/// struct _class_t {
/// struct _class_t *isa;
/// struct _class_t * const superclass;
@@ -5929,28 +6168,33 @@ llvm::GlobalVariable * CGObjCNonFragileABIMac::BuildClassRoTInitializer(
/// struct class_ro_t *ro;
/// }
///
-llvm::GlobalVariable *CGObjCNonFragileABIMac::BuildClassMetaData(
- const std::string &ClassName, llvm::Constant *IsAGV, llvm::Constant *SuperClassGV,
- llvm::Constant *ClassRoGV, bool HiddenVisibility, bool Weak) {
- llvm::Constant *Values[] = {
- IsAGV,
- SuperClassGV,
- ObjCEmptyCacheVar, // &ObjCEmptyCacheVar
- ObjCEmptyVtableVar, // &ObjCEmptyVtableVar
- ClassRoGV // &CLASS_RO_GV
- };
- if (!Values[1])
- Values[1] = llvm::Constant::getNullValue(ObjCTypes.ClassnfABIPtrTy);
- if (!Values[3])
- Values[3] = llvm::Constant::getNullValue(
- llvm::PointerType::getUnqual(ObjCTypes.ImpnfABITy));
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ClassnfABITy,
- Values);
- llvm::GlobalVariable *GV = GetClassGlobal(ClassName, Weak);
- GV->setInitializer(Init);
- GV->setSection("__DATA, __objc_data");
+llvm::GlobalVariable *
+CGObjCNonFragileABIMac::BuildClassObject(const ObjCInterfaceDecl *CI,
+ bool isMetaclass,
+ llvm::Constant *IsAGV,
+ llvm::Constant *SuperClassGV,
+ llvm::Constant *ClassRoGV,
+ bool HiddenVisibility) {
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ClassnfABITy);
+ values.add(IsAGV);
+ if (SuperClassGV) {
+ values.add(SuperClassGV);
+ } else {
+ values.addNullPointer(ObjCTypes.ClassnfABIPtrTy);
+ }
+ values.add(ObjCEmptyCacheVar);
+ values.add(ObjCEmptyVtableVar);
+ values.add(ClassRoGV);
+
+ llvm::GlobalVariable *GV =
+ cast<llvm::GlobalVariable>(GetClassGlobal(CI, isMetaclass, ForDefinition));
+ values.finishAndSetAsInitializer(GV);
+
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_data");
GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
+ CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ClassnfABITy));
if (!CGM.getTriple().isOSBinFormatCOFF())
if (HiddenVisibility)
GV->setVisibility(llvm::GlobalValue::HiddenVisibility);
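
BuildClassObject fills in the five-pointer _class_t from the comment above. For a root class, the calls in this diff point the metaclass's isa at itself and its superclass back at the class object, while the class object's isa is the metaclass and its superclass is null. The plain C++ sketch below shows that wiring with invented names and stand-ins for the empty cache and vtable globals.

struct class_t_mirror {
  const class_t_mirror *isa;
  const class_t_mirror *superclass;
  const void *cache;   // _objc_empty_cache
  const void *vtable;  // _objc_empty_vtable, or null on some targets
  const void *ro;      // class_ro_t *
};

static const char EmptyCache[1] = {}; // stand-ins for the runtime globals
static const char MetaRO[1] = {};
static const char ClassRO[1] = {};

// Root class wiring: metaclass->isa == metaclass, metaclass->superclass ==
// class, class->isa == metaclass, class->superclass == null.
extern const class_t_mirror RootClass;
static const class_t_mirror RootMeta = {&RootMeta, &RootClass, EmptyCache,
                                        nullptr, MetaRO};
const class_t_mirror RootClass = {&RootMeta, nullptr, EmptyCache, nullptr,
                                  ClassRO};

int main() { return RootClass.isa == &RootMeta ? 0 : 1; }
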
@@ -6014,6 +6258,9 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ImpnfABITy, false,
llvm::GlobalValue::ExternalLinkage, nullptr,
"_objc_empty_vtable");
+ else
+ ObjCEmptyVtableVar =
+ llvm::ConstantPointerNull::get(ObjCTypes.ImpnfABITy->getPointerTo());
}
// FIXME: Is this correct (that meta class size is never computed)?
@@ -6022,9 +6269,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
uint32_t InstanceSize = InstanceStart;
uint32_t flags = NonFragileABI_Class_Meta;
- llvm::GlobalVariable *SuperClassGV, *IsAGV;
+ llvm::Constant *SuperClassGV, *IsAGV;
- StringRef ClassName = ID->getObjCRuntimeNameAsString();
const auto *CI = ID->getClassInterface();
assert(CI && "CGObjCNonFragileABIMac::GenerateClass - class is 0");
@@ -6047,17 +6293,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// class is root
flags |= NonFragileABI_Class_Root;
- SuperClassGV = GetClassGlobal((getClassSymbolPrefix() + ClassName).str(),
- CI->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
-
- IsAGV = GetClassGlobal((getMetaclassSymbolPrefix() + ClassName).str(),
- CI->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (CI->hasAttr<DLLImportAttr>())
- IsAGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ SuperClassGV = GetClassGlobal(CI, /*metaclass*/ false, NotForDefinition);
+ IsAGV = GetClassGlobal(CI, /*metaclass*/ true, NotForDefinition);
} else {
// Has a root. Current class is not a root.
const ObjCInterfaceDecl *Root = ID->getClassInterface();
@@ -6065,31 +6302,16 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
Root = Super;
const auto *Super = CI->getSuperClass();
- StringRef RootClassName = Root->getObjCRuntimeNameAsString();
- StringRef SuperClassName = Super->getObjCRuntimeNameAsString();
-
- IsAGV = GetClassGlobal((getMetaclassSymbolPrefix() + RootClassName).str(),
- Root->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Root->hasAttr<DLLImportAttr>())
- IsAGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
-
- // work on super class metadata symbol.
- SuperClassGV =
- GetClassGlobal((getMetaclassSymbolPrefix() + SuperClassName).str(),
- Super->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Super->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ IsAGV = GetClassGlobal(Root, /*metaclass*/ true, NotForDefinition);
+ SuperClassGV = GetClassGlobal(Super, /*metaclass*/ true, NotForDefinition);
}
llvm::GlobalVariable *CLASS_RO_GV =
BuildClassRoTInitializer(flags, InstanceStart, InstanceSize, ID);
llvm::GlobalVariable *MetaTClass =
- BuildClassMetaData((getMetaclassSymbolPrefix() + ClassName).str(), IsAGV,
- SuperClassGV, CLASS_RO_GV, classIsHidden,
- CI->isWeakImported());
+ BuildClassObject(CI, /*metaclass*/ true,
+ IsAGV, SuperClassGV, CLASS_RO_GV, classIsHidden);
if (CGM.getTriple().isOSBinFormatCOFF())
if (CI->hasAttr<DLLExportAttr>())
MetaTClass->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
@@ -6122,14 +6344,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
} else {
// Has a root. Current class is not a root.
const auto *Super = CI->getSuperClass();
- StringRef SuperClassName = Super->getObjCRuntimeNameAsString();
-
- SuperClassGV =
- GetClassGlobal((getClassSymbolPrefix() + SuperClassName).str(),
- Super->isWeakImported());
- if (CGM.getTriple().isOSBinFormatCOFF())
- if (Super->hasAttr<DLLImportAttr>())
- SuperClassGV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ SuperClassGV = GetClassGlobal(Super, /*metaclass*/ false, NotForDefinition);
}
GetClassSizeInfo(ID, InstanceStart, InstanceSize);
@@ -6137,9 +6352,8 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
BuildClassRoTInitializer(flags, InstanceStart, InstanceSize, ID);
llvm::GlobalVariable *ClassMD =
- BuildClassMetaData((getClassSymbolPrefix() + ClassName).str(), MetaTClass,
- SuperClassGV, CLASS_RO_GV, classIsHidden,
- CI->isWeakImported());
+ BuildClassObject(CI, /*metaclass*/ false,
+ MetaTClass, SuperClassGV, CLASS_RO_GV, classIsHidden);
if (CGM.getTriple().isOSBinFormatCOFF())
if (CI->hasAttr<DLLExportAttr>())
ClassMD->setDLLStorageClass(llvm::GlobalValue::DLLExportStorageClass);
@@ -6152,7 +6366,7 @@ void CGObjCNonFragileABIMac::GenerateClass(const ObjCImplementationDecl *ID) {
// Force the definition of the EHType if necessary.
if (flags & NonFragileABI_Class_Exception)
- GetInterfaceEHType(CI, true);
+ (void) GetInterfaceEHType(CI, ForDefinition);
// Make sure method definition entries are all clear for next implementation.
MethodDefinitions.clear();
}
@@ -6217,82 +6431,59 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
ExtCatName += "_$_";
ExtCatName += OCD->getNameAsString();
- llvm::SmallString<64> ExtClassName(getClassSymbolPrefix());
- ExtClassName += Interface->getObjCRuntimeNameAsString();
-
- llvm::Constant *Values[8];
- Values[0] = GetClassName(OCD->getIdentifier()->getName());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.CategorynfABITy);
+ values.add(GetClassName(OCD->getIdentifier()->getName()));
// meta-class entry symbol
- llvm::GlobalVariable *ClassGV =
- GetClassGlobal(ExtClassName.str(), Interface->isWeakImported());
+ values.add(GetClassGlobal(Interface, /*metaclass*/ false, NotForDefinition));
+ std::string listName =
+ (Interface->getObjCRuntimeNameAsString() + "_$_" + OCD->getName()).str();
+
+ SmallVector<const ObjCMethodDecl *, 16> instanceMethods;
+ SmallVector<const ObjCMethodDecl *, 8> classMethods;
+ for (const auto *MD : OCD->methods()) {
+ if (MD->isInstanceMethod()) {
+ instanceMethods.push_back(MD);
+ } else {
+ classMethods.push_back(MD);
+ }
+ }
+
+ values.add(emitMethodList(listName, MethodListType::CategoryInstanceMethods,
+ instanceMethods));
+ values.add(emitMethodList(listName, MethodListType::CategoryClassMethods,
+ classMethods));
- Values[1] = ClassGV;
- std::vector<llvm::Constant*> Methods;
- llvm::SmallString<64> MethodListName(Prefix);
-
- MethodListName += "INSTANCE_METHODS_";
- MethodListName += Interface->getObjCRuntimeNameAsString();
- MethodListName += "_$_";
- MethodListName += OCD->getName();
-
- for (const auto *I : OCD->instance_methods())
- // Instance methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
-
- Values[2] = EmitMethodList(MethodListName.str(),
- "__DATA, __objc_const",
- Methods);
-
- MethodListName = Prefix;
- MethodListName += "CLASS_METHODS_";
- MethodListName += Interface->getObjCRuntimeNameAsString();
- MethodListName += "_$_";
- MethodListName += OCD->getNameAsString();
-
- Methods.clear();
- for (const auto *I : OCD->class_methods())
- // Class methods should always be defined.
- Methods.push_back(GetMethodConstant(I));
-
- Values[3] = EmitMethodList(MethodListName.str(),
- "__DATA, __objc_const",
- Methods);
const ObjCCategoryDecl *Category =
Interface->FindCategoryDeclaration(OCD->getIdentifier());
if (Category) {
SmallString<256> ExtName;
llvm::raw_svector_ostream(ExtName) << Interface->getObjCRuntimeNameAsString() << "_$_"
<< OCD->getName();
- Values[4] = EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ values.add(EmitProtocolList("\01l_OBJC_CATEGORY_PROTOCOLS_$_"
+ Interface->getObjCRuntimeNameAsString() + "_$_"
+ Category->getName(),
- Category->protocol_begin(),
- Category->protocol_end());
- Values[5] = EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, false);
- Values[6] = EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
- OCD, Category, ObjCTypes, true);
+ Category->protocol_begin(),
+ Category->protocol_end()));
+ values.add(EmitPropertyList("\01l_OBJC_$_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, false));
+ values.add(EmitPropertyList("\01l_OBJC_$_CLASS_PROP_LIST_" + ExtName.str(),
+ OCD, Category, ObjCTypes, true));
} else {
- Values[4] = llvm::Constant::getNullValue(ObjCTypes.ProtocolListnfABIPtrTy);
- Values[5] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
- Values[6] = llvm::Constant::getNullValue(ObjCTypes.PropertyListPtrTy);
+ values.addNullPointer(ObjCTypes.ProtocolListnfABIPtrTy);
+ values.addNullPointer(ObjCTypes.PropertyListPtrTy);
+ values.addNullPointer(ObjCTypes.PropertyListPtrTy);
}
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.CategorynfABITy);
- Values[7] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
-
- llvm::Constant *Init =
- llvm::ConstantStruct::get(ObjCTypes.CategorynfABITy,
- Values);
- llvm::GlobalVariable *GCATV
- = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.CategorynfABITy,
- false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- ExtCatName.str());
- GCATV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.CategorynfABITy));
- GCATV->setSection("__DATA, __objc_const");
+ values.addInt(ObjCTypes.IntTy, Size);
+
+ llvm::GlobalVariable *GCATV =
+ values.finishAndCreateGlobal(ExtCatName.str(), CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GCATV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GCATV);
DefinedCategories.push_back(GCATV);
@@ -6303,25 +6494,37 @@ void CGObjCNonFragileABIMac::GenerateCategory(const ObjCCategoryImplDecl *OCD) {
MethodDefinitions.clear();
}
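
The eight values added above match the non-fragile category_t record, ending with its own size. A C++ mirror is sketched below for orientation; the field names are conventional rather than taken from this file.

#include <cstdint>

struct category_t_mirror {
  const char *name;               // category name
  const void *cls;                // the extended class's _class_t
  const void *instanceMethods;    // method_list_t *
  const void *classMethods;       // method_list_t *
  const void *protocols;          // protocol_list_t *, null without @interface
  const void *instanceProperties; // property_list_t *
  const void *classProperties;    // property_list_t *
  uint32_t size;                  // sizeof(struct category_t)
};

static const category_t_mirror DemoCategory = {
    "Extra", nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
    sizeof(category_t_mirror)};

int main() { return DemoCategory.size != 0 ? 0 : 1; }
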
-/// GetMethodConstant - Return a struct objc_method constant for the
-/// given method if it has been defined. The result is null if the
-/// method has not been defined. The return value has type MethodPtrTy.
-llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
- const ObjCMethodDecl *MD) {
- llvm::Function *Fn = GetMethodDefinition(MD);
- if (!Fn)
- return nullptr;
-
- llvm::Constant *Method[] = {
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy),
- GetMethodVarType(MD),
- llvm::ConstantExpr::getBitCast(Fn, ObjCTypes.Int8PtrTy)
- };
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Method);
+/// emitMethodConstant - Return a struct objc_method constant. If
+/// forProtocol is true, the implementation will be null; otherwise,
+/// the method must have a definition registered with the runtime.
+///
+/// struct _objc_method {
+/// SEL _cmd;
+/// char *method_type;
+/// char *_imp;
+/// }
+void CGObjCNonFragileABIMac::emitMethodConstant(ConstantArrayBuilder &builder,
+ const ObjCMethodDecl *MD,
+ bool forProtocol) {
+ auto method = builder.beginStruct(ObjCTypes.MethodTy);
+ method.addBitCast(GetMethodVarName(MD->getSelector()),
+ ObjCTypes.SelectorPtrTy);
+ method.add(GetMethodVarType(MD));
+
+ if (forProtocol) {
+ // Protocol methods have no implementation. So, this entry is always NULL.
+ method.addNullPointer(ObjCTypes.Int8PtrTy);
+ } else {
+ llvm::Function *fn = GetMethodDefinition(MD);
+ assert(fn && "no definition for method?");
+ method.addBitCast(fn, ObjCTypes.Int8PtrTy);
+ }
+
+ method.finishAndAddTo(builder);
}
-/// EmitMethodList - Build meta-data for method declarations
+/// Build meta-data for method declarations.
+///
/// struct _method_list_t {
/// uint32_t entsize; // sizeof(struct _objc_method)
/// uint32_t method_count;
@@ -6329,28 +6532,69 @@ llvm::Constant *CGObjCNonFragileABIMac::GetMethodConstant(
/// }
///
llvm::Constant *
-CGObjCNonFragileABIMac::EmitMethodList(Twine Name, StringRef Section,
- ArrayRef<llvm::Constant *> Methods) {
+CGObjCNonFragileABIMac::emitMethodList(Twine name, MethodListType kind,
+ ArrayRef<const ObjCMethodDecl *> methods) {
// Return null for empty list.
- if (Methods.empty())
+ if (methods.empty())
return llvm::Constant::getNullValue(ObjCTypes.MethodListnfABIPtrTy);
- llvm::Constant *Values[3];
+ StringRef prefix;
+ bool forProtocol;
+ switch (kind) {
+ case MethodListType::CategoryInstanceMethods:
+ prefix = "\01l_OBJC_$_CATEGORY_INSTANCE_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::CategoryClassMethods:
+ prefix = "\01l_OBJC_$_CATEGORY_CLASS_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::InstanceMethods:
+ prefix = "\01l_OBJC_$_INSTANCE_METHODS_";
+ forProtocol = false;
+ break;
+ case MethodListType::ClassMethods:
+ prefix = "\01l_OBJC_$_CLASS_METHODS_";
+ forProtocol = false;
+ break;
+
+ case MethodListType::ProtocolInstanceMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_";
+ forProtocol = true;
+ break;
+ case MethodListType::ProtocolClassMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolInstanceMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_";
+ forProtocol = true;
+ break;
+ case MethodListType::OptionalProtocolClassMethods:
+ prefix = "\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_";
+ forProtocol = true;
+ break;
+ }
+
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+
// sizeof(struct _objc_method)
unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.MethodTy);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
+ values.addInt(ObjCTypes.IntTy, Size);
// method_count
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Methods.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.MethodTy,
- Methods.size());
- Values[2] = llvm::ConstantArray::get(AT, Methods);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
+ values.addInt(ObjCTypes.IntTy, methods.size());
+ auto methodArray = values.beginArray(ObjCTypes.MethodTy);
+ for (auto MD : methods) {
+ emitMethodConstant(methodArray, MD, forProtocol);
+ }
+ methodArray.finishAndAddTo(values);
- llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage, Init, Name);
- GV->setAlignment(CGM.getDataLayout().getABITypeAlignment(Init->getType()));
- GV->setSection(Section);
+ auto *GV = values.finishAndCreateGlobal(prefix + name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.MethodListnfABIPtrTy);
}
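
Unlike the fragile-ABI list, _method_list_t leads with the per-entry size rather than an obsolete pointer, and the protocol variants reuse the same shape with a null IMP slot. A short C++ mirror with an invented selector and type encoding:

#include <cstdint>

struct objc_method_nf {
  const char *cmd;         // SEL, modelled as a string here
  const char *method_type; // @encode string
  void *imp;               // null for protocol method lists
};

template <unsigned N> struct method_list_t_n {
  uint32_t entsize;        // sizeof(struct _objc_method)
  uint32_t method_count;
  objc_method_nf method_list[N];
};

// One required protocol method: note the null IMP slot.
static const method_list_t_n<1> ProtocolMethods = {
    sizeof(objc_method_nf), 1, {{"count", "Q16@0:8", nullptr}}};

int main() { return ProtocolMethods.method_count; }
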
@@ -6406,7 +6650,8 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
IvarOffsetGV->setVisibility(llvm::GlobalValue::DefaultVisibility);
}
- IvarOffsetGV->setSection("__DATA, __objc_ivar");
+ if (CGM.getTriple().isOSBinFormatMachO())
+ IvarOffsetGV->setSection("__DATA, __objc_ivar");
return IvarOffsetGV;
}
@@ -6430,7 +6675,12 @@ CGObjCNonFragileABIMac::EmitIvarOffsetVar(const ObjCInterfaceDecl *ID,
llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
const ObjCImplementationDecl *ID) {
- std::vector<llvm::Constant*> Ivars;
+ ConstantInitBuilder builder(CGM);
+ auto ivarList = builder.beginStruct();
+ ivarList.addInt(ObjCTypes.IntTy,
+ CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy));
+ auto ivarCountSlot = ivarList.addPlaceholder();
+ auto ivars = ivarList.beginArray(ObjCTypes.IvarnfABITy);
const ObjCInterfaceDecl *OID = ID->getClassInterface();
assert(OID && "CGObjCNonFragileABIMac::EmitIvarList - null interface");
@@ -6442,48 +6692,45 @@ llvm::Constant *CGObjCNonFragileABIMac::EmitIvarList(
// Ignore unnamed bit-fields.
if (!IVD->getDeclName())
continue;
- llvm::Constant *Ivar[5];
- Ivar[0] = EmitIvarOffsetVar(ID->getClassInterface(), IVD,
- ComputeIvarBaseOffset(CGM, ID, IVD));
- Ivar[1] = GetMethodVarName(IVD->getIdentifier());
- Ivar[2] = GetMethodVarType(IVD);
+
+ auto ivar = ivars.beginStruct(ObjCTypes.IvarnfABITy);
+ ivar.add(EmitIvarOffsetVar(ID->getClassInterface(), IVD,
+ ComputeIvarBaseOffset(CGM, ID, IVD)));
+ ivar.add(GetMethodVarName(IVD->getIdentifier()));
+ ivar.add(GetMethodVarType(IVD));
llvm::Type *FieldTy =
CGM.getTypes().ConvertTypeForMem(IVD->getType());
unsigned Size = CGM.getDataLayout().getTypeAllocSize(FieldTy);
unsigned Align = CGM.getContext().getPreferredTypeAlign(
IVD->getType().getTypePtr()) >> 3;
Align = llvm::Log2_32(Align);
- Ivar[3] = llvm::ConstantInt::get(ObjCTypes.IntTy, Align);
+ ivar.addInt(ObjCTypes.IntTy, Align);
// NOTE. Size of a bitfield does not match gcc's, because of the
// way bitfields are treated special in each. But I am told that
// 'size' for bitfield ivars is ignored by the runtime so it does
// not matter. If it matters, there is enough info to get the
// bitfield right!
- Ivar[4] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Ivars.push_back(llvm::ConstantStruct::get(ObjCTypes.IvarnfABITy, Ivar));
+ ivar.addInt(ObjCTypes.IntTy, Size);
+ ivar.finishAndAddTo(ivars);
}
// Return null for empty list.
- if (Ivars.empty())
+ if (ivars.empty()) {
+ ivars.abandon();
+ ivarList.abandon();
return llvm::Constant::getNullValue(ObjCTypes.IvarListnfABIPtrTy);
+ }
+
+ auto ivarCount = ivars.size();
+ ivars.finishAndAddTo(ivarList);
+ ivarList.fillPlaceholderWithInt(ivarCountSlot, ObjCTypes.IntTy, ivarCount);
- llvm::Constant *Values[3];
- unsigned Size = CGM.getDataLayout().getTypeAllocSize(ObjCTypes.IvarnfABITy);
- Values[0] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[1] = llvm::ConstantInt::get(ObjCTypes.IntTy, Ivars.size());
- llvm::ArrayType *AT = llvm::ArrayType::get(ObjCTypes.IvarnfABITy,
- Ivars.size());
- Values[2] = llvm::ConstantArray::get(AT, Ivars);
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
const char *Prefix = "\01l_OBJC_$_INSTANCE_VARIABLES_";
llvm::GlobalVariable *GV =
- new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage,
- Init,
- Prefix + OID->getObjCRuntimeNameAsString());
- GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(Init->getType()));
- GV->setSection("__DATA, __objc_const");
-
+ ivarList.finishAndCreateGlobal(Prefix + OID->getObjCRuntimeNameAsString(),
+ CGM.getPointerAlign(), /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.IvarListnfABIPtrTy);
}
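
Where the element count is only known after the elements have been emitted (EmitIvarList above, EmitProtocolList below), the builder's placeholder mechanism is used: reserve a slot, fill the array, then back-patch the count; an empty list is abandoned rather than finished. A minimal sketch under the same include and namespace assumptions as the previous sketch, with illustrative names:

// Illustrative only: placeholder slot back-patched once the array is built.
static llvm::Constant *
emitExampleCountedList(CodeGenModule &CGM, llvm::Type *elemTy,
                       llvm::ArrayRef<llvm::Constant *> elems) {
  ConstantInitBuilder builder(CGM);
  auto values = builder.beginStruct();
  auto countSlot = values.addPlaceholder();    // count is not known yet

  auto array = values.beginArray(elemTy);
  for (auto *elem : elems)
    array.add(elem);

  if (array.empty()) {                         // nothing to emit at all
    array.abandon();
    values.abandon();
    return llvm::Constant::getNullValue(llvm::PointerType::getUnqual(elemTy));
  }

  auto count = array.size();
  array.finishAndAddTo(values);
  values.fillPlaceholderWithInt(countSlot, CGM.Int32Ty, count);
  return values.finishAndCreateGlobal("_example_counted_list",
                                      CGM.getPointerAlign(),
                                      /*constant*/ false,
                                      llvm::GlobalValue::PrivateLinkage);
}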
@@ -6492,15 +6739,20 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocolRef(
const ObjCProtocolDecl *PD) {
llvm::GlobalVariable *&Entry = Protocols[PD->getIdentifier()];
- if (!Entry)
+ if (!Entry) {
// We use the initializer as a marker of whether this is a forward
// reference or not. At module finalization we add the empty
// contents for protocols which were referenced but never defined.
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::ExternalLinkage,
- nullptr,
- "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ llvm::SmallString<64> Protocol;
+ llvm::raw_svector_ostream(Protocol) << "\01l_OBJC_PROTOCOL_$_"
+ << PD->getObjCRuntimeNameAsString();
+
+ Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
+ false, llvm::GlobalValue::ExternalLinkage,
+ nullptr, Protocol);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ Entry->setComdat(CGM.getModule().getOrInsertComdat(Protocol));
+ }
return Entry;
}
@@ -6537,96 +6789,59 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
if (const ObjCProtocolDecl *Def = PD->getDefinition())
PD = Def;
- // Construct method lists.
- std::vector<llvm::Constant*> InstanceMethods, ClassMethods;
- std::vector<llvm::Constant*> OptInstanceMethods, OptClassMethods;
- std::vector<llvm::Constant*> MethodTypesExt, OptMethodTypesExt;
- for (const auto *MD : PD->instance_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptInstanceMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- InstanceMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
-
- for (const auto *MD : PD->class_methods()) {
- llvm::Constant *C = GetMethodDescriptionConstant(MD);
- if (!C)
- return GetOrEmitProtocolRef(PD);
-
- if (MD->getImplementationControl() == ObjCMethodDecl::Optional) {
- OptClassMethods.push_back(C);
- OptMethodTypesExt.push_back(GetMethodVarType(MD, true));
- } else {
- ClassMethods.push_back(C);
- MethodTypesExt.push_back(GetMethodVarType(MD, true));
- }
- }
+ auto methodLists = ProtocolMethodLists::get(PD);
- MethodTypesExt.insert(MethodTypesExt.end(),
- OptMethodTypesExt.begin(), OptMethodTypesExt.end());
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.ProtocolnfABITy);
- llvm::Constant *Values[13];
// isa is NULL
- Values[0] = llvm::Constant::getNullValue(ObjCTypes.ObjectPtrTy);
- Values[1] = GetClassName(PD->getObjCRuntimeNameAsString());
- Values[2] = EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_" + PD->getObjCRuntimeNameAsString(),
+ values.addNullPointer(ObjCTypes.ObjectPtrTy);
+ values.add(GetClassName(PD->getObjCRuntimeNameAsString()));
+ values.add(EmitProtocolList("\01l_OBJC_$_PROTOCOL_REFS_"
+ + PD->getObjCRuntimeNameAsString(),
PD->protocol_begin(),
- PD->protocol_end());
-
- Values[3] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- InstanceMethods);
- Values[4] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- ClassMethods);
- Values[5] = EmitMethodList("\01l_OBJC_$_PROTOCOL_INSTANCE_METHODS_OPT_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- OptInstanceMethods);
- Values[6] = EmitMethodList("\01l_OBJC_$_PROTOCOL_CLASS_METHODS_OPT_"
- + PD->getObjCRuntimeNameAsString(),
- "__DATA, __objc_const",
- OptClassMethods);
- Values[7] = EmitPropertyList(
- "\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
- nullptr, PD, ObjCTypes, false);
+ PD->protocol_end()));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::RequiredClassMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalInstanceMethods));
+ values.add(methodLists.emitMethodList(this, PD,
+ ProtocolMethodLists::OptionalClassMethods));
+ values.add(EmitPropertyList(
+ "\01l_OBJC_$_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
+ nullptr, PD, ObjCTypes, false));
uint32_t Size =
CGM.getDataLayout().getTypeAllocSize(ObjCTypes.ProtocolnfABITy);
- Values[8] = llvm::ConstantInt::get(ObjCTypes.IntTy, Size);
- Values[9] = llvm::Constant::getNullValue(ObjCTypes.IntTy);
- Values[10] = EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ values.addInt(ObjCTypes.IntTy, Size);
+ values.addInt(ObjCTypes.IntTy, 0);
+ values.add(EmitProtocolMethodTypes("\01l_OBJC_$_PROTOCOL_METHOD_TYPES_"
+ PD->getObjCRuntimeNameAsString(),
- MethodTypesExt, ObjCTypes);
+ methodLists.emitExtendedTypesArray(this),
+ ObjCTypes));
+
// const char *demangledName;
- Values[11] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
+ values.addNullPointer(ObjCTypes.Int8PtrTy);
- Values[12] = EmitPropertyList(
+ values.add(EmitPropertyList(
"\01l_OBJC_$_CLASS_PROP_LIST_" + PD->getObjCRuntimeNameAsString(),
- nullptr, PD, ObjCTypes, true);
+ nullptr, PD, ObjCTypes, true));
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.ProtocolnfABITy,
- Values);
-
if (Entry) {
// Already created, fix the linkage and update the initializer.
Entry->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABITy,
- false, llvm::GlobalValue::WeakAnyLinkage, Init,
- "\01l_OBJC_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
- Entry->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABITy));
+ llvm::SmallString<64> symbolName;
+ llvm::raw_svector_ostream(symbolName)
+ << "\01l_OBJC_PROTOCOL_$_" << PD->getObjCRuntimeNameAsString();
+
+ Entry = values.finishAndCreateGlobal(symbolName, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ Entry->setComdat(CGM.getModule().getOrInsertComdat(symbolName));
Protocols[PD->getIdentifier()] = Entry;
}
@@ -6635,13 +6850,20 @@ llvm::Constant *CGObjCNonFragileABIMac::GetOrEmitProtocol(
// Use this protocol meta-data to build protocol list table in section
// __DATA, __objc_protolist
+ llvm::SmallString<64> ProtocolRef;
+ llvm::raw_svector_ostream(ProtocolRef) << "\01l_OBJC_LABEL_PROTOCOL_$_"
+ << PD->getObjCRuntimeNameAsString();
+
llvm::GlobalVariable *PTGV =
new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ProtocolnfABIPtrTy,
false, llvm::GlobalValue::WeakAnyLinkage, Entry,
- "\01l_OBJC_LABEL_PROTOCOL_$_" + PD->getObjCRuntimeNameAsString());
+ ProtocolRef);
+ if (!CGM.getTriple().isOSBinFormatMachO())
+ PTGV->setComdat(CGM.getModule().getOrInsertComdat(ProtocolRef));
PTGV->setAlignment(
CGM.getDataLayout().getABITypeAlignment(ObjCTypes.ProtocolnfABIPtrTy));
- PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
+ if (CGM.getTriple().isOSBinFormatMachO())
+ PTGV->setSection("__DATA, __objc_protolist, coalesced, no_dead_strip");
PTGV->setVisibility(llvm::GlobalValue::HiddenVisibility);
CGM.addCompilerUsedGlobal(PTGV);
return Entry;
@@ -6673,55 +6895,30 @@ CGObjCNonFragileABIMac::EmitProtocolList(Twine Name,
if (GV)
return llvm::ConstantExpr::getBitCast(GV, ObjCTypes.ProtocolListnfABIPtrTy);
- for (; begin != end; ++begin)
- ProtocolRefs.push_back(GetProtocolRef(*begin)); // Implemented???
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ auto countSlot = values.addPlaceholder();
- // This list is null terminated.
- ProtocolRefs.push_back(llvm::Constant::getNullValue(
- ObjCTypes.ProtocolnfABIPtrTy));
-
- llvm::Constant *Values[2];
- Values[0] =
- llvm::ConstantInt::get(ObjCTypes.LongTy, ProtocolRefs.size() - 1);
- Values[1] =
- llvm::ConstantArray::get(llvm::ArrayType::get(ObjCTypes.ProtocolnfABIPtrTy,
- ProtocolRefs.size()),
- ProtocolRefs);
-
- llvm::Constant *Init = llvm::ConstantStruct::getAnon(Values);
- GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(), false,
- llvm::GlobalValue::PrivateLinkage,
- Init, Name);
- GV->setSection("__DATA, __objc_const");
- GV->setAlignment(
- CGM.getDataLayout().getABITypeAlignment(Init->getType()));
+ // A null-terminated array of protocols.
+ auto array = values.beginArray(ObjCTypes.ProtocolnfABIPtrTy);
+ for (; begin != end; ++begin)
+ array.add(GetProtocolRef(*begin)); // Implemented???
+ auto count = array.size();
+ array.addNullPointer(ObjCTypes.ProtocolnfABIPtrTy);
+
+ array.finishAndAddTo(values);
+ values.fillPlaceholderWithInt(countSlot, ObjCTypes.LongTy, count);
+
+ GV = values.finishAndCreateGlobal(Name, CGM.getPointerAlign(),
+ /*constant*/ false,
+ llvm::GlobalValue::PrivateLinkage);
+ if (CGM.getTriple().isOSBinFormatMachO())
+ GV->setSection("__DATA, __objc_const");
CGM.addCompilerUsedGlobal(GV);
return llvm::ConstantExpr::getBitCast(GV,
ObjCTypes.ProtocolListnfABIPtrTy);
}
-/// GetMethodDescriptionConstant - This routine build following meta-data:
-/// struct _objc_method {
-/// SEL _cmd;
-/// char *method_type;
-/// char *_imp;
-/// }
-
-llvm::Constant *
-CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
- llvm::Constant *Desc[3];
- Desc[0] =
- llvm::ConstantExpr::getBitCast(GetMethodVarName(MD->getSelector()),
- ObjCTypes.SelectorPtrTy);
- Desc[1] = GetMethodVarType(MD);
- if (!Desc[1])
- return nullptr;
-
- // Protocol methods have no implementation. So, this entry is always NULL.
- Desc[2] = llvm::Constant::getNullValue(ObjCTypes.Int8PtrTy);
- return llvm::ConstantStruct::get(ObjCTypes.MethodTy, Desc);
-}
-
/// EmitObjCValueForIvar - Code Gen for nonfragile ivar reference.
/// This code gen. amounts to generating code for:
/// @code
@@ -6853,16 +7050,15 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
= CGM.getModule().getGlobalVariable(messageRefName);
if (!messageRef) {
// Build the message ref structure.
- llvm::Constant *values[] = { fn, GetMethodVarName(selector) };
- llvm::Constant *init = llvm::ConstantStruct::getAnon(values);
- messageRef = new llvm::GlobalVariable(CGM.getModule(),
- init->getType(),
- /*constant*/ false,
- llvm::GlobalValue::WeakAnyLinkage,
- init,
- messageRefName);
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct();
+ values.add(fn);
+ values.add(GetMethodVarName(selector));
+ messageRef = values.finishAndCreateGlobal(messageRefName,
+ CharUnits::fromQuantity(16),
+ /*constant*/ false,
+ llvm::GlobalValue::WeakAnyLinkage);
messageRef->setVisibility(llvm::GlobalValue::HiddenVisibility);
- messageRef->setAlignment(16);
messageRef->setSection("__DATA, __objc_msgrefs, coalesced");
}
@@ -6887,9 +7083,10 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
// Load the function to call from the message ref table.
Address calleeAddr =
CGF.Builder.CreateStructGEP(mref, 0, CharUnits::Zero());
- llvm::Value *callee = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
+ llvm::Value *calleePtr = CGF.Builder.CreateLoad(calleeAddr, "msgSend_fn");
- callee = CGF.Builder.CreateBitCast(callee, MSI.MessengerType);
+ calleePtr = CGF.Builder.CreateBitCast(calleePtr, MSI.MessengerType);
+ CGCallee callee(CGCalleeInfo(), calleePtr);
RValue result = CGF.EmitCall(MSI.CallInfo, callee, returnSlot, args);
return nullReturn.complete(CGF, result, resultType, formalArgs,
@@ -6916,33 +7113,59 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
false, CallArgs, Method, Class, ObjCTypes);
}
-llvm::GlobalVariable *
-CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name, bool Weak) {
+llvm::Constant *
+CGObjCNonFragileABIMac::GetClassGlobal(const ObjCInterfaceDecl *ID,
+ bool metaclass,
+ ForDefinition_t isForDefinition) {
+ auto prefix =
+ (metaclass ? getMetaclassSymbolPrefix() : getClassSymbolPrefix());
+ return GetClassGlobal((prefix + ID->getObjCRuntimeNameAsString()).str(),
+ isForDefinition,
+ ID->isWeakImported(),
+ !isForDefinition
+ && CGM.getTriple().isOSBinFormatCOFF()
+ && ID->hasAttr<DLLImportAttr>());
+}
+
+llvm::Constant *
+CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
+ ForDefinition_t IsForDefinition,
+ bool Weak, bool DLLImport) {
llvm::GlobalValue::LinkageTypes L =
Weak ? llvm::GlobalValue::ExternalWeakLinkage
: llvm::GlobalValue::ExternalLinkage;
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV)
+
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ if (!GV) {
GV = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABITy,
false, L, nullptr, Name);
+ if (DLLImport)
+ GV->setDLLStorageClass(llvm::GlobalValue::DLLImportStorageClass);
+ }
+
assert(GV->getLinkage() == L);
return GV;
}
-llvm::Value *CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
- IdentifierInfo *II,
- bool Weak,
- const ObjCInterfaceDecl *ID) {
+llvm::Value *
+CGObjCNonFragileABIMac::EmitClassRefFromId(CodeGenFunction &CGF,
+ IdentifierInfo *II,
+ const ObjCInterfaceDecl *ID) {
CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable *&Entry = ClassReferences[II];
if (!Entry) {
- StringRef Name = ID ? ID->getObjCRuntimeNameAsString() : II->getName();
- std::string ClassName = (getClassSymbolPrefix() + Name).str();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName, Weak);
+ llvm::Constant *ClassGV;
+ if (ID) {
+ ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
+ } else {
+ ClassGV = GetClassGlobal((getClassSymbolPrefix() + II->getName()).str(),
+ NotForDefinition);
+ }
+
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_REFERENCES_$_");
@@ -6960,13 +7183,13 @@ llvm::Value *CGObjCNonFragileABIMac::EmitClassRef(CodeGenFunction &CGF,
if (ID->hasAttr<ObjCRuntimeVisibleAttr>())
return EmitClassRefViaRuntime(CGF, ID, ObjCTypes);
- return EmitClassRefFromId(CGF, ID->getIdentifier(), ID->isWeakImported(), ID);
+ return EmitClassRefFromId(CGF, ID->getIdentifier(), ID);
}
llvm::Value *CGObjCNonFragileABIMac::EmitNSAutoreleasePoolClassRef(
CodeGenFunction &CGF) {
IdentifierInfo *II = &CGM.getContext().Idents.get("NSAutoreleasePool");
- return EmitClassRefFromId(CGF, II, false, nullptr);
+ return EmitClassRefFromId(CGF, II, nullptr);
}
llvm::Value *
@@ -6976,10 +7199,7 @@ CGObjCNonFragileABIMac::EmitSuperClassRef(CodeGenFunction &CGF,
llvm::GlobalVariable *&Entry = SuperClassReferences[ID->getIdentifier()];
if (!Entry) {
- llvm::SmallString<64> ClassName(getClassSymbolPrefix());
- ClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(),
- ID->isWeakImported());
+ auto ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
ClassGV, "OBJC_CLASSLIST_SUP_REFS_$_");
@@ -6999,10 +7219,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
CharUnits Align = CGF.getPointerAlign();
llvm::GlobalVariable * &Entry = MetaClassReferences[ID->getIdentifier()];
if (!Entry) {
- llvm::SmallString<64> MetaClassName(getMetaclassSymbolPrefix());
- MetaClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *MetaClassGV =
- GetClassGlobal(MetaClassName.str(), Weak);
+ auto MetaClassGV = GetClassGlobal(ID, /*metaclass*/ true, NotForDefinition);
Entry = new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.ClassnfABIPtrTy,
false, llvm::GlobalValue::PrivateLinkage,
@@ -7021,11 +7238,10 @@ llvm::Value *CGObjCNonFragileABIMac::EmitMetaClassRef(CodeGenFunction &CGF,
llvm::Value *CGObjCNonFragileABIMac::GetClass(CodeGenFunction &CGF,
const ObjCInterfaceDecl *ID) {
if (ID->isWeakImported()) {
- llvm::SmallString<64> ClassName(getClassSymbolPrefix());
- ClassName += ID->getObjCRuntimeNameAsString();
- llvm::GlobalVariable *ClassGV = GetClassGlobal(ClassName.str(), true);
+ auto ClassGV = GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition);
(void)ClassGV;
- assert(ClassGV->hasExternalWeakLinkage());
+ assert(!isa<llvm::GlobalVariable>(ClassGV) ||
+ cast<llvm::GlobalVariable>(ClassGV)->hasExternalWeakLinkage());
}
return EmitClassRef(CGF, ID);
@@ -7258,7 +7474,7 @@ CGObjCNonFragileABIMac::GetEHType(QualType T) {
const ObjCInterfaceType *IT = PT->getInterfaceType();
assert(IT && "Invalid @catch type.");
- return GetInterfaceEHType(IT->getDecl(), false);
+ return GetInterfaceEHType(IT->getDecl(), NotForDefinition);
}
void CGObjCNonFragileABIMac::EmitTryStmt(CodeGen::CodeGenFunction &CGF,
@@ -7290,13 +7506,13 @@ void CGObjCNonFragileABIMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Constant *
CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
- bool ForDefinition) {
+ ForDefinition_t IsForDefinition) {
llvm::GlobalVariable * &Entry = EHTypeReferences[ID->getIdentifier()];
StringRef ClassName = ID->getObjCRuntimeNameAsString();
// If we don't need a definition, return the entry if found or check
// if we use an external reference.
- if (!ForDefinition) {
+ if (!IsForDefinition) {
if (Entry)
return Entry;
@@ -7332,23 +7548,24 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
}
llvm::Value *VTableIdx = llvm::ConstantInt::get(CGM.Int32Ty, 2);
- llvm::Constant *Values[] = {
- llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(), VTableGV,
- VTableIdx),
- GetClassName(ID->getObjCRuntimeNameAsString()),
- GetClassGlobal((getClassSymbolPrefix() + ClassName).str()),
- };
- llvm::Constant *Init = llvm::ConstantStruct::get(ObjCTypes.EHTypeTy, Values);
-
- llvm::GlobalValue::LinkageTypes L = ForDefinition
+ ConstantInitBuilder builder(CGM);
+ auto values = builder.beginStruct(ObjCTypes.EHTypeTy);
+ values.add(llvm::ConstantExpr::getGetElementPtr(VTableGV->getValueType(),
+ VTableGV, VTableIdx));
+ values.add(GetClassName(ClassName));
+ values.add(GetClassGlobal(ID, /*metaclass*/ false, NotForDefinition));
+
+ llvm::GlobalValue::LinkageTypes L = IsForDefinition
? llvm::GlobalValue::ExternalLinkage
: llvm::GlobalValue::WeakAnyLinkage;
if (Entry) {
- Entry->setInitializer(Init);
+ values.finishAndSetAsInitializer(Entry);
+ Entry->setAlignment(CGM.getPointerAlign().getQuantity());
} else {
- Entry =
- new llvm::GlobalVariable(CGM.getModule(), ObjCTypes.EHTypeTy, false, L,
- Init, ("OBJC_EHTYPE_$_" + ClassName).str());
+ Entry = values.finishAndCreateGlobal("OBJC_EHTYPE_$_" + ClassName,
+ CGM.getPointerAlign(),
+ /*constant*/ false,
+ L);
if (CGM.getTriple().isOSBinFormatCOFF())
if (hasObjCExceptionAttribute(CGM.getContext(), ID))
if (ID->hasAttr<DLLExportAttr>())
@@ -7360,11 +7577,9 @@ CGObjCNonFragileABIMac::GetInterfaceEHType(const ObjCInterfaceDecl *ID,
if (ID->getVisibility() == HiddenVisibility)
Entry->setVisibility(llvm::GlobalValue::HiddenVisibility);
- const auto &DL = CGM.getDataLayout();
- Entry->setAlignment(DL.getABITypeAlignment(ObjCTypes.EHTypeTy));
-
- if (ForDefinition)
- Entry->setSection("__DATA,__objc_const");
+ if (IsForDefinition)
+ if (CGM.getTriple().isOSBinFormatMachO())
+ Entry->setSection("__DATA,__objc_const");
return Entry;
}
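
A pattern that recurs throughout the CGObjCMac.cpp changes above: metadata globals only get the Mach-O "__DATA, ..." sections when the target actually uses Mach-O, and on other object formats several of them are placed in a comdat keyed by their symbol name so duplicates coalesce at link time. Condensed into one illustrative helper (the real code applies the calls per global, not through a shared function):

#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Module.h"

// Illustrative only: section on Mach-O, comdat elsewhere.
static void placeObjCMetadata(CodeGenModule &CGM, llvm::GlobalVariable *GV,
                              llvm::StringRef machOSection) {
  if (CGM.getTriple().isOSBinFormatMachO())
    GV->setSection(machOSection);  // the Objective-C runtime scans by section
  else
    GV->setComdat(CGM.getModule().getOrInsertComdat(GV->getName()));
}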
diff --git a/lib/CodeGen/CGObjCRuntime.cpp b/lib/CodeGen/CGObjCRuntime.cpp
index 0caf6d9f210a..3da7ed230edd 100644
--- a/lib/CodeGen/CGObjCRuntime.cpp
+++ b/lib/CodeGen/CGObjCRuntime.cpp
@@ -90,7 +90,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
unsigned CVRQualifiers,
llvm::Value *Offset) {
// Compute (type*) ( (char *) BaseValue + Offset)
- QualType IvarTy = Ivar->getType();
+ QualType IvarTy = Ivar->getType().withCVRQualifiers(CVRQualifiers);
llvm::Type *LTy = CGF.CGM.getTypes().ConvertTypeForMem(IvarTy);
llvm::Value *V = CGF.Builder.CreateBitCast(BaseValue, CGF.Int8PtrTy);
V = CGF.Builder.CreateInBoundsGEP(V, Offset, "add.ptr");
@@ -98,7 +98,6 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
if (!Ivar->isBitField()) {
V = CGF.Builder.CreateBitCast(V, llvm::PointerType::getUnqual(LTy));
LValue LV = CGF.MakeNaturalAlignAddrLValue(V, IvarTy);
- LV.getQuals().addCVRQualifiers(CVRQualifiers);
return LV;
}
@@ -139,9 +138,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
Addr = CGF.Builder.CreateElementBitCast(Addr,
llvm::Type::getIntNTy(CGF.getLLVMContext(),
Info->StorageSize));
- return LValue::MakeBitfield(Addr, *Info,
- IvarTy.withCVRQualifiers(CVRQualifiers),
- AlignmentSource::Decl);
+ return LValue::MakeBitfield(Addr, *Info, IvarTy, AlignmentSource::Decl);
}
namespace {
@@ -153,18 +150,16 @@ namespace {
};
struct CallObjCEndCatch final : EHScopeStack::Cleanup {
- CallObjCEndCatch(bool MightThrow, llvm::Value *Fn) :
- MightThrow(MightThrow), Fn(Fn) {}
+ CallObjCEndCatch(bool MightThrow, llvm::Value *Fn)
+ : MightThrow(MightThrow), Fn(Fn) {}
bool MightThrow;
llvm::Value *Fn;
void Emit(CodeGenFunction &CGF, Flags flags) override {
- if (!MightThrow) {
- CGF.Builder.CreateCall(Fn)->setDoesNotThrow();
- return;
- }
-
- CGF.EmitRuntimeCallOrInvoke(Fn);
+ if (MightThrow)
+ CGF.EmitRuntimeCallOrInvoke(Fn);
+ else
+ CGF.EmitNounwindRuntimeCall(Fn);
}
};
}
@@ -233,10 +228,8 @@ void CGObjCRuntime::EmitTryCatchStmt(CodeGenFunction &CGF,
// Enter the catch.
llvm::Value *Exn = RawExn;
- if (beginCatchFn) {
- Exn = CGF.Builder.CreateCall(beginCatchFn, RawExn, "exn.adjusted");
- cast<llvm::CallInst>(Exn)->setDoesNotThrow();
- }
+ if (beginCatchFn)
+ Exn = CGF.EmitNounwindRuntimeCall(beginCatchFn, RawExn, "exn.adjusted");
CodeGenFunction::LexicalScope cleanups(CGF, Handler.Body->getSourceRange());
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 6c330590f7cd..a14b44abf413 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -280,9 +280,6 @@ public:
virtual llvm::Constant *BuildByrefLayout(CodeGen::CodeGenModule &CGM,
QualType T) = 0;
- virtual llvm::GlobalVariable *GetClassGlobal(StringRef Name,
- bool Weak = false) = 0;
-
struct MessageSendInfo {
const CGFunctionInfo &CallInfo;
llvm::PointerType *MessengerType;
diff --git a/lib/CodeGen/CGOpenCLRuntime.cpp b/lib/CodeGen/CGOpenCLRuntime.cpp
index 38aebea18ed3..9062936fdd14 100644
--- a/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -15,6 +15,7 @@
#include "CGOpenCLRuntime.h"
#include "CodeGenFunction.h"
+#include "TargetInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
#include <assert.h>
@@ -34,10 +35,10 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
"Not an OpenCL specific type!");
llvm::LLVMContext& Ctx = CGM.getLLVMContext();
- uint32_t ImgAddrSpc =
- CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+ uint32_t ImgAddrSpc = CGM.getContext().getTargetAddressSpace(
+ CGM.getTarget().getOpenCLImageAddrSpace());
switch (cast<BuiltinType>(T)->getKind()) {
- default:
+ default:
llvm_unreachable("Unexpected opencl builtin type!");
return nullptr;
#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
@@ -47,7 +48,7 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
ImgAddrSpc);
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
- return llvm::IntegerType::get(Ctx, 32);
+ return getSamplerType();
case BuiltinType::OCLEvent:
return llvm::PointerType::get(llvm::StructType::create(
Ctx, "opencl.event_t"), 0);
@@ -76,3 +77,32 @@ llvm::Type *CGOpenCLRuntime::getPipeType() {
return PipeTy;
}
+
+llvm::PointerType *CGOpenCLRuntime::getSamplerType() {
+ if (!SamplerTy)
+ SamplerTy = llvm::PointerType::get(llvm::StructType::create(
+ CGM.getLLVMContext(), "opencl.sampler_t"),
+ CGM.getContext().getTargetAddressSpace(
+ LangAS::opencl_constant));
+ return SamplerTy;
+}
+
+llvm::Value *CGOpenCLRuntime::getPipeElemSize(const Expr *PipeArg) {
+ const PipeType *PipeTy = PipeArg->getType()->getAs<PipeType>();
+ // The type of the last (implicit) argument to be passed.
+ llvm::Type *Int32Ty = llvm::IntegerType::getInt32Ty(CGM.getLLVMContext());
+ unsigned TypeSize = CGM.getContext()
+ .getTypeSizeInChars(PipeTy->getElementType())
+ .getQuantity();
+ return llvm::ConstantInt::get(Int32Ty, TypeSize, false);
+}
+
+llvm::Value *CGOpenCLRuntime::getPipeElemAlign(const Expr *PipeArg) {
+ const PipeType *PipeTy = PipeArg->getType()->getAs<PipeType>();
+ // The type of the last (implicit) argument to be passed.
+ llvm::Type *Int32Ty = llvm::IntegerType::getInt32Ty(CGM.getLLVMContext());
+  unsigned TypeAlign = CGM.getContext()
+                           .getTypeAlignInChars(PipeTy->getElementType())
+                           .getQuantity();
+  return llvm::ConstantInt::get(Int32Ty, TypeAlign, false);
+}
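
The new getSamplerType() above turns the sampler from a plain i32 into an opaque struct addressed through a pointer in the OpenCL constant address space. A standalone LLVM-only sketch of the equivalent type construction; address space 2 is a stand-in, since the real code asks the target via getTargetAddressSpace(LangAS::opencl_constant):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/LLVMContext.h"

// Illustrative only: opaque "opencl.sampler_t" behind a constant-AS pointer.
llvm::PointerType *makeSamplerTy(llvm::LLVMContext &Ctx) {
  llvm::StructType *STy = llvm::StructType::create(Ctx, "opencl.sampler_t");
  return llvm::PointerType::get(STy, /*AddrSpace=*/2);  // 2 is a placeholder
}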
diff --git a/lib/CodeGen/CGOpenCLRuntime.h b/lib/CodeGen/CGOpenCLRuntime.h
index f1a7a3106443..ee3cb3dda063 100644
--- a/lib/CodeGen/CGOpenCLRuntime.h
+++ b/lib/CodeGen/CGOpenCLRuntime.h
@@ -33,9 +33,11 @@ class CGOpenCLRuntime {
protected:
CodeGenModule &CGM;
llvm::Type *PipeTy;
+ llvm::PointerType *SamplerTy;
public:
- CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM), PipeTy(nullptr) {}
+ CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM), PipeTy(nullptr),
+ SamplerTy(nullptr) {}
virtual ~CGOpenCLRuntime();
/// Emit the IR required for a work-group-local variable declaration, and add
@@ -47,6 +49,16 @@ public:
virtual llvm::Type *convertOpenCLSpecificType(const Type *T);
virtual llvm::Type *getPipeType();
+
+ llvm::PointerType *getSamplerType();
+
+  // \brief Returns a value which indicates the size in bytes of the pipe
+ // element.
+ virtual llvm::Value *getPipeElemSize(const Expr *PipeArg);
+
+  // \brief Returns a value which indicates the alignment in bytes of the pipe
+ // element.
+ virtual llvm::Value *getPipeElemAlign(const Expr *PipeArg);
};
}
diff --git a/lib/CodeGen/CGOpenMPRuntime.cpp b/lib/CodeGen/CGOpenMPRuntime.cpp
index 6a0edbe0e7a9..0624d86b564a 100644
--- a/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -15,10 +15,11 @@
#include "CGCleanup.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
+#include "ConstantBuilder.h"
#include "clang/AST/Decl.h"
#include "clang/AST/StmtOpenMP.h"
#include "llvm/ADT/ArrayRef.h"
-#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
@@ -489,7 +490,7 @@ enum OpenMPSchedType {
OMP_sch_runtime = 37,
OMP_sch_auto = 38,
/// static with chunk adjustment (e.g., simd)
- OMP_sch_static_balanced_chunked = 45,
+ OMP_sch_static_balanced_chunked = 45,
/// \brief Lower bound for 'ordered' versions.
OMP_ord_lower = 64,
OMP_ord_static_chunked = 65,
@@ -756,6 +757,7 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
FnTy, llvm::GlobalValue::InternalLinkage,
IsCombiner ? ".omp_combiner." : ".omp_initializer.", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, Fn, FnInfo);
+ Fn->removeFnAttr(llvm::Attribute::NoInline);
Fn->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
// Map "T omp_in;" variable to "*omp_in_parm" value in all expressions.
@@ -906,18 +908,19 @@ Address CGOpenMPRuntime::getOrCreateDefaultLocation(unsigned Flags) {
DefaultOpenMPPSource =
llvm::ConstantExpr::getBitCast(DefaultOpenMPPSource, CGM.Int8PtrTy);
}
- auto DefaultOpenMPLocation = new llvm::GlobalVariable(
- CGM.getModule(), IdentTy, /*isConstant*/ true,
- llvm::GlobalValue::PrivateLinkage, /*Initializer*/ nullptr);
+
+ ConstantInitBuilder builder(CGM);
+ auto fields = builder.beginStruct(IdentTy);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.addInt(CGM.Int32Ty, Flags);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.addInt(CGM.Int32Ty, 0);
+ fields.add(DefaultOpenMPPSource);
+ auto DefaultOpenMPLocation =
+ fields.finishAndCreateGlobal("", Align, /*isConstant*/ true,
+ llvm::GlobalValue::PrivateLinkage);
DefaultOpenMPLocation->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- DefaultOpenMPLocation->setAlignment(Align.getQuantity());
-
- llvm::Constant *Zero = llvm::ConstantInt::get(CGM.Int32Ty, 0, true);
- llvm::Constant *Values[] = {Zero,
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- Zero, Zero, DefaultOpenMPPSource};
- llvm::Constant *Init = llvm::ConstantStruct::get(IdentTy, Values);
- DefaultOpenMPLocation->setInitializer(Init);
+
OpenMPDefaultLocMap[Flags] = Entry = DefaultOpenMPLocation;
}
return Address(Entry, Align);
@@ -2767,7 +2770,6 @@ createOffloadingBinaryDescriptorFunction(CodeGenModule &CGM, StringRef Name,
Args.push_back(&DummyPtr);
CodeGenFunction CGF(CGM);
- GlobalDecl();
auto &FI = CGM.getTypes().arrangeBuiltinFunctionDeclaration(C.VoidTy, Args);
auto FTy = CGM.getTypes().GetFunctionType(FI);
auto *Fn =
@@ -2810,9 +2812,10 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
".omp_offloading.entries_end");
// Create all device images
- llvm::SmallVector<llvm::Constant *, 4> DeviceImagesEntires;
auto *DeviceImageTy = cast<llvm::StructType>(
CGM.getTypes().ConvertTypeForMem(getTgtDeviceImageQTy()));
+ ConstantInitBuilder DeviceImagesBuilder(CGM);
+ auto DeviceImagesEntries = DeviceImagesBuilder.beginArray(DeviceImageTy);
for (unsigned i = 0; i < Devices.size(); ++i) {
StringRef T = Devices[i].getTriple();
@@ -2824,22 +2827,19 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
M, CGM.Int8Ty, /*isConstant=*/true, llvm::GlobalValue::ExternalLinkage,
/*Initializer=*/nullptr, Twine(".omp_offloading.img_end.") + Twine(T));
- llvm::Constant *Dev =
- llvm::ConstantStruct::get(DeviceImageTy, ImgBegin, ImgEnd,
- HostEntriesBegin, HostEntriesEnd, nullptr);
- DeviceImagesEntires.push_back(Dev);
+ auto Dev = DeviceImagesEntries.beginStruct(DeviceImageTy);
+ Dev.add(ImgBegin);
+ Dev.add(ImgEnd);
+ Dev.add(HostEntriesBegin);
+ Dev.add(HostEntriesEnd);
+ Dev.finishAndAddTo(DeviceImagesEntries);
}
// Create device images global array.
- llvm::ArrayType *DeviceImagesInitTy =
- llvm::ArrayType::get(DeviceImageTy, DeviceImagesEntires.size());
- llvm::Constant *DeviceImagesInit =
- llvm::ConstantArray::get(DeviceImagesInitTy, DeviceImagesEntires);
-
- llvm::GlobalVariable *DeviceImages = new llvm::GlobalVariable(
- M, DeviceImagesInitTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, DeviceImagesInit,
- ".omp_offloading.device_images");
+ llvm::GlobalVariable *DeviceImages =
+ DeviceImagesEntries.finishAndCreateGlobal(".omp_offloading.device_images",
+ CGM.getPointerAlign(),
+ /*isConstant=*/true);
DeviceImages->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
// This is a Zero array to be used in the creation of the constant expressions
@@ -2849,16 +2849,18 @@ CGOpenMPRuntime::createOffloadingBinaryDescriptorRegistration() {
// Create the target region descriptor.
auto *BinaryDescriptorTy = cast<llvm::StructType>(
CGM.getTypes().ConvertTypeForMem(getTgtBinaryDescriptorQTy()));
- llvm::Constant *TargetRegionsDescriptorInit = llvm::ConstantStruct::get(
- BinaryDescriptorTy, llvm::ConstantInt::get(CGM.Int32Ty, Devices.size()),
- llvm::ConstantExpr::getGetElementPtr(DeviceImagesInitTy, DeviceImages,
- Index),
- HostEntriesBegin, HostEntriesEnd, nullptr);
-
- auto *Desc = new llvm::GlobalVariable(
- M, BinaryDescriptorTy, /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, TargetRegionsDescriptorInit,
- ".omp_offloading.descriptor");
+ ConstantInitBuilder DescBuilder(CGM);
+ auto DescInit = DescBuilder.beginStruct(BinaryDescriptorTy);
+ DescInit.addInt(CGM.Int32Ty, Devices.size());
+ DescInit.add(llvm::ConstantExpr::getGetElementPtr(DeviceImages->getValueType(),
+ DeviceImages,
+ Index));
+ DescInit.add(HostEntriesBegin);
+ DescInit.add(HostEntriesEnd);
+
+ auto *Desc = DescInit.finishAndCreateGlobal(".omp_offloading.descriptor",
+ CGM.getPointerAlign(),
+ /*isConstant=*/true);
// Emit code to register or unregister the descriptor at execution
// startup or closing, respectively.
@@ -2906,25 +2908,30 @@ void CGOpenMPRuntime::createOffloadEntry(llvm::Constant *ID,
Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
llvm::Constant *StrPtr = llvm::ConstantExpr::getBitCast(Str, CGM.Int8PtrTy);
+ // We can't have any padding between symbols, so we need to have 1-byte
+ // alignment.
+ auto Align = CharUnits::fromQuantity(1);
+
// Create the entry struct.
- llvm::Constant *EntryInit = llvm::ConstantStruct::get(
- TgtOffloadEntryType, AddrPtr, StrPtr,
- llvm::ConstantInt::get(CGM.SizeTy, Size), nullptr);
- llvm::GlobalVariable *Entry = new llvm::GlobalVariable(
- M, TgtOffloadEntryType, true, llvm::GlobalValue::ExternalLinkage,
- EntryInit, ".omp_offloading.entry");
+ ConstantInitBuilder EntryBuilder(CGM);
+ auto EntryInit = EntryBuilder.beginStruct(TgtOffloadEntryType);
+ EntryInit.add(AddrPtr);
+ EntryInit.add(StrPtr);
+ EntryInit.addInt(CGM.SizeTy, Size);
+ llvm::GlobalVariable *Entry =
+ EntryInit.finishAndCreateGlobal(".omp_offloading.entry",
+ Align,
+ /*constant*/ true,
+ llvm::GlobalValue::ExternalLinkage);
// The entry has to be created in the section the linker expects it to be.
Entry->setSection(".omp_offloading.entries");
- // We can't have any padding between symbols, so we need to have 1-byte
- // alignment.
- Entry->setAlignment(1);
}
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
// Emit the offloading entries and metadata so that the device codegen side
- // can
- // easily figure out what to emit. The produced metadata looks like this:
+ // can easily figure out what to emit. The produced metadata looks like
+ // this:
//
// !omp_offload.info = !{!1, ...}
//
@@ -3012,7 +3019,8 @@ void CGOpenMPRuntime::loadOffloadInfoMetadata() {
return;
llvm::LLVMContext C;
- auto ME = llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C);
+ auto ME = expectedToErrorOrAndEmitErrors(
+ C, llvm::parseBitcodeFile(Buf.get()->getMemBufferRef(), C));
if (ME.getError())
return;
@@ -3465,6 +3473,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
".omp_task_privates_map.", &CGM.getModule());
CGM.SetInternalFunctionAttributes(/*D=*/nullptr, TaskPrivatesMap,
TaskPrivatesMapFnInfo);
+ TaskPrivatesMap->removeFnAttr(llvm::Attribute::NoInline);
TaskPrivatesMap->addFnAttr(llvm::Attribute::AlwaysInline);
CodeGenFunction CGF(CGM);
CGF.disableDebugInfo();
@@ -4436,9 +4445,8 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto *IdentTLoc = emitUpdateLocation(CGF, Loc, OMP_ATOMIC_REDUCE);
auto *ThreadId = getThreadID(CGF, Loc);
auto *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
- auto *RL =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(ReductionList.getPointer(),
- CGF.VoidPtrTy);
+ auto *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ ReductionList.getPointer(), CGF.VoidPtrTy);
llvm::Value *Args[] = {
IdentTLoc, // ident_t *<loc>
ThreadId, // i32 <gtid>
@@ -4981,6 +4989,9 @@ public:
/// map/privatization results in multiple arguments passed to the runtime
/// library.
OMP_MAP_FIRST_REF = 0x20,
+ /// \brief Signal that the runtime library has to return the device pointer
+ /// in the current position for the data being mapped.
+ OMP_MAP_RETURN_PTR = 0x40,
/// \brief This flag signals that the reference being passed is a pointer to
/// private data.
OMP_MAP_PRIVATE_PTR = 0x80,
@@ -4988,12 +4999,30 @@ public:
OMP_MAP_PRIVATE_VAL = 0x100,
};
+ /// Class that associates information with a base pointer to be passed to the
+ /// runtime library.
+ class BasePointerInfo {
+ /// The base pointer.
+ llvm::Value *Ptr = nullptr;
+ /// The base declaration that refers to this device pointer, or null if
+ /// there is none.
+ const ValueDecl *DevPtrDecl = nullptr;
+
+ public:
+ BasePointerInfo(llvm::Value *Ptr, const ValueDecl *DevPtrDecl = nullptr)
+ : Ptr(Ptr), DevPtrDecl(DevPtrDecl) {}
+ llvm::Value *operator*() const { return Ptr; }
+ const ValueDecl *getDevicePtrDecl() const { return DevPtrDecl; }
+ void setDevicePtrDecl(const ValueDecl *D) { DevPtrDecl = D; }
+ };
+
+ typedef SmallVector<BasePointerInfo, 16> MapBaseValuesArrayTy;
typedef SmallVector<llvm::Value *, 16> MapValuesArrayTy;
typedef SmallVector<unsigned, 16> MapFlagsArrayTy;
private:
/// \brief Directive from where the map clauses were extracted.
- const OMPExecutableDirective &Directive;
+ const OMPExecutableDirective &CurDir;
/// \brief Function the directive is being generated for.
CodeGenFunction &CGF;
@@ -5001,6 +5030,13 @@ private:
/// \brief Set of all first private variables in the current directive.
llvm::SmallPtrSet<const VarDecl *, 8> FirstPrivateDecls;
+ /// Map between device pointer declarations and their expression components.
+ /// The key value for declarations in 'this' is null.
+ llvm::DenseMap<
+ const ValueDecl *,
+ SmallVector<OMPClauseMappableExprCommon::MappableExprComponentListRef, 4>>
+ DevPointersMap;
+
llvm::Value *getExprTypeSize(const Expr *E) const {
auto ExprTy = E->getType().getCanonicalType();
@@ -5129,7 +5165,7 @@ private:
void generateInfoForComponentList(
OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
- MapValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
+ MapBaseValuesArrayTy &BasePointers, MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes, MapFlagsArrayTy &Types,
bool IsFirstComponentList) const {
@@ -5271,15 +5307,13 @@ private:
// If the variable is a pointer and is being dereferenced (i.e. is not
// the last component), the base has to be the pointer itself, not its
- // reference.
- if (I->getAssociatedDeclaration()->getType()->isAnyPointerType() &&
- std::next(I) != CE) {
- auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(
- BP, I->getAssociatedDeclaration()->getType());
+ // reference. References are ignored for mapping purposes.
+ QualType Ty =
+ I->getAssociatedDeclaration()->getType().getNonReferenceType();
+ if (Ty->isAnyPointerType() && std::next(I) != CE) {
+ auto PtrAddr = CGF.MakeNaturalAlignAddrLValue(BP, Ty);
BP = CGF.EmitLoadOfPointerLValue(PtrAddr.getAddress(),
- I->getAssociatedDeclaration()
- ->getType()
- ->getAs<PointerType>())
+ Ty->castAs<PointerType>())
.getPointer();
// We do not need to generate individual map information for the
@@ -5322,14 +5356,34 @@ private:
isa<OMPArraySectionExpr>(Next->getAssociatedExpression())) &&
"Unexpected expression");
- // Save the base we are currently using.
- BasePointers.push_back(BP);
-
auto *LB = CGF.EmitLValue(I->getAssociatedExpression()).getPointer();
auto *Size = getExprTypeSize(I->getAssociatedExpression());
+ // If we have a member expression and the current component is a
+ // reference, we have to map the reference too. Whenever we have a
+ // reference, the section that reference refers to is going to be a
+ // load instruction from the storage assigned to the reference.
+ if (isa<MemberExpr>(I->getAssociatedExpression()) &&
+ I->getAssociatedDeclaration()->getType()->isReferenceType()) {
+ auto *LI = cast<llvm::LoadInst>(LB);
+ auto *RefAddr = LI->getPointerOperand();
+
+ BasePointers.push_back(BP);
+ Pointers.push_back(RefAddr);
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(getMapTypeBits(
+ /*MapType*/ OMPC_MAP_alloc, /*MapTypeModifier=*/OMPC_MAP_unknown,
+ !IsExpressionFirstInfo, IsCaptureFirstInfo));
+ IsExpressionFirstInfo = false;
+ IsCaptureFirstInfo = false;
+ // The reference will be the next base address.
+ BP = RefAddr;
+ }
+
+ BasePointers.push_back(BP);
Pointers.push_back(LB);
Sizes.push_back(Size);
+
// We need to add a pointer flag for each map that comes from the
// same expression except for the first one. We also need to signal
// this map is the first one that relates with the current capture
@@ -5373,17 +5427,23 @@ private:
public:
MappableExprsHandler(const OMPExecutableDirective &Dir, CodeGenFunction &CGF)
- : Directive(Dir), CGF(CGF) {
+ : CurDir(Dir), CGF(CGF) {
// Extract firstprivate clause information.
for (const auto *C : Dir.getClausesOfKind<OMPFirstprivateClause>())
for (const auto *D : C->varlists())
FirstPrivateDecls.insert(
cast<VarDecl>(cast<DeclRefExpr>(D)->getDecl())->getCanonicalDecl());
+ // Extract device pointer clause information.
+ for (const auto *C : Dir.getClausesOfKind<OMPIsDevicePtrClause>())
+ for (auto L : C->component_lists())
+ DevPointersMap[L.first].push_back(L.second);
}
/// \brief Generate all the base pointers, section pointers, sizes and map
- /// types for the extracted mappable expressions.
- void generateAllInfo(MapValuesArrayTy &BasePointers,
+ /// types for the extracted mappable expressions. Also, for each item that
+ /// relates with a device pointer, a pair of the relevant declaration and
+ /// index where it occurs is appended to the device pointers info array.
+ void generateAllInfo(MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers, MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
BasePointers.clear();
@@ -5392,9 +5452,32 @@ public:
Types.clear();
struct MapInfo {
+ /// Kind that defines how a device pointer has to be returned.
+ enum ReturnPointerKind {
+ // Don't have to return any pointer.
+ RPK_None,
+ // Pointer is the base of the declaration.
+ RPK_Base,
+ // Pointer is a member of the base declaration - 'this'
+ RPK_Member,
+ // Pointer is a reference and a member of the base declaration - 'this'
+ RPK_MemberReference,
+ };
OMPClauseMappableExprCommon::MappableExprComponentListRef Components;
OpenMPMapClauseKind MapType;
OpenMPMapClauseKind MapTypeModifier;
+ ReturnPointerKind ReturnDevicePointer;
+
+ MapInfo()
+ : MapType(OMPC_MAP_unknown), MapTypeModifier(OMPC_MAP_unknown),
+ ReturnDevicePointer(RPK_None) {}
+ MapInfo(
+ OMPClauseMappableExprCommon::MappableExprComponentListRef Components,
+ OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapTypeModifier,
+ ReturnPointerKind ReturnDevicePointer)
+ : Components(Components), MapType(MapType),
+ MapTypeModifier(MapTypeModifier),
+ ReturnDevicePointer(ReturnDevicePointer) {}
};
// We have to process the component lists that relate with the same
@@ -5404,24 +5487,77 @@ public:
// Helper function to fill the information map for the different supported
// clauses.
- auto &&InfoGen =
- [&Info](const ValueDecl *D,
- OMPClauseMappableExprCommon::MappableExprComponentListRef L,
- OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier) {
- const ValueDecl *VD =
- D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
- Info[VD].push_back({L, MapType, MapModifier});
- };
+ auto &&InfoGen = [&Info](
+ const ValueDecl *D,
+ OMPClauseMappableExprCommon::MappableExprComponentListRef L,
+ OpenMPMapClauseKind MapType, OpenMPMapClauseKind MapModifier,
+ MapInfo::ReturnPointerKind ReturnDevicePointer) {
+ const ValueDecl *VD =
+ D ? cast<ValueDecl>(D->getCanonicalDecl()) : nullptr;
+ Info[VD].push_back({L, MapType, MapModifier, ReturnDevicePointer});
+ };
- for (auto *C : Directive.getClausesOfKind<OMPMapClause>())
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier());
- for (auto *C : Directive.getClausesOfKind<OMPToClause>())
+ InfoGen(L.first, L.second, C->getMapType(), C->getMapTypeModifier(),
+ MapInfo::RPK_None);
+ for (auto *C : this->CurDir.getClausesOfKind<OMPToClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown);
- for (auto *C : Directive.getClausesOfKind<OMPFromClause>())
+ InfoGen(L.first, L.second, OMPC_MAP_to, OMPC_MAP_unknown,
+ MapInfo::RPK_None);
+ for (auto *C : this->CurDir.getClausesOfKind<OMPFromClause>())
for (auto L : C->component_lists())
- InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown);
+ InfoGen(L.first, L.second, OMPC_MAP_from, OMPC_MAP_unknown,
+ MapInfo::RPK_None);
+
+ // Look at the use_device_ptr clause information and mark the existing map
+ // entries as such. If there is no map information for an entry in the
+ // use_device_ptr list, we create one with map type 'alloc' and zero size
+  // section. It is the user's fault if that was not mapped before.
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPUseDevicePtrClause>())
+ for (auto L : C->component_lists()) {
+ assert(!L.second.empty() && "Not expecting empty list of components!");
+ const ValueDecl *VD = L.second.back().getAssociatedDeclaration();
+ VD = cast<ValueDecl>(VD->getCanonicalDecl());
+ auto *IE = L.second.back().getAssociatedExpression();
+ // If the first component is a member expression, we have to look into
+ // 'this', which maps to null in the map of map information. Otherwise
+ // look directly for the information.
+ auto It = Info.find(isa<MemberExpr>(IE) ? nullptr : VD);
+
+ // We potentially have map information for this declaration already.
+ // Look for the first set of components that refer to it.
+ if (It != Info.end()) {
+ auto CI = std::find_if(
+ It->second.begin(), It->second.end(), [VD](const MapInfo &MI) {
+ return MI.Components.back().getAssociatedDeclaration() == VD;
+ });
+ // If we found a map entry, signal that the pointer has to be returned
+ // and move on to the next declaration.
+ if (CI != It->second.end()) {
+ CI->ReturnDevicePointer = isa<MemberExpr>(IE)
+ ? (VD->getType()->isReferenceType()
+ ? MapInfo::RPK_MemberReference
+ : MapInfo::RPK_Member)
+ : MapInfo::RPK_Base;
+ continue;
+ }
+ }
+
+ // We didn't find any match in our map information - generate a zero
+ // size array section.
+ // FIXME: MSVC 2013 seems to require this-> to find member CGF.
+ llvm::Value *Ptr =
+ this->CGF
+ .EmitLoadOfLValue(this->CGF.EmitLValue(IE), SourceLocation())
+ .getScalarVal();
+ BasePointers.push_back({Ptr, VD});
+ Pointers.push_back(Ptr);
+ Sizes.push_back(llvm::Constant::getNullValue(this->CGF.SizeTy));
+ Types.push_back(OMP_MAP_RETURN_PTR | OMP_MAP_FIRST_REF);
+ }
for (auto &M : Info) {
// We need to know when we generate information for the first component
@@ -5430,9 +5566,36 @@ public:
for (MapInfo &L : M.second) {
assert(!L.Components.empty() &&
"Not expecting declaration with no component lists.");
- generateInfoForComponentList(L.MapType, L.MapTypeModifier, L.Components,
- BasePointers, Pointers, Sizes, Types,
- IsFirstComponentList);
+
+ // Remember the current base pointer index.
+ unsigned CurrentBasePointersIdx = BasePointers.size();
+ // FIXME: MSVC 2013 seems to require this-> to find the member method.
+ this->generateInfoForComponentList(L.MapType, L.MapTypeModifier,
+ L.Components, BasePointers, Pointers,
+ Sizes, Types, IsFirstComponentList);
+
+ // If this entry relates with a device pointer, set the relevant
+ // declaration and add the 'return pointer' flag.
+ if (IsFirstComponentList &&
+ L.ReturnDevicePointer != MapInfo::RPK_None) {
+ // If the pointer is not the base of the map, we need to skip the
+ // base. If it is a reference in a member field, we also need to skip
+ // the map of the reference.
+ if (L.ReturnDevicePointer != MapInfo::RPK_Base) {
+ ++CurrentBasePointersIdx;
+ if (L.ReturnDevicePointer == MapInfo::RPK_MemberReference)
+ ++CurrentBasePointersIdx;
+ }
+ assert(BasePointers.size() > CurrentBasePointersIdx &&
+ "Unexpected number of mapped base pointers.");
+
+ auto *RelevantVD = L.Components.back().getAssociatedDeclaration();
+ assert(RelevantVD &&
+ "No relevant declaration related with device pointer??");
+
+ BasePointers[CurrentBasePointersIdx].setDevicePtrDecl(RelevantVD);
+ Types[CurrentBasePointersIdx] |= OMP_MAP_RETURN_PTR;
+ }
IsFirstComponentList = false;
}
}
@@ -5441,7 +5604,8 @@ public:
/// \brief Generate the base pointers, section pointers, sizes and map types
/// associated to a given capture.
void generateInfoForCapture(const CapturedStmt::Capture *Cap,
- MapValuesArrayTy &BasePointers,
+ llvm::Value *Arg,
+ MapBaseValuesArrayTy &BasePointers,
MapValuesArrayTy &Pointers,
MapValuesArrayTy &Sizes,
MapFlagsArrayTy &Types) const {
@@ -5453,15 +5617,40 @@ public:
Sizes.clear();
Types.clear();
+    // We need to know when we are generating information for the first
+    // component associated with a capture, because the mapping flags depend
+    // on it.
+ bool IsFirstComponentList = true;
+
const ValueDecl *VD =
Cap->capturesThis()
? nullptr
: cast<ValueDecl>(Cap->getCapturedVar()->getCanonicalDecl());
- // We need to know when we generating information for the first component
- // associated with a capture, because the mapping flags depend on it.
- bool IsFirstComponentList = true;
- for (auto *C : Directive.getClausesOfKind<OMPMapClause>())
+  // If this declaration appears in an is_device_ptr clause, we just have to
+ // pass the pointer by value. If it is a reference to a declaration, we just
+ // pass its value, otherwise, if it is a member expression, we need to map
+ // 'to' the field.
+ if (!VD) {
+ auto It = DevPointersMap.find(VD);
+ if (It != DevPointersMap.end()) {
+ for (auto L : It->second) {
+ generateInfoForComponentList(
+ /*MapType=*/OMPC_MAP_to, /*MapTypeModifier=*/OMPC_MAP_unknown, L,
+ BasePointers, Pointers, Sizes, Types, IsFirstComponentList);
+ IsFirstComponentList = false;
+ }
+ return;
+ }
+ } else if (DevPointersMap.count(VD)) {
+ BasePointers.push_back({Arg, VD});
+ Pointers.push_back(Arg);
+ Sizes.push_back(CGF.getTypeSize(CGF.getContext().VoidPtrTy));
+ Types.push_back(OMP_MAP_PRIVATE_VAL | OMP_MAP_FIRST_REF);
+ return;
+ }
+
+ // FIXME: MSVC 2013 seems to require this-> to find member CurDir.
+ for (auto *C : this->CurDir.getClausesOfKind<OMPMapClause>())
for (auto L : C->decl_component_lists(VD)) {
assert(L.first == VD &&
"We got information for the wrong declaration??");
@@ -5478,12 +5667,12 @@ public:
/// \brief Generate the default map information for a given capture \a CI,
/// record field declaration \a RI and captured value \a CV.
- void generateDefaultMapInfo(
- const CapturedStmt::Capture &CI, const FieldDecl &RI, llvm::Value *CV,
- MappableExprsHandler::MapValuesArrayTy &CurBasePointers,
- MappableExprsHandler::MapValuesArrayTy &CurPointers,
- MappableExprsHandler::MapValuesArrayTy &CurSizes,
- MappableExprsHandler::MapFlagsArrayTy &CurMapTypes) {
+ void generateDefaultMapInfo(const CapturedStmt::Capture &CI,
+ const FieldDecl &RI, llvm::Value *CV,
+ MapBaseValuesArrayTy &CurBasePointers,
+ MapValuesArrayTy &CurPointers,
+ MapValuesArrayTy &CurSizes,
+ MapFlagsArrayTy &CurMapTypes) {
// Do the default mapping.
if (CI.capturesThis()) {
@@ -5492,15 +5681,14 @@ public:
const PointerType *PtrTy = cast<PointerType>(RI.getType().getTypePtr());
CurSizes.push_back(CGF.getTypeSize(PtrTy->getPointeeType()));
// Default map type.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM);
+ CurMapTypes.push_back(OMP_MAP_TO | OMP_MAP_FROM);
} else if (CI.capturesVariableByCopy()) {
CurBasePointers.push_back(CV);
CurPointers.push_back(CV);
if (!RI.getType()->isAnyPointerType()) {
// We have to signal to the runtime captures passed by value that are
// not pointers.
- CurMapTypes.push_back(MappableExprsHandler::OMP_MAP_PRIVATE_VAL);
+ CurMapTypes.push_back(OMP_MAP_PRIVATE_VAL);
CurSizes.push_back(CGF.getTypeSize(RI.getType()));
} else {
// Pointers are implicitly mapped with a zero size and no flags
@@ -5521,9 +5709,8 @@ public:
// default the value doesn't have to be retrieved. For an aggregate
// type, the default is 'tofrom'.
CurMapTypes.push_back(ElementType->isAggregateType()
- ? (MappableExprsHandler::OMP_MAP_TO |
- MappableExprsHandler::OMP_MAP_FROM)
- : MappableExprsHandler::OMP_MAP_TO);
+ ? (OMP_MAP_TO | OMP_MAP_FROM)
+ : OMP_MAP_TO);
// If we have a capture by reference we may need to add the private
// pointer flag if the base declaration shows in some first-private
@@ -5533,7 +5720,7 @@ public:
}
// Every default map produces a single argument, so, it is always the
// first one.
- CurMapTypes.back() |= MappableExprsHandler::OMP_MAP_FIRST_REF;
+ CurMapTypes.back() |= OMP_MAP_FIRST_REF;
}
};
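
The handler above now emits a MapBaseValuesArrayTy of BasePointerInfo entries in lockstep with the Pointers, Sizes and Types arrays: dereferencing an entry yields the base llvm::Value, and getDevicePtrDecl() is non-null when the runtime must hand the device address back for that declaration (use_device_ptr / is_device_ptr). A minimal consumption sketch, mirroring what emitOffloadingArrays() does further below; the helper name is illustrative:

// Illustrative only: the four arrays are parallel, entry i describes one map.
static void walkMapEntries(
    const MappableExprsHandler::MapBaseValuesArrayTy &BasePointers) {
  for (unsigned i = 0, e = BasePointers.size(); i != e; ++i) {
    llvm::Value *BPVal = *BasePointers[i];               // base pointer value
    if (const ValueDecl *DevVD = BasePointers[i].getDevicePtrDecl()) {
      // Record BPVal against DevVD, as emitOffloadingArrays() does via
      // Info.CaptureDeviceAddrMap when requiresDevicePointerInfo() is set.
      (void)DevVD;
    }
    (void)BPVal;
  }
}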
@@ -5548,19 +5735,20 @@ enum OpenMPOffloadingReservedDeviceIDs {
/// offloading runtime library. If there is no map or capture information,
/// return nullptr by reference.
static void
-emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
- llvm::Value *&PointersArray, llvm::Value *&SizesArray,
- llvm::Value *&MapTypesArray,
- MappableExprsHandler::MapValuesArrayTy &BasePointers,
+emitOffloadingArrays(CodeGenFunction &CGF,
+ MappableExprsHandler::MapBaseValuesArrayTy &BasePointers,
MappableExprsHandler::MapValuesArrayTy &Pointers,
MappableExprsHandler::MapValuesArrayTy &Sizes,
- MappableExprsHandler::MapFlagsArrayTy &MapTypes) {
+ MappableExprsHandler::MapFlagsArrayTy &MapTypes,
+ CGOpenMPRuntime::TargetDataInfo &Info) {
auto &CGM = CGF.CGM;
auto &Ctx = CGF.getContext();
- BasePointersArray = PointersArray = SizesArray = MapTypesArray = nullptr;
+ // Reset the array information.
+ Info.clearArrayInfo();
+ Info.NumberOfPtrs = BasePointers.size();
- if (unsigned PointerNumVal = BasePointers.size()) {
+ if (Info.NumberOfPtrs) {
// Detect if we have any capture size requiring runtime evaluation of the
// size so that a constant array could be eventually used.
bool hasRuntimeEvaluationCaptureSize = false;
@@ -5570,14 +5758,14 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
break;
}
- llvm::APInt PointerNumAP(32, PointerNumVal, /*isSigned=*/true);
+ llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
QualType PointerArrayType =
Ctx.getConstantArrayType(Ctx.VoidPtrTy, PointerNumAP, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- BasePointersArray =
+ Info.BasePointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_baseptrs").getPointer();
- PointersArray =
+ Info.PointersArray =
CGF.CreateMemTemp(PointerArrayType, ".offload_ptrs").getPointer();
// If we don't have any VLA types or other types that require runtime
@@ -5587,7 +5775,7 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
QualType SizeArrayType = Ctx.getConstantArrayType(
Ctx.getSizeType(), PointerNumAP, ArrayType::Normal,
/*IndexTypeQuals=*/0);
- SizesArray =
+ Info.SizesArray =
CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
} else {
// We expect all the sizes to be constant, so we collect them to create
@@ -5603,7 +5791,7 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
SizesArrayInit, ".offload_sizes");
SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- SizesArray = SizesArrayGbl;
+ Info.SizesArray = SizesArrayGbl;
}
// The map types are always constant so we don't need to generate code to
@@ -5615,10 +5803,10 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
/*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
MapTypesArrayInit, ".offload_maptypes");
MapTypesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- MapTypesArray = MapTypesArrayGbl;
+ Info.MapTypesArray = MapTypesArrayGbl;
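
The map-type words are likewise known at compile time, so the elided MapTypesArrayInit above can presumably be built directly from the flag array, along these lines (a sketch, assuming MapFlagsArrayTy holds 32-bit flag words):

    // One constant data array holds the per-capture map-type flags; no stores
    // are emitted at the call site because the flags never change.
    llvm::Constant *MapTypesArrayInit =
        llvm::ConstantDataArray::get(CGF.Builder.getContext(), MapTypes);
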
- for (unsigned i = 0; i < PointerNumVal; ++i) {
- llvm::Value *BPVal = BasePointers[i];
+ for (unsigned i = 0; i < Info.NumberOfPtrs; ++i) {
+ llvm::Value *BPVal = *BasePointers[i];
if (BPVal->getType()->isPointerTy())
BPVal = CGF.Builder.CreateBitCast(BPVal, CGM.VoidPtrTy);
else {
@@ -5627,11 +5815,15 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
BPVal = CGF.Builder.CreateIntToPtr(BPVal, CGM.VoidPtrTy);
}
llvm::Value *BP = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), BasePointersArray,
- 0, i);
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.BasePointersArray, 0, i);
Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(BPVal, BPAddr);
+ if (Info.requiresDevicePointerInfo())
+ if (auto *DevVD = BasePointers[i].getDevicePtrDecl())
+ Info.CaptureDeviceAddrMap.insert(std::make_pair(DevVD, BPAddr));
+
llvm::Value *PVal = Pointers[i];
if (PVal->getType()->isPointerTy())
PVal = CGF.Builder.CreateBitCast(PVal, CGM.VoidPtrTy);
@@ -5641,14 +5833,15 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
PVal = CGF.Builder.CreateIntToPtr(PVal, CGM.VoidPtrTy);
}
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, PointerNumVal), PointersArray, 0,
- i);
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.PointersArray, 0, i);
Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(PVal, PAddr);
if (hasRuntimeEvaluationCaptureSize) {
llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.SizeTy, PointerNumVal), SizesArray,
+ llvm::ArrayType::get(CGM.SizeTy, Info.NumberOfPtrs),
+ Info.SizesArray,
/*Idx0=*/0,
/*Idx1=*/i);
Address SAddr(S, Ctx.getTypeAlignInChars(Ctx.getSizeType()));
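
The store that completes this runtime-size path is elided by the hunk; a sketch of it, assuming the i-th size may need widening or truncation to the target's size type:

    // Write the (possibly runtime-computed) size into the i-th slot of the
    // stack-allocated .offload_sizes array.
    CGF.Builder.CreateStore(
        CGF.Builder.CreateIntCast(Sizes[i], CGM.SizeTy, /*isSigned=*/true),
        SAddr);
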
@@ -5664,23 +5857,24 @@ emitOffloadingArrays(CodeGenFunction &CGF, llvm::Value *&BasePointersArray,
static void emitOffloadingArraysArgument(
CodeGenFunction &CGF, llvm::Value *&BasePointersArrayArg,
llvm::Value *&PointersArrayArg, llvm::Value *&SizesArrayArg,
- llvm::Value *&MapTypesArrayArg, llvm::Value *BasePointersArray,
- llvm::Value *PointersArray, llvm::Value *SizesArray,
- llvm::Value *MapTypesArray, unsigned NumElems) {
+ llvm::Value *&MapTypesArrayArg, CGOpenMPRuntime::TargetDataInfo &Info) {
auto &CGM = CGF.CGM;
- if (NumElems) {
+ if (Info.NumberOfPtrs) {
BasePointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, NumElems), BasePointersArray,
+ llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
+ Info.BasePointersArray,
/*Idx0=*/0, /*Idx1=*/0);
PointersArrayArg = CGF.Builder.CreateConstInBoundsGEP2_32(
- llvm::ArrayType::get(CGM.VoidPtrTy, NumElems), PointersArray,
+ llvm::ArrayType::get(CGM.VoidPtrTy, Inf