author     Roman Divacky <rdivacky@FreeBSD.org>  2010-05-27 15:17:06 +0000
committer  Roman Divacky <rdivacky@FreeBSD.org>  2010-05-27 15:17:06 +0000
commit     d7279c4c177bca357ef96ff1379fd9bc420bfe83 (patch)
tree       3558f327a6f9ab59c5d7a06528d84e1560445247 /lib
parent     be17651f5cd2e94922c1b732bc8589e180698193 (diff)
Update clang to r104832. (vendor/clang/clang-r104832)

Notes:
    svn path=/vendor/clang/dist/; revision=208600
    svn path=/vendor/clang/clang-r104832/; revision=208977; tag=vendor/clang/clang-r104832
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 550
-rw-r--r--  lib/AST/ASTDiagnostic.cpp | 139
-rw-r--r--  lib/AST/ASTImporter.cpp | 123
-rw-r--r--  lib/AST/AttrImpl.cpp | 10
-rw-r--r--  lib/AST/CMakeLists.txt | 2
-rw-r--r--  lib/AST/CXXInheritance.cpp | 5
-rw-r--r--  lib/AST/Decl.cpp | 62
-rw-r--r--  lib/AST/DeclBase.cpp | 4
-rw-r--r--  lib/AST/DeclCXX.cpp | 67
-rw-r--r--  lib/AST/DeclTemplate.cpp | 111
-rw-r--r--  lib/AST/DeclarationName.cpp | 44
-rw-r--r--  lib/AST/Expr.cpp | 419
-rw-r--r--  lib/AST/ExprCXX.cpp | 101
-rw-r--r--  lib/AST/ExprConstant.cpp | 1167
-rw-r--r--  lib/AST/NestedNameSpecifier.cpp | 6
-rw-r--r--  lib/AST/RecordLayout.cpp | 7
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp | 907
-rw-r--r--  lib/AST/RecordLayoutBuilder.h | 170
-rw-r--r--  lib/AST/Stmt.cpp | 10
-rw-r--r--  lib/AST/StmtDumper.cpp | 2
-rw-r--r--  lib/AST/StmtPrinter.cpp | 2
-rw-r--r--  lib/AST/StmtProfile.cpp | 210
-rw-r--r--  lib/AST/TemplateBase.cpp | 42
-rw-r--r--  lib/AST/Type.cpp | 259
-rw-r--r--  lib/AST/TypeLoc.cpp | 59
-rw-r--r--  lib/AST/TypePrinter.cpp | 76
-rw-r--r--  lib/Analysis/CFG.cpp | 18
-rw-r--r--  lib/Analysis/CMakeLists.txt | 2
-rw-r--r--  lib/Basic/Diagnostic.cpp | 37
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 4
-rw-r--r--  lib/Basic/SourceManager.cpp | 131
-rw-r--r--  lib/Basic/TargetInfo.cpp | 1
-rw-r--r--  lib/Basic/Targets.cpp | 5
-rw-r--r--  lib/Basic/Version.cpp | 3
-rw-r--r--  lib/Checker/BasicObjCFoundationChecks.cpp | 4
-rw-r--r--  lib/Checker/BasicStore.cpp | 8
-rw-r--r--  lib/Checker/CFRefCount.cpp | 83
-rw-r--r--  lib/Checker/CMakeLists.txt | 3
-rw-r--r--  lib/Checker/CastSizeChecker.cpp | 82
-rw-r--r--  lib/Checker/CheckObjCDealloc.cpp | 3
-rw-r--r--  lib/Checker/FlatStore.cpp | 4
-rw-r--r--  lib/Checker/GRCXXExprEngine.cpp | 4
-rw-r--r--  lib/Checker/GRExprEngine.cpp | 35
-rw-r--r--  lib/Checker/GRExprEngineExperimentalChecks.cpp | 1
-rw-r--r--  lib/Checker/GRExprEngineInternalChecks.h | 1
-rw-r--r--  lib/Checker/GRState.cpp | 7
-rw-r--r--  lib/Checker/LLVMConventionsChecker.cpp | 2
-rw-r--r--  lib/Checker/MallocChecker.cpp | 5
-rw-r--r--  lib/Checker/MemRegion.cpp | 2
-rw-r--r--  lib/Checker/ObjCUnusedIVarsChecker.cpp | 3
-rw-r--r--  lib/Checker/RegionStore.cpp | 41
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 483
-rw-r--r--  lib/CodeGen/CGBlocks.h | 46
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 70
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 9
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 37
-rw-r--r--  lib/CodeGen/CGCall.cpp | 34
-rw-r--r--  lib/CodeGen/CGClass.cpp | 394
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 246
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 8
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 108
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 153
-rw-r--r--  lib/CodeGen/CGException.cpp | 74
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 180
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 123
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 55
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 143
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 29
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 92
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 67
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 58
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 2
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 9
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 122
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 31
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 2
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 74
-rw-r--r--  lib/CodeGen/CGVTables.h | 10
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 3
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 22
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 61
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 57
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 27
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 32
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 8
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 39
-rw-r--r--  lib/CodeGen/Mangle.cpp | 113
-rw-r--r--  lib/CodeGen/Mangle.h | 76
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 7
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 189
-rw-r--r--  lib/Driver/ArgList.cpp | 28
-rw-r--r--  lib/Driver/CC1AsOptions.cpp | 39
-rw-r--r--  lib/Driver/CC1Options.cpp | 2
-rw-r--r--  lib/Driver/CMakeLists.txt | 4
-rw-r--r--  lib/Driver/Driver.cpp | 65
-rw-r--r--  lib/Driver/Tool.cpp | 6
-rw-r--r--  lib/Driver/ToolChains.cpp | 20
-rw-r--r--  lib/Driver/ToolChains.h | 29
-rw-r--r--  lib/Driver/Tools.cpp | 143
-rw-r--r--  lib/Driver/Tools.h | 123
-rw-r--r--  lib/Frontend/AnalysisConsumer.cpp | 4
-rw-r--r--  lib/Frontend/BoostConAction.cpp | 39
-rw-r--r--  lib/Frontend/CMakeLists.txt | 4
-rw-r--r--  lib/Frontend/CodeGenAction.cpp | 15
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 6
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 144
-rw-r--r--  lib/Frontend/DeclXML.cpp | 48
-rw-r--r--  lib/Frontend/DocumentXML.cpp | 29
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 200
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 5
-rw-r--r--  lib/Frontend/PCHReader.cpp | 93
-rw-r--r--  lib/Frontend/PCHReaderDecl.cpp | 303
-rw-r--r--  lib/Frontend/PCHReaderStmt.cpp | 140
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 110
-rw-r--r--  lib/Frontend/PCHWriterDecl.cpp | 217
-rw-r--r--  lib/Frontend/PCHWriterStmt.cpp | 120
-rw-r--r--  lib/Frontend/PrintParserCallbacks.cpp | 6
-rw-r--r--  lib/Frontend/RewriteObjC.cpp | 127
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 40
-rw-r--r--  lib/Headers/emmintrin.h | 5
-rw-r--r--  lib/Headers/xmmintrin.h | 3
-rw-r--r--  lib/Index/ASTLocation.cpp | 2
-rw-r--r--  lib/Index/Analyzer.cpp | 8
-rw-r--r--  lib/Index/ResolveLocation.cpp | 29
-rw-r--r--  lib/Lex/Lexer.cpp | 11
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 55
-rw-r--r--  lib/Parse/AttributeList.cpp | 6
-rw-r--r--  lib/Parse/MinimalAction.cpp | 4
-rw-r--r--  lib/Parse/ParseDecl.cpp | 70
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 21
-rw-r--r--  lib/Parse/ParseExpr.cpp | 33
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 140
-rw-r--r--  lib/Parse/ParseObjc.cpp | 58
-rw-r--r--  lib/Parse/ParsePragma.cpp | 53
-rw-r--r--  lib/Parse/ParsePragma.h | 9
-rw-r--r--  lib/Parse/ParseStmt.cpp | 94
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 37
-rw-r--r--  lib/Parse/ParseTentative.cpp | 1
-rw-r--r--  lib/Parse/Parser.cpp | 38
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp | 29
-rw-r--r--  lib/Sema/CMakeLists.txt | 2
-rw-r--r--  lib/Sema/CodeCompleteConsumer.cpp | 67
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 378
-rw-r--r--  lib/Sema/Sema.cpp | 43
-rw-r--r--  lib/Sema/Sema.h | 146
-rw-r--r--  lib/Sema/SemaAttr.cpp | 98
-rw-r--r--  lib/Sema/SemaCXXCast.cpp | 25
-rw-r--r--  lib/Sema/SemaChecking.cpp | 406
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 555
-rw-r--r--  lib/Sema/SemaDecl.cpp | 263
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 68
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 605
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 52
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 2
-rw-r--r--  lib/Sema/SemaExpr.cpp | 258
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 153
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 59
-rw-r--r--  lib/Sema/SemaInit.cpp | 87
-rw-r--r--  lib/Sema/SemaInit.h | 44
-rw-r--r--  lib/Sema/SemaLookup.cpp | 97
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 184
-rw-r--r--  lib/Sema/SemaOverload.cpp | 588
-rw-r--r--  lib/Sema/SemaOverload.h | 45
-rw-r--r--  lib/Sema/SemaStmt.cpp | 397
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 286
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 33
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 85
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 87
-rw-r--r--  lib/Sema/SemaType.cpp | 242
-rw-r--r--  lib/Sema/TreeTransform.h | 328
170 files changed, 11411 insertions, 5423 deletions
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index eea727deb44d..851f8d1c68bf 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/Builtins.h"
@@ -27,7 +28,6 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
-#include "RecordLayoutBuilder.h"
using namespace clang;
@@ -46,7 +46,9 @@ ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
SourceMgr(SM), LangOpts(LOpts), FreeMemory(FreeMem), Target(t),
Idents(idents), Selectors(sels),
- BuiltinInfo(builtins), ExternalSource(0), PrintingPolicy(LOpts),
+ BuiltinInfo(builtins),
+ DeclarationNames(*this),
+ ExternalSource(0), PrintingPolicy(LOpts),
LastSDM(0, 0) {
ObjCIdRedefinitionType = QualType();
ObjCClassRedefinitionType = QualType();
@@ -61,6 +63,12 @@ ASTContext::~ASTContext() {
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
+ if (!FreeMemory) {
+ // Call all of the deallocation functions.
+ for (unsigned I = 0, N = Deallocations.size(); I != N; ++I)
+ Deallocations[I].first(Deallocations[I].second);
+ }
+
// Release all of the memory associated with overridden C++ methods.
for (llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::iterator
OM = OverriddenMethods.begin(), OMEnd = OverriddenMethods.end();
@@ -111,6 +119,10 @@ ASTContext::~ASTContext() {
TUDecl->Destroy(*this);
}
+void ASTContext::AddDeallocation(void (*Callback)(void*), void *Data) {
+ Deallocations.push_back(std::make_pair(Callback, Data));
+}
+
void
ASTContext::setExternalSource(llvm::OwningPtr<ExternalASTSource> &Source) {
ExternalSource.reset(Source.take());
@@ -419,6 +431,18 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
return CharUnits::fromQuantity(Align / Target.getCharWidth());
}
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(const Type *T) {
+ std::pair<uint64_t, unsigned> Info = getTypeInfo(T);
+ return std::make_pair(CharUnits::fromQuantity(Info.first / getCharWidth()),
+ CharUnits::fromQuantity(Info.second / getCharWidth()));
+}
+
+std::pair<CharUnits, CharUnits>
+ASTContext::getTypeInfoInChars(QualType T) {
+ return getTypeInfoInChars(T.getTypePtr());
+}
+
/// getTypeSize - Return the size of the specified type, in bits. This method
/// does not work on incomplete types.
///
@@ -593,6 +617,8 @@ ASTContext::getTypeInfo(const Type *T) {
Align = EltInfo.second;
break;
}
+ case Type::ObjCObject:
+ return getTypeInfo(cast<ObjCObjectType>(T)->getBaseType().getTypePtr());
case Type::ObjCInterface: {
const ObjCInterfaceType *ObjCI = cast<ObjCInterfaceType>(T);
const ASTRecordLayout &Layout = getASTObjCInterfaceLayout(ObjCI->getDecl());
@@ -624,10 +650,6 @@ ASTContext::getTypeInfo(const Type *T) {
return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
getReplacementType().getTypePtr());
- case Type::Elaborated:
- return getTypeInfo(cast<ElaboratedType>(T)->getUnderlyingType()
- .getTypePtr());
-
case Type::Typedef: {
const TypedefDecl *Typedef = cast<TypedefType>(T)->getDecl();
if (const AlignedAttr *Aligned = Typedef->getAttr<AlignedAttr>()) {
@@ -650,8 +672,8 @@ ASTContext::getTypeInfo(const Type *T) {
return getTypeInfo(cast<DecltypeType>(T)->getUnderlyingExpr()->getType()
.getTypePtr());
- case Type::QualifiedName:
- return getTypeInfo(cast<QualifiedNameType>(T)->getNamedType().getTypePtr());
+ case Type::Elaborated:
+ return getTypeInfo(cast<ElaboratedType>(T)->getNamedType().getTypePtr());
case Type::TemplateSpecialization:
assert(getCanonicalType(T) != T &&
@@ -875,40 +897,6 @@ TypeSourceInfo *ASTContext::getTrivialTypeSourceInfo(QualType T,
return DI;
}
-/// getInterfaceLayoutImpl - Get or compute information about the
-/// layout of the given interface.
-///
-/// \param Impl - If given, also include the layout of the interface's
-/// implementation. This may differ by including synthesized ivars.
-const ASTRecordLayout &
-ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl) {
- assert(!D->isForwardDecl() && "Invalid interface decl!");
-
- // Look up this layout, if already laid out, return what we have.
- ObjCContainerDecl *Key =
- Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
- if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
- return *Entry;
-
- // Add in synthesized ivar count if laying out an implementation.
- if (Impl) {
- unsigned SynthCount = CountNonClassIvars(D);
- // If there aren't any sythesized ivars then reuse the interface
- // entry. Note we can't cache this because we simply free all
- // entries later; however we shouldn't look up implementations
- // frequently.
- if (SynthCount == 0)
- return getObjCLayout(D, 0);
- }
-
- const ASTRecordLayout *NewEntry =
- ASTRecordLayoutBuilder::ComputeLayout(*this, D, Impl);
- ObjCLayouts[Key] = NewEntry;
-
- return *NewEntry;
-}
-
const ASTRecordLayout &
ASTContext::getASTObjCInterfaceLayout(const ObjCInterfaceDecl *D) {
return getObjCLayout(D, 0);
@@ -919,45 +907,6 @@ ASTContext::getASTObjCImplementationLayout(const ObjCImplementationDecl *D) {
return getObjCLayout(D->getClassInterface(), D);
}
-/// getASTRecordLayout - Get or compute information about the layout of the
-/// specified record (struct/union/class), which indicates its size and field
-/// position information.
-const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
- D = D->getDefinition();
- assert(D && "Cannot get layout of forward declarations!");
-
- // Look up this layout, if already laid out, return what we have.
- // Note that we can't save a reference to the entry because this function
- // is recursive.
- const ASTRecordLayout *Entry = ASTRecordLayouts[D];
- if (Entry) return *Entry;
-
- const ASTRecordLayout *NewEntry =
- ASTRecordLayoutBuilder::ComputeLayout(*this, D);
- ASTRecordLayouts[D] = NewEntry;
-
- if (getLangOptions().DumpRecordLayouts) {
- llvm::errs() << "\n*** Dumping AST Record Layout\n";
- DumpRecordLayout(D, llvm::errs());
- }
-
- return *NewEntry;
-}
-
-const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
- RD = cast<CXXRecordDecl>(RD->getDefinition());
- assert(RD && "Cannot get key function for forward declarations!");
-
- const CXXMethodDecl *&Entry = KeyFunctions[RD];
- if (!Entry)
- Entry = ASTRecordLayoutBuilder::ComputeKeyFunction(RD);
- else
- assert(Entry == ASTRecordLayoutBuilder::ComputeKeyFunction(RD) &&
- "Key function changed!");
-
- return Entry;
-}
-
//===----------------------------------------------------------------------===//
// Type creation/memoization methods
//===----------------------------------------------------------------------===//
@@ -1343,9 +1292,17 @@ QualType ASTContext::getVariableArrayType(QualType EltTy,
SourceRange Brackets) {
// Since we don't unique expressions, it isn't possible to unique VLA's
// that have an expression provided for their size.
-
+ QualType CanonType;
+
+ if (!EltTy.isCanonical()) {
+ if (NumElts)
+ NumElts->Retain();
+ CanonType = getVariableArrayType(getCanonicalType(EltTy), NumElts, ASM,
+ EltTypeQuals, Brackets);
+ }
+
VariableArrayType *New = new(*this, TypeAlignment)
- VariableArrayType(EltTy, QualType(), NumElts, ASM, EltTypeQuals, Brackets);
+ VariableArrayType(EltTy, CanonType, NumElts, ASM, EltTypeQuals, Brackets);
VariableArrayTypes.push_back(New);
Types.push_back(New);
@@ -1694,10 +1651,6 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
if (const TypedefDecl *Typedef = dyn_cast<TypedefDecl>(Decl))
return getTypedefType(Typedef);
- if (const ObjCInterfaceDecl *ObjCInterface
- = dyn_cast<ObjCInterfaceDecl>(Decl))
- return getObjCInterfaceType(ObjCInterface);
-
assert(!isa<TemplateTypeParmDecl>(Decl) &&
"Template type parameter types are always available.");
@@ -1888,29 +1841,28 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
}
QualType
-ASTContext::getQualifiedNameType(NestedNameSpecifier *NNS,
- QualType NamedType) {
+ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ QualType NamedType) {
llvm::FoldingSetNodeID ID;
- QualifiedNameType::Profile(ID, NNS, NamedType);
+ ElaboratedType::Profile(ID, Keyword, NNS, NamedType);
void *InsertPos = 0;
- QualifiedNameType *T
- = QualifiedNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
QualType Canon = NamedType;
if (!Canon.isCanonical()) {
Canon = getCanonicalType(NamedType);
- QualifiedNameType *CheckT
- = QualifiedNameTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Qualified name canonical type broken");
+ ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ assert(!CheckT && "Elaborated canonical type broken");
(void)CheckT;
}
- T = new (*this) QualifiedNameType(NNS, NamedType, Canon);
+ T = new (*this) ElaboratedType(Keyword, NNS, NamedType, Canon);
Types.push_back(T);
- QualifiedNameTypes.InsertNode(T, InsertPos);
+ ElaboratedTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
@@ -1987,30 +1939,6 @@ ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
return QualType(T, 0);
}
-QualType
-ASTContext::getElaboratedType(QualType UnderlyingType,
- ElaboratedType::TagKind Tag) {
- llvm::FoldingSetNodeID ID;
- ElaboratedType::Profile(ID, UnderlyingType, Tag);
-
- void *InsertPos = 0;
- ElaboratedType *T = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- if (T)
- return QualType(T, 0);
-
- QualType Canon = UnderlyingType;
- if (!Canon.isCanonical()) {
- Canon = getCanonicalType(Canon);
- ElaboratedType *CheckT = ElaboratedTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Elaborated canonical type is broken"); (void)CheckT;
- }
-
- T = new (*this) ElaboratedType(UnderlyingType, Tag, Canon);
- Types.push_back(T);
- ElaboratedTypes.InsertNode(T, InsertPos);
- return QualType(T, 0);
-}
-
/// CmpProtocolNames - Comparison predicate for sorting protocols
/// alphabetically.
static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
@@ -2018,7 +1946,7 @@ static bool CmpProtocolNames(const ObjCProtocolDecl *LHS,
return LHS->getDeclName() < RHS->getDeclName();
}
-static bool areSortedAndUniqued(ObjCProtocolDecl **Protocols,
+static bool areSortedAndUniqued(ObjCProtocolDecl * const *Protocols,
unsigned NumProtocols) {
if (NumProtocols == 0) return true;
@@ -2040,96 +1968,98 @@ static void SortAndUniqueProtocols(ObjCProtocolDecl **Protocols,
NumProtocols = ProtocolsEnd-Protocols;
}
-/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
-/// the given interface decl and the conforming protocol list.
-QualType ASTContext::getObjCObjectPointerType(QualType InterfaceT,
- ObjCProtocolDecl **Protocols,
- unsigned NumProtocols,
- unsigned Quals) {
- llvm::FoldingSetNodeID ID;
- ObjCObjectPointerType::Profile(ID, InterfaceT, Protocols, NumProtocols);
- Qualifiers Qs = Qualifiers::fromCVRMask(Quals);
+QualType ASTContext::getObjCObjectType(QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ // If the base type is an interface and there aren't any protocols
+ // to add, then the interface type will do just fine.
+ if (!NumProtocols && isa<ObjCInterfaceType>(BaseType))
+ return BaseType;
+ // Look in the folding set for an existing type.
+ llvm::FoldingSetNodeID ID;
+ ObjCObjectTypeImpl::Profile(ID, BaseType, Protocols, NumProtocols);
void *InsertPos = 0;
- if (ObjCObjectPointerType *QT =
- ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
- return getQualifiedType(QualType(QT, 0), Qs);
+ if (ObjCObjectType *QT = ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(QT, 0);
- // Sort the protocol list alphabetically to canonicalize it.
+ // Build the canonical type, which has the canonical base type and
+ // a sorted-and-uniqued list of protocols.
QualType Canonical;
- if (!InterfaceT.isCanonical() ||
- !areSortedAndUniqued(Protocols, NumProtocols)) {
- if (!areSortedAndUniqued(Protocols, NumProtocols)) {
+ bool ProtocolsSorted = areSortedAndUniqued(Protocols, NumProtocols);
+ if (!ProtocolsSorted || !BaseType.isCanonical()) {
+ if (!ProtocolsSorted) {
llvm::SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols,
Protocols + NumProtocols);
unsigned UniqueCount = NumProtocols;
SortAndUniqueProtocols(&Sorted[0], UniqueCount);
-
- Canonical = getObjCObjectPointerType(getCanonicalType(InterfaceT),
- &Sorted[0], UniqueCount);
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ &Sorted[0], UniqueCount);
} else {
- Canonical = getObjCObjectPointerType(getCanonicalType(InterfaceT),
- Protocols, NumProtocols);
+ Canonical = getObjCObjectType(getCanonicalType(BaseType),
+ Protocols, NumProtocols);
}
// Regenerate InsertPos.
- ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
+ ObjCObjectTypes.FindNodeOrInsertPos(ID, InsertPos);
}
- // No match.
- unsigned Size = sizeof(ObjCObjectPointerType)
- + NumProtocols * sizeof(ObjCProtocolDecl *);
+ unsigned Size = sizeof(ObjCObjectTypeImpl);
+ Size += NumProtocols * sizeof(ObjCProtocolDecl *);
void *Mem = Allocate(Size, TypeAlignment);
- ObjCObjectPointerType *QType = new (Mem) ObjCObjectPointerType(Canonical,
- InterfaceT,
- Protocols,
- NumProtocols);
+ ObjCObjectTypeImpl *T =
+ new (Mem) ObjCObjectTypeImpl(Canonical, BaseType, Protocols, NumProtocols);
- Types.push_back(QType);
- ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
- return getQualifiedType(QualType(QType, 0), Qs);
+ Types.push_back(T);
+ ObjCObjectTypes.InsertNode(T, InsertPos);
+ return QualType(T, 0);
}
-/// getObjCInterfaceType - Return the unique reference to the type for the
-/// specified ObjC interface decl. The list of protocols is optional.
-QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl,
- ObjCProtocolDecl **Protocols, unsigned NumProtocols) {
+/// getObjCObjectPointerType - Return a ObjCObjectPointerType type for
+/// the given object type.
+QualType ASTContext::getObjCObjectPointerType(QualType ObjectT) {
llvm::FoldingSetNodeID ID;
- ObjCInterfaceType::Profile(ID, Decl, Protocols, NumProtocols);
+ ObjCObjectPointerType::Profile(ID, ObjectT);
void *InsertPos = 0;
- if (ObjCInterfaceType *QT =
- ObjCInterfaceTypes.FindNodeOrInsertPos(ID, InsertPos))
+ if (ObjCObjectPointerType *QT =
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(QT, 0);
- // Sort the protocol list alphabetically to canonicalize it.
+ // Find the canonical object type.
QualType Canonical;
- if (NumProtocols && !areSortedAndUniqued(Protocols, NumProtocols)) {
- llvm::SmallVector<ObjCProtocolDecl*, 8> Sorted(Protocols,
- Protocols + NumProtocols);
-
- unsigned UniqueCount = NumProtocols;
- SortAndUniqueProtocols(&Sorted[0], UniqueCount);
+ if (!ObjectT.isCanonical()) {
+ Canonical = getObjCObjectPointerType(getCanonicalType(ObjectT));
- Canonical = getObjCInterfaceType(Decl, &Sorted[0], UniqueCount);
-
- ObjCInterfaceTypes.FindNodeOrInsertPos(ID, InsertPos);
+ // Regenerate InsertPos.
+ ObjCObjectPointerTypes.FindNodeOrInsertPos(ID, InsertPos);
}
- unsigned Size = sizeof(ObjCInterfaceType)
- + NumProtocols * sizeof(ObjCProtocolDecl *);
- void *Mem = Allocate(Size, TypeAlignment);
- ObjCInterfaceType *QType = new (Mem) ObjCInterfaceType(Canonical,
- const_cast<ObjCInterfaceDecl*>(Decl),
- Protocols,
- NumProtocols);
+ // No match.
+ void *Mem = Allocate(sizeof(ObjCObjectPointerType), TypeAlignment);
+ ObjCObjectPointerType *QType =
+ new (Mem) ObjCObjectPointerType(Canonical, ObjectT);
Types.push_back(QType);
- ObjCInterfaceTypes.InsertNode(QType, InsertPos);
+ ObjCObjectPointerTypes.InsertNode(QType, InsertPos);
return QualType(QType, 0);
}
+/// getObjCInterfaceType - Return the unique reference to the type for the
+/// specified ObjC interface decl. The list of protocols is optional.
+QualType ASTContext::getObjCInterfaceType(const ObjCInterfaceDecl *Decl) {
+ if (Decl->TypeForDecl)
+ return QualType(Decl->TypeForDecl, 0);
+
+ // FIXME: redeclarations?
+ void *Mem = Allocate(sizeof(ObjCInterfaceType), TypeAlignment);
+ ObjCInterfaceType *T = new (Mem) ObjCInterfaceType(Decl);
+ Decl->TypeForDecl = T;
+ Types.push_back(T);
+ return QualType(T, 0);
+}
+
/// getTypeOfExprType - Unlike many "get<Type>" functions, we can't unique
/// TypeOfExprType AST's (since expression's are never shared). For example,
/// multiple declarations that refer to "typeof(x)" all contain different
@@ -2362,26 +2292,35 @@ CanQualType ASTContext::getCanonicalType(QualType T) {
QualType ASTContext::getUnqualifiedArrayType(QualType T,
Qualifiers &Quals) {
Quals = T.getQualifiers();
- if (!isa<ArrayType>(T)) {
+ const ArrayType *AT = getAsArrayType(T);
+ if (!AT) {
return T.getUnqualifiedType();
}
- const ArrayType *AT = cast<ArrayType>(T);
QualType Elt = AT->getElementType();
QualType UnqualElt = getUnqualifiedArrayType(Elt, Quals);
if (Elt == UnqualElt)
return T;
- if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(T)) {
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
return getConstantArrayType(UnqualElt, CAT->getSize(),
CAT->getSizeModifier(), 0);
}
- if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(T)) {
+ if (const IncompleteArrayType *IAT = dyn_cast<IncompleteArrayType>(AT)) {
return getIncompleteArrayType(UnqualElt, IAT->getSizeModifier(), 0);
}
- const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(T);
+ if (const VariableArrayType *VAT = dyn_cast<VariableArrayType>(AT)) {
+ return getVariableArrayType(UnqualElt,
+ VAT->getSizeExpr() ?
+ VAT->getSizeExpr()->Retain() : 0,
+ VAT->getSizeModifier(),
+ VAT->getIndexTypeCVRQualifiers(),
+ VAT->getBracketsRange());
+ }
+
+ const DependentSizedArrayType *DSAT = cast<DependentSizedArrayType>(AT);
return getDependentSizedArrayType(UnqualElt, DSAT->getSizeExpr()->Retain(),
DSAT->getSizeModifier(), 0,
SourceRange());
@@ -2716,6 +2655,9 @@ unsigned ASTContext::getIntegerRank(Type *T) {
/// \returns the type this bit-field will promote to, or NULL if no
/// promotion occurs.
QualType ASTContext::isPromotableBitField(Expr *E) {
+ if (E->isTypeDependent() || E->isValueDependent())
+ return QualType();
+
FieldDecl *Field = E->getBitField();
if (!Field)
return QualType();
@@ -2811,7 +2753,7 @@ CreateRecordDecl(ASTContext &Ctx, RecordDecl::TagKind TK, DeclContext *DC,
QualType ASTContext::getCFConstantStringType() {
if (!CFConstantStringTypeDecl) {
CFConstantStringTypeDecl =
- CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get("NSConstantString"));
CFConstantStringTypeDecl->startDefinition();
@@ -2853,7 +2795,7 @@ void ASTContext::setCFConstantStringType(QualType T) {
QualType ASTContext::getNSConstantStringType() {
if (!NSConstantStringTypeDecl) {
NSConstantStringTypeDecl =
- CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get("__builtin_NSString"));
NSConstantStringTypeDecl->startDefinition();
@@ -2892,7 +2834,7 @@ void ASTContext::setNSConstantStringType(QualType T) {
QualType ASTContext::getObjCFastEnumerationStateType() {
if (!ObjCFastEnumerationStateTypeDecl) {
ObjCFastEnumerationStateTypeDecl =
- CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get("__objcFastEnumerationState"));
ObjCFastEnumerationStateTypeDecl->startDefinition();
@@ -2927,7 +2869,7 @@ QualType ASTContext::getBlockDescriptorType() {
RecordDecl *T;
// FIXME: Needs the FlagAppleBlock bit.
- T = CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get("__block_descriptor"));
T->startDefinition();
@@ -2972,7 +2914,7 @@ QualType ASTContext::getBlockDescriptorExtendedType() {
RecordDecl *T;
// FIXME: Needs the FlagAppleBlock bit.
- T = CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get("__block_descriptor_withcopydispose"));
T->startDefinition();
@@ -3044,7 +2986,7 @@ QualType ASTContext::BuildByRefType(const char *DeclName, QualType Ty) {
llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
++UniqueBlockByRefTypeID << '_' << DeclName;
RecordDecl *T;
- T = CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get(Name.str()));
T->startDefinition();
QualType Int32Ty = IntTy;
@@ -3088,14 +3030,15 @@ QualType ASTContext::BuildByRefType(const char *DeclName, QualType Ty) {
QualType ASTContext::getBlockParmType(
bool BlockHasCopyDispose,
- llvm::SmallVector<const Expr *, 8> &BlockDeclRefDecls) {
+ llvm::SmallVectorImpl<const Expr *> &Layout) {
+
// FIXME: Move up
static unsigned int UniqueBlockParmTypeID = 0;
llvm::SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__block_literal_"
<< ++UniqueBlockParmTypeID;
RecordDecl *T;
- T = CreateRecordDecl(*this, TagDecl::TK_struct, TUDecl, SourceLocation(),
+ T = CreateRecordDecl(*this, TTK_Struct, TUDecl, SourceLocation(),
&Idents.get(Name.str()));
T->startDefinition();
QualType FieldTypes[] = {
@@ -3125,22 +3068,28 @@ QualType ASTContext::getBlockParmType(
T->addDecl(Field);
}
- for (size_t i = 0; i < BlockDeclRefDecls.size(); ++i) {
- const Expr *E = BlockDeclRefDecls[i];
- const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
- clang::IdentifierInfo *Name = 0;
- if (BDRE) {
+ for (unsigned i = 0; i < Layout.size(); ++i) {
+ const Expr *E = Layout[i];
+
+ QualType FieldType = E->getType();
+ IdentifierInfo *FieldName = 0;
+ if (isa<CXXThisExpr>(E)) {
+ FieldName = &Idents.get("this");
+ } else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E)) {
const ValueDecl *D = BDRE->getDecl();
- Name = &Idents.get(D->getName());
+ FieldName = D->getIdentifier();
+ if (BDRE->isByRef())
+ FieldType = BuildByRefType(D->getNameAsCString(), FieldType);
+ } else {
+ // Padding.
+ assert(isa<ConstantArrayType>(FieldType) &&
+ isa<DeclRefExpr>(E) &&
+ !cast<DeclRefExpr>(E)->getDecl()->getDeclName() &&
+ "doesn't match characteristics of padding decl");
}
- QualType FieldType = E->getType();
-
- if (BDRE && BDRE->isByRef())
- FieldType = BuildByRefType(BDRE->getDecl()->getNameAsCString(),
- FieldType);
FieldDecl *Field = FieldDecl::Create(*this, T, SourceLocation(),
- Name, FieldType, /*TInfo=*/0,
+ FieldName, FieldType, /*TInfo=*/0,
/*BitWidth=*/0, /*Mutable=*/false);
Field->setAccess(AS_public);
T->addDecl(Field);
@@ -3593,6 +3542,17 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// Anonymous structures print as '?'
if (const IdentifierInfo *II = RDecl->getIdentifier()) {
S += II->getName();
+ if (ClassTemplateSpecializationDecl *Spec
+ = dyn_cast<ClassTemplateSpecializationDecl>(RDecl)) {
+ const TemplateArgumentList &TemplateArgs = Spec->getTemplateArgs();
+ std::string TemplateArgsStr
+ = TemplateSpecializationType::PrintTemplateArgumentList(
+ TemplateArgs.getFlatArgumentList(),
+ TemplateArgs.flat_size(),
+ (*this).PrintingPolicy);
+
+ S += TemplateArgsStr;
+ }
} else {
S += '?';
}
@@ -3636,6 +3596,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
return;
}
+ // Ignore protocol qualifiers when mangling at this level.
+ if (const ObjCObjectType *OT = T->getAs<ObjCObjectType>())
+ T = OT->getBaseType();
+
if (const ObjCInterfaceType *OIT = T->getAs<ObjCInterfaceType>()) {
// @encode(class_name)
ObjCInterfaceDecl *OI = OIT->getDecl();
@@ -3718,6 +3682,11 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
return;
}
+ // gcc just blithely ignores member pointers.
+ // TODO: maybe there should be a mangling for these
+ if (T->getAs<MemberPointerType>())
+ return;
+
assert(0 && "@encode for type not implemented!");
}
@@ -4106,18 +4075,21 @@ bool ASTContext::ObjCQualifiedIdTypesAreCompatible(QualType lhs, QualType rhs,
///
bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
const ObjCObjectPointerType *RHSOPT) {
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+
// If either type represents the built-in 'id' or 'Class' types, return true.
- if (LHSOPT->isObjCBuiltinType() || RHSOPT->isObjCBuiltinType())
+ if (LHS->isObjCUnqualifiedIdOrClass() ||
+ RHS->isObjCUnqualifiedIdOrClass())
return true;
- if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
+ if (LHS->isObjCQualifiedId() || RHS->isObjCQualifiedId())
return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
QualType(RHSOPT,0),
false);
- const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
- const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
- if (LHS && RHS) // We have 2 user-defined types.
+ // If we have 2 user-defined types, fall into that path.
+ if (LHS->getInterface() && RHS->getInterface())
return canAssignObjCInterfaces(LHS, RHS);
return false;
@@ -4168,8 +4140,10 @@ void getIntersectionOfProtocols(ASTContext &Context,
const ObjCObjectPointerType *RHSOPT,
llvm::SmallVectorImpl<ObjCProtocolDecl *> &IntersectionOfProtocols) {
- const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
- const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
+ const ObjCObjectType* LHS = LHSOPT->getObjectType();
+ const ObjCObjectType* RHS = RHSOPT->getObjectType();
+ assert(LHS->getInterface() && "LHS must have an interface base");
+ assert(RHS->getInterface() && "RHS must have an interface base");
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> InheritedProtocolSet;
unsigned LHSNumProtocols = LHS->getNumProtocols();
@@ -4177,7 +4151,8 @@ void getIntersectionOfProtocols(ASTContext &Context,
InheritedProtocolSet.insert(LHS->qual_begin(), LHS->qual_end());
else {
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> LHSInheritedProtocols;
- Context.CollectInheritedProtocols(LHS->getDecl(), LHSInheritedProtocols);
+ Context.CollectInheritedProtocols(LHS->getInterface(),
+ LHSInheritedProtocols);
InheritedProtocolSet.insert(LHSInheritedProtocols.begin(),
LHSInheritedProtocols.end());
}
@@ -4192,7 +4167,8 @@ void getIntersectionOfProtocols(ASTContext &Context,
}
else {
llvm::SmallPtrSet<ObjCProtocolDecl *, 8> RHSInheritedProtocols;
- Context.CollectInheritedProtocols(RHS->getDecl(), RHSInheritedProtocols);
+ Context.CollectInheritedProtocols(RHS->getInterface(),
+ RHSInheritedProtocols);
for (llvm::SmallPtrSet<ObjCProtocolDecl*,8>::iterator I =
RHSInheritedProtocols.begin(),
E = RHSInheritedProtocols.end(); I != E; ++I)
@@ -4206,37 +4182,40 @@ void getIntersectionOfProtocols(ASTContext &Context,
/// last type comparison in a ?-exp of ObjC pointer types before a
/// warning is issued. So, its invokation is extremely rare.
QualType ASTContext::areCommonBaseCompatible(
- const ObjCObjectPointerType *LHSOPT,
- const ObjCObjectPointerType *RHSOPT) {
- const ObjCInterfaceType* LHS = LHSOPT->getInterfaceType();
- const ObjCInterfaceType* RHS = RHSOPT->getInterfaceType();
- if (!LHS || !RHS)
+ const ObjCObjectPointerType *Lptr,
+ const ObjCObjectPointerType *Rptr) {
+ const ObjCObjectType *LHS = Lptr->getObjectType();
+ const ObjCObjectType *RHS = Rptr->getObjectType();
+ const ObjCInterfaceDecl* LDecl = LHS->getInterface();
+ const ObjCInterfaceDecl* RDecl = RHS->getInterface();
+ if (!LDecl || !RDecl)
return QualType();
- while (const ObjCInterfaceDecl *LHSIDecl = LHS->getDecl()->getSuperClass()) {
- QualType LHSTy = getObjCInterfaceType(LHSIDecl);
- LHS = LHSTy->getAs<ObjCInterfaceType>();
+ while ((LDecl = LDecl->getSuperClass())) {
+ LHS = cast<ObjCInterfaceType>(getObjCInterfaceType(LDecl));
if (canAssignObjCInterfaces(LHS, RHS)) {
- llvm::SmallVector<ObjCProtocolDecl *, 8> IntersectionOfProtocols;
- getIntersectionOfProtocols(*this,
- LHSOPT, RHSOPT, IntersectionOfProtocols);
- if (IntersectionOfProtocols.empty())
- LHSTy = getObjCObjectPointerType(LHSTy);
- else
- LHSTy = getObjCObjectPointerType(LHSTy, &IntersectionOfProtocols[0],
- IntersectionOfProtocols.size());
- return LHSTy;
+ llvm::SmallVector<ObjCProtocolDecl *, 8> Protocols;
+ getIntersectionOfProtocols(*this, Lptr, Rptr, Protocols);
+
+ QualType Result = QualType(LHS, 0);
+ if (!Protocols.empty())
+ Result = getObjCObjectType(Result, Protocols.data(), Protocols.size());
+ Result = getObjCObjectPointerType(Result);
+ return Result;
}
}
return QualType();
}
-bool ASTContext::canAssignObjCInterfaces(const ObjCInterfaceType *LHS,
- const ObjCInterfaceType *RHS) {
+bool ASTContext::canAssignObjCInterfaces(const ObjCObjectType *LHS,
+ const ObjCObjectType *RHS) {
+ assert(LHS->getInterface() && "LHS is not an interface type");
+ assert(RHS->getInterface() && "RHS is not an interface type");
+
// Verify that the base decls are compatible: the RHS must be a subclass of
// the LHS.
- if (!LHS->getDecl()->isSuperClassOf(RHS->getDecl()))
+ if (!LHS->getInterface()->isSuperClassOf(RHS->getInterface()))
return false;
// RHS must have a superset of the protocols in the LHS. If the LHS is not
@@ -4249,15 +4228,15 @@ bool ASTContext::canAssignObjCInterfaces(const ObjCInterfaceType *LHS,
if (RHS->getNumProtocols() == 0)
return true; // FIXME: should return false!
- for (ObjCInterfaceType::qual_iterator LHSPI = LHS->qual_begin(),
- LHSPE = LHS->qual_end();
+ for (ObjCObjectType::qual_iterator LHSPI = LHS->qual_begin(),
+ LHSPE = LHS->qual_end();
LHSPI != LHSPE; LHSPI++) {
bool RHSImplementsProtocol = false;
// If the RHS doesn't implement the protocol on the left, the types
// are incompatible.
- for (ObjCInterfaceType::qual_iterator RHSPI = RHS->qual_begin(),
- RHSPE = RHS->qual_end();
+ for (ObjCObjectType::qual_iterator RHSPI = RHS->qual_begin(),
+ RHSPE = RHS->qual_end();
RHSPI != RHSPE; RHSPI++) {
if ((*RHSPI)->lookupProtocolNamed((*LHSPI)->getIdentifier())) {
RHSImplementsProtocol = true;
@@ -4483,6 +4462,10 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
if (RHSClass == Type::VariableArray || RHSClass == Type::IncompleteArray)
RHSClass = Type::ConstantArray;
+ // ObjCInterfaces are just specialized ObjCObjects.
+ if (LHSClass == Type::ObjCInterface) LHSClass = Type::ObjCObject;
+ if (RHSClass == Type::ObjCInterface) RHSClass = Type::ObjCObject;
+
// Canonicalize ExtVector -> Vector.
if (LHSClass == Type::ExtVector) LHSClass = Type::Vector;
if (RHSClass == Type::ExtVector) RHSClass = Type::Vector;
@@ -4522,6 +4505,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
assert(false && "C++ should never be in mergeTypes");
return QualType();
+ case Type::ObjCInterface:
case Type::IncompleteArray:
case Type::VariableArray:
case Type::FunctionProto:
@@ -4614,14 +4598,13 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
RHSCan->getAs<VectorType>()))
return LHS;
return QualType();
- case Type::ObjCInterface: {
- // Check if the interfaces are assignment compatible.
+ case Type::ObjCObject: {
+ // Check if the types are assignment compatible.
// FIXME: This should be type compatibility, e.g. whether
// "LHS x; RHS x;" at global scope is legal.
- const ObjCInterfaceType* LHSIface = LHS->getAs<ObjCInterfaceType>();
- const ObjCInterfaceType* RHSIface = RHS->getAs<ObjCInterfaceType>();
- if (LHSIface && RHSIface &&
- canAssignObjCInterfaces(LHSIface, RHSIface))
+ const ObjCObjectType* LHSIface = LHS->getAs<ObjCObjectType>();
+ const ObjCObjectType* RHSIface = RHS->getAs<ObjCObjectType>();
+ if (canAssignObjCInterfaces(LHSIface, RHSIface))
return LHS;
return QualType();
@@ -4645,6 +4628,87 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
return QualType();
}
+/// mergeObjCGCQualifiers - This routine merges ObjC's GC attribute of 'LHS' and
+/// 'RHS' attributes and returns the merged version; including for function
+/// return types.
+QualType ASTContext::mergeObjCGCQualifiers(QualType LHS, QualType RHS) {
+ QualType LHSCan = getCanonicalType(LHS),
+ RHSCan = getCanonicalType(RHS);
+ // If two types are identical, they are compatible.
+ if (LHSCan == RHSCan)
+ return LHS;
+ if (RHSCan->isFunctionType()) {
+ if (!LHSCan->isFunctionType())
+ return QualType();
+ QualType OldReturnType =
+ cast<FunctionType>(RHSCan.getTypePtr())->getResultType();
+ QualType NewReturnType =
+ cast<FunctionType>(LHSCan.getTypePtr())->getResultType();
+ QualType ResReturnType =
+ mergeObjCGCQualifiers(NewReturnType, OldReturnType);
+ if (ResReturnType.isNull())
+ return QualType();
+ if (ResReturnType == NewReturnType || ResReturnType == OldReturnType) {
+ // id foo(); ... __strong id foo(); or: __strong id foo(); ... id foo();
+ // In either case, use OldReturnType to build the new function type.
+ const FunctionType *F = LHS->getAs<FunctionType>();
+ if (const FunctionProtoType *FPT = cast<FunctionProtoType>(F)) {
+ FunctionType::ExtInfo Info = getFunctionExtInfo(LHS);
+ QualType ResultType
+ = getFunctionType(OldReturnType, FPT->arg_type_begin(),
+ FPT->getNumArgs(), FPT->isVariadic(),
+ FPT->getTypeQuals(),
+ FPT->hasExceptionSpec(),
+ FPT->hasAnyExceptionSpec(),
+ FPT->getNumExceptions(),
+ FPT->exception_begin(),
+ Info);
+ return ResultType;
+ }
+ }
+ return QualType();
+ }
+
+ // If the qualifiers are different, the types can still be merged.
+ Qualifiers LQuals = LHSCan.getLocalQualifiers();
+ Qualifiers RQuals = RHSCan.getLocalQualifiers();
+ if (LQuals != RQuals) {
+ // If any of these qualifiers are different, we have a type mismatch.
+ if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
+ LQuals.getAddressSpace() != RQuals.getAddressSpace())
+ return QualType();
+
+ // Exactly one GC qualifier difference is allowed: __strong is
+ // okay if the other type has no GC qualifier but is an Objective
+ // C object pointer (i.e. implicitly strong by default). We fix
+ // this by pretending that the unqualified type was actually
+ // qualified __strong.
+ Qualifiers::GC GC_L = LQuals.getObjCGCAttr();
+ Qualifiers::GC GC_R = RQuals.getObjCGCAttr();
+ assert((GC_L != GC_R) && "unequal qualifier sets had only equal elements");
+
+ if (GC_L == Qualifiers::Weak || GC_R == Qualifiers::Weak)
+ return QualType();
+
+ if (GC_L == Qualifiers::Strong)
+ return LHS;
+ if (GC_R == Qualifiers::Strong)
+ return RHS;
+ return QualType();
+ }
+
+ if (LHSCan->isObjCObjectPointerType() && RHSCan->isObjCObjectPointerType()) {
+ QualType LHSBaseQT = LHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType RHSBaseQT = RHS->getAs<ObjCObjectPointerType>()->getPointeeType();
+ QualType ResQT = mergeObjCGCQualifiers(LHSBaseQT, RHSBaseQT);
+ if (ResQT == LHSBaseQT)
+ return LHS;
+ if (ResQT == RHSBaseQT)
+ return RHS;
+ }
+ return QualType();
+}
+
//===----------------------------------------------------------------------===//
// Integer Predicates
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/ASTDiagnostic.cpp b/lib/AST/ASTDiagnostic.cpp
index e4cd2a9c1053..0d609bfa6d39 100644
--- a/lib/AST/ASTDiagnostic.cpp
+++ b/lib/AST/ASTDiagnostic.cpp
@@ -19,65 +19,41 @@
using namespace clang;
-/// Determines whether we should have an a.k.a. clause when
-/// pretty-printing a type. There are three main criteria:
-///
-/// 1) Some types provide very minimal sugar that doesn't impede the
-/// user's understanding --- for example, elaborated type
-/// specifiers. If this is all the sugar we see, we don't want an
-/// a.k.a. clause.
-/// 2) Some types are technically sugared but are much more familiar
-/// when seen in their sugared form --- for example, va_list,
-/// vector types, and the magic Objective C types. We don't
-/// want to desugar these, even if we do produce an a.k.a. clause.
-/// 3) Some types may have already been desugared previously in this diagnostic.
-/// if this is the case, doing another "aka" would just be clutter.
-///
-static bool ShouldAKA(ASTContext &Context, QualType QT,
- const Diagnostic::ArgumentValue *PrevArgs,
- unsigned NumPrevArgs,
- QualType &DesugaredQT) {
- QualType InputTy = QT;
-
- bool AKA = false;
- QualifierCollector Qc;
-
+// Returns a desugared version of the QualType, and marks ShouldAKA as true
+// whenever we remove significant sugar from the type.
+static QualType Desugar(ASTContext &Context, QualType QT, bool &ShouldAKA) {
+ QualifierCollector QC;
+
while (true) {
- const Type *Ty = Qc.strip(QT);
-
+ const Type *Ty = QC.strip(QT);
+
// Don't aka just because we saw an elaborated type...
if (isa<ElaboratedType>(Ty)) {
QT = cast<ElaboratedType>(Ty)->desugar();
continue;
}
-
- // ...or a qualified name type...
- if (isa<QualifiedNameType>(Ty)) {
- QT = cast<QualifiedNameType>(Ty)->desugar();
- continue;
- }
// ...or a substituted template type parameter.
if (isa<SubstTemplateTypeParmType>(Ty)) {
QT = cast<SubstTemplateTypeParmType>(Ty)->desugar();
continue;
}
-
+
// Don't desugar template specializations.
if (isa<TemplateSpecializationType>(Ty))
break;
-
+
// Don't desugar magic Objective-C types.
if (QualType(Ty,0) == Context.getObjCIdType() ||
QualType(Ty,0) == Context.getObjCClassType() ||
QualType(Ty,0) == Context.getObjCSelType() ||
QualType(Ty,0) == Context.getObjCProtoType())
break;
-
+
// Don't desugar va_list.
if (QualType(Ty,0) == Context.getBuiltinVaListType())
break;
-
+
// Otherwise, do a single-step desugar.
QualType Underlying;
bool IsSugar = false;
@@ -94,50 +70,56 @@ break; \
}
#include "clang/AST/TypeNodes.def"
}
-
+
// If it wasn't sugared, we're done.
if (!IsSugar)
break;
-
+
// If the desugared type is a vector type, we don't want to expand
// it, it will turn into an attribute mess. People want their "vec4".
if (isa<VectorType>(Underlying))
break;
-
+
// Don't desugar through the primary typedef of an anonymous type.
if (isa<TagType>(Underlying) && isa<TypedefType>(QT))
if (cast<TagType>(Underlying)->getDecl()->getTypedefForAnonDecl() ==
cast<TypedefType>(QT)->getDecl())
break;
-
- // Otherwise, we're tearing through something opaque; note that
- // we'll eventually need an a.k.a. clause and keep going.
- AKA = true;
+
+ // Record that we actually looked through an opaque type here.
+ ShouldAKA = true;
QT = Underlying;
- continue;
}
-
- // If we never tore through opaque sugar, don't print aka.
- if (!AKA) return false;
-
- // If we did, check to see if we already desugared this type in this
- // diagnostic. If so, don't do it again.
- for (unsigned i = 0; i != NumPrevArgs; ++i) {
- // TODO: Handle ak_declcontext case.
- if (PrevArgs[i].first == Diagnostic::ak_qualtype) {
- void *Ptr = (void*)PrevArgs[i].second;
- QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
- if (PrevTy == InputTy)
- return false;
- }
+
+ // If we have a pointer-like type, desugar the pointee as well.
+ // FIXME: Handle other pointer-like types.
+ if (const PointerType *Ty = QT->getAs<PointerType>()) {
+ QT = Context.getPointerType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
+ } else if (const LValueReferenceType *Ty = QT->getAs<LValueReferenceType>()) {
+ QT = Context.getLValueReferenceType(Desugar(Context, Ty->getPointeeType(),
+ ShouldAKA));
}
-
- DesugaredQT = Qc.apply(QT);
- return true;
+
+ return QC.apply(QT);
}
/// \brief Convert the given type to a string suitable for printing as part of
-/// a diagnostic.
+/// a diagnostic.
+///
+/// There are three main criteria when determining whether we should have an
+/// a.k.a. clause when pretty-printing a type:
+///
+/// 1) Some types provide very minimal sugar that doesn't impede the
+/// user's understanding --- for example, elaborated type
+/// specifiers. If this is all the sugar we see, we don't want an
+/// a.k.a. clause.
+/// 2) Some types are technically sugared but are much more familiar
+/// when seen in their sugared form --- for example, va_list,
+/// vector types, and the magic Objective C types. We don't
+/// want to desugar these, even if we do produce an a.k.a. clause.
+/// 3) Some types may have already been desugared previously in this diagnostic.
+/// if this is the case, doing another "aka" would just be clutter.
///
/// \param Context the context in which the type was allocated
/// \param Ty the type to print
@@ -147,18 +129,35 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
unsigned NumPrevArgs) {
// FIXME: Playing with std::string is really slow.
std::string S = Ty.getAsString(Context.PrintingPolicy);
-
+
+ // Check to see if we already desugared this type in this
+ // diagnostic. If so, don't do it again.
+ bool Repeated = false;
+ for (unsigned i = 0; i != NumPrevArgs; ++i) {
+ // TODO: Handle ak_declcontext case.
+ if (PrevArgs[i].first == Diagnostic::ak_qualtype) {
+ void *Ptr = (void*)PrevArgs[i].second;
+ QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevTy == Ty) {
+ Repeated = true;
+ break;
+ }
+ }
+ }
+
// Consider producing an a.k.a. clause if removing all the direct
// sugar gives us something "significantly different".
-
- QualType DesugaredTy;
- if (ShouldAKA(Context, Ty, PrevArgs, NumPrevArgs, DesugaredTy)) {
- S = "'"+S+"' (aka '";
- S += DesugaredTy.getAsString(Context.PrintingPolicy);
- S += "')";
- return S;
+ if (!Repeated) {
+ bool ShouldAKA = false;
+ QualType DesugaredTy = Desugar(Context, Ty, ShouldAKA);
+ if (ShouldAKA) {
+ S = "'"+S+"' (aka '";
+ S += DesugaredTy.getAsString(Context.PrintingPolicy);
+ S += "')";
+ return S;
+ }
}
-
+
S = "'" + S + "'";
return S;
}
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index ae09d7978ea5..6ed08d1e1e29 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -68,13 +68,13 @@ namespace {
// FIXME: DependentDecltypeType
QualType VisitRecordType(RecordType *T);
QualType VisitEnumType(EnumType *T);
- QualType VisitElaboratedType(ElaboratedType *T);
// FIXME: TemplateTypeParmType
// FIXME: SubstTemplateTypeParmType
// FIXME: TemplateSpecializationType
- QualType VisitQualifiedNameType(QualifiedNameType *T);
+ QualType VisitElaboratedType(ElaboratedType *T);
// FIXME: DependentNameType
QualType VisitObjCInterfaceType(ObjCInterfaceType *T);
+ QualType VisitObjCObjectType(ObjCObjectType *T);
QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T);
// Importing declarations
@@ -532,19 +532,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
cast<TagType>(T2)->getDecl()))
return false;
break;
-
- case Type::Elaborated: {
- const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
- const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
- if (Elab1->getTagKind() != Elab2->getTagKind())
- return false;
- if (!IsStructurallyEquivalent(Context,
- Elab1->getUnderlyingType(),
- Elab2->getUnderlyingType()))
- return false;
- break;
- }
-
+
case Type::TemplateTypeParm: {
const TemplateTypeParmType *Parm1 = cast<TemplateTypeParmType>(T1);
const TemplateTypeParmType *Parm2 = cast<TemplateTypeParmType>(T2);
@@ -594,16 +582,19 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
- case Type::QualifiedName: {
- const QualifiedNameType *Qual1 = cast<QualifiedNameType>(T1);
- const QualifiedNameType *Qual2 = cast<QualifiedNameType>(T2);
+ case Type::Elaborated: {
+ const ElaboratedType *Elab1 = cast<ElaboratedType>(T1);
+ const ElaboratedType *Elab2 = cast<ElaboratedType>(T2);
+ // CHECKME: what if a keyword is ETK_None or ETK_typename ?
+ if (Elab1->getKeyword() != Elab2->getKeyword())
+ return false;
if (!IsStructurallyEquivalent(Context,
- Qual1->getQualifier(),
- Qual2->getQualifier()))
+ Elab1->getQualifier(),
+ Elab2->getQualifier()))
return false;
if (!IsStructurallyEquivalent(Context,
- Qual1->getNamedType(),
- Qual2->getNamedType()))
+ Elab1->getNamedType(),
+ Elab2->getNamedType()))
return false;
break;
}
@@ -642,12 +633,22 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Context,
Iface1->getDecl(), Iface2->getDecl()))
return false;
- if (Iface1->getNumProtocols() != Iface2->getNumProtocols())
+ break;
+ }
+
+ case Type::ObjCObject: {
+ const ObjCObjectType *Obj1 = cast<ObjCObjectType>(T1);
+ const ObjCObjectType *Obj2 = cast<ObjCObjectType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Obj1->getBaseType(),
+ Obj2->getBaseType()))
+ return false;
+ if (Obj1->getNumProtocols() != Obj2->getNumProtocols())
return false;
- for (unsigned I = 0, N = Iface1->getNumProtocols(); I != N; ++I) {
+ for (unsigned I = 0, N = Obj1->getNumProtocols(); I != N; ++I) {
if (!IsStructurallyEquivalent(Context,
- Iface1->getProtocol(I),
- Iface2->getProtocol(I)))
+ Obj1->getProtocol(I),
+ Obj2->getProtocol(I)))
return false;
}
break;
@@ -660,14 +661,6 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Ptr1->getPointeeType(),
Ptr2->getPointeeType()))
return false;
- if (Ptr1->getNumProtocols() != Ptr2->getNumProtocols())
- return false;
- for (unsigned I = 0, N = Ptr1->getNumProtocols(); I != N; ++I) {
- if (!IsStructurallyEquivalent(Context,
- Ptr1->getProtocol(I),
- Ptr2->getProtocol(I)))
- return false;
- }
break;
}
@@ -1293,24 +1286,20 @@ QualType ASTNodeImporter::VisitEnumType(EnumType *T) {
}
QualType ASTNodeImporter::VisitElaboratedType(ElaboratedType *T) {
- QualType ToUnderlyingType = Importer.Import(T->getUnderlyingType());
- if (ToUnderlyingType.isNull())
- return QualType();
-
- return Importer.getToContext().getElaboratedType(ToUnderlyingType,
- T->getTagKind());
-}
-
-QualType ASTNodeImporter::VisitQualifiedNameType(QualifiedNameType *T) {
- NestedNameSpecifier *ToQualifier = Importer.Import(T->getQualifier());
- if (!ToQualifier)
- return QualType();
+ NestedNameSpecifier *ToQualifier = 0;
+ // Note: the qualifier in an ElaboratedType is optional.
+ if (T->getQualifier()) {
+ ToQualifier = Importer.Import(T->getQualifier());
+ if (!ToQualifier)
+ return QualType();
+ }
QualType ToNamedType = Importer.Import(T->getNamedType());
if (ToNamedType.isNull())
return QualType();
- return Importer.getToContext().getQualifiedNameType(ToQualifier, ToNamedType);
+ return Importer.getToContext().getElaboratedType(T->getKeyword(),
+ ToQualifier, ToNamedType);
}
QualType ASTNodeImporter::VisitObjCInterfaceType(ObjCInterfaceType *T) {
@@ -1319,8 +1308,16 @@ QualType ASTNodeImporter::VisitObjCInterfaceType(ObjCInterfaceType *T) {
if (!Class)
return QualType();
+ return Importer.getToContext().getObjCInterfaceType(Class);
+}
+
+QualType ASTNodeImporter::VisitObjCObjectType(ObjCObjectType *T) {
+ QualType ToBaseType = Importer.Import(T->getBaseType());
+ if (ToBaseType.isNull())
+ return QualType();
+
llvm::SmallVector<ObjCProtocolDecl *, 4> Protocols;
- for (ObjCInterfaceType::qual_iterator P = T->qual_begin(),
+ for (ObjCObjectType::qual_iterator P = T->qual_begin(),
PEnd = T->qual_end();
P != PEnd; ++P) {
ObjCProtocolDecl *Protocol
@@ -1330,9 +1327,9 @@ QualType ASTNodeImporter::VisitObjCInterfaceType(ObjCInterfaceType *T) {
Protocols.push_back(Protocol);
}
- return Importer.getToContext().getObjCInterfaceType(Class,
- Protocols.data(),
- Protocols.size());
+ return Importer.getToContext().getObjCObjectType(ToBaseType,
+ Protocols.data(),
+ Protocols.size());
}
QualType ASTNodeImporter::VisitObjCObjectPointerType(ObjCObjectPointerType *T) {
@@ -1340,20 +1337,7 @@ QualType ASTNodeImporter::VisitObjCObjectPointerType(ObjCObjectPointerType *T) {
if (ToPointeeType.isNull())
return QualType();
- llvm::SmallVector<ObjCProtocolDecl *, 4> Protocols;
- for (ObjCObjectPointerType::qual_iterator P = T->qual_begin(),
- PEnd = T->qual_end();
- P != PEnd; ++P) {
- ObjCProtocolDecl *Protocol
- = dyn_cast_or_null<ObjCProtocolDecl>(Importer.Import(*P));
- if (!Protocol)
- return QualType();
- Protocols.push_back(Protocol);
- }
-
- return Importer.getToContext().getObjCObjectPointerType(ToPointeeType,
- Protocols.data(),
- Protocols.size());
+ return Importer.getToContext().getObjCObjectPointerType(ToPointeeType);
}
//----------------------------------------------------------------------------
@@ -1617,7 +1601,12 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
D2->startDefinition();
ImportDeclContext(D);
- D2->completeDefinition(T, ToPromotionType);
+
+ // FIXME: we might need to merge the number of positive or negative bits
+ // if the enumerator lists don't match.
+ D2->completeDefinition(T, ToPromotionType,
+ D->getNumPositiveBits(),
+ D->getNumNegativeBits());
}
return D2;
@@ -2961,7 +2950,7 @@ TypeSourceInfo *ASTImporter::Import(TypeSourceInfo *FromTSI) {
return 0;
return ToContext.getTrivialTypeSourceInfo(T,
- FromTSI->getTypeLoc().getFullSourceRange().getBegin());
+ FromTSI->getTypeLoc().getSourceRange().getBegin());
}
Decl *ASTImporter::Import(Decl *FromD) {
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index 423aa065e57c..0fab22caced8 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -74,6 +74,7 @@ void NonNullAttr::Destroy(ASTContext &C) {
// FIXME: Can we use variadic macro to define DEF_SIMPLE_ATTR_CLONE for
// "non-simple" classes?
+DEF_SIMPLE_ATTR_CLONE(AlignMac68k)
DEF_SIMPLE_ATTR_CLONE(AlwaysInline)
DEF_SIMPLE_ATTR_CLONE(AnalyzerNoReturn)
DEF_SIMPLE_ATTR_CLONE(BaseCheck)
@@ -100,6 +101,7 @@ DEF_SIMPLE_ATTR_CLONE(Override)
DEF_SIMPLE_ATTR_CLONE(Packed)
DEF_SIMPLE_ATTR_CLONE(Pure)
DEF_SIMPLE_ATTR_CLONE(StdCall)
+DEF_SIMPLE_ATTR_CLONE(ThisCall)
DEF_SIMPLE_ATTR_CLONE(TransparentUnion)
DEF_SIMPLE_ATTR_CLONE(Unavailable)
DEF_SIMPLE_ATTR_CLONE(Unused)
@@ -110,8 +112,8 @@ DEF_SIMPLE_ATTR_CLONE(WeakImport)
DEF_SIMPLE_ATTR_CLONE(WeakRef)
DEF_SIMPLE_ATTR_CLONE(X86ForceAlignArgPointer)
-Attr* PragmaPackAttr::clone(ASTContext &C) const {
- return ::new (C) PragmaPackAttr(Alignment);
+Attr* MaxFieldAlignmentAttr::clone(ASTContext &C) const {
+ return ::new (C) MaxFieldAlignmentAttr(Alignment);
}
Attr* AlignedAttr::clone(ASTContext &C) const {
@@ -142,6 +144,10 @@ Attr *IBOutletAttr::clone(ASTContext &C) const {
return ::new (C) IBOutletAttr;
}
+Attr *IBOutletCollectionAttr::clone(ASTContext &C) const {
+ return ::new (C) IBOutletCollectionAttr(D);
+}
+
Attr *IBActionAttr::clone(ASTContext &C) const {
return ::new (C) IBActionAttr;
}
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 91aaddc9a481..bce3646feedb 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -39,4 +39,4 @@ add_clang_library(clangAST
TypePrinter.cpp
)
-add_dependencies(clangAST ClangDiagnosticAST)
+add_dependencies(clangAST ClangDiagnosticAST ClangStmtNodes)
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index a9f223045864..d616e42e0076 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -49,9 +49,8 @@ CXXBasePaths::decl_iterator CXXBasePaths::found_decls_end() {
/// ambiguous, i.e., there are two or more paths that refer to
/// different base class subobjects of the same type. BaseType must be
/// an unqualified, canonical class type.
-bool CXXBasePaths::isAmbiguous(QualType BaseType) {
- assert(BaseType.isCanonical() && "Base type must be the canonical type");
- assert(BaseType.hasQualifiers() == 0 && "Base type must be unqualified");
+bool CXXBasePaths::isAmbiguous(CanQualType BaseType) {
+ BaseType = BaseType.getUnqualifiedType();
std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
return Subobjects.second + (Subobjects.first? 1 : 0) > 1;
}
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index ffe49671d8a3..ffdcb471d082 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -23,16 +23,11 @@
#include "clang/AST/PrettyPrinter.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/IdentifierTable.h"
-#include "clang/Parse/DeclSpec.h"
+#include "clang/Basic/Specifiers.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
-/// \brief Return the TypeLoc wrapper for the type source info.
-TypeLoc TypeSourceInfo::getTypeLoc() const {
- return TypeLoc(Ty, (void*)(this + 1));
-}
-
//===----------------------------------------------------------------------===//
// NamedDecl Implementation
//===----------------------------------------------------------------------===//
@@ -541,7 +536,7 @@ SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
while (true) {
TypeLoc NextTL = TL.getNextTypeLoc();
if (!NextTL)
- return TL.getSourceRange().getBegin();
+ return TL.getLocalSourceRange().getBegin();
TL = NextTL;
}
}
@@ -1224,9 +1219,8 @@ FunctionDecl::setInstantiationOfMemberFunction(FunctionDecl *FD,
}
bool FunctionDecl::isImplicitlyInstantiable() const {
- // If this function already has a definition or is invalid, it can't be
- // implicitly instantiated.
- if (isInvalidDecl() || getBody())
+ // If the function is invalid, it can't be implicitly instantiated.
+ if (isInvalidDecl())
return false;
switch (getTemplateSpecializationKind()) {
@@ -1295,11 +1289,22 @@ FunctionDecl::getTemplateSpecializationArgs() const {
return 0;
}
+const TemplateArgumentListInfo *
+FunctionDecl::getTemplateSpecializationArgsAsWritten() const {
+ if (FunctionTemplateSpecializationInfo *Info
+ = TemplateOrSpecialization
+ .dyn_cast<FunctionTemplateSpecializationInfo*>()) {
+ return Info->TemplateArgumentsAsWritten;
+ }
+ return 0;
+}
+
void
FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
- TemplateSpecializationKind TSK) {
+ TemplateSpecializationKind TSK,
+ const TemplateArgumentListInfo *TemplateArgsAsWritten) {
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
FunctionTemplateSpecializationInfo *Info
@@ -1311,6 +1316,7 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
Info->Template.setPointer(Template);
Info->Template.setInt(TSK - 1);
Info->TemplateArguments = TemplateArgs;
+ Info->TemplateArgumentsAsWritten = TemplateArgsAsWritten;
TemplateOrSpecialization = Info;
// Insert this function template specialization into the set of known
@@ -1475,6 +1481,12 @@ TagDecl* TagDecl::getCanonicalDecl() {
return getFirstDeclaration();
}
+void TagDecl::setTypedefForAnonDecl(TypedefDecl *TDD) {
+ TypedefDeclOrQualifier = TDD;
+ if (TypeForDecl)
+ TypeForDecl->ClearLinkageCache();
+}
+
void TagDecl::startDefinition() {
if (TagType *TagT = const_cast<TagType *>(TypeForDecl->getAs<TagType>())) {
TagT->decl.setPointer(this);
@@ -1509,6 +1521,7 @@ void TagDecl::completeDefinition() {
TypeForDecl->getAs<InjectedClassNameType>())) {
assert(Injected->Decl == this &&
"Attempt to redefine a class template definition?");
+ (void)Injected;
}
}
@@ -1524,16 +1537,6 @@ TagDecl* TagDecl::getDefinition() const {
return 0;
}
-TagDecl::TagKind TagDecl::getTagKindForTypeSpec(unsigned TypeSpec) {
- switch (TypeSpec) {
- default: llvm_unreachable("unexpected type specifier");
- case DeclSpec::TST_struct: return TK_struct;
- case DeclSpec::TST_class: return TK_class;
- case DeclSpec::TST_union: return TK_union;
- case DeclSpec::TST_enum: return TK_enum;
- }
-}
-
void TagDecl::setQualifierInfo(NestedNameSpecifier *Qualifier,
SourceRange QualifierRange) {
if (Qualifier) {
@@ -1571,10 +1574,14 @@ void EnumDecl::Destroy(ASTContext& C) {
}
void EnumDecl::completeDefinition(QualType NewType,
- QualType NewPromotionType) {
+ QualType NewPromotionType,
+ unsigned NumPositiveBits,
+ unsigned NumNegativeBits) {
assert(!isDefinition() && "Cannot redefine enums!");
IntegerType = NewType;
PromotionType = NewPromotionType;
+ setNumPositiveBits(NumPositiveBits);
+ setNumNegativeBits(NumNegativeBits);
TagDecl::completeDefinition();
}
@@ -1620,6 +1627,17 @@ void RecordDecl::completeDefinition() {
TagDecl::completeDefinition();
}
+ValueDecl *RecordDecl::getAnonymousStructOrUnionObject() {
+ // Force the decl chain to come into existence properly.
+ if (!getNextDeclInContext()) getParent()->decls_begin();
+
+ assert(isAnonymousStructOrUnion());
+ ValueDecl *D = cast<ValueDecl>(getNextDeclInContext());
+ assert(D->getType()->isRecordType());
+ assert(D->getType()->getAs<RecordType>()->getDecl() == this);
+ return D;
+}
+
//===----------------------------------------------------------------------===//
// BlockDecl Implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index b5aec0c5125c..42a372632099 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -811,12 +811,12 @@ DeclContext::lookup(DeclarationName Name) {
buildLookup(this);
if (!LookupPtr)
- return lookup_result(0, 0);
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
}
StoredDeclsMap::iterator Pos = LookupPtr->find(Name);
if (Pos == LookupPtr->end())
- return lookup_result(0, 0);
+ return lookup_result(lookup_iterator(0), lookup_iterator(0));
return Pos->second.getLookupResult(getParentASTContext());
}
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 68f4a821e689..cd7afd98b63d 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -137,7 +137,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
data().VBases[I] =
CXXBaseSpecifier(VBaseClassDecl->getSourceRange(), true,
- VBaseClassDecl->getTagKind() == RecordDecl::TK_class,
+ VBaseClassDecl->getTagKind() == TTK_Class,
VBases[I]->getAccessSpecifier(), VBaseType);
}
}
@@ -700,8 +700,9 @@ CXXBaseOrMemberInitializer::
CXXBaseOrMemberInitializer(ASTContext &Context,
TypeSourceInfo *TInfo, bool IsVirtual,
SourceLocation L, Expr *Init, SourceLocation R)
- : BaseOrMember(TInfo), Init(Init), AnonUnionMember(0), IsVirtual(IsVirtual),
- LParenLoc(L), RParenLoc(R)
+ : BaseOrMember(TInfo), Init(Init), AnonUnionMember(0),
+ LParenLoc(L), RParenLoc(R), IsVirtual(IsVirtual), IsWritten(false),
+ SourceOrderOrNumArrayIndices(0)
{
}
@@ -709,14 +710,46 @@ CXXBaseOrMemberInitializer::
CXXBaseOrMemberInitializer(ASTContext &Context,
FieldDecl *Member, SourceLocation MemberLoc,
SourceLocation L, Expr *Init, SourceLocation R)
+ : BaseOrMember(Member), MemberLocation(MemberLoc), Init(Init),
+ AnonUnionMember(0), LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(0)
+{
+}
+
+CXXBaseOrMemberInitializer::
+CXXBaseOrMemberInitializer(ASTContext &Context,
+ FieldDecl *Member, SourceLocation MemberLoc,
+ SourceLocation L, Expr *Init, SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices)
: BaseOrMember(Member), MemberLocation(MemberLoc), Init(Init),
- AnonUnionMember(0), LParenLoc(L), RParenLoc(R)
+ AnonUnionMember(0), LParenLoc(L), RParenLoc(R), IsVirtual(false),
+ IsWritten(false), SourceOrderOrNumArrayIndices(NumIndices)
{
+ VarDecl **MyIndices = reinterpret_cast<VarDecl **> (this + 1);
+ memcpy(MyIndices, Indices, NumIndices * sizeof(VarDecl *));
+}
+
+CXXBaseOrMemberInitializer *
+CXXBaseOrMemberInitializer::Create(ASTContext &Context,
+ FieldDecl *Member,
+ SourceLocation MemberLoc,
+ SourceLocation L,
+ Expr *Init,
+ SourceLocation R,
+ VarDecl **Indices,
+ unsigned NumIndices) {
+ void *Mem = Context.Allocate(sizeof(CXXBaseOrMemberInitializer) +
+ sizeof(VarDecl *) * NumIndices,
+ llvm::alignof<CXXBaseOrMemberInitializer>());
+ return new (Mem) CXXBaseOrMemberInitializer(Context, Member, MemberLoc,
+ L, Init, R, Indices, NumIndices);
}
void CXXBaseOrMemberInitializer::Destroy(ASTContext &Context) {
if (Init)
Init->Destroy(Context);
+ // FIXME: Destroy indices
this->~CXXBaseOrMemberInitializer();
}
@@ -745,7 +778,7 @@ SourceLocation CXXBaseOrMemberInitializer::getSourceLocation() const {
if (isMemberInitializer())
return getMemberLocation();
- return getBaseClassLoc().getSourceRange().getBegin();
+ return getBaseClassLoc().getLocalSourceRange().getBegin();
}
SourceRange CXXBaseOrMemberInitializer::getSourceRange() const {
@@ -753,6 +786,12 @@ SourceRange CXXBaseOrMemberInitializer::getSourceRange() const {
}
CXXConstructorDecl *
+CXXConstructorDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) CXXConstructorDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, false, false, false);
+}
+
+CXXConstructorDecl *
CXXConstructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation L, DeclarationName N,
QualType T, TypeSourceInfo *TInfo,
@@ -855,6 +894,12 @@ bool CXXConstructorDecl::isCopyConstructorLikeSpecialization() const {
}
CXXDestructorDecl *
+CXXDestructorDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) CXXDestructorDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), false, false);
+}
+
+CXXDestructorDecl *
CXXDestructorDecl::Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation L, DeclarationName N,
QualType T, bool isInline,
@@ -871,6 +916,12 @@ CXXConstructorDecl::Destroy(ASTContext& C) {
}
CXXConversionDecl *
+CXXConversionDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) CXXConversionDecl(0, SourceLocation(), DeclarationName(),
+ QualType(), 0, false, false);
+}
+
+CXXConversionDecl *
CXXConversionDecl::Create(ASTContext &C, CXXRecordDecl *RD,
SourceLocation L, DeclarationName N,
QualType T, TypeSourceInfo *TInfo,
@@ -908,6 +959,12 @@ NamespaceDecl *UsingDirectiveDecl::getNominatedNamespace() {
return cast_or_null<NamespaceDecl>(NominatedNamespace);
}
+void UsingDirectiveDecl::setNominatedNamespace(NamedDecl* ND) {
+ assert((isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND)) &&
+ "expected a NamespaceDecl or NamespaceAliasDecl");
+ NominatedNamespace = ND;
+}
+
NamespaceAliasDecl *NamespaceAliasDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
SourceLocation AliasLoc,
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index c498dea17703..26e291c94f64 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -94,6 +94,10 @@ TemplateDecl::~TemplateDecl() {
// FunctionTemplateDecl Implementation
//===----------------------------------------------------------------------===//
+void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
@@ -129,8 +133,9 @@ FunctionTemplateDecl::Common *FunctionTemplateDecl::getCommonPtr() {
First = First->getPreviousDeclaration();
if (First->CommonOrPrev.isNull()) {
- // FIXME: Allocate with the ASTContext
- First->CommonOrPrev = new Common;
+ Common *CommonPtr = new (getASTContext()) Common;
+ getASTContext().AddDeallocation(DeallocateCommon, CommonPtr);
+ First->CommonOrPrev = CommonPtr;
}
return First->CommonOrPrev.get<Common*>();
}
@@ -139,6 +144,10 @@ FunctionTemplateDecl::Common *FunctionTemplateDecl::getCommonPtr() {
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
+void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
+ static_cast<Common *>(Ptr)->~Common();
+}
+
ClassTemplateDecl *ClassTemplateDecl::getCanonicalDecl() {
ClassTemplateDecl *Template = this;
while (Template->getPreviousDeclaration())
@@ -156,8 +165,10 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
Common *CommonPtr;
if (PrevDecl)
CommonPtr = PrevDecl->CommonPtr;
- else
+ else {
CommonPtr = new (C) Common;
+ C.AddDeallocation(DeallocateCommon, CommonPtr);
+ }
return new (C) ClassTemplateDecl(DC, L, Name, Params, Decl, PrevDecl,
CommonPtr);
@@ -259,7 +270,7 @@ TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
- return DefaultArgument->getTypeLoc().getFullSourceRange().getBegin();
+ return DefaultArgument->getTypeLoc().getSourceRange().getBegin();
}
unsigned TemplateTypeParmDecl::getDepth() const {
@@ -303,22 +314,14 @@ TemplateTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
// TemplateArgumentListBuilder Implementation
//===----------------------------------------------------------------------===//
-void TemplateArgumentListBuilder::Append(const TemplateArgument& Arg) {
- switch (Arg.getKind()) {
- default: break;
- case TemplateArgument::Type:
- assert(Arg.getAsType().isCanonical() && "Type must be canonical!");
- break;
- }
-
- assert(NumFlatArgs < MaxFlatArgs && "Argument list builder is full!");
+void TemplateArgumentListBuilder::Append(const TemplateArgument &Arg) {
+ assert((Arg.getKind() != TemplateArgument::Type ||
+ Arg.getAsType().isCanonical()) && "Type must be canonical!");
+ assert(FlatArgs.size() < MaxFlatArgs && "Argument list builder is full!");
assert(!StructuredArgs &&
"Can't append arguments when an argument pack has been added!");
- if (!FlatArgs)
- FlatArgs = new TemplateArgument[MaxFlatArgs];
-
- FlatArgs[NumFlatArgs++] = Arg;
+ FlatArgs.push_back(Arg);
}
void TemplateArgumentListBuilder::BeginPack() {
@@ -326,7 +329,7 @@ void TemplateArgumentListBuilder::BeginPack() {
assert(!StructuredArgs && "Argument list already contains a pack!");
AddingToPack = true;
- PackBeginIndex = NumFlatArgs;
+ PackBeginIndex = FlatArgs.size();
}
void TemplateArgumentListBuilder::EndPack() {
@@ -335,6 +338,7 @@ void TemplateArgumentListBuilder::EndPack() {
AddingToPack = false;
+ // FIXME: This is a memory leak!
StructuredArgs = new TemplateArgument[MaxStructuredArgs];
// First copy the flat entries over to the list (if any)
@@ -346,22 +350,14 @@ void TemplateArgumentListBuilder::EndPack() {
// Next, set the pack.
TemplateArgument *PackArgs = 0;
unsigned NumPackArgs = NumFlatArgs - PackBeginIndex;
+ // FIXME: NumPackArgs shouldn't be negative here???
if (NumPackArgs)
- PackArgs = &FlatArgs[PackBeginIndex];
+ PackArgs = FlatArgs.data()+PackBeginIndex;
StructuredArgs[NumStructuredArgs++].setArgumentPack(PackArgs, NumPackArgs,
/*CopyArgs=*/false);
}
-void TemplateArgumentListBuilder::ReleaseArgs() {
- FlatArgs = 0;
- NumFlatArgs = 0;
- MaxFlatArgs = 0;
- StructuredArgs = 0;
- NumStructuredArgs = 0;
- MaxStructuredArgs = 0;
-}
-
//===----------------------------------------------------------------------===//
// TemplateArgumentList Implementation
//===----------------------------------------------------------------------===//
@@ -376,35 +372,56 @@ TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
if (!TakeArgs)
return;
- if (Builder.getStructuredArguments() == Builder.getFlatArguments())
+ // If this does take ownership of the arguments, then we have to new them
+ // and copy over.
+ TemplateArgument *NewArgs =
+ new (Context) TemplateArgument[Builder.flatSize()];
+ std::copy(Builder.getFlatArguments(),
+ Builder.getFlatArguments()+Builder.flatSize(), NewArgs);
+ FlatArguments.setPointer(NewArgs);
+
+ // Just reuse the structured and flat arguments array if possible.
+ if (Builder.getStructuredArguments() == Builder.getFlatArguments()) {
+ StructuredArguments.setPointer(NewArgs);
StructuredArguments.setInt(0);
- Builder.ReleaseArgs();
+ } else {
+ TemplateArgument *NewSArgs =
+ new (Context) TemplateArgument[Builder.flatSize()];
+ std::copy(Builder.getFlatArguments(),
+ Builder.getFlatArguments()+Builder.flatSize(), NewSArgs);
+ StructuredArguments.setPointer(NewSArgs);
+ }
}
-TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList &Other)
- : FlatArguments(Other.FlatArguments.getPointer(), 1),
- NumFlatArguments(Other.flat_size()),
- StructuredArguments(Other.StructuredArguments.getPointer(), 1),
- NumStructuredArguments(Other.NumStructuredArguments) { }
+/// Produces a shallow copy of the given template argument list. This
+/// assumes that the input argument list outlives it. This takes the list as
+/// a pointer to avoid looking like a copy constructor, since this really
+/// really isn't safe to use that way.
+TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList *Other)
+ : FlatArguments(Other->FlatArguments.getPointer(), false),
+ NumFlatArguments(Other->flat_size()),
+ StructuredArguments(Other->StructuredArguments.getPointer(), false),
+ NumStructuredArguments(Other->NumStructuredArguments) { }
-TemplateArgumentList::~TemplateArgumentList() {
- // FIXME: Deallocate template arguments
+void TemplateArgumentList::Destroy(ASTContext &C) {
+ if (FlatArguments.getInt())
+ C.Deallocate((void*)FlatArguments.getPointer());
+ if (StructuredArguments.getInt())
+ C.Deallocate((void*)StructuredArguments.getPointer());
}
+TemplateArgumentList::~TemplateArgumentList() {}
+
//===----------------------------------------------------------------------===//
// ClassTemplateSpecializationDecl Implementation
//===----------------------------------------------------------------------===//
ClassTemplateSpecializationDecl::
-ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK,
+ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
TemplateArgumentListBuilder &Builder,
ClassTemplateSpecializationDecl *PrevDecl)
- : CXXRecordDecl(DK,
- SpecializedTemplate->getTemplatedDecl()->getTagKind(),
- DC, L,
- // FIXME: Should we use DeclarationName for the name of
- // class template specializations?
+ : CXXRecordDecl(DK, TK, DC, L,
SpecializedTemplate->getIdentifier(),
PrevDecl),
SpecializedTemplate(SpecializedTemplate),
@@ -414,7 +431,7 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK,
}
ClassTemplateSpecializationDecl *
-ClassTemplateSpecializationDecl::Create(ASTContext &Context,
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
DeclContext *DC, SourceLocation L,
ClassTemplateDecl *SpecializedTemplate,
TemplateArgumentListBuilder &Builder,
@@ -422,7 +439,7 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context,
ClassTemplateSpecializationDecl *Result
= new (Context)ClassTemplateSpecializationDecl(Context,
ClassTemplateSpecialization,
- DC, L,
+ TK, DC, L,
SpecializedTemplate,
Builder,
PrevDecl);
@@ -464,7 +481,7 @@ ClassTemplateSpecializationDecl::getSpecializedTemplate() const {
//===----------------------------------------------------------------------===//
ClassTemplatePartialSpecializationDecl *
ClassTemplatePartialSpecializationDecl::
-Create(ASTContext &Context, DeclContext *DC, SourceLocation L,
+Create(ASTContext &Context, TagKind TK, DeclContext *DC, SourceLocation L,
TemplateParameterList *Params,
ClassTemplateDecl *SpecializedTemplate,
TemplateArgumentListBuilder &Builder,
@@ -478,7 +495,7 @@ Create(ASTContext &Context, DeclContext *DC, SourceLocation L,
ClonedArgs[I] = ArgInfos[I];
ClassTemplatePartialSpecializationDecl *Result
- = new (Context)ClassTemplatePartialSpecializationDecl(Context,
+ = new (Context)ClassTemplatePartialSpecializationDecl(Context, TK,
DC, L, Params,
SpecializedTemplate,
Builder,
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index 4f85fca53868..343d403e76ad 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -11,10 +11,11 @@
// classes.
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeOrdering.h"
-#include "clang/AST/Decl.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
@@ -383,12 +384,12 @@ void DeclarationName::dump() const {
llvm::errs() << '\n';
}
-DeclarationNameTable::DeclarationNameTable() {
+DeclarationNameTable::DeclarationNameTable(ASTContext &C) : Ctx(C) {
CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
// Initialize the overloaded operator names.
- CXXOperatorNames = new CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
+ CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
for (unsigned Op = 0; Op < NUM_OVERLOADED_OPERATORS; ++Op) {
CXXOperatorNames[Op].ExtraKindOrNumArgs
= Op + DeclarationNameExtra::CXXConversionFunction;
@@ -399,29 +400,32 @@ DeclarationNameTable::DeclarationNameTable() {
DeclarationNameTable::~DeclarationNameTable() {
llvm::FoldingSet<CXXSpecialName> *SpecialNames =
static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
- llvm::FoldingSetIterator<CXXSpecialName>
- SI = SpecialNames->begin(), SE = SpecialNames->end();
+ llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
+ = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
+ (CXXLiteralOperatorNames);
- while (SI != SE) {
- CXXSpecialName *n = &*SI++;
- delete n;
- }
+ if (Ctx.FreeMemory) {
+ llvm::FoldingSetIterator<CXXSpecialName>
+ SI = SpecialNames->begin(), SE = SpecialNames->end();
+ while (SI != SE) {
+ CXXSpecialName *n = &*SI++;
+ Ctx.Deallocate(n);
+ }
- llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
- = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
- (CXXLiteralOperatorNames);
- llvm::FoldingSetIterator<CXXLiteralOperatorIdName>
- LI = LiteralNames->begin(), LE = LiteralNames->end();
+ llvm::FoldingSetIterator<CXXLiteralOperatorIdName>
+ LI = LiteralNames->begin(), LE = LiteralNames->end();
+
+ while (LI != LE) {
+ CXXLiteralOperatorIdName *n = &*LI++;
+ Ctx.Deallocate(n);
+ }
- while (LI != LE) {
- CXXLiteralOperatorIdName *n = &*LI++;
- delete n;
+ Ctx.Deallocate(CXXOperatorNames);
}
delete SpecialNames;
delete LiteralNames;
- delete [] CXXOperatorNames;
}
DeclarationName
@@ -459,7 +463,7 @@ DeclarationNameTable::getCXXSpecialName(DeclarationName::NameKind Kind,
if (CXXSpecialName *Name = SpecialNames->FindNodeOrInsertPos(ID, InsertPos))
return DeclarationName(Name);
- CXXSpecialName *SpecialName = new CXXSpecialName;
+ CXXSpecialName *SpecialName = new (Ctx) CXXSpecialName;
SpecialName->ExtraKindOrNumArgs = EKind;
SpecialName->Type = Ty;
SpecialName->FETokenInfo = 0;
@@ -487,7 +491,7 @@ DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
LiteralNames->FindNodeOrInsertPos(ID, InsertPos))
return DeclarationName (Name);
- CXXLiteralOperatorIdName *LiteralName = new CXXLiteralOperatorIdName;
+ CXXLiteralOperatorIdName *LiteralName = new (Ctx) CXXLiteralOperatorIdName;
LiteralName->ExtraKindOrNumArgs = DeclarationNameExtra::CXXLiteralOperator;
LiteralName->ID = II;
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 00662a53afed..c38cec32c3b2 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -27,6 +27,8 @@
#include <algorithm>
using namespace clang;
+void Expr::ANCHOR() {} // key function for Expr class.
+
/// isKnownToHaveBooleanValue - Return true if this is an integer expression
/// that is known to return 0 or 1. This happens for _Bool/bool expressions
/// but also int expressions which are produced by things like comparisons in
@@ -161,8 +163,19 @@ void DeclRefExpr::computeDependence() {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent())
ValueDependent = true;
- }
- }
+ }
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ else if (Var->isStaticDataMember() &&
+ Var->getDeclContext()->isDependentContext())
+ ValueDependent = true;
+ }
+ // (VD) - FIXME: Missing from the standard:
+ // - a member function or a static data member of the current
+ // instantiation
+ else if (isa<CXXMethodDecl>(D) && D->getDeclContext()->isDependentContext())
+ ValueDependent = true;
// (TD) - a nested-name-specifier or a qualified-id that names a
// member of an unknown specialization.
// (handled by DependentScopeDeclRefExpr)
@@ -976,6 +989,7 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
return true;
}
case CompoundAssignOperatorClass:
+ case VAArgExprClass:
return false;
case ConditionalOperatorClass: {
@@ -1557,6 +1571,18 @@ Expr *Expr::IgnoreParenCasts() {
}
}
+Expr *Expr::IgnoreParenImpCasts() {
+ Expr *E = this;
+ while (true) {
+ if (ParenExpr *P = dyn_cast<ParenExpr>(E))
+ E = P->getSubExpr();
+ else if (ImplicitCastExpr *P = dyn_cast<ImplicitCastExpr>(E))
+ E = P->getSubExpr();
+ else
+ return E;
+ }
+}
+
/// IgnoreParenNoopCasts - Ignore parentheses and casts that do not change the
/// value (including ptr->int casts of the same size). Strip off any
/// ParenExpr or CastExprs, returning their operand.
@@ -1757,385 +1783,6 @@ bool Expr::isConstantInitializer(ASTContext &Ctx) const {
return isEvaluatable(Ctx);
}
-/// isIntegerConstantExpr - this recursive routine will test if an expression is
-/// an integer constant expression.
-
-/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
-/// comma, etc
-///
-/// FIXME: Handle offsetof. Two things to do: Handle GCC's __builtin_offsetof
-/// to support gcc 4.0+ and handle the idiom GCC recognizes with a null pointer
-/// cast+dereference.
-
-// CheckICE - This function does the fundamental ICE checking: the returned
-// ICEDiag contains a Val of 0, 1, or 2, and a possibly null SourceLocation.
-// Note that to reduce code duplication, this helper does no evaluation
-// itself; the caller checks whether the expression is evaluatable, and
-// in the rare cases where CheckICE actually cares about the evaluated
-// value, it calls into Evalute.
-//
-// Meanings of Val:
-// 0: This expression is an ICE if it can be evaluated by Evaluate.
-// 1: This expression is not an ICE, but if it isn't evaluated, it's
-// a legal subexpression for an ICE. This return value is used to handle
-// the comma operator in C99 mode.
-// 2: This expression is not an ICE, and is not a legal subexpression for one.
-
-struct ICEDiag {
- unsigned Val;
- SourceLocation Loc;
-
- public:
- ICEDiag(unsigned v, SourceLocation l) : Val(v), Loc(l) {}
- ICEDiag() : Val(0) {}
-};
-
-ICEDiag NoDiag() { return ICEDiag(); }
-
-static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
- Expr::EvalResult EVResult;
- if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
- !EVResult.Val.isInt()) {
- return ICEDiag(2, E->getLocStart());
- }
- return NoDiag();
-}
-
-static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
- assert(!E->isValueDependent() && "Should not see value dependent exprs!");
- if (!E->getType()->isIntegralType()) {
- return ICEDiag(2, E->getLocStart());
- }
-
- switch (E->getStmtClass()) {
-#define STMT(Node, Base) case Expr::Node##Class:
-#define EXPR(Node, Base)
-#include "clang/AST/StmtNodes.def"
- case Expr::PredefinedExprClass:
- case Expr::FloatingLiteralClass:
- case Expr::ImaginaryLiteralClass:
- case Expr::StringLiteralClass:
- case Expr::ArraySubscriptExprClass:
- case Expr::MemberExprClass:
- case Expr::CompoundAssignOperatorClass:
- case Expr::CompoundLiteralExprClass:
- case Expr::ExtVectorElementExprClass:
- case Expr::InitListExprClass:
- case Expr::DesignatedInitExprClass:
- case Expr::ImplicitValueInitExprClass:
- case Expr::ParenListExprClass:
- case Expr::VAArgExprClass:
- case Expr::AddrLabelExprClass:
- case Expr::StmtExprClass:
- case Expr::CXXMemberCallExprClass:
- case Expr::CXXDynamicCastExprClass:
- case Expr::CXXTypeidExprClass:
- case Expr::CXXNullPtrLiteralExprClass:
- case Expr::CXXThisExprClass:
- case Expr::CXXThrowExprClass:
- case Expr::CXXNewExprClass:
- case Expr::CXXDeleteExprClass:
- case Expr::CXXPseudoDestructorExprClass:
- case Expr::UnresolvedLookupExprClass:
- case Expr::DependentScopeDeclRefExprClass:
- case Expr::CXXConstructExprClass:
- case Expr::CXXBindTemporaryExprClass:
- case Expr::CXXBindReferenceExprClass:
- case Expr::CXXExprWithTemporariesClass:
- case Expr::CXXTemporaryObjectExprClass:
- case Expr::CXXUnresolvedConstructExprClass:
- case Expr::CXXDependentScopeMemberExprClass:
- case Expr::UnresolvedMemberExprClass:
- case Expr::ObjCStringLiteralClass:
- case Expr::ObjCEncodeExprClass:
- case Expr::ObjCMessageExprClass:
- case Expr::ObjCSelectorExprClass:
- case Expr::ObjCProtocolExprClass:
- case Expr::ObjCIvarRefExprClass:
- case Expr::ObjCPropertyRefExprClass:
- case Expr::ObjCImplicitSetterGetterRefExprClass:
- case Expr::ObjCSuperExprClass:
- case Expr::ObjCIsaExprClass:
- case Expr::ShuffleVectorExprClass:
- case Expr::BlockExprClass:
- case Expr::BlockDeclRefExprClass:
- case Expr::NoStmtClass:
- return ICEDiag(2, E->getLocStart());
-
- case Expr::GNUNullExprClass:
- // GCC considers the GNU __null value to be an integral constant expression.
- return NoDiag();
-
- case Expr::ParenExprClass:
- return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
- case Expr::IntegerLiteralClass:
- case Expr::CharacterLiteralClass:
- case Expr::CXXBoolLiteralExprClass:
- case Expr::CXXZeroInitValueExprClass:
- case Expr::TypesCompatibleExprClass:
- case Expr::UnaryTypeTraitExprClass:
- return NoDiag();
- case Expr::CallExprClass:
- case Expr::CXXOperatorCallExprClass: {
- const CallExpr *CE = cast<CallExpr>(E);
- if (CE->isBuiltinCall(Ctx))
- return CheckEvalInICE(E, Ctx);
- return ICEDiag(2, E->getLocStart());
- }
- case Expr::DeclRefExprClass:
- if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
- return NoDiag();
- if (Ctx.getLangOptions().CPlusPlus &&
- E->getType().getCVRQualifiers() == Qualifiers::Const) {
- const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
-
- // Parameter variables are never constants. Without this check,
- // getAnyInitializer() can find a default argument, which leads
- // to chaos.
- if (isa<ParmVarDecl>(D))
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
-
- // C++ 7.1.5.1p2
- // A variable of non-volatile const-qualified integral or enumeration
- // type initialized by an ICE can be used in ICEs.
- if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
- Qualifiers Quals = Ctx.getCanonicalType(Dcl->getType()).getQualifiers();
- if (Quals.hasVolatile() || !Quals.hasConst())
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
-
- // Look for a declaration of this variable that has an initializer.
- const VarDecl *ID = 0;
- const Expr *Init = Dcl->getAnyInitializer(ID);
- if (Init) {
- if (ID->isInitKnownICE()) {
- // We have already checked whether this subexpression is an
- // integral constant expression.
- if (ID->isInitICE())
- return NoDiag();
- else
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
- }
-
- // It's an ICE whether or not the definition we found is
- // out-of-line. See DR 721 and the discussion in Clang PR
- // 6206 for details.
-
- if (Dcl->isCheckingICE()) {
- return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
- }
-
- Dcl->setCheckingICE();
- ICEDiag Result = CheckICE(Init, Ctx);
- // Cache the result of the ICE test.
- Dcl->setInitKnownICE(Result.Val == 0);
- return Result;
- }
- }
- }
- return ICEDiag(2, E->getLocStart());
- case Expr::UnaryOperatorClass: {
- const UnaryOperator *Exp = cast<UnaryOperator>(E);
- switch (Exp->getOpcode()) {
- case UnaryOperator::PostInc:
- case UnaryOperator::PostDec:
- case UnaryOperator::PreInc:
- case UnaryOperator::PreDec:
- case UnaryOperator::AddrOf:
- case UnaryOperator::Deref:
- return ICEDiag(2, E->getLocStart());
- case UnaryOperator::Extension:
- case UnaryOperator::LNot:
- case UnaryOperator::Plus:
- case UnaryOperator::Minus:
- case UnaryOperator::Not:
- case UnaryOperator::Real:
- case UnaryOperator::Imag:
- return CheckICE(Exp->getSubExpr(), Ctx);
- case UnaryOperator::OffsetOf:
- break;
- }
-
- // OffsetOf falls through here.
- }
- case Expr::OffsetOfExprClass: {
- // Note that per C99, offsetof must be an ICE. And AFAIK, using
- // Evaluate matches the proposed gcc behavior for cases like
- // "offsetof(struct s{int x[4];}, x[!.0])". This doesn't affect
- // compliance: we should warn earlier for offsetof expressions with
- // array subscripts that aren't ICEs, and if the array subscripts
- // are ICEs, the value of the offsetof must be an integer constant.
- return CheckEvalInICE(E, Ctx);
- }
- case Expr::SizeOfAlignOfExprClass: {
- const SizeOfAlignOfExpr *Exp = cast<SizeOfAlignOfExpr>(E);
- if (Exp->isSizeOf() && Exp->getTypeOfArgument()->isVariableArrayType())
- return ICEDiag(2, E->getLocStart());
- return NoDiag();
- }
- case Expr::BinaryOperatorClass: {
- const BinaryOperator *Exp = cast<BinaryOperator>(E);
- switch (Exp->getOpcode()) {
- case BinaryOperator::PtrMemD:
- case BinaryOperator::PtrMemI:
- case BinaryOperator::Assign:
- case BinaryOperator::MulAssign:
- case BinaryOperator::DivAssign:
- case BinaryOperator::RemAssign:
- case BinaryOperator::AddAssign:
- case BinaryOperator::SubAssign:
- case BinaryOperator::ShlAssign:
- case BinaryOperator::ShrAssign:
- case BinaryOperator::AndAssign:
- case BinaryOperator::XorAssign:
- case BinaryOperator::OrAssign:
- return ICEDiag(2, E->getLocStart());
-
- case BinaryOperator::Mul:
- case BinaryOperator::Div:
- case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
- case BinaryOperator::Shl:
- case BinaryOperator::Shr:
- case BinaryOperator::LT:
- case BinaryOperator::GT:
- case BinaryOperator::LE:
- case BinaryOperator::GE:
- case BinaryOperator::EQ:
- case BinaryOperator::NE:
- case BinaryOperator::And:
- case BinaryOperator::Xor:
- case BinaryOperator::Or:
- case BinaryOperator::Comma: {
- ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
- ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
- if (Exp->getOpcode() == BinaryOperator::Div ||
- Exp->getOpcode() == BinaryOperator::Rem) {
- // Evaluate gives an error for undefined Div/Rem, so make sure
- // we don't evaluate one.
- if (LHSResult.Val != 2 && RHSResult.Val != 2) {
- llvm::APSInt REval = Exp->getRHS()->EvaluateAsInt(Ctx);
- if (REval == 0)
- return ICEDiag(1, E->getLocStart());
- if (REval.isSigned() && REval.isAllOnesValue()) {
- llvm::APSInt LEval = Exp->getLHS()->EvaluateAsInt(Ctx);
- if (LEval.isMinSignedValue())
- return ICEDiag(1, E->getLocStart());
- }
- }
- }
- if (Exp->getOpcode() == BinaryOperator::Comma) {
- if (Ctx.getLangOptions().C99) {
- // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
- // if it isn't evaluated.
- if (LHSResult.Val == 0 && RHSResult.Val == 0)
- return ICEDiag(1, E->getLocStart());
- } else {
- // In both C89 and C++, commas in ICEs are illegal.
- return ICEDiag(2, E->getLocStart());
- }
- }
- if (LHSResult.Val >= RHSResult.Val)
- return LHSResult;
- return RHSResult;
- }
- case BinaryOperator::LAnd:
- case BinaryOperator::LOr: {
- ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
- ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
- if (LHSResult.Val == 0 && RHSResult.Val == 1) {
- // Rare case where the RHS has a comma "side-effect"; we need
- // to actually check the condition to see whether the side
- // with the comma is evaluated.
- if ((Exp->getOpcode() == BinaryOperator::LAnd) !=
- (Exp->getLHS()->EvaluateAsInt(Ctx) == 0))
- return RHSResult;
- return NoDiag();
- }
-
- if (LHSResult.Val >= RHSResult.Val)
- return LHSResult;
- return RHSResult;
- }
- }
- }
- case Expr::ImplicitCastExprClass:
- case Expr::CStyleCastExprClass:
- case Expr::CXXFunctionalCastExprClass:
- case Expr::CXXStaticCastExprClass:
- case Expr::CXXReinterpretCastExprClass:
- case Expr::CXXConstCastExprClass: {
- const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
- if (SubExpr->getType()->isIntegralType())
- return CheckICE(SubExpr, Ctx);
- if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
- return NoDiag();
- return ICEDiag(2, E->getLocStart());
- }
- case Expr::ConditionalOperatorClass: {
- const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
- // If the condition (ignoring parens) is a __builtin_constant_p call,
- // then only the true side is actually considered in an integer constant
- // expression, and it is fully evaluated. This is an important GNU
- // extension. See GCC PR38377 for discussion.
- if (const CallExpr *CallCE
- = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
- if (CallCE->isBuiltinCall(Ctx) == Builtin::BI__builtin_constant_p) {
- Expr::EvalResult EVResult;
- if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
- !EVResult.Val.isInt()) {
- return ICEDiag(2, E->getLocStart());
- }
- return NoDiag();
- }
- ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
- ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
- ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
- if (CondResult.Val == 2)
- return CondResult;
- if (TrueResult.Val == 2)
- return TrueResult;
- if (FalseResult.Val == 2)
- return FalseResult;
- if (CondResult.Val == 1)
- return CondResult;
- if (TrueResult.Val == 0 && FalseResult.Val == 0)
- return NoDiag();
- // Rare case where the diagnostics depend on which side is evaluated
- // Note that if we get here, CondResult is 0, and at least one of
- // TrueResult and FalseResult is non-zero.
- if (Exp->getCond()->EvaluateAsInt(Ctx) == 0) {
- return FalseResult;
- }
- return TrueResult;
- }
- case Expr::CXXDefaultArgExprClass:
- return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
- case Expr::ChooseExprClass: {
- return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(Ctx), Ctx);
- }
- }
-
- // Silence a GCC warning
- return ICEDiag(2, E->getLocStart());
-}
-
-bool Expr::isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
- SourceLocation *Loc, bool isEvaluated) const {
- ICEDiag d = CheckICE(this, Ctx);
- if (d.Val != 0) {
- if (Loc) *Loc = d.Loc;
- return false;
- }
- EvalResult EvalResult;
- if (!Evaluate(EvalResult, Ctx))
- llvm_unreachable("ICE cannot be evaluated!");
- assert(!EvalResult.HasSideEffects && "ICE with side effects!");
- assert(EvalResult.Val.isInt() && "ICE that isn't integer!");
- Result = EvalResult.Val.getInt();
- return true;
-}
-
/// isNullPointerConstant - C99 6.3.2.3p3 - Return true if this is either an
/// integer constant expression with the value zero, or if this is one that is
/// cast to void*.
@@ -2433,9 +2080,9 @@ ObjCInterfaceDecl *ObjCMessageExpr::getReceiverInterface() const {
break;
case Class:
- if (const ObjCInterfaceType *Iface
- = getClassReceiver()->getAs<ObjCInterfaceType>())
- return Iface->getDecl();
+ if (const ObjCObjectType *Ty
+ = getClassReceiver()->getAs<ObjCObjectType>())
+ return Ty->getInterface();
break;
case SuperInstance:
@@ -2712,7 +2359,9 @@ Stmt::child_iterator ObjCPropertyRefExpr::child_end() { return &Base+1; }
// ObjCImplicitSetterGetterRefExpr
Stmt::child_iterator ObjCImplicitSetterGetterRefExpr::child_begin() {
- return &Base;
+ // If this is accessing a class member, skip that entry.
+ if (Base) return &Base;
+ return &Base+1;
}
Stmt::child_iterator ObjCImplicitSetterGetterRefExpr::child_end() {
return &Base+1;
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index 2e03beb0f050..d1a2b261f26b 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -92,12 +92,11 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SourceLocation startLoc, SourceLocation endLoc)
: Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()),
GlobalNew(globalNew), ParenTypeId(parenTypeId),
- Initializer(initializer), Array(arraySize), NumPlacementArgs(numPlaceArgs),
- NumConstructorArgs(numConsArgs), OperatorNew(operatorNew),
+ Initializer(initializer), SubExprs(0), OperatorNew(operatorNew),
OperatorDelete(operatorDelete), Constructor(constructor),
StartLoc(startLoc), EndLoc(endLoc) {
- unsigned TotalSize = Array + NumPlacementArgs + NumConstructorArgs;
- SubExprs = new (C) Stmt*[TotalSize];
+
+ AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
unsigned i = 0;
if (Array)
SubExprs[i++] = arraySize;
@@ -105,9 +104,20 @@ CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
SubExprs[i++] = placementArgs[j];
for (unsigned j = 0; j < NumConstructorArgs; ++j)
SubExprs[i++] = constructorArgs[j];
- assert(i == TotalSize);
}
+void CXXNewExpr::AllocateArgsArray(ASTContext &C, bool isArray,
+ unsigned numPlaceArgs, unsigned numConsArgs){
+ assert(SubExprs == 0 && "SubExprs already allocated");
+ Array = isArray;
+ NumPlacementArgs = numPlaceArgs;
+ NumConstructorArgs = numConsArgs;
+
+ unsigned TotalSize = Array + NumPlacementArgs + NumConstructorArgs;
+ SubExprs = new (C) Stmt*[TotalSize];
+}
+
+
void CXXNewExpr::DoDestroy(ASTContext &C) {
DestroyChildren(C);
if (SubExprs)
@@ -134,7 +144,7 @@ Stmt::child_iterator CXXPseudoDestructorExpr::child_end() {
PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
: Type(Info)
{
- Location = Info->getTypeLoc().getSourceRange().getBegin();
+ Location = Info->getTypeLoc().getLocalSourceRange().getBegin();
}
QualType CXXPseudoDestructorExpr::getDestroyedType() const {
@@ -147,7 +157,7 @@ QualType CXXPseudoDestructorExpr::getDestroyedType() const {
SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
SourceLocation End = DestroyedType.getLocation();
if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
- End = TInfo->getTypeLoc().getSourceRange().getEnd();
+ End = TInfo->getTypeLoc().getLocalSourceRange().getEnd();
return SourceRange(Base->getLocStart(), End);
}
@@ -159,23 +169,47 @@ UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
NestedNameSpecifier *Qualifier,
SourceRange QualifierRange, DeclarationName Name,
SourceLocation NameLoc, bool ADL,
- const TemplateArgumentListInfo &Args)
+ const TemplateArgumentListInfo &Args,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
{
void *Mem = C.Allocate(sizeof(UnresolvedLookupExpr) +
ExplicitTemplateArgumentList::sizeFor(Args));
UnresolvedLookupExpr *ULE
- = new (Mem) UnresolvedLookupExpr(Dependent ? C.DependentTy : C.OverloadTy,
+ = new (Mem) UnresolvedLookupExpr(C,
+ Dependent ? C.DependentTy : C.OverloadTy,
Dependent, NamingClass,
Qualifier, QualifierRange,
Name, NameLoc, ADL,
/*Overload*/ true,
- /*ExplicitTemplateArgs*/ true);
+ /*ExplicitTemplateArgs*/ true,
+ Begin, End);
reinterpret_cast<ExplicitTemplateArgumentList*>(ULE+1)->initializeFrom(Args);
return ULE;
}
+OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
+ bool Dependent, NestedNameSpecifier *Qualifier,
+ SourceRange QRange, DeclarationName Name,
+ SourceLocation NameLoc, bool HasTemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : Expr(K, T, Dependent, Dependent),
+ Results(0), NumResults(End - Begin), Name(Name), Qualifier(Qualifier),
+ QualifierRange(QRange), NameLoc(NameLoc),
+ HasExplicitTemplateArgs(HasTemplateArgs)
+{
+ if (NumResults) {
+ Results = static_cast<DeclAccessPair *>(
+ C.Allocate(sizeof(DeclAccessPair) * NumResults,
+ llvm::alignof<DeclAccessPair>()));
+ memcpy(Results, &*Begin.getIterator(),
+ (End - Begin) * sizeof(DeclAccessPair));
+ }
+}
+
bool OverloadExpr::ComputeDependence(UnresolvedSetIterator Begin,
UnresolvedSetIterator End,
const TemplateArgumentListInfo *Args) {
@@ -517,35 +551,43 @@ void CXXConstructExpr::DoDestroy(ASTContext &C) {
C.Deallocate(this);
}
-CXXExprWithTemporaries::CXXExprWithTemporaries(Expr *subexpr,
+CXXExprWithTemporaries::CXXExprWithTemporaries(ASTContext &C,
+ Expr *subexpr,
CXXTemporary **temps,
unsigned numtemps)
-: Expr(CXXExprWithTemporariesClass, subexpr->getType(),
+ : Expr(CXXExprWithTemporariesClass, subexpr->getType(),
subexpr->isTypeDependent(), subexpr->isValueDependent()),
- SubExpr(subexpr), Temps(0), NumTemps(numtemps) {
- if (NumTemps > 0) {
- Temps = new CXXTemporary*[NumTemps];
- for (unsigned i = 0; i < NumTemps; ++i)
+ SubExpr(subexpr), Temps(0), NumTemps(0) {
+ if (numtemps) {
+ setNumTemporaries(C, numtemps);
+ for (unsigned i = 0; i != numtemps; ++i)
Temps[i] = temps[i];
}
}
+void CXXExprWithTemporaries::setNumTemporaries(ASTContext &C, unsigned N) {
+ assert(Temps == 0 && "Cannot resize with this");
+ NumTemps = N;
+ Temps = new (C) CXXTemporary*[NumTemps];
+}
+
+
CXXExprWithTemporaries *CXXExprWithTemporaries::Create(ASTContext &C,
Expr *SubExpr,
CXXTemporary **Temps,
unsigned NumTemps) {
- return new (C) CXXExprWithTemporaries(SubExpr, Temps, NumTemps);
+ return new (C) CXXExprWithTemporaries(C, SubExpr, Temps, NumTemps);
}
void CXXExprWithTemporaries::DoDestroy(ASTContext &C) {
DestroyChildren(C);
+ if (Temps)
+ C.Deallocate(Temps);
this->~CXXExprWithTemporaries();
C.Deallocate(this);
}
-CXXExprWithTemporaries::~CXXExprWithTemporaries() {
- delete[] Temps;
-}
+CXXExprWithTemporaries::~CXXExprWithTemporaries() {}
// CXXBindTemporaryExpr
Stmt::child_iterator CXXBindTemporaryExpr::child_begin() {
@@ -682,7 +724,8 @@ Stmt::child_iterator CXXDependentScopeMemberExpr::child_end() {
return child_iterator(&Base + 1);
}
-UnresolvedMemberExpr::UnresolvedMemberExpr(QualType T, bool Dependent,
+UnresolvedMemberExpr::UnresolvedMemberExpr(ASTContext &C, QualType T,
+ bool Dependent,
bool HasUnresolvedUsing,
Expr *Base, QualType BaseType,
bool IsArrow,
@@ -691,10 +734,12 @@ UnresolvedMemberExpr::UnresolvedMemberExpr(QualType T, bool Dependent,
SourceRange QualifierRange,
DeclarationName MemberName,
SourceLocation MemberLoc,
- const TemplateArgumentListInfo *TemplateArgs)
- : OverloadExpr(UnresolvedMemberExprClass, T, Dependent,
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End)
+ : OverloadExpr(UnresolvedMemberExprClass, C, T, Dependent,
Qualifier, QualifierRange, MemberName, MemberLoc,
- TemplateArgs != 0),
+ TemplateArgs != 0, Begin, End),
IsArrow(IsArrow), HasUnresolvedUsing(HasUnresolvedUsing),
Base(Base), BaseType(BaseType), OperatorLoc(OperatorLoc) {
if (TemplateArgs)
@@ -710,17 +755,19 @@ UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent,
SourceRange QualifierRange,
DeclarationName Member,
SourceLocation MemberLoc,
- const TemplateArgumentListInfo *TemplateArgs) {
+ const TemplateArgumentListInfo *TemplateArgs,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
std::size_t size = sizeof(UnresolvedMemberExpr);
if (TemplateArgs)
size += ExplicitTemplateArgumentList::sizeFor(*TemplateArgs);
void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>());
- return new (Mem) UnresolvedMemberExpr(
+ return new (Mem) UnresolvedMemberExpr(C,
Dependent ? C.DependentTy : C.OverloadTy,
Dependent, HasUnresolvedUsing, Base, BaseType,
IsArrow, OperatorLoc, Qualifier, QualifierRange,
- Member, MemberLoc, TemplateArgs);
+ Member, MemberLoc, TemplateArgs, Begin, End);
}
CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index c1a42d88fffa..dc614018ec2b 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -48,31 +48,110 @@ struct EvalInfo {
/// EvalResult - Contains information about the evaluation.
Expr::EvalResult &EvalResult;
- /// AnyLValue - Stack based LValue results are not discarded.
- bool AnyLValue;
-
- EvalInfo(ASTContext &ctx, Expr::EvalResult& evalresult,
- bool anylvalue = false)
- : Ctx(ctx), EvalResult(evalresult), AnyLValue(anylvalue) {}
+ EvalInfo(ASTContext &ctx, Expr::EvalResult& evalresult)
+ : Ctx(ctx), EvalResult(evalresult) {}
};
+namespace {
+ struct ComplexValue {
+ private:
+ bool IsInt;
-static bool EvaluateLValue(const Expr *E, APValue &Result, EvalInfo &Info);
-static bool EvaluatePointer(const Expr *E, APValue &Result, EvalInfo &Info);
+ public:
+ APSInt IntReal, IntImag;
+ APFloat FloatReal, FloatImag;
+
+ ComplexValue() : FloatReal(APFloat::Bogus), FloatImag(APFloat::Bogus) {}
+
+ void makeComplexFloat() { IsInt = false; }
+ bool isComplexFloat() const { return !IsInt; }
+ APFloat &getComplexFloatReal() { return FloatReal; }
+ APFloat &getComplexFloatImag() { return FloatImag; }
+
+ void makeComplexInt() { IsInt = true; }
+ bool isComplexInt() const { return IsInt; }
+ APSInt &getComplexIntReal() { return IntReal; }
+ APSInt &getComplexIntImag() { return IntImag; }
+
+ void moveInto(APValue &v) {
+ if (isComplexFloat())
+ v = APValue(FloatReal, FloatImag);
+ else
+ v = APValue(IntReal, IntImag);
+ }
+ };
+
+ struct LValue {
+ Expr *Base;
+ CharUnits Offset;
+
+ Expr *getLValueBase() { return Base; }
+ CharUnits getLValueOffset() { return Offset; }
+
+ void moveInto(APValue &v) {
+ v = APValue(Base, Offset);
+ }
+ };
+}
+
+static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info);
+static bool EvaluatePointer(const Expr *E, LValue &Result, EvalInfo &Info);
static bool EvaluateInteger(const Expr *E, APSInt &Result, EvalInfo &Info);
static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
-static bool EvaluateComplex(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
-static bool EvalPointerValueAsBool(APValue& Value, bool& Result) {
- // FIXME: Is this accurate for all kinds of bases? If not, what would
- // the check look like?
- Result = Value.getLValueBase() || !Value.getLValueOffset().isZero();
+static bool IsGlobalLValue(const Expr* E) {
+ if (!E) return true;
+
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (isa<FunctionDecl>(DRE->getDecl()))
+ return true;
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ return VD->hasGlobalStorage();
+ return false;
+ }
+
+ if (const CompoundLiteralExpr *CLE = dyn_cast<CompoundLiteralExpr>(E))
+ return CLE->isFileScope();
+
+ return true;
+}
+
+static bool EvalPointerValueAsBool(LValue& Value, bool& Result) {
+ const Expr* Base = Value.Base;
+
+ // A null base expression indicates a null pointer. These are always
+ // evaluatable, and they are false only when the offset is zero.
+ if (!Base) {
+ Result = !Value.Offset.isZero();
+ return true;
+ }
+
+ // Require the base expression to be a global l-value.
+ if (!IsGlobalLValue(Base)) return false;
+
+ // We have a non-null base expression. These are generally known to
+ // be true, but if it's a decl-ref to a weak symbol it can be null at
+ // runtime.
+ Result = true;
+
+ const DeclRefExpr* DeclRef = dyn_cast<DeclRefExpr>(Base);
+ if (!DeclRef)
+ return true;
+
+ // If it's a weak symbol, it isn't constant-evaluable.
+ const ValueDecl* Decl = DeclRef->getDecl();
+ if (Decl->hasAttr<WeakAttr>() ||
+ Decl->hasAttr<WeakRefAttr>() ||
+ Decl->hasAttr<WeakImportAttr>())
+ return false;
+
return true;
}
@@ -91,12 +170,12 @@ static bool HandleConversionToBool(const Expr* E, bool& Result,
Result = !FloatResult.isZero();
return true;
} else if (E->getType()->hasPointerRepresentation()) {
- APValue PointerResult;
+ LValue PointerResult;
if (!EvaluatePointer(E, PointerResult, Info))
return false;
return EvalPointerValueAsBool(PointerResult, Result);
} else if (E->getType()->isAnyComplexType()) {
- APValue ComplexResult;
+ ComplexValue ComplexResult;
if (!EvaluateComplex(E, ComplexResult, Info))
return false;
if (ComplexResult.isComplexFloat()) {
@@ -221,34 +300,42 @@ public:
//===----------------------------------------------------------------------===//
namespace {
class LValueExprEvaluator
- : public StmtVisitor<LValueExprEvaluator, APValue> {
+ : public StmtVisitor<LValueExprEvaluator, bool> {
EvalInfo &Info;
+ LValue &Result;
+
+ bool Success(Expr *E) {
+ Result.Base = E;
+ Result.Offset = CharUnits::Zero();
+ return true;
+ }
public:
- LValueExprEvaluator(EvalInfo &info) : Info(info) {}
+ LValueExprEvaluator(EvalInfo &info, LValue &Result) :
+ Info(info), Result(Result) {}
- APValue VisitStmt(Stmt *S) {
- return APValue();
+ bool VisitStmt(Stmt *S) {
+ return false;
}
- APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
- APValue VisitDeclRefExpr(DeclRefExpr *E);
- APValue VisitPredefinedExpr(PredefinedExpr *E) { return APValue(E); }
- APValue VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
- APValue VisitMemberExpr(MemberExpr *E);
- APValue VisitStringLiteral(StringLiteral *E) { return APValue(E); }
- APValue VisitObjCEncodeExpr(ObjCEncodeExpr *E) { return APValue(E); }
- APValue VisitArraySubscriptExpr(ArraySubscriptExpr *E);
- APValue VisitUnaryDeref(UnaryOperator *E);
- APValue VisitUnaryExtension(const UnaryOperator *E)
+ bool VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitDeclRefExpr(DeclRefExpr *E);
+ bool VisitPredefinedExpr(PredefinedExpr *E) { return Success(E); }
+ bool VisitCompoundLiteralExpr(CompoundLiteralExpr *E);
+ bool VisitMemberExpr(MemberExpr *E);
+ bool VisitStringLiteral(StringLiteral *E) { return Success(E); }
+ bool VisitObjCEncodeExpr(ObjCEncodeExpr *E) { return Success(E); }
+ bool VisitArraySubscriptExpr(ArraySubscriptExpr *E);
+ bool VisitUnaryDeref(UnaryOperator *E);
+ bool VisitUnaryExtension(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
- APValue VisitChooseExpr(const ChooseExpr *E)
+ bool VisitChooseExpr(const ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
- APValue VisitCastExpr(CastExpr *E) {
+ bool VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
default:
- return APValue();
+ return false;
case CastExpr::CK_NoOp:
return Visit(E->getSubExpr());
@@ -258,44 +345,41 @@ public:
};
} // end anonymous namespace
-static bool EvaluateLValue(const Expr* E, APValue& Result, EvalInfo &Info) {
- Result = LValueExprEvaluator(Info).Visit(const_cast<Expr*>(E));
- return Result.isLValue();
+static bool EvaluateLValue(const Expr* E, LValue& Result, EvalInfo &Info) {
+ return LValueExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
-APValue LValueExprEvaluator::VisitDeclRefExpr(DeclRefExpr *E) {
+bool LValueExprEvaluator::VisitDeclRefExpr(DeclRefExpr *E) {
if (isa<FunctionDecl>(E->getDecl())) {
- return APValue(E);
+ return Success(E);
} else if (VarDecl* VD = dyn_cast<VarDecl>(E->getDecl())) {
- if (!Info.AnyLValue && !VD->hasGlobalStorage())
- return APValue();
if (!VD->getType()->isReferenceType())
- return APValue(E);
+ return Success(E);
+ // Reference parameters can refer to anything even if they have an
+ // "initializer" in the form of a default argument.
+ if (isa<ParmVarDecl>(VD))
+ return false;
// FIXME: Check whether VD might be overridden!
if (const Expr *Init = VD->getAnyInitializer())
return Visit(const_cast<Expr *>(Init));
}
- return APValue();
+ return false;
}
-APValue LValueExprEvaluator::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
- if (!Info.AnyLValue && !E->isFileScope())
- return APValue();
- return APValue(E);
+bool LValueExprEvaluator::VisitCompoundLiteralExpr(CompoundLiteralExpr *E) {
+ return Success(E);
}
-APValue LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
- APValue result;
+bool LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
QualType Ty;
if (E->isArrow()) {
- if (!EvaluatePointer(E->getBase(), result, Info))
- return APValue();
+ if (!EvaluatePointer(E->getBase(), Result, Info))
+ return false;
Ty = E->getBase()->getType()->getAs<PointerType>()->getPointeeType();
} else {
- result = Visit(E->getBase());
- if (result.isUninit())
- return APValue();
+ if (!Visit(E->getBase()))
+ return false;
Ty = E->getBase()->getType();
}
@@ -304,10 +388,10 @@ APValue LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
FieldDecl *FD = dyn_cast<FieldDecl>(E->getMemberDecl());
if (!FD) // FIXME: deal with other kinds of member expressions
- return APValue();
+ return false;
if (FD->getType()->isReferenceType())
- return APValue();
+ return false;
// FIXME: This is linear time.
unsigned i = 0;
@@ -318,36 +402,25 @@ APValue LValueExprEvaluator::VisitMemberExpr(MemberExpr *E) {
break;
}
- result.setLValue(result.getLValueBase(),
- result.getLValueOffset() +
- CharUnits::fromQuantity(RL.getFieldOffset(i) / 8));
-
- return result;
+ Result.Offset += CharUnits::fromQuantity(RL.getFieldOffset(i) / 8);
+ return true;
}
-APValue LValueExprEvaluator::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
- APValue Result;
-
+bool LValueExprEvaluator::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
if (!EvaluatePointer(E->getBase(), Result, Info))
- return APValue();
+ return false;
APSInt Index;
if (!EvaluateInteger(E->getIdx(), Index, Info))
- return APValue();
+ return false;
CharUnits ElementSize = Info.Ctx.getTypeSizeInChars(E->getType());
-
- CharUnits Offset = Index.getSExtValue() * ElementSize;
- Result.setLValue(Result.getLValueBase(),
- Result.getLValueOffset() + Offset);
- return Result;
+ Result.Offset += Index.getSExtValue() * ElementSize;
+ return true;
}
-APValue LValueExprEvaluator::VisitUnaryDeref(UnaryOperator *E) {
- APValue Result;
- if (!EvaluatePointer(E->getSubExpr(), Result, Info))
- return APValue();
- return Result;
+bool LValueExprEvaluator::VisitUnaryDeref(UnaryOperator *E) {
+ return EvaluatePointer(E->getSubExpr(), Result, Info);
}
//===----------------------------------------------------------------------===//
@@ -356,104 +429,103 @@ APValue LValueExprEvaluator::VisitUnaryDeref(UnaryOperator *E) {
namespace {
class PointerExprEvaluator
- : public StmtVisitor<PointerExprEvaluator, APValue> {
+ : public StmtVisitor<PointerExprEvaluator, bool> {
EvalInfo &Info;
+ LValue &Result;
+
+ bool Success(Expr *E) {
+ Result.Base = E;
+ Result.Offset = CharUnits::Zero();
+ return true;
+ }
public:
- PointerExprEvaluator(EvalInfo &info) : Info(info) {}
+ PointerExprEvaluator(EvalInfo &info, LValue &Result)
+ : Info(info), Result(Result) {}
- APValue VisitStmt(Stmt *S) {
- return APValue();
+ bool VisitStmt(Stmt *S) {
+ return false;
}
- APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
- APValue VisitBinaryOperator(const BinaryOperator *E);
- APValue VisitCastExpr(CastExpr* E);
- APValue VisitUnaryExtension(const UnaryOperator *E)
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitCastExpr(CastExpr* E);
+ bool VisitUnaryExtension(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
- APValue VisitUnaryAddrOf(const UnaryOperator *E);
- APValue VisitObjCStringLiteral(ObjCStringLiteral *E)
- { return APValue(E); }
- APValue VisitAddrLabelExpr(AddrLabelExpr *E)
- { return APValue(E); }
- APValue VisitCallExpr(CallExpr *E);
- APValue VisitBlockExpr(BlockExpr *E) {
+ bool VisitUnaryAddrOf(const UnaryOperator *E);
+ bool VisitObjCStringLiteral(ObjCStringLiteral *E)
+ { return Success(E); }
+ bool VisitAddrLabelExpr(AddrLabelExpr *E)
+ { return Success(E); }
+ bool VisitCallExpr(CallExpr *E);
+ bool VisitBlockExpr(BlockExpr *E) {
if (!E->hasBlockDeclRefExprs())
- return APValue(E);
- return APValue();
+ return Success(E);
+ return false;
}
- APValue VisitImplicitValueInitExpr(ImplicitValueInitExpr *E)
- { return APValue((Expr*)0); }
- APValue VisitConditionalOperator(ConditionalOperator *E);
- APValue VisitChooseExpr(ChooseExpr *E)
+ bool VisitImplicitValueInitExpr(ImplicitValueInitExpr *E)
+ { return Success((Expr*)0); }
+ bool VisitConditionalOperator(ConditionalOperator *E);
+ bool VisitChooseExpr(ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
- APValue VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E)
- { return APValue((Expr*)0); }
+ bool VisitCXXNullPtrLiteralExpr(CXXNullPtrLiteralExpr *E)
+ { return Success((Expr*)0); }
// FIXME: Missing: @protocol, @selector
};
} // end anonymous namespace
-static bool EvaluatePointer(const Expr* E, APValue& Result, EvalInfo &Info) {
- if (!E->getType()->hasPointerRepresentation())
- return false;
- Result = PointerExprEvaluator(Info).Visit(const_cast<Expr*>(E));
- return Result.isLValue();
+static bool EvaluatePointer(const Expr* E, LValue& Result, EvalInfo &Info) {
+ assert(E->getType()->hasPointerRepresentation());
+ return PointerExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
-APValue PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (E->getOpcode() != BinaryOperator::Add &&
E->getOpcode() != BinaryOperator::Sub)
- return APValue();
+ return false;
const Expr *PExp = E->getLHS();
const Expr *IExp = E->getRHS();
if (IExp->getType()->isPointerType())
std::swap(PExp, IExp);
- APValue ResultLValue;
- if (!EvaluatePointer(PExp, ResultLValue, Info))
- return APValue();
+ if (!EvaluatePointer(PExp, Result, Info))
+ return false;
- llvm::APSInt AdditionalOffset;
- if (!EvaluateInteger(IExp, AdditionalOffset, Info))
- return APValue();
+ llvm::APSInt Offset;
+ if (!EvaluateInteger(IExp, Offset, Info))
+ return false;
+ int64_t AdditionalOffset
+ = Offset.isSigned() ? Offset.getSExtValue()
+ : static_cast<int64_t>(Offset.getZExtValue());
// Compute the new offset in the appropriate width.
QualType PointeeType =
PExp->getType()->getAs<PointerType>()->getPointeeType();
- llvm::APSInt SizeOfPointee(AdditionalOffset);
+ CharUnits SizeOfPointee;
// Explicitly handle GNU void* and function pointer arithmetic extensions.
if (PointeeType->isVoidType() || PointeeType->isFunctionType())
- SizeOfPointee = 1;
+ SizeOfPointee = CharUnits::One();
else
- SizeOfPointee = Info.Ctx.getTypeSizeInChars(PointeeType).getQuantity();
+ SizeOfPointee = Info.Ctx.getTypeSizeInChars(PointeeType);
- llvm::APSInt Offset(AdditionalOffset);
- Offset = ResultLValue.getLValueOffset().getQuantity();
if (E->getOpcode() == BinaryOperator::Add)
- Offset += AdditionalOffset * SizeOfPointee;
+ Result.Offset += AdditionalOffset * SizeOfPointee;
else
- Offset -= AdditionalOffset * SizeOfPointee;
+ Result.Offset -= AdditionalOffset * SizeOfPointee;
- // Sign extend prior to converting back to a char unit.
- if (Offset.getBitWidth() < 64)
- Offset.extend(64);
- return APValue(ResultLValue.getLValueBase(),
- CharUnits::fromQuantity(Offset.getLimitedValue()));
+ return true;
}
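// Worked example for the pointer arithmetic above (assuming a 4-byte int):
// for "p + 3" with "int *p", AdditionalOffset is 3 and SizeOfPointee is
// CharUnits::fromQuantity(4), so Result.Offset grows by 12 bytes. For the GNU
// extensions of "void *" or function-pointer arithmetic, SizeOfPointee is
// CharUnits::One(), so the same expression would grow the offset by 3 bytes.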
-APValue PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
- APValue result;
- if (EvaluateLValue(E->getSubExpr(), result, Info))
- return result;
- return APValue();
+bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
+ return EvaluateLValue(E->getSubExpr(), Result, Info);
}
-APValue PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
+bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
Expr* SubExpr = E->getSubExpr();
switch (E->getCastKind()) {
@@ -471,18 +543,20 @@ APValue PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
return Visit(SubExpr);
if (SubExpr->getType()->isIntegralType()) {
- APValue Result;
- if (!EvaluateIntegerOrLValue(SubExpr, Result, Info))
+ APValue Value;
+ if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
- if (Result.isInt()) {
- Result.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
- return APValue(0,
- CharUnits::fromQuantity(Result.getInt().getZExtValue()));
+ if (Value.isInt()) {
+ Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
+ Result.Base = 0;
+ Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
+ return true;
+ } else {
+ Result.Base = Value.getLValueBase();
+ Result.Offset = Value.getLValueOffset();
+ return true;
}
-
- // Cast is of an lvalue, no need to change value.
- return Result;
}
break;
}
@@ -494,51 +568,46 @@ APValue PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
return Visit(SubExpr);
case CastExpr::CK_IntegralToPointer: {
- APValue Result;
- if (!EvaluateIntegerOrLValue(SubExpr, Result, Info))
+ APValue Value;
+ if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
- if (Result.isInt()) {
- Result.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
- return APValue(0,
- CharUnits::fromQuantity(Result.getInt().getZExtValue()));
+ if (Value.isInt()) {
+ Value.getInt().extOrTrunc((unsigned)Info.Ctx.getTypeSize(E->getType()));
+ Result.Base = 0;
+ Result.Offset = CharUnits::fromQuantity(Value.getInt().getZExtValue());
+ return true;
+ } else {
+ // Cast is of an lvalue, no need to change value.
+ Result.Base = Value.getLValueBase();
+ Result.Offset = Value.getLValueOffset();
+ return true;
}
-
- // Cast is of an lvalue, no need to change value.
- return Result;
}
case CastExpr::CK_ArrayToPointerDecay:
- case CastExpr::CK_FunctionToPointerDecay: {
- APValue Result;
- if (EvaluateLValue(SubExpr, Result, Info))
- return Result;
- break;
- }
+ case CastExpr::CK_FunctionToPointerDecay:
+ return EvaluateLValue(SubExpr, Result, Info);
}
- return APValue();
+ return false;
}
-APValue PointerExprEvaluator::VisitCallExpr(CallExpr *E) {
+bool PointerExprEvaluator::VisitCallExpr(CallExpr *E) {
if (E->isBuiltinCall(Info.Ctx) ==
Builtin::BI__builtin___CFStringMakeConstantString ||
E->isBuiltinCall(Info.Ctx) ==
Builtin::BI__builtin___NSStringMakeConstantString)
- return APValue(E);
- return APValue();
+ return Success(E);
+ return false;
}
-APValue PointerExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
+bool PointerExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
bool BoolResult;
if (!HandleConversionToBool(E->getCond(), BoolResult, Info))
- return APValue();
+ return false;
Expr* EvalExpr = BoolResult ? E->getTrueExpr() : E->getFalseExpr();
-
- APValue Result;
- if (EvaluatePointer(EvalExpr, Result, Info))
- return Result;
- return APValue();
+ return Visit(EvalExpr);
}
//===----------------------------------------------------------------------===//
@@ -867,18 +936,20 @@ public:
private:
CharUnits GetAlignOfExpr(const Expr *E);
CharUnits GetAlignOfType(QualType T);
+ static QualType GetObjectType(const Expr *E);
+ bool TryEvaluateBuiltinObjectSize(CallExpr *E);
// FIXME: Missing: array subscript of vector, member of vector
};
} // end anonymous namespace
static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) {
- if (!E->getType()->isIntegralType())
- return false;
-
+ assert(E->getType()->isIntegralType());
return IntExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
+ assert(E->getType()->isIntegralType());
+
APValue Val;
if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt())
return false;
@@ -984,36 +1055,55 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E) {
return -1;
}
+/// Retrieves the "underlying object type" of the given expression,
+/// as used by __builtin_object_size.
+QualType IntExprEvaluator::GetObjectType(const Expr *E) {
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(E)) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ return VD->getType();
+ } else if (isa<CompoundLiteralExpr>(E)) {
+ return E->getType();
+ }
+
+ return QualType();
+}
+
+bool IntExprEvaluator::TryEvaluateBuiltinObjectSize(CallExpr *E) {
+ // TODO: Perhaps we should let LLVM lower this?
+ LValue Base;
+ if (!EvaluatePointer(E->getArg(0), Base, Info))
+ return false;
+
+ // If we can prove the base is null, lower to zero now.
+ const Expr *LVBase = Base.getLValueBase();
+ if (!LVBase) return Success(0, E);
+
+ QualType T = GetObjectType(LVBase);
+ if (T.isNull() ||
+ T->isIncompleteType() ||
+ !T->isObjectType() ||
+ T->isVariablyModifiedType() ||
+ T->isDependentType())
+ return false;
+
+ CharUnits Size = Info.Ctx.getTypeSizeInChars(T);
+ CharUnits Offset = Base.getLValueOffset();
+
+ if (!Offset.isNegative() && Offset <= Size)
+ Size -= Offset;
+ else
+ Size = CharUnits::Zero();
+ return Success(Size.getQuantity(), E);
+}
+
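// Illustrative example (not from this commit) of what the helper above folds:
//
//   char buf[16];
//   unsigned n = __builtin_object_size(buf + 4, 0);
//
// EvaluatePointer yields Base = buf (a DeclRefExpr) with Offset = 4 bytes,
// GetObjectType returns char[16] (16 bytes), so the call folds to 16 - 4 = 12.
// A provably null base folds to 0, and an out-of-range or negative offset
// clamps the result to 0.
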
bool IntExprEvaluator::VisitCallExpr(CallExpr *E) {
switch (E->isBuiltinCall(Info.Ctx)) {
default:
return Error(E->getLocStart(), diag::note_invalid_subexpr_in_ice, E);
case Builtin::BI__builtin_object_size: {
- const Expr *Arg = E->getArg(0)->IgnoreParens();
- Expr::EvalResult Base;
-
- // TODO: Perhaps we should let LLVM lower this?
- if (Arg->EvaluateAsAny(Base, Info.Ctx)
- && Base.Val.getKind() == APValue::LValue
- && !Base.HasSideEffects)
- if (const Expr *LVBase = Base.Val.getLValueBase())
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LVBase)) {
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl())) {
- if (!VD->getType()->isIncompleteType()
- && VD->getType()->isObjectType()
- && !VD->getType()->isVariablyModifiedType()
- && !VD->getType()->isDependentType()) {
- CharUnits Size = Info.Ctx.getTypeSizeInChars(VD->getType());
- CharUnits Offset = Base.Val.getLValueOffset();
- if (!Offset.isNegative() && Offset <= Size)
- Size -= Offset;
- else
- Size = CharUnits::Zero();
- return Success(Size.getQuantity(), E);
- }
- }
- }
+ if (TryEvaluateBuiltinObjectSize(E))
+ return true;
// If evaluating the argument has side-effects we can't determine
// the size of the object and lower it to unknown now.
@@ -1098,7 +1188,7 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (LHSTy->isAnyComplexType()) {
assert(RHSTy->isAnyComplexType() && "Invalid comparison");
- APValue LHS, RHS;
+ ComplexValue LHS, RHS;
if (!EvaluateComplex(E->getLHS(), LHS, Info))
return false;
@@ -1173,11 +1263,11 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (LHSTy->isPointerType() && RHSTy->isPointerType()) {
if (E->getOpcode() == BinaryOperator::Sub || E->isEqualityOp()) {
- APValue LHSValue;
+ LValue LHSValue;
if (!EvaluatePointer(E->getLHS(), LHSValue, Info))
return false;
- APValue RHSValue;
+ LValue RHSValue;
if (!EvaluatePointer(E->getRHS(), RHSValue, Info))
return false;
@@ -1463,7 +1553,7 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (E->isOffsetOfOp()) {
// The AST for offsetof is defined in such a way that we can just
// directly Evaluate it as an l-value.
- APValue LV;
+ LValue LV;
if (!EvaluateLValue(E->getSubExpr(), LV, Info))
return false;
if (LV.getLValueBase())
@@ -1538,7 +1628,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
// FIXME: Clean this up!
if (SrcType->isPointerType()) {
- APValue LV;
+ LValue LV;
if (!EvaluatePointer(SubExpr, LV, Info))
return false;
@@ -1547,7 +1637,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(SrcType))
return false;
- Result = LV;
+ LV.moveInto(Result);
return true;
}
@@ -1559,19 +1649,19 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
if (SrcType->isArrayType() || SrcType->isFunctionType()) {
// This handles double-conversion cases, where there's both
// an l-value promotion and an implicit conversion to int.
- APValue LV;
+ LValue LV;
if (!EvaluateLValue(SubExpr, LV, Info))
return false;
if (Info.Ctx.getTypeSize(DestType) != Info.Ctx.getTypeSize(Info.Ctx.VoidPtrTy))
return false;
- Result = LV;
+ LV.moveInto(Result);
return true;
}
if (SrcType->isAnyComplexType()) {
- APValue C;
+ ComplexValue C;
if (!EvaluateComplex(SubExpr, C, Info))
return false;
if (C.isComplexFloat())
@@ -1596,7 +1686,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isAnyComplexType()) {
- APValue LV;
+ ComplexValue LV;
if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
return Success(LV.getComplexIntReal(), E);
@@ -1607,7 +1697,7 @@ bool IntExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
bool IntExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
if (E->getSubExpr()->getType()->isComplexIntegerType()) {
- APValue LV;
+ ComplexValue LV;
if (!EvaluateComplex(E->getSubExpr(), LV, Info) || !LV.isComplexInt())
return Error(E->getExprLoc(), diag::note_invalid_subexpr_in_ice, E);
return Success(LV.getComplexIntImag(), E);
@@ -1649,13 +1739,16 @@ public:
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
bool VisitUnaryExtension(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
+ bool VisitUnaryReal(const UnaryOperator *E);
+ bool VisitUnaryImag(const UnaryOperator *E);
- // FIXME: Missing: __real__/__imag__, array subscript of vector,
- // member of vector, ImplicitValueInitExpr
+ // FIXME: Missing: array subscript of vector, member of vector,
+ // ImplicitValueInitExpr
};
} // end anonymous namespace
static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
+ assert(E->getType()->isRealFloatingType());
return FloatExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
@@ -1736,6 +1829,22 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
}
}
+bool FloatExprEvaluator::VisitUnaryReal(const UnaryOperator *E) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatReal;
+ return true;
+}
+
+bool FloatExprEvaluator::VisitUnaryImag(const UnaryOperator *E) {
+ ComplexValue CV;
+ if (!EvaluateComplex(E->getSubExpr(), CV, Info))
+ return false;
+ Result = CV.FloatImag;
+ return true;
+}
+
bool FloatExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
if (E->getOpcode() == UnaryOperator::Deref)
return false;
@@ -1838,166 +1947,170 @@ bool FloatExprEvaluator::VisitConditionalOperator(ConditionalOperator *E) {
namespace {
class ComplexExprEvaluator
- : public StmtVisitor<ComplexExprEvaluator, APValue> {
+ : public StmtVisitor<ComplexExprEvaluator, bool> {
EvalInfo &Info;
+ ComplexValue &Result;
public:
- ComplexExprEvaluator(EvalInfo &info) : Info(info) {}
+ ComplexExprEvaluator(EvalInfo &info, ComplexValue &Result)
+ : Info(info), Result(Result) {}
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
- APValue VisitStmt(Stmt *S) {
- return APValue();
+ bool VisitStmt(Stmt *S) {
+ return false;
}
- APValue VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
+ bool VisitParenExpr(ParenExpr *E) { return Visit(E->getSubExpr()); }
- APValue VisitImaginaryLiteral(ImaginaryLiteral *E) {
+ bool VisitImaginaryLiteral(ImaginaryLiteral *E) {
Expr* SubExpr = E->getSubExpr();
if (SubExpr->getType()->isRealFloatingType()) {
- APFloat Result(0.0);
-
- if (!EvaluateFloat(SubExpr, Result, Info))
- return APValue();
+ Result.makeComplexFloat();
+ APFloat &Imag = Result.FloatImag;
+ if (!EvaluateFloat(SubExpr, Imag, Info))
+ return false;
- return APValue(APFloat(Result.getSemantics(), APFloat::fcZero, false),
- Result);
+ Result.FloatReal = APFloat(Imag.getSemantics());
+ return true;
} else {
assert(SubExpr->getType()->isIntegerType() &&
"Unexpected imaginary literal.");
- llvm::APSInt Result;
- if (!EvaluateInteger(SubExpr, Result, Info))
- return APValue();
+ Result.makeComplexInt();
+ APSInt &Imag = Result.IntImag;
+ if (!EvaluateInteger(SubExpr, Imag, Info))
+ return false;
- llvm::APSInt Zero(Result.getBitWidth(), !Result.isSigned());
- Zero = 0;
- return APValue(Zero, Result);
+ Result.IntReal = APSInt(Imag.getBitWidth(), !Imag.isSigned());
+ return true;
}
}
- APValue VisitCastExpr(CastExpr *E) {
+ bool VisitCastExpr(CastExpr *E) {
Expr* SubExpr = E->getSubExpr();
QualType EltType = E->getType()->getAs<ComplexType>()->getElementType();
QualType SubType = SubExpr->getType();
if (SubType->isRealFloatingType()) {
- APFloat Result(0.0);
-
- if (!EvaluateFloat(SubExpr, Result, Info))
- return APValue();
+ APFloat &Real = Result.FloatReal;
+ if (!EvaluateFloat(SubExpr, Real, Info))
+ return false;
if (EltType->isRealFloatingType()) {
- Result = HandleFloatToFloatCast(EltType, SubType, Result, Info.Ctx);
- return APValue(Result,
- APFloat(Result.getSemantics(), APFloat::fcZero, false));
+ Result.makeComplexFloat();
+ Real = HandleFloatToFloatCast(EltType, SubType, Real, Info.Ctx);
+ Result.FloatImag = APFloat(Real.getSemantics());
+ return true;
} else {
- llvm::APSInt IResult;
- IResult = HandleFloatToIntCast(EltType, SubType, Result, Info.Ctx);
- llvm::APSInt Zero(IResult.getBitWidth(), !IResult.isSigned());
- Zero = 0;
- return APValue(IResult, Zero);
+ Result.makeComplexInt();
+ Result.IntReal = HandleFloatToIntCast(EltType, SubType, Real, Info.Ctx);
+ Result.IntImag = APSInt(Result.IntReal.getBitWidth(),
+ !Result.IntReal.isSigned());
+ return true;
}
} else if (SubType->isIntegerType()) {
- APSInt Result;
-
- if (!EvaluateInteger(SubExpr, Result, Info))
- return APValue();
+ APSInt &Real = Result.IntReal;
+ if (!EvaluateInteger(SubExpr, Real, Info))
+ return false;
if (EltType->isRealFloatingType()) {
- APFloat FResult =
- HandleIntToFloatCast(EltType, SubType, Result, Info.Ctx);
- return APValue(FResult,
- APFloat(FResult.getSemantics(), APFloat::fcZero, false));
+ Result.makeComplexFloat();
+ Result.FloatReal
+ = HandleIntToFloatCast(EltType, SubType, Real, Info.Ctx);
+ Result.FloatImag = APFloat(Result.FloatReal.getSemantics());
+ return true;
} else {
- Result = HandleIntToIntCast(EltType, SubType, Result, Info.Ctx);
- llvm::APSInt Zero(Result.getBitWidth(), !Result.isSigned());
- Zero = 0;
- return APValue(Result, Zero);
+ Result.makeComplexInt();
+ Real = HandleIntToIntCast(EltType, SubType, Real, Info.Ctx);
+ Result.IntImag = APSInt(Real.getBitWidth(), !Real.isSigned());
+ return true;
}
} else if (const ComplexType *CT = SubType->getAs<ComplexType>()) {
- APValue Src;
-
- if (!EvaluateComplex(SubExpr, Src, Info))
- return APValue();
+ if (!Visit(SubExpr))
+ return false;
QualType SrcType = CT->getElementType();
- if (Src.isComplexFloat()) {
+ if (Result.isComplexFloat()) {
if (EltType->isRealFloatingType()) {
- return APValue(HandleFloatToFloatCast(EltType, SrcType,
- Src.getComplexFloatReal(),
- Info.Ctx),
- HandleFloatToFloatCast(EltType, SrcType,
- Src.getComplexFloatImag(),
- Info.Ctx));
+ Result.makeComplexFloat();
+ Result.FloatReal = HandleFloatToFloatCast(EltType, SrcType,
+ Result.FloatReal,
+ Info.Ctx);
+ Result.FloatImag = HandleFloatToFloatCast(EltType, SrcType,
+ Result.FloatImag,
+ Info.Ctx);
+ return true;
} else {
- return APValue(HandleFloatToIntCast(EltType, SrcType,
- Src.getComplexFloatReal(),
- Info.Ctx),
- HandleFloatToIntCast(EltType, SrcType,
- Src.getComplexFloatImag(),
- Info.Ctx));
+ Result.makeComplexInt();
+ Result.IntReal = HandleFloatToIntCast(EltType, SrcType,
+ Result.FloatReal,
+ Info.Ctx);
+ Result.IntImag = HandleFloatToIntCast(EltType, SrcType,
+ Result.FloatImag,
+ Info.Ctx);
+ return true;
}
} else {
- assert(Src.isComplexInt() && "Invalid evaluate result.");
+ assert(Result.isComplexInt() && "Invalid evaluate result.");
if (EltType->isRealFloatingType()) {
- return APValue(HandleIntToFloatCast(EltType, SrcType,
- Src.getComplexIntReal(),
- Info.Ctx),
- HandleIntToFloatCast(EltType, SrcType,
- Src.getComplexIntImag(),
- Info.Ctx));
+ Result.makeComplexFloat();
+ Result.FloatReal = HandleIntToFloatCast(EltType, SrcType,
+ Result.IntReal,
+ Info.Ctx);
+ Result.FloatImag = HandleIntToFloatCast(EltType, SrcType,
+ Result.IntImag,
+ Info.Ctx);
+ return true;
} else {
- return APValue(HandleIntToIntCast(EltType, SrcType,
- Src.getComplexIntReal(),
- Info.Ctx),
- HandleIntToIntCast(EltType, SrcType,
- Src.getComplexIntImag(),
- Info.Ctx));
+ Result.makeComplexInt();
+ Result.IntReal = HandleIntToIntCast(EltType, SrcType,
+ Result.IntReal,
+ Info.Ctx);
+ Result.IntImag = HandleIntToIntCast(EltType, SrcType,
+ Result.IntImag,
+ Info.Ctx);
+ return true;
}
}
}
// FIXME: Handle more casts.
- return APValue();
+ return false;
}
- APValue VisitBinaryOperator(const BinaryOperator *E);
- APValue VisitChooseExpr(const ChooseExpr *E)
+ bool VisitBinaryOperator(const BinaryOperator *E);
+ bool VisitChooseExpr(const ChooseExpr *E)
{ return Visit(E->getChosenSubExpr(Info.Ctx)); }
- APValue VisitUnaryExtension(const UnaryOperator *E)
+ bool VisitUnaryExtension(const UnaryOperator *E)
{ return Visit(E->getSubExpr()); }
// FIXME Missing: unary +/-/~, binary div, ImplicitValueInitExpr,
// conditional ?:, comma
};
} // end anonymous namespace
-static bool EvaluateComplex(const Expr *E, APValue &Result, EvalInfo &Info) {
- Result = ComplexExprEvaluator(Info).Visit(const_cast<Expr*>(E));
- assert((!Result.isComplexFloat() ||
- (&Result.getComplexFloatReal().getSemantics() ==
- &Result.getComplexFloatImag().getSemantics())) &&
- "Invalid complex evaluation.");
- return Result.isComplexFloat() || Result.isComplexInt();
+static bool EvaluateComplex(const Expr *E, ComplexValue &Result,
+ EvalInfo &Info) {
+ assert(E->getType()->isAnyComplexType());
+ return ComplexExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
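// Illustrative sketch (assumption, not from this commit): ComplexValue carries
// either an integer pair or a floating pair, selected by makeComplexInt() /
// makeComplexFloat(), so a caller reads it roughly as:
//
//   ComplexValue C;
//   if (EvaluateComplex(E, C, Info)) {
//     if (C.isComplexFloat()) { APFloat Re = C.FloatReal; APFloat Im = C.FloatImag; }
//     else                    { APSInt  Re = C.IntReal;   APSInt  Im = C.IntImag;   }
//   }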
-APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
- APValue Result, RHS;
-
- if (!EvaluateComplex(E->getLHS(), Result, Info))
- return APValue();
+bool ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
+ if (!Visit(E->getLHS()))
+ return false;
+ ComplexValue RHS;
if (!EvaluateComplex(E->getRHS(), RHS, Info))
- return APValue();
+ return false;
assert(Result.isComplexFloat() == RHS.isComplexFloat() &&
"Invalid operands to binary operator.");
switch (E->getOpcode()) {
- default: return APValue();
+ default: return false;
case BinaryOperator::Add:
if (Result.isComplexFloat()) {
Result.getComplexFloatReal().add(RHS.getComplexFloatReal(),
@@ -2022,7 +2135,7 @@ APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
break;
case BinaryOperator::Mul:
if (Result.isComplexFloat()) {
- APValue LHS = Result;
+ ComplexValue LHS = Result;
APFloat &LHS_r = LHS.getComplexFloatReal();
APFloat &LHS_i = LHS.getComplexFloatImag();
APFloat &RHS_r = RHS.getComplexFloatReal();
@@ -2042,7 +2155,7 @@ APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
Tmp.multiply(RHS_r, APFloat::rmNearestTiesToEven);
Result.getComplexFloatImag().add(Tmp, APFloat::rmNearestTiesToEven);
} else {
- APValue LHS = Result;
+ ComplexValue LHS = Result;
Result.getComplexIntReal() =
(LHS.getComplexIntReal() * RHS.getComplexIntReal() -
LHS.getComplexIntImag() * RHS.getComplexIntImag());
@@ -2053,7 +2166,7 @@ APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
break;
}
- return Result;
+ return true;
}
//===----------------------------------------------------------------------===//
@@ -2065,53 +2178,32 @@ APValue ComplexExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
/// we want to. If this function returns true, it returns the folded constant
/// in Result.
bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
+ const Expr *E = this;
EvalInfo Info(Ctx, Result);
-
- if (getType()->isVectorType()) {
- if (!EvaluateVector(this, Result.Val, Info))
+ if (E->getType()->isVectorType()) {
+ if (!EvaluateVector(E, Info.EvalResult.Val, Info))
return false;
- } else if (getType()->isIntegerType()) {
- if (!IntExprEvaluator(Info, Result.Val).Visit(const_cast<Expr*>(this)))
+ } else if (E->getType()->isIntegerType()) {
+ if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(const_cast<Expr*>(E)))
return false;
- } else if (getType()->hasPointerRepresentation()) {
- if (!EvaluatePointer(this, Result.Val, Info))
- return false;
- } else if (getType()->isRealFloatingType()) {
- llvm::APFloat f(0.0);
- if (!EvaluateFloat(this, f, Info))
- return false;
-
- Result.Val = APValue(f);
- } else if (getType()->isAnyComplexType()) {
- if (!EvaluateComplex(this, Result.Val, Info))
- return false;
- } else
- return false;
-
- return true;
-}
-
-bool Expr::EvaluateAsAny(EvalResult &Result, ASTContext &Ctx) const {
- EvalInfo Info(Ctx, Result, true);
-
- if (getType()->isVectorType()) {
- if (!EvaluateVector(this, Result.Val, Info))
- return false;
- } else if (getType()->isIntegerType()) {
- if (!IntExprEvaluator(Info, Result.Val).Visit(const_cast<Expr*>(this)))
+ } else if (E->getType()->hasPointerRepresentation()) {
+ LValue LV;
+ if (!EvaluatePointer(E, LV, Info))
return false;
- } else if (getType()->hasPointerRepresentation()) {
- if (!EvaluatePointer(this, Result.Val, Info))
+ if (!IsGlobalLValue(LV.Base))
return false;
- } else if (getType()->isRealFloatingType()) {
- llvm::APFloat f(0.0);
- if (!EvaluateFloat(this, f, Info))
+ LV.moveInto(Info.EvalResult.Val);
+ } else if (E->getType()->isRealFloatingType()) {
+ llvm::APFloat F(0.0);
+ if (!EvaluateFloat(E, F, Info))
return false;
- Result.Val = APValue(f);
- } else if (getType()->isAnyComplexType()) {
- if (!EvaluateComplex(this, Result.Val, Info))
+ Info.EvalResult.Val = APValue(F);
+ } else if (E->getType()->isAnyComplexType()) {
+ ComplexValue C;
+ if (!EvaluateComplex(E, C, Info))
return false;
+ C.moveInto(Info.EvalResult.Val);
} else
return false;
@@ -2128,13 +2220,25 @@ bool Expr::EvaluateAsBooleanCondition(bool &Result, ASTContext &Ctx) const {
bool Expr::EvaluateAsLValue(EvalResult &Result, ASTContext &Ctx) const {
EvalInfo Info(Ctx, Result);
- return EvaluateLValue(this, Result.Val, Info) && !Result.HasSideEffects;
+ LValue LV;
+ if (EvaluateLValue(this, LV, Info) &&
+ !Result.HasSideEffects &&
+ IsGlobalLValue(LV.Base)) {
+ LV.moveInto(Result.Val);
+ return true;
+ }
+ return false;
}
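// Illustrative contrast (assumption, not from this commit):
//
//   static int g;   // "g" passes IsGlobalLValue: EvaluateAsLValue succeeds
//   void f() {
//     int l;        // "l" does not: only EvaluateAsAnyLValue (below) folds it
//   }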
bool Expr::EvaluateAsAnyLValue(EvalResult &Result, ASTContext &Ctx) const {
- EvalInfo Info(Ctx, Result, true);
+ EvalInfo Info(Ctx, Result);
- return EvaluateLValue(this, Result.Val, Info) && !Result.HasSideEffects;
+ LValue LV;
+ if (EvaluateLValue(this, LV, Info)) {
+ LV.moveInto(Result.Val);
+ return true;
+ }
+ return false;
}
/// isEvaluatable - Call Evaluate to see if this expression can be constant
@@ -2159,3 +2263,388 @@ APSInt Expr::EvaluateAsInt(ASTContext &Ctx) const {
return EvalResult.Val.getInt();
}
+
+ bool Expr::EvalResult::isGlobalLValue() const {
+ assert(Val.isLValue());
+ return IsGlobalLValue(Val.getLValueBase());
+ }
+
+
+/// isIntegerConstantExpr - This recursive routine tests whether an expression
+/// is an integer constant expression.
+
+/// FIXME: Pass up a reason why! Invalid operation in i-c-e, division by zero,
+/// comma, etc.
+///
+/// FIXME: Handle offsetof. Two things to do: Handle GCC's __builtin_offsetof
+/// to support gcc 4.0+ and handle the idiom GCC recognizes with a null pointer
+/// cast+dereference.
+
+// CheckICE - This function does the fundamental ICE checking: the returned
+// ICEDiag contains a Val of 0, 1, or 2, and a possibly null SourceLocation.
+// Note that to reduce code duplication, this helper does no evaluation
+// itself; the caller checks whether the expression is evaluatable, and
+// in the rare cases where CheckICE actually cares about the evaluated
+// value, it calls into Evaluate.
+//
+// Meanings of Val:
+// 0: This expression is an ICE if it can be evaluated by Evaluate.
+// 1: This expression is not an ICE, but if it isn't evaluated, it's
+// a legal subexpression for an ICE. This return value is used to handle
+// the comma operator in C99 mode.
+// 2: This expression is not an ICE, and is not a legal subexpression for one.
+
+struct ICEDiag {
+ unsigned Val;
+ SourceLocation Loc;
+
+ public:
+ ICEDiag(unsigned v, SourceLocation l) : Val(v), Loc(l) {}
+ ICEDiag() : Val(0) {}
+};
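
// Illustrative mapping of Val (assumption, mirroring the comment above;
// some_function stands for any ordinary, hypothetical external function):
//
//   4 * sizeof(int)        // plain arithmetic                     -> Val == 0
//   (1, 2)                 // comma in C99: OK only if unevaluated -> Val == 1
//   some_function()        // call to a non-builtin function       -> Val == 2
//
// So "1 ? 2 : (3, 4)" is still an ICE in C99, because the comma sits on the
// branch that is never evaluated, while "(3, 4)" on its own is not.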
+
+ICEDiag NoDiag() { return ICEDiag(); }
+
+static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
+ Expr::EvalResult EVResult;
+ if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+}
+
+static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
+ assert(!E->isValueDependent() && "Should not see value dependent exprs!");
+ if (!E->getType()->isIntegralType()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+
+ switch (E->getStmtClass()) {
+#define STMT(Node, Base) case Expr::Node##Class:
+#define EXPR(Node, Base)
+#include "clang/AST/StmtNodes.inc"
+ case Expr::PredefinedExprClass:
+ case Expr::FloatingLiteralClass:
+ case Expr::ImaginaryLiteralClass:
+ case Expr::StringLiteralClass:
+ case Expr::ArraySubscriptExprClass:
+ case Expr::MemberExprClass:
+ case Expr::CompoundAssignOperatorClass:
+ case Expr::CompoundLiteralExprClass:
+ case Expr::ExtVectorElementExprClass:
+ case Expr::InitListExprClass:
+ case Expr::DesignatedInitExprClass:
+ case Expr::ImplicitValueInitExprClass:
+ case Expr::ParenListExprClass:
+ case Expr::VAArgExprClass:
+ case Expr::AddrLabelExprClass:
+ case Expr::StmtExprClass:
+ case Expr::CXXMemberCallExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXTypeidExprClass:
+ case Expr::CXXNullPtrLiteralExprClass:
+ case Expr::CXXThisExprClass:
+ case Expr::CXXThrowExprClass:
+ case Expr::CXXNewExprClass:
+ case Expr::CXXDeleteExprClass:
+ case Expr::CXXPseudoDestructorExprClass:
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::DependentScopeDeclRefExprClass:
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXBindTemporaryExprClass:
+ case Expr::CXXBindReferenceExprClass:
+ case Expr::CXXExprWithTemporariesClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXUnresolvedConstructExprClass:
+ case Expr::CXXDependentScopeMemberExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ case Expr::ObjCStringLiteralClass:
+ case Expr::ObjCEncodeExprClass:
+ case Expr::ObjCMessageExprClass:
+ case Expr::ObjCSelectorExprClass:
+ case Expr::ObjCProtocolExprClass:
+ case Expr::ObjCIvarRefExprClass:
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ case Expr::ObjCSuperExprClass:
+ case Expr::ObjCIsaExprClass:
+ case Expr::ShuffleVectorExprClass:
+ case Expr::BlockExprClass:
+ case Expr::BlockDeclRefExprClass:
+ case Expr::NoStmtClass:
+ return ICEDiag(2, E->getLocStart());
+
+ case Expr::GNUNullExprClass:
+ // GCC considers the GNU __null value to be an integral constant expression.
+ return NoDiag();
+
+ case Expr::ParenExprClass:
+ return CheckICE(cast<ParenExpr>(E)->getSubExpr(), Ctx);
+ case Expr::IntegerLiteralClass:
+ case Expr::CharacterLiteralClass:
+ case Expr::CXXBoolLiteralExprClass:
+ case Expr::CXXZeroInitValueExprClass:
+ case Expr::TypesCompatibleExprClass:
+ case Expr::UnaryTypeTraitExprClass:
+ return NoDiag();
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass: {
+ const CallExpr *CE = cast<CallExpr>(E);
+ if (CE->isBuiltinCall(Ctx))
+ return CheckEvalInICE(E, Ctx);
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::DeclRefExprClass:
+ if (isa<EnumConstantDecl>(cast<DeclRefExpr>(E)->getDecl()))
+ return NoDiag();
+ if (Ctx.getLangOptions().CPlusPlus &&
+ E->getType().getCVRQualifiers() == Qualifiers::Const) {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ // Parameter variables are never constants. Without this check,
+ // getAnyInitializer() can find a default argument, which leads
+ // to chaos.
+ if (isa<ParmVarDecl>(D))
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ // C++ 7.1.5.1p2
+ // A variable of non-volatile const-qualified integral or enumeration
+ // type initialized by an ICE can be used in ICEs.
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
+ Qualifiers Quals = Ctx.getCanonicalType(Dcl->getType()).getQualifiers();
+ if (Quals.hasVolatile() || !Quals.hasConst())
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
+ // Look for a declaration of this variable that has an initializer.
+ const VarDecl *ID = 0;
+ const Expr *Init = Dcl->getAnyInitializer(ID);
+ if (Init) {
+ if (ID->isInitKnownICE()) {
+ // We have already checked whether this subexpression is an
+ // integral constant expression.
+ if (ID->isInitICE())
+ return NoDiag();
+ else
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+ }
+
+ // It's an ICE whether or not the definition we found is
+ // out-of-line. See DR 721 and the discussion in Clang PR
+ // 6206 for details.
+
+ if (Dcl->isCheckingICE()) {
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+ }
+
+ Dcl->setCheckingICE();
+ ICEDiag Result = CheckICE(Init, Ctx);
+ // Cache the result of the ICE test.
+ Dcl->setInitKnownICE(Result.Val == 0);
+ return Result;
+ }
+ }
+ }
+ return ICEDiag(2, E->getLocStart());
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *Exp = cast<UnaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case UnaryOperator::PostInc:
+ case UnaryOperator::PostDec:
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec:
+ case UnaryOperator::AddrOf:
+ case UnaryOperator::Deref:
+ return ICEDiag(2, E->getLocStart());
+ case UnaryOperator::Extension:
+ case UnaryOperator::LNot:
+ case UnaryOperator::Plus:
+ case UnaryOperator::Minus:
+ case UnaryOperator::Not:
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ return CheckICE(Exp->getSubExpr(), Ctx);
+ case UnaryOperator::OffsetOf:
+ break;
+ }
+
+ // OffsetOf falls through here.
+ }
+ case Expr::OffsetOfExprClass: {
+ // Note that per C99, offsetof must be an ICE. And AFAIK, using
+ // Evaluate matches the proposed gcc behavior for cases like
+ // "offsetof(struct s{int x[4];}, x[!.0])". This doesn't affect
+ // compliance: we should warn earlier for offsetof expressions with
+ // array subscripts that aren't ICEs, and if the array subscripts
+ // are ICEs, the value of the offsetof must be an integer constant.
+ return CheckEvalInICE(E, Ctx);
+ }
+ case Expr::SizeOfAlignOfExprClass: {
+ const SizeOfAlignOfExpr *Exp = cast<SizeOfAlignOfExpr>(E);
+ if (Exp->isSizeOf() && Exp->getTypeOfArgument()->isVariableArrayType())
+ return ICEDiag(2, E->getLocStart());
+ return NoDiag();
+ }
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *Exp = cast<BinaryOperator>(E);
+ switch (Exp->getOpcode()) {
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ case BinaryOperator::Assign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::RemAssign:
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::OrAssign:
+ return ICEDiag(2, E->getLocStart());
+
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::Rem:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
+ case BinaryOperator::Comma: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (Exp->getOpcode() == BinaryOperator::Div ||
+ Exp->getOpcode() == BinaryOperator::Rem) {
+ // Evaluate gives an error for undefined Div/Rem, so make sure
+ // we don't evaluate one.
+ if (LHSResult.Val != 2 && RHSResult.Val != 2) {
+ llvm::APSInt REval = Exp->getRHS()->EvaluateAsInt(Ctx);
+ if (REval == 0)
+ return ICEDiag(1, E->getLocStart());
+ if (REval.isSigned() && REval.isAllOnesValue()) {
+ llvm::APSInt LEval = Exp->getLHS()->EvaluateAsInt(Ctx);
+ if (LEval.isMinSignedValue())
+ return ICEDiag(1, E->getLocStart());
+ }
+ }
+ }
+ if (Exp->getOpcode() == BinaryOperator::Comma) {
+ if (Ctx.getLangOptions().C99) {
+ // C99 6.6p3 introduces a strange edge case: comma can be in an ICE
+ // if it isn't evaluated.
+ if (LHSResult.Val == 0 && RHSResult.Val == 0)
+ return ICEDiag(1, E->getLocStart());
+ } else {
+ // In both C89 and C++, commas in ICEs are illegal.
+ return ICEDiag(2, E->getLocStart());
+ }
+ }
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr: {
+ ICEDiag LHSResult = CheckICE(Exp->getLHS(), Ctx);
+ ICEDiag RHSResult = CheckICE(Exp->getRHS(), Ctx);
+ if (LHSResult.Val == 0 && RHSResult.Val == 1) {
+ // Rare case where the RHS has a comma "side-effect"; we need
+ // to actually check the condition to see whether the side
+ // with the comma is evaluated.
+ if ((Exp->getOpcode() == BinaryOperator::LAnd) !=
+ (Exp->getLHS()->EvaluateAsInt(Ctx) == 0))
+ return RHSResult;
+ return NoDiag();
+ }
+
+ if (LHSResult.Val >= RHSResult.Val)
+ return LHSResult;
+ return RHSResult;
+ }
+ }
+ }
+ case Expr::ImplicitCastExprClass:
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass: {
+ const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
+ if (SubExpr->getType()->isIntegralType())
+ return CheckICE(SubExpr, Ctx);
+ if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
+ return NoDiag();
+ return ICEDiag(2, E->getLocStart());
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *Exp = cast<ConditionalOperator>(E);
+ // If the condition (ignoring parens) is a __builtin_constant_p call,
+ // then only the true side is actually considered in an integer constant
+ // expression, and it is fully evaluated. This is an important GNU
+ // extension. See GCC PR38377 for discussion.
+ if (const CallExpr *CallCE
+ = dyn_cast<CallExpr>(Exp->getCond()->IgnoreParenCasts()))
+ if (CallCE->isBuiltinCall(Ctx) == Builtin::BI__builtin_constant_p) {
+ Expr::EvalResult EVResult;
+ if (!E->Evaluate(EVResult, Ctx) || EVResult.HasSideEffects ||
+ !EVResult.Val.isInt()) {
+ return ICEDiag(2, E->getLocStart());
+ }
+ return NoDiag();
+ }
+ ICEDiag CondResult = CheckICE(Exp->getCond(), Ctx);
+ ICEDiag TrueResult = CheckICE(Exp->getTrueExpr(), Ctx);
+ ICEDiag FalseResult = CheckICE(Exp->getFalseExpr(), Ctx);
+ if (CondResult.Val == 2)
+ return CondResult;
+ if (TrueResult.Val == 2)
+ return TrueResult;
+ if (FalseResult.Val == 2)
+ return FalseResult;
+ if (CondResult.Val == 1)
+ return CondResult;
+ if (TrueResult.Val == 0 && FalseResult.Val == 0)
+ return NoDiag();
+ // Rare case where the diagnostics depend on which side is evaluated
+ // Note that if we get here, CondResult is 0, and at least one of
+ // TrueResult and FalseResult is non-zero.
+ if (Exp->getCond()->EvaluateAsInt(Ctx) == 0) {
+ return FalseResult;
+ }
+ return TrueResult;
+ }
+ case Expr::CXXDefaultArgExprClass:
+ return CheckICE(cast<CXXDefaultArgExpr>(E)->getExpr(), Ctx);
+ case Expr::ChooseExprClass: {
+ return CheckICE(cast<ChooseExpr>(E)->getChosenSubExpr(Ctx), Ctx);
+ }
+ }
+
+ // Silence a GCC warning
+ return ICEDiag(2, E->getLocStart());
+}
+
+bool Expr::isIntegerConstantExpr(llvm::APSInt &Result, ASTContext &Ctx,
+ SourceLocation *Loc, bool isEvaluated) const {
+ ICEDiag d = CheckICE(this, Ctx);
+ if (d.Val != 0) {
+ if (Loc) *Loc = d.Loc;
+ return false;
+ }
+ EvalResult EvalResult;
+ if (!Evaluate(EvalResult, Ctx))
+ llvm_unreachable("ICE cannot be evaluated!");
+ assert(!EvalResult.HasSideEffects && "ICE with side effects!");
+ assert(EvalResult.Val.isInt() && "ICE that isn't integer!");
+ Result = EvalResult.Val.getInt();
+ return true;
+}
diff --git a/lib/AST/NestedNameSpecifier.cpp b/lib/AST/NestedNameSpecifier.cpp
index 45518e98bc15..d6594cdfd02f 100644
--- a/lib/AST/NestedNameSpecifier.cpp
+++ b/lib/AST/NestedNameSpecifier.cpp
@@ -145,14 +145,14 @@ NestedNameSpecifier::print(llvm::raw_ostream &OS,
InnerPolicy.SuppressScope = true;
// Nested-name-specifiers are intended to contain minimally-qualified
- // types. An actual QualifiedNameType will not occur, since we'll store
+ // types. An actual ElaboratedType will not occur, since we'll store
// just the type that is referred to in the nested-name-specifier (e.g.,
// a TypedefType, TagType, etc.). However, when we are dealing with
// dependent template-id types (e.g., Outer<T>::template Inner<U>),
// the type requires its own nested-name-specifier for uniqueness, so we
// suppress that nested-name-specifier during printing.
- assert(!isa<QualifiedNameType>(T) &&
- "Qualified name type in nested-name-specifier");
+ assert(!isa<ElaboratedType>(T) &&
+ "Elaborated type in nested-name-specifier");
if (const TemplateSpecializationType *SpecType
= dyn_cast<TemplateSpecializationType>(T)) {
// Print the template name without its corresponding
diff --git a/lib/AST/RecordLayout.cpp b/lib/AST/RecordLayout.cpp
index ade2483722ef..262c4597f846 100644
--- a/lib/AST/RecordLayout.cpp
+++ b/lib/AST/RecordLayout.cpp
@@ -44,7 +44,9 @@ ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx,
unsigned fieldcount,
uint64_t nonvirtualsize,
unsigned nonvirtualalign,
- const PrimaryBaseInfo &PrimaryBase,
+ uint64_t SizeOfLargestEmptySubobject,
+ const CXXRecordDecl *PrimaryBase,
+ bool PrimaryBaseIsVirtual,
const BaseOffsetsMapTy& BaseOffsets,
const BaseOffsetsMapTy& VBaseOffsets)
: Size(size), DataSize(datasize), FieldOffsets(0), Alignment(alignment),
@@ -55,9 +57,10 @@ ASTRecordLayout::ASTRecordLayout(ASTContext &Ctx,
memcpy(FieldOffsets, fieldoffsets, FieldCount * sizeof(*FieldOffsets));
}
- CXXInfo->PrimaryBase = PrimaryBase;
+ CXXInfo->PrimaryBase = PrimaryBaseInfo(PrimaryBase, PrimaryBaseIsVirtual);
CXXInfo->NonVirtualSize = nonvirtualsize;
CXXInfo->NonVirtualAlign = nonvirtualalign;
+ CXXInfo->SizeOfLargestEmptySubobject = SizeOfLargestEmptySubobject;
CXXInfo->BaseOffsets = BaseOffsets;
CXXInfo->VBaseOffsets = VBaseOffsets;
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 3782985e5302..983a2874a735 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -1,4 +1,4 @@
-//=== ASTRecordLayoutBuilder.cpp - Helper class for building record layouts ==//
+//=== RecordLayoutBuilder.cpp - Helper class for building record layouts ---==//
//
// The LLVM Compiler Infrastructure
//
@@ -7,28 +7,443 @@
//
//===----------------------------------------------------------------------===//
-#include "RecordLayoutBuilder.h"
-
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/Support/Format.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/MathExtras.h"
+#include <map>
using namespace clang;
-ASTRecordLayoutBuilder::ASTRecordLayoutBuilder(ASTContext &Context)
- : Context(Context), Size(0), Alignment(8), Packed(false),
- UnfilledBitsInLastByte(0), MaxFieldAlignment(0), DataSize(0), IsUnion(false),
- NonVirtualSize(0), NonVirtualAlignment(8), FirstNearlyEmptyVBase(0) { }
+namespace {
+
+/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
+/// offsets while laying out a C++ class.
+class EmptySubobjectMap {
+ ASTContext &Context;
+
+ /// Class - The class whose empty entries we're keeping track of.
+ const CXXRecordDecl *Class;
+
+ /// EmptyClassOffsets - A map from offsets to empty record decls.
+ typedef llvm::SmallVector<const CXXRecordDecl *, 1> ClassVectorTy;
+ typedef llvm::DenseMap<uint64_t, ClassVectorTy> EmptyClassOffsetsMapTy;
+ EmptyClassOffsetsMapTy EmptyClassOffsets;
+
+ /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
+ /// member subobject that is empty.
+ void ComputeEmptySubobjectSizes();
+
+ struct BaseInfo {
+ const CXXRecordDecl *Class;
+ bool IsVirtual;
+
+ const CXXRecordDecl *PrimaryVirtualBase;
+
+ llvm::SmallVector<BaseInfo*, 4> Bases;
+ const BaseInfo *Derived;
+ };
+
+ llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> VirtualBaseInfo;
+ llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> NonVirtualBaseInfo;
+
+ BaseInfo *ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
+ const BaseInfo *Derived);
+ void ComputeBaseInfo();
+
+ bool CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, uint64_t Offset);
+ void UpdateEmptyBaseSubobjects(const BaseInfo *Info, uint64_t Offset);
+
+public:
+ /// This holds the size of the largest empty subobject (either a base
+ /// or a member). Will be zero if the record being built doesn't contain
+ /// any empty classes.
+ uint64_t SizeOfLargestEmptySubobject;
+
+ EmptySubobjectMap(ASTContext &Context, const CXXRecordDecl *Class)
+ : Context(Context), Class(Class), SizeOfLargestEmptySubobject(0) {
+ ComputeEmptySubobjectSizes();
+
+ ComputeBaseInfo();
+ }
+
+ /// CanPlaceBaseAtOffset - Return whether the given base class can be placed
+ /// at the given offset.
+ /// Returns false if placing the record will result in two components
+ /// (direct or indirect) of the same type having the same offset.
+ bool CanPlaceBaseAtOffset(const CXXRecordDecl *RD, bool BaseIsVirtual,
+ uint64_t Offset);
+};
+
+void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
+ // Check the bases.
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t EmptySize = 0;
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(BaseDecl);
+ if (BaseDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ SizeOfLargestEmptySubobject = std::max(SizeOfLargestEmptySubobject,
+ EmptySize);
+ }
+
+ // Check the fields.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I) {
+ const FieldDecl *FD = *I;
+
+ const RecordType *RT =
+ Context.getBaseElementType(FD->getType())->getAs<RecordType>();
+
+ // We only care about record types.
+ if (!RT)
+ continue;
+
+ uint64_t EmptySize = 0;
+ const CXXRecordDecl *MemberDecl = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(MemberDecl);
+ if (MemberDecl->isEmpty()) {
+ // If the class decl is empty, get its size.
+ EmptySize = Layout.getSize();
+ } else {
+ // Otherwise, we get the largest empty subobject for the decl.
+ EmptySize = Layout.getSizeOfLargestEmptySubobject();
+ }
+
+ SizeOfLargestEmptySubobject = std::max(SizeOfLargestEmptySubobject,
+ EmptySize);
+ }
+}
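// Illustrative example (assumption, not from this commit) of the value
// computed above:
//
//   struct Empty {};
//   struct Pair  { Empty e; int x; };
//   struct D : Empty { Pair p; };
//
// Empty lays out with size 1, so both the Empty base and the Pair member
// (whose own largest empty subobject is Empty) contribute 1, and D's
// SizeOfLargestEmptySubobject is 1. A class with no empty bases or members
// keeps 0, which lets CanPlaceBaseAtOffset below return true immediately.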
+
+EmptySubobjectMap::BaseInfo *
+EmptySubobjectMap::ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
+ const BaseInfo *Derived) {
+ BaseInfo *Info;
+
+ if (IsVirtual) {
+ BaseInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+ InfoSlot = new (Context) BaseInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (Context) BaseInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = Derived;
+ Info->PrimaryVirtualBase = 0;
+
+ if (RD->getNumVBases()) {
+ // Check if this class has a primary virtual base.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ Info->PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(Info->PrimaryVirtualBase &&
+ "Didn't have a primary virtual base!");
+ }
+ }
+
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ Info->Bases.push_back(ComputeBaseInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ return Info;
+}
+
+void EmptySubobjectMap::ComputeBaseInfo() {
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ BaseInfo *Info = ComputeBaseInfo(BaseDecl, IsVirtual, /*Derived=*/0);
+ if (IsVirtual) {
+ // ComputeBaseInfo has already added this base for us.
+ continue;
+ }
+
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+}
+
+bool
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info,
+ uint64_t Offset) {
+ // Traverse all non-virtual bases.
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+
+ if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
+ return false;
+ }
+
+ if (Info->PrimaryVirtualBase) {
+ BaseInfo *PrimaryVirtualBaseInfo =
+ VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+
+ if (Info == PrimaryVirtualBaseInfo->Derived) {
+ if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
+ return false;
+ }
+ }
+
+ // FIXME: Member variables.
+ return true;
+}
+
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseInfo *Info,
+ uint64_t Offset) {
+ if (Info->Class->isEmpty()) {
+ // FIXME: Record that there is an empty class at this offset.
+ }
+
+ // Traverse all non-virtual bases.
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ BaseInfo* Base = Info->Bases[I];
+ if (Base->IsVirtual)
+ continue;
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+
+ UpdateEmptyBaseSubobjects(Base, BaseOffset);
+ }
+
+ if (Info->PrimaryVirtualBase) {
+ BaseInfo *PrimaryVirtualBaseInfo =
+ VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+
+ if (Info == PrimaryVirtualBaseInfo->Derived)
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset);
+ }
+
+ // FIXME: Member variables.
+}
+
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const CXXRecordDecl *RD,
+ bool BaseIsVirtual,
+ uint64_t Offset) {
+ // If we know this class doesn't have any empty subobjects we don't need to
+ // bother checking.
+ if (!SizeOfLargestEmptySubobject)
+ return true;
+
+ BaseInfo *Info;
+
+ if (BaseIsVirtual)
+ Info = VirtualBaseInfo.lookup(RD);
+ else
+ Info = NonVirtualBaseInfo.lookup(RD);
+
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ UpdateEmptyBaseSubobjects(Info, Offset);
+ return true;
+}
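// Illustrative scenario (assumption) for the check above:
//
//   struct Empty {};
//   struct A : Empty {};
//   struct B : Empty, A {};
//
// When laying out B, the direct Empty base sits at offset 0; placing A at
// offset 0 would put A's Empty subobject there as well. That is the kind of
// overlap this map is designed to reject so the builder can retry at the next
// offset (byte 1), though recording the empty classes is still a FIXME at this
// revision.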
+
+class RecordLayoutBuilder {
+ // FIXME: Remove this and make the appropriate fields public.
+ friend class clang::ASTContext;
+
+ ASTContext &Context;
+
+ EmptySubobjectMap *EmptySubobjects;
+
+ /// Size - The current size of the record layout.
+ uint64_t Size;
+
+ /// Alignment - The current alignment of the record layout.
+ unsigned Alignment;
+
+ llvm::SmallVector<uint64_t, 16> FieldOffsets;
+
+ /// Packed - Whether the record is packed or not.
+ unsigned Packed : 1;
+
+ unsigned IsUnion : 1;
+
+ unsigned IsMac68kAlign : 1;
+
+ /// UnfilledBitsInLastByte - If the last field laid out was a bitfield,
+ /// this contains the number of bits in the last byte that can be used for
+ /// an adjacent bitfield if necessary.
+ unsigned char UnfilledBitsInLastByte;
+
+ /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
+ /// #pragma pack.
+ unsigned MaxFieldAlignment;
+
+ /// DataSize - The data size of the record being laid out.
+ uint64_t DataSize;
+
+ uint64_t NonVirtualSize;
+ unsigned NonVirtualAlignment;
+
+ /// PrimaryBase - the primary base class (if one exists) of the class
+ /// we're laying out.
+ const CXXRecordDecl *PrimaryBase;
+
+ /// PrimaryBaseIsVirtual - Whether the primary base of the class we're laying
+ /// out is virtual.
+ bool PrimaryBaseIsVirtual;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t> BaseOffsetsMapTy;
+
+ /// Bases - base classes and their offsets in the record.
+ BaseOffsetsMapTy Bases;
+
+ // VBases - virtual base classes and their offsets in the record.
+ BaseOffsetsMapTy VBases;
+
+ /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
+ /// primary base classes for some other direct or indirect base class.
+ llvm::SmallSet<const CXXRecordDecl*, 32> IndirectPrimaryBases;
+
+ /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
+ /// inheritance graph order. Used for determining the primary base class.
+ const CXXRecordDecl *FirstNearlyEmptyVBase;
+
+ /// VisitedVirtualBases - A set of all the visited virtual bases, used to
+ /// avoid visiting virtual bases more than once.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// EmptyClassOffsets - A map from offsets to empty record decls.
+ typedef std::multimap<uint64_t, const CXXRecordDecl *> EmptyClassOffsetsTy;
+ EmptyClassOffsetsTy EmptyClassOffsets;
+
+ RecordLayoutBuilder(ASTContext &Context, EmptySubobjectMap *EmptySubobjects)
+ : Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(8),
+ Packed(false), IsUnion(false), IsMac68kAlign(false),
+ UnfilledBitsInLastByte(0), MaxFieldAlignment(0), DataSize(0),
+ NonVirtualSize(0), NonVirtualAlignment(8), PrimaryBase(0),
+ PrimaryBaseIsVirtual(false), FirstNearlyEmptyVBase(0) { }
+
+ void Layout(const RecordDecl *D);
+ void Layout(const CXXRecordDecl *D);
+ void Layout(const ObjCInterfaceDecl *D);
+
+ void LayoutFields(const RecordDecl *D);
+ void LayoutField(const FieldDecl *D);
+ void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize);
+ void LayoutBitField(const FieldDecl *D);
+
+ /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
+ /// member subobject that is empty.
+ void ComputeEmptySubobjectSizes(const CXXRecordDecl *RD);
+
+ /// DeterminePrimaryBase - Determine the primary base of the given class.
+ void DeterminePrimaryBase(const CXXRecordDecl *RD);
+
+ void SelectPrimaryVBase(const CXXRecordDecl *RD);
+
+ /// IdentifyPrimaryBases - Identify all virtual base classes, direct or
+ /// indirect, that are primary base classes for some other direct or indirect
+ /// base class.
+ void IdentifyPrimaryBases(const CXXRecordDecl *RD);
+
+ bool IsNearlyEmpty(const CXXRecordDecl *RD) const;
+
+ /// LayoutNonVirtualBases - Determines the primary base class (if any) and
+ /// lays it out. Will then proceed to lay out all non-virtual base classes.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD);
+
+ /// LayoutNonVirtualBase - Lays out a single non-virtual base.
+ void LayoutNonVirtualBase(const CXXRecordDecl *Base);
+
+ void AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
+ const CXXRecordDecl *MostDerivedClass);
+
+ /// LayoutVirtualBases - Lays out all the virtual bases.
+ void LayoutVirtualBases(const CXXRecordDecl *RD,
+ const CXXRecordDecl *MostDerivedClass);
+
+ /// LayoutVirtualBase - Lays out a single virtual base.
+ void LayoutVirtualBase(const CXXRecordDecl *Base);
+
+ /// LayoutBase - Will lay out a base and return the offset where it was
+ /// placed, in bits.
+ uint64_t LayoutBase(const CXXRecordDecl *Base, bool BaseIsVirtual);
+
+ /// canPlaceRecordAtOffset - Return whether a record (either a base class
+ /// or a field) can be placed at the given offset.
+ /// Returns false if placing the record will result in two components
+ /// (direct or indirect) of the same type having the same offset.
+ bool canPlaceRecordAtOffset(const CXXRecordDecl *RD, uint64_t Offset,
+ bool CheckVBases) const;
+
+ /// canPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool canPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) const;
+
+ /// UpdateEmptyClassOffsets - Called after a record (either a base class
+ /// or a field) has been placed at the given offset. Will update the
+ /// EmptyClassOffsets map if the class is empty or has any empty bases or
+ /// fields.
+ void UpdateEmptyClassOffsets(const CXXRecordDecl *RD, uint64_t Offset,
+ bool UpdateVBases);
+
+ /// UpdateEmptyClassOffsets - Called after a field has been placed at the
+ /// given offset.
+ void UpdateEmptyClassOffsets(const FieldDecl *FD, uint64_t Offset);
+
+ /// InitializeLayout - Initialize record layout for the given record decl.
+ void InitializeLayout(const Decl *D);
+
+ /// FinishLayout - Finalize record layout. Adjust record size based on the
+ /// alignment.
+ void FinishLayout();
+
+ void UpdateAlignment(unsigned NewAlignment);
+
+ RecordLayoutBuilder(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+ void operator=(const RecordLayoutBuilder&); // DO NOT IMPLEMENT
+public:
+ static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
+};
+} // end anonymous namespace
/// IsNearlyEmpty - Indicates when a class has a vtable pointer, but
/// no other data.
-bool ASTRecordLayoutBuilder::IsNearlyEmpty(const CXXRecordDecl *RD) const {
+bool RecordLayoutBuilder::IsNearlyEmpty(const CXXRecordDecl *RD) const {
// FIXME: Audit the corners
if (!RD->isDynamicClass())
return false;
@@ -38,7 +453,7 @@ bool ASTRecordLayoutBuilder::IsNearlyEmpty(const CXXRecordDecl *RD) const {
return false;
}
-void ASTRecordLayoutBuilder::IdentifyPrimaryBases(const CXXRecordDecl *RD) {
+void RecordLayoutBuilder::IdentifyPrimaryBases(const CXXRecordDecl *RD) {
const ASTRecordLayout::PrimaryBaseInfo &BaseInfo =
Context.getASTRecordLayout(RD).getPrimaryBaseInfo();
@@ -63,7 +478,7 @@ void ASTRecordLayoutBuilder::IdentifyPrimaryBases(const CXXRecordDecl *RD) {
}
void
-ASTRecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
+RecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
assert(!I->getType()->isDependentType() &&
@@ -77,8 +492,8 @@ ASTRecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
// If it's not an indirect primary base, then we've found our primary
// base.
if (!IndirectPrimaryBases.count(Base)) {
- PrimaryBase = ASTRecordLayout::PrimaryBaseInfo(Base,
- /*IsVirtual=*/true);
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = true;
return;
}
@@ -88,13 +503,13 @@ ASTRecordLayoutBuilder::SelectPrimaryVBase(const CXXRecordDecl *RD) {
}
SelectPrimaryVBase(Base);
- if (PrimaryBase.getBase())
+ if (PrimaryBase)
return;
}
}
/// DeterminePrimaryBase - Determine the primary base of the given class.
-void ASTRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
+void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
// If the class isn't dynamic, it won't have a primary base.
if (!RD->isDynamicClass())
return;
@@ -124,7 +539,8 @@ void ASTRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
if (Base->isDynamicClass()) {
// We found it.
- PrimaryBase = ASTRecordLayout::PrimaryBaseInfo(Base, /*IsVirtual=*/false);
+ PrimaryBase = Base;
+ PrimaryBaseIsVirtual = false;
return;
}
}
@@ -133,20 +549,20 @@ void ASTRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
// indirect primary virtual base class, if one exists.
if (RD->getNumVBases() != 0) {
SelectPrimaryVBase(RD);
- if (PrimaryBase.getBase())
+ if (PrimaryBase)
return;
}
// Otherwise, it is the first nearly empty virtual base that is not an
// indirect primary virtual base class, if one exists.
if (FirstNearlyEmptyVBase) {
- PrimaryBase = ASTRecordLayout::PrimaryBaseInfo(FirstNearlyEmptyVBase,
- /*IsVirtual=*/true);
+ PrimaryBase = FirstNearlyEmptyVBase;
+ PrimaryBaseIsVirtual = true;
return;
}
// Otherwise there is no primary base class.
- assert(!PrimaryBase.getBase() && "Should not get here with a primary base!");
+ assert(!PrimaryBase && "Should not get here with a primary base!");
// Allocate the virtual table pointer at offset zero.
assert(DataSize == 0 && "Vtable pointer must be at offset zero!");
@@ -160,22 +576,23 @@ void ASTRecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
}
void
-ASTRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
+RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
// First, determine the primary base class.
DeterminePrimaryBase(RD);
// If we have a primary base class, lay it out.
- if (const CXXRecordDecl *Base = PrimaryBase.getBase()) {
- if (PrimaryBase.isVirtual()) {
+ if (PrimaryBase) {
+ if (PrimaryBaseIsVirtual) {
// We have a virtual primary base, insert it as an indirect primary base.
- IndirectPrimaryBases.insert(Base);
+ IndirectPrimaryBases.insert(PrimaryBase);
- assert(!VisitedVirtualBases.count(Base) && "vbase already visited!");
- VisitedVirtualBases.insert(Base);
-
- LayoutVirtualBase(Base);
+ assert(!VisitedVirtualBases.count(PrimaryBase) &&
+ "vbase already visited!");
+ VisitedVirtualBases.insert(PrimaryBase);
+
+ LayoutVirtualBase(PrimaryBase);
} else
- LayoutNonVirtualBase(Base);
+ LayoutNonVirtualBase(PrimaryBase);
}
// Now lay out the non-virtual bases.
@@ -190,7 +607,7 @@ ASTRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// Skip the primary base.
- if (Base == PrimaryBase.getBase() && !PrimaryBase.isVirtual())
+ if (Base == PrimaryBase && !PrimaryBaseIsVirtual)
continue;
// Lay out the base.
@@ -198,17 +615,17 @@ ASTRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
}
}
-void ASTRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *RD) {
+void RecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *Base) {
// Layout the base.
- uint64_t Offset = LayoutBase(RD);
+ uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/false);
// Add its base class offset.
- if (!Bases.insert(std::make_pair(RD, Offset)).second)
+ if (!Bases.insert(std::make_pair(Base, Offset)).second)
assert(false && "Added same base offset more than once!");
}
void
-ASTRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
+RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
uint64_t Offset,
const CXXRecordDecl *MostDerivedClass) {
// We already have the offset for the primary base of the most derived class.
@@ -226,7 +643,7 @@ ASTRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
E = RD->bases_end(); I != E; ++I) {
assert(!I->getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
-
+
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
@@ -234,17 +651,17 @@ ASTRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
// This base isn't interesting since it doesn't have any virtual bases.
continue;
}
-
+
// Compute the offset of this base.
uint64_t BaseOffset;
-
+
if (I->isVirtual()) {
// If we don't know this vbase yet, don't visit it. It will be visited
// later.
if (!VBases.count(BaseDecl)) {
continue;
}
-
+
// Check if we've already visited this base.
if (!VisitedVirtualBases.insert(BaseDecl))
continue;
@@ -265,14 +682,14 @@ ASTRecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
}
void
-ASTRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
+RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *MostDerivedClass) {
const CXXRecordDecl *PrimaryBase;
bool PrimaryBaseIsVirtual;
if (MostDerivedClass == RD) {
- PrimaryBase = this->PrimaryBase.getBase();
- PrimaryBaseIsVirtual = this->PrimaryBase.isVirtual();
+ PrimaryBase = this->PrimaryBase;
+ PrimaryBaseIsVirtual = this->PrimaryBaseIsVirtual;
} else {
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
PrimaryBase = Layout.getPrimaryBase();
@@ -296,7 +713,7 @@ ASTRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
// Only visit virtual bases once.
if (!VisitedVirtualBases.insert(Base))
continue;
-
+
LayoutVirtualBase(Base);
}
}
@@ -311,58 +728,64 @@ ASTRecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
}
}
-void ASTRecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *RD) {
+void RecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *Base) {
// Layout the base.
- uint64_t Offset = LayoutBase(RD);
+ uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/true);
// Add its base class offset.
- if (!VBases.insert(std::make_pair(RD, Offset)).second)
+ if (!VBases.insert(std::make_pair(Base, Offset)).second)
assert(false && "Added same vbase offset more than once!");
}
-uint64_t ASTRecordLayoutBuilder::LayoutBase(const CXXRecordDecl *RD) {
- const ASTRecordLayout &BaseInfo = Context.getASTRecordLayout(RD);
+uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
+ bool BaseIsVirtual) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base);
// If we have an empty base class, try to place it at offset 0.
- if (RD->isEmpty() && canPlaceRecordAtOffset(RD, 0)) {
+ if (Base->isEmpty() &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, 0) &&
+ canPlaceRecordAtOffset(Base, 0, /*CheckVBases=*/false)) {
// We were able to place the class at offset 0.
- UpdateEmptyClassOffsets(RD, 0);
+ UpdateEmptyClassOffsets(Base, 0, /*UpdateVBases=*/false);
- Size = std::max(Size, BaseInfo.getSize());
+ Size = std::max(Size, Layout.getSize());
return 0;
}
- unsigned BaseAlign = BaseInfo.getNonVirtualAlign();
+ unsigned BaseAlign = Layout.getNonVirtualAlign();
// Round up the current record size to the base's alignment boundary.
uint64_t Offset = llvm::RoundUpToAlignment(DataSize, BaseAlign);
// Try to place the base.
while (true) {
- if (canPlaceRecordAtOffset(RD, Offset))
+ if (EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, Offset) &&
+ canPlaceRecordAtOffset(Base, Offset, /*CheckVBases=*/false))
break;
Offset += BaseAlign;
}
- if (!RD->isEmpty()) {
+ if (!Base->isEmpty()) {
// Update the data size.
- DataSize = Offset + BaseInfo.getNonVirtualSize();
+ DataSize = Offset + Layout.getNonVirtualSize();
Size = std::max(Size, DataSize);
} else
- Size = std::max(Size, Offset + BaseInfo.getSize());
+ Size = std::max(Size, Offset + Layout.getSize());
// Remember max struct/class alignment.
UpdateAlignment(BaseAlign);
- UpdateEmptyClassOffsets(RD, Offset);
+ UpdateEmptyClassOffsets(Base, Offset, /*UpdateVBases=*/false);
return Offset;
}
-bool ASTRecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset) const {
+bool
+RecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset,
+ bool CheckVBases) const {
// Look for an empty class with the same type at the same offset.
for (EmptyClassOffsetsTy::const_iterator I =
EmptyClassOffsets.lower_bound(Offset),
@@ -372,7 +795,7 @@ bool ASTRecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
return false;
}
- const ASTRecordLayout &Info = Context.getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// Check bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -382,12 +805,13 @@ bool ASTRecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
if (I->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseClassOffset = Info.getBaseClassOffset(Base);
+ uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
- if (!canPlaceRecordAtOffset(Base, Offset + BaseClassOffset))
+ if (!canPlaceRecordAtOffset(BaseDecl, Offset + BaseOffset,
+ /*CheckVBases=*/false))
return false;
}
@@ -397,22 +821,25 @@ bool ASTRecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
- uint64_t FieldOffset = Info.getFieldOffset(FieldNo);
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
if (!canPlaceFieldAtOffset(FD, Offset + FieldOffset))
return false;
}
- // FIXME: virtual bases.
+ if (CheckVBases) {
+ // FIXME: virtual bases.
+ }
+
return true;
}
-bool ASTRecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD,
+bool RecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD,
uint64_t Offset) const {
QualType T = FD->getType();
if (const RecordType *RT = T->getAs<RecordType>()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return canPlaceRecordAtOffset(RD, Offset);
+ return canPlaceRecordAtOffset(RD, Offset, /*CheckVBases=*/true);
}
if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
@@ -424,27 +851,28 @@ bool ASTRecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD,
if (!RD)
return true;
- const ASTRecordLayout &Info = Context.getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
uint64_t NumElements = Context.getConstantArrayElementCount(AT);
uint64_t ElementOffset = Offset;
for (uint64_t I = 0; I != NumElements; ++I) {
- if (!canPlaceRecordAtOffset(RD, ElementOffset))
+ if (!canPlaceRecordAtOffset(RD, ElementOffset, /*CheckVBases=*/true))
return false;
- ElementOffset += Info.getSize();
+ ElementOffset += Layout.getSize();
}
}
return true;
}
-void ASTRecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
- uint64_t Offset) {
+void RecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
+ uint64_t Offset,
+ bool UpdateVBases) {
if (RD->isEmpty())
EmptyClassOffsets.insert(std::make_pair(Offset, RD));
- const ASTRecordLayout &Info = Context.getASTRecordLayout(RD);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
// Update bases.
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
@@ -457,8 +885,9 @@ void ASTRecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
const CXXRecordDecl *Base =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- uint64_t BaseClassOffset = Info.getBaseClassOffset(Base);
- UpdateEmptyClassOffsets(Base, Offset + BaseClassOffset);
+ uint64_t BaseClassOffset = Layout.getBaseClassOffset(Base);
+ UpdateEmptyClassOffsets(Base, Offset + BaseClassOffset,
+ /*UpdateVBases=*/false);
}
// Update fields.
@@ -467,21 +896,30 @@ void ASTRecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
I != E; ++I, ++FieldNo) {
const FieldDecl *FD = *I;
- uint64_t FieldOffset = Info.getFieldOffset(FieldNo);
+ uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
UpdateEmptyClassOffsets(FD, Offset + FieldOffset);
}
- // FIXME: Update virtual bases.
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (UpdateVBases) {
+ // FIXME: Update virtual bases.
+ } else if (PrimaryBase && Layout.getPrimaryBaseWasVirtual()) {
+ // We always want to update the offsets of a primary virtual base.
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "primary base class offset must always be 0!");
+ UpdateEmptyClassOffsets(PrimaryBase, Offset, /*UpdateVBases=*/false);
+ }
}
void
-ASTRecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD,
+RecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD,
uint64_t Offset) {
QualType T = FD->getType();
if (const RecordType *RT = T->getAs<RecordType>()) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- UpdateEmptyClassOffsets(RD, Offset);
+ UpdateEmptyClassOffsets(RD, Offset, /*UpdateVBases=*/true);
return;
}
}
@@ -501,76 +939,90 @@ ASTRecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD,
uint64_t ElementOffset = Offset;
for (uint64_t I = 0; I != NumElements; ++I) {
- UpdateEmptyClassOffsets(RD, ElementOffset);
+ UpdateEmptyClassOffsets(RD, ElementOffset, /*UpdateVBases=*/true);
ElementOffset += Info.getSize();
}
}
}
-void ASTRecordLayoutBuilder::Layout(const RecordDecl *D) {
- IsUnion = D->isUnion();
+void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
+ if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
+ IsUnion = RD->isUnion();
Packed = D->hasAttr<PackedAttr>();
- // The #pragma pack attribute specifies the maximum field alignment.
- if (const PragmaPackAttr *PPA = D->getAttr<PragmaPackAttr>())
- MaxFieldAlignment = PPA->getAlignment();
-
- if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
- UpdateAlignment(AA->getMaxAlignment());
+ // mac68k alignment supersedes maximum field alignment and attribute aligned,
+ // and forces all structures to have 2-byte alignment. The IBM docs on it
+ // allude to additional (more complicated) semantics, especially with regard
+ // to bit-fields, but gcc appears not to follow that.
+ if (D->hasAttr<AlignMac68kAttr>()) {
+ IsMac68kAlign = true;
+ MaxFieldAlignment = 2 * 8;
+ Alignment = 2 * 8;
+ } else {
+ if (const MaxFieldAlignmentAttr *MFAA = D->getAttr<MaxFieldAlignmentAttr>())
+ MaxFieldAlignment = MFAA->getAlignment();
- // If this is a C++ class, lay out the vtable and the non-virtual bases.
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D);
- if (RD)
- LayoutNonVirtualBases(RD);
+ if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
+ UpdateAlignment(AA->getMaxAlignment());
+ }
+}
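For reference, AlignMac68kAttr corresponds to Darwin's mac68k alignment pragma; a rough sketch of its effect (hypothetical struct, offsets assume a 4-byte int):

    #pragma options align=mac68k
    struct S {
      char c;   // offset 0
      int  i;   // offset 2: field alignment is capped at 2 bytes
    };          // sizeof(S) == 6, alignof(S) == 2
    #pragma options align=reset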
+void RecordLayoutBuilder::Layout(const RecordDecl *D) {
+ InitializeLayout(D);
LayoutFields(D);
+ // Finally, round the size of the total struct up to the alignment of the
+ // struct itself.
+ FinishLayout();
+}
+
+void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
+ InitializeLayout(RD);
+
+ // Lay out the vtable and the non-virtual bases.
+ LayoutNonVirtualBases(RD);
+
+ LayoutFields(RD);
+
NonVirtualSize = Size;
NonVirtualAlignment = Alignment;
- // If this is a C++ class, lay out its virtual bases and add its primary
- // virtual base offsets.
- if (RD) {
- LayoutVirtualBases(RD, RD);
+ // Lay out the virtual bases and add the primary virtual base offsets.
+ LayoutVirtualBases(RD, RD);
- VisitedVirtualBases.clear();
- AddPrimaryVirtualBaseOffsets(RD, 0, RD);
- }
+ VisitedVirtualBases.clear();
+ AddPrimaryVirtualBaseOffsets(RD, 0, RD);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
FinishLayout();
-
+
#ifndef NDEBUG
- if (RD) {
- // Check that we have base offsets for all bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- assert(Bases.count(BaseDecl) && "Did not find base offset!");
- }
-
- // And all virtual bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
- E = RD->vbases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- assert(VBases.count(BaseDecl) && "Did not find base offset!");
- }
+ // Check that we have base offsets for all bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(Bases.count(BaseDecl) && "Did not find base offset!");
+ }
+
+ // And all virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ assert(VBases.count(BaseDecl) && "Did not find base offset!");
}
#endif
}
-// FIXME. Impl is no longer needed.
-void ASTRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl) {
+void RecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D) {
if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
const ASTRecordLayout &SL = Context.getASTObjCInterfaceLayout(SD);
@@ -582,14 +1034,8 @@ void ASTRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D,
DataSize = Size;
}
- Packed = D->hasAttr<PackedAttr>();
-
- // The #pragma pack attribute specifies the maximum field alignment.
- if (const PragmaPackAttr *PPA = D->getAttr<PragmaPackAttr>())
- MaxFieldAlignment = PPA->getAlignment();
+ InitializeLayout(D);
- if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
- UpdateAlignment(AA->getMaxAlignment());
// Layout each ivar sequentially.
llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
Context.ShallowCollectObjCIvars(D, Ivars);
@@ -601,7 +1047,7 @@ void ASTRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D,
FinishLayout();
}
-void ASTRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
+void RecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
// Layout each field, for now, just sequentially, respecting alignment. In
// the future, this will need to be tweakable by targets.
for (RecordDecl::field_iterator Field = D->field_begin(),
@@ -609,17 +1055,17 @@ void ASTRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
LayoutField(*Field);
}
-void ASTRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
+void RecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
uint64_t TypeSize) {
assert(Context.getLangOptions().CPlusPlus &&
"Can only have wide bit-fields in C++!");
-
+
// Itanium C++ ABI 2.4:
- // If sizeof(T)*8 < n, let T' be the largest integral POD type with
+ // If sizeof(T)*8 < n, let T' be the largest integral POD type with
// sizeof(T')*8 <= n.
-
+
QualType IntegralPODTypes[] = {
- Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
+ Context.UnsignedCharTy, Context.UnsignedShortTy, Context.UnsignedIntTy,
Context.UnsignedLongTy, Context.UnsignedLongLongTy
};
@@ -634,24 +1080,24 @@ void ASTRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
Type = IntegralPODTypes[I];
}
assert(!Type.isNull() && "Did not find a type!");
-
+
unsigned TypeAlign = Context.getTypeAlign(Type);
// We're not going to use any of the unfilled bits in the last byte.
UnfilledBitsInLastByte = 0;
uint64_t FieldOffset;
-
+
if (IsUnion) {
DataSize = std::max(DataSize, FieldSize);
FieldOffset = 0;
} else {
// The bitfield is allocated starting at the next offset aligned appropriately
- // for T', with length n bits.
+ // for T', with length n bits.
FieldOffset = llvm::RoundUpToAlignment(DataSize, TypeAlign);
-
+
uint64_t NewSizeInBits = FieldOffset + FieldSize;
-
+
DataSize = llvm::RoundUpToAlignment(NewSizeInBits, 8);
UnfilledBitsInLastByte = DataSize - NewSizeInBits;
}
@@ -661,12 +1107,12 @@ void ASTRecordLayoutBuilder::LayoutWideBitField(uint64_t FieldSize,
// Update the size.
Size = std::max(Size, DataSize);
-
+
// Remember max struct/class alignment.
UpdateAlignment(TypeAlign);
}
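A short example of the wide bit-field rule cited above (hypothetical type; assumes a 64-bit unsigned long long):

    struct W {
      char c;
      long long wide : 100;   // n = 100 exceeds 64, so T' is unsigned long long
                              // and the bit-field starts at the next 64-bit
                              // aligned offset after 'c'
    };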
-void ASTRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
+void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
uint64_t FieldOffset = IsUnion ? 0 : (DataSize - UnfilledBitsInLastByte);
uint64_t FieldSize = D->getBitWidth()->EvaluateAsInt(Context).getZExtValue();
@@ -718,7 +1164,7 @@ void ASTRecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
UpdateAlignment(FieldAlign);
}
-void ASTRecordLayoutBuilder::LayoutField(const FieldDecl *D) {
+void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
if (D->isBitField()) {
LayoutBitField(D);
return;
@@ -791,7 +1237,7 @@ void ASTRecordLayoutBuilder::LayoutField(const FieldDecl *D) {
UpdateAlignment(FieldAlign);
}
-void ASTRecordLayoutBuilder::FinishLayout() {
+void RecordLayoutBuilder::FinishLayout() {
// In C++, records cannot be of size 0.
if (Context.getLangOptions().CPlusPlus && Size == 0)
Size = 8;
@@ -800,7 +1246,11 @@ void ASTRecordLayoutBuilder::FinishLayout() {
Size = llvm::RoundUpToAlignment(Size, Alignment);
}
-void ASTRecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
+void RecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
+ // The alignment is not modified when using 'mac68k' alignment.
+ if (IsMac68kAlign)
+ return;
+
if (NewAlignment <= Alignment)
return;
@@ -809,55 +1259,8 @@ void ASTRecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
Alignment = NewAlignment;
}
-const ASTRecordLayout *
-ASTRecordLayoutBuilder::ComputeLayout(ASTContext &Ctx,
- const RecordDecl *D) {
- ASTRecordLayoutBuilder Builder(Ctx);
-
- Builder.Layout(D);
-
- if (!isa<CXXRecordDecl>(D))
- return new (Ctx) ASTRecordLayout(Ctx, Builder.Size, Builder.Alignment,
- Builder.Size,
- Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size());
-
- // FIXME: This is not always correct. See the part about bitfields at
- // http://www.codesourcery.com/public/cxx-abi/abi.html#POD for more info.
- // FIXME: IsPODForThePurposeOfLayout should be stored in the record layout.
- bool IsPODForThePurposeOfLayout = cast<CXXRecordDecl>(D)->isPOD();
-
- // FIXME: This should be done in FinalizeLayout.
- uint64_t DataSize =
- IsPODForThePurposeOfLayout ? Builder.Size : Builder.DataSize;
- uint64_t NonVirtualSize =
- IsPODForThePurposeOfLayout ? DataSize : Builder.NonVirtualSize;
-
- return new (Ctx) ASTRecordLayout(Ctx, Builder.Size, Builder.Alignment,
- DataSize, Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size(),
- NonVirtualSize,
- Builder.NonVirtualAlignment,
- Builder.PrimaryBase,
- Builder.Bases, Builder.VBases);
-}
-
-const ASTRecordLayout *
-ASTRecordLayoutBuilder::ComputeLayout(ASTContext &Ctx,
- const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl) {
- ASTRecordLayoutBuilder Builder(Ctx);
-
- Builder.Layout(D, Impl);
-
- return new (Ctx) ASTRecordLayout(Ctx, Builder.Size, Builder.Alignment,
- Builder.DataSize,
- Builder.FieldOffsets.data(),
- Builder.FieldOffsets.size());
-}
-
const CXXMethodDecl *
-ASTRecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
+RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
assert(RD->isDynamicClass() && "Class does not have any virtual methods!");
// If a class isn't polymorphic it doesn't have a key function.
@@ -898,6 +1301,124 @@ ASTRecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
return 0;
}
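For context, ComputeKeyFunction picks the Itanium ABI key function, which determines the translation unit that emits the vtable; a quick sketch (hypothetical class):

    struct S {
      virtual void f();     // first non-pure, non-inline virtual member: the key
                            // function; the TU defining S::f emits S's vtable
      virtual void g() {}   // defined inline, so not a candidate
    };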
+/// getASTRecordLayout - Get or compute information about the layout of the
+/// specified record (struct/union/class), which indicates its size and field
+/// position information.
+const ASTRecordLayout &ASTContext::getASTRecordLayout(const RecordDecl *D) {
+ D = D->getDefinition();
+ assert(D && "Cannot get layout of forward declarations!");
+
+ // Look up this layout, if already laid out, return what we have.
+ // Note that we can't save a reference to the entry because this function
+ // is recursive.
+ const ASTRecordLayout *Entry = ASTRecordLayouts[D];
+ if (Entry) return *Entry;
+
+ const ASTRecordLayout *NewEntry;
+
+ if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D)) {
+ EmptySubobjectMap EmptySubobjects(*this, RD);
+
+ RecordLayoutBuilder Builder(*this, &EmptySubobjects);
+ Builder.Layout(RD);
+
+ // FIXME: This is not always correct. See the part about bitfields at
+ // http://www.codesourcery.com/public/cxx-abi/abi.html#POD for more info.
+ // FIXME: IsPODForThePurposeOfLayout should be stored in the record layout.
+ bool IsPODForThePurposeOfLayout = cast<CXXRecordDecl>(D)->isPOD();
+
+ // FIXME: This should be done in FinalizeLayout.
+ uint64_t DataSize =
+ IsPODForThePurposeOfLayout ? Builder.Size : Builder.DataSize;
+ uint64_t NonVirtualSize =
+ IsPODForThePurposeOfLayout ? DataSize : Builder.NonVirtualSize;
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.Size, Builder.Alignment,
+ DataSize, Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size(),
+ NonVirtualSize,
+ Builder.NonVirtualAlignment,
+ EmptySubobjects.SizeOfLargestEmptySubobject,
+ Builder.PrimaryBase,
+ Builder.PrimaryBaseIsVirtual,
+ Builder.Bases, Builder.VBases);
+ } else {
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.Size, Builder.Alignment,
+ Builder.Size,
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+ }
+
+ ASTRecordLayouts[D] = NewEntry;
+
+ if (getLangOptions().DumpRecordLayouts) {
+ llvm::errs() << "\n*** Dumping AST Record Layout\n";
+ DumpRecordLayout(D, llvm::errs());
+ }
+
+ return *NewEntry;
+}
+
+const CXXMethodDecl *ASTContext::getKeyFunction(const CXXRecordDecl *RD) {
+ RD = cast<CXXRecordDecl>(RD->getDefinition());
+ assert(RD && "Cannot get key function for forward declarations!");
+
+ const CXXMethodDecl *&Entry = KeyFunctions[RD];
+ if (!Entry)
+ Entry = RecordLayoutBuilder::ComputeKeyFunction(RD);
+ else
+ assert(Entry == RecordLayoutBuilder::ComputeKeyFunction(RD) &&
+ "Key function changed!");
+
+ return Entry;
+}
+
+/// getInterfaceLayoutImpl - Get or compute information about the
+/// layout of the given interface.
+///
+/// \param Impl - If given, also include the layout of the interface's
+/// implementation. This may differ by including synthesized ivars.
+const ASTRecordLayout &
+ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
+ const ObjCImplementationDecl *Impl) {
+ assert(!D->isForwardDecl() && "Invalid interface decl!");
+
+ // Look up this layout, if already laid out, return what we have.
+ ObjCContainerDecl *Key =
+ Impl ? (ObjCContainerDecl*) Impl : (ObjCContainerDecl*) D;
+ if (const ASTRecordLayout *Entry = ObjCLayouts[Key])
+ return *Entry;
+
+ // Add in synthesized ivar count if laying out an implementation.
+ if (Impl) {
+ unsigned SynthCount = CountNonClassIvars(D);
+ // If there aren't any synthesized ivars then reuse the interface
+ // entry. Note we can't cache this because we simply free all
+ // entries later; however we shouldn't look up implementations
+ // frequently.
+ if (SynthCount == 0)
+ return getObjCLayout(D, 0);
+ }
+
+ RecordLayoutBuilder Builder(*this, /*EmptySubobjects=*/0);
+ Builder.Layout(D);
+
+ const ASTRecordLayout *NewEntry =
+ new (*this) ASTRecordLayout(*this, Builder.Size, Builder.Alignment,
+ Builder.DataSize,
+ Builder.FieldOffsets.data(),
+ Builder.FieldOffsets.size());
+
+ ObjCLayouts[Key] = NewEntry;
+
+ return *NewEntry;
+}
+
static void PrintOffset(llvm::raw_ostream &OS,
uint64_t Offset, unsigned IndentLevel) {
OS << llvm::format("%4d | ", Offset);
diff --git a/lib/AST/RecordLayoutBuilder.h b/lib/AST/RecordLayoutBuilder.h
deleted file mode 100644
index f277c29398b3..000000000000
--- a/lib/AST/RecordLayoutBuilder.h
+++ /dev/null
@@ -1,170 +0,0 @@
-//===- ASTRecordLayoutBuilder.h - Helper class for building record layouts ===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_AST_RECORDLAYOUTBUILDER_H
-#define LLVM_CLANG_AST_RECORDLAYOUTBUILDER_H
-
-#include "clang/AST/RecordLayout.h"
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/SmallSet.h"
-#include "llvm/System/DataTypes.h"
-#include <map>
-
-namespace clang {
- class ASTContext;
- class ASTRecordLayout;
- class CXXRecordDecl;
- class FieldDecl;
- class ObjCImplementationDecl;
- class ObjCInterfaceDecl;
- class RecordDecl;
-
-class ASTRecordLayoutBuilder {
- ASTContext &Context;
-
- /// Size - The current size of the record layout.
- uint64_t Size;
-
- /// Alignment - The current alignment of the record layout.
- unsigned Alignment;
-
- llvm::SmallVector<uint64_t, 16> FieldOffsets;
-
- /// Packed - Whether the record is packed or not.
- bool Packed;
-
- /// UnfilledBitsInLastByte - If the last field laid out was a bitfield,
- /// this contains the number of bits in the last byte that can be used for
- /// an adjacent bitfield if necessary.
- unsigned char UnfilledBitsInLastByte;
-
- /// MaxFieldAlignment - The maximum allowed field alignment. This is set by
- /// #pragma pack.
- unsigned MaxFieldAlignment;
-
- /// DataSize - The data size of the record being laid out.
- uint64_t DataSize;
-
- bool IsUnion;
-
- uint64_t NonVirtualSize;
- unsigned NonVirtualAlignment;
-
- /// PrimaryBase - the primary base class (if one exists) of the class
- /// we're laying out.
- ASTRecordLayout::PrimaryBaseInfo PrimaryBase;
-
- /// Bases - base classes and their offsets in the record.
- ASTRecordLayout::BaseOffsetsMapTy Bases;
-
- // VBases - virtual base classes and their offsets in the record.
- ASTRecordLayout::BaseOffsetsMapTy VBases;
-
- /// IndirectPrimaryBases - Virtual base classes, direct or indirect, that are
- /// primary base classes for some other direct or indirect base class.
- llvm::SmallSet<const CXXRecordDecl*, 32> IndirectPrimaryBases;
-
- /// FirstNearlyEmptyVBase - The first nearly empty virtual base class in
- /// inheritance graph order. Used for determining the primary base class.
- const CXXRecordDecl *FirstNearlyEmptyVBase;
-
- /// VisitedVirtualBases - A set of all the visited virtual bases, used to
- /// avoid visiting virtual bases more than once.
- llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
-
- /// EmptyClassOffsets - A map from offsets to empty record decls.
- typedef std::multimap<uint64_t, const CXXRecordDecl *> EmptyClassOffsetsTy;
- EmptyClassOffsetsTy EmptyClassOffsets;
-
- ASTRecordLayoutBuilder(ASTContext &Ctx);
-
- void Layout(const RecordDecl *D);
- void Layout(const CXXRecordDecl *D);
- void Layout(const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl);
-
- void LayoutFields(const RecordDecl *D);
- void LayoutField(const FieldDecl *D);
- void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize);
- void LayoutBitField(const FieldDecl *D);
-
- /// DeterminePrimaryBase - Determine the primary base of the given class.
- void DeterminePrimaryBase(const CXXRecordDecl *RD);
-
- void SelectPrimaryVBase(const CXXRecordDecl *RD);
-
- /// IdentifyPrimaryBases - Identify all virtual base classes, direct or
- /// indirect, that are primary base classes for some other direct or indirect
- /// base class.
- void IdentifyPrimaryBases(const CXXRecordDecl *RD);
-
- bool IsNearlyEmpty(const CXXRecordDecl *RD) const;
-
- /// LayoutNonVirtualBases - Determines the primary base class (if any) and
- /// lays it out. Will then proceed to lay out all non-virtual base clasess.
- void LayoutNonVirtualBases(const CXXRecordDecl *RD);
-
- /// LayoutNonVirtualBase - Lays out a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *RD);
-
- void AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- const CXXRecordDecl *MostDerivedClass);
-
- /// LayoutVirtualBases - Lays out all the virtual bases.
- void LayoutVirtualBases(const CXXRecordDecl *RD,
- const CXXRecordDecl *MostDerivedClass);
-
- /// LayoutVirtualBase - Lays out a single virtual base.
- void LayoutVirtualBase(const CXXRecordDecl *RD);
-
- /// LayoutBase - Will lay out a base and return the offset where it was
- /// placed, in bits.
- uint64_t LayoutBase(const CXXRecordDecl *RD);
-
- /// canPlaceRecordAtOffset - Return whether a record (either a base class
- /// or a field) can be placed at the given offset.
- /// Returns false if placing the record will result in two components
- /// (direct or indirect) of the same type having the same offset.
- bool canPlaceRecordAtOffset(const CXXRecordDecl *RD, uint64_t Offset) const;
-
- /// canPlaceFieldAtOffset - Return whether a field can be placed at the given
- /// offset.
- bool canPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) const;
-
- /// UpdateEmptyClassOffsets - Called after a record (either a base class
- /// or a field) has been placed at the given offset. Will update the
- /// EmptyClassOffsets map if the class is empty or has any empty bases or
- /// fields.
- void UpdateEmptyClassOffsets(const CXXRecordDecl *RD, uint64_t Offset);
-
- /// UpdateEmptyClassOffsets - Called after a field has been placed at the
- /// given offset.
- void UpdateEmptyClassOffsets(const FieldDecl *FD, uint64_t Offset);
-
- /// FinishLayout - Finalize record layout. Adjust record size based on the
- /// alignment.
- void FinishLayout();
-
- void UpdateAlignment(unsigned NewAlignment);
-
- ASTRecordLayoutBuilder(const ASTRecordLayoutBuilder&); // DO NOT IMPLEMENT
- void operator=(const ASTRecordLayoutBuilder&); // DO NOT IMPLEMENT
-public:
- static const ASTRecordLayout *ComputeLayout(ASTContext &Ctx,
- const RecordDecl *RD);
- static const ASTRecordLayout *ComputeLayout(ASTContext &Ctx,
- const ObjCInterfaceDecl *D,
- const ObjCImplementationDecl *Impl);
- static const CXXMethodDecl *ComputeKeyFunction(const CXXRecordDecl *RD);
-};
-
-} // end namespace clang
-
-#endif
-
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 67fd74c288c7..80f5695e42ad 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -27,7 +27,7 @@ static struct StmtClassNameTable {
const char *Name;
unsigned Counter;
unsigned Size;
-} StmtClassInfo[Stmt::lastExprConstant+1];
+} StmtClassInfo[Stmt::lastStmtConstant+1];
static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
static bool Initialized = false;
@@ -36,11 +36,11 @@ static StmtClassNameTable &getStmtInfoTableEntry(Stmt::StmtClass E) {
// Initialize the table on the first use.
Initialized = true;
-#define ABSTRACT_EXPR(CLASS, PARENT)
+#define ABSTRACT_STMT(STMT)
#define STMT(CLASS, PARENT) \
StmtClassInfo[(unsigned)Stmt::CLASS##Class].Name = #CLASS; \
StmtClassInfo[(unsigned)Stmt::CLASS##Class].Size = sizeof(CLASS);
-#include "clang/AST/StmtNodes.def"
+#include "clang/AST/StmtNodes.inc"
return StmtClassInfo[E];
}
@@ -55,13 +55,13 @@ void Stmt::PrintStats() {
unsigned sum = 0;
fprintf(stderr, "*** Stmt/Expr Stats:\n");
- for (int i = 0; i != Stmt::lastExprConstant+1; i++) {
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == 0) continue;
sum += StmtClassInfo[i].Counter;
}
fprintf(stderr, " %d stmts/exprs total.\n", sum);
sum = 0;
- for (int i = 0; i != Stmt::lastExprConstant+1; i++) {
+ for (int i = 0; i != Stmt::lastStmtConstant+1; i++) {
if (StmtClassInfo[i].Name == 0) continue;
if (StmtClassInfo[i].Counter == 0) continue;
fprintf(stderr, " %d %s, %d each (%d bytes)\n",
diff --git a/lib/AST/StmtDumper.cpp b/lib/AST/StmtDumper.cpp
index a11e313fa8ae..b388a3b18ffa 100644
--- a/lib/AST/StmtDumper.cpp
+++ b/lib/AST/StmtDumper.cpp
@@ -66,8 +66,8 @@ namespace {
DumpSubTree(*CI++);
}
}
- OS << ')';
}
+ OS << ')';
} else {
Indent();
OS << "<<<NULL>>>";
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 52f627d449e2..9bef49c3984d 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -89,7 +89,7 @@ namespace {
void VisitStmt(Stmt *Node);
#define STMT(CLASS, PARENT) \
void Visit##CLASS(CLASS *Node);
-#include "clang/AST/StmtNodes.def"
+#include "clang/AST/StmtNodes.inc"
};
}
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index d45bb2f010c0..ac3a9eeda66f 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -36,7 +36,7 @@ namespace {
void VisitStmt(Stmt *S);
#define STMT(Node, Base) void Visit##Node(Node *S);
-#include "clang/AST/StmtNodes.def"
+#include "clang/AST/StmtNodes.inc"
/// \brief Visit a declaration that is referenced within an expression
/// or statement.
@@ -430,7 +430,215 @@ void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) {
ID.AddBoolean(S->isConstQualAdded());
}
+static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
+ UnaryOperator::Opcode &UnaryOp,
+ BinaryOperator::Opcode &BinaryOp) {
+ switch (S->getOperator()) {
+ case OO_None:
+ case OO_New:
+ case OO_Delete:
+ case OO_Array_New:
+ case OO_Array_Delete:
+ case OO_Arrow:
+ case OO_Call:
+ case OO_Conditional:
+ case NUM_OVERLOADED_OPERATORS:
+ llvm_unreachable("Invalid operator call kind");
+ return Stmt::ArraySubscriptExprClass;
+
+ case OO_Plus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UnaryOperator::Plus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BinaryOperator::Add;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Minus:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UnaryOperator::Minus;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BinaryOperator::Sub;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Star:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UnaryOperator::Deref;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BinaryOperator::Mul;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Slash:
+ BinaryOp = BinaryOperator::Div;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Percent:
+ BinaryOp = BinaryOperator::Rem;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Caret:
+ BinaryOp = BinaryOperator::Xor;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Amp:
+ if (S->getNumArgs() == 1) {
+ UnaryOp = UnaryOperator::AddrOf;
+ return Stmt::UnaryOperatorClass;
+ }
+
+ BinaryOp = BinaryOperator::And;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Pipe:
+ BinaryOp = BinaryOperator::Or;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Tilde:
+ UnaryOp = UnaryOperator::Not;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Exclaim:
+ UnaryOp = UnaryOperator::LNot;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Equal:
+ BinaryOp = BinaryOperator::Assign;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Less:
+ BinaryOp = BinaryOperator::LT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Greater:
+ BinaryOp = BinaryOperator::GT;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusEqual:
+ BinaryOp = BinaryOperator::AddAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_MinusEqual:
+ BinaryOp = BinaryOperator::SubAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_StarEqual:
+ BinaryOp = BinaryOperator::MulAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_SlashEqual:
+ BinaryOp = BinaryOperator::DivAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PercentEqual:
+ BinaryOp = BinaryOperator::RemAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_CaretEqual:
+ BinaryOp = BinaryOperator::XorAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_AmpEqual:
+ BinaryOp = BinaryOperator::AndAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_PipeEqual:
+ BinaryOp = BinaryOperator::OrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_LessLess:
+ BinaryOp = BinaryOperator::Shl;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterGreater:
+ BinaryOp = BinaryOperator::Shr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessLessEqual:
+ BinaryOp = BinaryOperator::ShlAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_GreaterGreaterEqual:
+ BinaryOp = BinaryOperator::ShrAssign;
+ return Stmt::CompoundAssignOperatorClass;
+
+ case OO_EqualEqual:
+ BinaryOp = BinaryOperator::EQ;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_ExclaimEqual:
+ BinaryOp = BinaryOperator::NE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_LessEqual:
+ BinaryOp = BinaryOperator::LE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_GreaterEqual:
+ BinaryOp = BinaryOperator::GE;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_AmpAmp:
+ BinaryOp = BinaryOperator::LAnd;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PipePipe:
+ BinaryOp = BinaryOperator::LOr;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_PlusPlus:
+ UnaryOp = S->getNumArgs() == 1? UnaryOperator::PreInc
+ : UnaryOperator::PostInc;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_MinusMinus:
+ UnaryOp = S->getNumArgs() == 1? UnaryOperator::PreDec
+ : UnaryOperator::PostDec;
+ return Stmt::UnaryOperatorClass;
+
+ case OO_Comma:
+ BinaryOp = BinaryOperator::Comma;
+ return Stmt::BinaryOperatorClass;
+
+
+ case OO_ArrowStar:
+ BinaryOp = BinaryOperator::PtrMemI;
+ return Stmt::BinaryOperatorClass;
+
+ case OO_Subscript:
+ return Stmt::ArraySubscriptExprClass;
+ }
+
+ llvm_unreachable("Invalid overloaded operator expression");
+}
+
+
void StmtProfiler::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *S) {
+ if (S->isTypeDependent()) {
+ // Type-dependent operator calls are profiled like their underlying
+ // syntactic operator.
+ UnaryOperator::Opcode UnaryOp = UnaryOperator::Extension;
+ BinaryOperator::Opcode BinaryOp = BinaryOperator::Comma;
+ Stmt::StmtClass SC = DecodeOperatorCall(S, UnaryOp, BinaryOp);
+
+ ID.AddInteger(SC);
+ for (unsigned I = 0, N = S->getNumArgs(); I != N; ++I)
+ Visit(S->getArg(I));
+ if (SC == Stmt::UnaryOperatorClass)
+ ID.AddInteger(UnaryOp);
+ else if (SC == Stmt::BinaryOperatorClass ||
+ SC == Stmt::CompoundAssignOperatorClass)
+ ID.AddInteger(BinaryOp);
+ else
+ assert(SC == Stmt::ArraySubscriptExprClass);
+
+ return;
+ }
+
VisitCallExpr(S);
ID.AddInteger(S->getOperator());
}
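The special case above exists because, inside a template, an operator expression is type-dependent and may instantiate to either a builtin operator or an overloaded operator call; profiling it as its syntactic operator keeps both outcomes equivalent. A hypothetical trigger:

    template <typename T>
    T twice(T x) { return x + x; }   // 'x + x' is type-dependent: it may become a
                                     // builtin add or a call to an overloaded operator+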
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index e9b17256415f..1c775efe5777 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/TypeLoc.h"
+#include "clang/Basic/Diagnostic.h"
using namespace clang;
@@ -102,7 +103,7 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
return getSourceDeclExpression()->getSourceRange();
case TemplateArgument::Type:
- return getTypeSourceInfo()->getTypeLoc().getFullSourceRange();
+ return getTypeSourceInfo()->getTypeLoc().getSourceRange();
case TemplateArgument::Template:
if (getTemplateQualifierRange().isValid())
@@ -119,3 +120,42 @@ SourceRange TemplateArgumentLoc::getSourceRange() const {
// Silence bonus gcc warning.
return SourceRange();
}
+
+const DiagnosticBuilder &clang::operator<<(const DiagnosticBuilder &DB,
+ const TemplateArgument &Arg) {
+ switch (Arg.getKind()) {
+ case TemplateArgument::Null:
+ return DB;
+
+ case TemplateArgument::Type:
+ return DB << Arg.getAsType();
+
+ case TemplateArgument::Declaration:
+ return DB << Arg.getAsDecl();
+
+ case TemplateArgument::Integral:
+ return DB << Arg.getAsIntegral()->toString(10);
+
+ case TemplateArgument::Template:
+ return DB << Arg.getAsTemplate();
+
+ case TemplateArgument::Expression: {
+ // This shouldn't actually ever happen, so it's okay that we're
+ // regurgitating an expression here.
+ // FIXME: We're guessing at LangOptions!
+ llvm::SmallString<32> Str;
+ llvm::raw_svector_ostream OS(Str);
+ LangOptions LangOpts;
+ LangOpts.CPlusPlus = true;
+ PrintingPolicy Policy(LangOpts);
+ Arg.getAsExpr()->printPretty(OS, 0, Policy);
+ return DB << OS.str();
+ }
+
+ case TemplateArgument::Pack:
+ // FIXME: Format arguments in a list!
+ return DB << "<parameter pack>";
+ }
+
+ return DB;
+}
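A hedged sketch of how this streaming operator gets used when building a diagnostic (the Diag call and the diagnostic ID below are illustrative only, not from this patch):

    // Hypothetical call site: the TemplateArgument formats itself per its kind.
    Diag(Loc, diag::err_bad_template_argument) << Arg;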
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 05e7fdc49e35..1aab65ebec55 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -276,6 +277,9 @@ QualType Type::getPointeeType() const {
/// array types and types that contain variable array types in their
/// declarator
bool Type::isVariablyModifiedType() const {
+ // FIXME: We should really keep a "variably modified" bit in Type, rather
+ // than walking the type hierarchy to recompute it.
+
// A VLA is a variably modified type.
if (isVariableArrayType())
return true;
@@ -344,29 +348,36 @@ const RecordType *Type::getAsUnionType() const {
return 0;
}
-ObjCInterfaceType::ObjCInterfaceType(QualType Canonical,
- ObjCInterfaceDecl *D,
- ObjCProtocolDecl **Protos, unsigned NumP) :
- Type(ObjCInterface, Canonical, /*Dependent=*/false),
- Decl(D), NumProtocols(NumP)
-{
+void ObjCInterfaceType::Destroy(ASTContext& C) {
+ this->~ObjCInterfaceType();
+ C.Deallocate(this);
+}
+
+ObjCObjectType::ObjCObjectType(QualType Canonical, QualType Base,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols)
+ : Type(ObjCObject, Canonical, false),
+ NumProtocols(NumProtocols),
+ BaseType(Base) {
+ assert(this->NumProtocols == NumProtocols &&
+ "bitfield overflow in protocol count");
if (NumProtocols)
- memcpy(reinterpret_cast<ObjCProtocolDecl**>(this + 1), Protos,
- NumProtocols * sizeof(*Protos));
+ memcpy(getProtocolStorage(), Protocols,
+ NumProtocols * sizeof(ObjCProtocolDecl*));
}
-void ObjCInterfaceType::Destroy(ASTContext& C) {
- this->~ObjCInterfaceType();
+void ObjCObjectTypeImpl::Destroy(ASTContext& C) {
+ this->~ObjCObjectTypeImpl();
C.Deallocate(this);
}
-const ObjCInterfaceType *Type::getAsObjCQualifiedInterfaceType() const {
- // There is no sugar for ObjCInterfaceType's, just return the canonical
+const ObjCObjectType *Type::getAsObjCQualifiedInterfaceType() const {
+ // There is no sugar for ObjCObjectType's, just return the canonical
// type pointer if it is the right class. There is no typedef information to
// return and these cannot be Address-space qualified.
- if (const ObjCInterfaceType *OIT = getAs<ObjCInterfaceType>())
- if (OIT->getNumProtocols())
- return OIT;
+ if (const ObjCObjectType *T = getAs<ObjCObjectType>())
+ if (T->getNumProtocols() && T->getInterface())
+ return T;
return 0;
}
@@ -374,17 +385,6 @@ bool Type::isObjCQualifiedInterfaceType() const {
return getAsObjCQualifiedInterfaceType() != 0;
}
-ObjCObjectPointerType::ObjCObjectPointerType(QualType Canonical, QualType T,
- ObjCProtocolDecl **Protos,
- unsigned NumP) :
- Type(ObjCObjectPointer, Canonical, /*Dependent=*/false),
- PointeeType(T), NumProtocols(NumP)
-{
- if (NumProtocols)
- memcpy(reinterpret_cast<ObjCProtocolDecl **>(this + 1), Protos,
- NumProtocols * sizeof(*Protos));
-}
-
void ObjCObjectPointerType::Destroy(ASTContext& C) {
this->~ObjCObjectPointerType();
C.Deallocate(this);
@@ -637,6 +637,8 @@ bool Type::isIncompleteType() const {
case IncompleteArray:
// An array of unknown size is an incomplete type (C99 6.2.5p22).
return true;
+ case ObjCObject:
+ return cast<ObjCObjectType>(this)->getBaseType()->isIncompleteType();
case ObjCInterface:
// ObjC interfaces are incomplete if they are @class, not @interface.
return cast<ObjCInterfaceType>(this)->getDecl()->isForwardDecl();
@@ -764,36 +766,106 @@ bool Type::isSpecifierType() const {
case TemplateTypeParm:
case SubstTemplateTypeParm:
case TemplateSpecialization:
- case QualifiedName:
+ case Elaborated:
case DependentName:
case ObjCInterface:
- case ObjCObjectPointer:
- case Elaborated:
+ case ObjCObject:
+ case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
return true;
default:
return false;
}
}
-bool Type::isElaboratedTypeSpecifier() const {
- if (getTypeClass() == Elaborated)
+TypeWithKeyword::~TypeWithKeyword() {
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTypeSpec(unsigned TypeSpec) {
+ switch (TypeSpec) {
+ default: return ETK_None;
+ case TST_typename: return ETK_Typename;
+ case TST_class: return ETK_Class;
+ case TST_struct: return ETK_Struct;
+ case TST_union: return ETK_Union;
+ case TST_enum: return ETK_Enum;
+ }
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForTypeSpec(unsigned TypeSpec) {
+ switch(TypeSpec) {
+ case TST_class: return TTK_Class;
+ case TST_struct: return TTK_Struct;
+ case TST_union: return TTK_Union;
+ case TST_enum: return TTK_Enum;
+ default: llvm_unreachable("Type specifier is not a tag type kind.");
+ }
+}
+
+ElaboratedTypeKeyword
+TypeWithKeyword::getKeywordForTagTypeKind(TagTypeKind Kind) {
+ switch (Kind) {
+ case TTK_Class: return ETK_Class;
+ case TTK_Struct: return ETK_Struct;
+ case TTK_Union: return ETK_Union;
+ case TTK_Enum: return ETK_Enum;
+ }
+ llvm_unreachable("Unknown tag type kind.");
+}
+
+TagTypeKind
+TypeWithKeyword::getTagTypeKindForKeyword(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_Class: return TTK_Class;
+ case ETK_Struct: return TTK_Struct;
+ case ETK_Union: return TTK_Union;
+ case ETK_Enum: return TTK_Enum;
+ case ETK_None: // Fall through.
+ case ETK_Typename:
+ llvm_unreachable("Elaborated type keyword is not a tag type kind.");
+ }
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+bool
+TypeWithKeyword::KeywordIsTagTypeKind(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ case ETK_None:
+ case ETK_Typename:
+ return false;
+ case ETK_Class:
+ case ETK_Struct:
+ case ETK_Union:
+ case ETK_Enum:
return true;
-
- if (const DependentNameType *Dependent = dyn_cast<DependentNameType>(this)) {
- switch (Dependent->getKeyword()) {
- case ETK_None:
- case ETK_Typename:
- return false;
-
- case ETK_Class:
- case ETK_Struct:
- case ETK_Union:
- case ETK_Enum:
- return true;
- }
}
-
- return false;
+ llvm_unreachable("Unknown elaborated type keyword.");
+}
+
+const char*
+TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
+ switch (Keyword) {
+ default: llvm_unreachable("Unknown elaborated type keyword.");
+ case ETK_None: return "";
+ case ETK_Typename: return "typename";
+ case ETK_Class: return "class";
+ case ETK_Struct: return "struct";
+ case ETK_Union: return "union";
+ case ETK_Enum: return "enum";
+ }
+}
+
+bool Type::isElaboratedTypeSpecifier() const {
+ ElaboratedTypeKeyword Keyword;
+ if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
+ Keyword = Elab->getKeyword();
+ else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
+ Keyword = DepName->getKeyword();
+ else
+ return false;
+
+ return TypeWithKeyword::KeywordIsTagTypeKind(Keyword);
}
const char *Type::getTypeClassName() const {
@@ -836,10 +908,12 @@ const char *BuiltinType::getName(const LangOptions &LO) const {
case UndeducedAuto: return "auto";
case ObjCId: return "id";
case ObjCClass: return "Class";
- case ObjCSel: return "SEL";
+ case ObjCSel: return "SEL";
}
}
+void FunctionType::ANCHOR() {} // Key function for FunctionType.
+
llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
switch (CC) {
case CC_Default: llvm_unreachable("no name for default cc");
@@ -848,6 +922,7 @@ llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_C: return "cdecl";
case CC_X86StdCall: return "stdcall";
case CC_X86FastCall: return "fastcall";
+ case CC_X86ThisCall: return "thiscall";
}
}
@@ -881,19 +956,6 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID) {
getExtInfo());
}
-void ObjCObjectPointerType::Profile(llvm::FoldingSetNodeID &ID,
- QualType OIT,
- ObjCProtocolDecl * const *protocols,
- unsigned NumProtocols) {
- ID.AddPointer(OIT.getAsOpaquePtr());
- for (unsigned i = 0; i != NumProtocols; i++)
- ID.AddPointer(protocols[i]);
-}
-
-void ObjCObjectPointerType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getPointeeType(), qual_begin(), getNumProtocols());
-}
-
/// LookThroughTypedefs - Return the ultimate type this typedef corresponds to
/// potentially looking through *all* consecutive typedefs. This returns the
/// sum of the type qualifiers, so if you have:
@@ -975,6 +1037,10 @@ static bool isDependent(const TemplateArgument &Arg) {
return Arg.getAsTemplate().isDependent();
case TemplateArgument::Declaration:
+ if (DeclContext *DC = dyn_cast<DeclContext>(Arg.getAsDecl()))
+ return DC->isDependentContext();
+ return Arg.getAsDecl()->getDeclContext()->isDependentContext();
+
case TemplateArgument::Integral:
// Never dependent
return false;
@@ -984,7 +1050,13 @@ static bool isDependent(const TemplateArgument &Arg) {
Arg.getAsExpr()->isValueDependent());
case TemplateArgument::Pack:
- assert(0 && "FIXME: Implement!");
+ for (TemplateArgument::pack_iterator P = Arg.pack_begin(),
+ PEnd = Arg.pack_end();
+ P != PEnd; ++P) {
+ if (isDependent(*P))
+ return true;
+ }
+
return false;
}
@@ -1081,36 +1153,53 @@ QualType QualifierCollector::apply(const Type *T) const {
return Context->getQualifiedType(T, *this);
}
-void ObjCInterfaceType::Profile(llvm::FoldingSetNodeID &ID,
- const ObjCInterfaceDecl *Decl,
- ObjCProtocolDecl * const *protocols,
- unsigned NumProtocols) {
- ID.AddPointer(Decl);
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID,
+ QualType BaseType,
+ ObjCProtocolDecl * const *Protocols,
+ unsigned NumProtocols) {
+ ID.AddPointer(BaseType.getAsOpaquePtr());
for (unsigned i = 0; i != NumProtocols; i++)
- ID.AddPointer(protocols[i]);
+ ID.AddPointer(Protocols[i]);
}
-void ObjCInterfaceType::Profile(llvm::FoldingSetNodeID &ID) {
- Profile(ID, getDecl(), qual_begin(), getNumProtocols());
+void ObjCObjectTypeImpl::Profile(llvm::FoldingSetNodeID &ID) {
+ Profile(ID, getBaseType(), qual_begin(), getNumProtocols());
}
-Linkage Type::getLinkage() const {
- // C++ [basic.link]p8:
- // Names not covered by these rules have no linkage.
+/// \brief Determine the linkage of this type.
+Linkage Type::getLinkage() const {
if (this != CanonicalType.getTypePtr())
return CanonicalType->getLinkage();
+
+ if (!LinkageKnown) {
+ CachedLinkage = getLinkageImpl();
+ LinkageKnown = true;
+ }
+
+ return static_cast<clang::Linkage>(CachedLinkage);
+}
+Linkage Type::getLinkageImpl() const {
+ // C++ [basic.link]p8:
+ // Names not covered by these rules have no linkage.
return NoLinkage;
}
-Linkage BuiltinType::getLinkage() const {
+void Type::ClearLinkageCache() {
+ if (this != CanonicalType.getTypePtr())
+ CanonicalType->ClearLinkageCache();
+ else
+ LinkageKnown = false;
+}
+
+Linkage BuiltinType::getLinkageImpl() const {
// C++ [basic.link]p8:
// A type is said to have linkage if and only if:
// - it is a fundamental type (3.9.1); or
return ExternalLinkage;
}
-Linkage TagType::getLinkage() const {
+Linkage TagType::getLinkageImpl() const {
// C++ [basic.link]p8:
// - it is a class or enumeration type that is named (or has a name for
// linkage purposes (7.1.3)) and the name has linkage; or
@@ -1121,39 +1210,39 @@ Linkage TagType::getLinkage() const {
// C++ [basic.link]p8:
// - it is a compound type (3.9.2) other than a class or enumeration,
// compounded exclusively from types that have linkage; or
-Linkage ComplexType::getLinkage() const {
+Linkage ComplexType::getLinkageImpl() const {
return ElementType->getLinkage();
}
-Linkage PointerType::getLinkage() const {
+Linkage PointerType::getLinkageImpl() const {
return PointeeType->getLinkage();
}
-Linkage BlockPointerType::getLinkage() const {
+Linkage BlockPointerType::getLinkageImpl() const {
return PointeeType->getLinkage();
}
-Linkage ReferenceType::getLinkage() const {
+Linkage ReferenceType::getLinkageImpl() const {
return PointeeType->getLinkage();
}
-Linkage MemberPointerType::getLinkage() const {
+Linkage MemberPointerType::getLinkageImpl() const {
return minLinkage(Class->getLinkage(), PointeeType->getLinkage());
}
-Linkage ArrayType::getLinkage() const {
+Linkage ArrayType::getLinkageImpl() const {
return ElementType->getLinkage();
}
-Linkage VectorType::getLinkage() const {
+Linkage VectorType::getLinkageImpl() const {
return ElementType->getLinkage();
}
-Linkage FunctionNoProtoType::getLinkage() const {
+Linkage FunctionNoProtoType::getLinkageImpl() const {
return getResultType()->getLinkage();
}
-Linkage FunctionProtoType::getLinkage() const {
+Linkage FunctionProtoType::getLinkageImpl() const {
Linkage L = getResultType()->getLinkage();
for (arg_type_iterator A = arg_type_begin(), AEnd = arg_type_end();
A != AEnd; ++A)
@@ -1162,10 +1251,10 @@ Linkage FunctionProtoType::getLinkage() const {
return L;
}
-Linkage ObjCInterfaceType::getLinkage() const {
+Linkage ObjCObjectType::getLinkageImpl() const {
return ExternalLinkage;
}
-Linkage ObjCObjectPointerType::getLinkage() const {
+Linkage ObjCObjectPointerType::getLinkageImpl() const {
return ExternalLinkage;
}
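
The linkage hunks above replace the per-subclass getLinkage() overrides with getLinkageImpl() plus a small cache on Type (LinkageKnown/CachedLinkage, cleared by ClearLinkageCache). The real implementation is in the diff itself; the snippet below is only a minimal standalone sketch of the same memoize-and-invalidate pattern, using made-up names (CachedValue, compute), to show why an explicit cache-clearing hook is needed once the answer can change after it has been queried.

    #include <iostream>

    class CachedValue {
      mutable bool Known = false;      // plays the role of Type::LinkageKnown
      mutable int Cached = 0;          // plays the role of Type::CachedLinkage
      int compute() const {            // stands in for getLinkageImpl()
        std::cout << "computing\n";
        return 42;
      }
    public:
      int get() const {
        if (!Known) {                  // first query: compute and remember
          Cached = compute();
          Known = true;
        }
        return Cached;                 // later queries hit the cache
      }
      void clear() { Known = false; }  // analogous to Type::ClearLinkageCache()
    };

    int main() {
      CachedValue V;
      V.get();    // prints "computing"
      V.get();    // cache hit, prints nothing
      V.clear();  // e.g. a redeclaration changed the underlying answer
      V.get();    // recomputes
      return 0;
    }
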
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index fd9fbc191868..4893b384dd10 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -27,13 +27,13 @@ namespace {
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
SourceRange Visit##CLASS##TypeLoc(CLASS##TypeLoc TyLoc) { \
- return TyLoc.getSourceRange(); \
+ return TyLoc.getLocalSourceRange(); \
}
#include "clang/AST/TypeLocNodes.def"
};
}
-SourceRange TypeLoc::getSourceRangeImpl(TypeLoc TL) {
+SourceRange TypeLoc::getLocalSourceRangeImpl(TypeLoc TL) {
if (TL.isNull()) return SourceRange();
return TypeLocRanger().Visit(TL);
}
@@ -92,11 +92,58 @@ namespace {
/// recursively, as if the entire tree had been written in the
/// given location.
void TypeLoc::initializeImpl(TypeLoc TL, SourceLocation Loc) {
- do {
- TypeLocInitializer(Loc).Visit(TL);
- } while ((TL = TL.getNextTypeLoc()));
+ while (true) {
+ switch (TL.getTypeLocClass()) {
+#define ABSTRACT_TYPELOC(CLASS, PARENT)
+#define TYPELOC(CLASS, PARENT) \
+ case CLASS: { \
+ CLASS##TypeLoc TLCasted = cast<CLASS##TypeLoc>(TL); \
+ TLCasted.initializeLocal(Loc); \
+ TL = TLCasted.getNextTypeLoc(); \
+ if (!TL) return; \
+ continue; \
+ }
+#include "clang/AST/TypeLocNodes.def"
+ }
+ }
+}
+
+SourceLocation TypeLoc::getBeginLoc() const {
+ TypeLoc Cur = *this;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ // FIXME: Currently QualifiedTypeLoc does not have a source range
+ // case Qualified:
+ case Elaborated:
+ break;
+ default:
+ TypeLoc Next = Cur.getNextTypeLoc();
+ if (Next.isNull()) break;
+ Cur = Next;
+ continue;
+ }
+ break;
+ }
+ return Cur.getLocalSourceRange().getBegin();
}
+SourceLocation TypeLoc::getEndLoc() const {
+ TypeLoc Cur = *this;
+ while (true) {
+ switch (Cur.getTypeLocClass()) {
+ default:
+ break;
+ case Qualified:
+ case Elaborated:
+ Cur = Cur.getNextTypeLoc();
+ continue;
+ }
+ break;
+ }
+ return Cur.getLocalSourceRange().getEnd();
+}
+
+
namespace {
struct TSTChecker : public TypeLocVisitor<TSTChecker, bool> {
// Overload resolution does the real work for us.
@@ -129,7 +176,7 @@ bool TypeSpecTypeLoc::classof(const TypeLoc *TL) {
// Reimplemented to account for GNU/C++ extension
// typeof unary-expression
// where there are no parentheses.
-SourceRange TypeOfExprTypeLoc::getSourceRange() const {
+SourceRange TypeOfExprTypeLoc::getLocalSourceRange() const {
if (getRParenLoc().isValid())
return SourceRange(getTypeofLoc(), getRParenLoc());
else
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 915d7af9a8c3..35a7e096994d 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -295,6 +295,9 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
case CC_X86FastCall:
S += " __attribute__((fastcall))";
break;
+ case CC_X86ThisCall:
+ S += " __attribute__((thiscall))";
+ break;
}
if (Info.getNoReturn())
S += " __attribute__((noreturn))";
@@ -497,16 +500,6 @@ void TypePrinter::PrintEnum(const EnumType *T, std::string &S) {
PrintTag(T->getDecl(), S);
}
-void TypePrinter::PrintElaborated(const ElaboratedType *T, std::string &S) {
- Print(T->getUnderlyingType(), S);
-
- // We don't actually make these in C, but the language options
- // sometimes lie to us -- for example, if someone calls
- // QualType::getAsString(). Just suppress the redundant tag if so.
- if (Policy.LangOpts.CPlusPlus)
- S = std::string(T->getNameForTagKind(T->getTagKind())) + ' ' + S;
-}
-
void TypePrinter::PrintTemplateTypeParm(const TemplateTypeParmType *T,
std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'parmname X'.
@@ -549,13 +542,17 @@ void TypePrinter::PrintInjectedClassName(const InjectedClassNameType *T,
PrintTemplateSpecialization(T->getInjectedTST(), S);
}
-void TypePrinter::PrintQualifiedName(const QualifiedNameType *T,
- std::string &S) {
+void TypePrinter::PrintElaborated(const ElaboratedType *T, std::string &S) {
std::string MyString;
{
llvm::raw_string_ostream OS(MyString);
- T->getQualifier()->print(OS, Policy);
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+ NestedNameSpecifier* Qualifier = T->getQualifier();
+ if (Qualifier)
+ Qualifier->print(OS, Policy);
}
std::string TypeStr;
@@ -575,14 +572,9 @@ void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S)
{
llvm::raw_string_ostream OS(MyString);
- switch (T->getKeyword()) {
- case ETK_None: break;
- case ETK_Typename: OS << "typename "; break;
- case ETK_Class: OS << "class "; break;
- case ETK_Struct: OS << "struct "; break;
- case ETK_Union: OS << "union "; break;
- case ETK_Enum: OS << "enum "; break;
- }
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
T->getQualifier()->print(OS, Policy);
@@ -607,25 +599,37 @@ void TypePrinter::PrintObjCInterface(const ObjCInterfaceType *T,
std::string &S) {
if (!S.empty()) // Prefix the basic type, e.g. 'typedefname X'.
S = ' ' + S;
-
+
std::string ObjCQIString = T->getDecl()->getNameAsString();
- if (T->getNumProtocols()) {
- ObjCQIString += '<';
- bool isFirst = true;
- for (ObjCInterfaceType::qual_iterator I = T->qual_begin(),
- E = T->qual_end();
- I != E; ++I) {
- if (isFirst)
- isFirst = false;
- else
- ObjCQIString += ',';
- ObjCQIString += (*I)->getNameAsString();
- }
- ObjCQIString += '>';
- }
S = ObjCQIString + S;
}
+void TypePrinter::PrintObjCObject(const ObjCObjectType *T,
+ std::string &S) {
+ if (T->qual_empty())
+ return Print(T->getBaseType(), S);
+
+ std::string tmp;
+ Print(T->getBaseType(), tmp);
+ tmp += '<';
+ bool isFirst = true;
+ for (ObjCObjectType::qual_iterator
+ I = T->qual_begin(), E = T->qual_end(); I != E; ++I) {
+ if (isFirst)
+ isFirst = false;
+ else
+ tmp += ',';
+ tmp += (*I)->getNameAsString();
+ }
+ tmp += '>';
+
+ if (!S.empty()) {
+ tmp += ' ';
+ tmp += S;
+ }
+ std::swap(tmp, S);
+}
+
void TypePrinter::PrintObjCObjectPointer(const ObjCObjectPointerType *T,
std::string &S) {
std::string ObjCQIString;
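
PrintElaborated now emits the tag keyword through TypeWithKeyword::getKeywordName(), and the new PrintObjCObject prints the base type followed by a comma-separated protocol list before prefixing the declarator. Purely as an illustration of the string shape that loop builds (plain std::string code, not clang API; the type and protocol names are made up), something like id<NSCopying,NSCoding> obj falls out of:

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      std::string Base = "id";                                  // base type
      std::vector<std::string> Protocols = {"NSCopying", "NSCoding"};
      std::string Declarator = "obj";                           // what S held on entry

      std::string S = Base;
      if (!Protocols.empty()) {
        S += '<';
        bool IsFirst = true;
        for (const std::string &P : Protocols) {
          if (!IsFirst)
            S += ',';                                           // same separator as the printer
          IsFirst = false;
          S += P;
        }
        S += '>';
      }
      if (!Declarator.empty())
        S += ' ' + Declarator;                                  // prefix the declarator

      std::cout << S << "\n";                                   // id<NSCopying,NSCoding> obj
      return 0;
    }
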
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 7f71e0acf7a3..6f2cb41a6e40 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -985,6 +985,11 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
} else
LoopSuccessor = Succ;
+ // Save the current value for the break targets.
+ // All breaks should go to the code following the loop.
+ SaveAndRestore<CFGBlock*> save_break(BreakTargetBlock);
+ BreakTargetBlock = LoopSuccessor;
+
// Because of short-circuit evaluation, the condition of the loop can span
// multiple basic blocks. Thus we need the "Entry" and "Exit" blocks that
// evaluate the condition.
@@ -1032,10 +1037,9 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
{
assert(F->getBody());
- // Save the current values for Block, Succ, and continue and break targets
- SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
- save_continue(ContinueTargetBlock),
- save_break(BreakTargetBlock);
+ // Save the current values for Block, Succ, and continue targets.
+ SaveAndRestore<CFGBlock*> save_Block(Block), save_Succ(Succ),
+ save_continue(ContinueTargetBlock);
// Create a new block to contain the (bottom) of the loop body.
Block = NULL;
@@ -1065,9 +1069,6 @@ CFGBlock* CFGBuilder::VisitForStmt(ForStmt* F) {
// represent the 'loop target' for looping back to the start of the loop.
ContinueTargetBlock->setLoopTarget(F);
- // All breaks should go to the code following the loop.
- BreakTargetBlock = LoopSuccessor;
-
// Now populate the body block, and in the process create new blocks as we
// walk the body of the loop.
CFGBlock* BodyBlock = addStmt(F->getBody());
@@ -1223,10 +1224,9 @@ CFGBlock* CFGBuilder::VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt* S) {
return 0;
Block = 0;
+ Succ = SyncBlock;
}
- Succ = SyncBlock;
-
// Inline the sync expression.
return addStmt(S->getSynchExpr());
}
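
The ForStmt change works because SaveAndRestore is an RAII guard: it snapshots a variable when constructed and restores it when the scope ends, so hoisting save_break above the condition blocks simply widens the region in which BreakTargetBlock points at LoopSuccessor. A minimal standalone version of that utility (the real one is llvm::SaveAndRestore) looks roughly like this:

    #include <cassert>

    template <typename T>
    class SaveAndRestore {
      T &Slot;      // the variable being shadowed
      T OldValue;   // value restored on scope exit
    public:
      explicit SaveAndRestore(T &X) : Slot(X), OldValue(X) {}
      SaveAndRestore(T &X, T NewValue) : Slot(X), OldValue(X) { X = NewValue; }
      ~SaveAndRestore() { Slot = OldValue; }
    };

    int main() {
      int BreakTarget = 1;
      {
        SaveAndRestore<int> Guard(BreakTarget, 2);  // loop scope sees 2
        assert(BreakTarget == 2);
      }                                             // guard dies, value restored
      assert(BreakTarget == 1);
      return 0;
    }
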
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index b4e0e242485b..a8e37087c09c 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -9,4 +9,4 @@ add_clang_library(clangAnalysis
UninitializedValues.cpp
)
-add_dependencies(clangAnalysis ClangDiagnosticAnalysis)
+add_dependencies(clangAnalysis ClangDiagnosticAnalysis ClangStmtNodes)
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 9e6f168f1711..2fd985f53803 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -127,6 +127,35 @@ const char *Diagnostic::getWarningOptionForDiag(unsigned DiagID) {
return 0;
}
+/// getCategoryNumberForDiag - Return the category number that a specified
+/// DiagID belongs to, or 0 if no category.
+unsigned Diagnostic::getCategoryNumberForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return Info->Category;
+ return 0;
+}
+
+/// getCategoryNameFromID - Given a category ID, return the name of the
+/// category, an empty string if CategoryID is zero, or null if CategoryID is
+/// invalid.
+const char *Diagnostic::getCategoryNameFromID(unsigned CategoryID) {
+  // The table of category names, indexed directly by category ID.
+ static const char *CategoryNameTable[] = {
+#define GET_CATEGORY_TABLE
+#define CATEGORY(X) X,
+#include "clang/Basic/DiagnosticGroups.inc"
+#undef GET_CATEGORY_TABLE
+ "<<END>>"
+ };
+ static const size_t CategoryNameTableSize =
+ sizeof(CategoryNameTable) / sizeof(CategoryNameTable[0])-1;
+
+ if (CategoryID >= CategoryNameTableSize) return 0;
+ return CategoryNameTable[CategoryID];
+}
+
+
+
Diagnostic::SFINAEResponse
Diagnostic::getDiagnosticSFINAEResponse(unsigned DiagID) {
if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) {
@@ -432,7 +461,7 @@ Diagnostic::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass) const {
struct WarningOption {
const char *Name;
const short *Members;
- const char *SubGroups;
+ const short *SubGroups;
};
#define GET_DIAG_ARRAYS
@@ -462,9 +491,9 @@ static void MapGroupMembers(const WarningOption *Group, diag::Mapping Mapping,
}
// Enable/disable all subgroups along with this one.
- if (const char *SubGroups = Group->SubGroups) {
- for (; *SubGroups != (char)-1; ++SubGroups)
- MapGroupMembers(&OptionTable[(unsigned char)*SubGroups], Mapping, Diags);
+ if (const short *SubGroups = Group->SubGroups) {
+ for (; *SubGroups != (short)-1; ++SubGroups)
+ MapGroupMembers(&OptionTable[(short)*SubGroups], Mapping, Diags);
}
}
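
getCategoryNameFromID indexes a generated string table whose length is derived with the usual sizeof(array)/sizeof(element) idiom, minus one for the trailing sentinel. A self-contained sketch of that lookup shape (the real entries come from DiagnosticGroups.inc; the category strings below are placeholders):

    #include <cstddef>
    #include <iostream>

    static const char *CategoryNameTable[] = {
      "",                // category 0: no category
      "Lexical issue",   // placeholder entries standing in for the generated data
      "Semantic issue",
      "<<END>>"          // sentinel, not a real category
    };

    static const std::size_t CategoryNameTableSize =
        sizeof(CategoryNameTable) / sizeof(CategoryNameTable[0]) - 1;

    static const char *getCategoryName(unsigned ID) {
      if (ID >= CategoryNameTableSize)
        return 0;                     // invalid category ID
      return CategoryNameTable[ID];   // "" for ID 0, a name otherwise
    }

    int main() {
      std::cout << getCategoryName(2) << "\n";                          // Semantic issue
      std::cout << (getCategoryName(99) ? "valid" : "invalid") << "\n"; // invalid
      return 0;
    }
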
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index ed0de8c4af09..8993e6713fbe 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -70,7 +70,8 @@ namespace {
KEYGNU = 16,
KEYMS = 32,
BOOLSUPPORT = 64,
- KEYALTIVEC = 128
+ KEYALTIVEC = 128,
+ KEYNOMS = 256
};
}
@@ -94,6 +95,7 @@ static void AddKeyword(llvm::StringRef Keyword,
else if (LangOpts.Microsoft && (Flags & KEYMS)) AddResult = 1;
else if (LangOpts.Bool && (Flags & BOOLSUPPORT)) AddResult = 2;
else if (LangOpts.AltiVec && (Flags & KEYALTIVEC)) AddResult = 2;
+ else if (!LangOpts.Microsoft && (Flags & KEYNOMS)) AddResult = 2;
// Don't add this keyword if disabled in this language.
if (AddResult == 0) return;
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 3ecab1d8c16f..e6d9785e1505 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -1154,6 +1154,27 @@ SourceLocation SourceManager::getLocation(const FileEntry *SourceFile,
return getLocForStartOfFile(FirstFID).getFileLocWithOffset(FilePos + Col - 1);
}
+/// Given a decomposed source location, move it up the include/instantiation
+/// stack to the parent source location. If this is possible, return the
+/// decomposed version of the parent in Loc and return false. If Loc is the
+/// top-level entry, return true and don't modify it.
+static bool MoveUpIncludeHierarchy(std::pair<FileID, unsigned> &Loc,
+ const SourceManager &SM) {
+ SourceLocation UpperLoc;
+ const SrcMgr::SLocEntry &Entry = SM.getSLocEntry(Loc.first);
+ if (Entry.isInstantiation())
+ UpperLoc = Entry.getInstantiation().getInstantiationLocStart();
+ else
+ UpperLoc = Entry.getFile().getIncludeLoc();
+
+ if (UpperLoc.isInvalid())
+ return true; // We reached the top.
+
+ Loc = SM.getDecomposedLoc(UpperLoc);
+ return false;
+}
+
+
/// \brief Determines the order of 2 source locations in the translation unit.
///
/// \returns true if LHS source location comes before RHS, false otherwise.
@@ -1172,78 +1193,74 @@ bool SourceManager::isBeforeInTranslationUnit(SourceLocation LHS,
// If we are comparing a source location with multiple locations in the same
// file, we get a big win by caching the result.
+ if (IsBeforeInTUCache.isCacheValid(LOffs.first, ROffs.first))
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
- if (LastLFIDForBeforeTUCheck == LOffs.first &&
- LastRFIDForBeforeTUCheck == ROffs.first)
- return LastResForBeforeTUCheck;
-
- LastLFIDForBeforeTUCheck = LOffs.first;
- LastRFIDForBeforeTUCheck = ROffs.first;
+ // Okay, we missed in the cache, start updating the cache for this query.
+ IsBeforeInTUCache.setQueryFIDs(LOffs.first, ROffs.first);
// "Traverse" the include/instantiation stacks of both locations and try to
- // find a common "ancestor".
+ // find a common "ancestor". FileIDs build a tree-like structure that
+ // reflects the #include hierarchy, and this algorithm needs to find the
+ // nearest common ancestor between the two locations. For example, if you
+ // have a.c that includes b.h and c.h, and are comparing a location in b.h to
+ // a location in c.h, we need to find that their nearest common ancestor is
+ // a.c, and compare the locations of the two #includes to find their relative
+ // ordering.
//
- // First we traverse the stack of the right location and check each level
- // against the level of the left location, while collecting all levels in a
- // "stack map".
-
- std::map<FileID, unsigned> ROffsMap;
- ROffsMap[ROffs.first] = ROffs.second;
-
- while (1) {
- SourceLocation UpperLoc;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(ROffs.first);
- if (Entry.isInstantiation())
- UpperLoc = Entry.getInstantiation().getInstantiationLocStart();
- else
- UpperLoc = Entry.getFile().getIncludeLoc();
-
- if (UpperLoc.isInvalid())
- break; // We reached the top.
-
- ROffs = getDecomposedLoc(UpperLoc);
-
- if (LOffs.first == ROffs.first)
- return LastResForBeforeTUCheck = LOffs.second < ROffs.second;
-
- ROffsMap[ROffs.first] = ROffs.second;
- }
-
- // We didn't find a common ancestor. Now traverse the stack of the left
- // location, checking against the stack map of the right location.
-
- while (1) {
- SourceLocation UpperLoc;
- const SrcMgr::SLocEntry &Entry = getSLocEntry(LOffs.first);
- if (Entry.isInstantiation())
- UpperLoc = Entry.getInstantiation().getInstantiationLocStart();
- else
- UpperLoc = Entry.getFile().getIncludeLoc();
-
- if (UpperLoc.isInvalid())
- break; // We reached the top.
-
- LOffs = getDecomposedLoc(UpperLoc);
+ // SourceManager assigns FileIDs in order of parsing. This means that an
+ // includee always has a larger FileID than an includer. While you might
+ // think that we could just compare the FileID's here, that doesn't work to
+ // compare a point at the end of a.c with a point within c.h. Though c.h has
+ // a larger FileID, we have to compare the include point of c.h to the
+ // location in a.c.
+ //
+ // Despite not being able to directly compare FileID's, we can tell that a
+ // larger FileID is necessarily more deeply nested than a lower one and use
+ // this information to walk up the tree to the nearest common ancestor.
+ do {
+ // If LOffs is larger than ROffs, then LOffs must be more deeply nested than
+ // ROffs, walk up the #include chain.
+ if (LOffs.first.ID > ROffs.first.ID) {
+ if (MoveUpIncludeHierarchy(LOffs, *this))
+ break; // We reached the top.
+
+ } else {
+ // Otherwise, ROffs is larger than LOffs, so ROffs must be more deeply
+ // nested than LOffs, walk up the #include chain.
+ if (MoveUpIncludeHierarchy(ROffs, *this))
+ break; // We reached the top.
+ }
+ } while (LOffs.first != ROffs.first);
- std::map<FileID, unsigned>::iterator I = ROffsMap.find(LOffs.first);
- if (I != ROffsMap.end())
- return LastResForBeforeTUCheck = LOffs.second < I->second;
+ // If we exited because we found a nearest common ancestor, compare the
+ // locations within the common file and cache them.
+ if (LOffs.first == ROffs.first) {
+ IsBeforeInTUCache.setCommonLoc(LOffs.first, LOffs.second, ROffs.second);
+ return IsBeforeInTUCache.getCachedResult(LOffs.second, ROffs.second);
}
// There is no common ancestor, most probably because one location is in the
- // predefines buffer.
- //
+ // predefines buffer or a PCH file.
// FIXME: We should rearrange the external interface so this simply never
// happens; it can't conceptually happen. Also see PR5662.
+ IsBeforeInTUCache.setQueryFIDs(FileID(), FileID()); // Don't try caching.
+ // Zip both entries up to the top level record.
+ while (!MoveUpIncludeHierarchy(LOffs, *this)) /*empty*/;
+ while (!MoveUpIncludeHierarchy(ROffs, *this)) /*empty*/;
+
  // If exactly one location is a memory buffer, assume it precedes the other.
- bool LIsMB = !getSLocEntry(LOffs.first).getFile().getContentCache()->Entry;
- bool RIsMB = !getSLocEntry(ROffs.first).getFile().getContentCache()->Entry;
+
+  // Strip off macro instantiation locations, going up to the top-level File
+ // SLocEntry.
+ bool LIsMB = getFileEntryForID(LOffs.first) == 0;
+ bool RIsMB = getFileEntryForID(ROffs.first) == 0;
if (LIsMB != RIsMB)
- return LastResForBeforeTUCheck = LIsMB;
+ return LIsMB;
// Otherwise, just assume FileIDs were created in order.
- return LastResForBeforeTUCheck = (LOffs.first < ROffs.first);
+ return LOffs.first < ROffs.first;
}
/// PrintStats - Print statistics to stderr.
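
The rewritten comparison leans on one invariant, spelled out in the comment block above: FileIDs are handed out in parse order, so an included file always has a larger ID than its includer, and of two locations the one with the larger FileID is always the more deeply nested. With that, finding the nearest common ancestor reduces to "walk whichever side currently has the larger ID up one include level". A toy version over plain integers (the parent array stands in for the include stack; this is not clang API):

    #include <iostream>
    #include <vector>

    int main() {
      // parent[i] is the file that includes file i; -1 marks the main file.
      // IDs are assigned in parse order, so an includee has a larger ID.
      std::vector<int> parent = {-1, 0, 0, 2};  // 0 includes 1 and 2; 2 includes 3

      int L = 1, R = 3;                         // locations in files 1 and 3
      while (L != R) {
        if (L > R)
          L = parent[L];                        // L is deeper, move it up
        else
          R = parent[R];                        // R is deeper, move it up
      }
      std::cout << "nearest common ancestor: file " << L << "\n";  // file 0
      return 0;
    }
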
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 4c0c59a109e7..6692e641f2a4 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -52,6 +52,7 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-n32";
UserLabelPrefix = "_";
+ HasAlignMac68kSupport = false;
}
// Out of line virtual dtor for TargetInfo.
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 3d5048ccb9bf..92fd417173bd 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -1138,6 +1138,7 @@ public:
DescriptionString = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-"
"a0:0:64-f80:128:128-n8:16:32";
+ HasAlignMac68kSupport = true;
}
};
@@ -1669,7 +1670,9 @@ protected:
public:
DarwinARMTargetInfo(const std::string& triple)
- : DarwinTargetInfo<ARMTargetInfo>(triple) {}
+ : DarwinTargetInfo<ARMTargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ }
};
} // end anonymous namespace.
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index dc87d148ed34..e0c23362b930 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -28,7 +28,8 @@ llvm::StringRef getClangRepositoryPath() {
if (End)
URLEnd = End;
- End = strstr(URL, "/clang/tools/clang");
+ // Strip off version from a build from an integration branch.
+ End = strstr(URL, "/src/tools/clang");
if (End)
URLEnd = End;
diff --git a/lib/Checker/BasicObjCFoundationChecks.cpp b/lib/Checker/BasicObjCFoundationChecks.cpp
index e7275ca551c2..b852e2ad9a48 100644
--- a/lib/Checker/BasicObjCFoundationChecks.cpp
+++ b/lib/Checker/BasicObjCFoundationChecks.cpp
@@ -521,11 +521,11 @@ void ClassReleaseChecker::PreVisitObjCMessageExpr(CheckerContext &C,
ObjCInterfaceDecl *Class = 0;
switch (ME->getReceiverKind()) {
case ObjCMessageExpr::Class:
- Class = ME->getClassReceiver()->getAs<ObjCInterfaceType>()->getDecl();
+ Class = ME->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
break;
case ObjCMessageExpr::SuperClass:
- Class = ME->getSuperType()->getAs<ObjCInterfaceType>()->getDecl();
+ Class = ME->getSuperType()->getAs<ObjCObjectType>()->getInterface();
break;
case ObjCMessageExpr::Instance:
diff --git a/lib/Checker/BasicStore.cpp b/lib/Checker/BasicStore.cpp
index 34470af29f4a..5be5ca615ed6 100644
--- a/lib/Checker/BasicStore.cpp
+++ b/lib/Checker/BasicStore.cpp
@@ -72,7 +72,7 @@ public:
/// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
  /// It updates the GRState object in place with the dead values removed.
- Store RemoveDeadBindings(Store store, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
@@ -251,11 +251,12 @@ Store BasicStoreManager::Remove(Store store, Loc loc) {
}
}
-Store BasicStoreManager::RemoveDeadBindings(Store store, Stmt* Loc,
+const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
+ Store store = state.getStore();
BindingsTy B = GetBindings(store);
typedef SVal::symbol_iterator symbol_iterator;
@@ -329,7 +330,8 @@ Store BasicStoreManager::RemoveDeadBindings(Store store, Stmt* Loc,
}
}
- return store;
+ state.setStore(store);
+ return StateMgr.getPersistentState(state);
}
Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
diff --git a/lib/Checker/CFRefCount.cpp b/lib/Checker/CFRefCount.cpp
index d26ee1db5653..42e6f67f0172 100644
--- a/lib/Checker/CFRefCount.cpp
+++ b/lib/Checker/CFRefCount.cpp
@@ -37,6 +37,49 @@ using namespace clang;
using llvm::StringRef;
using llvm::StrInStrNoCase;
+namespace {
+class InstanceReceiver {
+ const ObjCMessageExpr *ME;
+ const LocationContext *LC;
+public:
+ InstanceReceiver(const ObjCMessageExpr *me = 0,
+ const LocationContext *lc = 0) : ME(me), LC(lc) {}
+
+ bool isValid() const {
+ return ME && ME->isInstanceMessage();
+ }
+ operator bool() const {
+ return isValid();
+ }
+
+ SVal getSValAsScalarOrLoc(const GRState *state) {
+ assert(isValid());
+ // We have an expression for the receiver? Fetch the value
+ // of that expression.
+ if (const Expr *Ex = ME->getInstanceReceiver())
+ return state->getSValAsScalarOrLoc(Ex);
+
+ // Otherwise we are sending a message to super. In this case the
+ // object reference is the same as 'self'.
+ if (const ImplicitParamDecl *SelfDecl = LC->getSelfDecl())
+ return state->getSVal(state->getRegion(SelfDecl, LC));
+
+ return UnknownVal();
+ }
+
+ SourceRange getSourceRange() const {
+ assert(isValid());
+ if (const Expr *Ex = ME->getInstanceReceiver())
+ return Ex->getSourceRange();
+
+ // Otherwise we are sending a message to super.
+ SourceLocation L = ME->getSuperLoc();
+ assert(L.isValid());
+ return SourceRange(L, L);
+ }
+};
+}
+
static const ObjCMethodDecl*
ResolveToInterfaceMethodDecl(const ObjCMethodDecl *MD) {
ObjCInterfaceDecl *ID =
@@ -618,11 +661,11 @@ public:
break;
case ObjCMessageExpr::Class:
- OD = ME->getClassReceiver()->getAs<ObjCInterfaceType>()->getDecl();
+ OD = ME->getClassReceiver()->getAs<ObjCObjectType>()->getInterface();
break;
case ObjCMessageExpr::SuperClass:
- OD = ME->getSuperType()->getAs<ObjCInterfaceType>()->getDecl();
+ OD = ME->getSuperType()->getAs<ObjCObjectType>()->getInterface();
break;
}
@@ -1374,7 +1417,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
// we just use the 'ID' from the message expression.
SVal receiverV;
- if (const Expr *Receiver = ME->getInstanceReceiver()) {
+ if (Receiver) {
receiverV = state->getSValAsScalarOrLoc(Receiver);
// FIXME: Eventually replace the use of state->get<RefBindings> with
@@ -1724,7 +1767,7 @@ private:
void ProcessNonLeakError(ExplodedNodeSet& Dst,
GRStmtNodeBuilder& Builder,
- Expr* NodeExpr, Expr* ErrorExpr,
+ Expr* NodeExpr, SourceRange ErrorRange,
ExplodedNode* Pred,
const GRState* St,
RefVal::Kind hasErr, SymbolRef Sym);
@@ -1768,7 +1811,7 @@ public:
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
Expr* Ex,
- Expr* Receiver,
+ InstanceReceiver Receiver,
const RetainSummary& Summ,
const MemRegion *Callee,
ExprIterator arg_beg, ExprIterator arg_end,
@@ -2552,7 +2595,8 @@ static QualType GetReturnType(const Expr* RetE, ASTContext& Ctx) {
// is a call to a class method whose type we can resolve. In such
// cases, promote the return type to XXX* (where XXX is the class).
const ObjCInterfaceDecl *D = ME->getReceiverInterface();
- return !D ? RetTy : Ctx.getPointerType(Ctx.getObjCInterfaceType(D));
+ return !D ? RetTy :
+ Ctx.getObjCObjectPointerType(Ctx.getObjCInterfaceType(D));
}
return RetTy;
@@ -2562,7 +2606,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
Expr* Ex,
- Expr* Receiver,
+ InstanceReceiver Receiver,
const RetainSummary& Summ,
const MemRegion *Callee,
ExprIterator arg_beg, ExprIterator arg_end,
@@ -2571,7 +2615,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
// Evaluate the effect of the arguments.
RefVal::Kind hasErr = (RefVal::Kind) 0;
unsigned idx = 0;
- Expr* ErrorExpr = NULL;
+ SourceRange ErrorRange;
SymbolRef ErrorSym = 0;
llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate;
@@ -2584,7 +2628,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
if (RefBindings::data_type* T = state->get<RefBindings>(Sym)) {
state = Update(state, Sym, *T, Summ.getArg(idx), hasErr);
if (hasErr) {
- ErrorExpr = *I;
+ ErrorRange = (*I)->getSourceRange();
ErrorSym = Sym;
break;
}
@@ -2677,13 +2721,13 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
}
// Evaluate the effect on the message receiver.
- if (!ErrorExpr && Receiver) {
- SymbolRef Sym = state->getSValAsScalarOrLoc(Receiver).getAsLocSymbol();
+ if (!ErrorRange.isValid() && Receiver) {
+ SymbolRef Sym = Receiver.getSValAsScalarOrLoc(state).getAsLocSymbol();
if (Sym) {
if (const RefVal* T = state->get<RefBindings>(Sym)) {
state = Update(state, Sym, *T, Summ.getReceiverEffect(), hasErr);
if (hasErr) {
- ErrorExpr = Receiver;
+ ErrorRange = Receiver.getSourceRange();
ErrorSym = Sym;
}
}
@@ -2692,7 +2736,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
// Process any errors.
if (hasErr) {
- ProcessNonLeakError(Dst, Builder, Ex, ErrorExpr, Pred, state,
+ ProcessNonLeakError(Dst, Builder, Ex, ErrorRange, Pred, state,
hasErr, ErrorSym);
return;
}
@@ -2703,7 +2747,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
if (RE.getKind() == RetEffect::OwnedWhenTrackedReceiver) {
bool found = false;
if (Receiver) {
- SVal V = state->getSValAsScalarOrLoc(Receiver);
+ SVal V = Receiver.getSValAsScalarOrLoc(state);
if (SymbolRef Sym = V.getAsLocSymbol())
if (state->get<RefBindings>(Sym)) {
found = true;
@@ -2759,8 +2803,8 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
}
case RetEffect::ReceiverAlias: {
- assert (Receiver);
- SVal V = state->getSValAsScalarOrLoc(Receiver);
+ assert(Receiver);
+ SVal V = Receiver.getSValAsScalarOrLoc(state);
state = state->BindExpr(Ex, V, false);
break;
}
@@ -2848,7 +2892,8 @@ void CFRefCount::EvalObjCMessageExpr(ExplodedNodeSet& Dst,
: Summaries.getClassMethodSummary(ME);
assert(Summ && "RetainSummary is null");
- EvalSummary(Dst, Eng, Builder, ME, ME->getInstanceReceiver(), *Summ, NULL,
+ EvalSummary(Dst, Eng, Builder, ME,
+ InstanceReceiver(ME, Pred->getLocationContext()), *Summ, NULL,
ME->arg_begin(), ME->arg_end(), Pred, state);
}
@@ -3408,7 +3453,7 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
GRStmtNodeBuilder& Builder,
- Expr* NodeExpr, Expr* ErrorExpr,
+ Expr* NodeExpr, SourceRange ErrorRange,
ExplodedNode* Pred,
const GRState* St,
RefVal::Kind hasErr, SymbolRef Sym) {
@@ -3439,7 +3484,7 @@ void CFRefCount::ProcessNonLeakError(ExplodedNodeSet& Dst,
}
CFRefReport *report = new CFRefReport(*BT, *this, N, Sym);
- report->addRange(ErrorExpr->getSourceRange());
+ report->addRange(ErrorRange);
BR->EmitReport(report);
}
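
A good part of this CFRefCount diff is mechanical: the old Expr* Receiver becomes the new InstanceReceiver wrapper, which carries either an explicit receiver expression or the implicit self of a super send and reports validity through operator bool, so call sites keep reading as if (Receiver). The class itself is in the hunk above; below is only a tiny standalone sketch of that operator-bool wrapper idiom, with invented names (Handle, describe):

    #include <iostream>
    #include <string>

    class Handle {
      const std::string *Explicit;  // set when there is an explicit value
      bool Implicit;                // set when the value is implied (like 'super')
    public:
      Handle() : Explicit(0), Implicit(false) {}
      explicit Handle(const std::string &S) : Explicit(&S), Implicit(false) {}
      static Handle implicit() { Handle H; H.Implicit = true; return H; }

      bool isValid() const { return Explicit || Implicit; }
      operator bool() const { return isValid(); }  // lets callers write if (H)

      std::string describe() const {
        if (Explicit) return *Explicit;
        if (Implicit) return "<implicit>";
        return "<none>";
      }
    };

    int main() {
      std::string Recv = "obj";
      Handle A(Recv);
      Handle B = Handle::implicit();
      Handle C;
      std::cout << (A ? A.describe() : "invalid") << "\n";  // obj
      std::cout << (B ? B.describe() : "invalid") << "\n";  // <implicit>
      std::cout << (C ? C.describe() : "invalid") << "\n";  // invalid
      return 0;
    }
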
diff --git a/lib/Checker/CMakeLists.txt b/lib/Checker/CMakeLists.txt
index 82e93a425e6e..9c6adc6bf2ee 100644
--- a/lib/Checker/CMakeLists.txt
+++ b/lib/Checker/CMakeLists.txt
@@ -14,6 +14,7 @@ add_clang_library(clangChecker
BuiltinFunctionChecker.cpp
CallAndMessageChecker.cpp
CallInliner.cpp
+ CastSizeChecker.cpp
CastToStructChecker.cpp
CFRefCount.cpp
CheckDeadStores.cpp
@@ -69,3 +70,5 @@ add_clang_library(clangChecker
ValueManager.cpp
VLASizeChecker.cpp
)
+
+add_dependencies(clangChecker ClangStmtNodes)
diff --git a/lib/Checker/CastSizeChecker.cpp b/lib/Checker/CastSizeChecker.cpp
new file mode 100644
index 000000000000..754d775a65d1
--- /dev/null
+++ b/lib/Checker/CastSizeChecker.cpp
@@ -0,0 +1,82 @@
+//=== CastSizeChecker.cpp ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// CastSizeChecker checks that when a malloc'ed symbolic region is cast to type T,
+// the size of the symbolic region is a multiple of the size of T.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/AST/CharUnits.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "GRExprEngineInternalChecks.h"
+
+using namespace clang;
+
+namespace {
+class CastSizeChecker : public CheckerVisitor<CastSizeChecker> {
+ BuiltinBug *BT;
+public:
+ CastSizeChecker() : BT(0) {}
+ static void *getTag();
+ void PreVisitCastExpr(CheckerContext &C, const CastExpr *B);
+};
+}
+
+void *CastSizeChecker::getTag() {
+ static int x;
+ return &x;
+}
+
+void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
+ const Expr *E = CE->getSubExpr();
+ ASTContext &Ctx = C.getASTContext();
+ QualType ToTy = Ctx.getCanonicalType(CE->getType());
+ PointerType *ToPTy = dyn_cast<PointerType>(ToTy.getTypePtr());
+
+ if (!ToPTy)
+ return;
+
+ QualType ToPointeeTy = ToPTy->getPointeeType();
+
+ const MemRegion *R = C.getState()->getSVal(E).getAsRegion();
+ if (R == 0)
+ return;
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
+ if (SR == 0)
+ return;
+
+ llvm::Optional<SVal> V =
+ C.getEngine().getStoreManager().getExtent(C.getState(), SR);
+ if (!V)
+ return;
+
+ const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(V);
+ if (!CI)
+ return;
+
+ CharUnits RegionSize = CharUnits::fromQuantity(CI->getValue().getSExtValue());
+ CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+ if (RegionSize % TypeSize != 0) {
+ if (ExplodedNode *N = C.GenerateSink()) {
+ if (!BT)
+ BT = new BuiltinBug("Cast region with wrong size.",
+ "Cast a region whose size is not a multiple of the"
+ " destination type size.");
+ RangedBugReport *R = new RangedBugReport(*BT, BT->getDescription(), N);
+ R->addRange(CE->getSourceRange());
+ C.EmitReport(R);
+ }
+ }
+}
+
+
+void clang::RegisterCastSizeChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CastSizeChecker());
+}
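
For context, the kind of source this new checker is meant to flag is a cast where the allocated size cannot hold a whole number of elements of the destination type. A made-up example (not from the test suite; sizes assume a typical 4-byte int):

    #include <cstdlib>

    struct Record { int a; int b; };   // sizeof(Record) == 8 on common targets

    int main() {
      // 10 is not a multiple of sizeof(Record), so the analyzer would report
      // "Cast a region whose size is not a multiple of the destination type
      // size." at this cast.
      Record *Bad = (Record *)std::malloc(10);

      // A clean allocation of two whole Records, by contrast:
      Record *Ok = (Record *)std::malloc(2 * sizeof(Record));

      std::free(Bad);
      std::free(Ok);
      return 0;
    }
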
diff --git a/lib/Checker/CheckObjCDealloc.cpp b/lib/Checker/CheckObjCDealloc.cpp
index c23be873f481..11ddaca9d6fc 100644
--- a/lib/Checker/CheckObjCDealloc.cpp
+++ b/lib/Checker/CheckObjCDealloc.cpp
@@ -115,7 +115,8 @@ void clang::CheckObjCDealloc(const ObjCImplementationDecl* D,
QualType T = ID->getType();
if (!T->isObjCObjectPointerType() ||
- ID->getAttr<IBOutletAttr>()) // Skip IBOutlets.
+ ID->getAttr<IBOutletAttr>() || // Skip IBOutlets.
+ ID->getAttr<IBOutletCollectionAttr>()) // Skip IBOutletCollections.
continue;
containsPointerIvar = true;
diff --git a/lib/Checker/FlatStore.cpp b/lib/Checker/FlatStore.cpp
index 2af9ffa4a4d9..7f1c579c6edb 100644
--- a/lib/Checker/FlatStore.cpp
+++ b/lib/Checker/FlatStore.cpp
@@ -44,11 +44,11 @@ public:
}
SVal ArrayToPointer(Loc Array);
- Store RemoveDeadBindings(Store store, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
- return store;
+ return StateMgr.getPersistentState(state);
}
Store BindDecl(Store store, const VarRegion *VR, SVal initVal);
diff --git a/lib/Checker/GRCXXExprEngine.cpp b/lib/Checker/GRCXXExprEngine.cpp
index 00ac99559282..18e112cc8d36 100644
--- a/lib/Checker/GRCXXExprEngine.cpp
+++ b/lib/Checker/GRCXXExprEngine.cpp
@@ -86,7 +86,7 @@ void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
const CXXConstructorDecl *CD = E->getConstructor();
assert(CD);
- if (!CD->isThisDeclarationADefinition())
+ if (!(CD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
// FIXME: invalidate the object.
return;
@@ -147,7 +147,7 @@ void GRExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
assert(MD && "not a CXXMethodDecl?");
- if (!MD->isThisDeclarationADefinition())
+ if (!(MD->isThisDeclarationADefinition() && AMgr.shouldInlineCall()))
// FIXME: conservative method call evaluation.
return;
diff --git a/lib/Checker/GRExprEngine.cpp b/lib/Checker/GRExprEngine.cpp
index 67090b86694f..24176586728c 100644
--- a/lib/Checker/GRExprEngine.cpp
+++ b/lib/Checker/GRExprEngine.cpp
@@ -1017,9 +1017,8 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const ExplodedNode *Pred,
GRBlockCounter BC) {
-
return BC.getNumVisited(Pred->getLocationContext()->getCurrentStackFrame(),
- B->getBlockID()) < 3;
+ B->getBlockID()) < AMgr.getMaxLoop();
}
//===----------------------------------------------------------------------===//
@@ -1810,6 +1809,28 @@ void GRExprEngine::EvalLocation(ExplodedNodeSet &Dst, Stmt *S,
}
}
+bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
+ ExplodedNode *Pred) {
+ const GRState *state = GetState(Pred);
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ if (!FD->getBody(FD))
+ return false;
+
+ // Now we have the definition of the callee, create a CallEnter node.
+ CallEnter Loc(CE, FD, Pred->getLocationContext());
+
+ ExplodedNode *N = Builder->generateNode(Loc, state, Pred);
+ if (N)
+ Dst.Add(N);
+ return true;
+}
+
void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
CallExpr::arg_iterator AI,
CallExpr::arg_iterator AE,
@@ -1889,6 +1910,10 @@ void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
// If the callee is processed by a checker, skip the rest logic.
if (CheckerEvalCall(CE, DstChecker, *DI))
DstTmp3.insert(DstChecker);
+ else if (AMgr.shouldInlineCall() && InlineCall(Dst, CE, *DI)) {
+ // Callee is inlined. We shouldn't do post call checking.
+ return;
+ }
else {
for (ExplodedNodeSet::iterator DI_Checker = DstChecker.begin(),
DE_Checker = DstChecker.end();
@@ -1944,7 +1969,7 @@ void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
//===----------------------------------------------------------------------===//
static std::pair<const void*,const void*> EagerlyAssumeTag
- = std::pair<const void*,const void*>(&EagerlyAssumeTag,0);
+ = std::pair<const void*,const void*>(&EagerlyAssumeTag,static_cast<void*>(0));
void GRExprEngine::EvalEagerlyAssume(ExplodedNodeSet &Dst, ExplodedNodeSet &Src,
Expr *Ex) {
@@ -2595,8 +2620,8 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
Dst.Add(Pred);
return;
}
- else if (T->isObjCInterfaceType()) {
- // Some code tries to take the sizeof an ObjCInterfaceType, relying that
+ else if (T->getAs<ObjCObjectType>()) {
+    // Some code tries to take the sizeof an ObjCObjectType, relying on the fact that
// the compiler has laid out its representation. Just report Unknown
// for these.
Dst.Add(Pred);
diff --git a/lib/Checker/GRExprEngineExperimentalChecks.cpp b/lib/Checker/GRExprEngineExperimentalChecks.cpp
index 89b4e4b6392f..6066a1c74d33 100644
--- a/lib/Checker/GRExprEngineExperimentalChecks.cpp
+++ b/lib/Checker/GRExprEngineExperimentalChecks.cpp
@@ -36,5 +36,6 @@ void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
RegisterPointerSubChecker(Eng);
RegisterPointerArithChecker(Eng);
RegisterCastToStructChecker(Eng);
+ RegisterCastSizeChecker(Eng);
RegisterArrayBoundChecker(Eng);
}
diff --git a/lib/Checker/GRExprEngineInternalChecks.h b/lib/Checker/GRExprEngineInternalChecks.h
index d1176001cac7..335b85e308e0 100644
--- a/lib/Checker/GRExprEngineInternalChecks.h
+++ b/lib/Checker/GRExprEngineInternalChecks.h
@@ -26,6 +26,7 @@ void RegisterAttrNonNullChecker(GRExprEngine &Eng);
void RegisterBuiltinFunctionChecker(GRExprEngine &Eng);
void RegisterCallAndMessageChecker(GRExprEngine &Eng);
void RegisterCastToStructChecker(GRExprEngine &Eng);
+void RegisterCastSizeChecker(GRExprEngine &Eng);
void RegisterDereferenceChecker(GRExprEngine &Eng);
void RegisterDivZeroChecker(GRExprEngine &Eng);
void RegisterFixedAddressChecker(GRExprEngine &Eng);
diff --git a/lib/Checker/GRState.cpp b/lib/Checker/GRState.cpp
index f68e10b0cbc9..b16e922776e5 100644
--- a/lib/Checker/GRState.cpp
+++ b/lib/Checker/GRState.cpp
@@ -51,11 +51,10 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
state, RegionRoots);
// Clean up the store.
- NewState.St = StoreMgr->RemoveDeadBindings(NewState.St, Loc, LCtx, SymReaper,
- RegionRoots);
+ const GRState *s = StoreMgr->RemoveDeadBindings(NewState, Loc, LCtx,
+ SymReaper, RegionRoots);
- return ConstraintMgr->RemoveDeadBindings(getPersistentState(NewState),
- SymReaper);
+ return ConstraintMgr->RemoveDeadBindings(s, SymReaper);
}
const GRState *GRState::unbindLoc(Loc LV) const {
diff --git a/lib/Checker/LLVMConventionsChecker.cpp b/lib/Checker/LLVMConventionsChecker.cpp
index 14f0fc1280df..39ded431279c 100644
--- a/lib/Checker/LLVMConventionsChecker.cpp
+++ b/lib/Checker/LLVMConventionsChecker.cpp
@@ -47,7 +47,7 @@ static bool InStdNamespace(const Decl *D) {
}
static bool IsStdString(QualType T) {
- if (const QualifiedNameType *QT = T->getAs<QualifiedNameType>())
+ if (const ElaboratedType *QT = T->getAs<ElaboratedType>())
T = QT->getNamedType();
const TypedefType *TT = T->getAs<TypedefType>();
diff --git a/lib/Checker/MallocChecker.cpp b/lib/Checker/MallocChecker.cpp
index a22df3046920..086dbd8fdd36 100644
--- a/lib/Checker/MallocChecker.cpp
+++ b/lib/Checker/MallocChecker.cpp
@@ -182,7 +182,10 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
return state;
SymbolRef Sym = ArgVal.getAsLocSymbol();
- assert(Sym);
+
+ // Various cases could lead to non-symbol values here.
+ if (!Sym)
+ return state;
const RefState *RS = state->get<RegionState>(Sym);
diff --git a/lib/Checker/MemRegion.cpp b/lib/Checker/MemRegion.cpp
index 9a664c78a77c..575458c9dc79 100644
--- a/lib/Checker/MemRegion.cpp
+++ b/lib/Checker/MemRegion.cpp
@@ -539,7 +539,7 @@ MemRegionManager::getElementRegion(QualType elementType, SVal Idx,
const MemRegion* superRegion,
ASTContext& Ctx){
- QualType T = Ctx.getCanonicalType(elementType);
+ QualType T = Ctx.getCanonicalType(elementType).getUnqualifiedType();
llvm::FoldingSetNodeID ID;
ElementRegion::ProfileRegion(ID, T, Idx, superRegion);
diff --git a/lib/Checker/ObjCUnusedIVarsChecker.cpp b/lib/Checker/ObjCUnusedIVarsChecker.cpp
index 0e47621d4205..2523cff9d8d0 100644
--- a/lib/Checker/ObjCUnusedIVarsChecker.cpp
+++ b/lib/Checker/ObjCUnusedIVarsChecker.cpp
@@ -114,7 +114,8 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
// (b) explicitly marked unused
// (c) are iboutlets
if (ID->getAccessControl() != ObjCIvarDecl::Private ||
- ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>())
+ ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>() ||
+ ID->getAttr<IBOutletCollectionAttr>())
continue;
M[ID] = Unused;
diff --git a/lib/Checker/RegionStore.cpp b/lib/Checker/RegionStore.cpp
index 1e15d43a5ac2..c4072fd80307 100644
--- a/lib/Checker/RegionStore.cpp
+++ b/lib/Checker/RegionStore.cpp
@@ -352,9 +352,9 @@ public: // Part of public interface to class.
/// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
- Store RemoveDeadBindings(Store store, Stmt* Loc,
- const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+ const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
const GRState *EnterStackFrame(const GRState *state,
@@ -364,7 +364,18 @@ public: // Part of public interface to class.
// Region "extents".
//===------------------------------------------------------------------===//
- const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent);
+ const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent){
+ return state->set<RegionExtents>(R, Extent);
+ }
+
+ Optional<SVal> getExtent(const GRState *state, const MemRegion *R) {
+ const SVal *V = state->get<RegionExtents>(R);
+ if (V)
+ return *V;
+ else
+ return Optional<SVal>();
+ }
+
DefinedOrUnknownSVal getSizeInElements(const GRState *state,
const MemRegion* R, QualType EleTy);
@@ -798,12 +809,6 @@ DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
return UnknownVal();
}
-const GRState *RegionStoreManager::setExtent(const GRState *state,
- const MemRegion *region,
- SVal extent) {
- return state->set<RegionExtents>(region, extent);
-}
-
//===----------------------------------------------------------------------===//
// Location and region casting.
//===----------------------------------------------------------------------===//
@@ -1817,12 +1822,12 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
return changed;
}
-Store RegionStoreManager::RemoveDeadBindings(Store store, Stmt* Loc,
+const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
- RegionBindings B = GetRegionBindings(store);
+ RegionBindings B = GetRegionBindings(state.getStore());
RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, Loc, LCtx);
W.GenerateClusters();
@@ -1855,8 +1860,16 @@ Store RegionStoreManager::RemoveDeadBindings(Store store, Stmt* Loc,
for (; SI != SE; ++SI)
SymReaper.maybeDead(*SI);
}
-
- return B.getRoot();
+ state.setStore(B.getRoot());
+ const GRState *s = StateMgr.getPersistentState(state);
+ // Remove the extents of dead symbolic regions.
+ llvm::ImmutableMap<const MemRegion*,SVal> Extents = s->get<RegionExtents>();
+ for (llvm::ImmutableMap<const MemRegion *, SVal>::iterator I=Extents.begin(),
+ E = Extents.end(); I != E; ++I) {
+ if (!W.isVisited(I->first))
+ s = s->remove<RegionExtents>(I->first);
+ }
+ return s;
}
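
The new getExtent accessor is the usual "lookup that may fail" shape: probe the map, wrap a hit in an Optional, hand back an empty Optional otherwise. The tree uses llvm::Optional over the RegionExtents map; the sketch below uses std::optional and a plain std::map purely to show the shape:

    #include <iostream>
    #include <map>
    #include <optional>
    #include <string>

    using RegionKey = std::string;  // stands in for const MemRegion *
    using Extent = long;            // stands in for SVal

    std::optional<Extent> getExtent(const std::map<RegionKey, Extent> &Extents,
                                    const RegionKey &R) {
      std::map<RegionKey, Extent>::const_iterator It = Extents.find(R);
      if (It == Extents.end())
        return std::nullopt;        // no extent recorded for this region
      return It->second;            // found one, hand it back
    }

    int main() {
      std::map<RegionKey, Extent> Extents;
      Extents["heap.r1"] = 16;

      if (std::optional<Extent> E = getExtent(Extents, "heap.r1"))
        std::cout << "extent: " << *E << " bytes\n";
      if (!getExtent(Extents, "heap.r2"))
        std::cout << "heap.r2 has no recorded extent\n";
      return 0;
    }
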
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 8082b33fd66d..de58597e298d 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -24,6 +24,45 @@
using namespace clang;
using namespace CodeGen;
+/// CGBlockInfo - Information to generate a block literal.
+class clang::CodeGen::CGBlockInfo {
+public:
+ /// Name - The name of the block, kindof.
+ const char *Name;
+
+ /// DeclRefs - Variables from parent scopes that have been
+ /// imported into this block.
+ llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
+
+ /// InnerBlocks - This block and the blocks it encloses.
+ llvm::SmallPtrSet<const DeclContext *, 4> InnerBlocks;
+
+ /// CXXThisRef - Non-null if 'this' was required somewhere, in
+ /// which case this is that expression.
+ const CXXThisExpr *CXXThisRef;
+
+ /// NeedsObjCSelf - True if something in this block has an implicit
+ /// reference to 'self'.
+ bool NeedsObjCSelf;
+
+ /// These are initialized by GenerateBlockFunction.
+ bool BlockHasCopyDispose;
+ CharUnits BlockSize;
+ CharUnits BlockAlign;
+ llvm::SmallVector<const Expr*, 8> BlockLayout;
+
+ CGBlockInfo(const char *Name);
+};
+
+CGBlockInfo::CGBlockInfo(const char *N)
+ : Name(N), CXXThisRef(0), NeedsObjCSelf(false) {
+
+ // Skip asm prefix, if any.
+ if (Name && Name[0] == '\01')
+ ++Name;
+}
+
+
llvm::Constant *CodeGenFunction::
BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnits Size,
const llvm::StructType* Ty,
@@ -86,58 +125,86 @@ llvm::Constant *BlockModule::getNSConcreteStackBlock() {
return NSConcreteStackBlock;
}
-static void CollectBlockDeclRefInfo(
- const Stmt *S, CodeGenFunction::BlockInfo &Info,
- llvm::SmallSet<const DeclContext *, 16> &InnerContexts) {
+static void CollectBlockDeclRefInfo(const Stmt *S, CGBlockInfo &Info) {
for (Stmt::const_child_iterator I = S->child_begin(), E = S->child_end();
I != E; ++I)
if (*I)
- CollectBlockDeclRefInfo(*I, Info, InnerContexts);
+ CollectBlockDeclRefInfo(*I, Info);
// We want to ensure we walk down into block literals so we can find
// all nested BlockDeclRefExprs.
if (const BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
- InnerContexts.insert(cast<DeclContext>(BE->getBlockDecl()));
- CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
}
- if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ else if (const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(S)) {
+ const ValueDecl *D = BDRE->getDecl();
// FIXME: Handle enums.
- if (isa<FunctionDecl>(BDRE->getDecl()))
+ if (isa<FunctionDecl>(D))
return;
+ if (isa<ImplicitParamDecl>(D) &&
+ isa<ObjCMethodDecl>(D->getDeclContext()) &&
+ cast<ObjCMethodDecl>(D->getDeclContext())->getSelfDecl() == D) {
+ Info.NeedsObjCSelf = true;
+ return;
+ }
+
// Only Decls that escape are added.
- if (!InnerContexts.count(BDRE->getDecl()->getDeclContext()))
+ if (!Info.InnerBlocks.count(D->getDeclContext()))
Info.DeclRefs.push_back(BDRE);
}
+
+ // Make sure to capture implicit 'self' references due to super calls.
+ else if (const ObjCMessageExpr *E = dyn_cast<ObjCMessageExpr>(S)) {
+ if (E->getReceiverKind() == ObjCMessageExpr::SuperClass ||
+ E->getReceiverKind() == ObjCMessageExpr::SuperInstance)
+ Info.NeedsObjCSelf = true;
+ }
+
+ // Getter/setter uses may also cause implicit super references,
+ // which we can check for with:
+ else if (isa<ObjCSuperExpr>(S))
+ Info.NeedsObjCSelf = true;
+
+ else if (isa<CXXThisExpr>(S))
+ Info.CXXThisRef = cast<CXXThisExpr>(S);
}
-/// CanBlockBeGlobal - Given a BlockInfo struct, determines if a block can be
+/// CanBlockBeGlobal - Given a CGBlockInfo struct, determines if a block can be
/// declared as a global variable instead of on the stack.
-static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
+static bool CanBlockBeGlobal(const CGBlockInfo &Info) {
return Info.DeclRefs.empty();
}
/// AllocateAllBlockDeclRefs - Preallocate all nested BlockDeclRefExprs to
/// ensure we can generate the debug information for the parameter for the block
/// invoke function.
-static void AllocateAllBlockDeclRefs(const CodeGenFunction::BlockInfo &Info,
- CodeGenFunction *CGF) {
- // FIXME: Also always forward the this pointer in C++ as well.
+static void AllocateAllBlockDeclRefs(CodeGenFunction &CGF, CGBlockInfo &Info) {
+ if (Info.CXXThisRef)
+ CGF.AllocateBlockCXXThisPointer(Info.CXXThisRef);
for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
- CGF->AllocateBlockDecl(Info.DeclRefs[i]);
+ CGF.AllocateBlockDecl(Info.DeclRefs[i]);
+
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CGF.CurFuncDecl)->getSelfDecl();
+ BlockDeclRefExpr *BDRE =
+ new (CGF.getContext()) BlockDeclRefExpr(Self, Self->getType(),
+ SourceLocation(), false);
+ Info.DeclRefs.push_back(BDRE);
+ CGF.AllocateBlockDecl(BDRE);
+ }
}
// FIXME: Push most into CGM, passing down a few bits, like current function
// name.
llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
-
std::string Name = CurFn->getName();
- CodeGenFunction::BlockInfo Info(0, Name.c_str());
- llvm::SmallSet<const DeclContext *, 16> InnerContexts;
- InnerContexts.insert(BE->getBlockDecl());
- CollectBlockDeclRefInfo(BE->getBody(), Info, InnerContexts);
+ CGBlockInfo Info(Name.c_str());
+ Info.InnerBlocks.insert(BE->getBlockDecl());
+ CollectBlockDeclRefInfo(BE->getBody(), Info);
// Check if the block can be global.
// FIXME: This test doesn't work for nested blocks yet. Longer term, I'd like
@@ -160,23 +227,15 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// We run this first so that we set BlockHasCopyDispose from the entire
// block literal.
// __invoke
- CharUnits subBlockSize;
- CharUnits subBlockAlign;
- llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
- bool subBlockHasCopyDispose = false;
llvm::Function *Fn
= CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
- LocalDeclMap,
- subBlockSize,
- subBlockAlign,
- subBlockDeclRefDecls,
- subBlockHasCopyDispose);
- BlockHasCopyDispose |= subBlockHasCopyDispose;
+ LocalDeclMap);
+ BlockHasCopyDispose |= Info.BlockHasCopyDispose;
Elts[3] = Fn;
  // FIXME: Don't use BlockHasCopyDispose, it is set more often than
// necessary, for example: { ^{ __block int i; ^{ i = 1; }(); }(); }
- if (subBlockHasCopyDispose)
+ if (Info.BlockHasCopyDispose)
flags |= BLOCK_HAS_COPY_DISPOSE;
// __isa
@@ -206,10 +265,10 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
C = llvm::ConstantInt::get(IntTy, 0);
Elts[2] = C;
- if (subBlockDeclRefDecls.size() == 0) {
+ if (Info.BlockLayout.empty()) {
// __descriptor
- Elts[4] = BuildDescriptorBlockDecl(BE, subBlockHasCopyDispose, subBlockSize,
- 0, 0);
+ Elts[4] = BuildDescriptorBlockDecl(BE, Info.BlockHasCopyDispose,
+ Info.BlockSize, 0, 0);
// Optimize to being a global block.
Elts[0] = CGM.getNSConcreteGlobalBlock();
@@ -227,13 +286,13 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
return C;
}
- std::vector<const llvm::Type *> Types(BlockFields+subBlockDeclRefDecls.size());
+ std::vector<const llvm::Type *> Types(BlockFields+Info.BlockLayout.size());
for (int i=0; i<4; ++i)
Types[i] = Elts[i]->getType();
Types[4] = PtrToInt8Ty;
- for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
- const Expr *E = subBlockDeclRefDecls[i];
+ for (unsigned i = 0, n = Info.BlockLayout.size(); i != n; ++i) {
+ const Expr *E = Info.BlockLayout[i];
const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
@@ -245,105 +304,113 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::StructType *Ty = llvm::StructType::get(VMContext, Types, true);
llvm::AllocaInst *A = CreateTempAlloca(Ty);
- A->setAlignment(subBlockAlign.getQuantity());
+ A->setAlignment(Info.BlockAlign.getQuantity());
V = A;
- std::vector<HelperInfo> NoteForHelper(subBlockDeclRefDecls.size());
- int helpersize = 0;
+ // Build layout / cleanup information for all the data entries in the
+ // layout, and write the enclosing fields into the type.
+ std::vector<HelperInfo> NoteForHelper(Info.BlockLayout.size());
+ unsigned NumHelpers = 0;
for (unsigned i=0; i<4; ++i)
Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
- for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
- {
- // FIXME: Push const down.
- Expr *E = const_cast<Expr*>(subBlockDeclRefDecls[i]);
- DeclRefExpr *DR;
- ValueDecl *VD;
-
- DR = dyn_cast<DeclRefExpr>(E);
- // Skip padding.
- if (DR) continue;
-
- BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
- VD = BDRE->getDecl();
-
- llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
- NoteForHelper[helpersize].index = i+5;
- NoteForHelper[helpersize].RequiresCopying
- = BlockRequiresCopying(VD->getType());
- NoteForHelper[helpersize].flag
- = (VD->getType()->isBlockPointerType()
- ? BLOCK_FIELD_IS_BLOCK
- : BLOCK_FIELD_IS_OBJECT);
-
- if (LocalDeclMap[VD]) {
- if (BDRE->isByRef()) {
- NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
- // FIXME: Someone double check this.
- (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
- llvm::Value *Loc = LocalDeclMap[VD];
- Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
- Loc = Builder.CreateLoad(Loc);
- Builder.CreateStore(Loc, Addr);
- ++helpersize;
- continue;
- } else
- E = new (getContext()) DeclRefExpr (VD,
- VD->getType(),
- SourceLocation());
- }
+ for (unsigned i=0; i < Info.BlockLayout.size(); ++i) {
+ const Expr *E = Info.BlockLayout[i];
+
+ // Skip padding.
+ if (isa<DeclRefExpr>(E)) continue;
+
+ llvm::Value* Addr = Builder.CreateStructGEP(V, i+BlockFields, "tmp");
+ HelperInfo &Note = NoteForHelper[NumHelpers++];
+
+ Note.index = i+5;
+
+ if (isa<CXXThisExpr>(E)) {
+ Note.RequiresCopying = false;
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+
+ Builder.CreateStore(LoadCXXThis(), Addr);
+ continue;
+ }
+
+ const BlockDeclRefExpr *BDRE = cast<BlockDeclRefExpr>(E);
+ const ValueDecl *VD = BDRE->getDecl();
+ QualType T = VD->getType();
+
+ Note.RequiresCopying = BlockRequiresCopying(T);
+
+ if (BDRE->isByRef()) {
+ Note.flag = BLOCK_FIELD_IS_BYREF;
+ if (T.isObjCGCWeak())
+ Note.flag |= BLOCK_FIELD_IS_WEAK;
+ } else if (T->isBlockPointerType()) {
+ Note.flag = BLOCK_FIELD_IS_BLOCK;
+ } else {
+ Note.flag = BLOCK_FIELD_IS_OBJECT;
+ }
+
+ if (LocalDeclMap[VD]) {
if (BDRE->isByRef()) {
- NoteForHelper[helpersize].flag = BLOCK_FIELD_IS_BYREF |
- // FIXME: Someone double check this.
- (VD->getType().isObjCGCWeak() ? BLOCK_FIELD_IS_WEAK : 0);
- E = new (getContext())
- UnaryOperator(E, UnaryOperator::AddrOf,
- getContext().getPointerType(E->getType()),
- SourceLocation());
- }
- ++helpersize;
-
- RValue r = EmitAnyExpr(E, Addr, false);
- if (r.isScalar()) {
- llvm::Value *Loc = r.getScalarVal();
- const llvm::Type *Ty = Types[i+BlockFields];
- if (BDRE->isByRef()) {
- // E is now the address of the value field, instead, we want the
- // address of the actual ByRef struct. We optimize this slightly
- // compared to gcc by not grabbing the forwarding slot as this must
- // be done during Block_copy for us, and we can postpone the work
- // until then.
- CharUnits offset = BlockDecls[BDRE->getDecl()];
-
- llvm::Value *BlockLiteral = LoadBlockStruct();
-
- Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
- "block.literal");
- Ty = llvm::PointerType::get(Ty, 0);
- Loc = Builder.CreateBitCast(Loc, Ty);
- Loc = Builder.CreateLoad(Loc);
- // Loc = Builder.CreateBitCast(Loc, Ty);
- }
+ llvm::Value *Loc = LocalDeclMap[VD];
+ Loc = Builder.CreateStructGEP(Loc, 1, "forwarding");
+ Loc = Builder.CreateLoad(Loc);
Builder.CreateStore(Loc, Addr);
- } else if (r.isComplex())
- // FIXME: implement
- ErrorUnsupported(BE, "complex in block literal");
- else if (r.isAggregate())
- ; // Already created into the destination
- else
- assert (0 && "bad block variable");
- // FIXME: Ensure that the offset created by the backend for
- // the struct matches the previously computed offset in BlockDecls.
+ continue;
+ } else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType(),
+ SourceLocation());
+ }
}
- NoteForHelper.resize(helpersize);
+
+ if (BDRE->isByRef()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+
+ RValue r = EmitAnyExpr(E, Addr, false);
+ if (r.isScalar()) {
+ llvm::Value *Loc = r.getScalarVal();
+ const llvm::Type *Ty = Types[i+BlockFields];
+ if (BDRE->isByRef()) {
+ // E is now the address of the value field; instead, we want the
+ // address of the actual ByRef struct. We optimize this slightly
+ // compared to gcc by not grabbing the forwarding slot as this must
+ // be done during Block_copy for us, and we can postpone the work
+ // until then.
+ CharUnits offset = BlockDecls[BDRE->getDecl()];
+
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+
+ Loc = Builder.CreateGEP(BlockLiteral,
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
+ offset.getQuantity()),
+ "block.literal");
+ Ty = llvm::PointerType::get(Ty, 0);
+ Loc = Builder.CreateBitCast(Loc, Ty);
+ Loc = Builder.CreateLoad(Loc);
+ // Loc = Builder.CreateBitCast(Loc, Ty);
+ }
+ Builder.CreateStore(Loc, Addr);
+ } else if (r.isComplex())
+ // FIXME: implement
+ ErrorUnsupported(BE, "complex in block literal");
+ else if (r.isAggregate())
+ ; // Already created into the destination
+ else
+ assert (0 && "bad block variable");
+ // FIXME: Ensure that the offset created by the backend for
+ // the struct matches the previously computed offset in BlockDecls.
+ }
+ NoteForHelper.resize(NumHelpers);
// __descriptor
llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
- subBlockHasCopyDispose,
- subBlockSize, Ty,
+ Info.BlockHasCopyDispose,
+ Info.BlockSize, Ty,
&NoteForHelper);
Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
Builder.CreateStore(Descriptor, Builder.CreateStructGEP(V, 4, "block.tmp"));
@@ -487,13 +554,25 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
return EmitCall(FnInfo, Func, ReturnValue, Args);
}
-CharUnits CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
+void CodeGenFunction::AllocateBlockCXXThisPointer(const CXXThisExpr *E) {
+ assert(BlockCXXThisOffset.isZero() && "already computed 'this' pointer");
+
+ // Figure out what the offset is.
+ QualType T = E->getType();
+ std::pair<CharUnits,CharUnits> TypeInfo = getContext().getTypeInfoInChars(T);
+ CharUnits Offset = getBlockOffset(TypeInfo.first, TypeInfo.second);
+
+ BlockCXXThisOffset = Offset;
+ BlockLayout.push_back(E);
+}
+
+void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
const ValueDecl *VD = E->getDecl();
- CharUnits &offset = BlockDecls[VD];
+ CharUnits &Offset = BlockDecls[VD];
// See if we have already allocated an offset for this variable.
- if (offset.isPositive())
- return offset;
+ if (!Offset.isZero())
+ return;
// Don't run the expensive check, unless we have to.
if (!BlockHasCopyDispose)
@@ -501,23 +580,34 @@ CharUnits CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
|| BlockRequiresCopying(E->getType()))
BlockHasCopyDispose = true;
- // if not, allocate one now.
- offset = getBlockOffset(E);
+ const ValueDecl *D = cast<ValueDecl>(E->getDecl());
- return offset;
+ CharUnits Size;
+ CharUnits Align;
+
+ if (E->isByRef()) {
+ llvm::tie(Size,Align) =
+ getContext().getTypeInfoInChars(getContext().VoidPtrTy);
+ } else {
+ Size = getContext().getTypeSizeInChars(D->getType());
+ Align = getContext().getDeclAlign(D);
+ }
+
+ Offset = getBlockOffset(Size, Align);
+ BlockLayout.push_back(E);
}
-llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
- const ValueDecl *VD = E->getDecl();
- CharUnits offset = AllocateBlockDecl(E);
-
+llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
+ bool IsByRef) {
+ CharUnits offset = BlockDecls[VD];
+ assert(!offset.isZero() && "getting address of unallocated decl");
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
offset.getQuantity()),
"block.literal");
- if (E->isByRef()) {
+ if (IsByRef) {
const llvm::Type *PtrStructTy
= llvm::PointerType::get(BuildByRefType(VD), 0);
// The block literal will need a copy/destroy helper.
@@ -543,19 +633,6 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
return V;
}
-void CodeGenFunction::BlockForwardSelf() {
- const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
- ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
- llvm::Value *&DMEntry = LocalDeclMap[SelfDecl];
- if (DMEntry)
- return;
- // FIXME - Eliminate BlockDeclRefExprs, clients don't need/want to care
- BlockDeclRefExpr *BDRE = new (getContext())
- BlockDeclRefExpr(SelfDecl,
- SelfDecl->getType(), SourceLocation(), false);
- DMEntry = GetAddrOfBlockDecl(BDRE);
-}
-
llvm::Constant *
BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// Generate the block descriptor.
@@ -600,19 +677,11 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
std::vector<llvm::Constant*> LiteralFields(FieldCount);
- CodeGenFunction::BlockInfo Info(0, n);
- CharUnits subBlockSize;
- CharUnits subBlockAlign;
- llvm::SmallVector<const Expr *, 8> subBlockDeclRefDecls;
- bool subBlockHasCopyDispose = false;
+ CGBlockInfo Info(n);
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap,
- subBlockSize,
- subBlockAlign,
- subBlockDeclRefDecls,
- subBlockHasCopyDispose);
- assert(subBlockSize == BlockLiteralSize
+ = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
+ assert(Info.BlockSize == BlockLiteralSize
&& "no imports allowed for global block");
// isa
@@ -651,13 +720,9 @@ llvm::Value *CodeGenFunction::LoadBlockStruct() {
llvm::Function *
CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
- const BlockInfo& Info,
+ CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm,
- CharUnits &Size,
- CharUnits &Align,
- llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
- bool &subBlockHasCopyDispose) {
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm) {
// Check if we should generate debug info for this block.
if (CGM.getDebugInfo())
@@ -701,11 +766,12 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
- // Allocate all BlockDeclRefDecls, so we can calculate the right ParmTy below.
- AllocateAllBlockDeclRefs(Info, this);
+ // Build the block struct now.
+ AllocateAllBlockDeclRefs(*this, Info);
QualType ParmTy = getContext().getBlockParmType(BlockHasCopyDispose,
- BlockDeclRefDecls);
+ BlockLayout);
+
// FIXME: This leaks
ImplicitParamDecl *SelfDecl =
ImplicitParamDecl::Create(getContext(), const_cast<BlockDecl*>(BD),
@@ -725,10 +791,11 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
+ MangleBuffer Name;
+ CGM.getMangledName(Name, BD);
llvm::Function *Fn =
- llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
- llvm::Twine("__") + Info.Name + "_block_invoke_",
- &CGM.getModule());
+ llvm::Function::Create(LTy, llvm::GlobalValue::InternalLinkage,
+ Name.getString(), &CGM.getModule());
CGM.SetInternalFunctionAttributes(BD, Fn, FI);
@@ -738,6 +805,34 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
CurFuncDecl = OuterFuncDecl;
CurCodeDecl = BD;
+ // If we have a C++ 'this' reference, go ahead and force it into
+ // existence now.
+ if (Info.CXXThisRef) {
+ assert(!BlockCXXThisOffset.isZero() &&
+ "haven't yet allocated 'this' reference");
+
+ // TODO: I have a dream that one day this will be typed.
+ llvm::Value *BlockLiteral = LoadBlockStruct();
+ llvm::Value *ThisPtrRaw =
+ Builder.CreateConstInBoundsGEP1_64(BlockLiteral,
+ BlockCXXThisOffset.getQuantity(),
+ "this.ptr.raw");
+
+ const llvm::Type *Ty =
+ CGM.getTypes().ConvertType(Info.CXXThisRef->getType());
+ Ty = llvm::PointerType::get(Ty, 0);
+ llvm::Value *ThisPtr = Builder.CreateBitCast(ThisPtrRaw, Ty, "this.ptr");
+
+ CXXThisValue = Builder.CreateLoad(ThisPtr, "this");
+ }
+
+ // If we have an Objective C 'self' reference, go ahead and force it
+ // into existence now.
+ if (Info.NeedsObjCSelf) {
+ ValueDecl *Self = cast<ObjCMethodDecl>(CurFuncDecl)->getSelfDecl();
+ LocalDeclMap[Self] = GetAddrOfBlockDecl(Self, false);
+ }
+
// Save a spot to insert the debug information for all the BlockDeclRefDecls.
llvm::BasicBlock *entry = Builder.GetInsertBlock();
llvm::BasicBlock::iterator entry_ptr = Builder.GetInsertPoint();
@@ -754,9 +849,10 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
if (CGDebugInfo *DI = getDebugInfo()) {
// Emit debug information for all the BlockDeclRefDecls.
- for (unsigned i = 0, e = BlockDeclRefDecls.size(); i != e; ++i) {
- if (const BlockDeclRefExpr *BDRE =
- dyn_cast<BlockDeclRefExpr>(BlockDeclRefDecls[i])) {
+ // FIXME: also for 'this'
+ for (unsigned i = 0, e = BlockLayout.size(); i != e; ++i) {
+ if (const BlockDeclRefExpr *BDRE =
+ dyn_cast<BlockDeclRefExpr>(BlockLayout[i])) {
const ValueDecl *D = BDRE->getDecl();
DI->setLocation(D->getLocation());
DI->EmitDeclareOfBlockDeclRefVariable(BDRE,
@@ -779,25 +875,15 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
llvm::RoundUpToAlignment(BlockOffset.getQuantity(),
MinAlign.getQuantity()));
- Size = BlockOffset;
- Align = BlockAlign;
- subBlockDeclRefDecls = BlockDeclRefDecls;
- subBlockHasCopyDispose |= BlockHasCopyDispose;
+ Info.BlockSize = BlockOffset;
+ Info.BlockAlign = BlockAlign;
+ Info.BlockLayout = BlockLayout;
+ Info.BlockHasCopyDispose = BlockHasCopyDispose;
return Fn;
}
-CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
- const ValueDecl *D = dyn_cast<ValueDecl>(BDRE->getDecl());
-
- CharUnits Size = getContext().getTypeSizeInChars(D->getType());
- CharUnits Align = getContext().getDeclAlign(D);
-
- if (BDRE->isByRef()) {
- Size = getContext().getTypeSizeInChars(getContext().VoidPtrTy);
- Align = getContext().getTypeAlignInChars(getContext().VoidPtrTy);
- }
-
- assert ((Align.isPositive()) && "alignment must be 1 byte or more");
+CharUnits BlockFunction::getBlockOffset(CharUnits Size, CharUnits Align) {
+ assert((Align.isPositive()) && "alignment must be 1 byte or more");
CharUnits OldOffset = BlockOffset;
@@ -808,23 +894,22 @@ CharUnits BlockFunction::getBlockOffset(const BlockDeclRefExpr *BDRE) {
CharUnits Pad = BlockOffset - OldOffset;
if (Pad.isPositive()) {
- llvm::ArrayType::get(llvm::Type::getInt8Ty(VMContext), Pad.getQuantity());
QualType PadTy = getContext().getConstantArrayType(getContext().CharTy,
llvm::APInt(32,
Pad.getQuantity()),
ArrayType::Normal, 0);
- ValueDecl *PadDecl = VarDecl::Create(getContext(), 0, SourceLocation(),
+ ValueDecl *PadDecl = VarDecl::Create(getContext(),
+ getContext().getTranslationUnitDecl(),
+ SourceLocation(),
0, QualType(PadTy), 0,
VarDecl::None, VarDecl::None);
- Expr *E;
- E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
- SourceLocation());
- BlockDeclRefDecls.push_back(E);
+ Expr *E = new (getContext()) DeclRefExpr(PadDecl, PadDecl->getType(),
+ SourceLocation());
+ BlockLayout.push_back(E);
}
- BlockDeclRefDecls.push_back(BDRE);
BlockOffset += Size;
- return BlockOffset-Size;
+ return BlockOffset - Size;
}
llvm::Constant *BlockFunction::
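
The rewritten getBlockOffset above now takes an explicit size and alignment and records padding by pushing an anonymous array-typed VarDecl (wrapped in a DeclRefExpr) onto BlockLayout. The following stand-alone C++ sketch shows the same round-up-and-pad bookkeeping with CharUnits modelled as a plain int64_t; the names are illustrative and do not come from the CGBlocks API.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Illustrative stand-in for the per-block layout state kept by BlockFunction.
    struct BlockLayoutSketch {
      int64_t Offset = 0;            // running size of the block literal, in bytes
      int64_t Align = 1;             // strictest alignment requested so far
      std::vector<int64_t> PadRuns;  // padding runs inserted before fields

      // Allocate a slot with the given size/alignment and return its offset.
      int64_t allocate(int64_t Size, int64_t AlignReq) {
        assert(AlignReq >= 1 && "alignment must be 1 byte or more");
        const int64_t Old = Offset;
        Align = std::max(Align, AlignReq);
        // Round the running offset up to the required alignment.
        Offset = (Offset + AlignReq - 1) / AlignReq * AlignReq;
        if (Offset > Old)
          PadRuns.push_back(Offset - Old);  // analogous to the anonymous pad VarDecl
        Offset += Size;
        return Offset - Size;
      }
    };

In the real code each padding run is what later appears in BlockLayout as a plain DeclRefExpr, which is why the literal-emission loop above simply skips DeclRefExpr entries.
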
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index 5646d0036e90..e9b2bd549af5 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -148,26 +148,6 @@ public:
BLOCK_BYREF_CURRENT_MAX = 256
};
- /// BlockInfo - Information to generate a block literal.
- struct BlockInfo {
- /// BlockLiteralTy - The type of the block literal.
- const llvm::Type *BlockLiteralTy;
-
- /// Name - the name of the function this block was created for, if any.
- const char *Name;
-
- /// ByCopyDeclRefs - Variables from parent scopes that have been imported
- /// into this block.
- llvm::SmallVector<const BlockDeclRefExpr *, 8> DeclRefs;
-
- BlockInfo(const llvm::Type *blt, const char *n)
- : BlockLiteralTy(blt), Name(n) {
- // Skip asm prefix, if any.
- if (Name && Name[0] == '\01')
- ++Name;
- }
- };
-
CGBuilderTy &Builder;
BlockFunction(CodeGenModule &cgm, CodeGenFunction &cgf, CGBuilderTy &B);
@@ -179,19 +159,31 @@ public:
/// characters.
CharUnits BlockAlign;
- /// getBlockOffset - Allocate an offset for the ValueDecl from a
- /// BlockDeclRefExpr in a block literal (BlockExpr).
- CharUnits getBlockOffset(const BlockDeclRefExpr *E);
+ /// getBlockOffset - Allocate a location within the block's storage
+ /// for a value with the given size and alignment requirements.
+ CharUnits getBlockOffset(CharUnits Size, CharUnits Align);
/// BlockHasCopyDispose - True iff the block uses copy/dispose.
bool BlockHasCopyDispose;
- /// BlockDeclRefDecls - Decls from BlockDeclRefExprs in apperance order
- /// in a block literal. Decls without names are used for padding.
- llvm::SmallVector<const Expr *, 8> BlockDeclRefDecls;
+ /// BlockLayout - The layout of the block's storage, represented as
+ /// a sequence of expressions which require such storage. The
+ /// expressions can be:
+ /// - a BlockDeclRefExpr, indicating that the given declaration
+ /// from an enclosing scope is needed by the block;
+ /// - a DeclRefExpr, which always wraps an anonymous VarDecl with
+ /// array type, used to insert padding into the block; or
+ /// - a CXXThisExpr, indicating that the C++ 'this' value should
+ /// propagate from the parent to the block.
+ /// This is a really silly representation.
+ llvm::SmallVector<const Expr *, 8> BlockLayout;
/// BlockDecls - Offsets for all Decls in BlockDeclRefExprs.
- std::map<const Decl*, CharUnits> BlockDecls;
+ llvm::DenseMap<const Decl*, CharUnits> BlockDecls;
+
+ /// BlockCXXThisOffset - The offset of the C++ 'this' value within
+ /// the block structure.
+ CharUnits BlockCXXThisOffset;
ImplicitParamDecl *BlockStructDecl;
ImplicitParamDecl *getBlockStructDecl() { return BlockStructDecl; }
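
To make the new BlockLayout representation described in CGBlocks.h concrete, here is a small C++ blocks-extension example (compile with -fblocks; linking needs the blocks runtime on non-Darwin targets). The comments give an informal reading of which layout entries the block would produce; they are not generated output.

    struct Counter {
      int step;
      void schedule(int base) {
        __block int total = base;          // captured by reference
        void (^tick)(void) = ^{
          total += step;                   // 'step' is this->step, so 'this' is captured too
        };
        tick();
        // BlockLayout for the literal above would roughly contain:
        //   - a CXXThisExpr entry (propagating the enclosing 'this'),
        //   - a BlockDeclRefExpr entry for 'total' (byref slot, pointer-sized),
        //   - and possibly a padding DeclRefExpr if alignment requires it.
      }
    };
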
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 95c41db86e09..dd505c2ae88f 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -71,7 +71,7 @@ static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
/// Utility to insert an atomic instruction based on the Intrinsic::ID and
// the expression node, where the return value is the result of the
// operation.
-static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
+static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
Intrinsic::ID Id, const CallExpr *E,
Instruction::BinaryOps Op) {
const llvm::Type *ResType[2];
@@ -88,6 +88,30 @@ static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
}
+
+/// EmitFAbs - Emit a call to fabs/fabsf/fabsl, depending on the type of ValTy,
+/// which must be a scalar floating point type.
+static Value *EmitFAbs(CodeGenFunction &CGF, Value *V, QualType ValTy) {
+ const BuiltinType *ValTyP = ValTy->getAs<BuiltinType>();
+ assert(ValTyP && "isn't scalar fp type!");
+
+ StringRef FnName;
+ switch (ValTyP->getKind()) {
+ default: assert(0 && "Isn't a scalar fp type!");
+ case BuiltinType::Float: FnName = "fabsf"; break;
+ case BuiltinType::Double: FnName = "fabs"; break;
+ case BuiltinType::LongDouble: FnName = "fabsl"; break;
+ }
+
+ // The prototype is something that takes and returns whatever V's type is.
+ std::vector<const llvm::Type*> Args;
+ Args.push_back(V->getType());
+ llvm::FunctionType *FT = llvm::FunctionType::get(V->getType(), Args, false);
+ llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(FT, FnName);
+
+ return CGF.Builder.CreateCall(Fn, V, "abs");
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
@@ -328,6 +352,50 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
V = Builder.CreateFCmpUNO(V, V, "cmp");
return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
}
+
+ case Builtin::BI__builtin_isinf: {
+ // isinf(x) --> fabs(x) == infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ V = EmitFAbs(*this, V, E->getArg(0)->getType());
+
+ V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType()), "tmp"));
+ }
+
+ // TODO: BI__builtin_isinf_sign
+ // isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0
+
+ case Builtin::BI__builtin_isnormal: {
+ // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsLessThanInf =
+ Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+ APFloat Smallest = APFloat::getSmallestNormalized(
+ getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
+ Value *IsNormal =
+ Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
+ "isnormal");
+ V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
+ V = Builder.CreateAnd(V, IsNormal, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
+ case Builtin::BI__builtin_isfinite: {
+ // isfinite(x) --> x == x && fabs(x) != infinity
+ Value *V = EmitScalarExpr(E->getArg(0));
+ Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");
+
+ Value *Abs = EmitFAbs(*this, V, E->getArg(0)->getType());
+ Value *IsNotInf =
+ Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
+
+ V = Builder.CreateAnd(Eq, IsNotInf, "and");
+ return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
+ }
+
case Builtin::BIalloca:
case Builtin::BI__builtin_alloca: {
// FIXME: LLVM IR Should allow alloca with an i64 size!
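
The new __builtin_isinf / __builtin_isnormal / __builtin_isfinite cases above all reduce classification to comparisons against fabs(x). The functions below spell out the same formulas in plain C++ as a sketch of what the lowering computes, not as the CodeGen path itself.

    #include <cmath>
    #include <limits>

    bool my_isinf(double x) {
      // isinf(x) --> fabs(x) == infinity
      return std::fabs(x) == std::numeric_limits<double>::infinity();
    }

    bool my_isfinite(double x) {
      // x == x filters out NaN; the fabs comparison filters out +/- infinity.
      return x == x && std::fabs(x) != std::numeric_limits<double>::infinity();
    }

    bool my_isnormal(double x) {
      // Not NaN, not infinite, and at least the smallest normalized value.
      return x == x &&
             std::fabs(x) < std::numeric_limits<double>::infinity() &&
             std::fabs(x) >= std::numeric_limits<double>::min();
    }
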
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 74cf1134d5b7..525877903e7a 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -13,6 +13,7 @@
// We might split this into multiple files if it gets too unwieldy
+#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "Mangle.h"
@@ -206,6 +207,7 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
return;
llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXConstructor(D, Type));
+ setFunctionLinkage(D, Fn);
CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
@@ -229,6 +231,10 @@ CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
+void CodeGenModule::getMangledName(MangleBuffer &Buffer, const BlockDecl *BD) {
+ getMangleContext().mangleBlock(BD, Buffer.getBuffer());
+}
+
void CodeGenModule::getMangledCXXCtorName(MangleBuffer &Name,
const CXXConstructorDecl *D,
CXXCtorType Type) {
@@ -269,6 +275,7 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
return;
llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXDestructor(D, Type));
+ setFunctionLinkage(D, Fn);
CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
@@ -327,3 +334,5 @@ CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
+
+CXXABI::~CXXABI() {}
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
new file mode 100644
index 000000000000..a7e18714e8b3
--- /dev/null
+++ b/lib/CodeGen/CGCXXABI.h
@@ -0,0 +1,37 @@
+//===----- CGCXXABI.h - Interface to C++ ABIs -------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for C++ code generation. Concrete subclasses
+// of this implement code generation for specific C++ ABIs.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CXXABI_H
+#define CLANG_CODEGEN_CXXABI_H
+
+namespace clang {
+namespace CodeGen {
+ class CodeGenModule;
+ class MangleContext;
+
+/// Implements C++ ABI-specific code generation functions.
+class CXXABI {
+public:
+ virtual ~CXXABI();
+
+ /// Gets the mangle context.
+ virtual MangleContext &getMangleContext() = 0;
+};
+
+/// Creates an instance of a C++ ABI class.
+CXXABI *CreateItaniumCXXABI(CodeGenModule &CGM);
+}
+}
+
+#endif
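
CGCXXABI.h only declares the abstract interface and the CreateItaniumCXXABI factory; the concrete Itanium implementation lives in a separate source file that is not part of this excerpt. A concrete subclass would follow roughly the shape below; the class and member names are invented for illustration and assume CGCXXABI.h and Mangle.h are included.

    // Hypothetical sketch of a concrete ABI class; not the actual clang code.
    namespace clang {
    namespace CodeGen {

    class SketchItaniumCXXABI : public CXXABI {
      MangleContext &Mangler;  // assumed to be created and owned elsewhere
    public:
      explicit SketchItaniumCXXABI(MangleContext &M) : Mangler(M) {}
      virtual MangleContext &getMangleContext() { return Mangler; }
    };

    } // namespace CodeGen
    } // namespace clang
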
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 92d15d9d8c97..73cee3c10d2e 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -38,6 +38,7 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
default: return llvm::CallingConv::C;
case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
+ case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
}
}
@@ -97,6 +98,9 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
if (D->hasAttr<FastCallAttr>())
return CC_X86FastCall;
+ if (D->hasAttr<ThisCallAttr>())
+ return CC_X86ThisCall;
+
return CC_C;
}
@@ -858,6 +862,36 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
}
}
+RValue CodeGenFunction::EmitDelegateCallArg(const VarDecl *Param) {
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *Local = GetAddrOfLocalVar(Param);
+
+ QualType ArgType = Param->getType();
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (const ReferenceType *RefType = ArgType->getAs<ReferenceType>()) {
+ if (hasAggregateLLVMType(RefType->getPointeeType()))
+ return RValue::getAggregate(Local);
+
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ return RValue::get(Builder.CreateLoad(Local));
+ }
+
+ if (ArgType->isAnyComplexType())
+ return RValue::getComplex(LoadComplexFromAddr(Local, /*volatile*/ false));
+
+ if (hasAggregateLLVMType(ArgType))
+ return RValue::getAggregate(Local);
+
+ return RValue::get(EmitLoadOfScalar(Local, false, ArgType));
+}
+
RValue CodeGenFunction::EmitCallArg(const Expr *E, QualType ArgType) {
if (ArgType->isReferenceType())
return EmitReferenceBindingToExpr(E);
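
Besides EmitDelegateCallArg, the CGCall.cpp hunks wire up the new X86 thiscall convention (CC_X86ThisCall and the ThisCallAttr check). A minimal use of the attribute looks like this; it only has an effect on 32-bit x86 targets and is shown purely to illustrate what the new check picks up.

    struct Point { int x, y; };

    // On x86-32 this asks for the first (pointer) argument to be passed in ECX,
    // matching MSVC's member-function convention.
    int __attribute__((thiscall)) length_squared(Point *p) {
      return p->x * p->x + p->y * p->y;
    }
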
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index a604eef49a13..bebea549f965 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -262,102 +262,7 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
return Value;
}
-
-/// EmitCopyCtorCall - Emit a call to a copy constructor.
-static void
-EmitCopyCtorCall(CodeGenFunction &CGF, const CXXConstructorDecl *CopyCtor,
- llvm::Value *ThisPtr, llvm::Value *Src) {
- llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, Ctor_Complete);
-
- CallArgList CallArgs;
-
- // Push the this ptr.
- CallArgs.push_back(std::make_pair(RValue::get(ThisPtr),
- CopyCtor->getThisType(CGF.getContext())));
-
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- CopyCtor->getParamDecl(0)->getType()));
-
-
- {
- CodeGenFunction::CXXTemporariesCleanupScope Scope(CGF);
-
- // If the copy constructor has default arguments, emit them.
- for (unsigned I = 1, E = CopyCtor->getNumParams(); I < E; ++I) {
- const ParmVarDecl *Param = CopyCtor->getParamDecl(I);
- const Expr *DefaultArgExpr = Param->getDefaultArg();
-
- assert(DefaultArgExpr && "Ctor parameter must have default arg!");
-
- QualType ArgType = Param->getType();
- CallArgs.push_back(std::make_pair(CGF.EmitCallArg(DefaultArgExpr,
- ArgType),
- ArgType));
- }
-
- const FunctionProtoType *FPT =
- CopyCtor->getType()->getAs<FunctionProtoType>();
- CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, CopyCtor);
- }
-}
-/// EmitClassAggrMemberwiseCopy - This routine generates code to copy a class
-/// array of objects from SrcValue to DestValue. Copying can be either a bitwise
-/// copy or via a copy constructor call.
-// FIXME. Consolidate this with EmitCXXAggrConstructorCall.
-void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
- llvm::Value *Src,
- const ConstantArrayType *Array,
- const CXXRecordDecl *ClassDecl) {
- // Create a temporary for the loop index and initialize it with 0.
- llvm::Value *IndexPtr = CreateTempAlloca(llvm::Type::getInt64Ty(VMContext),
- "loop.index");
- llvm::Value* zeroConstant =
- llvm::Constant::getNullValue(llvm::Type::getInt64Ty(VMContext));
- Builder.CreateStore(zeroConstant, IndexPtr);
- // Start the loop with a block that tests the condition.
- llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
- llvm::BasicBlock *AfterFor = createBasicBlock("for.end");
-
- EmitBlock(CondBlock);
-
- llvm::BasicBlock *ForBody = createBasicBlock("for.body");
- // Generate: if (loop-index < number-of-elements fall to the loop body,
- // otherwise, go to the block after the for-loop.
- uint64_t NumElements = getContext().getConstantArrayElementCount(Array);
- llvm::Value * NumElementsPtr =
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), NumElements);
- llvm::Value *Counter = Builder.CreateLoad(IndexPtr);
- llvm::Value *IsLess = Builder.CreateICmpULT(Counter, NumElementsPtr,
- "isless");
- // If the condition is true, execute the body.
- Builder.CreateCondBr(IsLess, ForBody, AfterFor);
-
- EmitBlock(ForBody);
- llvm::BasicBlock *ContinueBlock = createBasicBlock("for.inc");
- // Inside the loop body, emit the constructor call on the array element.
- Counter = Builder.CreateLoad(IndexPtr);
- Src = Builder.CreateInBoundsGEP(Src, Counter, "srcaddress");
- Dest = Builder.CreateInBoundsGEP(Dest, Counter, "destaddress");
- EmitClassMemberwiseCopy(Dest, Src, ClassDecl);
-
- EmitBlock(ContinueBlock);
-
- // Emit the increment of the loop counter.
- llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
- Counter = Builder.CreateLoad(IndexPtr);
- NextVal = Builder.CreateAdd(Counter, NextVal, "inc");
- Builder.CreateStore(NextVal, IndexPtr);
-
- // Finally, branch back up to the condition for the next iteration.
- EmitBranch(CondBlock);
-
- // Emit the fall-through block.
- EmitBlock(AfterFor, true);
-}
-
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
@@ -405,111 +310,6 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD,
return VTT;
}
-
-/// EmitClassMemberwiseCopy - This routine generates code to copy a class
-/// object from SrcValue to DestValue. Copying can be either a bitwise copy
-/// or via a copy constructor call.
-void CodeGenFunction::EmitClassMemberwiseCopy(
- llvm::Value *Dest, llvm::Value *Src,
- const CXXRecordDecl *ClassDecl) {
- if (ClassDecl->hasTrivialCopyConstructor()) {
- EmitAggregateCopy(Dest, Src, getContext().getTagDeclType(ClassDecl));
- return;
- }
-
- CXXConstructorDecl *CopyCtor = ClassDecl->getCopyConstructor(getContext(), 0);
- assert(CopyCtor && "Did not have copy ctor!");
-
- EmitCopyCtorCall(*this, CopyCtor, Dest, Src);
-}
-
-/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a
-/// copy constructor, in accordance with section 12.8 (p7 and p8) of C++03
-/// The implicitly-defined copy constructor for class X performs a memberwise
-/// copy of its subobjects. The order of copying is the same as the order of
-/// initialization of bases and members in a user-defined constructor
-/// Each subobject is copied in the manner appropriate to its type:
-/// if the subobject is of class type, the copy constructor for the class is
-/// used;
-/// if the subobject is an array, each element is copied, in the manner
-/// appropriate to the element type;
-/// if the subobject is of scalar type, the built-in assignment operator is
-/// used.
-/// Virtual base class subobjects shall be copied only once by the
-/// implicitly-defined copy constructor
-
-void
-CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
- const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
- CXXCtorType CtorType = CurGD.getCtorType();
- (void) CtorType;
-
- const CXXRecordDecl *ClassDecl = Ctor->getParent();
- assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
- "SynthesizeCXXCopyConstructor - copy constructor has definition already");
- assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
-
- llvm::Value *ThisPtr = LoadCXXThis();
-
- // Find the source pointer.
- unsigned SrcArgIndex = Args.size() - 1;
- assert(CtorType == Ctor_Base || SrcArgIndex == 1);
- assert(CtorType != Ctor_Base ||
- (ClassDecl->getNumVBases() != 0 && SrcArgIndex == 2) ||
- SrcArgIndex == 1);
-
- llvm::Value *SrcPtr =
- Builder.CreateLoad(GetAddrOfLocalVar(Args[SrcArgIndex].first));
-
- for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
- E = ClassDecl->field_end(); I != E; ++I) {
- const FieldDecl *Field = *I;
-
- QualType FieldType = getContext().getCanonicalType(Field->getType());
- const ConstantArrayType *Array =
- getContext().getAsConstantArrayType(FieldType);
- if (Array)
- FieldType = getContext().getBaseElementType(FieldType);
-
- if (const RecordType *FieldClassType = FieldType->getAs<RecordType>()) {
- CXXRecordDecl *FieldClassDecl
- = cast<CXXRecordDecl>(FieldClassType->getDecl());
- LValue LHS = EmitLValueForField(ThisPtr, Field, 0);
- LValue RHS = EmitLValueForField(SrcPtr, Field, 0);
- if (Array) {
- const llvm::Type *BasePtr = ConvertType(FieldType)->getPointerTo();
- llvm::Value *DestBaseAddrPtr =
- Builder.CreateBitCast(LHS.getAddress(), BasePtr);
- llvm::Value *SrcBaseAddrPtr =
- Builder.CreateBitCast(RHS.getAddress(), BasePtr);
- EmitClassAggrMemberwiseCopy(DestBaseAddrPtr, SrcBaseAddrPtr, Array,
- FieldClassDecl);
- }
- else
- EmitClassMemberwiseCopy(LHS.getAddress(), RHS.getAddress(),
- FieldClassDecl);
- continue;
- }
-
- // Do a built-in assignment of scalar data members.
- LValue LHS = EmitLValueForFieldInitialization(ThisPtr, Field, 0);
- LValue RHS = EmitLValueForFieldInitialization(SrcPtr, Field, 0);
-
- if (!hasAggregateLLVMType(Field->getType())) {
- RValue RVRHS = EmitLoadOfLValue(RHS, Field->getType());
- EmitStoreThroughLValue(RVRHS, LHS, Field->getType());
- } else if (Field->getType()->isAnyComplexType()) {
- ComplexPairTy Pair = LoadComplexFromAddr(RHS.getAddress(),
- RHS.isVolatileQualified());
- StoreComplexToAddr(Pair, LHS.getAddress(), LHS.isVolatileQualified());
- } else {
- EmitAggregateCopy(LHS.getAddress(), RHS.getAddress(), Field->getType());
- }
- }
-
- InitializeVTablePointers(ClassDecl);
-}
-
static void EmitBaseInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
CXXBaseOrMemberInitializer *BaseInit,
@@ -547,9 +347,97 @@ static void EmitBaseInitializer(CodeGenFunction &CGF,
}
}
+static void EmitAggMemberInitializer(CodeGenFunction &CGF,
+ LValue LHS,
+ llvm::Value *ArrayIndexVar,
+ CXXBaseOrMemberInitializer *MemberInit,
+ QualType T,
+ unsigned Index) {
+ if (Index == MemberInit->getNumArrayIndices()) {
+ CodeGenFunction::CleanupScope Cleanups(CGF);
+
+ llvm::Value *Dest = LHS.getAddress();
+ if (ArrayIndexVar) {
+ // If we have an array index variable, load it and use it as an offset.
+ // Then, increment the value.
+ llvm::Value *ArrayIndex = CGF.Builder.CreateLoad(ArrayIndexVar);
+ Dest = CGF.Builder.CreateInBoundsGEP(Dest, ArrayIndex, "destaddress");
+ llvm::Value *Next = llvm::ConstantInt::get(ArrayIndex->getType(), 1);
+ Next = CGF.Builder.CreateAdd(ArrayIndex, Next, "inc");
+ CGF.Builder.CreateStore(Next, ArrayIndexVar);
+ }
+
+ CGF.EmitAggExpr(MemberInit->getInit(), Dest,
+ LHS.isVolatileQualified(),
+ /*IgnoreResult*/ false,
+ /*IsInitializer*/ true);
+
+ return;
+ }
+
+ const ConstantArrayType *Array = CGF.getContext().getAsConstantArrayType(T);
+ assert(Array && "Array initialization without the array type?");
+ llvm::Value *IndexVar
+ = CGF.GetAddrOfLocalVar(MemberInit->getArrayIndex(Index));
+ assert(IndexVar && "Array index variable not loaded");
+
+ // Initialize this index variable to zero.
+ llvm::Value* Zero
+ = llvm::Constant::getNullValue(
+ CGF.ConvertType(CGF.getContext().getSizeType()));
+ CGF.Builder.CreateStore(Zero, IndexVar);
+
+ // Start the loop with a block that tests the condition.
+ llvm::BasicBlock *CondBlock = CGF.createBasicBlock("for.cond");
+ llvm::BasicBlock *AfterFor = CGF.createBasicBlock("for.end");
+
+ CGF.EmitBlock(CondBlock);
+
+ llvm::BasicBlock *ForBody = CGF.createBasicBlock("for.body");
+ // Generate: if (loop-index < number-of-elements) fall to the loop body,
+ // otherwise, go to the block after the for-loop.
+ uint64_t NumElements = Array->getSize().getZExtValue();
+ llvm::Value *Counter = CGF.Builder.CreateLoad(IndexVar);
+ llvm::Value *NumElementsPtr =
+ llvm::ConstantInt::get(Counter->getType(), NumElements);
+ llvm::Value *IsLess = CGF.Builder.CreateICmpULT(Counter, NumElementsPtr,
+ "isless");
+
+ // If the condition is true, execute the body.
+ CGF.Builder.CreateCondBr(IsLess, ForBody, AfterFor);
+
+ CGF.EmitBlock(ForBody);
+ llvm::BasicBlock *ContinueBlock = CGF.createBasicBlock("for.inc");
+
+ {
+ CodeGenFunction::CleanupScope Cleanups(CGF);
+
+ // Inside the loop body recurse to emit the inner loop or, eventually, the
+ // constructor call.
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit,
+ Array->getElementType(), Index + 1);
+ }
+
+ CGF.EmitBlock(ContinueBlock);
+
+ // Emit the increment of the loop counter.
+ llvm::Value *NextVal = llvm::ConstantInt::get(Counter->getType(), 1);
+ Counter = CGF.Builder.CreateLoad(IndexVar);
+ NextVal = CGF.Builder.CreateAdd(Counter, NextVal, "inc");
+ CGF.Builder.CreateStore(NextVal, IndexVar);
+
+ // Finally, branch back up to the condition for the next iteration.
+ CGF.EmitBranch(CondBlock);
+
+ // Emit the fall-through block.
+ CGF.EmitBlock(AfterFor, true);
+}
+
static void EmitMemberInitializer(CodeGenFunction &CGF,
const CXXRecordDecl *ClassDecl,
- CXXBaseOrMemberInitializer *MemberInit) {
+ CXXBaseOrMemberInitializer *MemberInit,
+ const CXXConstructorDecl *Constructor,
+ FunctionArgList &Args) {
assert(MemberInit->isMemberInitializer() &&
"Must have member initializer!");
@@ -558,13 +446,15 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
QualType FieldType = CGF.getContext().getCanonicalType(Field->getType());
llvm::Value *ThisPtr = CGF.LoadCXXThis();
- LValue LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
+ LValue LHS;
// If we are initializing an anonymous union field, drill down to the field.
if (MemberInit->getAnonUnionMember()) {
Field = MemberInit->getAnonUnionMember();
- LHS = CGF.EmitLValueForField(LHS.getAddress(), Field, 0);
+ LHS = CGF.EmitLValueForAnonRecordField(ThisPtr, Field, 0);
FieldType = Field->getType();
+ } else {
+ LHS = CGF.EmitLValueForFieldInitialization(ThisPtr, Field, 0);
}
// FIXME: If there's no initializer and the CXXBaseOrMemberInitializer
@@ -575,7 +465,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
/*IsInitializer=*/true);
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
} else if (FieldType->isArrayType() && !MemberInit->getInit()) {
- CGF.EmitMemSetToZero(LHS.getAddress(), Field->getType());
+ CGF.EmitNullInitialization(LHS.getAddress(), Field->getType());
} else if (!CGF.hasAggregateLLVMType(Field->getType())) {
RHS = RValue::get(CGF.EmitScalarExpr(MemberInit->getInit(), true));
CGF.EmitStoreThroughLValue(RHS, LHS, FieldType);
@@ -583,14 +473,61 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
CGF.EmitComplexExprIntoAddr(MemberInit->getInit(), LHS.getAddress(),
LHS.isVolatileQualified());
} else {
- CGF.EmitAggExpr(MemberInit->getInit(), LHS.getAddress(),
- LHS.isVolatileQualified(),
- /*IgnoreResult*/ false,
- /*IsInitializer*/ true);
+ llvm::Value *ArrayIndexVar = 0;
+ const ConstantArrayType *Array
+ = CGF.getContext().getAsConstantArrayType(FieldType);
+ if (Array && Constructor->isImplicit() &&
+ Constructor->isCopyConstructor()) {
+ const llvm::Type *SizeTy
+ = CGF.ConvertType(CGF.getContext().getSizeType());
+
+ // The LHS is a pointer to the first object we'll be constructing, as
+ // a flat array.
+ QualType BaseElementTy = CGF.getContext().getBaseElementType(Array);
+ const llvm::Type *BasePtr = CGF.ConvertType(BaseElementTy);
+ BasePtr = llvm::PointerType::getUnqual(BasePtr);
+ llvm::Value *BaseAddrPtr = CGF.Builder.CreateBitCast(LHS.getAddress(),
+ BasePtr);
+ LHS = LValue::MakeAddr(BaseAddrPtr, CGF.MakeQualifiers(BaseElementTy));
+
+ // Create an array index that will be used to walk over all of the
+ // objects we're constructing.
+ ArrayIndexVar = CGF.CreateTempAlloca(SizeTy, "object.index");
+ llvm::Value *Zero = llvm::Constant::getNullValue(SizeTy);
+ CGF.Builder.CreateStore(Zero, ArrayIndexVar);
+
+ // If we are copying an array of scalars or classes with trivial copy
+ // constructors, perform a single aggregate copy.
+ const RecordType *Record = BaseElementTy->getAs<RecordType>();
+ if (!Record ||
+ cast<CXXRecordDecl>(Record->getDecl())->hasTrivialCopyConstructor()) {
+ // Find the source pointer. We know it's the last argument because
+ // we know we're in a copy constructor.
+ unsigned SrcArgIndex = Args.size() - 1;
+ llvm::Value *SrcPtr
+ = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(Args[SrcArgIndex].first));
+ LValue Src = CGF.EmitLValueForFieldInitialization(SrcPtr, Field, 0);
+
+ // Copy the aggregate.
+ CGF.EmitAggregateCopy(LHS.getAddress(), Src.getAddress(), FieldType,
+ LHS.isVolatileQualified());
+ return;
+ }
+
+ // Emit the block variables for the array indices, if any.
+ for (unsigned I = 0, N = MemberInit->getNumArrayIndices(); I != N; ++I)
+ CGF.EmitLocalBlockVarDecl(*MemberInit->getArrayIndex(I));
+ }
+
+ EmitAggMemberInitializer(CGF, LHS, ArrayIndexVar, MemberInit, FieldType, 0);
if (!CGF.Exceptions)
return;
+ // FIXME: If we have an array of classes w/ non-trivial destructors,
+ // we need to destroy in reverse order of construction along the exception
+ // path.
const RecordType *RT = FieldType->getAs<RecordType>();
if (!RT)
return;
@@ -680,20 +617,13 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
// Emit the constructor prologue, i.e. the base and member
// initializers.
- EmitCtorPrologue(Ctor, CtorType);
+ EmitCtorPrologue(Ctor, CtorType, Args);
// Emit the body of the statement.
if (IsTryBody)
EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
else if (Body)
EmitStmt(Body);
- else {
- assert(Ctor->isImplicit() && "bodyless ctor not implicit");
- if (!Ctor->isDefaultConstructor()) {
- assert(Ctor->isCopyConstructor());
- SynthesizeCXXCopyConstructor(Args);
- }
- }
// Emit any cleanup blocks associated with the member or base
// initializers, which includes (along the exceptional path) the
@@ -708,7 +638,8 @@ void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
- CXXCtorType CtorType) {
+ CXXCtorType CtorType,
+ FunctionArgList &Args) {
const CXXRecordDecl *ClassDecl = CD->getParent();
llvm::SmallVector<CXXBaseOrMemberInitializer *, 8> MemberInitializers;
@@ -733,7 +664,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
assert(LiveTemporaries.empty() &&
"Should not have any live temporaries at initializer start!");
- EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I]);
+ EmitMemberInitializer(*this, ClassDecl, MemberInitializers[I], CD, Args);
}
}
@@ -1206,34 +1137,9 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
// Explicit arguments.
for (; I != E; ++I) {
-
const VarDecl *Param = I->first;
QualType ArgType = Param->getType(); // because we're passing it to itself
-
- // StartFunction converted the ABI-lowered parameter(s) into a
- // local alloca. We need to turn that into an r-value suitable
- // for EmitCall.
- llvm::Value *Local = GetAddrOfLocalVar(Param);
- RValue Arg;
-
- // For the most part, we just need to load the alloca, except:
- // 1) aggregate r-values are actually pointers to temporaries, and
- // 2) references to aggregates are pointers directly to the aggregate.
- // I don't know why references to non-aggregates are different here.
- if (ArgType->isReferenceType()) {
- const ReferenceType *RefType = ArgType->getAs<ReferenceType>();
- if (hasAggregateLLVMType(RefType->getPointeeType()))
- Arg = RValue::getAggregate(Local);
- else
- // Locals which are references to scalars are represented
- // with allocas holding the pointer.
- Arg = RValue::get(Builder.CreateLoad(Local));
- } else {
- if (hasAggregateLLVMType(ArgType))
- Arg = RValue::getAggregate(Local);
- else
- Arg = RValue::get(EmitLoadOfScalar(Local, false, ArgType));
- }
+ RValue Arg = EmitDelegateCallArg(Param);
DelegateArgs.push_back(std::make_pair(Arg, ArgType));
}
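
The CGClass.cpp changes above retire SynthesizeCXXCopyConstructor and instead route implicit copy constructors through the member-initializer path, with EmitAggMemberInitializer emitting an index-variable loop for array members of class type. A small program that exercises exactly that case:

    #include <cstdio>

    struct Tracked {
      int id;
      Tracked() : id(0) {}
      Tracked(const Tracked &other) : id(other.id) { std::printf("copy %d\n", id); }
    };

    struct Holder {
      Tracked items[3];  // the implicit Holder copy ctor must copy element by element,
                         // which is what the emitted loop over the array index does
    };

    int main() {
      Holder a;
      a.items[1].id = 42;
      Holder b = a;      // invokes the implicitly-defined copy constructor
      return b.items[1].id == 42 ? 0 : 1;
    }
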
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 48ae5113b3af..c9bcb1b7e043 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -64,7 +64,14 @@ llvm::DIDescriptor CGDebugInfo::getContextDescriptor(const Decl *Context,
// Check namespace.
if (const NamespaceDecl *NSDecl = dyn_cast<NamespaceDecl>(Context))
return llvm::DIDescriptor(getOrCreateNameSpace(NSDecl, CompileUnit));
-
+
+ if (const RecordDecl *RDecl = dyn_cast<RecordDecl>(Context)) {
+ if (!RDecl->isDependentType()) {
+ llvm::DIType Ty = getOrCreateType(CGM.getContext().getTypeDeclType(RDecl),
+ llvm::DIFile(CompileUnit));
+ return llvm::DIDescriptor(Ty);
+ }
+ }
return CompileUnit;
}
@@ -114,10 +121,29 @@ llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
llvm::DIFile F = DebugFactory.CreateFile(AbsFileName.getLast(),
AbsFileName.getDirname(), TheCU);
- DIFileCache[fname] = F.getNode();
+ DIFileCache[fname] = F;
return F;
}
+
+/// getLineNumber - Get line number for the location. If location is invalid
+/// then use current location.
+unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getLine();
+}
+
+/// getColumnNumber - Get column number for the location. If location is
+/// invalid then use current location.
+unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
+ assert (CurLoc.isValid() && "Invalid current location!");
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
+ return PLoc.getColumn();
+}
+
/// CreateCompileUnit - Create new compile unit.
void CGDebugInfo::CreateCompileUnit() {
@@ -327,10 +353,11 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
EltTys.clear();
unsigned Flags = llvm::DIType::FlagAppleBlock;
+ unsigned LineNo = getLineNumber(CurLoc);
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_descriptor",
- Unit, 0, FieldOffset, 0, 0, Flags,
- llvm::DIType(), Elements);
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
// Bit size, align and offset of the type.
uint64_t Size = CGM.getContext().getTypeSize(Ty);
@@ -338,7 +365,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
DescTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type,
Unit, "", Unit,
- 0, Size, Align, 0, 0, EltTy);
+ LineNo, Size, Align, 0, 0, EltTy);
FieldOffset = 0;
FType = CGM.getContext().getPointerType(CGM.getContext().VoidTy);
@@ -355,7 +382,7 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
FieldAlign = CGM.getContext().getTypeAlign(Ty);
FieldTy = DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_member, Unit,
"__descriptor", Unit,
- 0, FieldSize, FieldAlign,
+ LineNo, FieldSize, FieldAlign,
FieldOffset, 0, FieldTy);
EltTys.push_back(FieldTy);
@@ -363,14 +390,14 @@ llvm::DIType CGDebugInfo::CreateType(const BlockPointerType *Ty,
Elements = DebugFactory.GetOrCreateArray(EltTys.data(), EltTys.size());
EltTy = DebugFactory.CreateCompositeType(Tag, Unit, "__block_literal_generic",
- Unit, 0, FieldOffset, 0, 0, Flags,
- llvm::DIType(), Elements);
+ Unit, LineNo, FieldOffset, 0, 0,
+ Flags, llvm::DIType(), Elements);
BlockLiteralGenericSet = true;
BlockLiteralGeneric
= DebugFactory.CreateDerivedType(llvm::dwarf::DW_TAG_pointer_type, Unit,
"", Unit,
- 0, Size, Align, 0, 0, EltTy);
+ LineNo, Size, Align, 0, 0, EltTy);
return BlockLiteralGeneric;
}
@@ -382,9 +409,7 @@ llvm::DIType CGDebugInfo::CreateType(const TypedefType *Ty,
// We don't set size information, but do specify where the typedef was
// declared.
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(Ty->getDecl()->getLocation());
- unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
+ unsigned Line = getLineNumber(Ty->getDecl()->getLocation());
llvm::DIDescriptor TyContext
= getContextDescriptor(dyn_cast<Decl>(Ty->getDecl()->getDeclContext()),
@@ -430,7 +455,6 @@ void CGDebugInfo::
CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
llvm::SmallVectorImpl<llvm::DIDescriptor> &EltTys) {
unsigned FieldNo = 0;
- SourceManager &SM = CGM.getContext().getSourceManager();
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end();
@@ -445,16 +469,8 @@ CollectRecordFields(const RecordDecl *RD, llvm::DIFile Unit,
continue;
// Get the location for the field.
- SourceLocation FieldDefLoc = Field->getLocation();
- PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
- llvm::DIFile FieldDefUnit;
- unsigned FieldLine = 0;
-
- if (!PLoc.isInvalid()) {
- FieldDefUnit = getOrCreateFile(FieldDefLoc);
- FieldLine = PLoc.getLine();
- }
-
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
QualType FType = Field->getType();
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
@@ -506,7 +522,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
// Add "this" pointer.
- llvm::DIArray Args = llvm::DICompositeType(FnTy.getNode()).getTypeArray();
+ llvm::DIArray Args = llvm::DICompositeType(FnTy).getTypeArray();
assert (Args.getNumElements() && "Invalid number of arguments!");
llvm::SmallVector<llvm::DIDescriptor, 16> Elts;
@@ -520,7 +536,7 @@ CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
Context.getPointerType(Context.getTagDeclType(Method->getParent()));
llvm::DIType ThisPtrType =
DebugFactory.CreateArtificialType(getOrCreateType(ThisPtr, Unit));
- TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType.getNode();
+ TypeCache[ThisPtr.getAsOpaquePtr()] = ThisPtrType;
Elts.push_back(ThisPtrType);
// Copy rest of the arguments.
@@ -555,18 +571,9 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
if (!IsCtorOrDtor)
CGM.getMangledName(MethodLinkageName, Method);
- SourceManager &SM = CGM.getContext().getSourceManager();
-
// Get the location for the method.
- SourceLocation MethodDefLoc = Method->getLocation();
- PresumedLoc PLoc = SM.getPresumedLoc(MethodDefLoc);
- llvm::DIFile MethodDefUnit;
- unsigned MethodLine = 0;
-
- if (!PLoc.isInvalid()) {
- MethodDefUnit = getOrCreateFile(MethodDefLoc);
- MethodLine = PLoc.getLine();
- }
+ llvm::DIFile MethodDefUnit = getOrCreateFile(Method->getLocation());
+ unsigned MethodLine = getLineNumber(Method->getLocation());
// Collect virtual method info.
llvm::DIType ContainingType;
@@ -597,7 +604,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// Don't cache ctors or dtors since we have to emit multiple functions for
// a single ctor or dtor.
if (!IsCtorOrDtor && Method->isThisDeclarationADefinition())
- SPCache[Method] = llvm::WeakVH(SP.getNode());
+ SPCache[Method] = llvm::WeakVH(SP);
return SP;
}
@@ -741,16 +748,9 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
Tag = llvm::dwarf::DW_TAG_class_type;
}
- SourceManager &SM = CGM.getContext().getSourceManager();
-
// Get overall information about the record type for the debug info.
- PresumedLoc PLoc = SM.getPresumedLoc(RD->getLocation());
- llvm::DIFile DefUnit;
- unsigned Line = 0;
- if (!PLoc.isInvalid()) {
- DefUnit = getOrCreateFile(RD->getLocation());
- Line = PLoc.getLine();
- }
+ llvm::DIFile DefUnit = getOrCreateFile(RD->getLocation());
+ unsigned Line = getLineNumber(RD->getLocation());
// Records and classes and unions can all be recursive. To handle them, we
// first generate a debug descriptor for the struct as a forward declaration.
@@ -774,13 +774,14 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
if (!RD->getDefinition())
return FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
- RegionStack.push_back(FwdDecl.getNode());
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl.getNode());
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
@@ -799,9 +800,9 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
const ASTRecordLayout &RL = CGM.getContext().getASTRecordLayout(RD);
if (const CXXRecordDecl *PBase = RL.getPrimaryBase())
ContainingType =
- getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit).getNode();
+ getOrCreateType(QualType(PBase->getTypeForDecl(), 0), Unit);
else if (CXXDecl->isDynamicClass())
- ContainingType = FwdDecl.getNode();
+ ContainingType = FwdDecl;
}
llvm::DIArray Elements =
@@ -829,24 +830,26 @@ llvm::DIType CGDebugInfo::CreateType(const RecordType *Ty,
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[RD] = llvm::WeakVH(RealDecl.getNode());
+ RegionMap[RD] = llvm::WeakVH(RealDecl);
return RealDecl;
}
+/// CreateType - get objective-c object type.
+llvm::DIType CGDebugInfo::CreateType(const ObjCObjectType *Ty,
+ llvm::DIFile Unit) {
+ // Ignore protocols.
+ return getOrCreateType(Ty->getBaseType(), Unit);
+}
+
/// CreateType - get objective-c interface type.
llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
llvm::DIFile Unit) {
ObjCInterfaceDecl *ID = Ty->getDecl();
-
unsigned Tag = llvm::dwarf::DW_TAG_structure_type;
- SourceManager &SM = CGM.getContext().getSourceManager();
// Get overall information about the record type for the debug info.
llvm::DIFile DefUnit = getOrCreateFile(ID->getLocation());
- PresumedLoc PLoc = SM.getPresumedLoc(ID->getLocation());
- unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
-
+ unsigned Line = getLineNumber(ID->getLocation());
unsigned RuntimeLang = TheCU.getLanguage();
// To handle recursive interface, we
@@ -865,13 +868,14 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
if (ID->isForwardDecl())
return FwdDecl;
- llvm::TrackingVH<llvm::MDNode> FwdDeclNode = FwdDecl.getNode();
+ llvm::MDNode *MN = FwdDecl;
+ llvm::TrackingVH<llvm::MDNode> FwdDeclNode = MN;
// Otherwise, insert it into the TypeCache so that recursive uses will find
// it.
- TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl.getNode();
+ TypeCache[QualType(Ty, 0).getAsOpaquePtr()] = FwdDecl;
// Push the struct on region stack.
- RegionStack.push_back(FwdDecl.getNode());
- RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl.getNode());
+ RegionStack.push_back(FwdDeclNode);
+ RegionMap[Ty->getDecl()] = llvm::WeakVH(FwdDecl);
// Convert all the elements.
llvm::SmallVector<llvm::DIDescriptor, 16> EltTys;
@@ -902,12 +906,8 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
continue;
// Get the location for the field.
- SourceLocation FieldDefLoc = Field->getLocation();
- llvm::DIFile FieldDefUnit = getOrCreateFile(FieldDefLoc);
- PresumedLoc PLoc = SM.getPresumedLoc(FieldDefLoc);
- unsigned FieldLine = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
-
+ llvm::DIFile FieldDefUnit = getOrCreateFile(Field->getLocation());
+ unsigned FieldLine = getLineNumber(Field->getLocation());
QualType FType = Field->getType();
uint64_t FieldSize = 0;
unsigned FieldAlign = 0;
@@ -962,7 +962,7 @@ llvm::DIType CGDebugInfo::CreateType(const ObjCInterfaceType *Ty,
// Now that we have a real decl for the struct, replace anything using the
// old decl with the new one. This will recursively update the debug info.
llvm::DIDerivedType(FwdDeclNode).replaceAllUsesWith(RealDecl);
- RegionMap[ID] = llvm::WeakVH(RealDecl.getNode());
+ RegionMap[ID] = llvm::WeakVH(RealDecl);
return RealDecl;
}
@@ -985,12 +985,8 @@ llvm::DIType CGDebugInfo::CreateType(const EnumType *Ty,
llvm::DIArray EltArray =
DebugFactory.GetOrCreateArray(Enumerators.data(), Enumerators.size());
- SourceLocation DefLoc = ED->getLocation();
- llvm::DIFile DefUnit = getOrCreateFile(DefLoc);
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(DefLoc);
- unsigned Line = PLoc.isInvalid() ? 0 : PLoc.getLine();
-
+ llvm::DIFile DefUnit = getOrCreateFile(ED->getLocation());
+ unsigned Line = getLineNumber(ED->getLocation());
// Size and align of the type.
uint64_t Size = 0;
@@ -1152,15 +1148,12 @@ static QualType UnwrapTypeForDebugInfo(QualType T) {
case Type::Decltype:
T = cast<DecltypeType>(T)->getUnderlyingType();
break;
- case Type::QualifiedName:
- T = cast<QualifiedNameType>(T)->getNamedType();
+ case Type::Elaborated:
+ T = cast<ElaboratedType>(T)->getNamedType();
break;
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
- case Type::Elaborated:
- T = cast<ElaboratedType>(T)->getUnderlyingType();
- break;
}
assert(T != LastT && "Type unwrapping failed to unwrap!");
@@ -1194,7 +1187,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
llvm::DIType Res = CreateTypeNode(Ty, Unit);
// And update the type cache.
- TypeCache[Ty.getAsOpaquePtr()] = Res.getNode();
+ TypeCache[Ty.getAsOpaquePtr()] = Res;
return Res;
}
@@ -1224,6 +1217,8 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
return CreateType(cast<VectorType>(Ty), Unit);
case Type::ObjCObjectPointer:
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
+ case Type::ObjCObject:
+ return CreateType(cast<ObjCObjectType>(Ty), Unit);
case Type::ObjCInterface:
return CreateType(cast<ObjCInterfaceType>(Ty), Unit);
case Type::Builtin: return CreateType(cast<BuiltinType>(Ty), Unit);
@@ -1251,7 +1246,6 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
case Type::TemplateSpecialization:
case Type::Elaborated:
- case Type::QualifiedName:
case Type::SubstTemplateTypeParm:
case Type::TypeOfExpr:
case Type::TypeOf:
@@ -1304,9 +1298,10 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
FI = SPCache.find(FD);
if (FI != SPCache.end()) {
llvm::DIDescriptor SP(dyn_cast_or_null<llvm::MDNode>(FI->second));
- if (SP.isSubprogram() && llvm::DISubprogram(SP.getNode()).isDefinition()) {
- RegionStack.push_back(SP.getNode());
- RegionMap[D] = llvm::WeakVH(SP.getNode());
+ if (SP.isSubprogram() && llvm::DISubprogram(SP).isDefinition()) {
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
return;
}
}
@@ -1325,8 +1320,7 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
// Usually, CurLoc points to the left bracket location of compound
// statement representing function body.
llvm::DIFile Unit = getOrCreateFile(CurLoc);
- SourceManager &SM = CGM.getContext().getSourceManager();
- unsigned LineNo = SM.getPresumedLoc(CurLoc).getLine();
+ unsigned LineNo = getLineNumber(CurLoc);
llvm::DISubprogram SP =
DebugFactory.CreateSubprogram(Unit, Name, Name, LinkageName, Unit, LineNo,
@@ -1334,8 +1328,9 @@ void CGDebugInfo::EmitFunctionStart(GlobalDecl GD, QualType FnType,
Fn->hasInternalLinkage(), true/*definition*/);
// Push function on region stack.
- RegionStack.push_back(SP.getNode());
- RegionMap[D] = llvm::WeakVH(SP.getNode());
+ llvm::MDNode *SPN = SP;
+ RegionStack.push_back(SPN);
+ RegionMap[D] = llvm::WeakVH(SP);
}
@@ -1355,27 +1350,23 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
// Update last state.
PrevLoc = CurLoc;
- // Get the appropriate compile unit.
- llvm::DIFile Unit = getOrCreateFile(CurLoc);
- PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
-
llvm::MDNode *Scope = RegionStack.back();
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(PLoc.getLine(),
- PLoc.getColumn(),
+ Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(getLineNumber(CurLoc),
+ getColumnNumber(CurLoc),
Scope));
}
/// EmitRegionStart - Constructs the debug code for entering a declarative
/// region - "llvm.dbg.region.start.".
void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
llvm::DIDescriptor D =
DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
llvm::DIDescriptor() :
llvm::DIDescriptor(RegionStack.back()),
- PLoc.getLine(), PLoc.getColumn());
- RegionStack.push_back(D.getNode());
+ getLineNumber(CurLoc),
+ getColumnNumber(CurLoc));
+ llvm::MDNode *DN = D;
+ RegionStack.push_back(DN);
}
/// EmitRegionEnd - Constructs the debug code for exiting a declarative
@@ -1473,20 +1464,14 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
else
Ty = getOrCreateType(VD->getType(), Unit);
+ // If there is not any debug info for type then do not emit debug info
+ // for this variable.
+ if (!Ty)
+ return;
+
// Get location information.
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(VD->getLocation());
- unsigned Line = 0;
- unsigned Column = 0;
- if (PLoc.isInvalid())
- PLoc = SM.getPresumedLoc(CurLoc);
- if (PLoc.isValid()) {
- Line = PLoc.getLine();
- Column = PLoc.getColumn();
- Unit = getOrCreateFile(CurLoc);
- } else {
- Unit = llvm::DIFile();
- }
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
// Create the descriptor for the variable.
llvm::DIVariable D =
@@ -1520,13 +1505,8 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
Ty = getOrCreateType(VD->getType(), Unit);
// Get location information.
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(VD->getLocation());
- unsigned Line = 0;
- if (!PLoc.isInvalid())
- Line = PLoc.getLine();
- else
- Unit = llvm::DIFile();
+ unsigned Line = getLineNumber(VD->getLocation());
+ unsigned Column = getColumnNumber(VD->getLocation());
CharUnits offset = CGF->BlockDecls[VD];
llvm::SmallVector<llvm::Value *, 9> addr;
@@ -1558,7 +1538,7 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
llvm::MDNode *Scope = RegionStack.back();
- Call->setDebugLoc(llvm::DebugLoc::get(Line, PLoc.getColumn(), Scope));
+ Call->setDebugLoc(llvm::DebugLoc::get(Line, Column, Scope));
}
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
@@ -1588,9 +1568,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(D->getLocation());
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
- unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+ unsigned LineNo = getLineNumber(D->getLocation());
QualType T = D->getType();
if (T->isIncompleteArrayType()) {
@@ -1605,11 +1583,13 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ArrayType::Normal, 0);
}
llvm::StringRef DeclName = D->getName();
+ llvm::StringRef LinkageName;
+ if (D->getDeclContext() && !isa<FunctionDecl>(D->getDeclContext()))
+ LinkageName = Var->getName();
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
- DebugFactory.CreateGlobalVariable(DContext, DeclName,
- DeclName, llvm::StringRef(), Unit, LineNo,
- getOrCreateType(T, Unit),
+ DebugFactory.CreateGlobalVariable(DContext, DeclName, DeclName, LinkageName,
+ Unit, LineNo, getOrCreateType(T, Unit),
Var->hasInternalLinkage(),
true/*definition*/, Var);
}
@@ -1619,9 +1599,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
ObjCInterfaceDecl *ID) {
// Create global variable debug descriptor.
llvm::DIFile Unit = getOrCreateFile(ID->getLocation());
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(ID->getLocation());
- unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+ unsigned LineNo = getLineNumber(ID->getLocation());
llvm::StringRef Name = ID->getName();
@@ -1654,15 +1632,13 @@ CGDebugInfo::getOrCreateNameSpace(const NamespaceDecl *NSDecl,
if (I != NameSpaceCache.end())
return llvm::DINameSpace(cast<llvm::MDNode>(I->second));
- SourceManager &SM = CGM.getContext().getSourceManager();
- PresumedLoc PLoc = SM.getPresumedLoc(NSDecl->getLocation());
- unsigned LineNo = PLoc.isInvalid() ? 0 : PLoc.getLine();
+ unsigned LineNo = getLineNumber(NSDecl->getLocation());
llvm::DIDescriptor Context =
getContextDescriptor(dyn_cast<Decl>(NSDecl->getDeclContext()), Unit);
llvm::DINameSpace NS =
DebugFactory.CreateNameSpace(Context, NSDecl->getName(),
- llvm::DIFile(Unit.getNode()), LineNo);
- NameSpaceCache[NSDecl] = llvm::WeakVH(NS.getNode());
+ llvm::DIFile(Unit), LineNo);
+ NameSpaceCache[NSDecl] = llvm::WeakVH(NS);
return NS;
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index c16379aca34c..620a5f2f8480 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -80,6 +80,7 @@ class CGDebugInfo {
llvm::DIType CreateType(const TagType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const RecordType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DIFile F);
+ llvm::DIType CreateType(const ObjCObjectType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const EnumType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const VectorType *Ty, llvm::DIFile F);
llvm::DIType CreateType(const ArrayType *Ty, llvm::DIFile F);
@@ -208,6 +209,13 @@ private:
/// getVTableName - Get vtable name for the given Class.
llvm::StringRef getVTableName(const CXXRecordDecl *Decl);
+ /// getLineNumber - Get line number for the location. If location is invalid
+ /// then use current location.
+ unsigned getLineNumber(SourceLocation Loc);
+
+ /// getColumnNumber - Get column number for the location. If location is
+ /// invalid then use current location.
+ unsigned getColumnNumber(SourceLocation Loc);
};
} // namespace CodeGen
} // namespace clang
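For reference, a minimal sketch of what the two new helpers are expected to do, inferred from their doc comments and from the PresumedLoc boilerplate they replace at the call sites above; this is not a hunk from the patch, and the exact fallback behavior is an assumption:

unsigned CGDebugInfo::getLineNumber(SourceLocation Loc) {
  SourceManager &SM = CGM.getContext().getSourceManager();
  // Fall back to the current location when the given one is invalid.
  PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
  return PLoc.isValid() ? PLoc.getLine() : 0;
}

unsigned CGDebugInfo::getColumnNumber(SourceLocation Loc) {
  SourceManager &SM = CGM.getContext().getSourceManager();
  PresumedLoc PLoc = SM.getPresumedLoc(Loc.isValid() ? Loc : CurLoc);
  return PLoc.isValid() ? PLoc.getColumn() : 0;
}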
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 48198ff5761b..07edca0f5fef 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -114,14 +114,14 @@ void CodeGenFunction::EmitBlockVarDecl(const VarDecl &D) {
llvm::GlobalValue::LinkageTypes Linkage =
llvm::GlobalValue::InternalLinkage;
- // If this is a static declaration inside an inline function, it must have
- // weak linkage so that the linker will merge multiple definitions of it.
- if (getContext().getLangOptions().CPlusPlus) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(CurFuncDecl)) {
- if (FD->isInlined())
- Linkage = llvm::GlobalValue::WeakAnyLinkage;
- }
- }
+ // If the function definition has some sort of weak linkage, its
+ // static variables should also be weak so that they get properly
+ // uniqued. We can't do this in C, though, because there's no
+ // standard way to agree on which variables are the same (i.e.
+ // there's no mangling).
+ if (getContext().getLangOptions().CPlusPlus)
+ if (llvm::GlobalValue::isWeakForLinker(CurFn->getLinkage()))
+ Linkage = CurFn->getLinkage();
return EmitStaticBlockVarDecl(D, Linkage);
}
@@ -239,8 +239,6 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
// Store into LocalDeclMap before generating initializer to handle
// circular references.
DMEntry = GV;
- if (getContext().getLangOptions().CPlusPlus)
- CGM.setStaticLocalDeclAddress(&D, GV);
// We can't have a VLA here, but we can have a pointer to a VLA,
// even though that doesn't really make any sense.
@@ -269,6 +267,9 @@ void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D,
if (D.hasAttr<UsedAttr>())
CGM.AddUsedGlobal(GV);
+ if (getContext().getLangOptions().CPlusPlus)
+ CGM.setStaticLocalDeclAddress(&D, GV);
+
// We may have to cast the constant because of the initializer
// mismatch above.
//
@@ -398,16 +399,20 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
CharUnits Align = CharUnits::Zero();
bool IsSimpleConstantInitializer = false;
+ bool NRVO = false;
+ llvm::Value *NRVOFlag = 0;
llvm::Value *DeclPtr;
if (Ty->isConstantSizeType()) {
if (!Target.useGlobalsForAutomaticVariables()) {
-
+ NRVO = getContext().getLangOptions().ElideConstructors &&
+ D.isNRVOVariable();
// If this value is an array or struct, is POD, and if the initializer is
- // a staticly determinable constant, try to optimize it.
+ // a statically determinable constant, try to optimize it (unless the NRVO
+ // is already optimizing this).
if (D.getInit() && !isByRef &&
(Ty->isArrayType() || Ty->isRecordType()) &&
Ty->isPODType() &&
- D.getInit()->isConstantInitializer(getContext())) {
+ D.getInit()->isConstantInitializer(getContext()) && !NRVO) {
// If this variable is marked 'const', emit the value as a global.
if (CGM.getCodeGenOpts().MergeAllConstants &&
Ty.isConstant(getContext())) {
@@ -418,19 +423,44 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
IsSimpleConstantInitializer = true;
}
- // A normal fixed sized variable becomes an alloca in the entry block.
+ // A normal fixed sized variable becomes an alloca in the entry block,
+ // unless it's an NRVO variable.
const llvm::Type *LTy = ConvertTypeForMem(Ty);
- if (isByRef)
- LTy = BuildByRefType(&D);
- llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
- Alloc->setName(D.getNameAsString());
-
- Align = getContext().getDeclAlign(&D);
- if (isByRef)
- Align = std::max(Align,
- CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
- Alloc->setAlignment(Align.getQuantity());
- DeclPtr = Alloc;
+
+ if (NRVO) {
+ // The named return value optimization: allocate this variable in the
+ // return slot, so that we can elide the copy when returning this
+ // variable (C++0x [class.copy]p34).
+ DeclPtr = ReturnValue;
+
+ if (const RecordType *RecordTy = Ty->getAs<RecordType>()) {
+ if (!cast<CXXRecordDecl>(RecordTy->getDecl())->hasTrivialDestructor()) {
+ // Create a flag that is used to indicate when the NRVO was applied
+ // to this variable. Set it to zero to indicate that NRVO was not
+ // applied.
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *Zero = llvm::ConstantInt::get(BoolTy, 0);
+ NRVOFlag = CreateTempAlloca(BoolTy, "nrvo");
+ Builder.CreateStore(Zero, NRVOFlag);
+
+ // Record the NRVO flag for this variable.
+ NRVOFlags[&D] = NRVOFlag;
+ }
+ }
+ } else {
+ if (isByRef)
+ LTy = BuildByRefType(&D);
+
+ llvm::AllocaInst *Alloc = CreateTempAlloca(LTy);
+ Alloc->setName(D.getNameAsString());
+
+ Align = getContext().getDeclAlign(&D);
+ if (isByRef)
+ Align = std::max(Align,
+ CharUnits::fromQuantity(Target.getPointerAlign(0) / 8));
+ Alloc->setAlignment(Align.getQuantity());
+ DeclPtr = Alloc;
+ }
} else {
// Targets that don't support recursion emit locals as globals.
const char *Class =
@@ -645,13 +675,15 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
while (const ArrayType *Array = getContext().getAsArrayType(DtorTy))
DtorTy = getContext().getBaseElementType(Array);
if (const RecordType *RT = DtorTy->getAs<RecordType>())
- if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- llvm::Value *Loc = DeclPtr;
- if (isByRef)
- Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
- D.getNameAsString());
-
+ if (CXXRecordDecl *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
if (!ClassDecl->hasTrivialDestructor()) {
+ // Note: We suppress the destructor call when the corresponding NRVO
+ // flag has been set.
+ llvm::Value *Loc = DeclPtr;
+ if (isByRef)
+ Loc = Builder.CreateStructGEP(DeclPtr, getByRefValueLLVMField(&D),
+ D.getNameAsString());
+
const CXXDestructorDecl *D = ClassDecl->getDestructor(getContext());
assert(D && "EmitLocalBlockVarDecl - destructor is nul");
@@ -680,13 +712,27 @@ void CodeGenFunction::EmitLocalBlockVarDecl(const VarDecl &D) {
}
} else {
{
+ // Normal destruction.
DelayedCleanupBlock Scope(*this);
+
+ if (NRVO) {
+ // If we exited via NRVO, we skip the destructor call.
+ llvm::BasicBlock *NoNRVO = createBasicBlock("nrvo.unused");
+ Builder.CreateCondBr(Builder.CreateLoad(NRVOFlag, "nrvo.val"),
+ Scope.getCleanupExitBlock(),
+ NoNRVO);
+ EmitBlock(NoNRVO);
+ }
+
+ // We don't call the destructor along the normal edge if we're
+ // applying the NRVO.
EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
Loc);
// Make sure to jump to the exit block.
EmitBranch(Scope.getCleanupExitBlock());
}
+
if (Exceptions) {
EHCleanupBlock Cleanup(*this);
EmitCXXDestructorCall(D, Dtor_Complete, /*ForVirtualBase=*/false,
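For illustration only (not part of the patch), a source pattern that takes the new NRVO path in EmitLocalBlockVarDecl: the local is reported by D.isNRVOVariable(), is allocated directly in the return slot instead of its own alloca, and its destructor on the normal return path is guarded by the 'nrvo' flag introduced above.

struct Widget {
  ~Widget() {}        // user-provided, hence non-trivial: an NRVO flag is emitted
  int data[16];
};

Widget makeWidget() {
  Widget result;      // NRVO candidate: constructed in the caller's return slot
  result.data[0] = 42;
  return result;      // copy elided; destructor skipped once NRVO has applied
}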
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index 6afa53868ecc..f94ddd988662 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -13,6 +13,8 @@
#include "CodeGenFunction.h"
#include "clang/CodeGen/CodeGenOptions.h"
+#include "llvm/Intrinsics.h"
+
using namespace clang;
using namespace CodeGen;
@@ -22,7 +24,6 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
assert(!D.getType()->isReferenceType() &&
"Should not call EmitDeclInit on a reference!");
- CodeGenModule &CGM = CGF.CGM;
ASTContext &Context = CGF.getContext();
const Expr *Init = D.getInit();
@@ -36,41 +37,52 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
CGF.EmitComplexExprIntoAddr(Init, DeclPtr, isVolatile);
} else {
CGF.EmitAggExpr(Init, DeclPtr, isVolatile);
-
- // Avoid generating destructor(s) for initialized objects.
- if (!isa<CXXConstructExpr>(Init))
- return;
-
- const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
- if (Array)
- T = Context.getBaseElementType(Array);
-
- const RecordType *RT = T->getAs<RecordType>();
- if (!RT)
- return;
-
- CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- if (RD->hasTrivialDestructor())
- return;
-
- CXXDestructorDecl *Dtor = RD->getDestructor(Context);
-
- llvm::Constant *DtorFn;
- if (Array) {
- DtorFn =
- CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
- Array,
- DeclPtr);
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
- } else
- DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
-
- CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
}
}
+static void EmitDeclDestroy(CodeGenFunction &CGF, const VarDecl &D,
+ llvm::Constant *DeclPtr) {
+ CodeGenModule &CGM = CGF.CGM;
+ ASTContext &Context = CGF.getContext();
+
+ const Expr *Init = D.getInit();
+ QualType T = D.getType();
+ if (!CGF.hasAggregateLLVMType(T) || T->isAnyComplexType())
+ return;
+
+ // Avoid generating destructor(s) for initialized objects.
+ if (!isa<CXXConstructExpr>(Init))
+ return;
+
+ const ConstantArrayType *Array = Context.getAsConstantArrayType(T);
+ if (Array)
+ T = Context.getBaseElementType(Array);
+
+ const RecordType *RT = T->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ if (RD->hasTrivialDestructor())
+ return;
+
+ CXXDestructorDecl *Dtor = RD->getDestructor(Context);
+
+ llvm::Constant *DtorFn;
+ if (Array) {
+ DtorFn =
+ CodeGenFunction(CGM).GenerateCXXAggrDestructorHelper(Dtor,
+ Array,
+ DeclPtr);
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ DeclPtr = llvm::Constant::getNullValue(Int8PtrTy);
+ } else
+ DtorFn = CGM.GetAddrOfCXXDestructor(Dtor, Dtor_Complete);
+
+ CGF.EmitCXXGlobalDtorRegistration(DtorFn, DeclPtr);
+}
+
void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
llvm::Constant *DeclPtr) {
@@ -79,6 +91,7 @@ void CodeGenFunction::EmitCXXGlobalVarDeclInit(const VarDecl &D,
if (!T->isReferenceType()) {
EmitDeclInit(*this, D, DeclPtr);
+ EmitDeclDestroy(*this, D, DeclPtr);
return;
}
if (Init->isLvalue(getContext()) == Expr::LV_Valid) {
@@ -310,7 +323,10 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
EmitBlock(InitCheckBlock);
- if (ThreadsafeStatics) {
+ // Variables used when coping with thread-safe statics and exceptions.
+ llvm::BasicBlock *SavedLandingPad = 0;
+ llvm::BasicBlock *LandingPad = 0;
+ if (ThreadsafeStatics) {
// Call __cxa_guard_acquire.
V = Builder.CreateCall(getGuardAcquireFn(*this), GuardVariable);
@@ -319,14 +335,13 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
Builder.CreateCondBr(Builder.CreateIsNotNull(V, "tobool"),
InitBlock, EndBlock);
- EmitBlock(InitBlock);
-
if (Exceptions) {
- EHCleanupBlock Cleanup(*this);
-
- // Call __cxa_guard_abort.
- Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
+ SavedLandingPad = getInvokeDest();
+ LandingPad = createBasicBlock("guard.lpad");
+ setInvokeDest(LandingPad);
}
+
+ EmitBlock(InitBlock);
}
if (D.getType()->isReferenceType()) {
@@ -342,12 +357,68 @@ CodeGenFunction::EmitStaticCXXBlockVarDeclInit(const VarDecl &D,
if (ThreadsafeStatics) {
// Call __cxa_guard_release.
- Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
+ Builder.CreateCall(getGuardReleaseFn(*this), GuardVariable);
} else {
llvm::Value *One =
llvm::ConstantInt::get(llvm::Type::getInt8Ty(VMContext), 1);
Builder.CreateStore(One, Builder.CreateBitCast(GuardVariable, PtrTy));
}
+ // Register the call to the destructor.
+ if (!D.getType()->isReferenceType())
+ EmitDeclDestroy(*this, D, GV);
+
+ if (ThreadsafeStatics && Exceptions) {
+ // If an exception is thrown during initialization, call __cxa_guard_abort
+ // along the exceptional edge.
+ EmitBranch(EndBlock);
+
+ // Construct the landing pad.
+ EmitBlock(LandingPad);
+
+ // Personality function and LLVM intrinsics.
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
+ (VMContext),
+ true),
+ "__gxx_personality_v0");
+ Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+ llvm::Value *llvm_eh_exception =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
+ llvm::Value *llvm_eh_selector =
+ CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
+
+ // Exception object
+ llvm::Value *Exc = Builder.CreateCall(llvm_eh_exception, "exc");
+ llvm::Value *RethrowPtr = CreateTempAlloca(Exc->getType(), "_rethrow");
+
+ // Call the selector function.
+ const llvm::PointerType *PtrToInt8Ty
+ = llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(VMContext));
+ llvm::Constant *Null = llvm::ConstantPointerNull::get(PtrToInt8Ty);
+ llvm::Value* SelectorArgs[3] = { Exc, Personality, Null };
+ Builder.CreateCall(llvm_eh_selector, SelectorArgs, SelectorArgs + 3,
+ "selector");
+ Builder.CreateStore(Exc, RethrowPtr);
+
+ // Call __cxa_guard_abort along the exceptional edge.
+ Builder.CreateCall(getGuardAbortFn(*this), GuardVariable);
+
+ setInvokeDest(SavedLandingPad);
+
+ // Rethrow the current exception.
+ if (getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
+ getInvokeDest(),
+ Builder.CreateLoad(RethrowPtr));
+ EmitBlock(Cont);
+ } else
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
+ Builder.CreateLoad(RethrowPtr));
+
+ Builder.CreateUnreachable();
+ }
+
EmitBlock(EndBlock);
}
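For illustration only (not part of the patch), the source pattern the new exceptional edge covers: if the initializer of a function-local static throws, __cxa_guard_abort must run on the landing pad built above so the guard is released and a later call can retry the initialization.

#include <stdexcept>

struct Config {
  Config() { throw std::runtime_error("not configured"); }  // may throw
};

Config &getConfig() {
  static Config instance;   // wrapped in __cxa_guard_acquire / __cxa_guard_release
  return instance;          // a throwing constructor reaches __cxa_guard_abort
}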
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index c1d05bf233b2..ddc1c7743344 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -100,17 +100,17 @@ static llvm::Constant *getUnexpectedFn(CodeGenFunction &CGF) {
return CGF.CGM.CreateRuntimeFunction(FTy, "__cxa_call_unexpected");
}
-static llvm::Constant *getUnwindResumeOrRethrowFn(CodeGenFunction &CGF) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+llvm::Constant *CodeGenFunction::getUnwindResumeOrRethrowFn() {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(getLLVMContext());
std::vector<const llvm::Type*> Args(1, Int8PtrTy);
const llvm::FunctionType *FTy =
- llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), Args,
+ llvm::FunctionType::get(llvm::Type::getVoidTy(getLLVMContext()), Args,
false);
- if (CGF.CGM.getLangOptions().SjLjExceptions)
- return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
- return CGF.CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
+ if (CGM.getLangOptions().SjLjExceptions)
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_SjLj_Resume");
+ return CGM.CreateRuntimeFunction(FTy, "_Unwind_Resume_or_Rethrow");
}
static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
@@ -119,7 +119,29 @@ static llvm::Constant *getTerminateFn(CodeGenFunction &CGF) {
const llvm::FunctionType *FTy =
llvm::FunctionType::get(llvm::Type::getVoidTy(CGF.getLLVMContext()), false);
- return CGF.CGM.CreateRuntimeFunction(FTy, "_ZSt9terminatev");
+ return CGF.CGM.CreateRuntimeFunction(FTy,
+ CGF.CGM.getLangOptions().CPlusPlus ? "_ZSt9terminatev" : "abort");
+}
+
+static llvm::Constant *getPersonalityFn(CodeGenModule &CGM) {
+ const char *PersonalityFnName = "__gcc_personality_v0";
+ LangOptions Opts = CGM.getLangOptions();
+ if (Opts.CPlusPlus)
+ PersonalityFnName = "__gxx_personality_v0";
+ else if (Opts.ObjC1) {
+ if (Opts.NeXTRuntime) {
+ if (Opts.ObjCNonFragileABI)
+ PersonalityFnName = "__gcc_personality_v0";
+ } else
+ PersonalityFnName = "__gnu_objc_personality_v0";
+ }
+
+ llvm::Constant *Personality =
+ CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty(
+ CGM.getLLVMContext()),
+ true),
+ PersonalityFnName);
+ return llvm::ConstantExpr::getBitCast(Personality, CGM.PtrToInt8Ty);
}
// Emits an exception expression into the given location. This
@@ -324,12 +346,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (!Proto->hasExceptionSpec())
return;
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+ llvm::Constant *Personality = getPersonalityFn(CGM);
llvm::Value *llvm_eh_exception =
CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
llvm::Value *llvm_eh_selector =
@@ -397,7 +414,7 @@ void CodeGenFunction::EmitStartEHSpec(const Decl *D) {
if (Proto->getNumExceptions()) {
EmitBlock(Unwind);
- Builder.CreateCall(getUnwindResumeOrRethrowFn(*this),
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
Builder.CreateLoad(RethrowPtr));
Builder.CreateUnreachable();
}
@@ -444,12 +461,7 @@ CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
CXXTryStmtInfo TryInfo) {
// Pointer to the personality function
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+ llvm::Constant *Personality = getPersonalityFn(CGM);
llvm::Value *llvm_eh_exception =
CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
llvm::Value *llvm_eh_selector =
@@ -631,12 +643,12 @@ void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
// here.
if (getInvokeDest()) {
llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
- Builder.CreateInvoke(getUnwindResumeOrRethrowFn(*this), Cont,
+ Builder.CreateInvoke(getUnwindResumeOrRethrowFn(), Cont,
getInvokeDest(),
Builder.CreateLoad(RethrowPtr));
EmitBlock(Cont);
} else
- Builder.CreateCall(getUnwindResumeOrRethrowFn(*this),
+ Builder.CreateCall(getUnwindResumeOrRethrowFn(),
Builder.CreateLoad(RethrowPtr));
Builder.CreateUnreachable();
@@ -654,12 +666,7 @@ CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
// The libstdc++ personality function.
// TODO: generalize to work with other libraries.
- llvm::Constant *Personality =
- CGF.CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (CGF.VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, CGF.PtrToInt8Ty);
+ llvm::Constant *Personality = getPersonalityFn(CGF.CGM);
// %exception = call i8* @llvm.eh.exception()
// Magic intrinsic which gives us a handle to the caught
@@ -687,11 +694,11 @@ CodeGenFunction::EHCleanupBlock::~EHCleanupBlock() {
// Rethrow the exception.
if (CGF.getInvokeDest()) {
llvm::BasicBlock *Cont = CGF.createBasicBlock("invoke.cont");
- CGF.Builder.CreateInvoke(getUnwindResumeOrRethrowFn(CGF), Cont,
+ CGF.Builder.CreateInvoke(CGF.getUnwindResumeOrRethrowFn(), Cont,
CGF.getInvokeDest(), Exc);
CGF.EmitBlock(Cont);
} else
- CGF.Builder.CreateCall(getUnwindResumeOrRethrowFn(CGF), Exc);
+ CGF.Builder.CreateCall(CGF.getUnwindResumeOrRethrowFn(), Exc);
CGF.Builder.CreateUnreachable();
// Resume inserting where we started, but put the new cleanup
@@ -715,12 +722,7 @@ llvm::BasicBlock *CodeGenFunction::getTerminateHandler() {
llvm::BasicBlock::iterator SavedInsertPoint = Builder.GetInsertPoint();
Builder.ClearInsertionPoint();
- llvm::Constant *Personality =
- CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
- (VMContext),
- true),
- "__gxx_personality_v0");
- Personality = llvm::ConstantExpr::getBitCast(Personality, PtrToInt8Ty);
+ llvm::Constant *Personality = getPersonalityFn(CGM);
llvm::Value *llvm_eh_exception =
CGM.getIntrinsic(llvm::Intrinsic::eh_exception);
llvm::Value *llvm_eh_selector =
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 9ade916f42b7..d67618bfca35 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -135,6 +135,39 @@ void CodeGenFunction::EmitAnyExprToMem(const Expr *E,
}
}
+/// \brief An adjustment to be made to the temporary created when emitting a
+/// reference binding, which accesses a particular subobject of that temporary.
+struct SubobjectAdjustment {
+ enum { DerivedToBaseAdjustment, FieldAdjustment } Kind;
+
+ union {
+ struct {
+ const CXXBaseSpecifierArray *BasePath;
+ const CXXRecordDecl *DerivedClass;
+ } DerivedToBase;
+
+ struct {
+ FieldDecl *Field;
+ unsigned CVRQualifiers;
+ } Field;
+ };
+
+ SubobjectAdjustment(const CXXBaseSpecifierArray *BasePath,
+ const CXXRecordDecl *DerivedClass)
+ : Kind(DerivedToBaseAdjustment)
+ {
+ DerivedToBase.BasePath = BasePath;
+ DerivedToBase.DerivedClass = DerivedClass;
+ }
+
+ SubobjectAdjustment(FieldDecl *Field, unsigned CVRQualifiers)
+ : Kind(FieldAdjustment)
+ {
+ this->Field.Field = Field;
+ this->Field.CVRQualifiers = CVRQualifiers;
+ }
+};
+
RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
bool IsInitializer) {
bool ShouldDestroyTemporaries = false;
@@ -174,19 +207,46 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
PopCXXTemporary();
}
} else {
- const CXXBaseSpecifierArray *BasePath = 0;
- const CXXRecordDecl *DerivedClassDecl = 0;
+ QualType ResultTy = E->getType();
- if (const CastExpr *CE =
- dyn_cast<CastExpr>(E->IgnoreParenNoopCasts(getContext()))) {
- if (CE->getCastKind() == CastExpr::CK_DerivedToBase) {
- E = CE->getSubExpr();
-
- BasePath = &CE->getBasePath();
- DerivedClassDecl =
- cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ llvm::SmallVector<SubobjectAdjustment, 2> Adjustments;
+ do {
+ if (const ParenExpr *PE = dyn_cast<ParenExpr>(E)) {
+ E = PE->getSubExpr();
+ continue;
+ }
+
+ if (const CastExpr *CE = dyn_cast<CastExpr>(E)) {
+ if ((CE->getCastKind() == CastExpr::CK_DerivedToBase ||
+ CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase) &&
+ E->getType()->isRecordType()) {
+ E = CE->getSubExpr();
+ CXXRecordDecl *Derived
+ = cast<CXXRecordDecl>(E->getType()->getAs<RecordType>()->getDecl());
+ Adjustments.push_back(SubobjectAdjustment(&CE->getBasePath(),
+ Derived));
+ continue;
+ }
+
+ if (CE->getCastKind() == CastExpr::CK_NoOp) {
+ E = CE->getSubExpr();
+ continue;
+ }
+ } else if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
+ if (ME->getBase()->isLvalue(getContext()) != Expr::LV_Valid &&
+ ME->getBase()->getType()->isRecordType()) {
+ if (FieldDecl *Field = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ E = ME->getBase();
+ Adjustments.push_back(SubobjectAdjustment(Field,
+ E->getType().getCVRQualifiers()));
+ continue;
+ }
+ }
}
- }
+
+ // Nothing changed.
+ break;
+ } while (true);
Val = EmitAnyExprToTemp(E, /*IsAggLocVolatile=*/false,
IsInitializer);
@@ -225,13 +285,47 @@ RValue CodeGenFunction::EmitReferenceBindingToExpr(const Expr* E,
}
}
- // Check if need to perform the derived-to-base cast.
- if (BasePath) {
- llvm::Value *Derived = Val.getAggregateAddr();
- llvm::Value *Base =
- GetAddressOfBaseClass(Derived, DerivedClassDecl, *BasePath,
- /*NullCheckValue=*/false);
- return RValue::get(Base);
+ // Check whether we need to perform derived-to-base casts and/or field
+ // accesses to get from the temporary object we created (and, potentially,
+ // for which we extended the lifetime) to the subobject we're binding the
+ // reference to.
+ if (!Adjustments.empty()) {
+ llvm::Value *Object = Val.getAggregateAddr();
+ for (unsigned I = Adjustments.size(); I != 0; --I) {
+ SubobjectAdjustment &Adjustment = Adjustments[I-1];
+ switch (Adjustment.Kind) {
+ case SubobjectAdjustment::DerivedToBaseAdjustment:
+ Object = GetAddressOfBaseClass(Object,
+ Adjustment.DerivedToBase.DerivedClass,
+ *Adjustment.DerivedToBase.BasePath,
+ /*NullCheckValue=*/false);
+ break;
+
+ case SubobjectAdjustment::FieldAdjustment: {
+ unsigned CVR = Adjustment.Field.CVRQualifiers;
+ LValue LV = EmitLValueForField(Object, Adjustment.Field.Field, CVR);
+ if (LV.isSimple()) {
+ Object = LV.getAddress();
+ break;
+ }
+
+ // For non-simple lvalues, we actually have to create a copy of
+ // the object we're binding to.
+ QualType T = Adjustment.Field.Field->getType().getNonReferenceType()
+ .getUnqualifiedType();
+ Object = CreateTempAlloca(ConvertType(T), "lv");
+ EmitStoreThroughLValue(EmitLoadOfLValue(LV, T),
+ LValue::MakeAddr(Object,
+ Qualifiers::fromCVRMask(CVR)),
+ T);
+ break;
+ }
+ }
+ }
+
+ const llvm::Type *ResultPtrTy
+ = llvm::PointerType::get(ConvertType(ResultTy), 0);
+ Object = Builder.CreateBitCast(Object, ResultPtrTy, "temp");
+ return RValue::get(Object);
}
}
@@ -309,8 +403,7 @@ EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), AmountVal);
if (!isa<llvm::FunctionType>(PT->getElementType())) {
QualType PTEE = ValTy->getPointeeType();
- if (const ObjCInterfaceType *OIT =
- dyn_cast<ObjCInterfaceType>(PTEE)) {
+ if (const ObjCObjectType *OIT = PTEE->getAs<ObjCObjectType>()) {
// Handle interface types, which are not represented with a concrete
// type.
int size = getContext().getTypeSize(OIT) / 8;
@@ -1371,8 +1464,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E) {
llvm::ConstantInt::get(Idx->getType(),
BaseTypeSize.getQuantity()));
Address = Builder.CreateInBoundsGEP(Base, Idx, "arrayidx");
- } else if (const ObjCInterfaceType *OIT =
- dyn_cast<ObjCInterfaceType>(E->getType())) {
+ } else if (const ObjCObjectType *OIT =
+ E->getType()->getAs<ObjCObjectType>()) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
getContext().getTypeSizeInChars(OIT).getQuantity());
@@ -1530,6 +1623,35 @@ LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
Field->getType().getCVRQualifiers()|CVRQualifiers);
}
+/// EmitLValueForAnonRecordField - Given that the field is a member of
+/// an anonymous struct or union buried inside a record, and given
+/// that the base value is a pointer to the enclosing record, derive
+/// an lvalue for the ultimate field.
+LValue CodeGenFunction::EmitLValueForAnonRecordField(llvm::Value *BaseValue,
+ const FieldDecl *Field,
+ unsigned CVRQualifiers) {
+ llvm::SmallVector<const FieldDecl *, 8> Path;
+ Path.push_back(Field);
+
+ while (Field->getParent()->isAnonymousStructOrUnion()) {
+ const ValueDecl *VD = Field->getParent()->getAnonymousStructOrUnionObject();
+ if (!isa<FieldDecl>(VD)) break;
+ Field = cast<FieldDecl>(VD);
+ Path.push_back(Field);
+ }
+
+ llvm::SmallVectorImpl<const FieldDecl*>::reverse_iterator
+ I = Path.rbegin(), E = Path.rend();
+ while (true) {
+ LValue LV = EmitLValueForField(BaseValue, *I, CVRQualifiers);
+ if (++I == E) return LV;
+
+ assert(LV.isSimple());
+ BaseValue = LV.getAddress();
+ CVRQualifiers |= LV.getVRQualifiers();
+ }
+}
+
LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
const FieldDecl* Field,
unsigned CVRQualifiers) {
@@ -1670,7 +1792,17 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
MakeQualifiers(E->getType()));
}
- case CastExpr::CK_NoOp:
+ case CastExpr::CK_NoOp: {
+ LValue LV = EmitLValue(E->getSubExpr());
+ if (LV.isPropertyRef()) {
+ QualType QT = E->getSubExpr()->getType();
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, QT);
+ assert(!RV.isScalar() && "EmitCastLValue - scalar cast of property ref");
+ llvm::Value *V = RV.getAggregateAddr();
+ return LValue::MakeAddr(V, MakeQualifiers(QT));
+ }
+ return LV;
+ }
case CastExpr::CK_ConstructorConversion:
case CastExpr::CK_UserDefinedConversion:
case CastExpr::CK_AnyPointerToObjCPointerCast:
@@ -1724,7 +1856,7 @@ LValue CodeGenFunction::EmitNullInitializationLValue(
const CXXZeroInitValueExpr *E) {
QualType Ty = E->getType();
LValue LV = LValue::MakeAddr(CreateMemTemp(Ty), MakeQualifiers(Ty));
- EmitMemSetToZero(LV.getAddress(), Ty);
+ EmitNullInitialization(LV.getAddress(), Ty);
return LV;
}
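For illustration only (not part of the patch), two bindings that exercise the SubobjectAdjustment walk added to EmitReferenceBindingToExpr: the temporary is emitted once, its lifetime is extended, and its address is then adjusted through the recorded derived-to-base and field steps.

struct Base { int x; };
struct Derived : Base { int y; };

Derived makeDerived();              // declaration only, for illustration

void bindReferences() {
  const Base &b = makeDerived();    // derived-to-base adjustment on the temporary
  const int &y = makeDerived().y;   // field adjustment into the temporary
  (void)b; (void)y;
}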
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index d1b0dff11e5a..a4e64fb3d638 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -37,6 +37,16 @@ class AggExprEmitter : public StmtVisitor<AggExprEmitter> {
bool IgnoreResult;
bool IsInitializer;
bool RequiresGCollection;
+
+ ReturnValueSlot getReturnValueSlot() const {
+ // If the destination slot requires garbage collection, we can't
+ // use the real return value slot, because we have to use the GC
+ // API.
+ if (RequiresGCollection) return ReturnValueSlot();
+
+ return ReturnValueSlot(DestPtr, VolatileDest);
+ }
+
public:
AggExprEmitter(CodeGenFunction &cgf, llvm::Value *destPtr, bool v,
bool ignore, bool isinit, bool requiresGCollection)
@@ -58,6 +68,10 @@ public:
void EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore = false);
void EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore = false);
+ void EmitGCMove(const Expr *E, RValue Src);
+
+ bool TypeRequiresGCollection(QualType T);
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
@@ -137,6 +151,39 @@ void AggExprEmitter::EmitAggLoadOfLValue(const Expr *E) {
EmitFinalDestCopy(E, LV);
}
+/// \brief True if the given aggregate type requires special GC API calls.
+bool AggExprEmitter::TypeRequiresGCollection(QualType T) {
+ // Only record types have members that might require garbage collection.
+ const RecordType *RecordTy = T->getAs<RecordType>();
+ if (!RecordTy) return false;
+
+ // Don't mess with non-trivial C++ types.
+ RecordDecl *Record = RecordTy->getDecl();
+ if (isa<CXXRecordDecl>(Record) &&
+ (!cast<CXXRecordDecl>(Record)->hasTrivialCopyConstructor() ||
+ !cast<CXXRecordDecl>(Record)->hasTrivialDestructor()))
+ return false;
+
+ // Check whether the type has an object member.
+ return Record->hasObjectMember();
+}
+
+/// \brief Perform the final move to DestPtr if RequiresGCollection is set.
+///
+/// The idea is that you do something like this:
+/// RValue Result = EmitSomething(..., getReturnValueSlot());
+/// EmitGCMove(E, Result);
+/// If GC doesn't interfere, this will cause the result to be emitted
+/// directly into the return value slot. If GC does interfere, a final
+/// move will be performed.
+void AggExprEmitter::EmitGCMove(const Expr *E, RValue Src) {
+ if (!RequiresGCollection) return;
+
+ CGF.CGM.getObjCRuntime().EmitGCMemmoveCollectable(CGF, DestPtr,
+ Src.getAggregateAddr(),
+ E->getType());
+}
+
/// EmitFinalDestCopy - Perform the final copy to DestPtr, if desired.
void AggExprEmitter::EmitFinalDestCopy(const Expr *E, RValue Src, bool Ignore) {
assert(Src.isAggregate() && "value must be aggregate value!");
@@ -178,7 +225,7 @@ void AggExprEmitter::EmitFinalDestCopy(const Expr *E, LValue Src, bool Ignore) {
//===----------------------------------------------------------------------===//
void AggExprEmitter::VisitCastExpr(CastExpr *E) {
- if (!DestPtr) {
+ if (!DestPtr && E->getCastKind() != CastExpr::CK_Dynamic) {
Visit(E->getSubExpr());
return;
}
@@ -186,6 +233,20 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
switch (E->getCastKind()) {
default: assert(0 && "Unhandled cast kind!");
+ case CastExpr::CK_Dynamic: {
+ assert(isa<CXXDynamicCastExpr>(E) && "CK_Dynamic without a dynamic_cast?");
+ LValue LV = CGF.EmitCheckedLValue(E->getSubExpr());
+ // FIXME: Do we also need to handle property references here?
+ if (LV.isSimple())
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
+ else
+ CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
+
+ if (DestPtr)
+ CGF.CGM.ErrorUnsupported(E, "lvalue dynamic_cast with a destination");
+ break;
+ }
+
case CastExpr::CK_ToUnion: {
// GCC union extension
QualType PtrTy =
@@ -198,6 +259,14 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
break;
}
+ case CastExpr::CK_DerivedToBase:
+ case CastExpr::CK_BaseToDerived:
+ case CastExpr::CK_UncheckedDerivedToBase: {
+ assert(0 && "cannot perform hierarchy conversion in EmitAggExpr: "
+ "should have been unpacked before we got here");
+ break;
+ }
+
// FIXME: Remove the CK_Unknown check here.
case CastExpr::CK_Unknown:
case CastExpr::CK_NoOp:
@@ -282,31 +351,24 @@ void AggExprEmitter::VisitCallExpr(const CallExpr *E) {
return;
}
- // If the struct doesn't require GC, we can just pass the destination
- // directly to EmitCall.
- if (!RequiresGCollection) {
- CGF.EmitCallExpr(E, ReturnValueSlot(DestPtr, VolatileDest));
- return;
- }
-
- RValue RV = CGF.EmitCallExpr(E);
- EmitFinalDestCopy(E, RV);
+ RValue RV = CGF.EmitCallExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
}
void AggExprEmitter::VisitObjCMessageExpr(ObjCMessageExpr *E) {
- RValue RV = CGF.EmitObjCMessageExpr(E);
- EmitFinalDestCopy(E, RV);
+ RValue RV = CGF.EmitObjCMessageExpr(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
}
void AggExprEmitter::VisitObjCPropertyRefExpr(ObjCPropertyRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E);
- EmitFinalDestCopy(E, RV);
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
}
void AggExprEmitter::VisitObjCImplicitSetterGetterRefExpr(
ObjCImplicitSetterGetterRefExpr *E) {
- RValue RV = CGF.EmitObjCPropertyGet(E);
- EmitFinalDestCopy(E, RV);
+ RValue RV = CGF.EmitObjCPropertyGet(E, getReturnValueSlot());
+ EmitGCMove(E, RV);
}
void AggExprEmitter::VisitBinComma(const BinaryOperator *E) {
@@ -412,11 +474,9 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
RValue::getAggregate(AggLoc, VolatileDest));
} else {
bool RequiresGCollection = false;
- if (CGF.getContext().getLangOptions().NeXTRuntime) {
- QualType LHSTy = E->getLHS()->getType();
- if (const RecordType *FDTTy = LHSTy.getTypePtr()->getAs<RecordType>())
- RequiresGCollection = FDTTy->getDecl()->hasObjectMember();
- }
+ if (CGF.getContext().getLangOptions().getGCMode())
+ RequiresGCollection = TypeRequiresGCollection(E->getLHS()->getType());
+
// Codegen the RHS so that it stores directly into the LHS.
CGF.EmitAggExpr(E->getRHS(), LHS.getAddress(), LHS.isVolatileQualified(),
false, false, RequiresGCollection);
@@ -559,14 +619,10 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue LV, QualType T) {
llvm::Value *Null = llvm::Constant::getNullValue(CGF.ConvertType(T));
CGF.EmitStoreThroughLValue(RValue::get(Null), LV, T);
} else {
- // Otherwise, just memset the whole thing to zero. This is legal
- // because in LLVM, all default initializers are guaranteed to have a
- // bit pattern of all zeros.
- // FIXME: That isn't true for member pointers!
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitMemSetToZero(LV.getAddress(), T);
+ CGF.EmitNullInitialization(LV.getAddress(), T);
}
}
@@ -738,21 +794,20 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
return LValue::MakeAddr(Temp, Q);
}
-void CodeGenFunction::EmitAggregateClear(llvm::Value *DestPtr, QualType Ty) {
- assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
-
- EmitMemSetToZero(DestPtr, Ty);
-}
-
void CodeGenFunction::EmitAggregateCopy(llvm::Value *DestPtr,
llvm::Value *SrcPtr, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- // Ignore empty classes in C++.
if (getContext().getLangOptions().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
- if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
+ CXXRecordDecl *Record = cast<CXXRecordDecl>(RT->getDecl());
+ assert((Record->hasTrivialCopyConstructor() ||
+ Record->hasTrivialCopyAssignment()) &&
+ "Trying to aggregate-copy a type without a trivial copy "
+ "constructor or assignment operator");
+ // Ignore empty classes in C++.
+ if (Record->isEmpty())
return;
}
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index b57cdc93340e..f93c79c74267 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGObjCRuntime.h"
using namespace clang;
using namespace CodeGen;
@@ -255,16 +256,29 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
ReturnValueSlot ReturnValue) {
assert(MD->isInstance() &&
"Trying to emit a member call expr on a static method!");
-
if (MD->isCopyAssignment()) {
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(MD->getDeclContext());
if (ClassDecl->hasTrivialCopyAssignment()) {
assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
"EmitCXXOperatorMemberCallExpr - user declared copy assignment");
- llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ llvm::Value *AggLoc = CreateMemTemp(E->getArg(1)->getType());
+ EmitAggExpr(E->getArg(1), AggLoc, false /*VolatileDest*/);
+ EmitObjCPropertySet(LV.getPropertyRefExpr(),
+ RValue::getAggregate(AggLoc, false /*VolatileDest*/));
+ return RValue::getAggregate(0, false);
+ }
+ else
+ This = LV.getAddress();
+
llvm::Value *Src = EmitLValue(E->getArg(1)).getAddress();
QualType Ty = E->getType();
- EmitAggregateCopy(This, Src, Ty);
+ if (ClassDecl->hasObjectMember())
+ CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this, This, Src, Ty);
+ else
+ EmitAggregateCopy(This, Src, Ty);
return RValue::get(This);
}
}
@@ -273,8 +287,15 @@ CodeGenFunction::EmitCXXOperatorMemberCallExpr(const CXXOperatorCallExpr *E,
const llvm::Type *Ty =
CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
FPT->isVariadic());
-
- llvm::Value *This = EmitLValue(E->getArg(0)).getAddress();
+ LValue LV = EmitLValue(E->getArg(0));
+ llvm::Value *This;
+ if (LV.isPropertyRef()) {
+ RValue RV = EmitLoadOfPropertyRefLValue(LV, E->getArg(0)->getType());
+ assert (!RV.isScalar() && "EmitCXXOperatorMemberCallExpr");
+ This = RV.getAggregateAddr();
+ }
+ else
+ This = LV.getAddress();
llvm::Value *Callee;
if (MD->isVirtual() && !canDevirtualizeMemberFunctionCalls(E->getArg(0)))
@@ -305,11 +326,13 @@ CodeGenFunction::EmitCXXConstructExpr(llvm::Value *Dest,
return;
}
// Code gen optimization to eliminate copy constructor and return
- // its first argument instead.
+ // its first argument instead, if in fact that argument is a temporary
+ // object.
if (getContext().getLangOptions().ElideConstructors && E->isElidable()) {
- const Expr *Arg = E->getArg(0)->getTemporaryObject();
- EmitAggExpr(Arg, Dest, false);
- return;
+ if (const Expr *Arg = E->getArg(0)->getTemporaryObject()) {
+ EmitAggExpr(Arg, Dest, false);
+ return;
+ }
}
if (Array) {
QualType BaseElementTy = getContext().getBaseElementType(Array);
@@ -851,6 +874,8 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
ToVoid = true;
} else {
LTy = LTy->getPointerTo();
+
+ // FIXME: What if exceptions are disabled?
ThrowOnBad = true;
}
@@ -917,15 +942,21 @@ llvm::Value *CodeGenFunction::EmitDynamicCast(llvm::Value *V,
if (ThrowOnBad) {
BadCastBlock = createBasicBlock();
-
Builder.CreateCondBr(Builder.CreateIsNotNull(V), ContBlock, BadCastBlock);
EmitBlock(BadCastBlock);
- /// Call __cxa_bad_cast
+ /// Invoke __cxa_bad_cast
ResultType = llvm::Type::getVoidTy(VMContext);
const llvm::FunctionType *FBadTy;
FBadTy = llvm::FunctionType::get(ResultType, false);
llvm::Value *F = CGM.CreateRuntimeFunction(FBadTy, "__cxa_bad_cast");
- Builder.CreateCall(F)->setDoesNotReturn();
+ if (llvm::BasicBlock *InvokeDest = getInvokeDest()) {
+ llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
+ Builder.CreateInvoke(F, Cont, InvokeDest)->setDoesNotReturn();
+ EmitBlock(Cont);
+ } else {
+ // FIXME: Does this ever make sense?
+ Builder.CreateCall(F)->setDoesNotReturn();
+ }
Builder.CreateUnreachable();
}
}
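For illustration only (not part of the patch), a checked reference dynamic_cast inside a try block, which is the case the invoke of __cxa_bad_cast above exists for: a failed cast has to unwind to the enclosing landing pad rather than leave through a plain call to a noreturn function.

struct Base { virtual ~Base() {} };
struct Derived : Base {};

void touch(Base &b) {
  try {
    Derived &d = dynamic_cast<Derived &>(b);  // throws std::bad_cast on failure
    (void)d;
  } catch (...) {
    // the failed-cast unwind arrives here via the invoke edge
  }
}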
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index 2595ff0060ca..551a47aa9f80 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -984,32 +984,81 @@ llvm::Constant *CodeGenModule::EmitConstantExpr(const Expr *E,
return C;
}
-static bool containsPointerToDataMember(CodeGenTypes &Types, QualType T) {
- // No need to check for member pointers when not compiling C++.
- if (!Types.getContext().getLangOptions().CPlusPlus)
- return false;
-
- T = Types.getContext().getBaseElementType(T);
-
- if (const RecordType *RT = T->getAs<RecordType>()) {
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- Types.ConvertTagDeclType(RD);
+static void
+FillInNullDataMemberPointers(CodeGenModule &CGM, QualType T,
+ std::vector<llvm::Constant *> &Elements,
+ uint64_t StartOffset) {
+ assert(StartOffset % 8 == 0 && "StartOffset not byte aligned!");
+
+ if (!CGM.getTypes().ContainsPointerToDataMember(T))
+ return;
+
+ if (const ConstantArrayType *CAT =
+ CGM.getContext().getAsConstantArrayType(T)) {
+ QualType ElementTy = CAT->getElementType();
+ uint64_t ElementSize = CGM.getContext().getTypeSize(ElementTy);
- const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
- return Layout.containsPointerToDataMember();
- }
+ for (uint64_t I = 0, E = CAT->getSize().getZExtValue(); I != E; ++I) {
+ FillInNullDataMemberPointers(CGM, ElementTy, Elements,
+ StartOffset + I * ElementSize);
+ }
+ } else if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->isVirtual() && "Should not see virtual bases here!");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (!CGM.getTypes().ContainsPointerToDataMember(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ FillInNullDataMemberPointers(CGM, I->getType(),
+ Elements, StartOffset + BaseOffset);
+ }
- if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
- return !MPT->getPointeeType()->isFunctionType();
+ // Visit all fields.
+ unsigned FieldNo = 0;
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I != E; ++I, ++FieldNo) {
+ QualType FieldType = I->getType();
+
+ if (!CGM.getTypes().ContainsPointerToDataMember(FieldType))
+ continue;
+
+ uint64_t FieldOffset = StartOffset + Layout.getFieldOffset(FieldNo);
+ FillInNullDataMemberPointers(CGM, FieldType, Elements, FieldOffset);
+ }
+ } else {
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
+ assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
+ "Should only see pointers to data members here!");
- return false;
+ uint64_t StartIndex = StartOffset / 8;
+ uint64_t EndIndex = StartIndex + CGM.getContext().getTypeSize(T) / 8;
+
+ llvm::Constant *NegativeOne =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(CGM.getLLVMContext()),
+ -1ULL, /*isSigned=*/true);
+
+ // Fill in the null data member pointer.
+ for (uint64_t I = StartIndex; I != EndIndex; ++I)
+ Elements[I] = NegativeOne;
+ }
}
llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
- if (!containsPointerToDataMember(getTypes(), T))
+ if (!getTypes().ContainsPointerToDataMember(T))
return llvm::Constant::getNullValue(getTypes().ConvertTypeForMem(T));
if (const ConstantArrayType *CAT = Context.getAsConstantArrayType(T)) {
@@ -1029,21 +1078,60 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- assert(!RD->getNumBases() &&
- "FIXME: Handle zero-initializing structs with bases and "
- "pointers to data members.");
const llvm::StructType *STy =
cast<llvm::StructType>(getTypes().ConvertTypeForMem(T));
unsigned NumElements = STy->getNumElements();
std::vector<llvm::Constant *> Elements(NumElements);
+ const CGRecordLayout &Layout = getTypes().getCGRecordLayout(RD);
+
+ // Go through all bases and fill in any null pointer to data members.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ assert(!I->isVirtual() && "Should not see virtual bases here!");
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore empty bases.
+ if (BaseDecl->isEmpty())
+ continue;
+
+ // Ignore bases that don't have any pointer to data members.
+ if (!getTypes().ContainsPointerToDataMember(BaseDecl))
+ continue;
+
+ // Currently, all bases are arrays of i8. Figure out how many elements
+ // this base array has.
+ unsigned BaseFieldNo = Layout.getNonVirtualBaseLLVMFieldNo(BaseDecl);
+ const llvm::ArrayType *BaseArrayTy =
+ cast<llvm::ArrayType>(STy->getElementType(BaseFieldNo));
+
+ unsigned NumBaseElements = BaseArrayTy->getNumElements();
+ std::vector<llvm::Constant *> BaseElements(NumBaseElements);
+
+ // Now fill in null data member pointers.
+ FillInNullDataMemberPointers(*this, I->getType(), BaseElements, 0);
+
+ // Now go through all other elements and zero them out.
+ if (NumBaseElements) {
+ llvm::Constant *Zero =
+ llvm::ConstantInt::get(llvm::Type::getInt8Ty(getLLVMContext()), 0);
+
+ for (unsigned I = 0; I != NumBaseElements; ++I) {
+ if (!BaseElements[I])
+ BaseElements[I] = Zero;
+ }
+ }
+
+ Elements[BaseFieldNo] = llvm::ConstantArray::get(BaseArrayTy,
+ BaseElements);
+ }
+
for (RecordDecl::field_iterator I = RD->field_begin(),
E = RD->field_end(); I != E; ++I) {
const FieldDecl *FD = *I;
-
- const CGRecordLayout &RL =
- getTypes().getCGRecordLayout(FD->getParent());
- unsigned FieldNo = RL.getLLVMFieldNo(FD);
+ unsigned FieldNo = Layout.getLLVMFieldNo(FD);
Elements[FieldNo] = EmitNullConstant(FD->getType());
}
@@ -1056,6 +1144,7 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
return llvm::ConstantStruct::get(STy, Elements);
}
+ assert(T->isMemberPointerType() && "Should only see member pointers here!");
assert(!T->getAs<MemberPointerType>()->getPointeeType()->isFunctionType() &&
"Should only see pointers to data members here!");
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 984968802382..2108414c5ae6 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -96,6 +96,9 @@ public:
Value *EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
QualType SrcTy, QualType DstTy);
+ /// EmitNullValue - Emit a value that corresponds to null for the given type.
+ Value *EmitNullValue(QualType Ty);
+
//===--------------------------------------------------------------------===//
// Visitor Methods
//===--------------------------------------------------------------------===//
@@ -123,10 +126,10 @@ public:
return llvm::ConstantInt::get(ConvertType(E->getType()), E->getValue());
}
Value *VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ return EmitNullValue(E->getType());
}
Value *VisitGNUNullExpr(const GNUNullExpr *E) {
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ return EmitNullValue(E->getType());
}
Value *VisitTypesCompatibleExpr(const TypesCompatibleExpr *E) {
return llvm::ConstantInt::get(ConvertType(E->getType()),
@@ -186,7 +189,7 @@ public:
Value *VisitInitListExpr(InitListExpr *E);
Value *VisitImplicitValueInitExpr(const ImplicitValueInitExpr *E) {
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ return CGF.CGM.EmitNullConstant(E->getType());
}
Value *VisitCastExpr(CastExpr *E) {
// Make sure to evaluate VLA bounds now so that we have them for later.
@@ -278,7 +281,7 @@ public:
}
Value *VisitCXXNullPtrLiteralExpr(const CXXNullPtrLiteralExpr *E) {
- return llvm::Constant::getNullValue(ConvertType(E->getType()));
+ return EmitNullValue(E->getType());
}
Value *VisitCXXThrowExpr(const CXXThrowExpr *E) {
@@ -549,6 +552,19 @@ EmitComplexToScalarConversion(CodeGenFunction::ComplexPairTy Src,
return EmitScalarConversion(Src.first, SrcTy, DstTy);
}
+Value *ScalarExprEmitter::EmitNullValue(QualType Ty) {
+ const llvm::Type *LTy = ConvertType(Ty);
+
+ if (!Ty->isMemberPointerType())
+ return llvm::Constant::getNullValue(LTy);
+
+ assert(!Ty->isMemberFunctionPointerType() &&
+ "member function pointers are not scalar!");
+
+ // Itanium C++ ABI 2.3:
+ // A NULL pointer is represented as -1.
+ return llvm::ConstantInt::get(LTy, -1ULL, /*isSigned=*/true);
+}
//===----------------------------------------------------------------------===//
// Visitor Methods
@@ -1334,7 +1350,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &Ops) {
}
const QualType ElementType = PT ? PT->getPointeeType() : OPT->getPointeeType();
// Handle interface types, which are not represented with a concrete type.
- if (const ObjCInterfaceType *OIT = dyn_cast<ObjCInterfaceType>(ElementType)) {
+ if (const ObjCObjectType *OIT = ElementType->getAs<ObjCObjectType>()) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
CGF.getContext().getTypeSizeInChars(OIT).getQuantity());
@@ -1402,8 +1418,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
Idx = Builder.CreateNeg(Idx, "sub.ptr.neg");
// Handle interface types, which are not represented with a concrete type.
- if (const ObjCInterfaceType *OIT =
- dyn_cast<ObjCInterfaceType>(LHSElementType)) {
+ if (const ObjCObjectType *OIT = LHSElementType->getAs<ObjCObjectType>()) {
llvm::Value *InterfaceSize =
llvm::ConstantInt::get(Idx->getType(),
CGF.getContext().
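
The -1 in EmitNullValue above follows from an ABI property worth spelling out: a data member pointer is encoded as the member's offset, and offset 0 is a legal, non-null value, so zero cannot serve as the null encoding. A small program making the distinction visible (the struct and members are illustrative only):

#include <cstdio>

struct S { int a; int b; };

int main() {
  int S::*null_pm = 0;       // scalar representation is -1 under Itanium
  int S::*first   = &S::a;   // offset 0: a perfectly valid, non-null value
  std::printf("null_pm == 0: %d\n", null_pm == 0);  // prints 1
  std::printf("first   == 0: %d\n", first == 0);    // prints 0
  return 0;
}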
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 8426f7105be4..7c842a94986c 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -47,7 +47,8 @@ llvm::Value *CodeGenFunction::EmitObjCProtocolExpr(const ObjCProtocolExpr *E) {
}
-RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
+RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return) {
// Only the lookup mechanism and first two arguments of the method
// implementation vary between runtimes. We can get the receiver and
// arguments in generic code.
@@ -64,10 +65,11 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
break;
case ObjCMessageExpr::Class: {
- const ObjCInterfaceType *IFace
- = E->getClassReceiver()->getAs<ObjCInterfaceType>();
- OID = IFace->getDecl();
- assert(IFace && "Invalid Objective-C class message send");
+ const ObjCObjectType *ObjTy
+ = E->getClassReceiver()->getAs<ObjCObjectType>();
+ assert(ObjTy && "Invalid Objective-C class message send");
+ OID = ObjTy->getInterface();
+ assert(OID && "Invalid Objective-C class message send");
Receiver = Runtime.GetClass(Builder, OID);
isClassMessage = true;
break;
@@ -92,7 +94,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
// super is only valid in an Objective-C method
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
- return Runtime.GenerateMessageSendSuper(*this, E->getType(),
+ return Runtime.GenerateMessageSendSuper(*this, Return, E->getType(),
E->getSelector(),
OMD->getClassInterface(),
isCategoryImpl,
@@ -102,7 +104,8 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E) {
E->getMethodDecl());
}
- return Runtime.GenerateMessageSend(*this, E->getType(), E->getSelector(),
+ return Runtime.GenerateMessageSend(*this, Return, E->getType(),
+ E->getSelector(),
Receiver, Args, OID,
E->getMethodDecl());
}
@@ -157,9 +160,6 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
!(PD->getPropertyAttributes() & ObjCPropertyDecl::OBJC_PR_nonatomic);
ObjCMethodDecl *OMD = PD->getGetterMethodDecl();
assert(OMD && "Invalid call to generate getter (empty method)");
- // FIXME: This is rather murky, we create this here since they will not have
- // been created by Sema for us.
- OMD->createImplicitParams(getContext(), IMP->getClassInterface());
StartObjCMethod(OMD, IMP->getClassInterface());
// Determine if we should use an objc_getProperty call for
@@ -208,8 +208,9 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
Types.ConvertType(PD->getType())));
EmitReturnOfRValue(RV, PD->getType());
} else {
- LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
if (Ivar->getType()->isAnyComplexType()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
LV.isVolatileQualified());
StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
@@ -219,6 +220,8 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
if ((IsAtomic || (IsStrong = IvarTypeWithAggrGCObjects(Ivar->getType())))
&& CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect
&& CGM.getObjCRuntime().GetCopyStructFunction()) {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
llvm::Value *GetCopyStructFn =
CGM.getObjCRuntime().GetCopyStructFunction();
CodeGenTypes &Types = CGM.getTypes();
@@ -251,9 +254,23 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
FunctionType::ExtInfo()),
GetCopyStructFn, ReturnValueSlot(), Args);
}
- else
- EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ else {
+ if (PID->getGetterCXXConstructor()) {
+ ReturnStmt *Stmt =
+ new (getContext()) ReturnStmt(SourceLocation(),
+ PID->getGetterCXXConstructor(),
+ 0);
+ EmitReturnStmt(*Stmt);
+ }
+ else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
+ EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
+ }
+ }
} else {
+ LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(),
+ Ivar, 0);
CodeGenTypes &Types = CGM.getTypes();
RValue RV = EmitLoadOfLValue(LV, Ivar->getType());
RV = RValue::get(Builder.CreateBitCast(RV.getScalarVal(),
@@ -274,9 +291,6 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
const ObjCPropertyDecl *PD = PID->getPropertyDecl();
ObjCMethodDecl *OMD = PD->getSetterMethodDecl();
assert(OMD && "Invalid call to generate setter (empty method)");
- // FIXME: This is rather murky, we create this here since they will not have
- // been created by Sema for us.
- OMD->createImplicitParams(getContext(), IMP->getClassInterface());
StartObjCMethod(OMD, IMP->getClassInterface());
bool IsCopy = PD->getSetterKind() == ObjCPropertyDecl::Copy;
@@ -368,6 +382,10 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
FunctionType::ExtInfo()),
GetCopyStructFn, ReturnValueSlot(), Args);
+ } else if (PID->getSetterCXXAssignment()) {
+ EmitAnyExpr(PID->getSetterCXXAssignment(), (llvm::Value *)0, false, true,
+ false);
+
} else {
// FIXME: Find a clean way to avoid AST node creation.
SourceLocation Loc = PD->getLocation();
@@ -425,8 +443,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
llvm::Value *SelfAsId =
Builder.CreateBitCast(LoadObjCSelf(), Types.ConvertType(IdTy));
EmitReturnOfRValue(RValue::get(SelfAsId), IdTy);
- }
- else {
+ } else {
// dtor
for (size_t i = IvarInitializers.size(); i > 0; --i) {
FieldDecl *Field = IvarInitializers[i - 1]->getMember();
@@ -442,7 +459,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
const RecordType *RT = FieldType->getAs<RecordType>();
CXXRecordDecl *FieldClassDecl = cast<CXXRecordDecl>(RT->getDecl());
CXXDestructorDecl *Dtor = FieldClassDecl->getDestructor(getContext());
- if (!Dtor->isTrivial())
+ if (!Dtor->isTrivial()) {
if (Array) {
const llvm::Type *BasePtr = ConvertType(FieldType);
BasePtr = llvm::PointerType::getUnqual(BasePtr);
@@ -450,12 +467,13 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
Builder.CreateBitCast(LV.getAddress(), BasePtr);
EmitCXXAggrDestructorCall(Dtor,
Array, BaseAddrPtr);
- }
- else
+ } else {
EmitCXXDestructorCall(Dtor,
Dtor_Complete, /*ForVirtualBase=*/false,
LV.getAddress());
- }
+ }
+ }
+ }
}
FinishFunction();
}
@@ -478,8 +496,6 @@ bool CodeGenFunction::IvarTypeWithAggrGCObjects(QualType Ty) {
llvm::Value *CodeGenFunction::LoadObjCSelf() {
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
- // See if we need to lazily forward self inside a block literal.
- BlockForwardSelf();
return Builder.CreateLoad(LocalDeclMap[OMD->getSelfDecl()], "self");
}
@@ -492,12 +508,14 @@ QualType CodeGenFunction::TypeOfSelfObject() {
}
RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
- const Selector &S) {
+ const Selector &S,
+ ReturnValueSlot Return) {
llvm::Value *Receiver = LoadObjCSelf();
const ObjCMethodDecl *OMD = cast<ObjCMethodDecl>(CurFuncDecl);
bool isClassMessage = OMD->isClassMethod();
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
return CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ Return,
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -508,15 +526,16 @@ RValue CodeGenFunction::EmitObjCSuperPropertyGet(const Expr *Exp,
}
-RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
+RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp,
+ ReturnValueSlot Return) {
Exp = Exp->IgnoreParens();
// FIXME: Split it into two separate routines.
if (const ObjCPropertyRefExpr *E = dyn_cast<ObjCPropertyRefExpr>(Exp)) {
Selector S = E->getProperty()->getGetterName();
if (isa<ObjCSuperExpr>(E->getBase()))
- return EmitObjCSuperPropertyGet(E, S);
+ return EmitObjCSuperPropertyGet(E, S, Return);
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
EmitScalarExpr(E->getBase()),
CallArgList());
} else {
@@ -528,11 +547,11 @@ RValue CodeGenFunction::EmitObjCPropertyGet(const Expr *Exp) {
const ObjCInterfaceDecl *OID = KE->getInterfaceDecl();
Receiver = CGM.getObjCRuntime().GetClass(Builder, OID);
} else if (isa<ObjCSuperExpr>(KE->getBase()))
- return EmitObjCSuperPropertyGet(KE, S);
+ return EmitObjCSuperPropertyGet(KE, S, Return);
else
Receiver = EmitScalarExpr(KE->getBase());
return CGM.getObjCRuntime().
- GenerateMessageSend(*this, Exp->getType(), S,
+ GenerateMessageSend(*this, Return, Exp->getType(), S,
Receiver,
CallArgList(), KE->getInterfaceDecl());
}
@@ -548,6 +567,7 @@ void CodeGenFunction::EmitObjCSuperPropertySet(const Expr *Exp,
bool isCategoryImpl = isa<ObjCCategoryImplDecl>(OMD->getDeclContext());
Args.push_back(std::make_pair(Src, Exp->getType()));
CGM.getObjCRuntime().GenerateMessageSendSuper(*this,
+ ReturnValueSlot(),
Exp->getType(),
S,
OMD->getClassInterface(),
@@ -569,7 +589,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
}
CallArgList Args;
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
EmitScalarExpr(E->getBase()),
Args);
} else if (const ObjCImplicitSetterGetterRefExpr *E =
@@ -586,7 +607,8 @@ void CodeGenFunction::EmitObjCPropertySet(const Expr *Exp,
} else
Receiver = EmitScalarExpr(E->getBase());
Args.push_back(std::make_pair(Src, E->getType()));
- CGM.getObjCRuntime().GenerateMessageSend(*this, getContext().VoidTy, S,
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
+ getContext().VoidTy, S,
Receiver,
Args, E->getInterfaceDecl());
} else
@@ -618,7 +640,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fast enumeration state.
QualType StateTy = getContext().getObjCFastEnumerationStateType();
llvm::Value *StatePtr = CreateMemTemp(StateTy, "state.ptr");
- EmitMemSetToZero(StatePtr, StateTy);
+ EmitNullInitialization(StatePtr, StateTy);
// Number of elements in the items array.
static const unsigned NumItems = 16;
@@ -653,7 +675,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
getContext().UnsignedLongTy));
RValue CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
@@ -778,7 +800,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
EmitBlock(FetchMore);
CountRV =
- CGM.getObjCRuntime().GenerateMessageSend(*this,
+ CGM.getObjCRuntime().GenerateMessageSend(*this, ReturnValueSlot(),
getContext().UnsignedLongTy,
FastEnumSel,
Collection, Args);
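
The ReturnValueSlot threaded through EmitObjCMessageExpr and the property getters above lets an aggregate result be constructed directly in the caller's destination instead of bouncing through a temporary. A rough sketch of that idea in plain C++ (all names below are hypothetical, not CodeGen API):

#include <cstdio>
#include <cstring>

struct BigAggregate { char data[64]; };

static void produce_into(BigAggregate *slot) {
  // Callee fills the caller-provided slot directly (sret style).
  std::memset(slot->data, 'x', sizeof slot->data);
}

int main() {
  BigAggregate result;        // the caller's return value slot
  produce_into(&result);      // no intermediate temporary, no extra copy
  std::printf("%c\n", result.data[0]);   // prints x
  return 0;
}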
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 3c51b7ee9de7..6c25afeb9ad3 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -142,6 +142,7 @@ public:
virtual llvm::Constant *GenerateConstantString(const StringLiteral *);
virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -150,6 +151,7 @@ public:
const ObjCMethodDecl *Method);
virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -235,6 +237,21 @@ static std::string SymbolNameForMethod(const std::string &ClassName, const
return std::string(isClassMethod ? "_c_" : "_i_") + ClassName + "_" +
CategoryName + "_" + MethodNameColonStripped;
}
+static std::string MangleSelectorTypes(const std::string &TypeString) {
+ std::string Mangled = TypeString;
+ // Simple mangling to avoid breaking when we mix JIT / static code.
+ // Not part of the ABI, subject to change without notice.
+ std::replace(Mangled.begin(), Mangled.end(), '@', '_');
+ std::replace(Mangled.begin(), Mangled.end(), ':', 'J');
+ std::replace(Mangled.begin(), Mangled.end(), '*', 'e');
+ std::replace(Mangled.begin(), Mangled.end(), '#', 'E');
+ std::replace(Mangled.begin(), Mangled.end(), ':', 'j');
+ std::replace(Mangled.begin(), Mangled.end(), '(', 'g');
+ std::replace(Mangled.begin(), Mangled.end(), ')', 'G');
+ std::replace(Mangled.begin(), Mangled.end(), '[', 'h');
+ std::replace(Mangled.begin(), Mangled.end(), ']', 'H');
+ return Mangled;
+}
CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
: CGM(cgm), TheModule(CGM.getModule()), ClassPtrAlias(0),
@@ -440,6 +457,7 @@ llvm::Constant *CGObjCGNU::GenerateConstantString(const StringLiteral *SL) {
///should be called.
CodeGen::RValue
CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -567,7 +585,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
llvm::MDNode *node = llvm::MDNode::get(VMContext, impMD, 3);
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs,
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
0, &call);
call->setMetadata(msgSendMDKind, node);
return msgRet;
@@ -576,6 +594,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
/// Generate code for a message send expression.
CodeGen::RValue
CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -611,16 +630,16 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
llvm::BasicBlock *startBB = 0;
llvm::BasicBlock *messageBB = 0;
- llvm::BasicBlock *contiueBB = 0;
+ llvm::BasicBlock *continueBB = 0;
if (!isPointerSizedReturn) {
startBB = Builder.GetInsertBlock();
messageBB = CGF.createBasicBlock("msgSend");
- contiueBB = CGF.createBasicBlock("continue");
+ continueBB = CGF.createBasicBlock("continue");
llvm::Value *isNil = Builder.CreateICmpEQ(Receiver,
llvm::Constant::getNullValue(Receiver->getType()));
- Builder.CreateCondBr(isNil, contiueBB, messageBB);
+ Builder.CreateCondBr(isNil, continueBB, messageBB);
CGF.EmitBlock(messageBB);
}
@@ -711,12 +730,15 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
cast<llvm::CallInst>(imp)->setMetadata(msgSendMDKind, node);
}
llvm::Instruction *call;
- RValue msgRet = CGF.EmitCall(FnInfo, imp, ReturnValueSlot(), ActualArgs,
+ RValue msgRet = CGF.EmitCall(FnInfo, imp, Return, ActualArgs,
0, &call);
call->setMetadata(msgSendMDKind, node);
+
if (!isPointerSizedReturn) {
- CGF.EmitBlock(contiueBB);
+ messageBB = CGF.Builder.GetInsertBlock();
+ CGF.Builder.CreateBr(continueBB);
+ CGF.EmitBlock(continueBB);
if (msgRet.isScalar()) {
llvm::Value *v = msgRet.getScalarVal();
llvm::PHINode *phi = Builder.CreatePHI(v->getType());
@@ -1665,34 +1687,36 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
llvm::Constant *Idxs[] = {Zeros[0],
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
- true, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- ".objc_sel_ptr");
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(".objc_sel_ptr"+iter->first.first+"."+
+ iter->first.second));
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
llvm::PointerType::getUnqual(SelectorTy));
}
- (*iter).second->setAliasee(SelPtr);
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
}
for (llvm::StringMap<llvm::GlobalAlias*>::iterator
iter=UntypedSelectors.begin(), iterEnd = UntypedSelectors.end();
iter != iterEnd; iter++) {
llvm::Constant *Idxs[] = {Zeros[0],
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), index++), Zeros[0]};
- llvm::Constant *SelPtr = new llvm::GlobalVariable
- (TheModule, SelStructPtrTy,
- true, llvm::GlobalValue::InternalLinkage,
- llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
- ".objc_sel_ptr");
+ llvm::Constant *SelPtr = new llvm::GlobalVariable(TheModule, SelStructPtrTy,
+ true, llvm::GlobalValue::LinkOnceODRLinkage,
+ llvm::ConstantExpr::getGetElementPtr(SelectorList, Idxs, 2),
+ MangleSelectorTypes(std::string(".objc_sel_ptr")+iter->getKey().str()));
// If selectors are defined as an opaque type, cast the pointer to this
// type.
if (isSelOpaque) {
SelPtr = llvm::ConstantExpr::getBitCast(SelPtr,
llvm::PointerType::getUnqual(SelectorTy));
}
- (*iter).second->setAliasee(SelPtr);
+ (*iter).second->replaceAllUsesWith(SelPtr);
+ (*iter).second->eraseFromParent();
}
// Number of classes defined.
Elements.push_back(llvm::ConstantInt::get(llvm::Type::getInt16Ty(VMContext),
@@ -1922,11 +1946,11 @@ void CGObjCGNU::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
const ObjCObjectPointerType *OPT =
CatchDecl->getType()->getAs<ObjCObjectPointerType>();
assert(OPT && "Invalid @catch type.");
- const ObjCInterfaceType *IT =
- OPT->getPointeeType()->getAs<ObjCInterfaceType>();
- assert(IT && "Invalid @catch type.");
+ const ObjCInterfaceDecl *IDecl =
+ OPT->getObjectType()->getInterface();
+ assert(IDecl && "Invalid @catch type.");
llvm::Value *EHType =
- MakeConstantString(IT->getDecl()->getNameAsString());
+ MakeConstantString(IDecl->getNameAsString());
ESelArgs.push_back(EHType);
}
}
@@ -2208,7 +2232,8 @@ LValue CGObjCGNU::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
- const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
EmitIvarOffset(CGF, ID, Ivar));
}
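
The effect of MangleSelectorTypes above is easiest to see on a concrete type encoding. Below is a standalone copy of the same mapping plus one example string (a typical ObjC method encoding, not taken from the patch); the second ':' replacement from the original is omitted here because it is a no-op once ':' has already become 'J':

#include <algorithm>
#include <cstdio>
#include <string>

static std::string MangleSelectorTypes(const std::string &TypeString) {
  std::string Mangled = TypeString;
  std::replace(Mangled.begin(), Mangled.end(), '@', '_');
  std::replace(Mangled.begin(), Mangled.end(), ':', 'J');
  std::replace(Mangled.begin(), Mangled.end(), '*', 'e');
  std::replace(Mangled.begin(), Mangled.end(), '#', 'E');
  std::replace(Mangled.begin(), Mangled.end(), '(', 'g');
  std::replace(Mangled.begin(), Mangled.end(), ')', 'G');
  std::replace(Mangled.begin(), Mangled.end(), '[', 'h');
  std::replace(Mangled.begin(), Mangled.end(), ']', 'H');
  return Mangled;
}

int main() {
  // "@12@0:4@8" (an "id f(id self, SEL _cmd, id arg)" style encoding, used
  // here only as an example) becomes "_12_0J4_8": characters that are
  // awkward in symbol names are rewritten, giving the selector globals
  // predictable names that can carry linkonce_odr linkage.
  std::printf("%s\n", MangleSelectorTypes("@12@0:4@8").c_str());
  return 0;
}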
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 77eabbfd4141..d3bafd7eda78 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -973,6 +973,7 @@ protected:
bool AddToUsed);
CodeGen::RValue EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
llvm::Value *Sel,
llvm::Value *Arg0,
@@ -1039,6 +1040,7 @@ private:
llvm::Value *EmitSuperClassRef(const ObjCInterfaceDecl *ID);
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Arg0,
@@ -1126,6 +1128,7 @@ public:
virtual llvm::Function *ModuleInitFunction();
virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -1135,6 +1138,7 @@ public:
virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -1279,6 +1283,7 @@ private:
ObjCProtocolDecl::protocol_iterator end);
CodeGen::RValue EmitMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -1354,6 +1359,7 @@ public:
virtual llvm::Function *ModuleInitFunction();
virtual CodeGen::RValue GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -1363,6 +1369,7 @@ public:
virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -1515,6 +1522,7 @@ llvm::Constant *CGObjCCommonMac::GenerateConstantString(
/// which class's method should be called.
CodeGen::RValue
CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -1566,7 +1574,7 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
Target = CGF.Builder.CreateBitCast(Target, ClassTy);
CGF.Builder.CreateStore(Target,
CGF.Builder.CreateStructGEP(ObjCSuper, 1));
- return EmitLegacyMessageSend(CGF, ResultType,
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
EmitSelector(CGF.Builder, Sel),
ObjCSuper, ObjCTypes.SuperPtrCTy,
true, CallArgs, Method, ObjCTypes);
@@ -1574,13 +1582,14 @@ CGObjCMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
/// Generate code for a message send expression.
CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
const CallArgList &CallArgs,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
- return EmitLegacyMessageSend(CGF, ResultType,
+ return EmitLegacyMessageSend(CGF, Return, ResultType,
EmitSelector(CGF.Builder, Sel),
Receiver, CGF.getContext().getObjCIdType(),
false, CallArgs, Method, ObjCTypes);
@@ -1588,6 +1597,7 @@ CodeGen::RValue CGObjCMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
CodeGen::RValue
CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
llvm::Value *Sel,
llvm::Value *Arg0,
@@ -1634,7 +1644,7 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
assert(Fn && "EmitLegacyMessageSend - unknown API");
Fn = llvm::ConstantExpr::getBitCast(Fn,
llvm::PointerType::getUnqual(FTy));
- return CGF.EmitCall(FnInfo, Fn, ReturnValueSlot(), ActualArgs);
+ return CGF.EmitCall(FnInfo, Fn, Return, ActualArgs);
}
llvm::Value *CGObjCMac::GenerateProtocolRef(CGBuilderTy &Builder,
@@ -2701,12 +2711,12 @@ void CGObjCMac::EmitTryOrSynchronizedStmt(CodeGen::CodeGenFunction &CGF,
}
assert(OPT && "Unexpected non-object pointer type in @catch");
- QualType T = OPT->getPointeeType();
- const ObjCInterfaceType *ObjCType = T->getAs<ObjCInterfaceType>();
- assert(ObjCType && "Catch parameter must have Objective-C type!");
+ const ObjCObjectType *ObjTy = OPT->getObjectType();
+ ObjCInterfaceDecl *IDecl = ObjTy->getInterface();
+ assert(IDecl && "Catch parameter must have Objective-C type!");
// Check if the @catch block matches the exception object.
- llvm::Value *Class = EmitClassRef(CGF.Builder, ObjCType->getDecl());
+ llvm::Value *Class = EmitClassRef(CGF.Builder, IDecl);
llvm::Value *Match =
CGF.Builder.CreateCall2(ObjCTypes.getExceptionMatchFn(),
@@ -2938,7 +2948,8 @@ LValue CGObjCMac::EmitObjCValueForIvar(CodeGen::CodeGenFunction &CGF,
llvm::Value *BaseValue,
const ObjCIvarDecl *Ivar,
unsigned CVRQualifiers) {
- const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
+ const ObjCInterfaceDecl *ID =
+ ObjectTy->getAs<ObjCObjectType>()->getInterface();
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
EmitIvarOffset(CGF, ID, Ivar));
}
@@ -3669,7 +3680,7 @@ ObjCCommonTypesHelper::ObjCCommonTypesHelper(CodeGen::CodeGenModule &cgm)
// id self;
// Class cls;
// }
- RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct,
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
Ctx.getTranslationUnitDecl(),
SourceLocation(),
&Ctx.Idents.get("_objc_super"));
@@ -4131,7 +4142,7 @@ ObjCNonFragileABITypesHelper::ObjCNonFragileABITypesHelper(CodeGen::CodeGenModul
// };
// First the clang type for struct _message_ref_t
- RecordDecl *RD = RecordDecl::Create(Ctx, TagDecl::TK_struct,
+ RecordDecl *RD = RecordDecl::Create(Ctx, TTK_Struct,
Ctx.getTranslationUnitDecl(),
SourceLocation(),
&Ctx.Idents.get("_message_ref_t"));
@@ -5095,12 +5106,12 @@ CGObjCNonFragileABIMac::GetMethodDescriptionConstant(const ObjCMethodDecl *MD) {
/// @encode
///
LValue CGObjCNonFragileABIMac::EmitObjCValueForIvar(
- CodeGen::CodeGenFunction &CGF,
- QualType ObjectTy,
- llvm::Value *BaseValue,
- const ObjCIvarDecl *Ivar,
- unsigned CVRQualifiers) {
- const ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCInterfaceType>()->getDecl();
+ CodeGen::CodeGenFunction &CGF,
+ QualType ObjectTy,
+ llvm::Value *BaseValue,
+ const ObjCIvarDecl *Ivar,
+ unsigned CVRQualifiers) {
+ ObjCInterfaceDecl *ID = ObjectTy->getAs<ObjCObjectType>()->getInterface();
return EmitValueForIvarAtOffset(CGF, ID, BaseValue, Ivar, CVRQualifiers,
EmitIvarOffset(CGF, ID, Ivar));
}
@@ -5114,6 +5125,7 @@ llvm::Value *CGObjCNonFragileABIMac::EmitIvarOffset(
CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -5213,12 +5225,13 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
Callee = CGF.Builder.CreateBitCast(Callee,
llvm::PointerType::getUnqual(FTy));
- return CGF.EmitCall(FnInfo1, Callee, ReturnValueSlot(), ActualArgs);
+ return CGF.EmitCall(FnInfo1, Callee, Return, ActualArgs);
}
/// Generate code for a message send expression in the nonfragile abi.
CodeGen::RValue
CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -5226,10 +5239,11 @@ CGObjCNonFragileABIMac::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
const ObjCInterfaceDecl *Class,
const ObjCMethodDecl *Method) {
return LegacyDispatchedSelector(Sel)
- ? EmitLegacyMessageSend(CGF, ResultType, EmitSelector(CGF.Builder, Sel),
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
Receiver, CGF.getContext().getObjCIdType(),
false, CallArgs, Method, ObjCTypes)
- : EmitMessageSend(CGF, ResultType, Sel,
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
Receiver, CGF.getContext().getObjCIdType(),
false, CallArgs);
}
@@ -5336,6 +5350,7 @@ llvm::Value *CGObjCNonFragileABIMac::GetClass(CGBuilderTy &Builder,
/// which class's method should be called.
CodeGen::RValue
CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot Return,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
@@ -5378,10 +5393,11 @@ CGObjCNonFragileABIMac::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CGF.Builder.CreateStructGEP(ObjCSuper, 1));
return (LegacyDispatchedSelector(Sel))
- ? EmitLegacyMessageSend(CGF, ResultType,EmitSelector(CGF.Builder, Sel),
+ ? EmitLegacyMessageSend(CGF, Return, ResultType,
+ EmitSelector(CGF.Builder, Sel),
ObjCSuper, ObjCTypes.SuperPtrCTy,
true, CallArgs, Method, ObjCTypes)
- : EmitMessageSend(CGF, ResultType, Sel,
+ : EmitMessageSend(CGF, Return, ResultType, Sel,
ObjCSuper, ObjCTypes.SuperPtrCTy,
true, CallArgs);
}
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index 654ad0a4bfae..8de7f10b8612 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -119,6 +119,7 @@ public:
/// a property setter or getter.
virtual CodeGen::RValue
GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
QualType ResultType,
Selector Sel,
llvm::Value *Receiver,
@@ -134,6 +135,7 @@ public:
/// a property setter or getter.
virtual CodeGen::RValue
GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
+ ReturnValueSlot ReturnSlot,
QualType ResultType,
Selector Sel,
const ObjCInterfaceDecl *Class,
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
index 9f966fb7ae46..e95591e5cc01 100644
--- a/lib/CodeGen/CGRecordLayout.h
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -168,6 +168,10 @@ private:
/// field no. This info is populated by record builder.
llvm::DenseMap<const FieldDecl *, CGBitFieldInfo> BitFields;
+ // FIXME: Maybe we could use a CXXBaseSpecifier as the key and use a single
+ // map for both virtual and non virtual bases.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> NonVirtualBaseFields;
+
/// Whether one of the fields in this record layout is a pointer to data
/// member, or a struct that contains pointer to data member.
bool ContainsPointerToDataMember : 1;
@@ -194,6 +198,11 @@ public:
return FieldInfo.lookup(FD);
}
+ unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
+ assert(NonVirtualBaseFields.count(RD) && "Invalid non-virtual base!");
+ return NonVirtualBaseFields.lookup(RD);
+ }
+
/// \brief Return the BitFieldInfo that corresponds to the field FD.
const CGBitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
assert(FD->isBitField() && "Invalid call for non bit-field decl!");
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index 6302cf8d1fc0..9f1687577c3e 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -42,6 +42,9 @@ public:
typedef std::pair<const FieldDecl *, CGBitFieldInfo> LLVMBitFieldInfo;
llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
+ typedef std::pair<const CXXRecordDecl *, unsigned> LLVMBaseInfo;
+ llvm::SmallVector<LLVMBaseInfo, 16> LLVMNonVirtualBases;
+
/// ContainsPointerToDataMember - Whether one of the fields in this record
/// layout is a pointer to data member, or a struct that contains pointer to
/// data member.
@@ -81,8 +84,13 @@ private:
/// Returns false if the operation failed because the struct is not packed.
bool LayoutFields(const RecordDecl *D);
- /// LayoutBases - layout the bases and vtable pointer of a record decl.
- void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout);
+ /// LayoutNonVirtualBase - layout a single non-virtual base.
+ void LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset);
+
+ /// LayoutNonVirtualBases - layout the non-virtual bases of a record decl.
+ void LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout);
/// LayoutField - layout a single field. Returns false if the operation failed
/// because the current struct is not packed.
@@ -110,6 +118,7 @@ private:
/// CheckForPointerToDataMember - Check if the given type contains a pointer
/// to data member.
void CheckForPointerToDataMember(QualType T);
+ void CheckForPointerToDataMember(const CXXRecordDecl *RD);
public:
CGRecordLayoutBuilder(CodeGenTypes &Types)
@@ -143,6 +152,7 @@ void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
FieldTypes.clear();
LLVMFields.clear();
LLVMBitFields.clear();
+ LLVMNonVirtualBases.clear();
LayoutFields(D);
}
@@ -319,8 +329,9 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
if (const RecordType *RT = D->getType()->getAs<RecordType>()) {
const RecordDecl *RD = cast<RecordDecl>(RT->getDecl());
- if (const PragmaPackAttr *PPA = RD->getAttr<PragmaPackAttr>()) {
- if (PPA->getAlignment() != TypeAlignment * 8 && !Packed)
+ if (const MaxFieldAlignmentAttr *MFAA =
+ RD->getAttr<MaxFieldAlignmentAttr>()) {
+ if (MFAA->getAlignment() != TypeAlignment * 8 && !Packed)
return false;
}
}
@@ -435,16 +446,66 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
AppendPadding(Layout.getSize() / 8, Align);
}
-void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
- const ASTRecordLayout &Layout) {
+void CGRecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *BaseDecl,
+ uint64_t BaseOffset) {
+ const ASTRecordLayout &Layout =
+ Types.getContext().getASTRecordLayout(BaseDecl);
+
+ uint64_t NonVirtualSize = Layout.getNonVirtualSize();
+
+ if (BaseDecl->isEmpty()) {
+ // FIXME: Lay out empty bases.
+ return;
+ }
+
+ CheckForPointerToDataMember(BaseDecl);
+
+ // FIXME: Actually use a better type than [sizeof(BaseDecl) x i8] when we can.
+ AppendPadding(BaseOffset / 8, 1);
+
+ // Append the base field.
+ LLVMNonVirtualBases.push_back(LLVMBaseInfo(BaseDecl, FieldTypes.size()));
+
+ AppendBytes(NonVirtualSize / 8);
+}
+
+void
+CGRecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD,
+ const ASTRecordLayout &Layout) {
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
// Check if we need to add a vtable pointer.
- if (RD->isDynamicClass() && !Layout.getPrimaryBase()) {
- const llvm::Type *Int8PtrTy =
- llvm::Type::getInt8PtrTy(Types.getLLVMContext());
+ if (RD->isDynamicClass()) {
+ if (!PrimaryBase) {
+ const llvm::Type *FunctionType =
+ llvm::FunctionType::get(llvm::Type::getInt32Ty(Types.getLLVMContext()),
+ /*isVarArg=*/true);
+ const llvm::Type *VTableTy = FunctionType->getPointerTo();
+
+ assert(NextFieldOffsetInBytes == 0 &&
+ "VTable pointer must come first!");
+ AppendField(NextFieldOffsetInBytes, VTableTy->getPointerTo());
+ } else {
+ // FIXME: Handle a virtual primary base.
+ if (!Layout.getPrimaryBaseWasVirtual())
+ LayoutNonVirtualBase(PrimaryBase, 0);
+ }
+ }
- assert(NextFieldOffsetInBytes == 0 &&
- "VTable pointer must come first!");
- AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
+ // Layout the non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // We've already laid out the primary base.
+ if (BaseDecl == PrimaryBase && !Layout.getPrimaryBaseWasVirtual())
+ continue;
+
+ LayoutNonVirtualBase(BaseDecl, Layout.getBaseClassOffset(BaseDecl));
}
}
@@ -455,7 +516,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
const ASTRecordLayout &Layout = Types.getContext().getASTRecordLayout(D);
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
- LayoutBases(RD, Layout);
+ LayoutNonVirtualBases(RD, Layout);
unsigned FieldNo = 0;
@@ -561,15 +622,24 @@ void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
} else if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
- // FIXME: It would be better if there was a way to explicitly compute the
- // record layout instead of converting to a type.
- Types.ConvertTagDeclType(RD);
+ return CheckForPointerToDataMember(RD);
+ }
+}
- const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+void
+CGRecordLayoutBuilder::CheckForPointerToDataMember(const CXXRecordDecl *RD) {
+ // This record already contains a member pointer.
+ if (ContainsPointerToDataMember)
+ return;
- if (Layout.containsPointerToDataMember())
- ContainsPointerToDataMember = true;
- }
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ Types.ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
+
+ if (Layout.containsPointerToDataMember())
+ ContainsPointerToDataMember = true;
}
CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
@@ -584,13 +654,17 @@ CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
CGRecordLayout *RL =
new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+ // Add all the non-virtual base field numbers.
+ RL->NonVirtualBaseFields.insert(Builder.LLVMNonVirtualBases.begin(),
+ Builder.LLVMNonVirtualBases.end());
+
// Add all the field numbers.
- for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i)
- RL->FieldInfo.insert(Builder.LLVMFields[i]);
+ RL->FieldInfo.insert(Builder.LLVMFields.begin(),
+ Builder.LLVMFields.end());
// Add bitfield info.
- for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i)
- RL->BitFields.insert(Builder.LLVMBitFields[i]);
+ RL->BitFields.insert(Builder.LLVMBitFields.begin(),
+ Builder.LLVMBitFields.end());
// Dump the layout, if requested.
if (getContext().getLangOptions().DumpRecordLayouts) {
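
To make the base-layout change above concrete: each non-empty, non-virtual base now becomes its own field in the LLVM struct, currently typed as an i8 array of the base's non-virtual size, and its index is recorded so getNonVirtualBaseLLVMFieldNo can find it later. A small illustrative hierarchy (not from the patch); the IR shape in the comments is an approximation and target-dependent:

#include <cstdio>

struct Base { int a; int b; };             // non-virtual size: 8 bytes here
struct Derived : Base { char c; };

int main() {
  // The builder above would model Derived roughly as { [8 x i8], i8, ... },
  // with getNonVirtualBaseLLVMFieldNo(Base) == 0; padding and the exact
  // field types depend on the target, so treat this as a sketch.
  std::printf("sizeof(Base)=%lu sizeof(Derived)=%lu\n",
              (unsigned long)sizeof(Base), (unsigned long)sizeof(Derived));
  return 0;
}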
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a914c80da2ac..efde3807111f 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -30,7 +30,10 @@ using namespace CodeGen;
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
if (CGDebugInfo *DI = getDebugInfo()) {
- DI->setLocation(S->getLocStart());
+ if (isa<DeclStmt>(S))
+ DI->setLocation(S->getLocEnd());
+ else
+ DI->setLocation(S->getLocStart());
DI->EmitStopPoint(CurFn, Builder);
}
}
@@ -76,8 +79,11 @@ void CodeGenFunction::EmitStmt(const Stmt *S) {
// Expression emitters don't handle unreachable blocks yet, so look for one
// explicitly here. This handles the common case of a call to a noreturn
// function.
+ // We can't erase blocks with an associated cleanup size here since the
+ // memory might be reused, leaving the old cleanup info pointing at a new
+ // block.
if (llvm::BasicBlock *CurBB = Builder.GetInsertBlock()) {
- if (CurBB->empty() && CurBB->use_empty()) {
+ if (CurBB->empty() && CurBB->use_empty() && !BlockScopes.count(CurBB)) {
CurBB->eraseFromParent();
Builder.ClearInsertionPoint();
}
@@ -484,8 +490,6 @@ void CodeGenFunction::EmitDoStmt(const DoStmt &S) {
}
void CodeGenFunction::EmitForStmt(const ForStmt &S) {
- // FIXME: What do we do if the increment (f.e.) contains a stmt expression,
- // which contains a continue/break?
CleanupScope ForScope(*this);
// Evaluate the first part before the loop.
@@ -555,14 +559,14 @@ void CodeGenFunction::EmitForStmt(const ForStmt &S) {
EmitStmt(S.getBody());
}
- BreakContinueStack.pop_back();
-
// If there is an increment, emit it next.
if (S.getInc()) {
EmitBlock(IncBlock);
EmitStmt(S.getInc());
}
+ BreakContinueStack.pop_back();
+
// Finally, branch back up to the condition for the next iteration.
if (CondCleanup) {
// Branch to the cleanup block.
@@ -604,7 +608,20 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
// FIXME: Clean this up by using an LValue for ReturnTemp,
// EmitStoreThroughLValue, and EmitAnyExpr.
- if (!ReturnValue) {
+ if (S.getNRVOCandidate() && S.getNRVOCandidate()->isNRVOVariable() &&
+ !Target.useGlobalsForAutomaticVariables()) {
+ // Apply the named return value optimization for this return statement,
+ // which means doing nothing: the appropriate result has already been
+ // constructed into the NRVO variable.
+
+ // If there is an NRVO flag for this variable, set it to 1 to indicate
+ // that the cleanup code should not destroy the variable.
+ if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()]) {
+ const llvm::Type *BoolTy = llvm::Type::getInt1Ty(VMContext);
+ llvm::Value *One = llvm::ConstantInt::get(BoolTy, 1);
+ Builder.CreateStore(One, NRVOFlag);
+ }
+ } else if (!ReturnValue) {
// Make sure not to return anything, but evaluate the expression
// for side effects.
if (RV)
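
The new branch in EmitReturnStmt above handles functions where the named return value optimization applies: the local named in the return statement has already been constructed in the caller's return slot, so the return itself emits nothing, and when the variable also has cleanup paths a flag is set to 1 so its destructor is skipped on the NRVO path. A minimal example of the kind of function this targets (illustrative only; the printed output is hedged because copy elision is implementation-dependent):

#include <cstdio>

struct Tracked {
  Tracked()                { std::puts("construct"); }
  Tracked(const Tracked &) { std::puts("copy"); }   // elided when NRVO applies
  ~Tracked()               { std::puts("destroy"); }
};

static Tracked make() {
  Tracked t;     // the NRVO candidate: constructed in the return slot
  return t;      // with NRVO applied, the return statement emits no copy
}

int main() {
  Tracked result = make();   // typically prints "construct" then "destroy"
  (void)result;
  return 0;
}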
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 15e564810f83..61c74230e118 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -378,7 +378,7 @@ CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
D1(printf("vtt %s\n", RD->getNameAsCString()));
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
+ llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name, true);
if (GV == 0 || GV->isDeclaration()) {
const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
diff --git a/lib/CodeGen/CGVTables.cpp b/lib/CodeGen/CGVTables.cpp
index 159753aa359c..0f023e63ae54 100644
--- a/lib/CodeGen/CGVTables.cpp
+++ b/lib/CodeGen/CGVTables.cpp
@@ -2548,7 +2548,7 @@ llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
getMangleContext().mangleThunk(MD, Thunk, Name);
const llvm::Type *Ty = getTypes().GetFunctionTypeForVTable(MD);
- return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
+ return GetOrCreateLLVMFunction(Name, Ty, GD);
}
static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
@@ -2641,10 +2641,9 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
E = MD->param_end(); I != E; ++I) {
ParmVarDecl *Param = *I;
QualType ArgType = Param->getType();
+ RValue Arg = EmitDelegateCallArg(Param);
- // FIXME: Declaring a DeclRefExpr on the stack is kinda icky.
- DeclRefExpr ArgExpr(Param, ArgType.getNonReferenceType(), SourceLocation());
- CallArgs.push_back(std::make_pair(EmitCallArg(&ArgExpr, ArgType), ArgType));
+ CallArgs.push_back(std::make_pair(Arg, ArgType));
}
// Get our callee.
@@ -2657,8 +2656,15 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
FPT->getExtInfo());
+ // Determine whether we have a return value slot to use.
+ ReturnValueSlot Slot;
+ if (!ResultType->isVoidType() &&
+ FnInfo.getReturnInfo().getKind() == ABIArgInfo::Indirect &&
+ hasAggregateLLVMType(CurFnInfo->getReturnType()))
+ Slot = ReturnValueSlot(ReturnValue, ResultType.isVolatileQualified());
+
// Now emit our call.
- RValue RV = EmitCall(FnInfo, Callee, ReturnValueSlot(), CallArgs, MD);
+ RValue RV = EmitCall(FnInfo, Callee, Slot, CallArgs, MD);
if (!Thunk.Return.isEmpty()) {
// Emit the return adjustment.
@@ -2701,7 +2707,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
RV = RValue::get(ReturnValue);
}
- if (!ResultType->isVoidType())
+ if (!ResultType->isVoidType() && Slot.isNull())
EmitReturnOfRValue(RV, ResultType);
FinishFunction();
@@ -2710,7 +2716,7 @@ void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
CXXThisDecl->Destroy(getContext());
// Set the right linkage.
- Fn->setLinkage(CGM.getFunctionLinkage(MD));
+ CGM.setFunctionLinkage(MD, Fn);
// Set the right visibility.
CGM.setGlobalVisibility(Fn, MD);
@@ -2788,7 +2794,13 @@ void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
// Check if we've computed this information before.
if (LayoutData)
return;
-
+
+ // We may need to generate a definition for this vtable.
+ if (!isKeyFunctionInAnotherTU(CGM.getContext(), RD) &&
+ RD->getTemplateSpecializationKind()
+ != TSK_ExplicitInstantiationDeclaration)
+ CGM.DeferredVTables.push_back(RD);
+
VTableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
// Add the VTable layout.
@@ -3119,49 +3131,3 @@ CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
DC->getParent()->isTranslationUnit())
CGM.EmitFundamentalRTTIDescriptors();
}
-
-void CodeGenVTables::EmitVTableRelatedData(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const CXXRecordDecl *RD = MD->getParent();
-
- // If the class doesn't have a vtable we don't need to emit one.
- if (!RD->isDynamicClass())
- return;
-
- // Check if we need to emit thunks for this function.
- if (MD->isVirtual())
- EmitThunks(GD);
-
- // Get the key function.
- const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
-
- TemplateSpecializationKind RDKind = RD->getTemplateSpecializationKind();
- TemplateSpecializationKind MDKind = MD->getTemplateSpecializationKind();
-
- if (KeyFunction) {
- // We don't have the right key function.
- if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl())
- return;
- } else {
- // If we have no key funcion and this is a explicit instantiation declaration,
- // we will produce a vtable at the explicit instantiation. We don't need one
- // here.
- if (RDKind == clang::TSK_ExplicitInstantiationDeclaration)
- return;
-
- // If this is an explicit instantiation of a method, we don't need a vtable.
- // Since we have no key function, we will emit the vtable when we see
- // a use, and just defining a function is not an use.
- if (RDKind == TSK_ImplicitInstantiation &&
- MDKind == TSK_ExplicitInstantiationDefinition)
- return;
- }
-
- if (VTables.count(RD))
- return;
-
- if (RDKind == TSK_ImplicitInstantiation)
- CGM.DeferredVTables.push_back(RD);
- else
- GenerateClassData(CGM.getVTableLinkage(RD), RD);
-}
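
For context on GenerateThunk above: thunks arise when a virtual function must be reachable through a base subobject at a non-zero offset, so the vtable entry for that base points at a small adjustor that fixes up 'this' (and, with the new code, can forward an indirect return value slot) before calling the real method. An illustrative hierarchy that forces such a thunk (not from the patch):

#include <cstdio>

struct First  { virtual void f() {} virtual ~First() {} int x; };
struct Second { virtual void f() {} virtual ~Second() {} int y; };

struct Derived : First, Second {
  virtual void f() { std::puts("Derived::f"); }
};

int main() {
  Derived d;
  Second *s = &d;   // points at the Second subobject, offset into d
  s->f();           // dispatches through a this-adjusting thunk to Derived::f
  return 0;
}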
diff --git a/lib/CodeGen/CGVTables.h b/lib/CodeGen/CGVTables.h
index 6c18ca83f091..e55377f2fa2f 100644
--- a/lib/CodeGen/CGVTables.h
+++ b/lib/CodeGen/CGVTables.h
@@ -272,9 +272,6 @@ class CodeGenVTables {
/// EmitThunk - Emit a single thunk.
void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
- /// EmitThunks - Emit the associated thunks for the given global decl.
- void EmitThunks(GlobalDecl GD);
-
/// ComputeVTableRelatedInformation - Compute and store all vtable related
/// information (vtable layout, vbase offset offsets, thunks etc) for the
/// given record decl.
@@ -349,11 +346,10 @@ public:
VTableAddressPointsMapTy& AddressPoints);
llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
-
- // EmitVTableRelatedData - Will emit any thunks that the global decl might
- // have, as well as the vtable itself if the global decl is the key function.
- void EmitVTableRelatedData(GlobalDecl GD);
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
/// GenerateClassData - Generate all the class data required to be generated
/// upon definition of a KeyFunction. This includes the vtable, the
/// rtti data structure and the VTT.
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index dfd2a391a4cf..a226400a3787 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -28,7 +28,10 @@ add_clang_library(clangCodeGen
CodeGenFunction.cpp
CodeGenModule.cpp
CodeGenTypes.cpp
+ ItaniumCXXABI.cpp
Mangle.cpp
ModuleBuilder.cpp
TargetInfo.cpp
)
+
+add_dependencies(clangCodeGen ClangStmtNodes)
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index d3bf1645a026..73de0fd77384 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -37,6 +37,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
LLVMPointerWidth = Target.getPointerWidth(0);
Exceptions = getContext().getLangOptions().Exceptions;
CatchUndefined = getContext().getLangOptions().CatchUndefined;
+ CGM.getMangleContext().startNewFunction();
}
ASTContext &CodeGenFunction::getContext() const {
@@ -472,7 +473,23 @@ void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type,
CGM.ErrorUnsupported(S, Type, OmitOnError);
}
-void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
+void
+CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
+ // If the type contains a pointer to data member we can't memset it to zero.
+ // Instead, create a null constant and copy it to the destination.
+ if (CGM.getTypes().ContainsPointerToDataMember(Ty)) {
+ llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
+
+ llvm::GlobalVariable *NullVariable =
+ new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalVariable::PrivateLinkage,
+ NullConstant, llvm::Twine());
+ EmitAggregateCopy(DestPtr, NullVariable, Ty, /*isVolatile=*/false);
+ return;
+ }
+
+
// Ignore empty classes in C++.
if (getContext().getLangOptions().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
@@ -481,6 +498,9 @@ void CodeGenFunction::EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty) {
}
}
+ // Otherwise, just memset the whole thing to zero. This is legal
+ // because in LLVM, all default initializers (other than the ones we just
+ // handled above) are guaranteed to have a bit pattern of all zeros.
const llvm::Type *BP = llvm::Type::getInt8PtrTy(VMContext);
if (DestPtr->getType() != BP)
DestPtr = Builder.CreateBitCast(DestPtr, BP, "tmp");
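
EmitNullInitialization above chooses between two strategies, and the reason is the same all-ones null representation discussed earlier: a memset to zero is only correct when every default initializer really is the zero bit pattern. The small program below (illustrative types only) shows a memset-zeroed pointer to data member failing the null comparison on a typical Itanium-ABI target:

#include <cstdio>
#include <cstring>

struct S { int x; };
struct Holder { int S::*pm; };

int main() {
  Holder zeroed;
  std::memset(&zeroed, 0, sizeof zeroed);  // bit pattern 0, which is NOT null
  Holder nulled = Holder();                // proper null initialization

  std::printf("memset pm is null?     %d\n", zeroed.pm == 0); // usually 0
  std::printf("value-init pm is null? %d\n", nulled.pm == 0); // prints 1
  return 0;
}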
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index 90a3ec4a4b10..ece275e7629e 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -67,6 +67,7 @@ namespace CodeGen {
class CGDebugInfo;
class CGFunctionInfo;
class CGRecordLayout;
+ class CGBlockInfo;
/// CodeGenFunction - This class organizes the per-function state that is used
/// while generating LLVM code.
@@ -107,6 +108,11 @@ public:
bool Exceptions;
bool CatchUndefined;
+
+ /// \brief A mapping from NRVO variables to the flags used to indicate
+ /// when the NRVO has been applied to this variable.
+ llvm::DenseMap<const VarDecl *, llvm::Value *> NRVOFlags;
+
public:
/// ObjCEHValueStack - Stack of Objective-C exception values, used for
/// rethrows.
@@ -496,18 +502,18 @@ public:
std::vector<HelperInfo> *);
llvm::Function *GenerateBlockFunction(const BlockExpr *BExpr,
- const BlockInfo& Info,
+ CGBlockInfo &Info,
const Decl *OuterFuncDecl,
- llvm::DenseMap<const Decl*, llvm::Value*> ldm,
- CharUnits &Size, CharUnits &Align,
- llvm::SmallVector<const Expr *, 8> &subBlockDeclRefDecls,
- bool &subBlockHasCopyDispose);
+ llvm::DenseMap<const Decl*, llvm::Value*> ldm);
- void BlockForwardSelf();
llvm::Value *LoadBlockStruct();
- CharUnits AllocateBlockDecl(const BlockDeclRefExpr *E);
- llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E);
+ void AllocateBlockCXXThisPointer(const CXXThisExpr *E);
+ void AllocateBlockDecl(const BlockDeclRefExpr *E);
+ llvm::Value *GetAddrOfBlockDecl(const BlockDeclRefExpr *E) {
+ return GetAddrOfBlockDecl(E->getDecl(), E->isByRef());
+ }
+ llvm::Value *GetAddrOfBlockDecl(const ValueDecl *D, bool ByRef);
const llvm::Type *BuildByRefType(const ValueDecl *D);
void GenerateCode(GlobalDecl GD, llvm::Function *Fn);
@@ -531,7 +537,8 @@ public:
/// GenerateThunk - Generate a thunk for the given method.
void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
- void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type);
+ void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type,
+ FunctionArgList &Args);
/// InitializeVTablePointer - Initialize the vtable pointer of the given
/// subobject.
@@ -554,8 +561,6 @@ public:
void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
- void SynthesizeCXXCopyConstructor(const FunctionArgList &Args);
-
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes in
/// reverse order of their construction.
@@ -725,8 +730,6 @@ public:
void EmitAggregateCopy(llvm::Value *DestPtr, llvm::Value *SrcPtr,
QualType EltTy, bool isVolatile=false);
- void EmitAggregateClear(llvm::Value *DestPtr, QualType Ty);
-
/// StartBlock - Start new block named N. If insert block is a dummy block
/// then reuse it.
void StartBlock(const char *N);
@@ -744,8 +747,10 @@ public:
llvm::BlockAddress *GetAddrOfLabel(const LabelStmt *L);
llvm::BasicBlock *GetIndirectGotoBlock();
- /// EmitMemSetToZero - Generate code to memset a value of the given type to 0.
- void EmitMemSetToZero(llvm::Value *DestPtr, QualType Ty);
+ /// EmitNullInitialization - Generate code to set a value of the given type to
+ /// null, If the type contains data member pointers, they will be initialized
+ /// to -1 in accordance with the Itanium C++ ABI.
+ void EmitNullInitialization(llvm::Value *DestPtr, QualType Ty);
// EmitVAArg - Generate code to get an argument from the passed in pointer
// and update it accordingly. The return value is a pointer to the argument.
@@ -802,14 +807,6 @@ public:
const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl);
- void EmitClassAggrMemberwiseCopy(llvm::Value *DestValue,
- llvm::Value *SrcValue,
- const ConstantArrayType *Array,
- const CXXRecordDecl *ClassDecl);
-
- void EmitClassMemberwiseCopy(llvm::Value *DestValue, llvm::Value *SrcValue,
- const CXXRecordDecl *ClassDecl);
-
void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
CXXCtorType CtorType,
const FunctionArgList &Args);
@@ -940,6 +937,7 @@ public:
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ llvm::Constant *getUnwindResumeOrRethrowFn();
struct CXXTryStmtInfo {
llvm::BasicBlock *SavedLandingPad;
llvm::BasicBlock *HandlerBlock;
@@ -1056,6 +1054,9 @@ public:
llvm::Value *EmitIvarOffset(const ObjCInterfaceDecl *Interface,
const ObjCIvarDecl *Ivar);
+ LValue EmitLValueForAnonRecordField(llvm::Value* Base,
+ const FieldDecl* Field,
+ unsigned CVRQualifiers);
LValue EmitLValueForField(llvm::Value* Base, const FieldDecl* Field,
unsigned CVRQualifiers);
@@ -1151,9 +1152,12 @@ public:
llvm::Value *EmitObjCProtocolExpr(const ObjCProtocolExpr *E);
llvm::Value *EmitObjCStringLiteral(const ObjCStringLiteral *E);
llvm::Value *EmitObjCSelectorExpr(const ObjCSelectorExpr *E);
- RValue EmitObjCMessageExpr(const ObjCMessageExpr *E);
- RValue EmitObjCPropertyGet(const Expr *E);
- RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S);
+ RValue EmitObjCMessageExpr(const ObjCMessageExpr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCPropertyGet(const Expr *E,
+ ReturnValueSlot Return = ReturnValueSlot());
+ RValue EmitObjCSuperPropertyGet(const Expr *Exp, const Selector &S,
+ ReturnValueSlot Return = ReturnValueSlot());
void EmitObjCPropertySet(const Expr *E, RValue Src);
void EmitObjCSuperPropertySet(const Expr *E, const Selector &S, RValue Src);
@@ -1298,6 +1302,11 @@ public:
/// EmitCallArg - Emit a single call argument.
RValue EmitCallArg(const Expr *E, QualType ArgType);
+ /// EmitDelegateCallArg - We are performing a delegate call; that
+ /// is, the current function is delegating to another one. Produce
+ /// a r-value suitable for passing the given parameter.
+ RValue EmitDelegateCallArg(const VarDecl *Param);
+
private:
void EmitReturnOfRValue(RValue RV, QualType Ty);
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index cc90a2855dee..103024c32392 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -48,7 +48,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
- MangleCtx(C, diags), VTables(*this), Runtime(0),
+ VTables(*this), Runtime(0), ABI(0),
CFConstantStringClassRef(0),
NSConstantStringClassRef(0),
VMContext(M.getContext()) {
@@ -62,12 +62,17 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
else
Runtime = CreateMacObjCRuntime(*this);
+ if (!Features.CPlusPlus)
+ ABI = 0;
+ else createCXXABI();
+
// If debug info generation is enabled, create the CGDebugInfo object.
DebugInfo = CodeGenOpts.DebugInfo ? new CGDebugInfo(*this) : 0;
}
CodeGenModule::~CodeGenModule() {
delete Runtime;
+ delete ABI;
delete DebugInfo;
}
@@ -80,6 +85,11 @@ void CodeGenModule::createObjCRuntime() {
Runtime = CreateMacObjCRuntime(*this);
}
+void CodeGenModule::createCXXABI() {
+ // For now, just create an Itanium ABI.
+ ABI = CreateItaniumCXXABI(*this);
+}
+
void CodeGenModule::Release() {
EmitDeferred();
EmitCXXGlobalInitFunc();
@@ -316,17 +326,6 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
== TSK_ExplicitInstantiationDeclaration)
return CodeGenModule::GVA_C99Inline;
- // If this is a virtual method and its class has a key method in another
- // translation unit, we know that this method will be present in that
- // translation unit. In this translation unit we will use this method
- // only for inlining and analysis. This is the semantics of c99 inline.
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- const CXXRecordDecl *RD = MD->getParent();
- if (MD->isVirtual() &&
- CodeGenVTables::isKeyFunctionInAnotherTU(Context, RD))
- return CodeGenModule::GVA_C99Inline;
- }
-
return CodeGenModule::GVA_CXXInline;
}
@@ -372,7 +371,6 @@ CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
/// variables (these details are set in EmitGlobalVarDefinition for variables).
void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
llvm::GlobalValue *GV) {
- GV->setLinkage(getFunctionLinkage(D));
SetCommonAttributes(D, GV);
}
@@ -515,9 +513,14 @@ void CodeGenModule::EmitDeferred() {
GlobalDecl D = DeferredDeclsToEmit.back();
DeferredDeclsToEmit.pop_back();
- // Look it up to see if it was defined with a stronger definition (e.g. an
- // extern inline function with a strong function redefinition). If so,
- // just ignore the deferred decl.
+ // Check to see if we've already emitted this. This is necessary
+ // for a couple of reasons: first, decls can end up in the
+ // deferred-decls queue multiple times, and second, decls can end
+ // up with definitions in unusual ways (e.g. by an extern inline
+ // function acquiring a strong function redefinition). Just
+ // ignore these cases.
+ //
+ // TODO: That said, looking this up multiple times is very wasteful.
MangleBuffer Name;
getMangledName(Name, D);
llvm::GlobalValue *CGRef = GetGlobalValue(Name);
@@ -526,6 +529,11 @@ void CodeGenModule::EmitDeferred() {
if (!CGRef->isDeclaration())
continue;
+ // GlobalAlias::isDeclaration() defers to the aliasee, but for our
+ // purposes an alias counts as a definition.
+ if (isa<llvm::GlobalAlias>(CGRef))
+ continue;
+
// Otherwise, emit the definition and move on to the next one.
EmitGlobalDefinition(D);
}
@@ -727,8 +735,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
Context.getSourceManager(),
"Generating code for declaration");
- if (isa<CXXMethodDecl>(D))
- getVTables().EmitVTableRelatedData(GD);
+ if (const CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(D))
+ if (Method->isVirtual())
+ getVTables().EmitThunks(GD);
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
return EmitCXXConstructor(CD, GD.getCtorType());
@@ -984,6 +993,11 @@ void CodeGenModule::EmitTentativeDefinition(const VarDecl *D) {
EmitGlobalVarDefinition(D);
}
+void CodeGenModule::EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired) {
+ if (DefinitionRequired)
+ getVTables().GenerateClassData(getVTableLinkage(Class), Class);
+}
+
llvm::GlobalVariable::LinkageTypes
CodeGenModule::getVTableLinkage(const CXXRecordDecl *RD) {
if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
@@ -1101,10 +1115,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D) {
assert(!ASTTy->isIncompleteType() && "Unexpected incomplete type");
Init = EmitNullConstant(D->getType());
} else {
- Init = EmitConstantExpr(InitExpr, D->getType());
-
+ Init = EmitConstantExpr(InitExpr, D->getType());
if (!Init) {
QualType T = InitExpr->getType();
+ if (D->getType()->isReferenceType())
+ T = D->getType();
+
if (getLangOptions().CPlusPlus) {
EmitCXXGlobalVarDeclInitFunc(D);
Init = EmitNullConstant(T);
@@ -1333,6 +1349,7 @@ void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
}
llvm::Function *Fn = cast<llvm::Function>(Entry);
+ setFunctionLinkage(D, Fn);
CodeGenFunction(*this).GenerateCode(D, Fn);
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 93d8ddf3e485..319744c4be3d 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -22,6 +22,7 @@
#include "CGCall.h"
#include "CGCXX.h"
#include "CGVTables.h"
+#include "CGCXXABI.h"
#include "CodeGenTypes.h"
#include "GlobalDecl.h"
#include "Mangle.h"
@@ -90,13 +91,13 @@ class CodeGenModule : public BlockModule {
mutable const TargetCodeGenInfo *TheTargetCodeGenInfo;
Diagnostic &Diags;
CodeGenTypes Types;
- MangleContext MangleCtx;
/// VTables - Holds information about C++ vtables.
CodeGenVTables VTables;
friend class CodeGenVTables;
CGObjCRuntime* Runtime;
+ CXXABI* ABI;
CGDebugInfo* DebugInfo;
// WeakRefReferences - A set of references that have only been seen via
@@ -153,6 +154,8 @@ class CodeGenModule : public BlockModule {
/// Lazily create the Objective-C runtime
void createObjCRuntime();
+ /// Lazily create the C++ ABI
+ void createCXXABI();
llvm::LLVMContext &VMContext;
public:
@@ -175,6 +178,16 @@ public:
/// been configured.
bool hasObjCRuntime() { return !!Runtime; }
+ /// getCXXABI() - Return a reference to the configured
+ /// C++ ABI.
+ CXXABI &getCXXABI() {
+ if (!ABI) createCXXABI();
+ return *ABI;
+ }
+
+ /// hasCXXABI() - Return true iff a C++ ABI has been configured.
+ bool hasCXXABI() { return !!ABI; }
+
llvm::Value *getStaticLocalDeclAddress(const VarDecl *VD) {
return StaticLocalDeclMap[VD];
}
@@ -189,7 +202,10 @@ public:
const LangOptions &getLangOptions() const { return Features; }
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
- MangleContext &getMangleContext() { return MangleCtx; }
+ MangleContext &getMangleContext() {
+ if (!ABI) createCXXABI();
+ return ABI->getMangleContext();
+ }
CodeGenVTables &getVTables() { return VTables; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
@@ -413,6 +429,7 @@ public:
void getMangledName(MangleBuffer &Buffer, GlobalDecl D);
void getMangledName(MangleBuffer &Buffer, const NamedDecl *ND);
+ void getMangledName(MangleBuffer &Buffer, const BlockDecl *BD);
void getMangledCXXCtorName(MangleBuffer &Buffer,
const CXXConstructorDecl *D,
CXXCtorType Type);
@@ -422,6 +439,8 @@ public:
void EmitTentativeDefinition(const VarDecl *D);
+ void EmitVTable(CXXRecordDecl *Class, bool DefinitionRequired);
+
enum GVALinkage {
GVA_Internal,
GVA_C99Inline,
@@ -434,6 +453,10 @@ public:
llvm::GlobalVariable::LinkageTypes
getFunctionLinkage(const FunctionDecl *FD);
+ void setFunctionLinkage(const FunctionDecl *FD, llvm::GlobalValue *V) {
+ V->setLinkage(getFunctionLinkage(FD));
+ }
+
/// getVTableLinkage - Return the appropriate linkage for the vtable, VTT,
/// and type information of the given class.
static llvm::GlobalVariable::LinkageTypes
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index f53dd83d7035..a46dc726a45b 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -322,6 +322,9 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
true);
}
+ case Type::ObjCObject:
+ return ConvertTypeRecursive(cast<ObjCObjectType>(Ty).getBaseType());
+
case Type::ObjCInterface: {
// Objective-C interfaces are always opaque (outside of the
// runtime, which can do whatever it likes); we never refine
@@ -467,3 +470,32 @@ CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
assert(Layout && "Unable to find record layout information for type");
return *Layout;
}
+
+bool CodeGenTypes::ContainsPointerToDataMember(QualType T) {
+ // No need to check for member pointers when not compiling C++.
+ if (!Context.getLangOptions().CPlusPlus)
+ return false;
+
+ T = Context.getBaseElementType(T);
+
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+
+ return ContainsPointerToDataMember(RD);
+ }
+
+ if (const MemberPointerType *MPT = T->getAs<MemberPointerType>())
+ return !MPT->getPointeeType()->isFunctionType();
+
+ return false;
+}
+
+bool CodeGenTypes::ContainsPointerToDataMember(const CXXRecordDecl *RD) {
+
+ // FIXME: It would be better if there was a way to explicitly compute the
+ // record layout instead of converting to a type.
+ ConvertTagDeclType(RD);
+
+ const CGRecordLayout &Layout = getCGRecordLayout(RD);
+ return Layout.containsPointerToDataMember();
+}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 10e71e2a03fa..fc28c3ae33f4 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -186,6 +186,14 @@ public: // These are internal details of CGT that shouldn't be used externally.
/// argument types it would be passed as on the provided vector \arg
/// ArgTys. See ABIArgInfo::Expand.
void GetExpandedTypes(QualType Ty, std::vector<const llvm::Type*> &ArgTys);
+
+ /// ContainsPointerToDataMember - Return whether the given type contains a
+ /// pointer to a data member.
+ bool ContainsPointerToDataMember(QualType T);
+
+ /// ContainsPointerToDataMember - Return whether the record decl contains a
+ /// pointer to a data member.
+ bool ContainsPointerToDataMember(const CXXRecordDecl *RD);
};
} // end namespace CodeGen
diff --git a/lib/CodeGen/ItaniumCXXABI.cpp b/lib/CodeGen/ItaniumCXXABI.cpp
new file mode 100644
index 000000000000..98db75ea2b46
--- /dev/null
+++ b/lib/CodeGen/ItaniumCXXABI.cpp
@@ -0,0 +1,39 @@
+//===------- ItaniumCXXABI.cpp - Emit LLVM Code from ASTs for a Module ----===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides C++ code generation targeting the Itanium C++ ABI. The class
+// in this file generates structures that follow the Itanium C++ ABI, which is
+// documented at:
+// http://www.codesourcery.com/public/cxx-abi/abi.html
+// http://www.codesourcery.com/public/cxx-abi/abi-eh.html
+//===----------------------------------------------------------------------===//
+
+#include "CGCXXABI.h"
+#include "CodeGenModule.h"
+#include "Mangle.h"
+
+using namespace clang;
+
+namespace {
+class ItaniumCXXABI : public CodeGen::CXXABI {
+ CodeGen::MangleContext MangleCtx;
+public:
+ ItaniumCXXABI(CodeGen::CodeGenModule &CGM) :
+ MangleCtx(CGM.getContext(), CGM.getDiags()) { }
+
+ CodeGen::MangleContext &getMangleContext() {
+ return MangleCtx;
+ }
+};
+}
+
+CodeGen::CXXABI *CodeGen::CreateItaniumCXXABI(CodeGenModule &CGM) {
+ return new ItaniumCXXABI(CGM);
+}
+
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index 8658cfb0282c..6c2a64898fee 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -36,6 +36,57 @@
using namespace clang;
using namespace CodeGen;
+MiscNameMangler::MiscNameMangler(MangleContext &C,
+ llvm::SmallVectorImpl<char> &Res)
+ : Context(C), Out(Res) { }
+
+void MiscNameMangler::mangleBlock(const BlockDecl *BD) {
+ // Mangle the context of the block.
+ // FIXME: We currently mimic GCC's mangling scheme, which leaves much to be
+ // desired. Come up with a better mangling scheme.
+ const DeclContext *DC = BD->getDeclContext();
+ while (isa<BlockDecl>(DC) || isa<EnumDecl>(DC))
+ DC = DC->getParent();
+ if (DC->isFunctionOrMethod()) {
+ Out << "__";
+ if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
+ else {
+ const NamedDecl *ND = cast<NamedDecl>(DC);
+ if (IdentifierInfo *II = ND->getIdentifier())
+ Out << II->getName();
+ else {
+ // FIXME: We were doing a mangleUnqualifiedName() before, but that's
+ // a private member of a class that will soon itself be private to the
+ // Itanium C++ ABI object. What should we do now? Right now, I'm just
+ // calling the mangleName() method on the MangleContext; is there a
+ // better way?
+ llvm::SmallString<64> Buffer;
+ Context.mangleName(ND, Buffer);
+ Out << Buffer;
+ }
+ }
+ Out << "_block_invoke_" << Context.getBlockId(BD, true);
+ } else {
+ Out << "__block_global_" << Context.getBlockId(BD, false);
+ }
+}
+
+void MiscNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
+ llvm::SmallString<64> Name;
+ llvm::raw_svector_ostream OS(Name);
+
+ const ObjCContainerDecl *CD =
+ dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
+ assert (CD && "Missing container decl in GetNameForMethod");
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << '(' << CID << ')';
+ OS << ' ' << MD->getSelector().getAsString() << ']';
+
+ Out << OS.str().size() << OS.str();
+}
+
namespace {
static const DeclContext *GetLocalClassFunctionDeclContext(
@@ -107,7 +158,8 @@ public:
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
void mangleType(QualType T);
-
+ void mangleNameOrStandardSubstitution(const NamedDecl *ND);
+
private:
bool mangleSubstitution(const NamedDecl *ND);
bool mangleSubstitution(QualType T);
@@ -714,8 +766,9 @@ void CXXNameMangler::mangleLocalName(const NamedDecl *ND) {
const DeclContext *DC = ND->getDeclContext();
Out << 'Z';
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC))
- mangleObjCMethodName(MD);
+ if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(DC)) {
+ mangleObjCMethodName(MD);
+ }
else if (const DeclContext *CDC = GetLocalClassFunctionDeclContext(DC)) {
mangleFunctionEncoding(cast<FunctionDecl>(CDC));
Out << 'E';
@@ -752,6 +805,14 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
if (DC->isTranslationUnit())
return;
+ if (const BlockDecl *Block = dyn_cast<BlockDecl>(DC)) {
+ manglePrefix(DC->getParent(), NoFunction);
+ llvm::SmallString<64> Name;
+ Context.mangleBlock(Block, Name);
+ Out << Name.size() << Name;
+ return;
+ }
+
if (mangleSubstitution(cast<NamedDecl>(DC)))
return;
@@ -762,8 +823,10 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
TemplateParameterList *TemplateParameters = TD->getTemplateParameters();
mangleTemplateArgs(*TemplateParameters, *TemplateArgs);
}
- else if(NoFunction && isa<FunctionDecl>(DC))
+ else if(NoFunction && (isa<FunctionDecl>(DC) || isa<ObjCMethodDecl>(DC)))
return;
+ else if (const ObjCMethodDecl *Method = dyn_cast<ObjCMethodDecl>(DC))
+ mangleObjCMethodName(Method);
else {
manglePrefix(DC->getParent(), NoFunction);
mangleUnqualifiedName(cast<NamedDecl>(DC));
@@ -942,18 +1005,9 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
}
void CXXNameMangler::mangleObjCMethodName(const ObjCMethodDecl *MD) {
- llvm::SmallString<64> Name;
- llvm::raw_svector_ostream OS(Name);
-
- const ObjCContainerDecl *CD =
- dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
- assert (CD && "Missing container decl in GetNameForMethod");
- OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
- if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
- OS << '(' << CID << ')';
- OS << ' ' << MD->getSelector().getAsString() << ']';
-
- Out << OS.str().size() << OS.str();
+ llvm::SmallString<64> Buffer;
+ MiscNameMangler(Context, Buffer).mangleObjCMethodName(MD);
+ Out << Buffer;
}
void CXXNameMangler::mangleType(QualType T) {
@@ -989,6 +1043,11 @@ void CXXNameMangler::mangleType(QualType T) {
addSubstitution(T);
}
+void CXXNameMangler::mangleNameOrStandardSubstitution(const NamedDecl *ND) {
+ if (!mangleStandardSubstitution(ND))
+ mangleName(ND);
+}
+
void CXXNameMangler::mangleType(const BuiltinType *T) {
// <type> ::= <builtin-type>
// <builtin-type> ::= v # void
@@ -1206,6 +1265,12 @@ void CXXNameMangler::mangleType(const ObjCInterfaceType *T) {
mangleSourceName(T->getDecl()->getIdentifier());
}
+void CXXNameMangler::mangleType(const ObjCObjectType *T) {
+ // We don't allow overloading by different protocol qualification,
+ // so mangling them isn't necessary.
+ mangleType(T->getBaseType());
+}
+
void CXXNameMangler::mangleType(const BlockPointerType *T) {
Out << "U13block_pointer";
mangleType(T->getPointeeType());
@@ -1369,7 +1434,7 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
#define EXPR(Type, Base)
#define STMT(Type, Base) \
case Expr::Type##Class:
-#include "clang/AST/StmtNodes.def"
+#include "clang/AST/StmtNodes.inc"
llvm_unreachable("unexpected statement kind");
break;
@@ -2027,6 +2092,12 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
Mangler.mangle(D);
}
+void MangleContext::mangleBlock(const BlockDecl *BD,
+ llvm::SmallVectorImpl<char> &Res) {
+ MiscNameMangler Mangler(*this, Res);
+ Mangler.mangleBlock(BD);
+}
+
void MangleContext::mangleThunk(const CXXMethodDecl *MD,
const ThunkInfo &Thunk,
llvm::SmallVectorImpl<char> &Res) {
@@ -2089,7 +2160,7 @@ void MangleContext::mangleCXXVTable(const CXXRecordDecl *RD,
// <special-name> ::= TV <type> # virtual table
CXXNameMangler Mangler(*this, Res);
Mangler.getStream() << "_ZTV";
- Mangler.mangleName(RD);
+ Mangler.mangleNameOrStandardSubstitution(RD);
}
void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
@@ -2097,7 +2168,7 @@ void MangleContext::mangleCXXVTT(const CXXRecordDecl *RD,
// <special-name> ::= TT <type> # VTT structure
CXXNameMangler Mangler(*this, Res);
Mangler.getStream() << "_ZTT";
- Mangler.mangleName(RD);
+ Mangler.mangleNameOrStandardSubstitution(RD);
}
void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
@@ -2106,10 +2177,10 @@ void MangleContext::mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
// <special-name> ::= TC <type> <offset number> _ <base type>
CXXNameMangler Mangler(*this, Res);
Mangler.getStream() << "_ZTC";
- Mangler.mangleName(RD);
+ Mangler.mangleNameOrStandardSubstitution(RD);
Mangler.getStream() << Offset;
Mangler.getStream() << '_';
- Mangler.mangleName(Type);
+ Mangler.mangleNameOrStandardSubstitution(Type);
}
void MangleContext::mangleCXXRTTI(QualType Ty,
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
index da3626fc5d37..f1c5358bdd85 100644
--- a/lib/CodeGen/Mangle.h
+++ b/lib/CodeGen/Mangle.h
@@ -23,14 +23,17 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Support/raw_ostream.h"
namespace clang {
class ASTContext;
+ class BlockDecl;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
class FunctionDecl;
class NamedDecl;
+ class ObjCMethodDecl;
class VarDecl;
namespace CodeGen {
@@ -63,7 +66,7 @@ private:
llvm::StringRef String;
llvm::SmallString<256> Buffer;
};
-
+
/// MangleContext - Context for tracking state which persists across multiple
/// calls to the C++ name mangler.
class MangleContext {
@@ -73,6 +76,8 @@ class MangleContext {
llvm::DenseMap<const TagDecl *, uint64_t> AnonStructIds;
unsigned Discriminator;
llvm::DenseMap<const NamedDecl*, unsigned> Uniquifier;
+ llvm::DenseMap<const BlockDecl*, unsigned> GlobalBlockIds;
+ llvm::DenseMap<const BlockDecl*, unsigned> LocalBlockIds;
public:
explicit MangleContext(ASTContext &Context,
@@ -83,6 +88,8 @@ public:
Diagnostic &getDiags() const { return Diags; }
+ void startNewFunction() { LocalBlockIds.clear(); }
+
uint64_t getAnonymousStructId(const TagDecl *TD) {
std::pair<llvm::DenseMap<const TagDecl *,
uint64_t>::iterator, bool> Result =
@@ -90,31 +97,42 @@ public:
return Result.first->second;
}
+ unsigned getBlockId(const BlockDecl *BD, bool Local) {
+ llvm::DenseMap<const BlockDecl *, unsigned> &BlockIds
+ = Local? LocalBlockIds : GlobalBlockIds;
+ std::pair<llvm::DenseMap<const BlockDecl *, unsigned>::iterator, bool>
+ Result = BlockIds.insert(std::make_pair(BD, BlockIds.size()));
+ return Result.first->second;
+ }
+
/// @name Mangler Entry Points
/// @{
bool shouldMangleDeclName(const NamedDecl *D);
-
- void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
- void mangleThunk(const CXXMethodDecl *MD,
- const ThunkInfo &Thunk,
- llvm::SmallVectorImpl<char> &);
- void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
- const ThisAdjustment &ThisAdjustment,
+ virtual void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
+ virtual void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
llvm::SmallVectorImpl<char> &);
- void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &);
- void mangleCXXVTable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
- void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
- void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
- const CXXRecordDecl *Type,
- llvm::SmallVectorImpl<char> &);
- void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
- void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
- void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
- llvm::SmallVectorImpl<char> &);
- void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
- llvm::SmallVectorImpl<char> &);
-
+ virtual void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleGuardVariable(const VarDecl *D,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTable(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXVTT(const CXXRecordDecl *RD,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtorVTable(const CXXRecordDecl *RD, int64_t Offset,
+ const CXXRecordDecl *Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTI(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXRTTIName(QualType T, llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXCtor(const CXXConstructorDecl *D, CXXCtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ virtual void mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
+ llvm::SmallVectorImpl<char> &);
+ void mangleBlock(const BlockDecl *BD, llvm::SmallVectorImpl<char> &);
+
void mangleInitDiscriminator() {
Discriminator = 0;
}
@@ -130,7 +148,23 @@ public:
}
/// @}
};
+
+/// MiscNameMangler - Mangles Objective-C method names and blocks.
+class MiscNameMangler {
+ MangleContext &Context;
+ llvm::raw_svector_ostream Out;
+ ASTContext &getASTContext() const { return Context.getASTContext(); }
+
+public:
+ MiscNameMangler(MangleContext &C, llvm::SmallVectorImpl<char> &Res);
+
+ llvm::raw_svector_ostream &getStream() { return Out; }
+
+ void mangleBlock(const BlockDecl *BD);
+ void mangleObjCMethodName(const ObjCMethodDecl *MD);
+};
+
}
}
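Illustration (not part of this patch): the GCC-compatible block-naming scheme that MiscNameMangler::mangleBlock implements above numbers blocks per enclosing function, falling back to a global counter at file scope. A minimal sketch, assuming the code is compiled with -fblocks; the exact counter values come from MangleContext::getBlockId:

  // illustration only -- approximate names produced by mangleBlock()
  void foo(void) {
    void (^a)(void) = ^{};   // roughly "__foo_block_invoke_0"
    void (^b)(void) = ^{};   // roughly "__foo_block_invoke_1"
  }
  void (^g)(void) = ^{};     // file scope: roughly "__block_global_0"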
diff --git a/lib/CodeGen/ModuleBuilder.cpp b/lib/CodeGen/ModuleBuilder.cpp
index 1e1edc1c482f..9905ca6f1dc8 100644
--- a/lib/CodeGen/ModuleBuilder.cpp
+++ b/lib/CodeGen/ModuleBuilder.cpp
@@ -89,6 +89,13 @@ namespace {
Builder->EmitTentativeDefinition(D);
}
+
+ virtual void HandleVTable(CXXRecordDecl *RD, bool DefinitionRequired) {
+ if (Diags.hasErrorOccurred())
+ return;
+
+ Builder->EmitVTable(RD, DefinitionRequired);
+ }
};
}
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index e1fdf86eb026..b29d3cb00c4a 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -23,6 +23,18 @@
using namespace clang;
using namespace CodeGen;
+static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
+ llvm::Value *Array,
+ llvm::Value *Value,
+ unsigned FirstIndex,
+ unsigned LastIndex) {
+ // Alternatively, we could emit this as a loop in the source.
+ for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
+ llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
+ Builder.CreateStore(Value, Cell);
+ }
+}
+
ABIInfo::~ABIInfo() {}
void ABIArgInfo::dump() const {
@@ -71,6 +83,17 @@ static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT))
FT = AT->getElementType();
+ const RecordType *RT = FT->getAs<RecordType>();
+ if (!RT)
+ return false;
+
+ // C++ record fields are never empty, at least in the Itanium ABI.
+ //
+ // FIXME: We should use a predicate for whether this behavior is true in the
+ // current ABI.
+ if (isa<CXXRecordDecl>(RT->getDecl()))
+ return false;
+
return isEmptyRecord(Context, FT, AllowArrays);
}
@@ -84,6 +107,14 @@ static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
const RecordDecl *RD = RT->getDecl();
if (RD->hasFlexibleArrayMember())
return false;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i)
+ if (!isEmptyRecord(Context, i->getType(), true))
+ return false;
+
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i)
if (!isEmptyField(Context, *i, AllowArrays))
@@ -130,6 +161,28 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
return 0;
const Type *Found = 0;
+
+ // If this is a C++ record, check the bases first.
+ if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (CXXRecordDecl::base_class_const_iterator i = CXXRD->bases_begin(),
+ e = CXXRD->bases_end(); i != e; ++i) {
+ // Ignore empty records.
+ if (isEmptyRecord(Context, i->getType(), true))
+ continue;
+
+ // If we already found an element then this isn't a single-element struct.
+ if (Found)
+ return 0;
+
+ // If this is non-empty and not a single element struct, the composite
+ // cannot be a single element struct.
+ Found = isSingleElementStruct(i->getType(), Context);
+ if (!Found)
+ return 0;
+ }
+ }
+
+ // Check for single element.
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
i != e; ++i) {
const FieldDecl *FD = *i;
@@ -164,7 +217,7 @@ static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
- if (!Ty->getAs<BuiltinType>() && !Ty->isAnyPointerType() &&
+ if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
!Ty->isAnyComplexType() && !Ty->isEnumeralType() &&
!Ty->isBlockPointerType())
return false;
@@ -212,23 +265,6 @@ static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
return true;
}
-static bool typeContainsSSEVector(const RecordDecl *RD, ASTContext &Context) {
- for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
- i != e; ++i) {
- const FieldDecl *FD = *i;
-
- if (FD->getType()->isVectorType() &&
- Context.getTypeSize(FD->getType()) >= 128)
- return true;
-
- if (const RecordType* RT = FD->getType()->getAs<RecordType>())
- if (typeContainsSSEVector(RT->getDecl(), Context))
- return true;
- }
-
- return false;
-}
-
namespace {
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
@@ -363,10 +399,11 @@ bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
return true;
}
- // If this is a builtin, pointer, enum, or complex type, it is ok.
- if (Ty->getAs<BuiltinType>() || Ty->isAnyPointerType() ||
+ // If this is a builtin, pointer, enum, complex type, member pointer, or
+ // member function pointer, it is ok.
+ if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
Ty->isAnyComplexType() || Ty->isEnumeralType() ||
- Ty->isBlockPointerType())
+ Ty->isBlockPointerType() || Ty->isMemberPointerType())
return true;
// Arrays are treated like records.
@@ -596,20 +633,14 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// 0-7 are the eight integer registers; the order is different
// on Darwin (for EH), but the range is the same.
// 8 is %eip.
- for (unsigned I = 0, E = 9; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Four8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Four8, 0, 8);
if (CGF.CGM.isTargetDarwin()) {
// 12-16 are st(0..4). Not sure why we stop at 4.
// These have size 16, which is sizeof(long double) on
// platforms with 8-byte alignment for that type.
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
- for (unsigned I = 12, E = 17; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Sixteen8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
} else {
// 9 is %eflags, which doesn't get a size on Darwin for some
@@ -620,11 +651,8 @@ bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
// These have size 12, which is sizeof(long double) on
// platforms with 4-byte alignment for that type.
llvm::Value *Twelve8 = llvm::ConstantInt::get(i8, 12);
- for (unsigned I = 11, E = 17; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Twelve8, Slot);
- }
- }
+ AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
+ }
return false;
}
@@ -733,12 +761,9 @@ public:
const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
- // 0-16 are the 16 integer registers.
- // 17 is %rip.
- for (unsigned I = 0, E = 17; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Eight8, Slot);
- }
+ // 0-15 are the 16 integer registers.
+ // 16 is %rip.
+ AssignToArrayRange(Builder, Address, Eight8, 0, 16);
return false;
}
@@ -828,6 +853,11 @@ void X86_64ABIInfo::classify(QualType Ty,
classify(ET->getDecl()->getIntegerType(), Context, OffsetBase, Lo, Hi);
} else if (Ty->hasPointerRepresentation()) {
Current = Integer;
+ } else if (Ty->isMemberPointerType()) {
+ if (Ty->isMemberFunctionPointerType())
+ Lo = Hi = Integer;
+ else
+ Current = Integer;
} else if (const VectorType *VT = Ty->getAs<VectorType>()) {
uint64_t Size = Context.getTypeSize(VT);
if (Size == 32) {
@@ -1661,16 +1691,10 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
// 0-31: r0-31, the 4-byte general-purpose registers
- for (unsigned I = 0, E = 32; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Four8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Four8, 0, 31);
// 32-63: fp0-31, the 8-byte floating-point registers
- for (unsigned I = 32, E = 64; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Eight8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Eight8, 32, 63);
// 64-76 are various 4-byte special-purpose registers:
// 64: mq
@@ -1679,26 +1703,17 @@ PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
// 67: ap
// 68-75 cr0-7
// 76: xer
- for (unsigned I = 64, E = 77; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Four8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Four8, 64, 76);
// 77-108: v0-31, the 16-byte vector registers
- for (unsigned I = 77, E = 109; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Sixteen8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
// 109: vrsave
// 110: vscr
// 111: spe_acc
// 112: spefscr
// 113: sfp
- for (unsigned I = 109, E = 114; I != E; ++I) {
- llvm::Value *Slot = Builder.CreateConstInBoundsGEP1_32(Address, I);
- Builder.CreateStore(Four8, Slot);
- }
+ AssignToArrayRange(Builder, Address, Four8, 109, 113);
return false;
}
@@ -2123,6 +2138,56 @@ void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
}
}
+// MIPS ABI Implementation. This works for both little-endian and
+// big-endian variants.
+namespace {
+class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ MIPSTargetCodeGenInfo(): TargetCodeGenInfo(new DefaultABIInfo()) {}
+
+ int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const {
+ return 29;
+ }
+
+ bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const;
+};
+}
+
+bool
+MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ // This information comes from gcc's implementation, which seems to be
+ // as canonical as it gets.
+
+ CodeGen::CGBuilderTy &Builder = CGF.Builder;
+ llvm::LLVMContext &Context = CGF.getLLVMContext();
+
+ // Everything on MIPS is 4 bytes. Double-precision FP registers
+ // are aliased to pairs of single-precision FP registers.
+ const llvm::IntegerType *i8 = llvm::Type::getInt8Ty(Context);
+ llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
+
+ // 0-31 are the general purpose registers, $0 - $31.
+ // 32-63 are the floating-point registers, $f0 - $f31.
+ // 64 and 65 are the multiply/divide registers, $hi and $lo.
+ // 66 is the (notional, I think) register for signal-handler return.
+ AssignToArrayRange(Builder, Address, Four8, 0, 65);
+
+ // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
+ // They are one bit wide and ignored here.
+
+ // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
+ // (coprocessor 1 is the FP unit)
+ // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
+ // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
+ // 176-181 are the DSP accumulator registers.
+ AssignToArrayRange(Builder, Address, Four8, 80, 181);
+
+ return false;
+}
+
+
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
if (TheTargetCodeGenInfo)
return *TheTargetCodeGenInfo;
@@ -2135,6 +2200,10 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() const {
default:
return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo);
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo());
+
case llvm::Triple::arm:
case llvm::Triple::thumb:
// FIXME: We want to know the float calling convention as well.
diff --git a/lib/Driver/ArgList.cpp b/lib/Driver/ArgList.cpp
index 95805b016b1f..3d07431209e2 100644
--- a/lib/Driver/ArgList.cpp
+++ b/lib/Driver/ArgList.cpp
@@ -9,12 +9,14 @@
#include "clang/Driver/ArgList.h"
#include "clang/Driver/Arg.h"
+#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Option.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/Twine.h"
#include "llvm/Support/raw_ostream.h"
+using namespace clang;
using namespace clang::driver;
void arg_iterator::SkipToNextArg() {
@@ -108,6 +110,32 @@ bool ArgList::hasFlag(OptSpecifier Pos, OptSpecifier Neg, bool Default) const {
return Default;
}
+llvm::StringRef ArgList::getLastArgValue(OptSpecifier Id,
+ llvm::StringRef Default) const {
+ if (Arg *A = getLastArg(Id))
+ return A->getValue(*this);
+ return Default;
+}
+
+int ArgList::getLastArgIntValue(OptSpecifier Id, int Default,
+ clang::Diagnostic &Diags) const {
+ int Res = Default;
+
+ if (Arg *A = getLastArg(Id)) {
+ if (llvm::StringRef(A->getValue(*this)).getAsInteger(10, Res))
+ Diags.Report(diag::err_drv_invalid_int_value)
+ << A->getAsString(*this) << A->getValue(*this);
+ }
+
+ return Res;
+}
+
+std::vector<std::string> ArgList::getAllArgValues(OptSpecifier Id) const {
+ llvm::SmallVector<const char *, 16> Values;
+ AddAllArgValues(Values, Id);
+ return std::vector<std::string>(Values.begin(), Values.end());
+}
+
void ArgList::AddLastArg(ArgStringList &Output, OptSpecifier Id) const {
if (Arg *A = getLastArg(Id)) {
A->claim();
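Illustration (not part of this patch): a typical use of the new getLastArgIntValue helper added above, assuming a hypothetical integer-valued option OPT_foo_EQ exists in the driver's option table:

  // illustration only; OPT_foo_EQ is a hypothetical option id
  int N = Args.getLastArgIntValue(options::OPT_foo_EQ, /*Default=*/4, Diags);
  // A malformed value such as -foo=abc keeps the default and reports
  // err_drv_invalid_int_value.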
diff --git a/lib/Driver/CC1AsOptions.cpp b/lib/Driver/CC1AsOptions.cpp
new file mode 100644
index 000000000000..90c69ff0d62a
--- /dev/null
+++ b/lib/Driver/CC1AsOptions.cpp
@@ -0,0 +1,39 @@
+//===--- CC1AsOptions.cpp - Clang Assembler Options Table -----------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Driver/CC1AsOptions.h"
+#include "clang/Driver/Option.h"
+#include "clang/Driver/OptTable.h"
+using namespace clang;
+using namespace clang::driver;
+using namespace clang::driver::options;
+using namespace clang::driver::cc1asoptions;
+
+static const OptTable::Info CC1AsInfoTable[] = {
+#define OPTION(NAME, ID, KIND, GROUP, ALIAS, FLAGS, PARAM, \
+ HELPTEXT, METAVAR) \
+ { NAME, HELPTEXT, METAVAR, Option::KIND##Class, FLAGS, PARAM, \
+ OPT_##GROUP, OPT_##ALIAS },
+#include "clang/Driver/CC1AsOptions.inc"
+};
+
+namespace {
+
+class CC1AsOptTable : public OptTable {
+public:
+ CC1AsOptTable()
+ : OptTable(CC1AsInfoTable,
+ sizeof(CC1AsInfoTable) / sizeof(CC1AsInfoTable[0])) {}
+};
+
+}
+
+OptTable *clang::driver::createCC1AsOptTable() {
+ return new CC1AsOptTable();
+}
diff --git a/lib/Driver/CC1Options.cpp b/lib/Driver/CC1Options.cpp
index 0e98bb9c113b..14cf0904c45b 100644
--- a/lib/Driver/CC1Options.cpp
+++ b/lib/Driver/CC1Options.cpp
@@ -1,4 +1,4 @@
-//===--- CC1Options.cpp - Clang CC1 Options Table -----------------------*-===//
+//===--- CC1Options.cpp - Clang CC1 Options Table -------------------------===//
//
// The LLVM Compiler Infrastructure
//
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 7efcd8a8dde4..5af754d7d142 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -5,6 +5,7 @@ add_clang_library(clangDriver
Arg.cpp
ArgList.cpp
CC1Options.cpp
+ CC1AsOptions.cpp
Compilation.cpp
Driver.cpp
DriverOptions.cpp
@@ -20,4 +21,5 @@ add_clang_library(clangDriver
Types.cpp
)
-add_dependencies(clangDriver ClangDiagnosticDriver ClangDriverOptions ClangCC1Options)
+add_dependencies(clangDriver ClangDiagnosticDriver ClangDriverOptions
+ ClangCC1Options ClangCC1AsOptions)
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 7371a930c337..da83803dd701 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -227,28 +227,31 @@ int Driver::ExecuteCompilation(const Compilation &C) const {
// Remove temp files.
C.CleanupFileList(C.getTempFiles());
- // If the compilation failed, remove result files as well.
- if (Res != 0 && !C.getArgs().hasArg(options::OPT_save_temps))
+ // If the command succeeded, we are done.
+ if (Res == 0)
+ return Res;
+
+ // Otherwise, remove result files as well.
+ if (!C.getArgs().hasArg(options::OPT_save_temps))
C.CleanupFileList(C.getResultFiles(), true);
// Print extra information about abnormal failures, if possible.
- if (Res) {
- // This is ad-hoc, but we don't want to be excessively noisy. If the result
- // status was 1, assume the command failed normally. In particular, if it
- // was the compiler then assume it gave a reasonable error code. Failures in
- // other tools are less common, and they generally have worse diagnostics,
- // so always print the diagnostic there.
- const Action &Source = FailingCommand->getSource();
-
- if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
- // FIXME: See FIXME above regarding result code interpretation.
- if (Res < 0)
- Diag(clang::diag::err_drv_command_signalled)
- << Source.getClassName() << -Res;
- else
- Diag(clang::diag::err_drv_command_failed)
- << Source.getClassName() << Res;
- }
+ //
+ // This is ad-hoc, but we don't want to be excessively noisy. If the result
+ // status was 1, assume the command failed normally. In particular, if it was
+ // the compiler then assume it gave a reasonable error code. Failures in other
+ // tools are less common, and they generally have worse diagnostics, so always
+ // print the diagnostic there.
+ const Tool &FailingTool = FailingCommand->getCreator();
+
+ if (!FailingCommand->getCreator().hasGoodDiagnostics() || Res != 1) {
+ // FIXME: See FIXME above regarding result code interpretation.
+ if (Res < 0)
+ Diag(clang::diag::err_drv_command_signalled)
+ << FailingTool.getShortName() << -Res;
+ else
+ Diag(clang::diag::err_drv_command_failed)
+ << FailingTool.getShortName() << Res;
}
return Res;
@@ -291,6 +294,14 @@ void Driver::PrintVersion(const Compilation &C, llvm::raw_ostream &OS) const {
OS << "Thread model: " << "posix" << '\n';
}
+/// PrintDiagnosticCategories - Implement the --print-diagnostic-categories
+/// option.
+static void PrintDiagnosticCategories(llvm::raw_ostream &OS) {
+ for (unsigned i = 1; // Skip the empty category.
+ const char *CategoryName = Diagnostic::getCategoryNameFromID(i); ++i)
+ OS << i << ',' << CategoryName << '\n';
+}
+
bool Driver::HandleImmediateArgs(const Compilation &C) {
// The order these options are handled in in gcc is all over the place, but we
// don't expect inconsistencies w.r.t. that to matter in practice.
@@ -299,6 +310,11 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
llvm::outs() << CLANG_VERSION_STRING "\n";
return false;
}
+
+ if (C.getArgs().hasArg(options::OPT__print_diagnostic_categories)) {
+ PrintDiagnosticCategories(llvm::outs());
+ return false;
+ }
if (C.getArgs().hasArg(options::OPT__help) ||
C.getArgs().hasArg(options::OPT__help_hidden)) {
@@ -889,9 +905,16 @@ static const Tool &SelectToolForJob(Compilation &C, const ToolChain *TC,
// See if we should look for a compiler with an integrated assembler. We match
// bottom up, so what we are actually looking for is an assembler job with a
// compiler input.
- if (C.getArgs().hasArg(options::OPT_integrated_as,
+
+ // FIXME: This doesn't belong here, but ideally we will support static soon
+ // anyway.
+ bool HasStatic = (C.getArgs().hasArg(options::OPT_mkernel) ||
+ C.getArgs().hasArg(options::OPT_static) ||
+ C.getArgs().hasArg(options::OPT_fapple_kext));
+ bool IsIADefault = (TC->IsIntegratedAssemblerDefault() && !HasStatic);
+ if (C.getArgs().hasFlag(options::OPT_integrated_as,
options::OPT_no_integrated_as,
- TC->IsIntegratedAssemblerDefault()) &&
+ IsIADefault) &&
!C.getArgs().hasArg(options::OPT_save_temps) &&
isa<AssembleJobAction>(JA) &&
Inputs->size() == 1 && isa<CompileJobAction>(*Inputs->begin())) {
diff --git a/lib/Driver/Tool.cpp b/lib/Driver/Tool.cpp
index 781e0a702060..fe01531d9a2a 100644
--- a/lib/Driver/Tool.cpp
+++ b/lib/Driver/Tool.cpp
@@ -11,8 +11,10 @@
using namespace clang::driver;
-Tool::Tool(const char *_Name, const ToolChain &TC) : Name(_Name),
- TheToolChain(TC) {
+Tool::Tool(const char *_Name, const char *_ShortName,
+ const ToolChain &TC) : Name(_Name), ShortName(_ShortName),
+ TheToolChain(TC)
+{
}
Tool::~Tool() {
diff --git a/lib/Driver/ToolChains.cpp b/lib/Driver/ToolChains.cpp
index 1cd8ee186cae..abb55b07d106 100644
--- a/lib/Driver/ToolChains.cpp
+++ b/lib/Driver/ToolChains.cpp
@@ -11,6 +11,7 @@
#include "clang/Driver/Arg.h"
#include "clang/Driver/ArgList.h"
+#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/HostInfo.h"
@@ -190,6 +191,16 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA) const {
else
Key = JA.getKind();
+ // FIXME: This doesn't belong here, but ideally we will support static soon
+ // anyway.
+ bool HasStatic = (C.getArgs().hasArg(options::OPT_mkernel) ||
+ C.getArgs().hasArg(options::OPT_static) ||
+ C.getArgs().hasArg(options::OPT_fapple_kext));
+ bool IsIADefault = IsIntegratedAssemblerDefault() && !HasStatic;
+ bool UseIntegratedAs = C.getArgs().hasFlag(options::OPT_integrated_as,
+ options::OPT_no_integrated_as,
+ IsIADefault);
+
Tool *&T = Tools[Key];
if (!T) {
switch (Key) {
@@ -203,8 +214,13 @@ Tool &Darwin::SelectTool(const Compilation &C, const JobAction &JA) const {
case Action::PrecompileJobClass:
case Action::CompileJobClass:
T = new tools::darwin::Compile(*this); break;
- case Action::AssembleJobClass:
- T = new tools::darwin::Assemble(*this); break;
+ case Action::AssembleJobClass: {
+ if (UseIntegratedAs)
+ T = new tools::ClangAs(*this);
+ else
+ T = new tools::darwin::Assemble(*this);
+ break;
+ }
case Action::LinkJobClass:
T = new tools::darwin::Link(*this); break;
case Action::LipoJobClass:
diff --git a/lib/Driver/ToolChains.h b/lib/Driver/ToolChains.h
index 9acc950879d5..ad975bfe6dee 100644
--- a/lib/Driver/ToolChains.h
+++ b/lib/Driver/ToolChains.h
@@ -25,7 +25,7 @@ namespace toolchains {
/// Generic_GCC - A tool chain using the 'gcc' command to perform
/// all subcommands; this relies on gcc translating the majority of
/// command line options.
-class VISIBILITY_HIDDEN Generic_GCC : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY Generic_GCC : public ToolChain {
protected:
mutable llvm::DenseMap<unsigned, Tool*> Tools;
@@ -44,7 +44,7 @@ public:
};
/// Darwin - The base Darwin tool chain.
-class VISIBILITY_HIDDEN Darwin : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY Darwin : public ToolChain {
mutable llvm::DenseMap<unsigned, Tool*> Tools;
/// Whether the information on the target has been initialized.
@@ -56,7 +56,7 @@ class VISIBILITY_HIDDEN Darwin : public ToolChain {
/// Whether we are targetting iPhoneOS target.
mutable bool TargetIsIPhoneOS;
-
+
/// The OS version we are targetting.
mutable unsigned TargetVersion[3];
@@ -159,6 +159,11 @@ public:
else
return !isMacosxVersionLT(10, 6);
}
+ virtual bool IsIntegratedAssemblerDefault() const {
+ // Default integrated assembler to on for x86.
+ return (getTriple().getArch() == llvm::Triple::x86 ||
+ getTriple().getArch() == llvm::Triple::x86_64);
+ }
virtual bool IsObjCNonFragileABIDefault() const {
// Non-fragile ABI is default for everything but i386.
return getTriple().getArch() != llvm::Triple::x86;
@@ -193,7 +198,7 @@ public:
};
/// DarwinClang - The Darwin toolchain used by Clang.
-class VISIBILITY_HIDDEN DarwinClang : public Darwin {
+class LLVM_LIBRARY_VISIBILITY DarwinClang : public Darwin {
public:
DarwinClang(const HostInfo &Host, const llvm::Triple& Triple,
const unsigned (&DarwinVersion)[3]);
@@ -211,7 +216,7 @@ public:
};
/// DarwinGCC - The Darwin toolchain used by GCC.
-class VISIBILITY_HIDDEN DarwinGCC : public Darwin {
+class LLVM_LIBRARY_VISIBILITY DarwinGCC : public Darwin {
/// GCC version to use.
unsigned GCCVersion[3];
@@ -236,7 +241,7 @@ public:
};
/// Darwin_Generic_GCC - Generic Darwin tool chain using gcc.
-class VISIBILITY_HIDDEN Darwin_Generic_GCC : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY Darwin_Generic_GCC : public Generic_GCC {
public:
Darwin_Generic_GCC(const HostInfo &Host, const llvm::Triple& Triple)
: Generic_GCC(Host, Triple) {}
@@ -244,35 +249,35 @@ public:
virtual const char *GetDefaultRelocationModel() const { return "pic"; }
};
-class VISIBILITY_HIDDEN AuroraUX : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY AuroraUX : public Generic_GCC {
public:
AuroraUX(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class VISIBILITY_HIDDEN OpenBSD : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY OpenBSD : public Generic_GCC {
public:
OpenBSD(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class VISIBILITY_HIDDEN FreeBSD : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY FreeBSD : public Generic_GCC {
public:
FreeBSD(const HostInfo &Host, const llvm::Triple& Triple, bool Lib32);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class VISIBILITY_HIDDEN DragonFly : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY DragonFly : public Generic_GCC {
public:
DragonFly(const HostInfo &Host, const llvm::Triple& Triple);
virtual Tool &SelectTool(const Compilation &C, const JobAction &JA) const;
};
-class VISIBILITY_HIDDEN Linux : public Generic_GCC {
+class LLVM_LIBRARY_VISIBILITY Linux : public Generic_GCC {
public:
Linux(const HostInfo &Host, const llvm::Triple& Triple);
};
@@ -280,7 +285,7 @@ public:
/// TCEToolChain - A tool chain using the llvm bitcode tools to perform
/// all subcommands. See http://tce.cs.tut.fi for our peculiar target.
-class VISIBILITY_HIDDEN TCEToolChain : public ToolChain {
+class LLVM_LIBRARY_VISIBILITY TCEToolChain : public ToolChain {
public:
TCEToolChain(const HostInfo &Host, const llvm::Triple& Triple);
~TCEToolChain();
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 5a3916d19ea7..918f0d9715e7 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -21,7 +21,6 @@
#include "clang/Driver/Options.h"
#include "clang/Driver/ToolChain.h"
#include "clang/Driver/Util.h"
-#include "clang/Frontend/DiagnosticOptions.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringSwitch.h"
@@ -769,6 +768,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-E");
} else if (isa<AssembleJobAction>(JA)) {
CmdArgs.push_back("-emit-obj");
+
+ // At -O0, we use -mrelax-all by default.
+ bool IsOpt = false;
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ IsOpt = !A->getOption().matches(options::OPT_O0);
+ if (Args.hasFlag(options::OPT_mrelax_all,
+ options::OPT_mno_relax_all,
+ !IsOpt))
+ CmdArgs.push_back("-mrelax-all");
} else if (isa<PrecompileJobAction>(JA)) {
// Use PCH if the user requested it, except for C++ (for now).
bool UsePCH = D.CCCUsePCH;
@@ -911,8 +919,15 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
options::OPT_fno_zero_initialized_in_bss))
CmdArgs.push_back("-mno-zero-initialized-in-bss");
- if (Args.hasArg(options::OPT_dA) || Args.hasArg(options::OPT_fverbose_asm))
+
+ // Decide whether to use verbose asm. Verbose assembly is the default on
+ // toolchains which have the integrated assembler on by default.
+ bool IsVerboseAsmDefault = getToolChain().IsIntegratedAssemblerDefault();
+ if (Args.hasFlag(options::OPT_fverbose_asm, options::OPT_fno_verbose_asm,
+ IsVerboseAsmDefault) ||
+ Args.hasArg(options::OPT_dA))
CmdArgs.push_back("-masm-verbose");
+
if (Args.hasArg(options::OPT_fdebug_pass_structure)) {
CmdArgs.push_back("-mdebug-pass");
CmdArgs.push_back("Structure");
@@ -979,6 +994,9 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fmath-errno");
+ // Explicitly error on some things we know we don't support and can't just
+ // ignore.
+ types::ID InputType = Inputs[0].getType();
Arg *Unsupported;
if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
(Unsupported = Args.getLastArg(options::OPT_iframework)) ||
@@ -986,6 +1004,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(clang::diag::err_drv_clang_unsupported)
<< Unsupported->getOption().getName();
+ if (types::isCXX(InputType) &&
+ getToolChain().getTriple().getOS() == llvm::Triple::Darwin &&
+ getToolChain().getTriple().getArch() == llvm::Triple::x86) {
+ if ((Unsupported = Args.getLastArg(options::OPT_fapple_kext)))
+ D.Diag(clang::diag::err_drv_clang_unsupported_opt_cxx_darwin_i386)
+ << Unsupported->getOption().getName();
+ }
+
Args.AddAllArgs(CmdArgs, options::OPT_v);
Args.AddLastArg(CmdArgs, options::OPT_P);
Args.AddLastArg(CmdArgs, options::OPT_print_ivar_layout);
@@ -993,9 +1019,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// Special case debug options to only pass -g to clang. This is
// wrong.
Args.ClaimAllArgs(options::OPT_g_Group);
- Arg *Garg = Args.getLastArg(options::OPT_g_Group);
- if (Garg && Garg != Args.getLastArg(options::OPT_g0))
- CmdArgs.push_back("-g");
+ if (Arg *A = Args.getLastArg(options::OPT_g_Group))
+ if (!A->getOption().matches(options::OPT_g0))
+ CmdArgs.push_back("-g");
+
+ Args.AddAllArgs(CmdArgs, options::OPT_ffunction_sections);
+ Args.AddAllArgs(CmdArgs, options::OPT_fdata_sections);
Args.AddLastArg(CmdArgs, options::OPT_nostdinc);
Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
@@ -1009,7 +1038,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// preprocessor.
//
// FIXME: Support -fpreprocessed
- types::ID InputType = Inputs[0].getType();
if (types::getPreprocessedType(InputType) != types::TY_INVALID)
AddPreprocessingOptions(D, Args, CmdArgs, Output, Inputs);
@@ -1018,7 +1046,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Arg *A = Args.getLastArg(options::OPT_O_Group)) {
if (A->getOption().matches(options::OPT_O4))
CmdArgs.push_back("-O3");
- else if (A->getValue(Args)[0] == '\0')
+ else if (A->getOption().matches(options::OPT_O) &&
+ A->getValue(Args)[0] == '\0')
CmdArgs.push_back("-O2");
else
A->render(Args, CmdArgs);
@@ -1059,6 +1088,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_trigraphs);
}
+ // Translate GCC's misnamed '-fasm' arguments to '-fgnu-keywords'.
+ if (Arg *Asm = Args.getLastArg(options::OPT_fasm, options::OPT_fno_asm)) {
+ if (Asm->getOption().matches(options::OPT_fasm))
+ CmdArgs.push_back("-fgnu-keywords");
+ else
+ CmdArgs.push_back("-fno-gnu-keywords");
+ }
+
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_)) {
CmdArgs.push_back("-ftemplate-depth");
CmdArgs.push_back(A->getValue(Args));
@@ -1083,20 +1120,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("19");
- CmdArgs.push_back("-fmacro-backtrace-limit");
- if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ))
+ if (Arg *A = Args.getLastArg(options::OPT_fmacro_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-fmacro-backtrace-limit");
CmdArgs.push_back(A->getValue(Args));
- else
- CmdArgs.push_back(Args.MakeArgString(
- llvm::Twine(DiagnosticOptions::DefaultMacroBacktraceLimit)));
-
- CmdArgs.push_back("-ftemplate-backtrace-limit");
- if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ))
+ }
+
+ if (Arg *A = Args.getLastArg(options::OPT_ftemplate_backtrace_limit_EQ)) {
+ CmdArgs.push_back("-ftemplate-backtrace-limit");
CmdArgs.push_back(A->getValue(Args));
- else
- CmdArgs.push_back(Args.MakeArgString(
- llvm::Twine(DiagnosticOptions::DefaultTemplateBacktraceLimit)));
-
+ }
+
// Pass -fmessage-length=.
CmdArgs.push_back("-fmessage-length");
if (Arg *A = Args.getLastArg(options::OPT_fmessage_length_EQ)) {
@@ -1265,6 +1298,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fobjc-dispatch-method=non-legacy");
}
}
+
+ // FIXME: -fobjc-nonfragile-abi2 is a transient option meant to expose
+ // features in testing. It will eventually be removed.
+ if (Args.hasArg(options::OPT_fobjc_nonfragile_abi2))
+ CmdArgs.push_back("-fobjc-nonfragile-abi2");
}
if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
@@ -1320,6 +1358,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_diagnostics_show_option))
CmdArgs.push_back("-fdiagnostics-show-option");
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
+ CmdArgs.push_back("-fdiagnostics-show-category");
+ CmdArgs.push_back(A->getValue(Args));
+ }
+
// Color diagnostics are the default, unless the terminal doesn't support
// them.
if (Args.hasFlag(options::OPT_fcolor_diagnostics,
@@ -1451,6 +1495,67 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.ClaimAllArgs(options::OPT_clang_ignored_m_Group);
}
+void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const Driver &D = getToolChain().getDriver();
+ ArgStringList CmdArgs;
+
+ assert(Inputs.size() == 1 && "Unexpected number of inputs.");
+ const InputInfo &Input = Inputs[0];
+
+ // Invoke ourselves in -cc1as mode.
+ //
+ // FIXME: Implement custom jobs for internal actions.
+ CmdArgs.push_back("-cc1as");
+
+ // Add the "effective" target triple.
+ CmdArgs.push_back("-triple");
+ std::string TripleStr = getEffectiveClangTriple(D, getToolChain(), Args);
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
+
+ // Set the output mode; we currently only expect to be used as a real
+ // assembler.
+ CmdArgs.push_back("-filetype");
+ CmdArgs.push_back("obj");
+
+ // At -O0, we use -mrelax-all by default.
+ bool IsOpt = false;
+ if (Arg *A = Args.getLastArg(options::OPT_O_Group))
+ IsOpt = !A->getOption().matches(options::OPT_O0);
+ if (Args.hasFlag(options::OPT_mrelax_all,
+ options::OPT_mno_relax_all,
+ !IsOpt))
+ CmdArgs.push_back("-mrelax-all");
+
+ // FIXME: Add -force_cpusubtype_ALL support, once we have it.
+
+ // FIXME: Add -g support, once we have it.
+
+ // FIXME: Add -static support, once we have it.
+
+ Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
+ options::OPT_Xassembler);
+
+ assert(Output.isFilename() && "Unexpected lipo output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ if (Input.isPipe()) {
+ CmdArgs.push_back("-");
+ } else {
+ assert(Input.isFilename() && "Invalid input.");
+ CmdArgs.push_back(Input.getFilename());
+ }
+
+ const char *Exec =
+ Args.MakeArgString(getToolChain().GetProgramPath(C, "clang"));
+ Dest.addCommand(new Command(JA, *this, Exec, CmdArgs));
+}
+
void gcc::Common::ConstructJob(Compilation &C, const JobAction &JA,
Job &Dest,
const InputInfo &Output,
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index 091fec380622..d5e98dd24c48 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -26,7 +26,8 @@ namespace toolchains {
namespace tools {
- class VISIBILITY_HIDDEN Clang : public Tool {
+ /// \brief Clang compiler tool.
+ class LLVM_LIBRARY_VISIBILITY Clang : public Tool {
void AddPreprocessingOptions(const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs,
@@ -38,7 +39,7 @@ namespace tools {
void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
- Clang(const ToolChain &TC) : Tool("clang", TC) {}
+ Clang(const ToolChain &TC) : Tool("clang", "clang frontend", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -54,11 +55,32 @@ namespace tools {
const char *LinkingOutput) const;
};
+ /// \brief Clang integrated assembler tool.
+ class LLVM_LIBRARY_VISIBILITY ClangAs : public Tool {
+ public:
+ ClangAs(const ToolChain &TC) : Tool("clang::as",
+ "clang integrated assembler", TC) {}
+
+ virtual bool acceptsPipedInput() const { return true; }
+ virtual bool canPipeOutput() const { return true; }
+ virtual bool hasGoodDiagnostics() const { return true; }
+ virtual bool hasIntegratedAssembler() const { return false; }
+ virtual bool hasIntegratedCPP() const { return false; }
+
+ virtual void ConstructJob(Compilation &C, const JobAction &JA,
+ Job &Dest,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &TCArgs,
+ const char *LinkingOutput) const;
+ };
+
/// gcc - Generic GCC tool implementations.
namespace gcc {
- class VISIBILITY_HIDDEN Common : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Common : public Tool {
public:
- Common(const char *Name, const ToolChain &TC) : Tool(Name, TC) {}
+ Common(const char *Name, const char *ShortName,
+ const ToolChain &TC) : Tool(Name, ShortName, TC) {}
virtual void ConstructJob(Compilation &C, const JobAction &JA,
Job &Dest,
@@ -74,9 +96,10 @@ namespace gcc {
};
- class VISIBILITY_HIDDEN Preprocess : public Common {
+ class LLVM_LIBRARY_VISIBILITY Preprocess : public Common {
public:
- Preprocess(const ToolChain &TC) : Common("gcc::Preprocess", TC) {}
+ Preprocess(const ToolChain &TC) : Common("gcc::Preprocess",
+ "gcc preprocessor", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -87,9 +110,10 @@ namespace gcc {
ArgStringList &CmdArgs) const;
};
- class VISIBILITY_HIDDEN Precompile : public Common {
+ class LLVM_LIBRARY_VISIBILITY Precompile : public Common {
public:
- Precompile(const ToolChain &TC) : Common("gcc::Precompile", TC) {}
+ Precompile(const ToolChain &TC) : Common("gcc::Precompile",
+ "gcc precompile", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return false; }
@@ -100,9 +124,10 @@ namespace gcc {
ArgStringList &CmdArgs) const;
};
- class VISIBILITY_HIDDEN Compile : public Common {
+ class LLVM_LIBRARY_VISIBILITY Compile : public Common {
public:
- Compile(const ToolChain &TC) : Common("gcc::Compile", TC) {}
+ Compile(const ToolChain &TC) : Common("gcc::Compile",
+ "gcc frontend", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -113,9 +138,10 @@ namespace gcc {
ArgStringList &CmdArgs) const;
};
- class VISIBILITY_HIDDEN Assemble : public Common {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Common {
public:
- Assemble(const ToolChain &TC) : Common("gcc::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : Common("gcc::Assemble",
+ "assembler (via gcc)", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return false; }
@@ -125,9 +151,10 @@ namespace gcc {
ArgStringList &CmdArgs) const;
};
- class VISIBILITY_HIDDEN Link : public Common {
+ class LLVM_LIBRARY_VISIBILITY Link : public Common {
public:
- Link(const ToolChain &TC) : Common("gcc::Link", TC) {}
+ Link(const ToolChain &TC) : Common("gcc::Link",
+ "linker (via gcc)", TC) {}
virtual bool acceptsPipedInput() const { return false; }
virtual bool canPipeOutput() const { return false; }
@@ -139,7 +166,7 @@ namespace gcc {
} // end namespace gcc
namespace darwin {
- class VISIBILITY_HIDDEN DarwinTool : public Tool {
+ class LLVM_LIBRARY_VISIBILITY DarwinTool : public Tool {
protected:
void AddDarwinArch(const ArgList &Args, ArgStringList &CmdArgs) const;
@@ -148,10 +175,11 @@ namespace darwin {
}
public:
- DarwinTool(const char *Name, const ToolChain &TC) : Tool(Name, TC) {}
+ DarwinTool(const char *Name, const char *ShortName,
+ const ToolChain &TC) : Tool(Name, ShortName, TC) {}
};
- class VISIBILITY_HIDDEN CC1 : public DarwinTool {
+ class LLVM_LIBRARY_VISIBILITY CC1 : public DarwinTool {
public:
static const char *getBaseInputName(const ArgList &Args,
const InputInfoList &Input);
@@ -176,7 +204,8 @@ namespace darwin {
void AddCPPArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
- CC1(const char *Name, const ToolChain &TC) : DarwinTool(Name, TC) {}
+ CC1(const char *Name, const char *ShortName,
+ const ToolChain &TC) : DarwinTool(Name, ShortName, TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -184,9 +213,10 @@ namespace darwin {
virtual bool hasIntegratedCPP() const { return true; }
};
- class VISIBILITY_HIDDEN Preprocess : public CC1 {
+ class LLVM_LIBRARY_VISIBILITY Preprocess : public CC1 {
public:
- Preprocess(const ToolChain &TC) : CC1("darwin::Preprocess", TC) {}
+ Preprocess(const ToolChain &TC) : CC1("darwin::Preprocess",
+ "gcc preprocessor", TC) {}
virtual void ConstructJob(Compilation &C, const JobAction &JA,
Job &Dest,
@@ -196,9 +226,9 @@ namespace darwin {
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Compile : public CC1 {
+ class LLVM_LIBRARY_VISIBILITY Compile : public CC1 {
public:
- Compile(const ToolChain &TC) : CC1("darwin::Compile", TC) {}
+ Compile(const ToolChain &TC) : CC1("darwin::Compile", "gcc frontend", TC) {}
virtual void ConstructJob(Compilation &C, const JobAction &JA,
Job &Dest,
@@ -208,9 +238,10 @@ namespace darwin {
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Assemble : public DarwinTool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public DarwinTool {
public:
- Assemble(const ToolChain &TC) : DarwinTool("darwin::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : DarwinTool("darwin::Assemble",
+ "assembler", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return false; }
@@ -224,11 +255,11 @@ namespace darwin {
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Link : public DarwinTool {
+ class LLVM_LIBRARY_VISIBILITY Link : public DarwinTool {
void AddLinkArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
- Link(const ToolChain &TC) : DarwinTool("darwin::Link", TC) {}
+ Link(const ToolChain &TC) : DarwinTool("darwin::Link", "linker", TC) {}
virtual bool acceptsPipedInput() const { return false; }
virtual bool canPipeOutput() const { return false; }
@@ -242,9 +273,9 @@ namespace darwin {
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Lipo : public DarwinTool {
+ class LLVM_LIBRARY_VISIBILITY Lipo : public DarwinTool {
public:
- Lipo(const ToolChain &TC) : DarwinTool("darwin::Lipo", TC) {}
+ Lipo(const ToolChain &TC) : DarwinTool("darwin::Lipo", "lipo", TC) {}
virtual bool acceptsPipedInput() const { return false; }
virtual bool canPipeOutput() const { return false; }
@@ -261,9 +292,10 @@ namespace darwin {
/// openbsd -- Directly call GNU Binutils assembler and linker
namespace openbsd {
- class VISIBILITY_HIDDEN Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("openbsd::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : Tool("openbsd::Assemble", "assembler",
+ TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -276,9 +308,9 @@ namespace openbsd {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("openbsd::Link", TC) {}
+ Link(const ToolChain &TC) : Tool("openbsd::Link", "linker", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -295,9 +327,10 @@ namespace openbsd {
/// freebsd -- Directly call GNU Binutils assembler and linker
namespace freebsd {
- class VISIBILITY_HIDDEN Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : Tool("freebsd::Assemble", "assembler",
+ TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -310,9 +343,9 @@ namespace freebsd {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("freebsd::Link", TC) {}
+ Link(const ToolChain &TC) : Tool("freebsd::Link", "linker", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -329,9 +362,10 @@ namespace freebsd {
/// auroraux -- Directly call GNU Binutils assembler and linker
namespace auroraux {
- class VISIBILITY_HIDDEN Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("auroraux::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : Tool("auroraux::Assemble", "assembler",
+ TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -344,9 +378,9 @@ namespace auroraux {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("auroraux::Link", TC) {}
+ Link(const ToolChain &TC) : Tool("auroraux::Link", "linker", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -363,9 +397,10 @@ namespace auroraux {
/// dragonfly -- Directly call GNU Binutils assembler and linker
namespace dragonfly {
- class VISIBILITY_HIDDEN Assemble : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", TC) {}
+ Assemble(const ToolChain &TC) : Tool("dragonfly::Assemble", "assembler",
+ TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
@@ -378,9 +413,9 @@ namespace dragonfly {
const ArgList &TCArgs,
const char *LinkingOutput) const;
};
- class VISIBILITY_HIDDEN Link : public Tool {
+ class LLVM_LIBRARY_VISIBILITY Link : public Tool {
public:
- Link(const ToolChain &TC) : Tool("dragonfly::Link", TC) {}
+ Link(const ToolChain &TC) : Tool("dragonfly::Link", "linker", TC) {}
virtual bool acceptsPipedInput() const { return true; }
virtual bool canPipeOutput() const { return true; }
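Throughout Tools.h the VISIBILITY_HIDDEN macro is replaced by LLVM_LIBRARY_VISIBILITY, and every tool constructor now passes a human-readable short name ("clang integrated assembler", "linker (via gcc)", and so on) alongside its internal name. A minimal sketch of that two-string constructor pattern is below; ToolBase and IntegratedAssembler are invented stand-ins, not the real clang::driver::Tool hierarchy.

// Illustrative only: mirrors the Name/ShortName constructor pattern added above.
#include <cstdio>

class ToolBase {
  const char *Name;       // internal identifier, e.g. "clang::as"
  const char *ShortName;  // user-facing description, e.g. "clang integrated assembler"
public:
  ToolBase(const char *Name, const char *ShortName)
      : Name(Name), ShortName(ShortName) {}
  const char *getName() const { return Name; }
  const char *getShortName() const { return ShortName; }
};

class IntegratedAssembler : public ToolBase {
public:
  IntegratedAssembler() : ToolBase("clang::as", "clang integrated assembler") {}
};

int main() {
  IntegratedAssembler AS;
  std::printf("%s (%s)\n", AS.getShortName(), AS.getName());
  return 0;
}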
diff --git a/lib/Frontend/AnalysisConsumer.cpp b/lib/Frontend/AnalysisConsumer.cpp
index 87c3fca24962..6a4727929e7a 100644
--- a/lib/Frontend/AnalysisConsumer.cpp
+++ b/lib/Frontend/AnalysisConsumer.cpp
@@ -174,10 +174,10 @@ public:
Mgr.reset(new AnalysisManager(*Ctx, PP.getDiagnostics(),
PP.getLangOptions(), PD,
CreateStoreMgr, CreateConstraintMgr,
- Opts.MaxNodes,
+ Opts.MaxNodes, Opts.MaxLoop,
Opts.VisualizeEGDot, Opts.VisualizeEGUbi,
Opts.PurgeDead, Opts.EagerlyAssume,
- Opts.TrimGraph));
+ Opts.TrimGraph, Opts.InlineCall));
}
virtual void HandleTranslationUnit(ASTContext &C);
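The AnalysisConsumer hunk threads two additional analyzer options, Opts.MaxLoop and Opts.InlineCall, into the AnalysisManager constructor. A toy sketch of that options-threading pattern follows; AnalyzerOptions and Manager here are simplified stand-ins with illustrative defaults, not clang's real classes or default values.

// Simplified stand-ins; the real AnalysisManager takes many more parameters.
#include <iostream>

struct AnalyzerOptions {
  unsigned MaxNodes = 100000;   // illustrative state-graph node budget
  unsigned MaxLoop = 3;         // new: bound on loop iterations to explore
  bool InlineCall = false;      // new: whether to inline calls during analysis
};

class Manager {
  unsigned MaxNodes, MaxLoop;
  bool InlineCall;
public:
  Manager(unsigned MaxNodes, unsigned MaxLoop, bool InlineCall)
      : MaxNodes(MaxNodes), MaxLoop(MaxLoop), InlineCall(InlineCall) {}
  void dump() const {
    std::cout << "MaxNodes=" << MaxNodes << " MaxLoop=" << MaxLoop
              << " InlineCall=" << InlineCall << '\n';
  }
};

int main() {
  AnalyzerOptions Opts;
  Manager Mgr(Opts.MaxNodes, Opts.MaxLoop, Opts.InlineCall);
  Mgr.dump();
  return 0;
}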
diff --git a/lib/Frontend/BoostConAction.cpp b/lib/Frontend/BoostConAction.cpp
new file mode 100644
index 000000000000..ae150c6ec21e
--- /dev/null
+++ b/lib/Frontend/BoostConAction.cpp
@@ -0,0 +1,39 @@
+//===-- BoostConAction.cpp - BoostCon Workshop Action -----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include <cstdio>
+#include <iostream>
+using namespace clang;
+
+namespace {
+ class BoostConASTConsumer : public ASTConsumer,
+ public RecursiveASTVisitor<BoostConASTConsumer> {
+ public:
+    /// HandleTranslationUnit - This method is called when the ASTs for the
+    /// entire translation unit have been parsed.
+ virtual void HandleTranslationUnit(ASTContext &Ctx);
+
+ bool VisitCXXRecordDecl(CXXRecordDecl *D) {
+ std::cout << D->getNameAsString() << std::endl;
+ return false;
+ }
+ };
+}
+
+ASTConsumer *BoostConAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ return new BoostConASTConsumer();
+}
+
+void BoostConASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
+ fprintf(stderr, "Welcome to BoostCon!\n");
+ Visit(Ctx.getTranslationUnitDecl());
+}
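The new BoostConAction wires a RecursiveASTVisitor-derived ASTConsumer into the frontend: VisitCXXRecordDecl fires as the visitor walks the translation unit, and its bool return value controls the walk (in RecursiveASTVisitor, returning false stops the traversal, so the consumer above prints only the first C++ record it reaches). The standalone sketch below illustrates the same CRTP visitation pattern; Node, TreeVisitor, and NamePrinter are invented for this example and are unrelated to clang's AST classes.

// Hypothetical CRTP walker illustrating the RecursiveASTVisitor pattern used by
// BoostConASTConsumer above; all names here are invented for the sketch.
#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string Name;
  std::vector<Node> Children;
};

template <typename Derived>
class TreeVisitor {
public:
  // Depth-first traversal; stops as soon as a Visit hook returns false.
  bool Traverse(const Node &N) {
    if (!static_cast<Derived *>(this)->VisitNode(N))
      return false;
    for (const Node &Child : N.Children)
      if (!Traverse(Child))
        return false;
    return true;
  }
  bool VisitNode(const Node &) { return true; }  // default: keep walking
};

class NamePrinter : public TreeVisitor<NamePrinter> {
public:
  bool VisitNode(const Node &N) {
    std::cout << N.Name << '\n';
    return true;  // returning false here would abort, as VisitCXXRecordDecl does above
  }
};

int main() {
  Node Root{"TranslationUnit", {{"Foo", {}}, {"Bar", {{"Baz", {}}}}}};
  NamePrinter P;
  P.Traverse(Root);  // prints TranslationUnit, Foo, Bar, Baz
  return 0;
}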
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index b69ad9740d2a..01592d1f81e4 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -5,6 +5,7 @@ add_clang_library(clangFrontend
ASTMerge.cpp
ASTUnit.cpp
AnalysisConsumer.cpp
+ BoostConAction.cpp
CacheTokens.cpp
CodeGenAction.cpp
CompilerInstance.cpp
@@ -54,4 +55,5 @@ ENDIF(MSVC)
add_dependencies(clangFrontend
ClangDiagnosticFrontend
ClangDiagnosticLex
- ClangDiagnosticSema)
+ ClangDiagnosticSema
+ ClangStmtNodes)
diff --git a/lib/Frontend/CodeGenAction.cpp b/lib/Frontend/CodeGenAction.cpp
index 79d7d5f4c241..3416aa825fcb 100644
--- a/lib/Frontend/CodeGenAction.cpp
+++ b/lib/Frontend/CodeGenAction.cpp
@@ -48,6 +48,7 @@ namespace {
Backend_EmitBC, ///< Emit LLVM bitcode files
Backend_EmitLL, ///< Emit human-readable LLVM assembly
Backend_EmitNothing, ///< Don't emit anything (benchmarking mode)
+ Backend_EmitMCNull, ///< Run CodeGen, but don't emit anything
Backend_EmitObj ///< Emit native object files
};
@@ -180,6 +1