author     Roman Divacky <rdivacky@FreeBSD.org>  2010-03-03 17:28:16 +0000
committer  Roman Divacky <rdivacky@FreeBSD.org>  2010-03-03 17:28:16 +0000
commit     79ade4e028932fcb9dab15e2fb2305ca15ab0f14 (patch)
tree       e1a885aadfd80632f5bd70d4bd2d37e715e35a79 /lib
parent     ecb7e5c8afe929ee38155db94de6b084ec32a645 (diff)
download   src-79ade4e028932fcb9dab15e2fb2305ca15ab0f14.tar.gz
           src-79ade4e028932fcb9dab15e2fb2305ca15ab0f14.zip
Update clang to 97654.
Notes: svn path=/vendor/clang/dist/; revision=204643
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 131
-rw-r--r--  lib/AST/ASTImporter.cpp | 864
-rw-r--r--  lib/AST/AttrImpl.cpp | 53
-rw-r--r--  lib/AST/CXXInheritance.cpp | 144
-rw-r--r--  lib/AST/Decl.cpp | 8
-rw-r--r--  lib/AST/DeclBase.cpp | 21
-rw-r--r--  lib/AST/DeclCXX.cpp | 56
-rw-r--r--  lib/AST/DeclObjC.cpp | 17
-rw-r--r--  lib/AST/Expr.cpp | 42
-rw-r--r--  lib/AST/ExprCXX.cpp | 22
-rw-r--r--  lib/AST/ExprConstant.cpp | 46
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp | 4
-rw-r--r--  lib/AST/StmtPrinter.cpp | 5
-rw-r--r--  lib/Analysis/AnalysisContext.cpp | 12
-rw-r--r--  lib/Analysis/CFG.cpp | 33
-rw-r--r--  lib/Analysis/CMakeLists.txt | 1
-rw-r--r--  lib/Analysis/LiveVariables.cpp | 21
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp | 224
-rw-r--r--  lib/Analysis/ReachableCode.cpp | 278
-rw-r--r--  lib/Analysis/UninitializedValues.cpp | 8
-rw-r--r--  lib/Basic/Diagnostic.cpp | 333
-rw-r--r--  lib/Basic/SourceManager.cpp | 32
-rw-r--r--  lib/Basic/Targets.cpp | 41
-rw-r--r--  lib/Basic/Version.cpp | 16
-rw-r--r--  lib/Checker/BasicStore.cpp | 52
-rw-r--r--  lib/Checker/BuiltinFunctionChecker.cpp | 1
-rw-r--r--  lib/Checker/CFRefCount.cpp | 40
-rw-r--r--  lib/Checker/CMakeLists.txt | 4
-rw-r--r--  lib/Checker/CallInliner.cpp | 67
-rw-r--r--  lib/Checker/CheckDeadStores.cpp | 3
-rw-r--r--  lib/Checker/FlatStore.cpp | 2
-rw-r--r--  lib/Checker/GRCoreEngine.cpp | 104
-rw-r--r--  lib/Checker/GRExprEngine.cpp | 70
-rw-r--r--  lib/Checker/GRExprEngineInternalChecks.h | 30
-rw-r--r--  lib/Checker/MacOSXAPIChecker.cpp | 141
-rw-r--r--  lib/Checker/MemRegion.cpp | 25
-rw-r--r--  lib/Checker/OSAtomicChecker.cpp | 1
-rw-r--r--  lib/Checker/ObjCUnusedIVarsChecker.cpp (renamed from lib/Checker/CheckObjCUnusedIVars.cpp) | 29
-rw-r--r--  lib/Checker/RegionStore.cpp | 14
-rw-r--r--  lib/Checker/SymbolManager.cpp | 21
-rw-r--r--  lib/Checker/UnixAPIChecker.cpp | 154
-rw-r--r--  lib/Checker/ValueManager.cpp | 12
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 144
-rw-r--r--  lib/CodeGen/CGBlocks.h | 8
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 75
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 210
-rw-r--r--  lib/CodeGen/CGCall.cpp | 176
-rw-r--r--  lib/CodeGen/CGCall.h | 17
-rw-r--r--  lib/CodeGen/CGClass.cpp | 379
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 34
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 1
-rw-r--r--  lib/CodeGen/CGException.cpp | 68
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 16
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 2
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 25
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 23
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 21
-rw-r--r--  lib/CodeGen/CGVtable.cpp | 1549
-rw-r--r--  lib/CodeGen/CGVtable.h | 5
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 119
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 66
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 64
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 27
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 12
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 19
-rw-r--r--  lib/CodeGen/Mangle.cpp | 78
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 29
-rw-r--r--  lib/CodeGen/TargetInfo.h | 31
-rw-r--r--  lib/Driver/Driver.cpp | 16
-rw-r--r--  lib/Driver/Tools.cpp | 83
-rw-r--r--  lib/Driver/Tools.h | 1
-rw-r--r--  lib/Driver/Types.cpp | 16
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 76
-rw-r--r--  lib/Frontend/CMakeLists.txt | 2
-rw-r--r--  lib/Frontend/CacheTokens.cpp | 7
-rw-r--r--  lib/Frontend/CodeGenAction.cpp (renamed from lib/Frontend/Backend.cpp) | 114
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 2
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 3
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 42
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 4
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 3
-rw-r--r--  lib/Frontend/PCHReaderDecl.cpp | 25
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 11
-rw-r--r--  lib/Frontend/PCHWriterDecl.cpp | 11
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp | 7
-rw-r--r--  lib/Frontend/RewriteObjC.cpp | 392
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 10
-rw-r--r--  lib/Headers/xmmintrin.h | 12
-rw-r--r--  lib/Lex/PPDirectives.cpp | 8
-rw-r--r--  lib/Lex/PPExpressions.cpp | 15
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 12
-rw-r--r--  lib/Lex/Pragma.cpp | 5
-rw-r--r--  lib/Lex/Preprocessor.cpp | 25
-rw-r--r--  lib/Parse/AttributeList.cpp | 4
-rw-r--r--  lib/Parse/ParseDecl.cpp | 231
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 63
-rw-r--r--  lib/Parse/ParseExpr.cpp | 30
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 190
-rw-r--r--  lib/Parse/ParseObjc.cpp | 26
-rw-r--r--  lib/Parse/ParseTentative.cpp | 19
-rw-r--r--  lib/Parse/Parser.cpp | 50
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 14
-rw-r--r--  lib/Sema/Sema.cpp | 62
-rw-r--r--  lib/Sema/Sema.h | 183
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 48
-rw-r--r--  lib/Sema/SemaChecking.cpp | 465
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 9
-rw-r--r--  lib/Sema/SemaDecl.cpp | 182
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 236
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 170
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 98
-rw-r--r--  lib/Sema/SemaExpr.cpp | 188
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 787
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 16
-rw-r--r--  lib/Sema/SemaInit.cpp | 25
-rw-r--r--  lib/Sema/SemaLookup.cpp | 40
-rw-r--r--  lib/Sema/SemaOverload.cpp | 116
-rw-r--r--  lib/Sema/SemaOverload.h | 68
-rw-r--r--  lib/Sema/SemaStmt.cpp | 29
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 58
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 21
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 23
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 238
-rw-r--r--  lib/Sema/SemaType.cpp | 31
-rw-r--r--  lib/Sema/TargetAttributesSema.cpp | 112
-rw-r--r--  lib/Sema/TreeTransform.h | 472
126 files changed, 8580 insertions, 3006 deletions
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index c23babb9a4a5..e091bf10b629 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -59,6 +59,12 @@ ASTContext::~ASTContext() {
// Release the DenseMaps associated with DeclContext objects.
// FIXME: Is this the ideal solution?
ReleaseDeclContextMaps();
+
+ // Release all of the memory associated with overridden C++ methods.
+ for (llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::iterator
+ OM = OverriddenMethods.begin(), OMEnd = OverriddenMethods.end();
+ OM != OMEnd; ++OM)
+ OM->second.Destroy();
if (FreeMemory) {
// Deallocate all the types.
@@ -319,6 +325,80 @@ void ASTContext::setInstantiatedFromUnnamedFieldDecl(FieldDecl *Inst,
InstantiatedFromUnnamedFieldDecl[Inst] = Tmpl;
}
+CXXMethodVector::iterator CXXMethodVector::begin() const {
+ if ((Storage & 0x01) == 0)
+ return reinterpret_cast<iterator>(&Storage);
+
+ vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
+ return &Vec->front();
+}
+
+CXXMethodVector::iterator CXXMethodVector::end() const {
+ if ((Storage & 0x01) == 0) {
+ if (Storage == 0)
+ return reinterpret_cast<iterator>(&Storage);
+
+ return reinterpret_cast<iterator>(&Storage) + 1;
+ }
+
+ vector_type *Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
+ return &Vec->front() + Vec->size();
+}
+
+void CXXMethodVector::push_back(const CXXMethodDecl *Method) {
+ if (Storage == 0) {
+ // 0 -> 1 element.
+ Storage = reinterpret_cast<uintptr_t>(Method);
+ return;
+ }
+
+ vector_type *Vec;
+ if ((Storage & 0x01) == 0) {
+ // 1 -> 2 elements. Allocate a new vector and push the element into that
+ // vector.
+ Vec = new vector_type;
+ Vec->push_back(reinterpret_cast<const CXXMethodDecl *>(Storage));
+ Storage = reinterpret_cast<uintptr_t>(Vec) | 0x01;
+ } else
+ Vec = reinterpret_cast<vector_type *>(Storage & ~0x01);
+
+ // Add the new method to the vector.
+ Vec->push_back(Method);
+}
+
+void CXXMethodVector::Destroy() {
+ if (Storage & 0x01)
+ delete reinterpret_cast<vector_type *>(Storage & ~0x01);
+
+ Storage = 0;
+}
+
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_begin(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.begin();
+}
+
+ASTContext::overridden_cxx_method_iterator
+ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.end();
+}
+
+void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
+ const CXXMethodDecl *Overridden) {
+ OverriddenMethods[Method].push_back(Overridden);
+}
+
namespace {
class BeforeInTranslationUnit
: std::binary_function<SourceRange, SourceRange, bool> {
@@ -563,6 +643,12 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
}
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(VD)) {
+ // In the case of a field in a packed struct, we want the minimum
+ // of the alignment of the field and the alignment of the struct.
+ Align = std::min(Align,
+ getPreferredTypeAlign(FD->getParent()->getTypeForDecl()));
+ }
}
return CharUnits::fromQuantity(Align / Target.getCharWidth());
@@ -872,14 +958,13 @@ void ASTContext::CollectObjCIvars(const ObjCInterfaceDecl *OI,
/// Collect all ivars, including those synthesized, in the current class.
///
void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
- llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars,
- bool CollectSynthesized) {
+ llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
for (ObjCInterfaceDecl::ivar_iterator I = OI->ivar_begin(),
E = OI->ivar_end(); I != E; ++I) {
Ivars.push_back(*I);
}
- if (CollectSynthesized)
- CollectSynthesizedIvars(OI, Ivars);
+
+ CollectNonClassIvars(OI, Ivars);
}
void ASTContext::CollectProtocolSynthesizedIvars(const ObjCProtocolDecl *PD,
@@ -895,11 +980,20 @@ void ASTContext::CollectProtocolSynthesizedIvars(const ObjCProtocolDecl *PD,
CollectProtocolSynthesizedIvars(*P, Ivars);
}
-/// CollectSynthesizedIvars -
-/// This routine collect synthesized ivars for the designated class.
+/// CollectNonClassIvars -
+/// This routine collects all other ivars which are not declared in the class.
+/// This includes synthesized ivars and those in class's implementation.
///
-void ASTContext::CollectSynthesizedIvars(const ObjCInterfaceDecl *OI,
+void ASTContext::CollectNonClassIvars(const ObjCInterfaceDecl *OI,
llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
+ // Find ivars declared in class extension.
+ if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) {
+ for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
+ E = CDecl->ivar_end(); I != E; ++I) {
+ Ivars.push_back(*I);
+ }
+ }
+
for (ObjCInterfaceDecl::prop_iterator I = OI->prop_begin(),
E = OI->prop_end(); I != E; ++I) {
if (ObjCIvarDecl *Ivar = (*I)->getPropertyIvarDecl())
@@ -912,6 +1006,13 @@ void ASTContext::CollectSynthesizedIvars(const ObjCInterfaceDecl *OI,
ObjCProtocolDecl *PD = (*P);
CollectProtocolSynthesizedIvars(PD, Ivars);
}
+
+ // Also add any ivar defined in this class's implementation
+ if (ObjCImplementationDecl *ImplDecl = OI->getImplementation()) {
+ for (ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
+ E = ImplDecl->ivar_end(); I != E; ++I)
+ Ivars.push_back(*I);
+ }
}
/// CollectInheritedProtocols - Collect all protocols in current class and
@@ -924,9 +1025,11 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
ObjCProtocolDecl *Proto = (*P);
Protocols.insert(Proto);
for (ObjCProtocolDecl::protocol_iterator P = Proto->protocol_begin(),
- PE = Proto->protocol_end(); P != PE; ++P)
+ PE = Proto->protocol_end(); P != PE; ++P) {
+ Protocols.insert(*P);
CollectInheritedProtocols(*P, Protocols);
}
+ }
// Categories of this Interface.
for (const ObjCCategoryDecl *CDeclChain = OI->getCategoryList();
@@ -4401,7 +4504,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs) {
if (allRTypes) return rhs;
return getFunctionType(retType, proto->arg_type_begin(),
proto->getNumArgs(), proto->isVariadic(),
- proto->getTypeQuals(), NoReturn, lcc);
+ proto->getTypeQuals(),
+ false, false, 0, 0, NoReturn, lcc);
}
if (allLTypes) return lhs;
@@ -4498,6 +4602,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS) {
switch (LHSClass) {
#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
@@ -4620,9 +4725,6 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS) {
return QualType();
}
- case Type::TemplateSpecialization:
- assert(false && "Dependent types have no size");
- break;
}
return QualType();
@@ -4888,8 +4990,11 @@ QualType ASTContext::GetBuiltinType(unsigned id,
// handle untyped/variadic arguments "T c99Style();" or "T cppStyle(...);".
if (ArgTypes.size() == 0 && TypeStr[0] == '.')
return getFunctionNoProtoType(ResType);
+
+ // FIXME: Should we create noreturn types?
return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(),
- TypeStr[0] == '.', 0);
+ TypeStr[0] == '.', 0, false, false, 0, 0,
+ false, CC_Default);
}
QualType
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index dee0d2b342fc..2bcf07e70040 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -80,27 +80,49 @@ namespace {
// Importing declarations
bool ImportDeclParts(NamedDecl *D, DeclContext *&DC,
DeclContext *&LexicalDC, DeclarationName &Name,
- SourceLocation &Loc);
+ SourceLocation &Loc);
+ void ImportDeclContext(DeclContext *FromDC);
bool IsStructuralMatch(RecordDecl *FromRecord, RecordDecl *ToRecord);
bool IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToRecord);
Decl *VisitDecl(Decl *D);
+ Decl *VisitNamespaceDecl(NamespaceDecl *D);
Decl *VisitTypedefDecl(TypedefDecl *D);
Decl *VisitEnumDecl(EnumDecl *D);
Decl *VisitRecordDecl(RecordDecl *D);
Decl *VisitEnumConstantDecl(EnumConstantDecl *D);
Decl *VisitFunctionDecl(FunctionDecl *D);
+ Decl *VisitCXXMethodDecl(CXXMethodDecl *D);
+ Decl *VisitCXXConstructorDecl(CXXConstructorDecl *D);
+ Decl *VisitCXXDestructorDecl(CXXDestructorDecl *D);
+ Decl *VisitCXXConversionDecl(CXXConversionDecl *D);
Decl *VisitFieldDecl(FieldDecl *D);
+ Decl *VisitObjCIvarDecl(ObjCIvarDecl *D);
Decl *VisitVarDecl(VarDecl *D);
+ Decl *VisitImplicitParamDecl(ImplicitParamDecl *D);
Decl *VisitParmVarDecl(ParmVarDecl *D);
+ Decl *VisitObjCMethodDecl(ObjCMethodDecl *D);
+ Decl *VisitObjCCategoryDecl(ObjCCategoryDecl *D);
+ Decl *VisitObjCProtocolDecl(ObjCProtocolDecl *D);
Decl *VisitObjCInterfaceDecl(ObjCInterfaceDecl *D);
+ Decl *VisitObjCPropertyDecl(ObjCPropertyDecl *D);
+ Decl *VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D);
+ Decl *VisitObjCClassDecl(ObjCClassDecl *D);
// Importing statements
Stmt *VisitStmt(Stmt *S);
// Importing expressions
Expr *VisitExpr(Expr *E);
+ Expr *VisitDeclRefExpr(DeclRefExpr *E);
Expr *VisitIntegerLiteral(IntegerLiteral *E);
+ Expr *VisitCharacterLiteral(CharacterLiteral *E);
+ Expr *VisitParenExpr(ParenExpr *E);
+ Expr *VisitUnaryOperator(UnaryOperator *E);
+ Expr *VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E);
+ Expr *VisitBinaryOperator(BinaryOperator *E);
+ Expr *VisitCompoundAssignOperator(CompoundAssignOperator *E);
Expr *VisitImplicitCastExpr(ImplicitCastExpr *E);
+ Expr *VisitCStyleCastExpr(CStyleCastExpr *E);
};
}
@@ -421,6 +443,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
if (Vec1->isPixel() != Vec2->isPixel())
return false;
+ break;
}
case Type::FunctionProto: {
@@ -1356,21 +1379,29 @@ bool ASTNodeImporter::ImportDeclParts(NamedDecl *D, DeclContext *&DC,
return false;
}
+void ASTNodeImporter::ImportDeclContext(DeclContext *FromDC) {
+ for (DeclContext::decl_iterator From = FromDC->decls_begin(),
+ FromEnd = FromDC->decls_end();
+ From != FromEnd;
+ ++From)
+ Importer.Import(*From);
+}
+
bool ASTNodeImporter::IsStructuralMatch(RecordDecl *FromRecord,
RecordDecl *ToRecord) {
- StructuralEquivalenceContext SEC(Importer.getFromContext(),
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
Importer.getDiags(),
Importer.getNonEquivalentDecls());
- return SEC.IsStructurallyEquivalent(FromRecord, ToRecord);
+ return Ctx.IsStructurallyEquivalent(FromRecord, ToRecord);
}
bool ASTNodeImporter::IsStructuralMatch(EnumDecl *FromEnum, EnumDecl *ToEnum) {
- StructuralEquivalenceContext SEC(Importer.getFromContext(),
+ StructuralEquivalenceContext Ctx(Importer.getFromContext(),
Importer.getToContext(),
Importer.getDiags(),
Importer.getNonEquivalentDecls());
- return SEC.IsStructurallyEquivalent(FromEnum, ToEnum);
+ return Ctx.IsStructurallyEquivalent(FromEnum, ToEnum);
}
Decl *ASTNodeImporter::VisitDecl(Decl *D) {
@@ -1379,6 +1410,71 @@ Decl *ASTNodeImporter::VisitDecl(Decl *D) {
return 0;
}
+Decl *ASTNodeImporter::VisitNamespaceDecl(NamespaceDecl *D) {
+ // Import the major distinguishing characteristics of this namespace.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ NamespaceDecl *MergeWithNamespace = 0;
+ if (!Name) {
+ // This is an anonymous namespace. Adopt an existing anonymous
+ // namespace if we can.
+ // FIXME: Not testable.
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ MergeWithNamespace = TU->getAnonymousNamespace();
+ else
+ MergeWithNamespace = cast<NamespaceDecl>(DC)->getAnonymousNamespace();
+ } else {
+ llvm::SmallVector<NamedDecl *, 4> ConflictingDecls;
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (!(*Lookup.first)->isInIdentifierNamespace(Decl::IDNS_Ordinary))
+ continue;
+
+ if (NamespaceDecl *FoundNS = dyn_cast<NamespaceDecl>(*Lookup.first)) {
+ MergeWithNamespace = FoundNS;
+ ConflictingDecls.clear();
+ break;
+ }
+
+ ConflictingDecls.push_back(*Lookup.first);
+ }
+
+ if (!ConflictingDecls.empty()) {
+ Name = Importer.HandleNameConflict(Name, DC, Decl::IDNS_Ordinary,
+ ConflictingDecls.data(),
+ ConflictingDecls.size());
+ }
+ }
+
+ // Create the "to" namespace, if needed.
+ NamespaceDecl *ToNamespace = MergeWithNamespace;
+ if (!ToNamespace) {
+ ToNamespace = NamespaceDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo());
+ ToNamespace->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToNamespace);
+
+ // If this is an anonymous namespace, register it as the anonymous
+ // namespace within its context.
+ if (!Name) {
+ if (TranslationUnitDecl *TU = dyn_cast<TranslationUnitDecl>(DC))
+ TU->setAnonymousNamespace(ToNamespace);
+ else
+ cast<NamespaceDecl>(DC)->setAnonymousNamespace(ToNamespace);
+ }
+ }
+ Importer.Imported(D, ToNamespace);
+
+ ImportDeclContext(D);
+
+ return ToNamespace;
+}
+
Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
// Import the major distinguishing characteristics of this typedef.
DeclContext *DC, *LexicalDC;
@@ -1426,6 +1522,7 @@ Decl *ASTNodeImporter::VisitTypedefDecl(TypedefDecl *D) {
TypedefDecl *ToTypedef = TypedefDecl::Create(Importer.getToContext(), DC,
Loc, Name.getAsIdentifierInfo(),
TInfo);
+ ToTypedef->setAccess(D->getAccess());
ToTypedef->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToTypedef);
LexicalDC->addDecl(ToTypedef);
@@ -1485,6 +1582,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
Name.getAsIdentifierInfo(),
Importer.Import(D->getTagKeywordLoc()),
0);
+ D2->setAccess(D->getAccess());
D2->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, D2);
LexicalDC->addDecl(D2);
@@ -1506,12 +1604,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
return 0;
D2->startDefinition();
- for (DeclContext::decl_iterator FromMem = D->decls_begin(),
- FromMemEnd = D->decls_end();
- FromMem != FromMemEnd;
- ++FromMem)
- Importer.Import(*FromMem);
-
+ ImportDeclContext(D);
D2->completeDefinition(T, ToPromotionType);
}
@@ -1600,6 +1693,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Name.getAsIdentifierInfo(),
Importer.Import(D->getTagKeywordLoc()));
D2 = D2CXX;
+ D2->setAccess(D->getAccess());
if (D->isDefinition()) {
// Add base classes.
@@ -1638,12 +1732,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (D->isDefinition()) {
D2->startDefinition();
- for (DeclContext::decl_iterator FromMem = D->decls_begin(),
- FromMemEnd = D->decls_end();
- FromMem != FromMemEnd;
- ++FromMem)
- Importer.Import(*FromMem);
-
+ ImportDeclContext(D);
D2->completeDefinition();
}
@@ -1693,6 +1782,7 @@ Decl *ASTNodeImporter::VisitEnumConstantDecl(EnumConstantDecl *D) {
= EnumConstantDecl::Create(Importer.getToContext(), cast<EnumDecl>(DC), Loc,
Name.getAsIdentifierInfo(), T,
Init, D->getInitVal());
+ ToEnumerator->setAccess(D->getAccess());
ToEnumerator->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToEnumerator);
LexicalDC->addDecl(ToEnumerator);
@@ -1773,25 +1863,64 @@ Decl *ASTNodeImporter::VisitFunctionDecl(FunctionDecl *D) {
// Create the imported function.
TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
- FunctionDecl *ToEnumerator
- = FunctionDecl::Create(Importer.getToContext(), DC, Loc,
- Name, T, TInfo, D->getStorageClass(),
- D->isInlineSpecified(),
- D->hasWrittenPrototype());
- ToEnumerator->setLexicalDeclContext(LexicalDC);
- Importer.Imported(D, ToEnumerator);
- LexicalDC->addDecl(ToEnumerator);
+ FunctionDecl *ToFunction = 0;
+ if (CXXConstructorDecl *FromConstructor = dyn_cast<CXXConstructorDecl>(D)) {
+ ToFunction = CXXConstructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ Loc, Name, T, TInfo,
+ FromConstructor->isExplicit(),
+ D->isInlineSpecified(),
+ D->isImplicit());
+ } else if (isa<CXXDestructorDecl>(D)) {
+ ToFunction = CXXDestructorDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ Loc, Name, T,
+ D->isInlineSpecified(),
+ D->isImplicit());
+ } else if (CXXConversionDecl *FromConversion
+ = dyn_cast<CXXConversionDecl>(D)) {
+ ToFunction = CXXConversionDecl::Create(Importer.getToContext(),
+ cast<CXXRecordDecl>(DC),
+ Loc, Name, T, TInfo,
+ D->isInlineSpecified(),
+ FromConversion->isExplicit());
+ } else {
+ ToFunction = FunctionDecl::Create(Importer.getToContext(), DC, Loc,
+ Name, T, TInfo, D->getStorageClass(),
+ D->isInlineSpecified(),
+ D->hasWrittenPrototype());
+ }
+ ToFunction->setAccess(D->getAccess());
+ ToFunction->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToFunction);
+ LexicalDC->addDecl(ToFunction);
// Set the parameters.
for (unsigned I = 0, N = Parameters.size(); I != N; ++I) {
- Parameters[I]->setOwningFunction(ToEnumerator);
- ToEnumerator->addDecl(Parameters[I]);
+ Parameters[I]->setOwningFunction(ToFunction);
+ ToFunction->addDecl(Parameters[I]);
}
- ToEnumerator->setParams(Parameters.data(), Parameters.size());
+ ToFunction->setParams(Parameters.data(), Parameters.size());
// FIXME: Other bits to merge?
- return ToEnumerator;
+ return ToFunction;
+}
+
+Decl *ASTNodeImporter::VisitCXXMethodDecl(CXXMethodDecl *D) {
+ return VisitFunctionDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConstructorDecl(CXXConstructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXDestructorDecl(CXXDestructorDecl *D) {
+ return VisitCXXMethodDecl(D);
+}
+
+Decl *ASTNodeImporter::VisitCXXConversionDecl(CXXConversionDecl *D) {
+ return VisitCXXMethodDecl(D);
}
Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
@@ -1815,12 +1944,61 @@ Decl *ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
FieldDecl *ToField = FieldDecl::Create(Importer.getToContext(), DC,
Loc, Name.getAsIdentifierInfo(),
T, TInfo, BitWidth, D->isMutable());
+ ToField->setAccess(D->getAccess());
ToField->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToField);
LexicalDC->addDecl(ToField);
return ToField;
}
+Decl *ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
+ // Import the major distinguishing characteristics of an ivar.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Determine whether we've already imported this ivar
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (ObjCIvarDecl *FoundIvar = dyn_cast<ObjCIvarDecl>(*Lookup.first)) {
+ if (Importer.IsStructurallyEquivalent(D->getType(),
+ FoundIvar->getType())) {
+ Importer.Imported(D, FoundIvar);
+ return FoundIvar;
+ }
+
+ Importer.ToDiag(Loc, diag::err_odr_ivar_type_inconsistent)
+ << Name << D->getType() << FoundIvar->getType();
+ Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
+ << FoundIvar->getType();
+ return 0;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(D->getTypeSourceInfo());
+ Expr *BitWidth = Importer.Import(D->getBitWidth());
+ if (!BitWidth && D->getBitWidth())
+ return 0;
+
+ ObjCIvarDecl *ToIvar = ObjCIvarDecl::Create(Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(),
+ T, TInfo, D->getAccessControl(),
+ BitWidth);
+ ToIvar->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToIvar);
+ LexicalDC->addDecl(ToIvar);
+ return ToIvar;
+
+}
+
Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
// Import the major distinguishing characteristics of a variable.
DeclContext *DC, *LexicalDC;
@@ -1922,6 +2100,7 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
VarDecl *ToVar = VarDecl::Create(Importer.getToContext(), DC, Loc,
Name.getAsIdentifierInfo(), T, TInfo,
D->getStorageClass());
+ ToVar->setAccess(D->getAccess());
ToVar->setLexicalDeclContext(LexicalDC);
Importer.Imported(D, ToVar);
LexicalDC->addDecl(ToVar);
@@ -1937,6 +2116,32 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
return ToVar;
}
+Decl *ASTNodeImporter::VisitImplicitParamDecl(ImplicitParamDecl *D) {
+ // Parameters are created in the translation unit's context, then moved
+ // into the function declaration's context afterward.
+ DeclContext *DC = Importer.getToContext().getTranslationUnitDecl();
+
+ // Import the name of this declaration.
+ DeclarationName Name = Importer.Import(D->getDeclName());
+ if (D->getDeclName() && !Name)
+ return 0;
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ // Import the parameter's type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the imported parameter.
+ ImplicitParamDecl *ToParm
+ = ImplicitParamDecl::Create(Importer.getToContext(), DC,
+ Loc, Name.getAsIdentifierInfo(),
+ T);
+ return Importer.Imported(D, ToParm);
+}
+
Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
// Parameters are created in the translation unit's context, then moved
// into the function declaration's context afterward.
@@ -1964,6 +2169,254 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
return Importer.Imported(D, ToParm);
}
+Decl *ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
+ // Import the major distinguishing characteristics of a method.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (ObjCMethodDecl *FoundMethod = dyn_cast<ObjCMethodDecl>(*Lookup.first)) {
+ if (FoundMethod->isInstanceMethod() != D->isInstanceMethod())
+ continue;
+
+ // Check return types.
+ if (!Importer.IsStructurallyEquivalent(D->getResultType(),
+ FoundMethod->getResultType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_result_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->getResultType() << FoundMethod->getResultType();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check the number of parameters.
+ if (D->param_size() != FoundMethod->param_size()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_num_params_inconsistent)
+ << D->isInstanceMethod() << Name
+ << D->param_size() << FoundMethod->param_size();
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // Check parameter types.
+ for (ObjCMethodDecl::param_iterator P = D->param_begin(),
+ PEnd = D->param_end(), FoundP = FoundMethod->param_begin();
+ P != PEnd; ++P, ++FoundP) {
+ if (!Importer.IsStructurallyEquivalent((*P)->getType(),
+ (*FoundP)->getType())) {
+ Importer.FromDiag((*P)->getLocation(),
+ diag::err_odr_objc_method_param_type_inconsistent)
+ << D->isInstanceMethod() << Name
+ << (*P)->getType() << (*FoundP)->getType();
+ Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
+ << (*FoundP)->getType();
+ return 0;
+ }
+ }
+
+ // Check variadic/non-variadic.
+ // Check the number of parameters.
+ if (D->isVariadic() != FoundMethod->isVariadic()) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_method_variadic_inconsistent)
+ << D->isInstanceMethod() << Name;
+ Importer.ToDiag(FoundMethod->getLocation(),
+ diag::note_odr_objc_method_here)
+ << D->isInstanceMethod() << Name;
+ return 0;
+ }
+
+ // FIXME: Any other bits we need to merge?
+ return Importer.Imported(D, FoundMethod);
+ }
+ }
+
+ // Import the result type.
+ QualType ResultTy = Importer.Import(D->getResultType());
+ if (ResultTy.isNull())
+ return 0;
+
+ ObjCMethodDecl *ToMethod
+ = ObjCMethodDecl::Create(Importer.getToContext(),
+ Loc,
+ Importer.Import(D->getLocEnd()),
+ Name.getObjCSelector(),
+ ResultTy, DC,
+ D->isInstanceMethod(),
+ D->isVariadic(),
+ D->isSynthesized(),
+ D->getImplementationControl());
+
+ // FIXME: When we decide to merge method definitions, we'll need to
+ // deal with implicit parameters.
+
+ // Import the parameters
+ llvm::SmallVector<ParmVarDecl *, 5> ToParams;
+ for (ObjCMethodDecl::param_iterator FromP = D->param_begin(),
+ FromPEnd = D->param_end();
+ FromP != FromPEnd;
+ ++FromP) {
+ ParmVarDecl *ToP = cast_or_null<ParmVarDecl>(Importer.Import(*FromP));
+ if (!ToP)
+ return 0;
+
+ ToParams.push_back(ToP);
+ }
+
+ // Set the parameters.
+ for (unsigned I = 0, N = ToParams.size(); I != N; ++I) {
+ ToParams[I]->setOwningFunction(ToMethod);
+ ToMethod->addDecl(ToParams[I]);
+ }
+ ToMethod->setMethodParams(Importer.getToContext(),
+ ToParams.data(), ToParams.size());
+
+ ToMethod->setLexicalDeclContext(LexicalDC);
+ Importer.Imported(D, ToMethod);
+ LexicalDC->addDecl(ToMethod);
+ return ToMethod;
+}
+
+Decl *ASTNodeImporter::VisitObjCCategoryDecl(ObjCCategoryDecl *D) {
+ // Import the major distinguishing characteristics of a category.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCInterfaceDecl *ToInterface
+ = cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getClassInterface()));
+ if (!ToInterface)
+ return 0;
+
+ // Determine if we've already encountered this category.
+ ObjCCategoryDecl *MergeWithCategory
+ = ToInterface->FindCategoryDeclaration(Name.getAsIdentifierInfo());
+ ObjCCategoryDecl *ToCategory = MergeWithCategory;
+ if (!ToCategory) {
+ ToCategory = ObjCCategoryDecl::Create(Importer.getToContext(), DC,
+ Importer.Import(D->getAtLoc()),
+ Loc,
+ Importer.Import(D->getCategoryNameLoc()),
+ Name.getAsIdentifierInfo());
+ ToCategory->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToCategory);
+ Importer.Imported(D, ToCategory);
+
+ // Link this category into its class's category list.
+ ToCategory->setClassInterface(ToInterface);
+ ToCategory->insertNextClassCategory();
+
+ // Import protocols
+ llvm::SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ llvm::SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCCategoryDecl::protocol_loc_iterator FromProtoLoc
+ = D->protocol_loc_begin();
+ for (ObjCCategoryDecl::protocol_iterator FromProto = D->protocol_begin(),
+ FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return 0;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ ToCategory->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+
+ } else {
+ Importer.Imported(D, ToCategory);
+ }
+
+ // Import all of the members of this category.
+ ImportDeclContext(D);
+
+ // If we have an implementation, import it as well.
+ if (D->getImplementation()) {
+ ObjCCategoryImplDecl *Impl
+ = cast<ObjCCategoryImplDecl>(Importer.Import(D->getImplementation()));
+ if (!Impl)
+ return 0;
+
+ ToCategory->setImplementation(Impl);
+ }
+
+ return ToCategory;
+}
+
+Decl *ASTNodeImporter::VisitObjCProtocolDecl(ObjCProtocolDecl *D) {
+ // Import the major distinguishing characteristics of a protocol.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ ObjCProtocolDecl *MergeWithProtocol = 0;
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (!(*Lookup.first)->isInIdentifierNamespace(Decl::IDNS_ObjCProtocol))
+ continue;
+
+ if ((MergeWithProtocol = dyn_cast<ObjCProtocolDecl>(*Lookup.first)))
+ break;
+ }
+
+ ObjCProtocolDecl *ToProto = MergeWithProtocol;
+ if (!ToProto || ToProto->isForwardDecl()) {
+ if (!ToProto) {
+ ToProto = ObjCProtocolDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo());
+ ToProto->setForwardDecl(D->isForwardDecl());
+ ToProto->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToProto);
+ }
+ Importer.Imported(D, ToProto);
+
+ // Import protocols
+ llvm::SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ llvm::SmallVector<SourceLocation, 4> ProtocolLocs;
+ ObjCProtocolDecl::protocol_loc_iterator
+ FromProtoLoc = D->protocol_loc_begin();
+ for (ObjCProtocolDecl::protocol_iterator FromProto = D->protocol_begin(),
+ FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ return 0;
+ Protocols.push_back(ToProto);
+ ProtocolLocs.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ // FIXME: If we're merging, make sure that the protocol list is the same.
+ ToProto->setProtocolList(Protocols.data(), Protocols.size(),
+ ProtocolLocs.data(), Importer.getToContext());
+ } else {
+ Importer.Imported(D, ToProto);
+ }
+
+ // Import all of the members of this protocol.
+ ImportDeclContext(D);
+
+ return ToProto;
+}
+
Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
// Import the major distinguishing characteristics of an @interface.
DeclContext *DC, *LexicalDC;
@@ -1992,14 +2445,12 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
Importer.Import(D->getClassLoc()),
D->isForwardDecl(),
D->isImplicitInterfaceDecl());
+ ToIface->setForwardDecl(D->isForwardDecl());
ToIface->setLexicalDeclContext(LexicalDC);
LexicalDC->addDecl(ToIface);
}
Importer.Imported(D, ToIface);
- // Import superclass
- // FIXME: If we're merging, make sure that both decls have the same
- // superclass.
if (D->getSuperClass()) {
ObjCInterfaceDecl *Super
= cast_or_null<ObjCInterfaceDecl>(Importer.Import(D->getSuperClass()));
@@ -2031,20 +2482,47 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
ToIface->setProtocolList(Protocols.data(), Protocols.size(),
ProtocolLocs.data(), Importer.getToContext());
- // FIXME: Import categories
-
// Import @end range
ToIface->setAtEndRange(Importer.Import(D->getAtEndRange()));
} else {
Importer.Imported(D, ToIface);
+
+ // Check for consistency of superclasses.
+ DeclarationName FromSuperName, ToSuperName;
+ if (D->getSuperClass())
+ FromSuperName = Importer.Import(D->getSuperClass()->getDeclName());
+ if (ToIface->getSuperClass())
+ ToSuperName = ToIface->getSuperClass()->getDeclName();
+ if (FromSuperName != ToSuperName) {
+ Importer.ToDiag(ToIface->getLocation(),
+ diag::err_odr_objc_superclass_inconsistent)
+ << ToIface->getDeclName();
+ if (ToIface->getSuperClass())
+ Importer.ToDiag(ToIface->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << ToIface->getSuperClass()->getDeclName();
+ else
+ Importer.ToDiag(ToIface->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ if (D->getSuperClass())
+ Importer.FromDiag(D->getSuperClassLoc(),
+ diag::note_odr_objc_superclass)
+ << D->getSuperClass()->getDeclName();
+ else
+ Importer.FromDiag(D->getLocation(),
+ diag::note_odr_objc_missing_superclass);
+ return 0;
+ }
}
+ // Import categories. When the categories themselves are imported, they'll
+ // hook themselves into this interface.
+ for (ObjCCategoryDecl *FromCat = D->getCategoryList(); FromCat;
+ FromCat = FromCat->getNextClassCategory())
+ Importer.Import(FromCat);
+
// Import all of the members of this class.
- for (DeclContext::decl_iterator FromMem = D->decls_begin(),
- FromMemEnd = D->decls_end();
- FromMem != FromMemEnd;
- ++FromMem)
- Importer.Import(*FromMem);
+ ImportDeclContext(D);
// If we have an @implementation, import it as well.
if (D->getImplementation()) {
@@ -2056,7 +2534,151 @@ Decl *ASTNodeImporter::VisitObjCInterfaceDecl(ObjCInterfaceDecl *D) {
ToIface->setImplementation(Impl);
}
- return 0;
+ return ToIface;
+}
+
+Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
+ // Import the major distinguishing characteristics of an @property.
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ if (ImportDeclParts(D, DC, LexicalDC, Name, Loc))
+ return 0;
+
+ // Check whether we have already imported this property.
+ for (DeclContext::lookup_result Lookup = DC->lookup(Name);
+ Lookup.first != Lookup.second;
+ ++Lookup.first) {
+ if (ObjCPropertyDecl *FoundProp
+ = dyn_cast<ObjCPropertyDecl>(*Lookup.first)) {
+ // Check property types.
+ if (!Importer.IsStructurallyEquivalent(D->getType(),
+ FoundProp->getType())) {
+ Importer.ToDiag(Loc, diag::err_odr_objc_property_type_inconsistent)
+ << Name << D->getType() << FoundProp->getType();
+ Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
+ << FoundProp->getType();
+ return 0;
+ }
+
+ // FIXME: Check property attributes, getters, setters, etc.?
+
+ // Consider these properties to be equivalent.
+ Importer.Imported(D, FoundProp);
+ return FoundProp;
+ }
+ }
+
+ // Import the type.
+ QualType T = Importer.Import(D->getType());
+ if (T.isNull())
+ return 0;
+
+ // Create the new property.
+ ObjCPropertyDecl *ToProperty
+ = ObjCPropertyDecl::Create(Importer.getToContext(), DC, Loc,
+ Name.getAsIdentifierInfo(),
+ Importer.Import(D->getAtLoc()),
+ T,
+ D->getPropertyImplementation());
+ Importer.Imported(D, ToProperty);
+ ToProperty->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToProperty);
+
+ ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setGetterName(Importer.Import(D->getGetterName()));
+ ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+ ToProperty->setGetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
+ ToProperty->setSetterMethodDecl(
+ cast_or_null<ObjCMethodDecl>(Importer.Import(D->getSetterMethodDecl())));
+ ToProperty->setPropertyIvarDecl(
+ cast_or_null<ObjCIvarDecl>(Importer.Import(D->getPropertyIvarDecl())));
+ return ToProperty;
+}
+
+Decl *
+ASTNodeImporter::VisitObjCForwardProtocolDecl(ObjCForwardProtocolDecl *D) {
+ // Import the context of this declaration.
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ llvm::SmallVector<ObjCProtocolDecl *, 4> Protocols;
+ llvm::SmallVector<SourceLocation, 4> Locations;
+ ObjCForwardProtocolDecl::protocol_loc_iterator FromProtoLoc
+ = D->protocol_loc_begin();
+ for (ObjCForwardProtocolDecl::protocol_iterator FromProto
+ = D->protocol_begin(), FromProtoEnd = D->protocol_end();
+ FromProto != FromProtoEnd;
+ ++FromProto, ++FromProtoLoc) {
+ ObjCProtocolDecl *ToProto
+ = cast_or_null<ObjCProtocolDecl>(Importer.Import(*FromProto));
+ if (!ToProto)
+ continue;
+
+ Protocols.push_back(ToProto);
+ Locations.push_back(Importer.Import(*FromProtoLoc));
+ }
+
+ ObjCForwardProtocolDecl *ToForward
+ = ObjCForwardProtocolDecl::Create(Importer.getToContext(), DC, Loc,
+ Protocols.data(), Protocols.size(),
+ Locations.data());
+ ToForward->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToForward);
+ Importer.Imported(D, ToForward);
+ return ToForward;
+}
+
+Decl *ASTNodeImporter::VisitObjCClassDecl(ObjCClassDecl *D) {
+ // Import the context of this declaration.
+ DeclContext *DC = Importer.ImportContext(D->getDeclContext());
+ if (!DC)
+ return 0;
+
+ DeclContext *LexicalDC = DC;
+ if (D->getDeclContext() != D->getLexicalDeclContext()) {
+ LexicalDC = Importer.ImportContext(D->getLexicalDeclContext());
+ if (!LexicalDC)
+ return 0;
+ }
+
+ // Import the location of this declaration.
+ SourceLocation Loc = Importer.Import(D->getLocation());
+
+ llvm::SmallVector<ObjCInterfaceDecl *, 4> Interfaces;
+ llvm::SmallVector<SourceLocation, 4> Locations;
+ for (ObjCClassDecl::iterator From = D->begin(), FromEnd = D->end();
+ From != FromEnd; ++From) {
+ ObjCInterfaceDecl *ToIface
+ = cast_or_null<ObjCInterfaceDecl>(Importer.Import(From->getInterface()));
+ if (!ToIface)
+ continue;
+
+ Interfaces.push_back(ToIface);
+ Locations.push_back(Importer.Import(From->getLocation()));
+ }
+
+ ObjCClassDecl *ToClass = ObjCClassDecl::Create(Importer.getToContext(), DC,
+ Loc,
+ Interfaces.data(),
+ Locations.data(),
+ Interfaces.size());
+ ToClass->setLexicalDeclContext(LexicalDC);
+ LexicalDC->addDecl(ToClass);
+ Importer.Imported(D, ToClass);
+ return ToClass;
}
//----------------------------------------------------------------------------
@@ -2078,6 +2700,30 @@ Expr *ASTNodeImporter::VisitExpr(Expr *E) {
return 0;
}
+Expr *ASTNodeImporter::VisitDeclRefExpr(DeclRefExpr *E) {
+ NestedNameSpecifier *Qualifier = 0;
+ if (E->getQualifier()) {
+ Qualifier = Importer.Import(E->getQualifier());
+ if (!E->getQualifier())
+ return 0;
+ }
+
+ ValueDecl *ToD = cast_or_null<ValueDecl>(Importer.Import(E->getDecl()));
+ if (!ToD)
+ return 0;
+
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return DeclRefExpr::Create(Importer.getToContext(), Qualifier,
+ Importer.Import(E->getQualifierRange()),
+ ToD,
+ Importer.Import(E->getLocation()),
+ T,
+ /*FIXME:TemplateArgs=*/0);
+}
+
Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -2087,6 +2733,110 @@ Expr *ASTNodeImporter::VisitIntegerLiteral(IntegerLiteral *E) {
IntegerLiteral(E->getValue(), T, Importer.Import(E->getLocation()));
}
+Expr *ASTNodeImporter::VisitCharacterLiteral(CharacterLiteral *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ return new (Importer.getToContext()) CharacterLiteral(E->getValue(),
+ E->isWide(), T,
+ Importer.Import(E->getLocation()));
+}
+
+Expr *ASTNodeImporter::VisitParenExpr(ParenExpr *E) {
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext())
+ ParenExpr(Importer.Import(E->getLParen()),
+ Importer.Import(E->getRParen()),
+ SubExpr);
+}
+
+Expr *ASTNodeImporter::VisitUnaryOperator(UnaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) UnaryOperator(SubExpr, E->getOpcode(),
+ T,
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *E) {
+ QualType ResultType = Importer.Import(E->getType());
+
+ if (E->isArgumentType()) {
+ TypeSourceInfo *TInfo = Importer.Import(E->getArgumentTypeInfo());
+ if (!TInfo)
+ return 0;
+
+ return new (Importer.getToContext()) SizeOfAlignOfExpr(E->isSizeOf(),
+ TInfo, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+ }
+
+ Expr *SubExpr = Importer.Import(E->getArgumentExpr());
+ if (!SubExpr)
+ return 0;
+
+ return new (Importer.getToContext()) SizeOfAlignOfExpr(E->isSizeOf(),
+ SubExpr, ResultType,
+ Importer.Import(E->getOperatorLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
+Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext()) BinaryOperator(LHS, RHS, E->getOpcode(),
+ T,
+ Importer.Import(E->getOperatorLoc()));
+}
+
+Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ QualType CompLHSType = Importer.Import(E->getComputationLHSType());
+ if (CompLHSType.isNull())
+ return 0;
+
+ QualType CompResultType = Importer.Import(E->getComputationResultType());
+ if (CompResultType.isNull())
+ return 0;
+
+ Expr *LHS = Importer.Import(E->getLHS());
+ if (!LHS)
+ return 0;
+
+ Expr *RHS = Importer.Import(E->getRHS());
+ if (!RHS)
+ return 0;
+
+ return new (Importer.getToContext())
+ CompoundAssignOperator(LHS, RHS, E->getOpcode(),
+ T, CompLHSType, CompResultType,
+ Importer.Import(E->getOperatorLoc()));
+}
+
Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
QualType T = Importer.Import(E->getType());
if (T.isNull())
@@ -2101,6 +2851,25 @@ Expr *ASTNodeImporter::VisitImplicitCastExpr(ImplicitCastExpr *E) {
E->isLvalueCast());
}
+Expr *ASTNodeImporter::VisitCStyleCastExpr(CStyleCastExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return 0;
+
+ Expr *SubExpr = Importer.Import(E->getSubExpr());
+ if (!SubExpr)
+ return 0;
+
+ TypeSourceInfo *TInfo = Importer.Import(E->getTypeInfoAsWritten());
+ if (!TInfo && E->getTypeInfoAsWritten())
+ return 0;
+
+ return new (Importer.getToContext()) CStyleCastExpr(T, E->getCastKind(),
+ SubExpr, TInfo,
+ Importer.Import(E->getLParenLoc()),
+ Importer.Import(E->getRParenLoc()));
+}
+
ASTImporter::ASTImporter(Diagnostic &Diags,
ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager)
@@ -2359,6 +3128,17 @@ IdentifierInfo *ASTImporter::Import(IdentifierInfo *FromId) {
return &ToContext.Idents.get(FromId->getName());
}
+Selector ASTImporter::Import(Selector FromSel) {
+ if (FromSel.isNull())
+ return Selector();
+
+ llvm::SmallVector<IdentifierInfo *, 4> Idents;
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(0)));
+ for (unsigned I = 1, N = FromSel.getNumArgs(); I < N; ++I)
+ Idents.push_back(Import(FromSel.getIdentifierInfoForSlot(I)));
+ return ToContext.Selectors.getSelector(FromSel.getNumArgs(), Idents.data());
+}
+
DeclarationName ASTImporter::HandleNameConflict(DeclarationName Name,
DeclContext *DC,
unsigned IDNS,
@@ -2388,7 +3168,7 @@ bool ASTImporter::IsStructurallyEquivalent(QualType From, QualType To) {
if (Pos != ImportedTypes.end() && ToContext.hasSameType(Import(From), To))
return true;
- StructuralEquivalenceContext SEC(FromContext, ToContext, Diags,
+ StructuralEquivalenceContext Ctx(FromContext, ToContext, Diags,
NonEquivalentDecls);
- return SEC.IsStructurallyEquivalent(From, To);
+ return Ctx.IsStructurallyEquivalent(From, To);
}
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index d81979734b3a..423aa065e57c 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -74,37 +74,40 @@ void NonNullAttr::Destroy(ASTContext &C) {
// FIXME: Can we use variadic macro to define DEF_SIMPLE_ATTR_CLONE for
// "non-simple" classes?
-DEF_SIMPLE_ATTR_CLONE(Packed)
DEF_SIMPLE_ATTR_CLONE(AlwaysInline)
-DEF_SIMPLE_ATTR_CLONE(Malloc)
-DEF_SIMPLE_ATTR_CLONE(NoReturn)
DEF_SIMPLE_ATTR_CLONE(AnalyzerNoReturn)
+DEF_SIMPLE_ATTR_CLONE(BaseCheck)
+DEF_SIMPLE_ATTR_CLONE(CDecl)
+DEF_SIMPLE_ATTR_CLONE(CFReturnsNotRetained)
+DEF_SIMPLE_ATTR_CLONE(CFReturnsRetained)
+DEF_SIMPLE_ATTR_CLONE(Const)
+DEF_SIMPLE_ATTR_CLONE(DLLExport)
+DEF_SIMPLE_ATTR_CLONE(DLLImport)
DEF_SIMPLE_ATTR_CLONE(Deprecated)
+DEF_SIMPLE_ATTR_CLONE(FastCall)
DEF_SIMPLE_ATTR_CLONE(Final)
-DEF_SIMPLE_ATTR_CLONE(Unavailable)
-DEF_SIMPLE_ATTR_CLONE(Unused)
-DEF_SIMPLE_ATTR_CLONE(Used)
-DEF_SIMPLE_ATTR_CLONE(Weak)
-DEF_SIMPLE_ATTR_CLONE(WeakImport)
+DEF_SIMPLE_ATTR_CLONE(Hiding)
+DEF_SIMPLE_ATTR_CLONE(Malloc)
+DEF_SIMPLE_ATTR_CLONE(NSReturnsNotRetained)
+DEF_SIMPLE_ATTR_CLONE(NSReturnsRetained)
+DEF_SIMPLE_ATTR_CLONE(NoDebug)
+DEF_SIMPLE_ATTR_CLONE(NoInline)
+DEF_SIMPLE_ATTR_CLONE(NoReturn)
DEF_SIMPLE_ATTR_CLONE(NoThrow)
-DEF_SIMPLE_ATTR_CLONE(Const)
+DEF_SIMPLE_ATTR_CLONE(ObjCException)
+DEF_SIMPLE_ATTR_CLONE(ObjCNSObject)
+DEF_SIMPLE_ATTR_CLONE(Override)
+DEF_SIMPLE_ATTR_CLONE(Packed)
DEF_SIMPLE_ATTR_CLONE(Pure)
-DEF_SIMPLE_ATTR_CLONE(FastCall)
DEF_SIMPLE_ATTR_CLONE(StdCall)
-DEF_SIMPLE_ATTR_CLONE(CDecl)
DEF_SIMPLE_ATTR_CLONE(TransparentUnion)
-DEF_SIMPLE_ATTR_CLONE(ObjCNSObject)
-DEF_SIMPLE_ATTR_CLONE(ObjCException)
-DEF_SIMPLE_ATTR_CLONE(NoDebug)
+DEF_SIMPLE_ATTR_CLONE(Unavailable)
+DEF_SIMPLE_ATTR_CLONE(Unused)
+DEF_SIMPLE_ATTR_CLONE(Used)
DEF_SIMPLE_ATTR_CLONE(WarnUnusedResult)
-DEF_SIMPLE_ATTR_CLONE(NoInline)
-DEF_SIMPLE_ATTR_CLONE(CFReturnsRetained)
-DEF_SIMPLE_ATTR_CLONE(NSReturnsRetained)
-DEF_SIMPLE_ATTR_CLONE(BaseCheck)
-DEF_SIMPLE_ATTR_CLONE(Hiding)
-DEF_SIMPLE_ATTR_CLONE(Override)
-DEF_SIMPLE_ATTR_CLONE(DLLImport)
-DEF_SIMPLE_ATTR_CLONE(DLLExport)
+DEF_SIMPLE_ATTR_CLONE(Weak)
+DEF_SIMPLE_ATTR_CLONE(WeakImport)
+DEF_SIMPLE_ATTR_CLONE(WeakRef)
DEF_SIMPLE_ATTR_CLONE(X86ForceAlignArgPointer)
Attr* PragmaPackAttr::clone(ASTContext &C) const {
@@ -139,6 +142,10 @@ Attr *IBOutletAttr::clone(ASTContext &C) const {
return ::new (C) IBOutletAttr;
}
+Attr *IBActionAttr::clone(ASTContext &C) const {
+ return ::new (C) IBActionAttr;
+}
+
Attr *GNUInlineAttr::clone(ASTContext &C) const {
return ::new (C) GNUInlineAttr;
}
@@ -190,5 +197,3 @@ Attr *ReqdWorkGroupSizeAttr::clone(ASTContext &C) const {
Attr *MSP430InterruptAttr::clone(ASTContext &C) const {
return ::new (C) MSP430InterruptAttr(Number);
}
-
-
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index 99f908caeab6..70f8ee4bca5e 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -90,6 +90,17 @@ bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) cons
return lookupInBases(&FindBaseClass, Base->getCanonicalDecl(), Paths);
}
+bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+ CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
+ /*DetectVirtual=*/false);
+
+ if (getCanonicalDecl() == Base->getCanonicalDecl())
+ return false;
+
+ Paths.setOrigin(const_cast<CXXRecordDecl*>(this));
+ return lookupInBases(&FindVirtualBaseClass, Base->getCanonicalDecl(), Paths);
+}
+
static bool BaseIsNot(const CXXRecordDecl *Base, void *OpaqueTarget) {
// OpaqueTarget is a CXXRecordDecl*.
return Base->getCanonicalDecl() != (const CXXRecordDecl*) OpaqueTarget;
@@ -140,18 +151,20 @@ bool CXXRecordDecl::forallBases(ForallBasesCallback *BaseMatches,
return AllMatches;
}
-bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
- void *UserData,
- CXXBasePaths &Paths) const {
+bool CXXBasePaths::lookupInBases(ASTContext &Context,
+ const CXXRecordDecl *Record,
+ CXXRecordDecl::BaseMatchesCallback *BaseMatches,
+ void *UserData) {
bool FoundPath = false;
// The access of the path down to this record.
- AccessSpecifier AccessToHere = Paths.ScratchPath.Access;
- bool IsFirstStep = Paths.ScratchPath.empty();
+ AccessSpecifier AccessToHere = ScratchPath.Access;
+ bool IsFirstStep = ScratchPath.empty();
- ASTContext &Context = getASTContext();
- for (base_class_const_iterator BaseSpec = bases_begin(),
- BaseSpecEnd = bases_end(); BaseSpec != BaseSpecEnd; ++BaseSpec) {
+ for (CXXRecordDecl::base_class_const_iterator BaseSpec = Record->bases_begin(),
+ BaseSpecEnd = Record->bases_end();
+ BaseSpec != BaseSpecEnd;
+ ++BaseSpec) {
// Find the record of the base class subobjects for this type.
QualType BaseType = Context.getCanonicalType(BaseSpec->getType())
.getUnqualifiedType();
@@ -167,31 +180,31 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
// Determine whether we need to visit this base class at all,
// updating the count of subobjects appropriately.
- std::pair<bool, unsigned>& Subobjects = Paths.ClassSubobjects[BaseType];
+ std::pair<bool, unsigned>& Subobjects = ClassSubobjects[BaseType];
bool VisitBase = true;
bool SetVirtual = false;
if (BaseSpec->isVirtual()) {
VisitBase = !Subobjects.first;
Subobjects.first = true;
- if (Paths.isDetectingVirtual() && Paths.DetectedVirtual == 0) {
+ if (isDetectingVirtual() && DetectedVirtual == 0) {
// If this is the first virtual we find, remember it. If it turns out
// there is no base path here, we'll reset it later.
- Paths.DetectedVirtual = BaseType->getAs<RecordType>();
+ DetectedVirtual = BaseType->getAs<RecordType>();
SetVirtual = true;
}
} else
++Subobjects.second;
- if (Paths.isRecordingPaths()) {
+ if (isRecordingPaths()) {
// Add this base specifier to the current path.
CXXBasePathElement Element;
Element.Base = &*BaseSpec;
- Element.Class = this;
+ Element.Class = Record;
if (BaseSpec->isVirtual())
Element.SubobjectNumber = 0;
else
Element.SubobjectNumber = Subobjects.second;
- Paths.ScratchPath.push_back(Element);
+ ScratchPath.push_back(Element);
// Calculate the "top-down" access to this base class.
// The spec actually describes this bottom-up, but top-down is
@@ -209,22 +222,22 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
// 3. Otherwise, overall access is determined by the most restrictive
// access in the sequence.
if (IsFirstStep)
- Paths.ScratchPath.Access = BaseSpec->getAccessSpecifier();
+ ScratchPath.Access = BaseSpec->getAccessSpecifier();
else
- Paths.ScratchPath.Access
- = MergeAccess(AccessToHere, BaseSpec->getAccessSpecifier());
+ ScratchPath.Access = CXXRecordDecl::MergeAccess(AccessToHere,
+ BaseSpec->getAccessSpecifier());
}
// Track whether there's a path involving this specific base.
bool FoundPathThroughBase = false;
- if (BaseMatches(BaseSpec, Paths.ScratchPath, UserData)) {
+ if (BaseMatches(BaseSpec, ScratchPath, UserData)) {
// We've found a path that terminates at this base.
FoundPath = FoundPathThroughBase = true;
- if (Paths.isRecordingPaths()) {
+ if (isRecordingPaths()) {
// We have a path. Make a copy of it before moving on.
- Paths.Paths.push_back(Paths.ScratchPath);
- } else if (!Paths.isFindingAmbiguities()) {
+ Paths.push_back(ScratchPath);
+ } else if (!isFindingAmbiguities()) {
// We found a path and we don't care about ambiguities;
// return immediately.
return FoundPath;
@@ -233,7 +246,7 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
CXXRecordDecl *BaseRecord
= cast<CXXRecordDecl>(BaseSpec->getType()->getAs<RecordType>()
->getDecl());
- if (BaseRecord->lookupInBases(BaseMatches, UserData, Paths)) {
+ if (lookupInBases(Context, BaseRecord, BaseMatches, UserData)) {
// C++ [class.member.lookup]p2:
// A member name f in one sub-object B hides a member name f in
// a sub-object A if A is a base class sub-object of B. Any
@@ -243,29 +256,96 @@ bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
// There is a path to a base class that meets the criteria. If we're
// not collecting paths or finding ambiguities, we're done.
FoundPath = FoundPathThroughBase = true;
- if (!Paths.isFindingAmbiguities())
+ if (!isFindingAmbiguities())
return FoundPath;
}
}
// Pop this base specifier off the current path (if we're
// collecting paths).
- if (Paths.isRecordingPaths()) {
- Paths.ScratchPath.pop_back();
+ if (isRecordingPaths()) {
+ ScratchPath.pop_back();
}
// If we set a virtual earlier, and this isn't a path, forget it again.
if (SetVirtual && !FoundPathThroughBase) {
- Paths.DetectedVirtual = 0;
+ DetectedVirtual = 0;
}
}
// Reset the scratch path access.
- Paths.ScratchPath.Access = AccessToHere;
+ ScratchPath.Access = AccessToHere;
return FoundPath;
}
+bool CXXRecordDecl::lookupInBases(BaseMatchesCallback *BaseMatches,
+ void *UserData,
+ CXXBasePaths &Paths) const {
+ // If we didn't find anything, report that.
+ if (!Paths.lookupInBases(getASTContext(), this, BaseMatches, UserData))
+ return false;
+
+ // If we're not recording paths or we won't ever find ambiguities,
+ // we're done.
+ if (!Paths.isRecordingPaths() || !Paths.isFindingAmbiguities())
+ return true;
+
+ // C++ [class.member.lookup]p6:
+ // When virtual base classes are used, a hidden declaration can be
+ // reached along a path through the sub-object lattice that does
+ // not pass through the hiding declaration. This is not an
+ // ambiguity. The identical use with nonvirtual base classes is an
+ // ambiguity; in that case there is no unique instance of the name
+ // that hides all the others.
+ //
+ // FIXME: This is an O(N^2) algorithm, but DPG doesn't see an easy
+ // way to make it any faster.
+ for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end();
+ P != PEnd; /* increment in loop */) {
+ bool Hidden = false;
+
+ for (CXXBasePath::iterator PE = P->begin(), PEEnd = P->end();
+ PE != PEEnd && !Hidden; ++PE) {
+ if (PE->Base->isVirtual()) {
+ CXXRecordDecl *VBase = 0;
+ if (const RecordType *Record = PE->Base->getType()->getAs<RecordType>())
+ VBase = cast<CXXRecordDecl>(Record->getDecl());
+ if (!VBase)
+ break;
+
+ // The declaration(s) we found along this path were found in a
+ // subobject of a virtual base. Check whether this virtual
+ // base is a subobject of any other path; if so, then the
+        // declaration(s) in this path are hidden by that path.
+ for (CXXBasePaths::paths_iterator HidingP = Paths.begin(),
+ HidingPEnd = Paths.end();
+ HidingP != HidingPEnd;
+ ++HidingP) {
+ CXXRecordDecl *HidingClass = 0;
+ if (const RecordType *Record
+ = HidingP->back().Base->getType()->getAs<RecordType>())
+ HidingClass = cast<CXXRecordDecl>(Record->getDecl());
+ if (!HidingClass)
+ break;
+
+ if (HidingClass->isVirtuallyDerivedFrom(VBase)) {
+ Hidden = true;
+ break;
+ }
+ }
+ }
+ }
+
+ if (Hidden)
+ P = Paths.Paths.erase(P);
+ else
+ ++P;
+ }
+
+ return true;
+}
+
bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *BaseRecord) {
@@ -275,6 +355,16 @@ bool CXXRecordDecl::FindBaseClass(const CXXBaseSpecifier *Specifier,
->getCanonicalDecl() == BaseRecord;
}
+bool CXXRecordDecl::FindVirtualBaseClass(const CXXBaseSpecifier *Specifier,
+ CXXBasePath &Path,
+ void *BaseRecord) {
+ assert(((Decl *)BaseRecord)->getCanonicalDecl() == BaseRecord &&
+         "User data for FindVirtualBaseClass is not canonical!");
+ return Specifier->isVirtual() &&
+ Specifier->getType()->getAs<RecordType>()->getDecl()
+ ->getCanonicalDecl() == BaseRecord;
+}
+
bool CXXRecordDecl::FindTagMember(const CXXBaseSpecifier *Specifier,
CXXBasePath &Path,
void *Name) {
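A minimal sketch of how the refactored lookup above is driven from the caller side, assuming the CXXBasePaths constructor and the FindBaseClass callback keep the signatures used in this hunk (illustrative only, not part of the diff):

    // Collect every derived-to-base path, letting the new wrapper prune paths
    // hidden through virtual bases per C++ [class.member.lookup]p6.
    CXXBasePaths Paths(/*FindAmbiguities=*/true, /*RecordPaths=*/true,
                       /*DetectVirtual=*/true);
    if (Derived->lookupInBases(&CXXRecordDecl::FindBaseClass,
                               Base->getCanonicalDecl(), Paths)) {
      for (CXXBasePaths::paths_iterator P = Paths.begin(), PEnd = Paths.end();
           P != PEnd; ++P) {
        // *P is one surviving CXXBasePath terminating at Base.
      }
    }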
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 5acb82f31a29..23f5fba437a5 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -680,12 +680,12 @@ const Expr *VarDecl::getAnyInitializer(const VarDecl *&D) const {
}
bool VarDecl::isOutOfLine() const {
- if (!isStaticDataMember())
- return false;
-
if (Decl::isOutOfLine())
return true;
-
+
+ if (!isStaticDataMember())
+ return false;
+
// If this static data member was instantiated from a static data member of
// a class template, check whether that static data member was defined
// out-of-line.
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 863a1cbd03c4..47b7e7efb60e 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -194,6 +194,24 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
+bool Decl::isUsed() const {
+ if (Used)
+ return true;
+
+ // Check for used attribute.
+ if (hasAttr<UsedAttr>())
+ return true;
+
+ // Check redeclarations for used attribute.
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->hasAttr<UsedAttr>() || I->Used)
+ return true;
+ }
+
+ return false;
+}
+
+
unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
switch (DeclKind) {
case Function:
@@ -418,7 +436,8 @@ void Decl::CheckAccessDeclContext() const {
// FunctionDecl)
// 4. the context is not a record
if (isa<TranslationUnitDecl>(this) ||
- !isa<CXXRecordDecl>(getDeclContext()))
+ !isa<CXXRecordDecl>(getDeclContext()) ||
+ isInvalidDecl())
return;
assert(Access != AS_none &&
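A short illustration of the isUsed() change above: a declaration now counts as used if any of its redeclarations has the Used bit set or carries the used attribute (sample input only):

    // With the loop over redecls_begin()/redecls_end(), isUsed() is true for
    // both declarations of f, because the second one carries UsedAttr.
    static void f();
    static void f() __attribute__((used));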
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index b0569d68015f..9b693af5bc92 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -94,9 +94,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// Keep track of inherited vbases for this base class.
const CXXBaseSpecifier *Base = Bases[i];
QualType BaseType = Base->getType();
- // Skip template types.
- // FIXME. This means that this list must be rebuilt during template
- // instantiation.
+ // Skip dependent types; we can't do any checking on them now.
if (BaseType->isDependentType())
continue;
CXXRecordDecl *BaseClassDecl
@@ -143,6 +141,9 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
data().NumVBases = vbaseCount;
for (int i = 0; i < vbaseCount; i++) {
QualType QT = UniqueVbases[i]->getType();
+ // Skip dependent types; we can't do any checking on them now.
+ if (QT->isDependentType())
+ continue;
CXXRecordDecl *VBaseClassDecl
= cast<CXXRecordDecl>(QT->getAs<RecordType>()->getDecl());
data().VBases[i] =
@@ -543,14 +544,14 @@ CXXRecordDecl::getDefaultConstructor(ASTContext &Context) {
return 0;
}
-CXXDestructorDecl *CXXRecordDecl::getDestructor(ASTContext &Context) {
+CXXDestructorDecl *CXXRecordDecl::getDestructor(ASTContext &Context) const {
QualType ClassType = Context.getTypeDeclType(this);
DeclarationName Name
= Context.DeclarationNames.getCXXDestructorName(
Context.getCanonicalType(ClassType));
- DeclContext::lookup_iterator I, E;
+ DeclContext::lookup_const_iterator I, E;
llvm::tie(I, E) = lookup(Name);
assert(I != E && "Did not find a destructor!");
@@ -573,7 +574,13 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
if (getOverloadedOperator() != OO_Delete &&
getOverloadedOperator() != OO_Array_Delete)
return false;
-
+
+ // C++ [basic.stc.dynamic.deallocation]p2:
+ // A template instance is never a usual deallocation function,
+ // regardless of its signature.
+ if (getPrimaryTemplate())
+ return false;
+
// C++ [basic.stc.dynamic.deallocation]p2:
// If a class T has a member deallocation function named operator delete
// with exactly one parameter, then that function is a usual (non-placement)
@@ -604,51 +611,20 @@ bool CXXMethodDecl::isUsualDeallocationFunction() const {
return true;
}
-typedef llvm::DenseMap<const CXXMethodDecl*,
- std::vector<const CXXMethodDecl *> *>
- OverriddenMethodsMapTy;
-
-// FIXME: We hate static data. This doesn't survive PCH saving/loading, and
-// the vtable building code uses it at CG time.
-static OverriddenMethodsMapTy *OverriddenMethods = 0;
-
void CXXMethodDecl::addOverriddenMethod(const CXXMethodDecl *MD) {
assert(MD->isCanonicalDecl() && "Method is not canonical!");
assert(!MD->getParent()->isDependentContext() &&
"Can't add an overridden method to a class template!");
- // FIXME: The CXXMethodDecl dtor needs to remove and free the entry.
-
- if (!OverriddenMethods)
- OverriddenMethods = new OverriddenMethodsMapTy();
-
- std::vector<const CXXMethodDecl *> *&Methods = (*OverriddenMethods)[this];
- if (!Methods)
- Methods = new std::vector<const CXXMethodDecl *>;
-
- Methods->push_back(MD);
+ getASTContext().addOverriddenMethod(this, MD);
}
CXXMethodDecl::method_iterator CXXMethodDecl::begin_overridden_methods() const {
- if (!OverriddenMethods)
- return 0;
-
- OverriddenMethodsMapTy::iterator it = OverriddenMethods->find(this);
- if (it == OverriddenMethods->end() || it->second->empty())
- return 0;
-
- return &(*it->second)[0];
+ return getASTContext().overridden_methods_begin(this);
}
CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
- if (!OverriddenMethods)
- return 0;
-
- OverriddenMethodsMapTy::iterator it = OverriddenMethods->find(this);
- if (it == OverriddenMethods->end() || it->second->empty())
- return 0;
-
- return &(*it->second)[0] + it->second->size();
+ return getASTContext().overridden_methods_end(this);
}
QualType CXXMethodDecl::getThisType(ASTContext &C) const {
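The per-method override lists move from a file-static DenseMap into the ASTContext; the context-side storage is defined elsewhere in this commit, so the shape below is only an assumed sketch of what addOverriddenMethod and overridden_methods_begin/end sit on top of:

    // Hypothetical ASTContext members, for orientation only.
    typedef llvm::SmallVector<const CXXMethodDecl *, 4> CXXMethodVector;
    llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector> OverriddenMethods;

    void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
                                         const CXXMethodDecl *Overridden) {
      OverriddenMethods[Method].push_back(Overridden);
    }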
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 131e098d0467..8decafa35e34 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -202,6 +202,17 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
setProtocolList(ProtocolRefs.data(), NumProtoRefs, ProtocolLocs.data(), C);
}
+/// getClassExtension - Find class extension of the given class.
+// FIXME: This can be sped up if need be.
+ObjCCategoryDecl* ObjCInterfaceDecl::getClassExtension() const {
+ const ObjCInterfaceDecl* ClassDecl = this;
+ for (ObjCCategoryDecl *CDecl = ClassDecl->getCategoryList(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
ObjCInterfaceDecl *&clsDeclared) {
ObjCInterfaceDecl* ClassDecl = this;
@@ -210,6 +221,12 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
clsDeclared = ClassDecl;
return I;
}
+ if (const ObjCCategoryDecl *CDecl = ClassDecl->getClassExtension())
+ if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) {
+ clsDeclared = ClassDecl;
+ return I;
+ }
+
ClassDecl = ClassDecl->getSuperClass();
}
return NULL;
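With the hunk above, lookupInstanceVariable also consults the class extension at each level of the superclass chain. A caller-side sketch (ClassDecl and IvarName stand in for whatever the caller already has):

    // Finds an ivar declared either in the @interface body or in its class
    // extension (the anonymous category returned by getClassExtension()).
    ObjCInterfaceDecl *ClsDeclared = 0;
    if (ObjCIvarDecl *Ivar =
            ClassDecl->lookupInstanceVariable(IvarName, ClsDeclared)) {
      // ClsDeclared names the interface whose body or extension declares Ivar.
    }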
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 4cb0aa4560de..a2914bc6bf4e 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -1120,8 +1120,15 @@ Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
return LV_Valid;
break;
case ImplicitCastExprClass:
- return cast<ImplicitCastExpr>(this)->isLvalueCast()? LV_Valid
- : LV_InvalidExpression;
+ if (cast<ImplicitCastExpr>(this)->isLvalueCast())
+ return LV_Valid;
+
+ // If this is a conversion to a class temporary, make a note of
+ // that.
+ if (Ctx.getLangOptions().CPlusPlus && getType()->isRecordType())
+ return LV_ClassTemporary;
+
+ break;
case ParenExprClass: // C99 6.5.1p5
return cast<ParenExpr>(this)->getSubExpr()->isLvalue(Ctx);
case BinaryOperatorClass:
@@ -1171,9 +1178,15 @@ Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
if (ReturnType->isLValueReferenceType())
return LV_Valid;
+ // If the function is returning a class temporary, make a note of
+ // that.
+ if (Ctx.getLangOptions().CPlusPlus && ReturnType->isRecordType())
+ return LV_ClassTemporary;
+
break;
}
case CompoundLiteralExprClass: // C99 6.5.2.5p5
+ // FIXME: Is this what we want in C++?
return LV_Valid;
case ChooseExprClass:
// __builtin_choose_expr is an lvalue if the selected operand is.
@@ -1207,6 +1220,13 @@ Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
if (cast<ExplicitCastExpr>(this)->getTypeAsWritten()->
isLValueReferenceType())
return LV_Valid;
+
+ // If this is a conversion to a class temporary, make a note of
+ // that.
+ if (Ctx.getLangOptions().CPlusPlus &&
+ cast<ExplicitCastExpr>(this)->getTypeAsWritten()->isRecordType())
+ return LV_ClassTemporary;
+
break;
case CXXTypeidExprClass:
// C++ 5.2.8p1: The result of a typeid expression is an lvalue of ...
@@ -1253,6 +1273,11 @@ Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
return LV_Valid;
break;
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXZeroInitValueExprClass:
+ return LV_ClassTemporary;
+
default:
break;
}
@@ -1296,6 +1321,8 @@ Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
case LV_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
case LV_SubObjCPropertyGetterSetting:
return MLV_SubObjCPropertyGetterSetting;
+ case LV_ClassTemporary:
+ return MLV_ClassTemporary;
}
// The following is illegal:
@@ -1655,11 +1682,18 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
return NoDiag();
if (Ctx.getLangOptions().CPlusPlus &&
E->getType().getCVRQualifiers() == Qualifiers::Const) {
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
+
+ // Parameter variables are never constants. Without this check,
+ // getAnyInitializer() can find a default argument, which leads
+ // to chaos.
+ if (isa<ParmVarDecl>(D))
+ return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
+
// C++ 7.1.5.1p2
// A variable of non-volatile const-qualified integral or enumeration
// type initialized by an ICE can be used in ICEs.
- if (const VarDecl *Dcl =
- dyn_cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl())) {
+ if (const VarDecl *Dcl = dyn_cast<VarDecl>(D)) {
Qualifiers Quals = Ctx.getCanonicalType(Dcl->getType()).getQualifiers();
if (Quals.hasVolatile() || !Quals.hasConst())
return ICEDiag(2, cast<DeclRefExpr>(E)->getLocation());
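A source-level illustration of the new ParmVarDecl guard in CheckICE (sample input, not part of the diff): without it, the reference to n below could pick up the default argument through getAnyInitializer() and be misclassified as an integral constant expression.

    void f(const int n = 4) {
      int buf[n];  // n is a const parameter; with the check above it is no
                   // longer treated as an ICE, so this stays a VLA/error.
    }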
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index f4b8333dd3ae..b9a4ee6e4d2c 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/TypeLoc.h"
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -121,6 +122,27 @@ Stmt::child_iterator CXXPseudoDestructorExpr::child_end() {
return &Base + 1;
}
+PseudoDestructorTypeStorage::PseudoDestructorTypeStorage(TypeSourceInfo *Info)
+ : Type(Info)
+{
+ Location = Info->getTypeLoc().getSourceRange().getBegin();
+}
+
+QualType CXXPseudoDestructorExpr::getDestroyedType() const {
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ return TInfo->getType();
+
+ return QualType();
+}
+
+SourceRange CXXPseudoDestructorExpr::getSourceRange() const {
+ SourceLocation End = DestroyedType.getLocation();
+ if (TypeSourceInfo *TInfo = DestroyedType.getTypeSourceInfo())
+ End = TInfo->getTypeLoc().getSourceRange().getEnd();
+ return SourceRange(Base->getLocStart(), End);
+}
+
+
// UnresolvedLookupExpr
UnresolvedLookupExpr *
UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 1a44cd02d9c1..e03669246e88 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -1560,6 +1560,31 @@ static bool EvaluateFloat(const Expr* E, APFloat& Result, EvalInfo &Info) {
return FloatExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
+static bool TryEvaluateBuiltinNaN(ASTContext &Context,
+ QualType ResultTy,
+ const Expr *Arg,
+ bool SNaN,
+ llvm::APFloat &Result) {
+ const StringLiteral *S = dyn_cast<StringLiteral>(Arg->IgnoreParenCasts());
+ if (!S) return false;
+
+ const llvm::fltSemantics &Sem = Context.getFloatTypeSemantics(ResultTy);
+
+ llvm::APInt fill;
+
+ // Treat empty strings as if they were zero.
+ if (S->getString().empty())
+ fill = llvm::APInt(32, 0);
+ else if (S->getString().getAsInteger(0, fill))
+ return false;
+
+ if (SNaN)
+ Result = llvm::APFloat::getSNaN(Sem, false, &fill);
+ else
+ Result = llvm::APFloat::getQNaN(Sem, false, &fill);
+ return true;
+}
+
bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
switch (E->isBuiltinCall(Info.Ctx)) {
default: return false;
@@ -1575,24 +1600,19 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
return true;
}
+ case Builtin::BI__builtin_nans:
+ case Builtin::BI__builtin_nansf:
+ case Builtin::BI__builtin_nansl:
+ return TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ true, Result);
+
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
- if (const StringLiteral *S =
- dyn_cast<StringLiteral>(E->getArg(0)->IgnoreParenCasts())) {
- if (!S->isWide()) {
- const llvm::fltSemantics &Sem =
- Info.Ctx.getFloatTypeSemantics(E->getType());
- unsigned Type = 0;
- if (!S->getString().empty() && S->getString().getAsInteger(0, Type))
- return false;
- Result = llvm::APFloat::getNaN(Sem, false, Type);
- return true;
- }
- }
- return false;
+ return TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
+ false, Result);
case Builtin::BI__builtin_fabs:
case Builtin::BI__builtin_fabsf:
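What TryEvaluateBuiltinNaN accepts in practice (illustrative user code): the payload string is parsed with getAsInteger(0, ...), so an empty string folds to a zero payload, any base-prefixed integer becomes the payload, and anything else leaves the call unfolded.

    double q0 = __builtin_nan("");        // folds: quiet NaN, zero payload
    double q1 = __builtin_nan("0x7f");    // folds: quiet NaN, payload 0x7f
    float  s1 = __builtin_nansf("1");     // folds: signaling NaN, payload 1
    double nf = __builtin_nan("junk");    // not folded: not an integer string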
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 50acd15fde05..10c5089f2253 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -487,6 +487,7 @@ void ASTRecordLayoutBuilder::Layout(const RecordDecl *D) {
FinishLayout();
}
+// FIXME. Impl is no longer needed.
void ASTRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D,
const ObjCImplementationDecl *Impl) {
if (ObjCInterfaceDecl *SD = D->getSuperClass()) {
@@ -508,10 +509,9 @@ void ASTRecordLayoutBuilder::Layout(const ObjCInterfaceDecl *D,
if (const AlignedAttr *AA = D->getAttr<AlignedAttr>())
UpdateAlignment(AA->getMaxAlignment());
-
// Layout each ivar sequentially.
llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- Ctx.ShallowCollectObjCIvars(D, Ivars, Impl);
+ Ctx.ShallowCollectObjCIvars(D, Ivars);
for (unsigned i = 0, e = Ivars.size(); i != e; ++i)
LayoutField(Ivars[i]);
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 3ae306d3c7ac..da43878628fb 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -1120,7 +1120,10 @@ void StmtPrinter::VisitCXXPseudoDestructorExpr(CXXPseudoDestructorExpr *E) {
E->getQualifier()->print(OS, Policy);
std::string TypeS;
- E->getDestroyedType().getAsStringInternal(TypeS, Policy);
+ if (IdentifierInfo *II = E->getDestroyedTypeIdentifier())
+ OS << II->getName();
+ else
+ E->getDestroyedType().getAsStringInternal(TypeS, Policy);
OS << TypeS;
}
diff --git a/lib/Analysis/AnalysisContext.cpp b/lib/Analysis/AnalysisContext.cpp
index ccd5088f2ec7..d9933e85cb91 100644
--- a/lib/Analysis/AnalysisContext.cpp
+++ b/lib/Analysis/AnalysisContext.cpp
@@ -186,6 +186,18 @@ LocationContext::getStackFrameForDeclContext(const DeclContext *DC) const {
return NULL;
}
+bool LocationContext::isParentOf(const LocationContext *LC) const {
+ do {
+ const LocationContext *Parent = LC->getParent();
+ if (Parent == this)
+ return true;
+ else
+ LC = Parent;
+ } while (LC);
+
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// Lazily generated map to query the external variables referenced by a Block.
//===----------------------------------------------------------------------===//
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 5b8aeae5d1c5..a4a021f20b21 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -38,11 +38,16 @@ static SourceLocation GetEndLoc(Decl* D) {
class AddStmtChoice {
public:
- enum Kind { NotAlwaysAdd = 0, AlwaysAdd, AlwaysAddAsLValue };
-public:
- AddStmtChoice(Kind kind) : k(kind) {}
- bool alwaysAdd() const { return k != NotAlwaysAdd; }
- bool asLValue() const { return k == AlwaysAddAsLValue; }
+ enum Kind { NotAlwaysAdd = 0,
+ AlwaysAdd = 1,
+ AsLValueNotAlwaysAdd = 2,
+ AlwaysAddAsLValue = 3 };
+
+ AddStmtChoice(Kind kind) : k(kind) {}
+
+ bool alwaysAdd() const { return (unsigned)k & 0x1; }
+ bool asLValue() const { return k >= AlwaysAddAsLValue; }
+
private:
Kind k;
};
@@ -589,7 +594,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
AddEHEdge = false;
if (!NoReturn && !AddEHEdge)
- return VisitStmt(C, asc);
+ return VisitStmt(C, AddStmtChoice::AlwaysAdd);
if (Block) {
Succ = Block;
@@ -771,18 +776,10 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(Decl* D) {
Expr *Init = VD->getInit();
if (Init) {
- // Optimization: Don't create separate block-level statements for literals.
- switch (Init->getStmtClass()) {
- case Stmt::IntegerLiteralClass:
- case Stmt::CharacterLiteralClass:
- case Stmt::StringLiteralClass:
- break;
- default:
- Block = addStmt(Init,
- VD->getType()->isReferenceType()
- ? AddStmtChoice::AlwaysAddAsLValue
- : AddStmtChoice::AlwaysAdd);
- }
+ AddStmtChoice::Kind k =
+ VD->getType()->isReferenceType() ? AddStmtChoice::AsLValueNotAlwaysAdd
+ : AddStmtChoice::NotAlwaysAdd;
+ Visit(Init, AddStmtChoice(k));
}
// If the type of VD is a VLA, then we must process its size expressions.
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index 4f8259e44939..b4e0e242485b 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -5,6 +5,7 @@ add_clang_library(clangAnalysis
CFG.cpp
LiveVariables.cpp
PrintfFormatString.cpp
+ ReachableCode.cpp
UninitializedValues.cpp
)
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 94ed75286dee..01a36a1074e8 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -86,6 +86,12 @@ LiveVariables::LiveVariables(AnalysisContext &AC) {
RegisterDecls R(getAnalysisData());
cfg.VisitBlockStmts(R);
+
+ // Register all parameters even if they didn't occur in the function body.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(AC.getDecl()))
+ for (FunctionDecl::param_const_iterator PI = FD->param_begin(),
+ PE = FD->param_end(); PI != PE; ++PI)
+ getAnalysisData().Register(*PI);
}
//===----------------------------------------------------------------------===//
@@ -274,9 +280,16 @@ void TransferFuncs::VisitDeclStmt(DeclStmt* DS) {
for (DeclStmt::decl_iterator DI=DS->decl_begin(), DE = DS->decl_end();
DI != DE; ++DI)
if (VarDecl* VD = dyn_cast<VarDecl>(*DI)) {
- // The initializer is evaluated after the variable comes into scope.
+ // Update liveness information by killing the VarDecl.
+ unsigned bit = AD.getIdx(VD);
+ LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
+
+ // The initializer is evaluated after the variable comes into scope, but
+ // before the DeclStmt (which binds the value to the variable).
// Since this is a reverse dataflow analysis, we must evaluate the
- // transfer function for this expression first.
+ // transfer function for this expression after the DeclStmt. If the
+ // initializer references the variable (which is bad) then we extend
+ // its liveness.
if (Expr* Init = VD->getInit())
Visit(Init);
@@ -286,10 +299,6 @@ void TransferFuncs::VisitDeclStmt(DeclStmt* DS) {
StmtIterator E;
for (; I != E; ++I) Visit(*I);
}
-
- // Update liveness information by killing the VarDecl.
- unsigned bit = AD.getIdx(VD);
- LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
}
}
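The reordered kill in VisitDeclStmt matters exactly when an initializer mentions the variable being declared (sample input only): because this is a backward analysis, the kill is now processed before the initializer is visited, so the self-reference keeps x live across its own declaration instead of being silently killed.

    int x = x + 1;  // the 'x' on the right re-extends x's liveness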
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index 55abd1077150..46acc8a377bf 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -15,10 +15,12 @@
#include "clang/Analysis/Analyses/PrintfFormatString.h"
#include "clang/AST/ASTContext.h"
-using clang::analyze_printf::FormatSpecifier;
-using clang::analyze_printf::OptionalAmount;
using clang::analyze_printf::ArgTypeResult;
+using clang::analyze_printf::FormatSpecifier;
using clang::analyze_printf::FormatStringHandler;
+using clang::analyze_printf::OptionalAmount;
+using clang::analyze_printf::PositionContext;
+
using namespace clang;
namespace {
@@ -66,24 +68,19 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) {
const char *I = Beg;
UpdateOnReturn <const char*> UpdateBeg(Beg, I);
- bool foundDigits = false;
unsigned accumulator = 0;
+ bool hasDigits = false;
for ( ; I != E; ++I) {
char c = *I;
if (c >= '0' && c <= '9') {
- foundDigits = true;
+ hasDigits = true;
accumulator += (accumulator * 10) + (c - '0');
continue;
}
- if (foundDigits)
- return OptionalAmount(accumulator, Beg);
-
- if (c == '*') {
- ++I;
- return OptionalAmount(OptionalAmount::Arg, Beg);
- }
+ if (hasDigits)
+ return OptionalAmount(OptionalAmount::Constant, accumulator, Beg);
break;
}
@@ -91,9 +88,129 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) {
return OptionalAmount();
}
+static OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
+ unsigned &argIndex) {
+ if (*Beg == '*') {
+ ++Beg;
+ return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+static OptionalAmount ParsePositionAmount(FormatStringHandler &H,
+ const char *Start,
+ const char *&Beg, const char *E,
+ PositionContext p) {
+ if (*Beg == '*') {
+ const char *I = Beg + 1;
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (Amt.getHowSpecified() == OptionalAmount::NotSpecified) {
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ if (I== E) {
+    if (I == E) {
+ H.HandleIncompleteFormatSpecifier(Start, E - Start);
+ return OptionalAmount(false);
+ }
+
+ assert(Amt.getHowSpecified() == OptionalAmount::Constant);
+
+ if (*I == '$') {
+ // Special case: '*0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Beg, I - Beg + 1);
+ return OptionalAmount(false);
+ }
+
+ const char *Tmp = Beg;
+ Beg = ++I;
+
+ return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
+ Tmp);
+ }
+
+ H.HandleInvalidPosition(Beg, I - Beg, p);
+ return OptionalAmount(false);
+ }
+
+ return ParseAmount(Beg, E);
+}
+
+static bool ParsePrecision(FormatStringHandler &H, FormatSpecifier &FS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ if (argIndex) {
+ FS.setPrecision(ParseNonPositionAmount(Beg, E, *argIndex));
+ }
+ else {
+ const OptionalAmount Amt = ParsePositionAmount(H, Start, Beg, E,
+ analyze_printf::PrecisionPos);
+ if (Amt.isInvalid())
+ return true;
+ FS.setPrecision(Amt);
+ }
+ return false;
+}
+
+static bool ParseFieldWidth(FormatStringHandler &H, FormatSpecifier &FS,
+ const char *Start, const char *&Beg, const char *E,
+ unsigned *argIndex) {
+ // FIXME: Support negative field widths.
+ if (argIndex) {
+ FS.setFieldWidth(ParseNonPositionAmount(Beg, E, *argIndex));
+ }
+ else {
+ const OptionalAmount Amt = ParsePositionAmount(H, Start, Beg, E,
+ analyze_printf::FieldWidthPos);
+ if (Amt.isInvalid())
+ return true;
+ FS.setFieldWidth(Amt);
+ }
+ return false;
+}
+
+
+static bool ParseArgPosition(FormatStringHandler &H,
+ FormatSpecifier &FS, const char *Start,
+ const char *&Beg, const char *E) {
+
+ using namespace clang::analyze_printf;
+ const char *I = Beg;
+
+ const OptionalAmount &Amt = ParseAmount(I, E);
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteFormatSpecifier(Start, E - Start);
+ return true;
+ }
+
+ if (Amt.getHowSpecified() == OptionalAmount::Constant && *(I++) == '$') {
+ // Special case: '%0$', since this is an easy mistake.
+ if (Amt.getConstantAmount() == 0) {
+ H.HandleZeroPosition(Start, I - Start);
+ return true;
+ }
+
+ FS.setArgIndex(Amt.getConstantAmount() - 1);
+ FS.setUsesPositionalArg();
+ // Update the caller's pointer if we decided to consume
+ // these characters.
+ Beg = I;
+ return false;
+ }
+
+ return false;
+}
+
static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
const char *&Beg,
- const char *E) {
+ const char *E,
+ unsigned &argIndex) {
using namespace clang::analyze_printf;
@@ -126,6 +243,14 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
}
FormatSpecifier FS;
+ if (ParseArgPosition(H, FS, Start, I, E))
+ return true;
+
+ if (I == E) {
+ // No more characters left?
+ H.HandleIncompleteFormatSpecifier(Start, E - Start);
+ return true;
+ }
// Look for flags (if any).
bool hasMore = true;
@@ -149,7 +274,9 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
}
// Look for the field width (if any).
- FS.setFieldWidth(ParseAmount(I, E));
+ if (ParseFieldWidth(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? 0 : &argIndex))
+ return true;
if (I == E) {
// No more characters left?
@@ -165,7 +292,9 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
return true;
}
- FS.setPrecision(ParseAmount(I, E));
+ if (ParsePrecision(H, FS, Start, I, E,
+ FS.usesPositionalArg() ? 0 : &argIndex))
+ return true;
if (I == E) {
// No more characters left?
@@ -214,44 +343,53 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
default:
break;
// C99: 7.19.6.1 (section 8).
- case 'd': k = ConversionSpecifier::dArg; break;
- case 'i': k = ConversionSpecifier::iArg; break;
- case 'o': k = ConversionSpecifier::oArg; break;
- case 'u': k = ConversionSpecifier::uArg; break;
- case 'x': k = ConversionSpecifier::xArg; break;
- case 'X': k = ConversionSpecifier::XArg; break;
- case 'f': k = ConversionSpecifier::fArg; break;
- case 'F': k = ConversionSpecifier::FArg; break;
- case 'e': k = ConversionSpecifier::eArg; break;
+ case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'A': k = ConversionSpecifier::AArg; break;
case 'E': k = ConversionSpecifier::EArg; break;
- case 'g': k = ConversionSpecifier::gArg; break;
+ case 'F': k = ConversionSpecifier::FArg; break;
case 'G': k = ConversionSpecifier::GArg; break;
+ case 'X': k = ConversionSpecifier::XArg; break;
case 'a': k = ConversionSpecifier::aArg; break;
- case 'A': k = ConversionSpecifier::AArg; break;
case 'c': k = ConversionSpecifier::IntAsCharArg; break;
- case 's': k = ConversionSpecifier::CStrArg; break;
- case 'p': k = ConversionSpecifier::VoidPtrArg; break;
+ case 'd': k = ConversionSpecifier::dArg; break;
+ case 'e': k = ConversionSpecifier::eArg; break;
+ case 'f': k = ConversionSpecifier::fArg; break;
+ case 'g': k = ConversionSpecifier::gArg; break;
+ case 'i': k = ConversionSpecifier::iArg; break;
case 'n': k = ConversionSpecifier::OutIntPtrArg; break;
- case '%': k = ConversionSpecifier::PercentArg; break;
+ case 'o': k = ConversionSpecifier::oArg; break;
+ case 'p': k = ConversionSpecifier::VoidPtrArg; break;
+ case 's': k = ConversionSpecifier::CStrArg; break;
+ case 'u': k = ConversionSpecifier::uArg; break;
+ case 'x': k = ConversionSpecifier::xArg; break;
+ // Mac OS X (unicode) specific
+ case 'C': k = ConversionSpecifier::CArg; break;
+ case 'S': k = ConversionSpecifier::UnicodeStrArg; break;
// Objective-C.
case '@': k = ConversionSpecifier::ObjCObjArg; break;
// Glibc specific.
case 'm': k = ConversionSpecifier::PrintErrno; break;
}
- FS.setConversionSpecifier(ConversionSpecifier(conversionPosition, k));
+ ConversionSpecifier CS(conversionPosition, k);
+ FS.setConversionSpecifier(CS);
+ if (CS.consumesDataArgument() && !FS.usesPositionalArg())
+ FS.setArgIndex(argIndex++);
if (k == ConversionSpecifier::InvalidSpecifier) {
- H.HandleInvalidConversionSpecifier(FS, Beg, I - Beg);
- return false; // Keep processing format specifiers.
+ // Assume the conversion takes one argument.
+ return !H.HandleInvalidConversionSpecifier(FS, Beg, I - Beg);
}
return FormatSpecifierResult(Start, FS);
}
bool clang::analyze_printf::ParseFormatString(FormatStringHandler &H,
const char *I, const char *E) {
+
+ unsigned argIndex = 0;
+
// Keep looking for a format specifier until we have exhausted the string.
while (I != E) {
- const FormatSpecifierResult &FSR = ParseFormatSpecifier(H, I, E);
+ const FormatSpecifierResult &FSR = ParseFormatSpecifier(H, I, E, argIndex);
// Did a fail-stop error of any kind occur when parsing the specifier?
// If so, don't do any more processing.
if (FSR.shouldStop())
@@ -345,8 +483,10 @@ bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
if (!PT)
return false;
- QualType pointeeTy = PT->getPointeeType();
- return pointeeTy == C.WCharTy;
+ QualType pointeeTy =
+ C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+
+ return pointeeTy == C.getWCharType();
}
return false;
@@ -359,7 +499,7 @@ QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
if (K == CStrTy)
return C.getPointerType(C.CharTy);
if (K == WCStrTy)
- return C.getPointerType(C.WCharTy);
+ return C.getPointerType(C.getWCharType());
if (K == ObjCPointerTy)
return C.ObjCBuiltinIdTy;
@@ -426,9 +566,17 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const {
return Ctx.DoubleTy;
}
- if (CS.getKind() == ConversionSpecifier::CStrArg)
- return ArgTypeResult(LM == AsWideChar ? ArgTypeResult::WCStrTy
- : ArgTypeResult::CStrTy);
+ switch (CS.getKind()) {
+ case ConversionSpecifier::CStrArg:
+ return ArgTypeResult(LM == AsWideChar ? ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
+ case ConversionSpecifier::UnicodeStrArg:
+ // FIXME: This appears to be Mac OS X specific.
+ return ArgTypeResult::WCStrTy;
+ case ConversionSpecifier::CArg:
+ return Ctx.WCharTy;
+ default:
+ break;
+ }
// FIXME: Handle other cases.
return ArgTypeResult();
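The argIndex plumbing added above distinguishes sequential from POSIX positional addressing; example format strings the parser now understands (inputs only):

    printf("%d %s\n", 42, "hi");       // sequential: argIndex hands out 0, then 1
    printf("%2$s %1$d\n", 42, "hi");   // positional: '%2$' and '%1$' map to
                                       // internal arg indices 1 and 0
    printf("%1$*2$d\n", 42, 8);        // '*2$' is a positional field width,
                                       // handled by ParsePositionAmount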
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
new file mode 100644
index 000000000000..f959e5cd43e1
--- /dev/null
+++ b/lib/Analysis/ReachableCode.cpp
@@ -0,0 +1,278 @@
+//=- ReachableCodePathInsensitive.cpp ---------------------------*- C++ --*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a flow-sensitive, path-insensitive analysis for
+// determining reachable blocks within a CFG.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/StmtCXX.h"
+#include "clang/Analysis/Analyses/ReachableCode.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Basic/SourceManager.h"
+
+using namespace clang;
+
+static SourceLocation GetUnreachableLoc(const CFGBlock &b, SourceRange &R1,
+ SourceRange &R2) {
+ const Stmt *S = 0;
+ unsigned sn = 0;
+ R1 = R2 = SourceRange();
+
+top:
+ if (sn < b.size())
+ S = b[sn].getStmt();
+ else if (b.getTerminator())
+ S = b.getTerminator();
+ else
+ return SourceLocation();
+
+ switch (S->getStmtClass()) {
+ case Expr::BinaryOperatorClass: {
+ const BinaryOperator *BO = cast<BinaryOperator>(S);
+ if (BO->getOpcode() == BinaryOperator::Comma) {
+ if (sn+1 < b.size())
+ return b[sn+1].getStmt()->getLocStart();
+ const CFGBlock *n = &b;
+ while (1) {
+ if (n->getTerminator())
+ return n->getTerminator()->getLocStart();
+ if (n->succ_size() != 1)
+ return SourceLocation();
+ n = n[0].succ_begin()[0];
+ if (n->pred_size() != 1)
+ return SourceLocation();
+ if (!n->empty())
+ return n[0][0].getStmt()->getLocStart();
+ }
+ }
+ R1 = BO->getLHS()->getSourceRange();
+ R2 = BO->getRHS()->getSourceRange();
+ return BO->getOperatorLoc();
+ }
+ case Expr::UnaryOperatorClass: {
+ const UnaryOperator *UO = cast<UnaryOperator>(S);
+ R1 = UO->getSubExpr()->getSourceRange();
+ return UO->getOperatorLoc();
+ }
+ case Expr::CompoundAssignOperatorClass: {
+ const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
+ R1 = CAO->getLHS()->getSourceRange();
+ R2 = CAO->getRHS()->getSourceRange();
+ return CAO->getOperatorLoc();
+ }
+ case Expr::ConditionalOperatorClass: {
+ const ConditionalOperator *CO = cast<ConditionalOperator>(S);
+ return CO->getQuestionLoc();
+ }
+ case Expr::MemberExprClass: {
+ const MemberExpr *ME = cast<MemberExpr>(S);
+ R1 = ME->getSourceRange();
+ return ME->getMemberLoc();
+ }
+ case Expr::ArraySubscriptExprClass: {
+ const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
+ R1 = ASE->getLHS()->getSourceRange();
+ R2 = ASE->getRHS()->getSourceRange();
+ return ASE->getRBracketLoc();
+ }
+ case Expr::CStyleCastExprClass: {
+ const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
+ R1 = CSC->getSubExpr()->getSourceRange();
+ return CSC->getLParenLoc();
+ }
+ case Expr::CXXFunctionalCastExprClass: {
+ const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
+ R1 = CE->getSubExpr()->getSourceRange();
+ return CE->getTypeBeginLoc();
+ }
+ case Expr::ImplicitCastExprClass:
+ ++sn;
+ goto top;
+ case Stmt::CXXTryStmtClass: {
+ return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
+ }
+ default: ;
+ }
+ R1 = S->getSourceRange();
+ return S->getLocStart();
+}
+
+static SourceLocation MarkLiveTop(const CFGBlock *Start,
+ llvm::BitVector &reachable,
+ SourceManager &SM) {
+
+  // Prep the worklist.
+ llvm::SmallVector<const CFGBlock*, 32> WL;
+ WL.push_back(Start);
+
+ SourceRange R1, R2;
+ SourceLocation top = GetUnreachableLoc(*Start, R1, R2);
+
+ bool FromMainFile = false;
+ bool FromSystemHeader = false;
+ bool TopValid = false;
+
+ if (top.isValid()) {
+ FromMainFile = SM.isFromMainFile(top);
+ FromSystemHeader = SM.isInSystemHeader(top);
+ TopValid = true;
+ }
+
+ // Solve
+ while (!WL.empty()) {
+ const CFGBlock *item = WL.back();
+ WL.pop_back();
+
+ SourceLocation c = GetUnreachableLoc(*item, R1, R2);
+ if (c.isValid()
+ && (!TopValid
+ || (SM.isFromMainFile(c) && !FromMainFile)
+ || (FromSystemHeader && !SM.isInSystemHeader(c))
+ || SM.isBeforeInTranslationUnit(c, top))) {
+ top = c;
+ FromMainFile = SM.isFromMainFile(top);
+ FromSystemHeader = SM.isInSystemHeader(top);
+ }
+
+ reachable.set(item->getBlockID());
+ for (CFGBlock::const_succ_iterator I=item->succ_begin(), E=item->succ_end();
+ I != E; ++I)
+ if (const CFGBlock *B = *I) {
+ unsigned blockID = B->getBlockID();
+ if (!reachable[blockID]) {
+ reachable.set(blockID);
+ WL.push_back(B);
+ }
+ }
+ }
+
+ return top;
+}
+
+static int LineCmp(const void *p1, const void *p2) {
+ SourceLocation *Line1 = (SourceLocation *)p1;
+ SourceLocation *Line2 = (SourceLocation *)p2;
+ return !(*Line1 < *Line2);
+}
+
+namespace {
+struct ErrLoc {
+ SourceLocation Loc;
+ SourceRange R1;
+ SourceRange R2;
+ ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2)
+ : Loc(l), R1(r1), R2(r2) { }
+};
+}
+namespace clang { namespace reachable_code {
+
+/// ScanReachableFromBlock - Mark all blocks reachable from Start.
+/// Returns the total number of blocks that were marked reachable.
+unsigned ScanReachableFromBlock(const CFGBlock &Start,
+ llvm::BitVector &Reachable) {
+ unsigned count = 0;
+ llvm::SmallVector<const CFGBlock*, 32> WL;
+
+ // Prep work queue
+ Reachable.set(Start.getBlockID());
+ ++count;
+ WL.push_back(&Start);
+
+ // Find the reachable blocks from 'Start'.
+ while (!WL.empty()) {
+ const CFGBlock *item = WL.back();
+ WL.pop_back();
+
+    // Look at the successors and mark them reachable.
+ for (CFGBlock::const_succ_iterator I=item->succ_begin(), E=item->succ_end();
+ I != E; ++I)
+ if (const CFGBlock *B = *I) {
+ unsigned blockID = B->getBlockID();
+ if (!Reachable[blockID]) {
+ Reachable.set(blockID);
+ ++count;
+ WL.push_back(B);
+ }
+ }
+ }
+ return count;
+}
+
+void FindUnreachableCode(AnalysisContext &AC, Callback &CB) {
+ CFG *cfg = AC.getCFG();
+ if (!cfg)
+ return;
+
+ // Scan for reachable blocks.
+ llvm::BitVector reachable(cfg->getNumBlockIDs());
+ unsigned numReachable = ScanReachableFromBlock(cfg->getEntry(), reachable);
+
+ // If there are no unreachable blocks, we're done.
+ if (numReachable == cfg->getNumBlockIDs())
+ return;
+
+ SourceRange R1, R2;
+
+ llvm::SmallVector<ErrLoc, 24> lines;
+ bool AddEHEdges = AC.getAddEHEdges();
+
+ // First, give warnings for blocks with no predecessors, as they
+ // can't be part of a loop.
+ for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
+ CFGBlock &b = **I;
+ if (!reachable[b.getBlockID()]) {
+ if (b.pred_empty()) {
+ if (!AddEHEdges && dyn_cast_or_null<CXXTryStmt>(b.getTerminator())) {
+ // When not adding EH edges from calls, catch clauses
+ // can otherwise seem dead. Avoid noting them as dead.
+ numReachable += ScanReachableFromBlock(b, reachable);
+ continue;
+ }
+ SourceLocation c = GetUnreachableLoc(b, R1, R2);
+ if (!c.isValid()) {
+ // Blocks without a location can't produce a warning, so don't mark
+ // reachable blocks from here as live.
+ reachable.set(b.getBlockID());
+ ++numReachable;
+ continue;
+ }
+ lines.push_back(ErrLoc(c, R1, R2));
+ // Avoid excessive errors by marking everything reachable from here
+ numReachable += ScanReachableFromBlock(b, reachable);
+ }
+ }
+ }
+
+ if (numReachable < cfg->getNumBlockIDs()) {
+ // And then give warnings for the tops of loops.
+ for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
+ CFGBlock &b = **I;
+ if (!reachable[b.getBlockID()])
+ // Avoid excessive errors by marking everything reachable from here
+ lines.push_back(ErrLoc(MarkLiveTop(&b, reachable,
+ AC.getASTContext().getSourceManager()),
+ SourceRange(), SourceRange()));
+ }
+ }
+
+ llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);
+
+ for (llvm::SmallVectorImpl<ErrLoc>::iterator I=lines.begin(), E=lines.end();
+ I != E; ++I)
+ if (I->Loc.isValid())
+ CB.HandleUnreachable(I->Loc, I->R1, I->R2);
+}
+
+}} // end namespace clang::reachable_code
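A sketch of consuming the new analysis; the Callback interface lives in Analysis/Analyses/ReachableCode.h, so the single HandleUnreachable hook below is inferred from the call at the end of FindUnreachableCode and is otherwise an assumption:

    namespace {
    class UnreachableReporter : public clang::reachable_code::Callback {
    public:
      virtual void HandleUnreachable(clang::SourceLocation L,
                                     clang::SourceRange R1,
                                     clang::SourceRange R2) {
        // Emit whatever "will never be executed" diagnostic the client wants,
        // using L as the primary location and R1/R2 as highlight ranges.
      }
    };
    } // end anonymous namespace

    // Given an AnalysisContext AC for the function being checked:
    //   UnreachableReporter CB;
    //   clang::reachable_code::FindUnreachableCode(AC, CB);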
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index bdc0e7c621f7..7a628642dc99 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -134,8 +134,12 @@ bool TransferFuncs::VisitDeclStmt(DeclStmt* S) {
for (DeclStmt::decl_iterator I=S->decl_begin(), E=S->decl_end(); I!=E; ++I) {
VarDecl *VD = dyn_cast<VarDecl>(*I);
if (VD && VD->isBlockVarDecl()) {
- if (Stmt* I = VD->getInit())
- V(VD,AD) = AD.FullUninitTaint ? V(cast<Expr>(I),AD) : Initialized;
+ if (Stmt* I = VD->getInit()) {
+ // Visit the subexpression to check for uses of uninitialized values,
+ // even if we don't propagate that value.
+ bool isSubExprUninit = Visit(I);
+ V(VD,AD) = AD.FullUninitTaint ? isSubExprUninit : Initialized;
+ }
else {
// Special case for declarations of array types. For things like:
//
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 094f7760a8ec..f7ec873e4c15 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -387,123 +387,6 @@ Diagnostic::getDiagnosticLevel(unsigned DiagID, unsigned DiagClass) const {
return Result;
}
-static bool ReadUnsigned(const char *&Memory, const char *MemoryEnd,
- unsigned &Value) {
- if (Memory + sizeof(unsigned) > MemoryEnd)
- return true;
-
- memmove(&Value, Memory, sizeof(unsigned));
- Memory += sizeof(unsigned);
- return false;
-}
-
-static bool ReadSourceLocation(FileManager &FM, SourceManager &SM,
- const char *&Memory, const char *MemoryEnd,
- SourceLocation &Location) {
- // Read the filename.
- unsigned FileNameLen = 0;
- if (ReadUnsigned(Memory, MemoryEnd, FileNameLen) ||
- Memory + FileNameLen > MemoryEnd)
- return true;
-
- llvm::StringRef FileName(Memory, FileNameLen);
- Memory += FileNameLen;
-
- // Read the line, column.
- unsigned Line = 0, Column = 0;
- if (ReadUnsigned(Memory, MemoryEnd, Line) ||
- ReadUnsigned(Memory, MemoryEnd, Column))
- return true;
-
- if (FileName.empty()) {
- Location = SourceLocation();
- return false;
- }
-
- const FileEntry *File = FM.getFile(FileName);
- if (!File)
- return true;
-
- // Make sure that this file has an entry in the source manager.
- if (!SM.hasFileInfo(File))
- SM.createFileID(File, SourceLocation(), SrcMgr::C_User);
-
- Location = SM.getLocation(File, Line, Column);
- return false;
-}
-
-DiagnosticBuilder Diagnostic::Deserialize(FileManager &FM, SourceManager &SM,
- const char *&Memory,
- const char *MemoryEnd) {
- if (Memory == MemoryEnd)
- return DiagnosticBuilder(0);
-
- // Read the severity level.
- unsigned Level = 0;
- if (ReadUnsigned(Memory, MemoryEnd, Level) || Level > Fatal)
- return DiagnosticBuilder(0);
-
- // Read the source location.
- SourceLocation Location;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Location))
- return DiagnosticBuilder(0);
-
- // Read the diagnostic text.
- if (Memory == MemoryEnd)
- return DiagnosticBuilder(0);
-
- unsigned MessageLen = 0;
- if (ReadUnsigned(Memory, MemoryEnd, MessageLen) ||
- Memory + MessageLen > MemoryEnd)
- return DiagnosticBuilder(0);
-
- llvm::StringRef Message(Memory, MessageLen);
- Memory += MessageLen;
-
- // At this point, we have enough information to form a diagnostic. Do so.
- unsigned DiagID = getCustomDiagID((enum Level)Level, Message);
- DiagnosticBuilder DB = Report(FullSourceLoc(Location, SM), DiagID);
- if (Memory == MemoryEnd)
- return DB;
-
- // Read the source ranges.
- unsigned NumSourceRanges = 0;
- if (ReadUnsigned(Memory, MemoryEnd, NumSourceRanges))
- return DB;
- for (unsigned I = 0; I != NumSourceRanges; ++I) {
- SourceLocation Begin, End;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, End))
- return DB;
-
- DB << SourceRange(Begin, End);
- }
-
- // Read the fix-it hints.
- unsigned NumFixIts = 0;
- if (ReadUnsigned(Memory, MemoryEnd, NumFixIts))
- return DB;
- for (unsigned I = 0; I != NumFixIts; ++I) {
- SourceLocation RemoveBegin, RemoveEnd, InsertionLoc;
- unsigned InsertLen = 0;
- if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, InsertionLoc) ||
- ReadUnsigned(Memory, MemoryEnd, InsertLen) ||
- Memory + InsertLen > MemoryEnd)
- return DB;
-
- CodeModificationHint Hint;
- Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd);
- Hint.InsertionLoc = InsertionLoc;
- Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
- Memory += InsertLen;
- DB << Hint;
- }
-
- return DB;
-}
-
struct WarningOption {
const char *Name;
const short *Members;
@@ -1036,6 +919,31 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
}
}
+StoredDiagnostic::StoredDiagnostic() { }
+
+StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
+ llvm::StringRef Message)
+ : Level(Level), Loc(), Message(Message) { }
+
+StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
+ const DiagnosticInfo &Info)
+ : Level(Level), Loc(Info.getLocation())
+{
+ llvm::SmallString<64> Message;
+ Info.FormatDiagnostic(Message);
+ this->Message.assign(Message.begin(), Message.end());
+
+ Ranges.reserve(Info.getNumRanges());
+ for (unsigned I = 0, N = Info.getNumRanges(); I != N; ++I)
+ Ranges.push_back(Info.getRange(I));
+
+ FixIts.reserve(Info.getNumCodeModificationHints());
+ for (unsigned I = 0, N = Info.getNumCodeModificationHints(); I != N; ++I)
+ FixIts.push_back(Info.getCodeModificationHint(I));
+}
+
+StoredDiagnostic::~StoredDiagnostic() { }
+
static void WriteUnsigned(llvm::raw_ostream &OS, unsigned Value) {
OS.write((const char *)&Value, sizeof(unsigned));
}
@@ -1065,27 +973,27 @@ static void WriteSourceLocation(llvm::raw_ostream &OS,
WriteUnsigned(OS, SM->getColumnNumber(Decomposed.first, Decomposed.second));
}
-void DiagnosticInfo::Serialize(Diagnostic::Level DiagLevel,
- llvm::raw_ostream &OS) const {
+void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
SourceManager *SM = 0;
if (getLocation().isValid())
SM = &const_cast<SourceManager &>(getLocation().getManager());
+ // Write a short header to help identify diagnostics.
+ OS << (char)0x06 << (char)0x07;
+
// Write the diagnostic level and location.
- WriteUnsigned(OS, (unsigned)DiagLevel);
+ WriteUnsigned(OS, (unsigned)Level);
WriteSourceLocation(OS, SM, getLocation());
// Write the diagnostic message.
llvm::SmallString<64> Message;
- FormatDiagnostic(Message);
- WriteString(OS, Message);
+ WriteString(OS, getMessage());
// Count the number of ranges that don't point into macros, since
// only simple file ranges serialize well.
unsigned NumNonMacroRanges = 0;
- for (unsigned I = 0, N = getNumRanges(); I != N; ++I) {
- SourceRange R = getRange(I);
- if (R.getBegin().isMacroID() || R.getEnd().isMacroID())
+ for (range_iterator R = range_begin(), REnd = range_end(); R != REnd; ++R) {
+ if (R->getBegin().isMacroID() || R->getEnd().isMacroID())
continue;
++NumNonMacroRanges;
@@ -1094,44 +1002,185 @@ void DiagnosticInfo::Serialize(Diagnostic::Level DiagLevel,
// Write the ranges.
WriteUnsigned(OS, NumNonMacroRanges);
if (NumNonMacroRanges) {
- for (unsigned I = 0, N = getNumRanges(); I != N; ++I) {
- SourceRange R = getRange(I);
- if (R.getBegin().isMacroID() || R.getEnd().isMacroID())
+ for (range_iterator R = range_begin(), REnd = range_end(); R != REnd; ++R) {
+ if (R->getBegin().isMacroID() || R->getEnd().isMacroID())
continue;
- WriteSourceLocation(OS, SM, R.getBegin());
- WriteSourceLocation(OS, SM, R.getEnd());
+ WriteSourceLocation(OS, SM, R->getBegin());
+ WriteSourceLocation(OS, SM, R->getEnd());
}
}
// Determine if all of the fix-its involve rewrites with simple file
// locations (not in macro instantiations). If so, we can write
// fix-it information.
- unsigned NumFixIts = getNumCodeModificationHints();
- for (unsigned I = 0; I != NumFixIts; ++I) {
- const CodeModificationHint &Hint = getCodeModificationHint(I);
- if (Hint.RemoveRange.isValid() &&
- (Hint.RemoveRange.getBegin().isMacroID() ||
- Hint.RemoveRange.getEnd().isMacroID())) {
+ unsigned NumFixIts = 0;
+ for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
+ if (F->RemoveRange.isValid() &&
+ (F->RemoveRange.getBegin().isMacroID() ||
+ F->RemoveRange.getEnd().isMacroID())) {
NumFixIts = 0;
break;
}
- if (Hint.InsertionLoc.isValid() && Hint.InsertionLoc.isMacroID()) {
+ if (F->InsertionLoc.isValid() && F->InsertionLoc.isMacroID()) {
NumFixIts = 0;
break;
}
+
+ ++NumFixIts;
}
// Write the fix-its.
WriteUnsigned(OS, NumFixIts);
+ for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
+ WriteSourceLocation(OS, SM, F->RemoveRange.getBegin());
+ WriteSourceLocation(OS, SM, F->RemoveRange.getEnd());
+ WriteSourceLocation(OS, SM, F->InsertionLoc);
+ WriteString(OS, F->CodeToInsert);
+ }
+}
+
+static bool ReadUnsigned(const char *&Memory, const char *MemoryEnd,
+ unsigned &Value) {
+ if (Memory + sizeof(unsigned) > MemoryEnd)
+ return true;
+
+ memmove(&Value, Memory, sizeof(unsigned));
+ Memory += sizeof(unsigned);
+ return false;
+}
+
+static bool ReadSourceLocation(FileManager &FM, SourceManager &SM,
+ const char *&Memory, const char *MemoryEnd,
+ SourceLocation &Location) {
+ // Read the filename.
+ unsigned FileNameLen = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, FileNameLen) ||
+ Memory + FileNameLen > MemoryEnd)
+ return true;
+
+ llvm::StringRef FileName(Memory, FileNameLen);
+ Memory += FileNameLen;
+
+ // Read the line, column.
+ unsigned Line = 0, Column = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, Line) ||
+ ReadUnsigned(Memory, MemoryEnd, Column))
+ return true;
+
+ if (FileName.empty()) {
+ Location = SourceLocation();
+ return false;
+ }
+
+ const FileEntry *File = FM.getFile(FileName);
+ if (!File)
+ return true;
+
+ // Make sure that this file has an entry in the source manager.
+ if (!SM.hasFileInfo(File))
+ SM.createFileID(File, SourceLocation(), SrcMgr::C_User);
+
+ Location = SM.getLocation(File, Line, Column);
+ return false;
+}
+
+StoredDiagnostic
+StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
+ const char *&Memory, const char *MemoryEnd) {
+ while (true) {
+ if (Memory == MemoryEnd)
+ return StoredDiagnostic();
+
+ if (*Memory != 0x06) {
+ ++Memory;
+ continue;
+ }
+
+ ++Memory;
+ if (Memory == MemoryEnd)
+ return StoredDiagnostic();
+
+ if (*Memory != 0x07) {
+ ++Memory;
+ continue;
+ }
+
+ // We found the header. We're done.
+ ++Memory;
+ break;
+ }
+
+ // Read the severity level.
+ unsigned Level = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, Level) || Level > Diagnostic::Fatal)
+ return StoredDiagnostic();
+
+ // Read the source location.
+ SourceLocation Location;
+ if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Location))
+ return StoredDiagnostic();
+
+ // Read the diagnostic text.
+ if (Memory == MemoryEnd)
+ return StoredDiagnostic();
+
+ unsigned MessageLen = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, MessageLen) ||
+ Memory + MessageLen > MemoryEnd)
+ return StoredDiagnostic();
+
+ llvm::StringRef Message(Memory, MessageLen);
+ Memory += MessageLen;
+
+
+ // At this point, we have enough information to form a diagnostic. Do so.
+ StoredDiagnostic Diag;
+ Diag.Level = (Diagnostic::Level)Level;
+ Diag.Loc = FullSourceLoc(Location, SM);
+ Diag.Message = Message;
+ if (Memory == MemoryEnd)
+ return Diag;
+
+ // Read the source ranges.
+ unsigned NumSourceRanges = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, NumSourceRanges))
+ return Diag;
+ for (unsigned I = 0; I != NumSourceRanges; ++I) {
+ SourceLocation Begin, End;
+ if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) ||
+ ReadSourceLocation(FM, SM, Memory, MemoryEnd, End))
+ return Diag;
+
+ Diag.Ranges.push_back(SourceRange(Begin, End));
+ }
+
+ // Read the fix-it hints.
+ unsigned NumFixIts = 0;
+ if (ReadUnsigned(Memory, MemoryEnd, NumFixIts))
+ return Diag;
for (unsigned I = 0; I != NumFixIts; ++I) {
- const CodeModificationHint &Hint = getCodeModificationHint(I);
- WriteSourceLocation(OS, SM, Hint.RemoveRange.getBegin());
- WriteSourceLocation(OS, SM, Hint.RemoveRange.getEnd());
- WriteSourceLocation(OS, SM, Hint.InsertionLoc);
- WriteString(OS, Hint.CodeToInsert);
+ SourceLocation RemoveBegin, RemoveEnd, InsertionLoc;
+ unsigned InsertLen = 0;
+ if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) ||
+ ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) ||
+ ReadSourceLocation(FM, SM, Memory, MemoryEnd, InsertionLoc) ||
+ ReadUnsigned(Memory, MemoryEnd, InsertLen) ||
+ Memory + InsertLen > MemoryEnd) {
+ Diag.FixIts.clear();
+ return Diag;
+ }
+
+ CodeModificationHint Hint;
+ Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd);
+ Hint.InsertionLoc = InsertionLoc;
+ Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
+ Memory += InsertLen;
+ Diag.FixIts.push_back(Hint);
}
+
+ return Diag;
}
/// IncludeInDiagnosticCounts - This method (whose default implementation
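For reference, the byte stream produced by StoredDiagnostic::Serialize, reconstructed from the writes above and the matching reads in Deserialize (every "unsigned" is written raw via WriteUnsigned, i.e. in host byte order):

    // 0x06 0x07                                     -- two-byte header
    // unsigned Level                                -- Diagnostic::Level
    // source location: unsigned FileNameLen, FileName bytes,
    //                  unsigned Line, unsigned Column
    // unsigned MessageLen, Message bytes
    // unsigned NumNonMacroRanges, then per range: two source locations
    // unsigned NumFixIts, then per fix-it: RemoveRange begin/end and
    //                  InsertionLoc source locations,
    //                  unsigned InsertLen, CodeToInsert bytes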
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index b91671ad17b1..0c22de7bddb1 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -980,20 +980,6 @@ SourceLocation SourceManager::getLocation(const FileEntry *SourceFile,
if (Content->SourceLineCache == 0)
ComputeLineNumbers(Content, ContentCacheAlloc);
- if (Line > Content->NumLines)
- return SourceLocation();
-
- unsigned FilePos = Content->SourceLineCache[Line - 1];
- const char *Buf = Content->getBuffer()->getBufferStart() + FilePos;
- unsigned BufLength = Content->getBuffer()->getBufferEnd() - Buf;
- unsigned i = 0;
-
- // Check that the given column is valid.
- while (i < BufLength-1 && i < Col-1 && Buf[i] != '\n' && Buf[i] != '\r')
- ++i;
- if (i < Col-1)
- return SourceLocation();
-
// Find the first file ID that corresponds to the given file.
FileID FirstFID;
@@ -1020,6 +1006,24 @@ SourceLocation SourceManager::getLocation(const FileEntry *SourceFile,
if (FirstFID.isInvalid())
return SourceLocation();
+ if (Line > Content->NumLines) {
+ unsigned Size = Content->getBuffer()->getBufferSize();
+ if (Size > 0)
+ --Size;
+ return getLocForStartOfFile(FirstFID).getFileLocWithOffset(Size);
+ }
+
+ unsigned FilePos = Content->SourceLineCache[Line - 1];
+ const char *Buf = Content->getBuffer()->getBufferStart() + FilePos;
+ unsigned BufLength = Content->getBuffer()->getBufferEnd() - Buf;
+ unsigned i = 0;
+
+ // Check that the given column is valid.
+ while (i < BufLength-1 && i < Col-1 && Buf[i] != '\n' && Buf[i] != '\r')
+ ++i;
+ if (i < Col-1)
+ return getLocForStartOfFile(FirstFID).getFileLocWithOffset(FilePos + i);
+
return getLocForStartOfFile(FirstFID).getFileLocWithOffset(FilePos + Col - 1);
}
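The practical effect of the reordering above (caller-side sketch; SM and File are whatever the caller already holds): out-of-range requests now clamp to a real location instead of returning an invalid one.

    SourceLocation A = SM.getLocation(File, 100000, 1);  // clamps to the last
                                                         // byte of the file
    SourceLocation B = SM.getLocation(File, 10, 9999);   // clamps to the end
                                                         // of line 10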
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index c1cd96e361ed..b8fe53599b52 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -436,11 +436,13 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// Target identification.
Builder.defineMacro("__ppc__");
Builder.defineMacro("_ARCH_PPC");
+ Builder.defineMacro("__powerpc__");
Builder.defineMacro("__POWERPC__");
if (PointerWidth == 64) {
Builder.defineMacro("_ARCH_PPC64");
Builder.defineMacro("_LP64");
Builder.defineMacro("__LP64__");
+ Builder.defineMacro("__powerpc64__");
Builder.defineMacro("__ppc64__");
} else {
Builder.defineMacro("__ppc__");
@@ -571,9 +573,12 @@ void PPCTargetInfo::getGCCRegAliases(const GCCRegAlias *&Aliases,
namespace {
class PPC32TargetInfo : public PPCTargetInfo {
public:
- PPC32TargetInfo(const std::string& triple) : PPCTargetInfo(triple) {
+ PPC32TargetInfo(const std::string &triple) : PPCTargetInfo(triple) {
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
"i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
+
+ if (getTriple().getOS() == llvm::Triple::FreeBSD)
+ this->SizeType = TargetInfo::UnsignedInt;
}
};
} // end anonymous namespace.
@@ -1919,13 +1924,39 @@ namespace {
namespace {
class MipsTargetInfo : public TargetInfo {
+ std::string ABI, CPU;
static const TargetInfo::GCCRegAlias GCCRegAliases[];
static const char * const GCCRegNames[];
public:
- MipsTargetInfo(const std::string& triple) : TargetInfo(triple) {
+ MipsTargetInfo(const std::string& triple) : TargetInfo(triple), ABI("o32") {
DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:32-i16:16:32-i32:32:32-"
"i64:32:64-f32:32:32-f64:64:64-v64:64:64-n32";
}
+ virtual const char *getABI() const { return ABI.c_str(); }
+ virtual bool setABI(const std::string &Name) {
+
+ if ((Name == "o32") || (Name == "eabi")) {
+ ABI = Name;
+ return true;
+ } else
+ return false;
+ }
+ virtual bool setCPU(const std::string &Name) {
+ CPU = Name;
+ return true;
+ }
+ void getDefaultFeatures(const std::string &CPU,
+ llvm::StringMap<bool> &Features) const {
+ Features[ABI] = true;
+ Features[CPU] = true;
+ }
+ virtual void getArchDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ if (ABI == "o32")
+ Builder.defineMacro("__mips_o32");
+ else if (ABI == "eabi")
+ Builder.defineMacro("__mips_eabi");
+ }
virtual void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
DefineStd(Builder, "mips", Opts);
@@ -1933,6 +1964,7 @@ public:
DefineStd(Builder, "MIPSEB", Opts);
Builder.defineMacro("_MIPSEB");
Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
}
virtual void getTargetBuiltins(const Builtin::Info *&Records,
unsigned &NumRecords) const {
@@ -2044,6 +2076,7 @@ void MipselTargetInfo::getTargetDefines(const LangOptions &Opts,
DefineStd(Builder, "MIPSEL", Opts);
Builder.defineMacro("_MIPSEL");
Builder.defineMacro("__REGISTER_PREFIX__", "");
+ getArchDefines(Opts, Builder);
}
} // end anonymous namespace.
@@ -2096,6 +2129,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::ppc:
if (os == llvm::Triple::Darwin)
return new DarwinTargetInfo<PPCTargetInfo>(T);
+ else if (os == llvm::Triple::FreeBSD)
+ return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
return new PPC32TargetInfo(T);
case llvm::Triple::ppc64:
@@ -2103,6 +2138,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new DarwinTargetInfo<PPC64TargetInfo>(T);
else if (os == llvm::Triple::Lv2)
return new PS3PPUTargetInfo<PPC64TargetInfo>(T);
+ else if (os == llvm::Triple::FreeBSD)
+ return new FreeBSDTargetInfo<PPC64TargetInfo>(T);
return new PPC64TargetInfo(T);
case llvm::Triple::sparc:
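For illustration, a minimal sketch of what the new PowerPC predefines let user code test for; the typedef name below is made up and nothing here is part of the upstream change:

// Illustrative sketch only -- relies on the __powerpc__ / __powerpc64__ macros defined above.
#if defined(__powerpc64__)
typedef unsigned long long machine_word;   // 64-bit PowerPC
#elif defined(__powerpc__)
typedef unsigned int machine_word;         // 32-bit PowerPC
#else
typedef unsigned long machine_word;        // non-PowerPC targets
#endif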
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 98cf42b8c3d9..4d903055b5b9 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -40,15 +40,15 @@ llvm::StringRef getClangRepositoryPath() {
}
std::string getClangRevision() {
-#ifndef SVN_REVISION
- // Subversion was not available at build time?
- return "";
-#else
- std::string revision;
- llvm::raw_string_ostream OS(revision);
- OS << strtol(SVN_REVISION, 0, 10);
- return revision;
+#ifdef SVN_REVISION
+ if (SVN_REVISION[0] != '\0') {
+ std::string revision;
+ llvm::raw_string_ostream OS(revision);
+ OS << strtol(SVN_REVISION, 0, 10);
+ return revision;
+ }
#endif
+ return "";
}
std::string getClangFullRepositoryVersion() {
diff --git a/lib/Checker/BasicStore.cpp b/lib/Checker/BasicStore.cpp
index 6ef29429f681..d93a6658c681 100644
--- a/lib/Checker/BasicStore.cpp
+++ b/lib/Checker/BasicStore.cpp
@@ -95,6 +95,8 @@ public:
const char *sep);
private:
+ SVal LazyRetrieve(Store store, const TypedRegion *R);
+
ASTContext& getContext() { return StateMgr.getContext(); }
};
@@ -126,6 +128,25 @@ static bool isHigherOrderRawPtr(QualType T, ASTContext &C) {
}
}
+SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
+ const VarRegion *VR = dyn_cast<VarRegion>(R);
+ if (!VR)
+ return UnknownVal();
+
+ const VarDecl *VD = VR->getDecl();
+ QualType T = VD->getType();
+
+ // Only handle simple types that we can symbolicate.
+ if (!SymbolManager::canSymbolicate(T) || !T->isScalarType())
+ return UnknownVal();
+
+ // Globals and parameters start with symbolic values.
+ // Local variables initially are undefined.
+ if (VR->hasGlobalsOrParametersStorage())
+ return ValMgr.getRegionValueSymbolVal(R);
+ return UndefinedVal();
+}
+
SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
if (isa<UnknownVal>(loc))
return UnknownVal();
@@ -142,11 +163,13 @@ SVal BasicStoreManager::Retrieve(Store store, Loc loc, QualType T) {
BindingsTy B = GetBindings(store);
BindingsTy::data_type *Val = B.lookup(R);
+ const TypedRegion *TR = cast<TypedRegion>(R);
- if (!Val)
- break;
+ if (Val)
+ return CastRetrievedVal(*Val, TR, T);
- return CastRetrievedVal(*Val, cast<TypedRegion>(R), T);
+ SVal V = LazyRetrieve(store, TR);
+ return V.isUnknownOrUndef() ? V : CastRetrievedVal(V, TR, T);
}
case loc::ConcreteIntKind:
@@ -319,7 +342,7 @@ Store BasicStoreManager::scanForIvars(Stmt *B, const Decl* SelfDecl,
const Expr *Base = IV->getBase()->IgnoreParenCasts();
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(Base)) {
if (DR->getDecl() == SelfDecl) {
- const MemRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
+ const ObjCIvarRegion *IVR = MRMgr.getObjCIvarRegion(IV->getDecl(),
SelfRegion);
SVal X = ValMgr.getRegionValueSymbolVal(IVR);
St = Bind(St, ValMgr.makeLoc(IVR), X);
@@ -351,10 +374,10 @@ Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
if (MD->getSelfDecl() == PD) {
// FIXME: Add type constraints (when they become available) to
// SelfRegion? (i.e., it implements MD->getClassInterface()).
- const MemRegion *VR = MRMgr.getVarRegion(PD, InitLoc);
+ const VarRegion *VR = MRMgr.getVarRegion(PD, InitLoc);
const MemRegion *SelfRegion =
- ValMgr.getRegionValueSymbolVal(VR).getAsRegion();
- assert(SelfRegion);
+ ValMgr.getRegionValueSymbolVal(VR).getAsRegion();
+ assert(SelfRegion);
St = Bind(St, ValMgr.makeLoc(VR), loc::MemRegionVal(SelfRegion));
// Scan the method for ivar references. While this requires an
// entire AST scan, the cost should not be high in practice.
@@ -362,21 +385,8 @@ Store BasicStoreManager::getInitialStore(const LocationContext *InitLoc) {
}
}
}
- else if (VarDecl* VD = dyn_cast<VarDecl>(ND)) {
- // Only handle simple types that we can symbolicate.
- if (!SymbolManager::canSymbolicate(VD->getType()))
- continue;
-
- // Initialize globals and parameters to symbolic values.
- // Initialize local variables to undefined.
- const MemRegion *R = ValMgr.getRegionManager().getVarRegion(VD, InitLoc);
- SVal X = UndefinedVal();
- if (R->hasGlobalsOrParametersStorage())
- X = ValMgr.getRegionValueSymbolVal(R);
-
- St = Bind(St, ValMgr.makeLoc(R), X);
- }
}
+
return St;
}
diff --git a/lib/Checker/BuiltinFunctionChecker.cpp b/lib/Checker/BuiltinFunctionChecker.cpp
index 8711492049c5..9c8b51657b26 100644
--- a/lib/Checker/BuiltinFunctionChecker.cpp
+++ b/lib/Checker/BuiltinFunctionChecker.cpp
@@ -14,7 +14,6 @@
#include "GRExprEngineInternalChecks.h"
#include "clang/Checker/PathSensitive/Checker.h"
#include "clang/Basic/Builtins.h"
-#include "llvm/ADT/StringSwitch.h"
using namespace clang;
diff --git a/lib/Checker/CFRefCount.cpp b/lib/Checker/CFRefCount.cpp
index 324916a6f6eb..ecb98a0496f0 100644
--- a/lib/Checker/CFRefCount.cpp
+++ b/lib/Checker/CFRefCount.cpp
@@ -12,26 +12,26 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/Checkers/LocalCheckers.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/SymbolManager.h"
-#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/Checkers/LocalCheckers.h"
#include "clang/Checker/DomainSpecific/CocoaConventions.h"
-#include "clang/AST/DeclObjC.h"
-#include "clang/AST/StmtVisitor.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/Checker/PathSensitive/SymbolManager.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
-#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/ImmutableList.h"
-#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include <stdarg.h>
using namespace clang;
@@ -1222,6 +1222,12 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
else if (FD->getAttr<CFReturnsRetainedAttr>()) {
Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
}
+ else if (FD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ }
+ else if (FD->getAttr<CFReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
}
else if (RetTy->getAs<PointerType>()) {
if (FD->getAttr<CFReturnsRetainedAttr>()) {
@@ -1244,6 +1250,10 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
Summ.setRetEffect(ObjCAllocRetE);
return;
}
+ if (MD->getAttr<NSReturnsNotRetainedAttr>()) {
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::ObjC));
+ return;
+ }
isTrackedLoc = true;
}
@@ -1251,8 +1261,12 @@ RetainSummaryManager::updateSummaryFromAnnotations(RetainSummary &Summ,
if (!isTrackedLoc)
isTrackedLoc = MD->getResultType()->getAs<PointerType>() != NULL;
- if (isTrackedLoc && MD->getAttr<CFReturnsRetainedAttr>())
- Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ if (isTrackedLoc) {
+ if (MD->getAttr<CFReturnsRetainedAttr>())
+ Summ.setRetEffect(RetEffect::MakeOwned(RetEffect::CF, true));
+ else if (MD->getAttr<CFReturnsNotRetainedAttr>())
+ Summ.setRetEffect(RetEffect::MakeNotOwned(RetEffect::CF));
+ }
}
RetainSummary*
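For illustration, a rough sketch of the source-level annotations that the new NSReturnsNotRetained/CFReturnsNotRetained handling reacts to; the typedef and the function name are stand-ins, not part of the upstream change:

// Illustrative sketch only -- a CF-style API annotated so the retain-count
// checker knows the caller does not own the result despite the "Copy" name.
typedef const void *CFTypeRef;              // stand-in for the CoreFoundation typedef

__attribute__((cf_returns_not_retained))
CFTypeRef MyCopyCachedValue(void);          // caller is not expected to release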
diff --git a/lib/Checker/CMakeLists.txt b/lib/Checker/CMakeLists.txt
index 7b21d08dcb71..c5bd2eb7cc2c 100644
--- a/lib/Checker/CMakeLists.txt
+++ b/lib/Checker/CMakeLists.txt
@@ -18,7 +18,6 @@ add_clang_library(clangChecker
CheckDeadStores.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
- CheckObjCUnusedIVars.cpp
CheckSecuritySyntaxOnly.cpp
CheckSizeofPointer.cpp
Checker.cpp
@@ -35,6 +34,7 @@ add_clang_library(clangChecker
GRExprEngineExperimentalChecks.cpp
GRState.cpp
LLVMConventionsChecker.cpp
+ MacOSXAPIChecker.cpp
MallocChecker.cpp
ManagerRegistry.cpp
MemRegion.cpp
@@ -42,6 +42,7 @@ add_clang_library(clangChecker
NSErrorChecker.cpp
NoReturnFunctionChecker.cpp
OSAtomicChecker.cpp
+ ObjCUnusedIVarsChecker.cpp
PathDiagnostic.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
@@ -62,6 +63,7 @@ add_clang_library(clangChecker
UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
+ UnixAPIChecker.cpp
VLASizeChecker.cpp
ValueManager.cpp
)
diff --git a/lib/Checker/CallInliner.cpp b/lib/Checker/CallInliner.cpp
index d94994b19437..88e1a05d1191 100644
--- a/lib/Checker/CallInliner.cpp
+++ b/lib/Checker/CallInliner.cpp
@@ -26,7 +26,6 @@ public:
}
virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
- virtual void EvalEndPath(GREndPathNodeBuilder &B,void *tag,GRExprEngine &Eng);
};
}
@@ -43,71 +42,13 @@ bool CallInliner::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
if (!FD)
return false;
- if (!FD->isThisDeclarationADefinition())
+ if (!FD->getBody(FD))
return false;
- GRStmtNodeBuilder &Builder = C.getNodeBuilder();
- // Make a new LocationContext.
- const StackFrameContext *LocCtx = C.getAnalysisManager().getStackFrame(FD,
- C.getPredecessor()->getLocationContext(), CE,
- Builder.getBlock(), Builder.getIndex());
-
- CFGBlock const *Entry = &(LocCtx->getCFG()->getEntry());
-
- assert (Entry->empty() && "Entry block must be empty.");
-
- assert (Entry->succ_size() == 1 && "Entry block must have 1 successor.");
-
- // Get the solitary successor.
- CFGBlock const *SuccB = *(Entry->succ_begin());
-
- // Construct an edge representing the starting location in the function.
- BlockEdge Loc(Entry, SuccB, LocCtx);
-
- state = C.getStoreManager().EnterStackFrame(state, LocCtx);
- // This is a hack. We really should not use the GRStmtNodeBuilder.
- bool isNew;
- GRExprEngine &Eng = C.getEngine();
- ExplodedNode *Pred = C.getPredecessor();
-
-
- ExplodedNode *SuccN = Eng.getGraph().getNode(Loc, state, &isNew);
- SuccN->addPredecessor(Pred, Eng.getGraph());
- C.getNodeBuilder().Deferred.erase(Pred);
-
- if (isNew)
- Builder.getWorkList()->Enqueue(SuccN);
-
- Builder.HasGeneratedNode = true;
+ // Now we have the definition of the callee, create a CallEnter node.
+ CallEnter Loc(CE, FD, C.getPredecessor()->getLocationContext());
+ C.addTransition(state, Loc);
return true;
}
-void CallInliner::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
- GRExprEngine &Eng) {
- const GRState *state = B.getState();
- ExplodedNode *Pred = B.getPredecessor();
- const StackFrameContext *LocCtx =
- cast<StackFrameContext>(Pred->getLocationContext());
-
- const Stmt *CE = LocCtx->getCallSite();
-
- // Check if this is the top level stack frame.
- if (!LocCtx->getParent())
- return;
-
- PostStmt NodeLoc(CE, LocCtx->getParent());
-
- bool isNew;
- ExplodedNode *Succ = Eng.getGraph().getNode(NodeLoc, state, &isNew);
- Succ->addPredecessor(Pred, Eng.getGraph());
-
- // When creating the new work list unit, increment the statement index to
- // point to the statement after the CallExpr.
- if (isNew)
- B.getWorkList().Enqueue(Succ,
- *const_cast<CFGBlock*>(LocCtx->getCallSiteBlock()),
- LocCtx->getIndex() + 1);
-
- B.HasGeneratedNode = true;
-}
diff --git a/lib/Checker/CheckDeadStores.cpp b/lib/Checker/CheckDeadStores.cpp
index 4a7ca705488a..31f9390e6228 100644
--- a/lib/Checker/CheckDeadStores.cpp
+++ b/lib/Checker/CheckDeadStores.cpp
@@ -142,7 +142,8 @@ public:
if (VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
// Special case: check for assigning null to a pointer.
// This is a common form of defensive programming.
- if (VD->getType()->isPointerType()) {
+ QualType T = VD->getType();
+ if (T->isPointerType() || T->isObjCObjectPointerType()) {
if (B->getRHS()->isNullPointerConstant(Ctx,
Expr::NPC_ValueDependentIsNull))
return;
diff --git a/lib/Checker/FlatStore.cpp b/lib/Checker/FlatStore.cpp
index dac66def5dc9..07a54fb48736 100644
--- a/lib/Checker/FlatStore.cpp
+++ b/lib/Checker/FlatStore.cpp
@@ -97,7 +97,7 @@ SVal FlatStoreManager::RetrieveRegionWithNoBinding(const MemRegion *R,
if (R->hasStackNonParametersStorage())
return UndefinedVal();
else
- return ValMgr.getRegionValueSymbolVal(R, T);
+ return ValMgr.getRegionValueSymbolVal(cast<TypedRegion>(R));
}
Store FlatStoreManager::Bind(Store store, Loc L, SVal val) {
diff --git a/lib/Checker/GRCoreEngine.cpp b/lib/Checker/GRCoreEngine.cpp
index d54b0777eda7..a9347d01641c 100644
--- a/lib/Checker/GRCoreEngine.cpp
+++ b/lib/Checker/GRCoreEngine.cpp
@@ -144,6 +144,14 @@ void GRCoreEngine::ProcessSwitch(GRSwitchNodeBuilder& Builder) {
SubEngine.ProcessSwitch(Builder);
}
+void GRCoreEngine::ProcessCallEnter(GRCallEnterNodeBuilder &Builder) {
+ SubEngine.ProcessCallEnter(Builder);
+}
+
+void GRCoreEngine::ProcessCallExit(GRCallExitNodeBuilder &Builder) {
+ SubEngine.ProcessCallExit(Builder);
+}
+
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
@@ -196,6 +204,15 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
assert (false && "BlockExit location never occur in forward analysis.");
break;
+ case ProgramPoint::CallEnterKind:
+ HandleCallEnter(cast<CallEnter>(Node->getLocation()), WU.getBlock(),
+ WU.getIndex(), Node);
+ break;
+
+ case ProgramPoint::CallExitKind:
+ HandleCallExit(cast<CallExit>(Node->getLocation()), Node);
+ break;
+
default:
assert(isa<PostStmt>(Node->getLocation()));
HandlePostStmt(cast<PostStmt>(Node->getLocation()), WU.getBlock(),
@@ -207,6 +224,17 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
return WList->hasWork();
}
+void GRCoreEngine::HandleCallEnter(const CallEnter &L, const CFGBlock *Block,
+ unsigned Index, ExplodedNode *Pred) {
+ GRCallEnterNodeBuilder Builder(*this, Pred, L.getCallExpr(), L.getCallee(),
+ Block, Index);
+ ProcessCallEnter(Builder);
+}
+
+void GRCoreEngine::HandleCallExit(const CallExit &L, ExplodedNode *Pred) {
+ GRCallExitNodeBuilder Builder(*this, Pred);
+ ProcessCallExit(Builder);
+}
void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
@@ -384,11 +412,11 @@ void GRCoreEngine::GenerateNode(const ProgramPoint& Loc,
GRStmtNodeBuilder::GRStmtNodeBuilder(CFGBlock* b, unsigned idx,
ExplodedNode* N, GRCoreEngine* e,
GRStateManager &mgr)
- : Eng(*e), B(*b), Idx(idx), Pred(N), LastNode(N), Mgr(mgr), Auditor(0),
+ : Eng(*e), B(*b), Idx(idx), Pred(N), Mgr(mgr), Auditor(0),
PurgingDeadSymbols(false), BuildSinks(false), HasGeneratedNode(false),
PointKind(ProgramPoint::PostStmtKind), Tag(0) {
Deferred.insert(N);
- CleanedState = getLastNode()->getState();
+ CleanedState = Pred->getState();
}
GRStmtNodeBuilder::~GRStmtNodeBuilder() {
@@ -400,6 +428,14 @@ GRStmtNodeBuilder::~GRStmtNodeBuilder() {
void GRStmtNodeBuilder::GenerateAutoTransition(ExplodedNode* N) {
assert (!N->isSink());
+ // Check if this node entered a callee.
+ if (isa<CallEnter>(N->getLocation())) {
+ // Still use the index of the CallExpr. It's needed to create the callee
+ // StackFrameContext.
+ Eng.WList->Enqueue(N, B, Idx);
+ return;
+ }
+
PostStmt Loc(getStmt(), N->getLocationContext());
if (Loc == N->getLocation()) {
@@ -462,11 +498,9 @@ GRStmtNodeBuilder::generateNodeInternal(const ProgramPoint &Loc,
if (IsNew) {
Deferred.insert(N);
- LastNode = N;
return N;
}
- LastNode = NULL;
return NULL;
}
@@ -576,7 +610,13 @@ GRSwitchNodeBuilder::generateDefaultCaseNode(const GRState* St, bool isSink) {
GREndPathNodeBuilder::~GREndPathNodeBuilder() {
// Auto-generate an EOP node if one has not been generated.
- if (!HasGeneratedNode) generateNode(Pred->State);
+ if (!HasGeneratedNode) {
+ // If we are in an inlined call, generate CallExit node.
+ if (Pred->getLocationContext()->getParent())
+ GenerateCallExitNode(Pred->State);
+ else
+ generateNode(Pred->State);
+ }
}
ExplodedNode*
@@ -597,3 +637,57 @@ GREndPathNodeBuilder::generateNode(const GRState* State, const void *tag,
return NULL;
}
+
+void GREndPathNodeBuilder::GenerateCallExitNode(const GRState *state) {
+ HasGeneratedNode = true;
+ // Create a CallExit node and enqueue it.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(Pred->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+ // Use the callee location context.

+ CallExit Loc(CE, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(Pred, *Eng.G);
+
+ if (isNew)
+ Eng.WList->Enqueue(Node);
+}
+
+
+void GRCallEnterNodeBuilder::GenerateNode(const GRState *state,
+ const LocationContext *LocCtx) {
+ // Get the callee entry block.
+ const CFGBlock *Entry = &(LocCtx->getCFG()->getEntry());
+ assert(Entry->empty());
+ assert(Entry->succ_size() == 1);
+
+ // Get the solitary successor.
+ const CFGBlock *SuccB = *(Entry->succ_begin());
+
+ // Construct an edge representing the starting location in the callee.
+ BlockEdge Loc(Entry, SuccB, LocCtx);
+
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
+
+ if (isNew)
+ Eng.WList->Enqueue(Node);
+}
+
+void GRCallExitNodeBuilder::GenerateNode(const GRState *state) {
+ // Get the callee's location context.
+ const StackFrameContext *LocCtx
+ = cast<StackFrameContext>(Pred->getLocationContext());
+
+ PostStmt Loc(LocCtx->getCallSite(), LocCtx->getParent());
+ bool isNew;
+ ExplodedNode *Node = Eng.G->getNode(Loc, state, &isNew);
+ Node->addPredecessor(const_cast<ExplodedNode*>(Pred), *Eng.G);
+ if (isNew)
+ Eng.WList->Enqueue(Node, *const_cast<CFGBlock*>(LocCtx->getCallSiteBlock()),
+ LocCtx->getIndex() + 1);
+}
diff --git a/lib/Checker/GRExprEngine.cpp b/lib/Checker/GRExprEngine.cpp
index 7f863193743b..ad229c7b8fbc 100644
--- a/lib/Checker/GRExprEngine.cpp
+++ b/lib/Checker/GRExprEngine.cpp
@@ -37,6 +37,15 @@ using llvm::dyn_cast_or_null;
using llvm::cast;
using llvm::APSInt;
+namespace {
+ // Trait class for recording returned expression in the state.
+ struct ReturnExpr {
+ static int TagInt;
+ typedef const Stmt *data_type;
+ };
+ int ReturnExpr::TagInt;
+}
+
//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
@@ -318,6 +327,8 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
RegisterNoReturnFunctionChecker(Eng);
RegisterBuiltinFunctionChecker(Eng);
RegisterOSAtomicChecker(Eng);
+ RegisterUnixAPIChecker(Eng);
+ RegisterMacOSXAPIChecker(Eng);
}
GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
@@ -458,7 +469,7 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
"Error evaluating statement");
Builder = &builder;
- EntryNode = builder.getLastNode();
+ EntryNode = builder.getBasePredecessor();
// Set up our simple checks.
if (BatchAuditor)
@@ -1288,6 +1299,37 @@ void GRExprEngine::ProcessSwitch(GRSwitchNodeBuilder& builder) {
if (defaultIsFeasible) builder.generateDefaultCaseNode(DefaultSt);
}
+void GRExprEngine::ProcessCallEnter(GRCallEnterNodeBuilder &B) {
+ const FunctionDecl *FD = B.getCallee();
+ const StackFrameContext *LocCtx = AMgr.getStackFrame(FD,
+ B.getLocationContext(),
+ B.getCallExpr(),
+ B.getBlock(),
+ B.getIndex());
+
+ const GRState *state = B.getState();
+ state = getStoreManager().EnterStackFrame(state, LocCtx);
+
+ B.GenerateNode(state, LocCtx);
+}
+
+void GRExprEngine::ProcessCallExit(GRCallExitNodeBuilder &B) {
+ const GRState *state = B.getState();
+ const ExplodedNode *Pred = B.getPredecessor();
+ const StackFrameContext *LocCtx =
+ cast<StackFrameContext>(Pred->getLocationContext());
+ const Stmt *CE = LocCtx->getCallSite();
+
+ // If the callee returns an expression, bind its value to CallExpr.
+ const Stmt *ReturnedExpr = state->get<ReturnExpr>();
+ if (ReturnedExpr) {
+ SVal RetVal = state->getSVal(ReturnedExpr);
+ state = state->BindExpr(CE, RetVal);
+ }
+
+ B.GenerateNode(state);
+}
+
//===----------------------------------------------------------------------===//
// Transfer functions: logical operations ('&&', '||').
//===----------------------------------------------------------------------===//
@@ -2316,8 +2358,9 @@ void GRExprEngine::VisitDeclStmt(DeclStmt *DS, ExplodedNode *Pred,
// Recover some path-sensitivity if a scalar value evaluated to
// UnknownVal.
- if (InitVal.isUnknown() ||
- !getConstraintManager().canReasonAbout(InitVal)) {
+ if ((InitVal.isUnknown() ||
+ !getConstraintManager().canReasonAbout(InitVal)) &&
+ !VD->getType()->isReferenceType()) {
InitVal = ValMgr.getConjuredSymbolVal(NULL, InitEx,
Builder->getCurrentBlockCount());
}
@@ -2855,10 +2898,19 @@ void GRExprEngine::VisitAsmStmtHelperInputs(AsmStmt* A,
void GRExprEngine::VisitReturnStmt(ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
-
ExplodedNodeSet Src;
if (Expr *RetE = RS->getRetValue()) {
- Visit(RetE, Pred, Src);
+ // Record the returned expression in the state.
+ {
+ static int Tag = 0;
+ SaveAndRestore<const void *> OldTag(Builder->Tag, &Tag);
+ const GRState *state = GetState(Pred);
+ state = state->set<ReturnExpr>(RetE);
+ Pred = Builder->generateNode(RetE, state, Pred);
+ }
+ // We may get a NULL Pred because we generated a cached node.
+ if (Pred)
+ Visit(RetE, Pred, Src);
}
else {
Src.Add(Pred);
@@ -3139,6 +3191,14 @@ struct DOTGraphTraits<ExplodedNode*> :
assert (false);
break;
+ case ProgramPoint::CallEnterKind:
+ Out << "CallEnter";
+ break;
+
+ case ProgramPoint::CallExitKind:
+ Out << "CallExit";
+ break;
+
default: {
if (StmtPoint *L = dyn_cast<StmtPoint>(&Loc)) {
const Stmt* S = L->getStmt();
diff --git a/lib/Checker/GRExprEngineInternalChecks.h b/lib/Checker/GRExprEngineInternalChecks.h
index 64a930d504cf..d1176001cac7 100644
--- a/lib/Checker/GRExprEngineInternalChecks.h
+++ b/lib/Checker/GRExprEngineInternalChecks.h
@@ -19,27 +19,33 @@ namespace clang {
class GRExprEngine;
+// Foundational checks that handle basic semantics.
void RegisterAdjustedReturnValueChecker(GRExprEngine &Eng);
+void RegisterArrayBoundChecker(GRExprEngine &Eng);
void RegisterAttrNonNullChecker(GRExprEngine &Eng);
+void RegisterBuiltinFunctionChecker(GRExprEngine &Eng);
+void RegisterCallAndMessageChecker(GRExprEngine &Eng);
+void RegisterCastToStructChecker(GRExprEngine &Eng);
void RegisterDereferenceChecker(GRExprEngine &Eng);
void RegisterDivZeroChecker(GRExprEngine &Eng);
+void RegisterFixedAddressChecker(GRExprEngine &Eng);
+void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
+void RegisterPointerArithChecker(GRExprEngine &Eng);
+void RegisterPointerSubChecker(GRExprEngine &Eng);
void RegisterReturnPointerRangeChecker(GRExprEngine &Eng);
-void RegisterReturnStackAddressChecker(GRExprEngine &Eng);
+void RegisterReturnStackAddressChecker(GRExprEngine &Eng);
void RegisterReturnUndefChecker(GRExprEngine &Eng);
-void RegisterVLASizeChecker(GRExprEngine &Eng);
-void RegisterPointerSubChecker(GRExprEngine &Eng);
-void RegisterPointerArithChecker(GRExprEngine &Eng);
-void RegisterFixedAddressChecker(GRExprEngine &Eng);
-void RegisterCastToStructChecker(GRExprEngine &Eng);
-void RegisterCallAndMessageChecker(GRExprEngine &Eng);
-void RegisterArrayBoundChecker(GRExprEngine &Eng);
-void RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng);
-void RegisterUndefinedAssignmentChecker(GRExprEngine &Eng);
void RegisterUndefBranchChecker(GRExprEngine &Eng);
void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng);
void RegisterUndefResultChecker(GRExprEngine &Eng);
-void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
-void RegisterBuiltinFunctionChecker(GRExprEngine &Eng);
+void RegisterUndefinedArraySubscriptChecker(GRExprEngine &Eng);
+void RegisterUndefinedAssignmentChecker(GRExprEngine &Eng);
+void RegisterVLASizeChecker(GRExprEngine &Eng);
+
+// API checks.
+void RegisterMacOSXAPIChecker(GRExprEngine &Eng);
void RegisterOSAtomicChecker(GRExprEngine &Eng);
+void RegisterUnixAPIChecker(GRExprEngine &Eng);
+
} // end clang namespace
#endif
diff --git a/lib/Checker/MacOSXAPIChecker.cpp b/lib/Checker/MacOSXAPIChecker.cpp
new file mode 100644
index 000000000000..9621e853bc48
--- /dev/null
+++ b/lib/Checker/MacOSXAPIChecker.cpp
@@ -0,0 +1,141 @@
+// MacOSXAPIChecker.cpp - Checks proper use of various MacOS X APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines MacOSXAPIChecker, which is an assortment of checks on calls
+// to various, widely used Mac OS X functions.
+//
+// FIXME: What's currently in BasicObjCFoundationChecks.cpp should be migrated
+// to here, using the new Checker interface.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang;
+
+namespace {
+class MacOSXAPIChecker : public CheckerVisitor<MacOSXAPIChecker> {
+ enum SubChecks {
+ DispatchOnce = 0,
+ DispatchOnceF,
+ NumChecks
+ };
+
+ BugType *BTypes[NumChecks];
+
+public:
+ MacOSXAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
+ static void *getTag() { static unsigned tag = 0; return &tag; }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+} //end anonymous namespace
+
+void clang::RegisterMacOSXAPIChecker(GRExprEngine &Eng) {
+ if (Eng.getContext().Target.getTriple().getVendor() == llvm::Triple::Apple)
+ Eng.registerCheck(new MacOSXAPIChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// dispatch_once and dispatch_once_f
+//===----------------------------------------------------------------------===//
+
+static void CheckDispatchOnce(CheckerContext &C, const CallExpr *CE,
+ BugType *&BT, const IdentifierInfo *FI) {
+
+ if (!BT) {
+ llvm::SmallString<128> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Improper use of '" << FI->getName() << '\'';
+ BT = new BugType(os.str(), "Mac OS X API");
+ }
+
+ if (CE->getNumArgs() < 1)
+ return;
+
+ // Check if the first argument is stack allocated. If so, issue a warning
+ // because that's likely to be bad news.
+ const GRState *state = C.getState();
+ const MemRegion *R = state->getSVal(CE->getArg(0)).getAsRegion();
+ if (!R || !isa<StackSpaceRegion>(R->getMemorySpace()))
+ return;
+
+ ExplodedNode *N = C.GenerateSink(state);
+ if (!N)
+ return;
+
+ llvm::SmallString<256> S;
+ llvm::raw_svector_ostream os(S);
+ os << "Call to '" << FI->getName() << "' uses";
+ if (const VarRegion *VR = dyn_cast<VarRegion>(R))
+ os << " the local variable '" << VR->getDecl()->getName() << '\'';
+ else
+ os << " stack allocated memory";
+ os << " for the predicate value. Using such transient memory for "
+ "the predicate is potentially dangerous.";
+ if (isa<VarRegion>(R) && isa<StackLocalsSpaceRegion>(R->getMemorySpace()))
+ os << " Perhaps you intended to declare the variable as 'static'?";
+
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, os.str(), N);
+ report->addRange(CE->getArg(0)->getSourceRange());
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+typedef void (*SubChecker)(CheckerContext &C, const CallExpr *CE, BugType *&BT,
+ const IdentifierInfo *FI);
+namespace {
+ class SubCheck {
+ SubChecker SC;
+ BugType **BT;
+ public:
+ SubCheck(SubChecker sc, BugType *& bt) : SC(sc), BT(&bt) {}
+ SubCheck() : SC(NULL), BT(NULL) {}
+
+ void run(CheckerContext &C, const CallExpr *CE,
+ const IdentifierInfo *FI) const {
+ if (SC)
+ SC(C, CE, *BT, FI);
+ }
+ };
+} // end anonymous namespace
+
+void MacOSXAPIChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // FIXME: Mostly copy and paste from UnixAPIChecker. Should refactor.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionTextRegion *Fn =
+ dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+
+ if (!Fn)
+ return;
+
+ const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ if (!FI)
+ return;
+
+ const SubCheck &SC =
+ llvm::StringSwitch<SubCheck>(FI->getName())
+ .Case("dispatch_once", SubCheck(CheckDispatchOnce, BTypes[DispatchOnce]))
+ .Case("dispatch_once_f", SubCheck(CheckDispatchOnce,
+ BTypes[DispatchOnceF]))
+ .Default(SubCheck());
+
+ SC.run(C, CE, FI);
+}
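For illustration, a minimal sketch of the dispatch_once pattern the new checker flags, assuming an Apple target where <dispatch/dispatch.h> is available; initOnce and both function names are invented:

// Illustrative sketch only -- contrasts the warned-on and accepted predicate storage.
#include <dispatch/dispatch.h>

static void initOnce(void *) { /* one-time setup */ }

void flagged() {
  dispatch_once_t pred = 0;                // stack storage: the checker warns here
  dispatch_once_f(&pred, 0, initOnce);
}

void accepted() {
  static dispatch_once_t pred;             // static storage, as the diagnostic suggests
  dispatch_once_f(&pred, 0, initOnce);
}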
diff --git a/lib/Checker/MemRegion.cpp b/lib/Checker/MemRegion.cpp
index 194015a11b11..9a26988fcf1d 100644
--- a/lib/Checker/MemRegion.cpp
+++ b/lib/Checker/MemRegion.cpp
@@ -419,20 +419,27 @@ const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
const StackLocalsSpaceRegion*
MemRegionManager::getStackLocalsRegion(const StackFrameContext *STC) {
assert(STC);
- if (STC == cachedStackLocalsFrame)
- return cachedStackLocalsRegion;
- cachedStackLocalsFrame = STC;
- return LazyAllocate(cachedStackLocalsRegion, STC);
+ StackLocalsSpaceRegion *&R = StackLocalsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackLocalsSpaceRegion>();
+ new (R) StackLocalsSpaceRegion(this, STC);
+ return R;
}
const StackArgumentsSpaceRegion *
MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
assert(STC);
- if (STC == cachedStackArgumentsFrame)
- return cachedStackArgumentsRegion;
-
- cachedStackArgumentsFrame = STC;
- return LazyAllocate(cachedStackArgumentsRegion, STC);
+ StackArgumentsSpaceRegion *&R = StackArgumentsSpaceRegions[STC];
+
+ if (R)
+ return R;
+
+ R = A.Allocate<StackArgumentsSpaceRegion>();
+ new (R) StackArgumentsSpaceRegion(this, STC);
+ return R;
}
const GlobalsSpaceRegion *MemRegionManager::getGlobalsRegion() {
diff --git a/lib/Checker/OSAtomicChecker.cpp b/lib/Checker/OSAtomicChecker.cpp
index 7f4aeca33178..e743528e2399 100644
--- a/lib/Checker/OSAtomicChecker.cpp
+++ b/lib/Checker/OSAtomicChecker.cpp
@@ -14,7 +14,6 @@
#include "GRExprEngineInternalChecks.h"
#include "clang/Checker/PathSensitive/Checker.h"
#include "clang/Basic/Builtins.h"
-#include "llvm/ADT/StringSwitch.h"
using namespace clang;
diff --git a/lib/Checker/CheckObjCUnusedIVars.cpp b/lib/Checker/ObjCUnusedIVarsChecker.cpp
index f2cf58191632..04d897aec894 100644
--- a/lib/Checker/CheckObjCUnusedIVars.cpp
+++ b/lib/Checker/ObjCUnusedIVarsChecker.cpp
@@ -1,4 +1,4 @@
-//==- CheckObjCUnusedIVars.cpp - Check for unused ivars ----------*- C++ -*-==//
+//==- ObjCUnusedIVarsChecker.cpp - Check for unused ivars --------*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -68,14 +68,14 @@ static void Scan(IvarUsageMap& M, const ObjCContainerDecl* D) {
for (ObjCContainerDecl::instmeth_iterator I = D->instmeth_begin(),
E = D->instmeth_end(); I!=E; ++I)
Scan(M, (*I)->getBody());
-
- if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
+
+ if (const ObjCImplementationDecl *ID = dyn_cast<ObjCImplementationDecl>(D)) {
// Scan for @synthesized property methods that act as setters/getters
// to an ivar.
for (ObjCImplementationDecl::propimpl_iterator I = ID->propimpl_begin(),
E = ID->propimpl_end(); I!=E; ++I)
Scan(M, *I);
-
+
// Scan the associated categories as well.
for (const ObjCCategoryDecl *CD =
ID->getClassInterface()->getCategoryList(); CD ;
@@ -92,7 +92,7 @@ static void Scan(IvarUsageMap &M, const DeclContext *C, const FileID FID,
I!=E; ++I)
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(*I)) {
SourceLocation L = FD->getLocStart();
- if (SM.getFileID(L) == FID)
+ if (SM.getFileID(L) == FID)
Scan(M, FD->getBody());
}
}
@@ -109,12 +109,12 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
const ObjCIvarDecl* ID = *I;
- // Ignore ivars that aren't private.
- if (ID->getAccessControl() != ObjCIvarDecl::Private)
- continue;
-
- // Skip IB Outlets.
- if (ID->getAttr<IBOutletAttr>())
+ // Ignore ivars that...
+ // (a) aren't private
+ // (b) explicitly marked unused
+ // (c) are iboutlets
+ if (ID->getAccessControl() != ObjCIvarDecl::Private ||
+ ID->getAttr<UnusedAttr>() || ID->getAttr<IBOutletAttr>())
continue;
M[ID] = Unused;
@@ -122,11 +122,10 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
if (M.empty())
return;
-
+
// Now scan the implementation declaration.
Scan(M, D);
-
// Any potentially unused ivars?
bool hasUnused = false;
for (IvarUsageMap::iterator I = M.begin(), E = M.end(); I!=E; ++I)
@@ -134,10 +133,10 @@ void clang::CheckObjCUnusedIvar(const ObjCImplementationDecl *D,
hasUnused = true;
break;
}
-
+
if (!hasUnused)
return;
-
+
// We found some potentially unused ivars. Scan the entire translation unit
// for functions inside the @implementation that reference these ivars.
// FIXME: In the future hopefully we can just use the lexical DeclContext
diff --git a/lib/Checker/RegionStore.cpp b/lib/Checker/RegionStore.cpp
index f70105af1379..fd48f72dd4ae 100644
--- a/lib/Checker/RegionStore.cpp
+++ b/lib/Checker/RegionStore.cpp
@@ -975,8 +975,10 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR))
MR = GetElementZeroRegion(MR, T);
- if (isa<CodeTextRegion>(MR))
+ if (isa<CodeTextRegion>(MR)) {
+ assert(0 && "Why load from a code text region?");
return UnknownVal();
+ }
// FIXME: Perhaps this method should just take a 'const MemRegion*' argument
// instead of 'Loc', and have the other Loc cases handled at a higher level.
@@ -1068,7 +1070,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
}
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R, RTy);
+ return ValMgr.getRegionValueSymbolVal(R);
}
std::pair<Store, const MemRegion *>
@@ -1229,7 +1231,7 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
}
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R, Ty);
+ return ValMgr.getRegionValueSymbolVal(R);
}
SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
@@ -1269,11 +1271,11 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
if (isa<UnknownSpaceRegion>(MS) ||
isa<StackArgumentsSpaceRegion>(MS))
- return ValMgr.getRegionValueSymbolVal(R, T);
+ return ValMgr.getRegionValueSymbolVal(R);
if (isa<GlobalsSpaceRegion>(MS)) {
if (VD->isFileVarDecl())
- return ValMgr.getRegionValueSymbolVal(R, T);
+ return ValMgr.getRegionValueSymbolVal(R);
if (T->isIntegerType())
return ValMgr.makeIntVal(0, T);
@@ -1291,7 +1293,7 @@ SVal RegionStoreManager::RetrieveLazySymbol(const TypedRegion *R) {
QualType valTy = R->getValueType(getContext());
// All other values are symbolic.
- return ValMgr.getRegionValueSymbolVal(R, valTy);
+ return ValMgr.getRegionValueSymbolVal(R);
}
SVal RegionStoreManager::RetrieveStruct(Store store, const TypedRegion* R) {
diff --git a/lib/Checker/SymbolManager.cpp b/lib/Checker/SymbolManager.cpp
index 40bdcf65bca4..f2d630cdf64b 100644
--- a/lib/Checker/SymbolManager.cpp
+++ b/lib/Checker/SymbolManager.cpp
@@ -14,6 +14,7 @@
#include "clang/Checker/PathSensitive/SymbolManager.h"
#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Analysis/AnalysisContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -78,14 +79,14 @@ void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
}
const SymbolRegionValue*
-SymbolManager::getRegionValueSymbol(const MemRegion* R, QualType T) {
+SymbolManager::getRegionValueSymbol(const TypedRegion* R) {
llvm::FoldingSetNodeID profile;
- SymbolRegionValue::Profile(profile, R, T);
+ SymbolRegionValue::Profile(profile, R);
void* InsertPos;
SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
if (!SD) {
SD = (SymExpr*) BPAlloc.Allocate<SymbolRegionValue>();
- new (SD) SymbolRegionValue(SymbolCounter, R, T);
+ new (SD) SymbolRegionValue(SymbolCounter, R);
DataSet.InsertNode(SD, InsertPos);
++SymbolCounter;
}
@@ -175,13 +176,7 @@ QualType SymbolDerived::getType(ASTContext& Ctx) const {
}
QualType SymbolRegionValue::getType(ASTContext& C) const {
- if (!T.isNull())
- return T;
-
- if (const TypedRegion* TR = dyn_cast<TypedRegion>(R))
- return TR->getValueType(C);
-
- return QualType();
+ return R->getValueType(C);
}
SymbolManager::~SymbolManager() {}
@@ -222,7 +217,11 @@ bool SymbolReaper::isLive(SymbolRef sym) {
bool SymbolReaper::isLive(const Stmt *Loc, const VarRegion *VR) const {
const StackFrameContext *SFC = VR->getStackFrame();
- return SFC == CurrentStackFrame ? Liveness.isLive(Loc, VR->getDecl()) : true;
+
+ if (SFC == CurrentStackFrame)
+ return Liveness.isLive(Loc, VR->getDecl());
+ else
+ return SFC->isParentOf(CurrentStackFrame);
}
SymbolVisitor::~SymbolVisitor() {}
diff --git a/lib/Checker/UnixAPIChecker.cpp b/lib/Checker/UnixAPIChecker.cpp
new file mode 100644
index 000000000000..7ff817ae7677
--- /dev/null
+++ b/lib/Checker/UnixAPIChecker.cpp
@@ -0,0 +1,154 @@
+//= UnixAPIChecker.cpp - Checks preconditions for various Unix APIs -*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines UnixAPIChecker, which is an assortment of checks on calls
+// to various, widely used UNIX/Posix functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "GRExprEngineInternalChecks.h"
+#include <fcntl.h>
+
+using namespace clang;
+
+namespace {
+class UnixAPIChecker : public CheckerVisitor<UnixAPIChecker> {
+ enum SubChecks {
+ OpenFn = 0,
+ NumChecks
+ };
+
+ BugType *BTypes[NumChecks];
+
+public:
+ UnixAPIChecker() { memset(BTypes, 0, sizeof(*BTypes) * NumChecks); }
+ static void *getTag() { static unsigned tag = 0; return &tag; }
+
+ void PreVisitCallExpr(CheckerContext &C, const CallExpr *CE);
+};
+} //end anonymous namespace
+
+void clang::RegisterUnixAPIChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new UnixAPIChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// Utility functions.
+//===----------------------------------------------------------------------===//
+
+static inline void LazyInitialize(BugType *&BT, const char *name) {
+ if (BT)
+ return;
+ BT = new BugType(name, "Unix API");
+}
+
+//===----------------------------------------------------------------------===//
+// "open" (man 2 open)
+//===----------------------------------------------------------------------===//
+
+static void CheckOpen(CheckerContext &C, const CallExpr *CE, BugType *&BT) {
+ LazyInitialize(BT, "Improper use of 'open'");
+
+ // Look at the 'oflags' argument for the O_CREAT flag.
+ const GRState *state = C.getState();
+
+ if (CE->getNumArgs() < 2) {
+ // The frontend should issue a warning for this case, so this is a sanity
+ // check.
+ return;
+ }
+
+ // Now check if oflags has O_CREAT set.
+ const Expr *oflagsEx = CE->getArg(1);
+ const SVal V = state->getSVal(oflagsEx);
+ if (!isa<NonLoc>(V)) {
+ // The case where 'V' can be a location can only be due to a bad header,
+ // so in this case bail out.
+ return;
+ }
+ NonLoc oflags = cast<NonLoc>(V);
+ NonLoc ocreateFlag =
+ cast<NonLoc>(C.getValueManager().makeIntVal((uint64_t) O_CREAT,
+ oflagsEx->getType()));
+ SVal maskedFlagsUC = C.getSValuator().EvalBinOpNN(state, BinaryOperator::And,
+ oflags, ocreateFlag,
+ oflagsEx->getType());
+ if (maskedFlagsUC.isUnknownOrUndef())
+ return;
+ DefinedSVal maskedFlags = cast<DefinedSVal>(maskedFlagsUC);
+
+ // Check if maskedFlags is non-zero.
+ const GRState *trueState, *falseState;
+ llvm::tie(trueState, falseState) = state->Assume(maskedFlags);
+
+ // Only emit an error if the value of 'maskedFlags' is properly
+ // constrained.
+ if (!(trueState && !falseState))
+ return;
+
+ if (CE->getNumArgs() < 3) {
+ ExplodedNode *N = C.GenerateSink(trueState);
+ if (!N)
+ return;
+
+ EnhancedBugReport *report =
+ new EnhancedBugReport(*BT,
+ "Call to 'open' requires a third argument when "
+ "the 'O_CREAT' flag is set", N);
+ report->addRange(oflagsEx->getSourceRange());
+ C.EmitReport(report);
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// Central dispatch function.
+//===----------------------------------------------------------------------===//
+
+typedef void (*SubChecker)(CheckerContext &C, const CallExpr *CE, BugType *&BT);
+namespace {
+ class SubCheck {
+ SubChecker SC;
+ BugType **BT;
+ public:
+ SubCheck(SubChecker sc, BugType *& bt) : SC(sc), BT(&bt) {}
+ SubCheck() : SC(NULL), BT(NULL) {}
+
+ void run(CheckerContext &C, const CallExpr *CE) const {
+ if (SC)
+ SC(C, CE, *BT);
+ }
+ };
+} // end anonymous namespace
+
+void UnixAPIChecker::PreVisitCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // Get the callee. All the functions we care about are C functions
+ // with simple identifiers.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionTextRegion *Fn =
+ dyn_cast_or_null<FunctionTextRegion>(state->getSVal(Callee).getAsRegion());
+
+ if (!Fn)
+ return;
+
+ const IdentifierInfo *FI = Fn->getDecl()->getIdentifier();
+ if (!FI)
+ return;
+
+ const SubCheck &SC =
+ llvm::StringSwitch<SubCheck>(FI->getName())
+ .Case("open", SubCheck(CheckOpen, BTypes[OpenFn]))
+ .Default(SubCheck());
+
+ SC.run(C, CE);
+}
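For illustration, a minimal sketch of the open() call the new check reports, assuming a POSIX system with <fcntl.h> and <unistd.h>; make_log is an invented name and not part of the upstream change:

// Illustrative sketch only -- the O_CREAT misuse the checker reports, and the fixed form.
#include <fcntl.h>
#include <unistd.h>

int make_log(const char *path) {
  // Reported: O_CREAT is set but the third (mode) argument is missing, so a
  // newly created file gets whatever garbage the variadic argument slot holds.
  int bad = open(path, O_WRONLY | O_CREAT);
  if (bad != -1)
    close(bad);

  // Accepted: an explicit mode accompanies O_CREAT.
  return open(path, O_WRONLY | O_CREAT, 0644);
}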
diff --git a/lib/Checker/ValueManager.cpp b/lib/Checker/ValueManager.cpp
index 5359489a2299..aa0c3c877dde 100644
--- a/lib/Checker/ValueManager.cpp
+++ b/lib/Checker/ValueManager.cpp
@@ -70,18 +70,14 @@ SVal ValueManager::convertToArrayIndex(SVal V) {
return SVator->EvalCastNL(cast<NonLoc>(V), ArrayIndexTy);
}
-DefinedOrUnknownSVal ValueManager::getRegionValueSymbolVal(const MemRegion* R,
- QualType T) {
-
- if (T.isNull()) {
- const TypedRegion* TR = cast<TypedRegion>(R);
- T = TR->getValueType(SymMgr.getContext());
- }
+DefinedOrUnknownSVal
+ValueManager::getRegionValueSymbolVal(const TypedRegion* R) {
+ QualType T = R->getValueType(SymMgr.getContext());
if (!SymbolManager::canSymbolicate(T))
return UnknownVal();
- SymbolRef sym = SymMgr.getRegionValueSymbol(R, T);
+ SymbolRef sym = SymMgr.getRegionValueSymbol(R);
if (Loc::IsLocType(T))
return loc::MemRegionVal(MemMgr.getSymbolicRegion(sym));
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index 46b62441d6e4..7076067e4381 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -24,7 +24,7 @@ using namespace clang;
using namespace CodeGen;
llvm::Constant *CodeGenFunction::
-BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
+BuildDescriptorBlockDecl(const BlockExpr *BE, bool BlockHasCopyDispose, CharUnits Size,
const llvm::StructType* Ty,
std::vector<HelperInfo> *NoteForHelper) {
const llvm::Type *UnsignedLongTy
@@ -43,6 +43,7 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
C = llvm::ConstantInt::get(UnsignedLongTy, Size.getQuantity());
Elts.push_back(C);
+ // optional copy/dispose helpers
if (BlockHasCopyDispose) {
// copy_func_helper_decl
Elts.push_back(BuildCopyHelper(Ty, NoteForHelper));
@@ -51,6 +52,17 @@ BuildDescriptorBlockDecl(bool BlockHasCopyDispose, CharUnits Size,
Elts.push_back(BuildDestroyHelper(Ty, NoteForHelper));
}
+ // Signature. non-optional ObjC-style method descriptor @encode sequence
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+
+ Elts.push_back(llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty));
+
+ // Layout.
+ C = llvm::ConstantInt::get(UnsignedLongTy, 0);
+ Elts.push_back(C);
+
C = llvm::ConstantStruct::get(VMContext, Elts, false);
C = new llvm::GlobalVariable(CGM.getModule(), C->getType(), true,
@@ -110,19 +122,6 @@ static bool CanBlockBeGlobal(const CodeGenFunction::BlockInfo &Info) {
/// invoke function.
static void AllocateAllBlockDeclRefs(const CodeGenFunction::BlockInfo &Info,
CodeGenFunction *CGF) {
- // Always allocate self, as it is often handy in the debugger, even if there
- // is no codegen in the block that uses it. This is also useful to always do
- // this as if we didn't, we'd have to figure out all code that uses a self
- // pointer, including implicit uses.
- if (const ObjCMethodDecl *OMD
- = dyn_cast_or_null<ObjCMethodDecl>(CGF->CurFuncDecl)) {
- ImplicitParamDecl *SelfDecl = OMD->getSelfDecl();
- BlockDeclRefExpr *BDRE = new (CGF->getContext())
- BlockDeclRefExpr(SelfDecl,
- SelfDecl->getType(), SourceLocation(), false);
- CGF->AllocateBlockDecl(BDRE);
- }
-
// FIXME: Also always forward the this pointer in C++ as well.
for (size_t i = 0; i < Info.DeclRefs.size(); ++i)
@@ -148,30 +147,14 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
size_t BlockFields = 5;
- bool hasIntrospection = CGM.getContext().getLangOptions().BlockIntrospection;
-
- if (hasIntrospection) {
- BlockFields++;
- }
std::vector<llvm::Constant*> Elts(BlockFields);
- if (hasIntrospection) {
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- Elts[5] = llvm::ConstantExpr::getBitCast(
- CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
- }
-
llvm::Constant *C;
llvm::Value *V;
{
// C = BuildBlockStructInitlist();
- unsigned int flags = BLOCK_HAS_DESCRIPTOR;
-
- if (hasIntrospection)
- flags |= BLOCK_HAS_OBJC_TYPE;
+ unsigned int flags = BLOCK_HAS_OBJC_TYPE;
// We run this first so that we set BlockHasCopyDispose from the entire
// block literal.
@@ -212,7 +195,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
if (subBlockDeclRefDecls.size() == 0) {
// __descriptor
- Elts[4] = BuildDescriptorBlockDecl(subBlockHasCopyDispose, subBlockSize,
+ Elts[4] = BuildDescriptorBlockDecl(BE, subBlockHasCopyDispose, subBlockSize,
0, 0);
// Optimize to being a global block.
@@ -234,8 +217,6 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
for (int i=0; i<4; ++i)
Types[i] = Elts[i]->getType();
Types[4] = PtrToInt8Ty;
- if (hasIntrospection)
- Types[5] = PtrToInt8Ty;
for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i) {
const Expr *E = subBlockDeclRefDecls[i];
@@ -258,8 +239,6 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
for (unsigned i=0; i<4; ++i)
Builder.CreateStore(Elts[i], Builder.CreateStructGEP(V, i, "block.tmp"));
- if (hasIntrospection)
- Builder.CreateStore(Elts[5], Builder.CreateStructGEP(V, 5, "block.tmp"));
for (unsigned i=0; i < subBlockDeclRefDecls.size(); ++i)
{
@@ -348,7 +327,8 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
NoteForHelper.resize(helpersize);
// __descriptor
- llvm::Value *Descriptor = BuildDescriptorBlockDecl(subBlockHasCopyDispose,
+ llvm::Value *Descriptor = BuildDescriptorBlockDecl(BE,
+ subBlockHasCopyDispose,
subBlockSize, Ty,
&NoteForHelper);
Descriptor = Builder.CreateBitCast(Descriptor, PtrToInt8Ty);
@@ -384,6 +364,16 @@ const llvm::Type *BlockModule::getBlockDescriptorType() {
// struct __block_descriptor {
// unsigned long reserved;
// unsigned long block_size;
+ //
+ // // later, the following will be added
+ //
+ // struct {
+ // void (*copyHelper)();
+ // void (*destroyHelper)();
+ // } helpers; // !!! optional
+ //
+ // const char *signature; // the block signature
+ // const char *layout; // reserved
// };
BlockDescriptorType = llvm::StructType::get(UnsignedLongTy->getContext(),
UnsignedLongTy,
@@ -412,20 +402,8 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
// int __reserved;
// void (*__invoke)(void *);
// struct __block_descriptor *__descriptor;
- // // GNU runtime only:
- // const char *types;
// };
- if (CGM.getContext().getLangOptions().BlockIntrospection)
- GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
- PtrToInt8Ty,
- IntTy,
- IntTy,
- PtrToInt8Ty,
- BlockDescPtrTy,
- PtrToInt8Ty,
- NULL);
- else
- GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
+ GenericBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
PtrToInt8Ty,
IntTy,
IntTy,
@@ -439,40 +417,6 @@ const llvm::Type *BlockModule::getGenericBlockLiteralType() {
return GenericBlockLiteralType;
}
-const llvm::Type *BlockModule::getGenericExtendedBlockLiteralType() {
- if (GenericExtendedBlockLiteralType)
- return GenericExtendedBlockLiteralType;
-
- const llvm::Type *BlockDescPtrTy =
- llvm::PointerType::getUnqual(getBlockDescriptorType());
-
- const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
- getTypes().ConvertType(getContext().IntTy));
-
- // struct __block_literal_generic {
- // void *__isa;
- // int __flags;
- // int __reserved;
- // void (*__invoke)(void *);
- // struct __block_descriptor *__descriptor;
- // void *__copy_func_helper_decl;
- // void *__destroy_func_decl;
- // };
- GenericExtendedBlockLiteralType = llvm::StructType::get(IntTy->getContext(),
- PtrToInt8Ty,
- IntTy,
- IntTy,
- PtrToInt8Ty,
- BlockDescPtrTy,
- PtrToInt8Ty,
- PtrToInt8Ty,
- NULL);
-
- getModule().addTypeName("struct.__block_literal_extended_generic",
- GenericExtendedBlockLiteralType);
-
- return GenericExtendedBlockLiteralType;
-}
RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
ReturnValueSlot ReturnValue) {
@@ -603,7 +547,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(
getTypes().ConvertType(getContext().IntTy));
- llvm::Constant *DescriptorFields[2];
+ llvm::Constant *DescriptorFields[4];
// Reserved
DescriptorFields[0] = llvm::Constant::getNullValue(UnsignedLongTy);
@@ -614,9 +558,21 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
CGM.GetTargetTypeStoreSize(getGenericBlockLiteralType());
DescriptorFields[1] =
llvm::ConstantInt::get(UnsignedLongTy,BlockLiteralSize.getQuantity());
+
+ // signature. non-optional ObjC-style method descriptor @encode sequence
+ std::string BlockTypeEncoding;
+ CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
+ DescriptorFields[2] = llvm::ConstantExpr::getBitCast(
+ CGM.GetAddrOfConstantCString(BlockTypeEncoding), PtrToInt8Ty);
+
+ // layout
+ DescriptorFields[3] =
+ llvm::ConstantInt::get(UnsignedLongTy,0);
+
+ // build the structure from the 4 elements
llvm::Constant *DescriptorStruct =
- llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 2, false);
+ llvm::ConstantStruct::get(VMContext, &DescriptorFields[0], 4, false);
llvm::GlobalVariable *Descriptor =
new llvm::GlobalVariable(getModule(), DescriptorStruct->getType(), true,
@@ -625,8 +581,6 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
int FieldCount = 5;
// Generate the constants for the block literal.
- if (CGM.getContext().getLangOptions().BlockIntrospection)
- FieldCount = 6;
std::vector<llvm::Constant*> LiteralFields(FieldCount);
@@ -649,10 +603,8 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
LiteralFields[0] = getNSConcreteGlobalBlock();
// Flags
- LiteralFields[1] = CGM.getContext().getLangOptions().BlockIntrospection ?
- llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR |
- BLOCK_HAS_OBJC_TYPE) :
- llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_DESCRIPTOR);
+ LiteralFields[1] =
+ llvm::ConstantInt::get(IntTy, BLOCK_IS_GLOBAL | BLOCK_HAS_OBJC_TYPE);
// Reserved
LiteralFields[2] = llvm::Constant::getNullValue(IntTy);
@@ -663,14 +615,6 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
// Descriptor
LiteralFields[4] = Descriptor;
- // Type encoding
- if (CGM.getContext().getLangOptions().BlockIntrospection) {
- std::string BlockTypeEncoding;
- CGM.getContext().getObjCEncodingForBlock(BE, BlockTypeEncoding);
-
- LiteralFields[5] = CGM.GetAddrOfConstantCString(BlockTypeEncoding);
- }
-
llvm::Constant *BlockLiteralStruct =
llvm::ConstantStruct::get(VMContext, LiteralFields, false);
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index a9f5ae05c109..39f26f8b1363 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -51,12 +51,9 @@ class CodeGenModule;
class BlockBase {
public:
enum {
- BLOCK_NEEDS_FREE = (1 << 24),
BLOCK_HAS_COPY_DISPOSE = (1 << 25),
BLOCK_HAS_CXX_OBJ = (1 << 26),
- BLOCK_IS_GC = (1 << 27),
BLOCK_IS_GLOBAL = (1 << 28),
- BLOCK_HAS_DESCRIPTOR = (1 << 29),
BLOCK_HAS_OBJC_TYPE = (1 << 30)
};
};
@@ -80,7 +77,6 @@ public:
const llvm::Type *getBlockDescriptorType();
const llvm::Type *getGenericBlockLiteralType();
- const llvm::Type *getGenericExtendedBlockLiteralType();
llvm::Constant *GetAddrOfGlobalBlock(const BlockExpr *BE, const char *);
@@ -94,7 +90,7 @@ public:
const llvm::Type *BlockDescriptorType;
const llvm::Type *GenericBlockLiteralType;
- const llvm::Type *GenericExtendedBlockLiteralType;
+
struct {
int GlobalUniqueCount;
} Block;
@@ -111,7 +107,7 @@ public:
: Context(C), TheModule(M), TheTargetData(TD), Types(T),
CGM(CodeGen), VMContext(M.getContext()),
NSConcreteGlobalBlock(0), NSConcreteStackBlock(0), BlockDescriptorType(0),
- GenericBlockLiteralType(0), GenericExtendedBlockLiteralType(0),
+ GenericBlockLiteralType(0),
BlockObjectAssign(0), BlockObjectDispose(0) {
Block.GlobalUniqueCount = 0;
PtrToInt8Ty = llvm::Type::getInt8PtrTy(M.getContext());
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index beaf7b89c003..0f5e90fb15aa 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "TargetInfo.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "clang/Basic/TargetInfo.h"
@@ -19,6 +20,7 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "llvm/Intrinsics.h"
+#include "llvm/Target/TargetData.h"
using namespace clang;
using namespace CodeGen;
using namespace llvm;
@@ -57,6 +59,10 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Operand));
}
+static llvm::ConstantInt *getInt32(llvm::LLVMContext &Context, int32_t Value) {
+ return llvm::ConstantInt::get(llvm::Type::getInt32Ty(Context), Value);
+}
+
RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned BuiltinID, const CallExpr *E) {
// See if we can constant fold this builtin. If so, don't emit it at all.
@@ -341,6 +347,20 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
llvm::ConstantInt::get(llvm::Type::getInt32Ty(VMContext), 1));
return RValue::get(Address);
}
+ case Builtin::BI__builtin_dwarf_cfa: {
+ // The offset in bytes from the first argument to the CFA.
+ //
+ // Why on earth is this in the frontend? Is there any reason at
+ // all that the backend can't reasonably determine this while
+ // lowering llvm.eh.dwarf.cfa()?
+ //
+ // TODO: If there's a satisfactory reason, add a target hook for
+ // this instead of hard-coding 0, which is correct for most targets.
+ int32_t Offset = 0;
+
+ Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa, 0, 0);
+ return RValue::get(Builder.CreateCall(F, getInt32(VMContext, Offset)));
+ }
case Builtin::BI__builtin_return_address: {
Value *Depth = EmitScalarExpr(E->getArg(0));
Depth = Builder.CreateIntCast(Depth,
@@ -358,13 +378,64 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
return RValue::get(Builder.CreateCall(F, Depth));
}
case Builtin::BI__builtin_extract_return_addr: {
- // FIXME: There should be a target hook for this
- return RValue::get(EmitScalarExpr(E->getArg(0)));
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_frob_return_addr: {
+ Value *Address = EmitScalarExpr(E->getArg(0));
+ Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
+ return RValue::get(Result);
+ }
+ case Builtin::BI__builtin_eh_return: {
+ Value *Int = EmitScalarExpr(E->getArg(0));
+ Value *Ptr = EmitScalarExpr(E->getArg(1));
+
+ const llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
+ assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
+ "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
+ Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
+ ? Intrinsic::eh_return_i32
+ : Intrinsic::eh_return_i64,
+ 0, 0);
+ Builder.CreateCall2(F, Int, Ptr);
+ Value *V = Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return RValue::get(V);
}
case Builtin::BI__builtin_unwind_init: {
Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init, 0, 0);
return RValue::get(Builder.CreateCall(F));
}
+ case Builtin::BI__builtin_extend_pointer: {
+ // Extends a pointer to the size of an _Unwind_Word, which is
+ // uint64_t on all platforms. Generally this gets poked into a
+ // register and eventually used as an address, so if the
+ // addressing registers are wider than pointers and the platform
+ // doesn't implicitly ignore high-order bits when doing
+ // addressing, we need to make sure we zext / sext based on
+ // the platform's expectations.
+ //
+ // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html
+
+ LLVMContext &C = CGM.getLLVMContext();
+
+ // Cast the pointer to intptr_t.
+ Value *Ptr = EmitScalarExpr(E->getArg(0));
+ const llvm::IntegerType *IntPtrTy = CGM.getTargetData().getIntPtrType(C);
+ Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");
+
+ // If that's 64 bits, we're done.
+ if (IntPtrTy->getBitWidth() == 64)
+ return RValue::get(Result);
+
+ // Otherwise, ask the codegen data what to do.
+ const llvm::IntegerType *Int64Ty = llvm::IntegerType::get(C, 64);
+ if (getTargetHooks().extendPointerWithSExt())
+ return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
+ else
+ return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
+ }
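As a hedged aside (not part of this commit): the builtin handled in this case is the one unwinder-style code uses to widen a pointer into a 64-bit slot, with the sext/zext choice supplied by the target hooks above. A minimal hypothetical caller:

// __builtin_extend_pointer returns unsigned long long on all targets.
unsigned long long widen_for_unwind(void *p) {
  return __builtin_extend_pointer(p); // ptrtoint, then sext or zext per target
}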
#if 0
// FIXME: Finish/enable when LLVM backend support stabilizes
case Builtin::BI__builtin_setjmp: {
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 28c4c6b4b57b..4889fc08f488 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -22,33 +22,182 @@
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/StmtCXX.h"
+#include "clang/CodeGen/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
using namespace clang;
using namespace CodeGen;
+/// Determines whether the given function has a trivial body that does
+/// not require any specific codegen.
+static bool HasTrivialBody(const FunctionDecl *FD) {
+ Stmt *S = FD->getBody();
+ if (!S)
+ return true;
+ if (isa<CompoundStmt>(S) && cast<CompoundStmt>(S)->body_empty())
+ return true;
+ return false;
+}
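A small clarification of what counts as "trivial" here (my reading, not text from the patch): a body that is still unattached and an explicitly empty one are treated the same.

// struct T { ~T() {} };            // empty CompoundStmt -> trivial body
// struct U { /* implicit ~U() */ }; // no body attached yet -> trivial body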
+
+/// Try to emit a base destructor as an alias to its primary
+/// base-class destructor.
+bool CodeGenModule::TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // If the destructor doesn't have a trivial body, we have to emit it
+ // separately.
+ if (!HasTrivialBody(D))
+ return true;
+
+ const CXXRecordDecl *Class = D->getParent();
+
+ // If we need to manipulate a VTT parameter, give up.
+ if (Class->getNumVBases()) {
+ // Extra Credit: passing extra parameters is perfectly safe
+ // in many calling conventions, so only bail out if the ctor's
+ // calling convention is nonstandard.
+ return true;
+ }
+
+ // If any fields have a non-trivial destructor, we have to emit it
+ // separately.
+ for (CXXRecordDecl::field_iterator I = Class->field_begin(),
+ E = Class->field_end(); I != E; ++I)
+ if (const RecordType *RT = (*I)->getType()->getAs<RecordType>())
+ if (!cast<CXXRecordDecl>(RT->getDecl())->hasTrivialDestructor())
+ return true;
+
+ // Try to find a unique base class with a non-trivial destructor.
+ const CXXRecordDecl *UniqueBase = 0;
+ for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
+ E = Class->bases_end(); I != E; ++I) {
+
+ // We're in the base destructor, so skip virtual bases.
+ if (I->isVirtual()) continue;
+
+ // Skip base classes with trivial destructors.
+ const CXXRecordDecl *Base
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (Base->hasTrivialDestructor()) continue;
+
+ // If we've already found a base class with a non-trivial
+ // destructor, give up.
+ if (UniqueBase) return true;
+ UniqueBase = Base;
+ }
+
+ // If we didn't find any bases with a non-trivial destructor, then
+ // the base destructor is actually effectively trivial, which can
+ // happen if it was needlessly user-defined or if there are virtual
+ // bases with non-trivial destructors.
+ if (!UniqueBase)
+ return true;
+
+ /// If we don't have a definition for the destructor yet, don't
+ /// emit. We can't emit aliases to declarations; that's just not
+ /// how aliases work.
+ const CXXDestructorDecl *BaseD = UniqueBase->getDestructor(getContext());
+ if (!BaseD->isImplicit() && !BaseD->getBody())
+ return true;
+
+ // If the base is at a non-zero offset, give up.
+ const ASTRecordLayout &ClassLayout = Context.getASTRecordLayout(Class);
+ if (ClassLayout.getBaseClassOffset(UniqueBase) != 0)
+ return true;
+
+ return TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Base),
+ GlobalDecl(BaseD, Dtor_Base));
+}
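A minimal sketch of the class shape this routine is looking for (hypothetical example; the alias is only attempted under -mconstructor-aliases):

struct Base { ~Base(); int refcount; };         // the unique base with a
Base::~Base() {}                                // non-trivial destructor, defined here
struct Derived : Base { ~Derived(); int tag; };
Derived::~Derived() {}                          // trivial body, no vbases, no field dtors
// Derived's base-object destructor (D2) has nothing to do beyond calling
// Base's D2, and Base sits at offset 0, so D2 can be emitted as an alias.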
+/// Try to emit a definition as a global alias for another definition.
+bool CodeGenModule::TryEmitDefinitionAsAlias(GlobalDecl AliasDecl,
+ GlobalDecl TargetDecl) {
+ if (!getCodeGenOpts().CXXCtorDtorAliases)
+ return true;
+
+ // The alias will use the linkage of the referent. If we can't
+ // support aliases with that linkage, fail.
+ llvm::GlobalValue::LinkageTypes Linkage
+ = getFunctionLinkage(cast<FunctionDecl>(AliasDecl.getDecl()));
+
+ switch (Linkage) {
+ // We can definitely emit aliases to definitions with external linkage.
+ case llvm::GlobalValue::ExternalLinkage:
+ case llvm::GlobalValue::ExternalWeakLinkage:
+ break;
+
+ // Same with local linkage.
+ case llvm::GlobalValue::InternalLinkage:
+ case llvm::GlobalValue::PrivateLinkage:
+ case llvm::GlobalValue::LinkerPrivateLinkage:
+ break;
+
+ // We should try to support linkonce linkages.
+ case llvm::GlobalValue::LinkOnceAnyLinkage:
+ case llvm::GlobalValue::LinkOnceODRLinkage:
+ return true;
+
+ // Other linkages will probably never be supported.
+ default:
+ return true;
+ }
+
+ // Derive the type for the alias.
+ const llvm::PointerType *AliasType
+ = getTypes().GetFunctionType(AliasDecl)->getPointerTo();
+
+ // Find the referent. Some aliases might require a bitcast, in
+ // which case the caller is responsible for ensuring the soundness
+ // of these semantics.
+ llvm::GlobalValue *Ref = cast<llvm::GlobalValue>(GetAddrOfGlobal(TargetDecl));
+ llvm::Constant *Aliasee = Ref;
+ if (Ref->getType() != AliasType)
+ Aliasee = llvm::ConstantExpr::getBitCast(Ref, AliasType);
+
+ // Create the alias with no name.
+ llvm::GlobalAlias *Alias =
+ new llvm::GlobalAlias(AliasType, Linkage, "", Aliasee, &getModule());
+
+ // Switch any previous uses to the alias.
+ const char *MangledName = getMangledName(AliasDecl);
+ llvm::GlobalValue *&Entry = GlobalDeclMap[MangledName];
+ if (Entry) {
+ assert(Entry->isDeclaration() && "definition already exists for alias");
+ assert(Entry->getType() == AliasType &&
+ "declaration exists with different type");
+ Entry->replaceAllUsesWith(Alias);
+ Entry->eraseFromParent();
+ }
+ Entry = Alias;
-llvm::Value *CodeGenFunction::LoadCXXThis() {
- assert(isa<CXXMethodDecl>(CurFuncDecl) &&
- "Must be in a C++ member function decl to load 'this'");
- assert(cast<CXXMethodDecl>(CurFuncDecl)->isInstance() &&
- "Must be in a C++ member function decl to load 'this'");
+ // Finally, set up the alias with its proper name and attributes.
+ Alias->setName(MangledName);
+ SetCommonAttributes(AliasDecl.getDecl(), Alias);
- // FIXME: What if we're inside a block?
- // ans: See how CodeGenFunction::LoadObjCSelf() uses
- // CodeGenFunction::BlockForwardSelf() for how to do this.
- return Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
+ return false;
}
void CodeGenModule::EmitCXXConstructors(const CXXConstructorDecl *D) {
+ // The constructor used for constructing this as a complete class;
+ // constructs the virtual bases, then calls the base constructor.
EmitGlobal(GlobalDecl(D, Ctor_Complete));
+
+ // The constructor used for constructing this as a base class;
+ // ignores virtual bases.
EmitGlobal(GlobalDecl(D, Ctor_Base));
}
void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
+ // The complete constructor is equivalent to the base constructor
+ // for classes with no virtual bases. Try to emit it as an alias.
+ if (Type == Ctor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Ctor_Complete),
+ GlobalDecl(D, Ctor_Base)))
+ return;
- llvm::Function *Fn = GetAddrOfCXXConstructor(D, Type);
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXConstructor(D, Type));
CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
@@ -56,15 +205,17 @@ void CodeGenModule::EmitCXXConstructor(const CXXConstructorDecl *D,
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
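A hedged sketch of what the new complete-to-base aliasing buys (the example and approximate IR are mine, not from the commit):

// struct S { S(int); int v; };
// S::S(int i) : v(i) {}
// S has no virtual bases, so C1 and C2 are identical; with
// -mconstructor-aliases the complete-object constructor becomes roughly
//   @_ZN1SC1Ei = alias ... @_ZN1SC2Ei
// instead of a second copy of the constructor body.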
-llvm::Function *
+llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
CXXCtorType Type) {
+ const char *Name = getMangledCXXCtorName(D, Type);
+ if (llvm::GlobalValue *V = GlobalDeclMap[Name])
+ return V;
+
const FunctionProtoType *FPT = D->getType()->getAs<FunctionProtoType>();
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type),
FPT->isVariadic());
-
- const char *Name = getMangledCXXCtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
@@ -79,15 +230,39 @@ const char *CodeGenModule::getMangledCXXCtorName(const CXXConstructorDecl *D,
}
void CodeGenModule::EmitCXXDestructors(const CXXDestructorDecl *D) {
+ // The destructor in a virtual table is always a 'deleting'
+ // destructor, which calls the complete destructor and then uses the
+ // appropriate operator delete.
if (D->isVirtual())
EmitGlobal(GlobalDecl(D, Dtor_Deleting));
+
+ // The destructor used for destructing this as a most-derived class;
+ // calls the base destructor and then destructs any virtual bases.
EmitGlobal(GlobalDecl(D, Dtor_Complete));
+
+ // The destructor used for destructing this as a base class; ignores
+ // virtual bases.
EmitGlobal(GlobalDecl(D, Dtor_Base));
}
void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
- llvm::Function *Fn = GetAddrOfCXXDestructor(D, Type);
+ // The complete destructor is equivalent to the base destructor for
+ // classes with no virtual bases, so try to emit it as an alias.
+ if (Type == Dtor_Complete &&
+ !D->getParent()->getNumVBases() &&
+ !TryEmitDefinitionAsAlias(GlobalDecl(D, Dtor_Complete),
+ GlobalDecl(D, Dtor_Base)))
+ return;
+
+ // The base destructor is equivalent to the base destructor of its
+ // base class if there is exactly one non-virtual base class with a
+ // non-trivial destructor, there are no fields with a non-trivial
+ // destructor, and the body of the destructor is trivial.
+ if (Type == Dtor_Base && !TryEmitBaseDestructorAsAlias(D))
+ return;
+
+ llvm::Function *Fn = cast<llvm::Function>(GetAddrOfCXXDestructor(D, Type));
CodeGenFunction(*this).GenerateCode(GlobalDecl(D, Type), Fn);
@@ -95,13 +270,16 @@ void CodeGenModule::EmitCXXDestructor(const CXXDestructorDecl *D,
SetLLVMFunctionAttributesForDefinition(D, Fn);
}
-llvm::Function *
+llvm::GlobalValue *
CodeGenModule::GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
CXXDtorType Type) {
+ const char *Name = getMangledCXXDtorName(D, Type);
+ if (llvm::GlobalValue *V = GlobalDeclMap[Name])
+ return V;
+
const llvm::FunctionType *FTy =
getTypes().GetFunctionType(getTypes().getFunctionInfo(D, Type), false);
- const char *Name = getMangledCXXDtorName(D, Type);
return cast<llvm::Function>(
GetOrCreateLLVMFunction(Name, FTy, GlobalDecl(D, Type)));
}
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index b064c125ad00..072b1f6585fd 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -41,21 +41,54 @@ static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
}
}
-const
-CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionNoProtoType *FTNP) {
- return getFunctionInfo(FTNP->getResultType(),
- llvm::SmallVector<QualType, 16>(),
- FTNP->getCallConv(), FTNP->getNoReturnAttr());
+/// Derives the 'this' type for codegen purposes, i.e. ignoring method
+/// qualification.
+/// FIXME: address space qualification?
+static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
+ QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
+ return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}
-const
-CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionProtoType *FTP) {
- llvm::SmallVector<QualType, 16> ArgTys;
+/// Returns the canonical formal type of the given C++ method.
+static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
+ return MD->getType()->getCanonicalTypeUnqualified()
+ .getAs<FunctionProtoType>();
+}
+
+/// Returns the "extra-canonicalized" return type, which discards
+/// qualifiers on the return type. Codegen doesn't care about them,
+/// and it makes ABI code a little easier to be able to assume that
+/// all parameter and return types are top-level unqualified.
+static CanQualType GetReturnType(QualType RetTy) {
+ return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
+}
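A one-line illustration of the "extra-canonicalization" (my example):

// const volatile int f();  // CGFunctionInfo records the result type as plain
//                          // 'int'; qualifiers on an rvalue return carry no ABI meaning.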
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
+ return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
+ llvm::SmallVector<CanQualType, 16>(),
+ FTNP->getCallConv(),
+ FTNP->getNoReturnAttr());
+}
+
+/// \param Args - contains any initial parameters besides those
+/// in the formal type
+static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
+ llvm::SmallVectorImpl<CanQualType> &ArgTys,
+ CanQual<FunctionProtoType> FTP) {
// FIXME: Kill copy.
for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys,
- FTP->getCallConv(), FTP->getNoReturnAttr());
+ CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
+ return CGT.getFunctionInfo(ResTy, ArgTys,
+ FTP->getCallConv(),
+ FTP->getNoReturnAttr());
+}
+
+const CGFunctionInfo &
+CodeGenTypes::getFunctionInfo(CanQual<FunctionProtoType> FTP) {
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ return ::getFunctionInfo(*this, ArgTys, FTP);
}
static CallingConv getCallingConventionForDecl(const Decl *D) {
@@ -71,67 +104,51 @@ static CallingConv getCallingConventionForDecl(const Decl *D) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXRecordDecl *RD,
const FunctionProtoType *FTP) {
- llvm::SmallVector<QualType, 16> ArgTys;
-
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
// Add the 'this' pointer.
- ArgTys.push_back(Context.getPointerType(Context.getTagDeclType(RD)));
-
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
-
- // FIXME: Set calling convention correctly, it needs to be associated with the
- // type somehow.
- return getFunctionInfo(FTP->getResultType(), ArgTys,
- FTP->getCallConv(), FTP->getNoReturnAttr());
+ ArgTys.push_back(GetThisType(Context, RD));
+
+ return ::getFunctionInfo(*this, ArgTys,
+ FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXMethodDecl *MD) {
- llvm::SmallVector<QualType, 16> ArgTys;
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+
// Add the 'this' pointer unless this is a static method.
if (MD->isInstance())
- ArgTys.push_back(MD->getThisType(Context));
+ ArgTys.push_back(GetThisType(Context, MD->getParent()));
- const FunctionProtoType *FTP = MD->getType()->getAs<FunctionProtoType>();
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
- FTP->getNoReturnAttr());
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(MD));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXConstructorDecl *D,
CXXCtorType Type) {
- llvm::SmallVector<QualType, 16> ArgTys;
+ llvm::SmallVector<CanQualType, 16> ArgTys;
// Add the 'this' pointer.
- ArgTys.push_back(D->getThisType(Context));
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
// Check if we need to add a VTT parameter (which has type void **).
if (Type == Ctor_Base && D->getParent()->getNumVBases() != 0)
ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
-
- const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
- FTP->getNoReturnAttr());
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const CXXDestructorDecl *D,
CXXDtorType Type) {
- llvm::SmallVector<QualType, 16> ArgTys;
+ llvm::SmallVector<CanQualType, 16> ArgTys;
// Add the 'this' pointer.
- ArgTys.push_back(D->getThisType(Context));
+ ArgTys.push_back(GetThisType(Context, D->getParent()));
// Check if we need to add a VTT parameter (which has type void **).
if (Type == Dtor_Base && D->getParent()->getNumVBases() != 0)
ArgTys.push_back(Context.getPointerType(Context.VoidPtrTy));
-
- const FunctionProtoType *FTP = D->getType()->getAs<FunctionProtoType>();
- for (unsigned i = 0, e = FTP->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FTP->getArgType(i));
- return getFunctionInfo(FTP->getResultType(), ArgTys, FTP->getCallConv(),
- FTP->getNoReturnAttr());
+
+ return ::getFunctionInfo(*this, ArgTys, GetFormalType(D));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
@@ -139,30 +156,25 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const FunctionDecl *FD) {
if (MD->isInstance())
return getFunctionInfo(MD);
- const FunctionType *FTy = FD->getType()->getAs<FunctionType>();
- if (const FunctionNoProtoType *FNTP = dyn_cast<FunctionNoProtoType>(FTy))
- return getFunctionInfo(FNTP->getResultType(),
- llvm::SmallVector<QualType, 16>(),
- FNTP->getCallConv(), FNTP->getNoReturnAttr());
-
- const FunctionProtoType *FPT = cast<FunctionProtoType>(FTy);
- llvm::SmallVector<QualType, 16> ArgTys;
- // FIXME: Kill copy.
- for (unsigned i = 0, e = FPT->getNumArgs(); i != e; ++i)
- ArgTys.push_back(FPT->getArgType(i));
- return getFunctionInfo(FPT->getResultType(), ArgTys,
- FPT->getCallConv(), FPT->getNoReturnAttr());
+ CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();
+ assert(isa<FunctionType>(FTy));
+ if (isa<FunctionNoProtoType>(FTy))
+ return getFunctionInfo(FTy.getAs<FunctionNoProtoType>());
+ assert(isa<FunctionProtoType>(FTy));
+ return getFunctionInfo(FTy.getAs<FunctionProtoType>());
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
- llvm::SmallVector<QualType, 16> ArgTys;
- ArgTys.push_back(MD->getSelfDecl()->getType());
- ArgTys.push_back(Context.getObjCSelType());
+ llvm::SmallVector<CanQualType, 16> ArgTys;
+ ArgTys.push_back(Context.getCanonicalParamType(MD->getSelfDecl()->getType()));
+ ArgTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
// FIXME: Kill copy?
for (ObjCMethodDecl::param_iterator i = MD->param_begin(),
- e = MD->param_end(); i != e; ++i)
- ArgTys.push_back((*i)->getType());
- return getFunctionInfo(MD->getResultType(), ArgTys,
+ e = MD->param_end(); i != e; ++i) {
+ ArgTys.push_back(Context.getCanonicalParamType((*i)->getType()));
+ }
+ return getFunctionInfo(GetReturnType(MD->getResultType()),
+ ArgTys,
getCallingConventionForDecl(MD),
/*NoReturn*/ false);
}
@@ -185,11 +197,11 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
CallingConv CC,
bool NoReturn) {
// FIXME: Kill copy.
- llvm::SmallVector<QualType, 16> ArgTys;
+ llvm::SmallVector<CanQualType, 16> ArgTys;
for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
- ArgTys.push_back(i->second);
- return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
@@ -197,17 +209,23 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
CallingConv CC,
bool NoReturn) {
// FIXME: Kill copy.
- llvm::SmallVector<QualType, 16> ArgTys;
+ llvm::SmallVector<CanQualType, 16> ArgTys;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
- ArgTys.push_back(i->second);
- return getFunctionInfo(ResTy, ArgTys, CC, NoReturn);
+ ArgTys.push_back(Context.getCanonicalParamType(i->second));
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn);
}
-const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
- const llvm::SmallVector<QualType, 16> &ArgTys,
+const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
CallingConv CallConv,
bool NoReturn) {
+#ifndef NDEBUG
+ for (llvm::SmallVectorImpl<CanQualType>::const_iterator
+ I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
+ assert(I->isCanonicalAsParam());
+#endif
+
unsigned CC = ClangCallConvToLLVMCallConv(CallConv);
// Lookup or create unique function info.
@@ -232,8 +250,8 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
bool _NoReturn,
- QualType ResTy,
- const llvm::SmallVector<QualType, 16> &ArgTys)
+ CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
NoReturn(_NoReturn)
@@ -416,6 +434,18 @@ bool CodeGenModule::ReturnTypeUsesSret(const CGFunctionInfo &FI) {
return FI.getReturnInfo().isIndirect();
}
+const llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
+ const CGFunctionInfo &FI = getFunctionInfo(GD);
+
+ // For definition purposes, don't consider a K&R function variadic.
+ bool Variadic = false;
+ if (const FunctionProtoType *FPT =
+ cast<FunctionDecl>(GD.getDecl())->getType()->getAs<FunctionProtoType>())
+ Variadic = FPT->isVariadic();
+
+ return GetFunctionType(FI, Variadic);
+}
+
const llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI, bool IsVariadic) {
std::vector<const llvm::Type*> ArgTys;
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 9601e9ae9a27..3d81165b1bf1 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -18,6 +18,7 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/Value.h"
#include "clang/AST/Type.h"
+#include "clang/AST/CanonicalType.h"
#include "CGValue.h"
@@ -57,7 +58,7 @@ namespace CodeGen {
/// function definition.
class CGFunctionInfo : public llvm::FoldingSetNode {
struct ArgInfo {
- QualType type;
+ CanQualType type;
ABIArgInfo info;
};
@@ -81,8 +82,8 @@ namespace CodeGen {
CGFunctionInfo(unsigned CallingConvention,
bool NoReturn,
- QualType ResTy,
- const llvm::SmallVector<QualType, 16> &ArgTys);
+ CanQualType ResTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys);
~CGFunctionInfo() { delete[] Args; }
const_arg_iterator arg_begin() const { return Args + 1; }
@@ -107,7 +108,7 @@ namespace CodeGen {
EffectiveCallingConvention = Value;
}
- QualType getReturnType() const { return Args[0].type; }
+ CanQualType getReturnType() const { return Args[0].type; }
ABIArgInfo &getReturnInfo() { return Args[0].info; }
const ABIArgInfo &getReturnInfo() const { return Args[0].info; }
@@ -123,14 +124,16 @@ namespace CodeGen {
static void Profile(llvm::FoldingSetNodeID &ID,
unsigned CallingConvention,
bool NoReturn,
- QualType ResTy,
+ CanQualType ResTy,
Iterator begin,
Iterator end) {
ID.AddInteger(CallingConvention);
ID.AddBoolean(NoReturn);
ResTy.Profile(ID);
- for (; begin != end; ++begin)
- begin->Profile(ID);
+ for (; begin != end; ++begin) {
+ CanQualType T = *begin; // force iterator to be over canonical types
+ T.Profile(ID);
+ }
}
};
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index fa5a47f31564..99c6dfd7ebc4 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
+#include "clang/AST/StmtCXX.h"
using namespace clang;
using namespace CodeGen;
@@ -477,12 +478,21 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
const CXXRecordDecl *RD = cast<CXXMethodDecl>(CGF.CurFuncDecl)->getParent();
const CXXRecordDecl *Base = cast<CXXMethodDecl>(GD.getDecl())->getParent();
-
+
llvm::Value *VTT;
- uint64_t SubVTTIndex =
- CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base);
- assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ uint64_t SubVTTIndex;
+
+ // If the record matches the base, this is the complete ctor/dtor
+ // variant calling the base variant in a class with virtual bases.
+ if (RD == Base) {
+ assert(!CGVtableInfo::needsVTTParameter(CGF.CurGD) &&
+ "doing no-op VTT offset in base dtor/ctor?");
+ SubVTTIndex = 0;
+ } else {
+ SubVTTIndex = CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base);
+ assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
+ }
if (CGVtableInfo::needsVTTParameter(CGF.CurGD)) {
// A VTT parameter was passed to the constructor, use it.
@@ -590,19 +600,6 @@ void CodeGenFunction::EmitClassCopyAssignment(
Callee, ReturnValueSlot(), CallArgs, MD);
}
-/// SynthesizeDefaultConstructor - synthesize a default constructor
-void
-CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
- CXXCtorType Type,
- llvm::Function *Fn,
- const FunctionArgList &Args) {
- assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
- StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args,
- SourceLocation());
- EmitCtorPrologue(Ctor, Type);
- FinishFunction();
-}
-
/// SynthesizeCXXCopyConstructor - This routine implicitly defines body of a
/// copy constructor, in accordance with section 12.8 (p7 and p8) of C++03
/// The implicitly-defined copy constructor for class X performs a memberwise
@@ -619,16 +616,12 @@ CodeGenFunction::SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
/// implicitly-defined copy constructor
void
-CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
- CXXCtorType Type,
- llvm::Function *Fn,
- const FunctionArgList &Args) {
+CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
const CXXRecordDecl *ClassDecl = Ctor->getParent();
assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
"SynthesizeCXXCopyConstructor - copy constructor has definition already");
assert(!Ctor->isTrivial() && "shouldn't need to generate trivial ctor");
- StartFunction(GlobalDecl(Ctor, Type), Ctor->getResultType(), Fn, Args,
- SourceLocation());
FunctionArgList::const_iterator i = Args.begin();
const VarDecl *ThisArg = i->first;
@@ -698,7 +691,6 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
}
InitializeVtablePtrs(ClassDecl);
- FinishFunction();
}
/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
@@ -721,14 +713,11 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
///
/// if the subobject is of scalar type, the built-in assignment operator is
/// used.
-void CodeGenFunction::SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
- llvm::Function *Fn,
- const FunctionArgList &Args) {
-
+void CodeGenFunction::SynthesizeCXXCopyAssignment(const FunctionArgList &Args) {
+ const CXXMethodDecl *CD = cast<CXXMethodDecl>(CurGD.getDecl());
const CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(CD->getDeclContext());
assert(!ClassDecl->hasUserDeclaredCopyAssignment() &&
"SynthesizeCXXCopyAssignment - copy assignment has user declaration");
- StartFunction(CD, CD->getResultType(), Fn, Args, SourceLocation());
FunctionArgList::const_iterator i = Args.begin();
const VarDecl *ThisArg = i->first;
@@ -796,8 +785,6 @@ void CodeGenFunction::SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
// return *this;
Builder.CreateStore(LoadOfThis, ReturnValue);
-
- FinishFunction();
}
static void EmitBaseInitializer(CodeGenFunction &CGF,
@@ -904,6 +891,101 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
}
}
+/// Checks whether the given constructor is a valid subject for the
+/// complete-to-base constructor delegation optimization, i.e.
+/// emitting the complete constructor as a simple call to the base
+/// constructor.
+static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+
+ // Currently we disable the optimization for classes with virtual
+ // bases because (1) the addresses of parameter variables need to be
+ // consistent across all initializers but (2) the delegate function
+ // call necessarily creates a second copy of the parameter variable.
+ //
+ // The limiting example (purely theoretical AFAIK):
+ // struct A { A(int &c) { c++; } };
+ // struct B : virtual A {
+ // B(int count) : A(count) { printf("%d\n", count); }
+ // };
+ // ...although even this example could in principle be emitted as a
+ // delegation since the address of the parameter doesn't escape.
+ if (Ctor->getParent()->getNumVBases()) {
+ // TODO: white-list trivial vbase initializers. This case wouldn't
+ // be subject to the restrictions below.
+
+ // TODO: white-list cases where:
+ // - there are no non-reference parameters to the constructor
+ // - the initializers don't access any non-reference parameters
+ // - the initializers don't take the address of non-reference
+ // parameters
+ // - etc.
+ // If we ever add any of the above cases, remember that:
+ // - function-try-blocks will always blacklist this optimization
+ // - we need to perform the constructor prologue and cleanup in
+ // EmitConstructorBody.
+
+ return false;
+ }
+
+ // We also disable the optimization for variadic functions because
+ // it's impossible to "re-pass" varargs.
+ if (Ctor->getType()->getAs<FunctionProtoType>()->isVariadic())
+ return false;
+
+ return true;
+}
+
+/// EmitConstructorBody - Emits the body of the current constructor.
+void CodeGenFunction::EmitConstructorBody(FunctionArgList &Args) {
+ const CXXConstructorDecl *Ctor = cast<CXXConstructorDecl>(CurGD.getDecl());
+ CXXCtorType CtorType = CurGD.getCtorType();
+
+ // Before we go any further, try the complete->base constructor
+ // delegation optimization.
+ if (CtorType == Ctor_Complete && IsConstructorDelegationValid(Ctor)) {
+ EmitDelegateCXXConstructorCall(Ctor, Ctor_Base, Args);
+ return;
+ }
+
+ Stmt *Body = Ctor->getBody();
+
+ // Enter the function-try-block before the constructor prologue if
+ // applicable.
+ CXXTryStmtInfo TryInfo;
+ bool IsTryBody = (Body && isa<CXXTryStmt>(Body));
+
+ if (IsTryBody)
+ TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+
+ unsigned CleanupStackSize = CleanupEntries.size();
+
+ // Emit the constructor prologue, i.e. the base and member
+ // initializers.
+ EmitCtorPrologue(Ctor, CtorType);
+
+ // Emit the body of the statement.
+ if (IsTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Ctor->isImplicit() && "bodyless ctor not implicit");
+ if (!Ctor->isDefaultConstructor()) {
+ assert(Ctor->isCopyConstructor());
+ SynthesizeCXXCopyConstructor(Args);
+ }
+ }
+
+ // Emit any cleanup blocks associated with the member or base
+ // initializers, which includes (along the exceptional path) the
+ // destructors for those members and bases that were fully
+ // constructed.
+ EmitCleanupBlocks(CleanupStackSize);
+
+ if (IsTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+}
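For context, a hedged sketch of what the delegation emits when the aliasing path in CGCXX.cpp did not apply (for instance, linkonce linkage); the IR is approximate:

// struct S { S(int); int v; };   // no virtual bases, not variadic
// define ... @_ZN1SC1Ei(%struct.S* %this, i32 %i) {
//   call void @_ZN1SC2Ei(%struct.S* %this, i32 %i)   ; arguments re-forwarded
//   ret void
// }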
+
/// EmitCtorPrologue - This routine generates necessary code to initialize
/// base classes and non-static data members belonging to this constructor.
void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
@@ -938,10 +1020,87 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
}
}
+/// EmitDestructorBody - Emits the body of the current destructor.
+void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
+ const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
+ CXXDtorType DtorType = CurGD.getDtorType();
+
+ Stmt *Body = Dtor->getBody();
+
+ // If the body is a function-try-block, enter the try before
+ // anything else --- unless we're in a deleting destructor, in which
+ // case we're just going to call the complete destructor and then
+ // call operator delete() on the way out.
+ CXXTryStmtInfo TryInfo;
+ bool isTryBody = (DtorType != Dtor_Deleting &&
+ Body && isa<CXXTryStmt>(Body));
+ if (isTryBody)
+ TryInfo = EnterCXXTryStmt(*cast<CXXTryStmt>(Body));
+
+ llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
+ PushCleanupBlock(DtorEpilogue);
+
+ bool SkipBody = false; // should get jump-threaded
+
+ // If this is the deleting variant, just invoke the complete
+ // variant, then call the appropriate operator delete() on the way
+ // out.
+ if (DtorType == Dtor_Deleting) {
+ EmitCXXDestructorCall(Dtor, Dtor_Complete, LoadCXXThis());
+ SkipBody = true;
+
+ // If this is the complete variant, just invoke the base variant;
+ // the epilogue will destruct the virtual bases. But we can't do
+ // this optimization if the body is a function-try-block, because
+ // we'd introduce *two* handler blocks.
+ } else if (!isTryBody && DtorType == Dtor_Complete) {
+ EmitCXXDestructorCall(Dtor, Dtor_Base, LoadCXXThis());
+ SkipBody = true;
+
+ // Otherwise, we're in the base variant, so we need to ensure the
+ // vtable ptrs are right before emitting the body.
+ } else {
+ InitializeVtablePtrs(Dtor->getParent());
+ }
+
+ // Emit the body of the statement.
+ if (SkipBody)
+ (void) 0;
+ else if (isTryBody)
+ EmitStmt(cast<CXXTryStmt>(Body)->getTryBlock());
+ else if (Body)
+ EmitStmt(Body);
+ else {
+ assert(Dtor->isImplicit() && "bodyless dtor not implicit");
+ // nothing to do besides what's in the epilogue
+ }
+
+ // Jump to the cleanup block.
+ CleanupBlockInfo Info = PopCleanupBlock();
+ assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
+ EmitBlock(DtorEpilogue);
+
+ // Emit the destructor epilogue now. If this is a complete
+ // destructor with a function-try-block, perform the base epilogue
+ // as well.
+ if (isTryBody && DtorType == Dtor_Complete)
+ EmitDtorEpilogue(Dtor, Dtor_Base);
+ EmitDtorEpilogue(Dtor, DtorType);
+
+ // Link up the cleanup information.
+ if (Info.SwitchBlock)
+ EmitBlock(Info.SwitchBlock);
+ if (Info.EndBlock)
+ EmitBlock(Info.EndBlock);
+
+ // Exit the try if applicable.
+ if (isTryBody)
+ ExitCXXTryStmt(*cast<CXXTryStmt>(Body), TryInfo);
+}
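A short summary of how the three Itanium destructor variants are routed by the code above (my paraphrase):

// D0 (deleting): call this class's D1, then the epilogue runs operator delete.
// D1 (complete): call this class's D2, then the epilogue destroys virtual bases.
// D2 (base):     set up vtable pointers, emit the user body, then the epilogue
//                destroys fields and non-virtual bases.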
+
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes
/// in reverse order of their construction.
-/// FIXME: This needs to take a CXXDtorType.
void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
CXXDtorType DtorType) {
assert(!DD->isTrivial() &&
@@ -949,6 +1108,44 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
const CXXRecordDecl *ClassDecl = DD->getParent();
+ // In a deleting destructor, we've already called the complete
+ // destructor as a subroutine, so we just have to delete the
+ // appropriate value.
+ if (DtorType == Dtor_Deleting) {
+ assert(DD->getOperatorDelete() &&
+ "operator delete missing - EmitDtorEpilogue");
+ EmitDeleteCall(DD->getOperatorDelete(), LoadCXXThis(),
+ getContext().getTagDeclType(ClassDecl));
+ return;
+ }
+
+ // For complete destructors, we've already called the base
+ // destructor (in GenerateBody), so we just need to destruct all the
+ // virtual bases.
+ if (DtorType == Dtor_Complete) {
+ // Handle virtual bases.
+ for (CXXRecordDecl::reverse_base_class_const_iterator I =
+ ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend();
+ I != E; ++I) {
+ const CXXBaseSpecifier &Base = *I;
+ CXXRecordDecl *BaseClassDecl
+ = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore trivial destructors.
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+ const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
+ llvm::Value *V = GetAddressOfBaseOfCompleteClass(LoadCXXThis(),
+ true,
+ ClassDecl,
+ BaseClassDecl);
+ EmitCXXDestructorCall(D, Dtor_Base, V);
+ }
+ return;
+ }
+
+ assert(DtorType == Dtor_Base);
+
// Collect the fields.
llvm::SmallVector<const FieldDecl *, 16> FieldDecls;
for (CXXRecordDecl::field_iterator I = ClassDecl->field_begin(),
@@ -1021,51 +1218,6 @@ void CodeGenFunction::EmitDtorEpilogue(const CXXDestructorDecl *DD,
/*NullCheckValue=*/false);
EmitCXXDestructorCall(D, Dtor_Base, V);
}
-
- // If we're emitting a base destructor, we don't want to emit calls to the
- // virtual bases.
- if (DtorType == Dtor_Base)
- return;
-
- // Handle virtual bases.
- for (CXXRecordDecl::reverse_base_class_const_iterator I =
- ClassDecl->vbases_rbegin(), E = ClassDecl->vbases_rend(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
-
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
- const CXXDestructorDecl *D = BaseClassDecl->getDestructor(getContext());
- llvm::Value *V = GetAddressOfBaseOfCompleteClass(LoadCXXThis(),
- true,
- ClassDecl,
- BaseClassDecl);
- EmitCXXDestructorCall(D, Dtor_Base, V);
- }
-
- // If we have a deleting destructor, emit a call to the delete operator.
- if (DtorType == Dtor_Deleting) {
- assert(DD->getOperatorDelete() &&
- "operator delete missing - EmitDtorEpilogue");
- EmitDeleteCall(DD->getOperatorDelete(), LoadCXXThis(),
- getContext().getTagDeclType(ClassDecl));
- }
-}
-
-void CodeGenFunction::SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
- CXXDtorType DtorType,
- llvm::Function *Fn,
- const FunctionArgList &Args) {
- assert(!Dtor->getParent()->hasUserDeclaredDestructor() &&
- "SynthesizeDefaultDestructor - destructor has user declaration");
-
- StartFunction(GlobalDecl(Dtor, DtorType), Dtor->getResultType(), Fn, Args,
- SourceLocation());
- InitializeVtablePtrs(Dtor->getParent());
- EmitDtorEpilogue(Dtor, DtorType);
- FinishFunction();
}
/// EmitCXXAggrConstructorCall - This routine essentially creates a (nested)
@@ -1303,6 +1455,71 @@ CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
EmitCXXMemberCall(D, Callee, ReturnValueSlot(), This, VTT, ArgBeg, ArgEnd);
}
+void
+CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args) {
+ CallArgList DelegateArgs;
+
+ FunctionArgList::const_iterator I = Args.begin(), E = Args.end();
+ assert(I != E && "no parameters to constructor");
+
+ // this
+ DelegateArgs.push_back(std::make_pair(RValue::get(LoadCXXThis()),
+ I->second));
+ ++I;
+
+ // vtt
+ if (llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(Ctor, CtorType))) {
+ QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
+ DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
+
+ if (CGVtableInfo::needsVTTParameter(CurGD)) {
+ assert(I != E && "cannot skip vtt parameter, already done with args");
+ assert(I->second == VoidPP && "skipping parameter not of vtt type");
+ ++I;
+ }
+ }
+
+ // Explicit arguments.
+ for (; I != E; ++I) {
+
+ const VarDecl *Param = I->first;
+ QualType ArgType = Param->getType(); // because we're passing it to itself
+
+ // StartFunction converted the ABI-lowered parameter(s) into a
+ // local alloca. We need to turn that into an r-value suitable
+ // for EmitCall.
+ llvm::Value *Local = GetAddrOfLocalVar(Param);
+ RValue Arg;
+
+ // For the most part, we just need to load the alloca, except:
+ // 1) aggregate r-values are actually pointers to temporaries, and
+ // 2) references to aggregates are pointers directly to the aggregate.
+ // I don't know why references to non-aggregates are different here.
+ if (ArgType->isReferenceType()) {
+ const ReferenceType *RefType = ArgType->getAs<ReferenceType>();
+ if (hasAggregateLLVMType(RefType->getPointeeType()))
+ Arg = RValue::getAggregate(Local);
+ else
+ // Locals which are references to scalars are represented
+ // with allocas holding the pointer.
+ Arg = RValue::get(Builder.CreateLoad(Local));
+ } else {
+ if (hasAggregateLLVMType(ArgType))
+ Arg = RValue::getAggregate(Local);
+ else
+ Arg = RValue::get(EmitLoadOfScalar(Local, false, ArgType));
+ }
+
+ DelegateArgs.push_back(std::make_pair(Arg, ArgType));
+ }
+
+ EmitCall(CGM.getTypes().getFunctionInfo(Ctor, CtorType),
+ CGM.GetAddrOfCXXConstructor(Ctor, CtorType),
+ ReturnValueSlot(), DelegateArgs, Ctor);
+}
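A hedged example of the three argument shapes the re-packaging loop above distinguishes (names are hypothetical):

// struct Big { int a[8]; };
// struct S { S(int n, Big byval, Big &byref); };  // no virtual bases
//   n     -> scalar:                 load from its alloca
//   byval -> aggregate r-value:      pass the address of the local temporary
//   byref -> reference to aggregate: pass the stored pointer directly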
+
void CodeGenFunction::EmitCXXDestructorCall(const CXXDestructorDecl *DD,
CXXDtorType Type,
llvm::Value *This) {
@@ -1405,11 +1622,3 @@ void CodeGenFunction::InitializeVtablePtrsRecursive(
// Store address point
Builder.CreateStore(VtableAddressPoint, VtableField);
}
-
-llvm::Value *CodeGenFunction::LoadCXXVTT() {
- assert((isa<CXXConstructorDecl>(CurFuncDecl) ||
- isa<CXXDestructorDecl>(CurFuncDecl)) &&
- "Must be in a C++ ctor or dtor to load the vtt parameter");
-
- return Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
-}
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 5b9c6b055e0e..0f3502e9bea3 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -1034,6 +1034,28 @@ llvm::DIType CGDebugInfo::CreateType(const TagType *Ty,
return llvm::DIType();
}
+llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
+ llvm::DICompileUnit Unit) {
+ llvm::DIType ElementTy = getOrCreateType(Ty->getElementType(), Unit);
+ uint64_t NumElems = Ty->getNumElements();
+ if (NumElems > 0)
+ --NumElems;
+ llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
+ Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, NumElems));
+
+ llvm::DIArray SubscriptArray =
+ DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t Align = CGM.getContext().getTypeAlign(Ty);
+
+ return
+ DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_vector_type,
+ Unit, "", llvm::DICompileUnit(),
+ 0, Size, Align, 0, 0,
+ ElementTy, SubscriptArray);
+}
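For reference, the kind of source type this new hook now describes (my example):

// typedef int v4si __attribute__((vector_size(16)));
// emitted as DW_TAG_vector_type with subrange [0, 3] rather than being
// dropped as an empty llvm::DIType().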
+
llvm::DIType CGDebugInfo::CreateType(const ArrayType *Ty,
llvm::DICompileUnit Unit) {
uint64_t Size;
@@ -1214,9 +1236,10 @@ llvm::DIType CGDebugInfo::CreateTypeNode(QualType Ty,
// FIXME: Handle these.
case Type::ExtVector:
- case Type::Vector:
return llvm::DIType();
-
+
+ case Type::Vector:
+ return CreateType(cast<VectorType>(Ty), Unit);
case Type::ObjCObjectPointer:
return CreateType(cast<ObjCObjectPointerType>(Ty), Unit);
case Type::ObjCInterface:
@@ -1351,10 +1374,13 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
/// EmitRegionStart- Constructs the debug code for entering a declarative
/// region - "llvm.dbg.region.start.".
void CGDebugInfo::EmitRegionStart(llvm::Function *Fn, CGBuilderTy &Builder) {
+ SourceManager &SM = CGM.getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
llvm::DIDescriptor D =
DebugFactory.CreateLexicalBlock(RegionStack.empty() ?
llvm::DIDescriptor() :
- llvm::DIDescriptor(RegionStack.back()));
+ llvm::DIDescriptor(RegionStack.back()),
+ PLoc.getLine(), PLoc.getColumn());
RegionStack.push_back(D.getNode());
}
@@ -1666,7 +1692,7 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
T = CGM.getContext().getConstantArrayType(ET, ConstVal,
ArrayType::Normal, 0);
}
- llvm::StringRef DeclName = D->getName();
+ llvm::StringRef DeclName = Var->getName();
llvm::DIDescriptor DContext =
getContextDescriptor(dyn_cast<Decl>(D->getDeclContext()), Unit);
DebugFactory.CreateGlobalVariable(DContext, DeclName,
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index b2d3a1f1fa53..50f575940886 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -84,6 +84,7 @@ class CGDebugInfo {
llvm::DIType CreateType(const RecordType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const ObjCInterfaceType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const EnumType *Ty, llvm::DICompileUnit U);
+ llvm::DIType CreateType(const VectorType *Ty, llvm::DICompileUnit Unit);
llvm::DIType CreateType(const ArrayType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const LValueReferenceType *Ty, llvm::DICompileUnit U);
llvm::DIType CreateType(const MemberPointerType *Ty, llvm::DICompileUnit U);
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index d956c1c3cd85..142cb811b059 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -427,6 +427,26 @@ void CodeGenFunction::EmitEndEHSpec(const Decl *D) {
}
void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
+ CXXTryStmtInfo Info = EnterCXXTryStmt(S);
+ EmitStmt(S.getTryBlock());
+ ExitCXXTryStmt(S, Info);
+}
+
+CodeGenFunction::CXXTryStmtInfo
+CodeGenFunction::EnterCXXTryStmt(const CXXTryStmt &S) {
+ CXXTryStmtInfo Info;
+ Info.SavedLandingPad = getInvokeDest();
+ Info.HandlerBlock = createBasicBlock("try.handler");
+ Info.FinallyBlock = createBasicBlock("finally");
+
+ PushCleanupBlock(Info.FinallyBlock);
+ setInvokeDest(Info.HandlerBlock);
+
+ return Info;
+}
+
+void CodeGenFunction::ExitCXXTryStmt(const CXXTryStmt &S,
+ CXXTryStmtInfo TryInfo) {
// Pointer to the personality function
llvm::Constant *Personality =
CGM.CreateRuntimeFunction(llvm::FunctionType::get(llvm::Type::getInt32Ty
@@ -439,54 +459,12 @@ void CodeGenFunction::EmitCXXTryStmt(const CXXTryStmt &S) {
llvm::Value *llvm_eh_selector =
CGM.getIntrinsic(llvm::Intrinsic::eh_selector);
- llvm::BasicBlock *PrevLandingPad = getInvokeDest();
- llvm::BasicBlock *TryHandler = createBasicBlock("try.handler");
- llvm::BasicBlock *FinallyBlock = createBasicBlock("finally");
+ llvm::BasicBlock *PrevLandingPad = TryInfo.SavedLandingPad;
+ llvm::BasicBlock *TryHandler = TryInfo.HandlerBlock;
+ llvm::BasicBlock *FinallyBlock = TryInfo.FinallyBlock;
llvm::BasicBlock *FinallyRethrow = createBasicBlock("finally.throw");
llvm::BasicBlock *FinallyEnd = createBasicBlock("finally.end");
- // Push an EH context entry, used for handling rethrows.
- PushCleanupBlock(FinallyBlock);
-
- // Emit the statements in the try {} block
- setInvokeDest(TryHandler);
-
- // FIXME: We should not have to do this here. The AST should have the member
- // initializers under the CXXTryStmt's TryBlock.
- if (OuterTryBlock == &S) {
- GlobalDecl GD = CurGD;
- const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
-
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- size_t OldCleanupStackSize = CleanupEntries.size();
- EmitCtorPrologue(CD, CurGD.getCtorType());
- EmitStmt(S.getTryBlock());
-
- // If any of the member initializers are temporaries bound to references
- // make sure to emit their destructors.
- EmitCleanupBlocks(OldCleanupStackSize);
- } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
- llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
- PushCleanupBlock(DtorEpilogue);
-
- InitializeVtablePtrs(DD->getParent());
- EmitStmt(S.getTryBlock());
-
- CleanupBlockInfo Info = PopCleanupBlock();
-
- assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
- EmitBlock(DtorEpilogue);
- EmitDtorEpilogue(DD, GD.getDtorType());
-
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
- } else
- EmitStmt(S.getTryBlock());
- } else
- EmitStmt(S.getTryBlock());
-
// Jump to end if there is no exception
EmitBranchThroughCleanup(FinallyEnd);
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index 830954fd10cc..030d2c9c9f84 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -36,7 +36,17 @@ llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(const llvm::Type *Ty,
return new llvm::AllocaInst(Ty, 0, Name, AllocaInsertPt);
}
-llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty, const llvm::Twine &Name) {
+llvm::Value *CodeGenFunction::CreateIRTemp(QualType Ty,
+ const llvm::Twine &Name) {
+ llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertType(Ty), Name);
+ // FIXME: Should we prefer the preferred type alignment here?
+ CharUnits Align = getContext().getTypeAlignInChars(Ty);
+ Alloc->setAlignment(Align.getQuantity());
+ return Alloc;
+}
+
+llvm::Value *CodeGenFunction::CreateMemTemp(QualType Ty,
+ const llvm::Twine &Name) {
llvm::AllocaInst *Alloc = CreateTempAlloca(ConvertTypeForMem(Ty), Name);
// FIXME: Should we prefer the preferred type alignment here?
CharUnits Align = getContext().getTypeAlignInChars(Ty);
@@ -1520,9 +1530,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
}
LValue CodeGenFunction::EmitCompoundLiteralLValue(const CompoundLiteralExpr* E){
- llvm::Value *DeclPtr = CreateTempAlloca(ConvertTypeForMem(E->getType()),
- ".compoundliteral");
-
+ llvm::Value *DeclPtr = CreateMemTemp(E->getType(), ".compoundliteral");
const Expr* InitExpr = E->getInitializer();
LValue Result = LValue::MakeAddr(DeclPtr, MakeQualifiers(E->getType()));
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 97455c7b13cf..ac189a064904 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -189,7 +189,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CGF.ConvertType(PtrTy));
EmitInitializationToLValue(E->getSubExpr(),
LValue::MakeAddr(CastPtr, Qualifiers()),
- E->getType());
+ E->getSubExpr()->getType());
break;
}
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 1d38ef9e2d2f..198e2d12fca3 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -56,7 +56,7 @@ private:
const llvm::FunctionType *IMPTy;
const llvm::PointerType *IdTy;
const llvm::PointerType *PtrToIdTy;
- QualType ASTIdTy;
+ CanQualType ASTIdTy;
const llvm::IntegerType *IntTy;
const llvm::PointerType *PtrTy;
const llvm::IntegerType *LongTy;
@@ -262,7 +262,7 @@ CGObjCGNU::CGObjCGNU(CodeGen::CodeGenModule &cgm)
PtrTy = PtrToInt8Ty;
// Object type
- ASTIdTy = CGM.getContext().getObjCIdType();
+ ASTIdTy = CGM.getContext().getCanonicalType(CGM.getContext().getObjCIdType());
if (QualType() == ASTIdTy) {
IdTy = PtrToInt8Ty;
} else {
@@ -1192,19 +1192,22 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
iter != endIter ; iter++) {
std::vector<llvm::Constant*> Fields;
ObjCPropertyDecl *property = (*iter)->getPropertyDecl();
+ ObjCPropertyImplDecl *propertyImpl = *iter;
+ bool isSynthesized = (propertyImpl->getPropertyImplementation() ==
+ ObjCPropertyImplDecl::Synthesize);
Fields.push_back(MakeConstantString(property->getNameAsString()));
Fields.push_back(llvm::ConstantInt::get(Int8Ty,
property->getPropertyAttributes()));
- Fields.push_back(llvm::ConstantInt::get(Int8Ty,
- (*iter)->getPropertyImplementation() ==
- ObjCPropertyImplDecl::Synthesize));
+ Fields.push_back(llvm::ConstantInt::get(Int8Ty, isSynthesized));
if (ObjCMethodDecl *getter = property->getGetterMethodDecl()) {
- InstanceMethodSels.push_back(getter->getSelector());
std::string TypeStr;
Context.getObjCEncodingForMethodDecl(getter,TypeStr);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(getter->getSelector());
+ }
Fields.push_back(MakeConstantString(getter->getSelector().getAsString()));
Fields.push_back(TypeEncoding);
} else {
@@ -1212,11 +1215,13 @@ llvm::Constant *CGObjCGNU::GeneratePropertyList(const ObjCImplementationDecl *OI
Fields.push_back(NULLPtr);
}
if (ObjCMethodDecl *setter = property->getSetterMethodDecl()) {
- InstanceMethodSels.push_back(setter->getSelector());
std::string TypeStr;
Context.getObjCEncodingForMethodDecl(setter,TypeStr);
llvm::Constant *TypeEncoding = MakeConstantString(TypeStr);
- InstanceMethodTypes.push_back(TypeEncoding);
+ if (isSynthesized) {
+ InstanceMethodTypes.push_back(TypeEncoding);
+ InstanceMethodSels.push_back(setter->getSelector());
+ }
Fields.push_back(MakeConstantString(setter->getSelector().getAsString()));
Fields.push_back(TypeEncoding);
} else {
@@ -1685,7 +1690,7 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_enumerationMutation (id)
- llvm::SmallVector<QualType,16> Params;
+ llvm::SmallVector<CanQualType,1> Params;
Params.push_back(ASTIdTy);
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index b16a510f98f6..475280b6a01e 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -297,9 +297,9 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// id objc_getProperty (id, SEL, ptrdiff_t, bool)
- llvm::SmallVector<QualType,16> Params;
- QualType IdType = Ctx.getObjCIdType();
- QualType SelType = Ctx.getObjCSelType();
+ llvm::SmallVector<CanQualType,4> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
Params.push_back(SelType);
Params.push_back(Ctx.LongTy);
@@ -314,9 +314,9 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
- llvm::SmallVector<QualType,16> Params;
- QualType IdType = Ctx.getObjCIdType();
- QualType SelType = Ctx.getObjCSelType();
+ llvm::SmallVector<CanQualType,6> Params;
+ CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
+ CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
Params.push_back(IdType);
Params.push_back(SelType);
Params.push_back(Ctx.LongTy);
@@ -333,8 +333,8 @@ public:
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
// void objc_enumerationMutation (id)
- llvm::SmallVector<QualType,16> Params;
- Params.push_back(Ctx.getObjCIdType());
+ llvm::SmallVector<CanQualType,1> Params;
+ Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
CC_Default, false), false);
@@ -3293,7 +3293,7 @@ llvm::Constant *CGObjCCommonMac::BuildIvarLayout(
// Add this implementations synthesized ivars.
llvm::SmallVector<ObjCIvarDecl*, 16> Ivars;
- CGM.getContext().CollectSynthesizedIvars(OI, Ivars);
+ CGM.getContext().CollectNonClassIvars(OI, Ivars);
for (unsigned k = 0, e = Ivars.size(); k != e; ++k)
RecFields.push_back(cast<FieldDecl>(Ivars[k]));
@@ -5093,9 +5093,8 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
// Find the message function name.
// FIXME. This is too much work to get the ABI-specific result type needed to
// find the message name.
- const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType,
- llvm::SmallVector<QualType, 16>(),
- CC_Default, false);
+ const CGFunctionInfo &FnInfo
+ = Types.getFunctionInfo(ResultType, CallArgList(), CC_Default, false);
llvm::Constant *Fn = 0;
std::string Name("\01l_");
if (CGM.ReturnTypeUsesSret(FnInfo)) {
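The GNU and Mac runtime hunks above converge on the same declaration pattern for ObjC runtime helpers: build the parameter list out of canonical types and hand it to CodeGenTypes. A minimal sketch of that pattern for objc_setProperty, with the trailing CreateRuntimeFunction call assumed from the surrounding (unshown) code rather than taken from this patch:

    // Sketch only; CGM, Ctx and Types are in scope as in the hunks above.
    // void objc_setProperty (id, SEL, ptrdiff_t, id, bool, bool)
    llvm::SmallVector<CanQualType,6> Params;
    CanQualType IdType = Ctx.getCanonicalParamType(Ctx.getObjCIdType());
    CanQualType SelType = Ctx.getCanonicalParamType(Ctx.getObjCSelType());
    Params.push_back(IdType);
    Params.push_back(SelType);
    Params.push_back(Ctx.LongTy);
    Params.push_back(IdType);
    Params.push_back(Ctx.BoolTy);
    Params.push_back(Ctx.BoolTy);
    const llvm::FunctionType *FTy =
      Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
                                                  CC_Default, false), false);
    llvm::Constant *SetPropertyFn =
      CGM.CreateRuntimeFunction(FTy, "objc_setProperty");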
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 9714bd9d9678..96c104b22d15 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -46,7 +46,8 @@ class VTTBuilder {
llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
CtorVtableAddressPoints;
- llvm::Constant *getCtorVtable(const BaseSubobject &Base) {
+ llvm::Constant *getCtorVtable(const BaseSubobject &Base,
+ bool BaseIsVirtual) {
if (!GenerateDefinition)
return 0;
@@ -54,7 +55,7 @@ class VTTBuilder {
if (!CtorVtable) {
// Build the vtable.
CGVtableInfo::CtorVtableInfo Info
- = CGM.getVtableInfo().getCtorVtable(Class, Base);
+ = CGM.getVtableInfo().getCtorVtable(Class, Base, BaseIsVirtual);
CtorVtable = Info.Vtable;
@@ -166,7 +167,7 @@ class VTTBuilder {
if (BaseMorallyVirtual || VtblClass == Class)
init = BuildVtablePtr(vtbl, VtblClass, Base, BaseOffset);
else {
- init = getCtorVtable(BaseSubobject(Base, BaseOffset));
+ init = getCtorVtable(BaseSubobject(Base, BaseOffset), i->isVirtual());
subvtbl = init;
subVtblClass = Base;
@@ -186,7 +187,8 @@ class VTTBuilder {
/// BuildVTT - Add the VTT to Inits. Offset is the offset in bits to the
/// current object we're working on.
- void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool MorallyVirtual) {
+ void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool BaseIsVirtual,
+ bool MorallyVirtual) {
// Itanium C++ ABI 2.6.2:
// An array of virtual table addresses, called the VTT, is declared for
// each class type that has indirect or direct virtual base classes.
@@ -204,7 +206,8 @@ class VTTBuilder {
Vtable = ClassVtbl;
VtableClass = Class;
} else {
- Vtable = getCtorVtable(BaseSubobject(RD, Offset));
+ Vtable = getCtorVtable(BaseSubobject(RD, Offset),
+ /*IsVirtual=*/BaseIsVirtual);
VtableClass = RD;
}
@@ -235,7 +238,7 @@ class VTTBuilder {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- BuildVTT(Base, BaseOffset, MorallyVirtual);
+ BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/false, MorallyVirtual);
}
}
@@ -249,7 +252,7 @@ class VTTBuilder {
if (i->isVirtual() && !SeenVBase.count(Base)) {
SeenVBase.insert(Base);
uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
- BuildVTT(Base, BaseOffset, false);
+ BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/true, false);
}
VirtualVTTs(Base);
}
@@ -335,13 +338,13 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
CGVtableInfo::CtorVtableInfo
CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD,
- const BaseSubobject &Base) {
+ const BaseSubobject &Base, bool BaseIsVirtual) {
CtorVtableInfo Info;
Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
/*GenerateDefinition=*/true,
RD, Base.getBase(), Base.getBaseOffset(),
- Info.AddressPoints);
+ BaseIsVirtual, Info.AddressPoints);
return Info;
}
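For orientation, a hierarchy (illustrative only, not taken from this patch) that exercises the path touched above: per the Itanium ABI a VTT is emitted for any class with direct or indirect virtual bases, and each base-class subobject that itself has virtual bases gets a construction vtable, which is why getCtorVtable now has to know whether the base it is laying out is a virtual base of the complete class.

    // Hypothetical example: D gets a VTT; B-in-D and C-in-D get construction
    // vtables so that A's vtable pointers are correct while the B and C
    // subobjects of a D are being constructed and destroyed.
    struct A { virtual void f(); int a; };
    struct B : virtual A { int b; };
    struct C : virtual A { int c; };
    struct D : B, C { int d; };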
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index 970bbd777f14..932bd079e93f 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CXXInheritance.h"
#include "clang/AST/RecordLayout.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include <cstdio>
@@ -25,40 +26,44 @@ using namespace CodeGen;
namespace {
+/// BaseOffset - Represents an offset from a derived class to a direct or
+/// indirect base class.
+struct BaseOffset {
+ /// DerivedClass - The derived class.
+ const CXXRecordDecl *DerivedClass;
+
+ /// VirtualBase - If the path from the derived class to the base class
+ /// involves a virtual base class, this holds its declaration.
+ const CXXRecordDecl *VirtualBase;
+
+ /// NonVirtualOffset - The offset from the derived class to the base class.
+ /// (Or the offset from the virtual base class to the base class, if the
+ /// path from the derived class to the base class involves a virtual base
+ /// class.)
+ int64_t NonVirtualOffset;
+
+ BaseOffset() : DerivedClass(0), VirtualBase(0), NonVirtualOffset(0) { }
+ BaseOffset(const CXXRecordDecl *DerivedClass,
+ const CXXRecordDecl *VirtualBase, int64_t NonVirtualOffset)
+ : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
+ NonVirtualOffset(NonVirtualOffset) { }
+
+ bool isEmpty() const { return !NonVirtualOffset && !VirtualBase; }
+};
+
/// FinalOverriders - Contains the final overrider member functions for all
/// member functions in the base subobjects of a class.
class FinalOverriders {
public:
- /// BaseOffset - Represents an offset from a derived class to a direct or
- /// indirect base class.
- struct BaseOffset {
- /// DerivedClass - The derived class.
- const CXXRecordDecl *DerivedClass;
-
- /// VirtualBase - If the path from the derived class to the base class
- /// involves a virtual base class, this holds its declaration.
- const CXXRecordDecl *VirtualBase;
-
- /// NonVirtualOffset - The offset from the derived class to the base class.
- /// Or the offset from the virtual base class to the base class, if the path
- /// from the derived class to the base class involves a virtual base class.
- int64_t NonVirtualOffset;
-
- BaseOffset() : DerivedClass(0), VirtualBase(0), NonVirtualOffset(0) { }
- BaseOffset(const CXXRecordDecl *DerivedClass,
- const CXXRecordDecl *VirtualBase, int64_t NonVirtualOffset)
- : DerivedClass(DerivedClass), VirtualBase(VirtualBase),
- NonVirtualOffset(NonVirtualOffset) { }
-
- bool isEmpty() const { return !NonVirtualOffset && !VirtualBase; }
- };
-
/// OverriderInfo - Information about a final overrider.
struct OverriderInfo {
/// Method - The method decl of the overrider.
const CXXMethodDecl *Method;
+
+ /// Offset - the base offset of the overrider relative to the layout class.
+ int64_t Offset;
- OverriderInfo() : Method(0) { }
+ OverriderInfo() : Method(0), Offset(0) { }
};
private:
@@ -93,12 +98,9 @@ private:
/// ReturnAdjustments - Holds return adjustments for all the overriders that
/// need to perform return value adjustments.
AdjustmentOffsetsMapTy ReturnAdjustments;
-
- /// ThisAdjustments - Holds 'this' adjustments for all the overriders that
- /// need them.
- AdjustmentOffsetsMapTy ThisAdjustments;
- typedef llvm::SmallVector<uint64_t, 1> OffsetVectorTy;
+ // FIXME: We might be able to get away with making this a SmallSet.
+ typedef llvm::SmallSetVector<uint64_t, 2> OffsetSetVectorTy;
/// SubobjectOffsetsMapTy - This map is used for keeping track of all the
/// base subobject offsets that a single class declaration might refer to.
@@ -113,12 +115,13 @@ private:
/// when we determine that C::f() overrides A::f(), we need to update the
/// overriders map for both A-in-B1 and A-in-B2 and the subobject offsets map
/// will have the subobject offsets for both A copies.
- typedef llvm::DenseMap<const CXXRecordDecl *, OffsetVectorTy>
+ typedef llvm::DenseMap<const CXXRecordDecl *, OffsetSetVectorTy>
SubobjectOffsetsMapTy;
/// ComputeFinalOverriders - Compute the final overriders for a given base
/// subobject (and all its direct and indirect bases).
void ComputeFinalOverriders(BaseSubobject Base,
+ bool BaseSubobjectIsVisitedVBase,
SubobjectOffsetsMapTy &Offsets);
/// AddOverriders - Add the final overriders for this base subobject to the
@@ -139,17 +142,12 @@ private:
const CXXMethodDecl *NewMD,
SubobjectOffsetsMapTy &Offsets);
- /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
- /// the 'this' pointer from the base subobject to the derived subobject.
- BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
- BaseSubobject Derived);
-
static void MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
SubobjectOffsetsMapTy &Offsets);
public:
explicit FinalOverriders(const CXXRecordDecl *MostDerivedClass);
-
+
/// getOverrider - Get the final overrider for the given method declaration in
/// the given base subobject.
OverriderInfo getOverrider(BaseSubobject Base,
@@ -168,14 +166,6 @@ public:
return ReturnAdjustments.lookup(std::make_pair(Base, MD));
}
- /// getThisAdjustmentOffset - Get the 'this' pointer adjustment offset for the
- /// method decl in the given base subobject. Returns an empty base offset if
- /// no adjustment is needed.
- BaseOffset getThisAdjustmentOffset(BaseSubobject Base,
- const CXXMethodDecl *MD) const {
- return ThisAdjustments.lookup(std::make_pair(Base, MD));
- }
-
/// dump - dump the final overriders.
void dump() {
assert(VisitedVirtualBases.empty() &&
@@ -189,6 +179,8 @@ public:
void dump(llvm::raw_ostream &Out, BaseSubobject Base);
};
+#define DUMP_OVERRIDERS 0
+
FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass)
: MostDerivedClass(MostDerivedClass),
Context(MostDerivedClass->getASTContext()),
@@ -196,8 +188,11 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass)
// Compute the final overriders.
SubobjectOffsetsMapTy Offsets;
- ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0), Offsets);
-
+ ComputeFinalOverriders(BaseSubobject(MostDerivedClass, 0),
+ /*BaseSubobjectIsVisitedVBase=*/false, Offsets);
+ VisitedVirtualBases.clear();
+
+#if DUMP_OVERRIDERS
// And dump them (for now).
dump();
@@ -212,6 +207,7 @@ FinalOverriders::FinalOverriders(const CXXRecordDecl *MostDerivedClass)
for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I)
llvm::errs() << " " << I << " - " << OffsetVector[I] << '\n';
}
+#endif
}
void FinalOverriders::AddOverriders(BaseSubobject Base,
@@ -232,13 +228,14 @@ void FinalOverriders::AddOverriders(BaseSubobject Base,
OverriderInfo& Overrider = OverridersMap[std::make_pair(Base, MD)];
assert(!Overrider.Method && "Overrider should not exist yet!");
+ Overrider.Offset = Base.getBaseOffset();
Overrider.Method = MD;
}
}
-static FinalOverriders::BaseOffset
-ComputeBaseOffset(ASTContext &Context, const CXXRecordDecl *DerivedRD,
- const CXXBasePath &Path) {
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *DerivedRD,
+ const CXXBasePath &Path) {
int64_t NonVirtualOffset = 0;
unsigned NonVirtualStart = 0;
@@ -264,37 +261,36 @@ ComputeBaseOffset(ASTContext &Context, const CXXRecordDecl *DerivedRD,
// Check the base class offset.
const ASTRecordLayout &Layout = Context.getASTRecordLayout(Element.Class);
-
+
const RecordType *BaseType = Element.Base->getType()->getAs<RecordType>();
const CXXRecordDecl *Base = cast<CXXRecordDecl>(BaseType->getDecl());
-
+
NonVirtualOffset += Layout.getBaseClassOffset(Base);
}
// FIXME: This should probably use CharUnits or something. Maybe we should
// even change the base offsets in ASTRecordLayout to be specified in
// CharUnits.
- return FinalOverriders::BaseOffset(DerivedRD, VirtualBase,
- NonVirtualOffset / 8);
+ return BaseOffset(DerivedRD, VirtualBase, NonVirtualOffset / 8);
}
-static FinalOverriders::BaseOffset
-ComputeBaseOffset(ASTContext &Context, const CXXRecordDecl *BaseRD,
- const CXXRecordDecl *DerivedRD) {
+static BaseOffset ComputeBaseOffset(ASTContext &Context,
+ const CXXRecordDecl *BaseRD,
+ const CXXRecordDecl *DerivedRD) {
CXXBasePaths Paths(/*FindAmbiguities=*/false,
/*RecordPaths=*/true, /*DetectVirtual=*/false);
if (!const_cast<CXXRecordDecl *>(DerivedRD)->
isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
assert(false && "Class must be derived from the passed in base class!");
- return FinalOverriders::BaseOffset();
+ return BaseOffset();
}
return ComputeBaseOffset(Context, DerivedRD, Paths.front());
}
-static FinalOverriders::BaseOffset
+static BaseOffset
ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
const CXXMethodDecl *DerivedMD,
const CXXMethodDecl *BaseMD) {
@@ -313,7 +309,7 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
if (CanDerivedReturnType == CanBaseReturnType) {
// No adjustment needed.
- return FinalOverriders::BaseOffset();
+ return BaseOffset();
}
if (isa<ReferenceType>(CanDerivedReturnType)) {
@@ -336,7 +332,7 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
if (CanDerivedReturnType.getUnqualifiedType() ==
CanBaseReturnType.getUnqualifiedType()) {
// No adjustment needed.
- return FinalOverriders::BaseOffset();
+ return BaseOffset();
}
const CXXRecordDecl *DerivedRD =
@@ -348,35 +344,6 @@ ComputeReturnAdjustmentBaseOffset(ASTContext &Context,
return ComputeBaseOffset(Context, BaseRD, DerivedRD);
}
-FinalOverriders::BaseOffset
-FinalOverriders::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
- BaseSubobject Derived) {
- const CXXRecordDecl *BaseRD = Base.getBase();
- const CXXRecordDecl *DerivedRD = Derived.getBase();
-
- CXXBasePaths Paths(/*FindAmbiguities=*/true,
- /*RecordPaths=*/true, /*DetectVirtual=*/true);
-
- if (!const_cast<CXXRecordDecl *>(DerivedRD)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return FinalOverriders::BaseOffset();
- }
-
- assert(!Paths.getDetectedVirtual() && "FIXME: Handle virtual bases!");
-
- BaseOffset Offset;
-
- // FIXME: This is not going to be enough with virtual bases.
- // FIXME: We should not use / 8 here.
- int64_t DerivedToBaseOffset =
- (Base.getBaseOffset() - Derived.getBaseOffset()) / 8;
-
- Offset.NonVirtualOffset = -DerivedToBaseOffset;
-
- return Offset;
-}
-
void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
BaseSubobject NewBase,
const CXXMethodDecl *NewMD,
@@ -394,7 +361,7 @@ void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
/// struct C : B1, B2 { virtual void f(); };
///
/// When overriding A::f with C::f we need to do so in both A subobjects.
- const OffsetVectorTy &OffsetVector = Offsets[OverriddenRD];
+ const OffsetSetVectorTy &OffsetVector = Offsets[OverriddenRD];
// Go through all the subobjects.
for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) {
@@ -419,20 +386,10 @@ void FinalOverriders::PropagateOverrider(const CXXMethodDecl *OldMD,
// Store the return adjustment base offset.
ReturnAdjustments[SubobjectAndMethod] = ReturnBaseOffset;
}
-
- // Check if we need a 'this' adjustment base offset as well.
- if (Offset != NewBase.getBaseOffset()) {
- BaseOffset ThisBaseOffset =
- ComputeThisAdjustmentBaseOffset(OverriddenSubobject,
- NewBase);
- assert(!ThisBaseOffset.isEmpty() &&
- "Should not get an empty 'this' adjustment!");
-
- ThisAdjustments[SubobjectAndMethod] = ThisBaseOffset;
- }
}
// Set the new overrider.
+ Overrider.Offset = NewBase.getBaseOffset();
Overrider.Method = NewMD;
// And propagate it further.
@@ -448,45 +405,17 @@ FinalOverriders::MergeSubobjectOffsets(const SubobjectOffsetsMapTy &NewOffsets,
for (SubobjectOffsetsMapTy::const_iterator I = NewOffsets.begin(),
E = NewOffsets.end(); I != E; ++I) {
const CXXRecordDecl *NewRD = I->first;
- const OffsetVectorTy& NewOffsetVector = I->second;
-
- OffsetVectorTy &OffsetVector = Offsets[NewRD];
- if (OffsetVector.empty()) {
- // There were no previous offsets in this vector, just insert all entries
- // from the new offset vector.
- OffsetVector.append(NewOffsetVector.begin(), NewOffsetVector.end());
- continue;
- }
-
- // We need to merge the new offsets vector into the old, but we don't want
- // to have duplicate entries. Do this by inserting the old offsets in a set
- // so they'll be unique. After this, we iterate over the new offset vector
- // and only append elements that aren't in the set.
+ const OffsetSetVectorTy& NewOffsetVector = I->second;
- // First, add the existing offsets to the set.
- llvm::SmallSet<uint64_t, 4> OffsetSet;
- for (unsigned I = 0, E = OffsetVector.size(); I != E; ++I) {
- bool Inserted = OffsetSet.insert(OffsetVector[I]);
- if (!Inserted)
- assert(false && "Set of offsets should be unique!");
- }
+ OffsetSetVectorTy &OffsetVector = Offsets[NewRD];
- // Next, only add the new offsets if they are not already in the set.
- for (unsigned I = 0, E = NewOffsetVector.size(); I != E; ++I) {
- uint64_t Offset = NewOffsetVector[I];
-
- if (OffsetSet.count(Offset)) {
- // Ignore the offset.
- continue;
- }
-
- // Otherwise, add it to the offsets vector.
- OffsetVector.push_back(Offset);
- }
+ // Merge the new offsets set vector into the old.
+ OffsetVector.insert(NewOffsetVector.begin(), NewOffsetVector.end());
}
}
void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base,
+ bool BaseSubobjectIsVisitedVBase,
SubobjectOffsetsMapTy &Offsets) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
@@ -502,25 +431,51 @@ void FinalOverriders::ComputeFinalOverriders(BaseSubobject Base,
if (!BaseDecl->isPolymorphic())
continue;
+ bool IsVisitedVirtualBase = BaseSubobjectIsVisitedVBase;
uint64_t BaseOffset;
if (I->isVirtual()) {
+ if (!VisitedVirtualBases.insert(BaseDecl))
+ IsVisitedVirtualBase = true;
BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
} else {
BaseOffset = Layout.getBaseClassOffset(BaseDecl) + Base.getBaseOffset();
}
// Compute the final overriders for this base.
- ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset), NewOffsets);
+ // We always want to compute the final overriders, even if the base is a
+ // visited virtual base. Consider:
+ //
+ // struct A {
+ // virtual void f();
+ // virtual void g();
+ // };
+ //
+ // struct B : virtual A {
+ // void f();
+ // };
+ //
+ // struct C : virtual A {
+ // void g ();
+ // };
+ //
+ // struct D : B, C { };
+ //
+ // Here, we still want to compute the overriders for A as a base of C,
+ // because otherwise we'll miss that C::g overrides A::f.
+ ComputeFinalOverriders(BaseSubobject(BaseDecl, BaseOffset),
+ IsVisitedVirtualBase, NewOffsets);
}
/// Now add the overriders for this particular subobject.
- AddOverriders(Base, NewOffsets);
+ /// (We don't want to do this more than once for a virtual base).
+ if (!BaseSubobjectIsVisitedVBase)
+ AddOverriders(Base, NewOffsets);
// And merge the newly discovered subobject offsets.
MergeSubobjectOffsets(NewOffsets, Offsets);
/// Finally, add the offset for our own subobject.
- Offsets[RD].push_back(Base.getBaseOffset());
+ Offsets[RD].insert(Base.getBaseOffset());
}
void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
@@ -565,9 +520,10 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
OverriderInfo Overrider = getOverrider(Base, MD);
- Out << " " << MD->getQualifiedNameAsString() << " - ";
+ Out << " " << MD->getQualifiedNameAsString() << " - (";
Out << Overrider.Method->getQualifiedNameAsString();
-
+ Out << ", " << Overrider.Offset << ')';
+
AdjustmentOffsetsMapTy::const_iterator AI =
ReturnAdjustments.find(std::make_pair(Base, MD));
if (AI != ReturnAdjustments.end()) {
@@ -580,17 +536,6 @@ void FinalOverriders::dump(llvm::raw_ostream &Out, BaseSubobject Base) {
Out << Offset.NonVirtualOffset << " nv]";
}
- AI = ThisAdjustments.find(std::make_pair(Base, MD));
- if (AI != ThisAdjustments.end()) {
- const BaseOffset &Offset = AI->second;
-
- Out << " [this-adj: ";
- if (Offset.VirtualBase)
- Out << Offset.VirtualBase->getQualifiedNameAsString() << " vbase, ";
-
- Out << Offset.NonVirtualOffset << " nv]";
- }
-
Out << "\n";
}
}
@@ -609,9 +554,18 @@ public:
CK_CompleteDtorPointer,
/// CK_DeletingDtorPointer - A pointer to the deleting destructor.
- CK_DeletingDtorPointer
+ CK_DeletingDtorPointer,
+
+ /// CK_UnusedFunctionPointer - In some cases, a vtable function pointer
+ /// will end up never being called. Such vtable function pointers are
+ /// represented as a CK_UnusedFunctionPointer.
+ CK_UnusedFunctionPointer
};
+ static VtableComponent MakeVCallOffset(int64_t Offset) {
+ return VtableComponent(CK_VCallOffset, Offset);
+ }
+
static VtableComponent MakeVBaseOffset(int64_t Offset) {
return VtableComponent(CK_VBaseOffset, Offset);
}
@@ -642,11 +596,24 @@ public:
reinterpret_cast<uintptr_t>(DD));
}
+ static VtableComponent MakeUnusedFunction(const CXXMethodDecl *MD) {
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Don't use MakeUnusedFunction with destructors!");
+ return VtableComponent(CK_UnusedFunctionPointer,
+ reinterpret_cast<uintptr_t>(MD));
+ }
+
/// getKind - Get the kind of this vtable component.
Kind getKind() const {
return (Kind)(Value & 0x7);
}
+ int64_t getVCallOffset() const {
+ assert(getKind() == CK_VCallOffset && "Invalid component kind!");
+
+ return getOffset();
+ }
+
int64_t getVBaseOffset() const {
assert(getKind() == CK_VBaseOffset && "Invalid component kind!");
@@ -678,6 +645,12 @@ public:
return reinterpret_cast<CXXDestructorDecl *>(getPointer());
}
+ const CXXMethodDecl *getUnusedFunctionDecl() const {
+ assert(getKind() == CK_UnusedFunctionPointer);
+
+ return reinterpret_cast<CXXMethodDecl *>(getPointer());
+ }
+
private:
VtableComponent(Kind ComponentKind, int64_t Offset) {
assert((ComponentKind == CK_VCallOffset ||
@@ -692,7 +665,8 @@ private:
assert((ComponentKind == CK_RTTI ||
ComponentKind == CK_FunctionPointer ||
ComponentKind == CK_CompleteDtorPointer ||
- ComponentKind == CK_DeletingDtorPointer) &&
+ ComponentKind == CK_DeletingDtorPointer ||
+ ComponentKind == CK_UnusedFunctionPointer) &&
"Invalid component kind!");
assert((Ptr & 7) == 0 && "Pointer not sufficiently aligned!");
@@ -711,7 +685,8 @@ private:
assert((getKind() == CK_RTTI ||
getKind() == CK_FunctionPointer ||
getKind() == CK_CompleteDtorPointer ||
- getKind() == CK_DeletingDtorPointer) &&
+ getKind() == CK_DeletingDtorPointer ||
+ getKind() == CK_UnusedFunctionPointer) &&
"Invalid component kind!");
return static_cast<uintptr_t>(Value & ~7ULL);
@@ -725,12 +700,334 @@ private:
int64_t Value;
};
+/// VCallOffsetMap - Keeps track of vcall offsets when building a vtable.
+struct VCallOffsetMap {
+
+ typedef std::pair<const CXXMethodDecl *, int64_t> MethodAndOffsetPairTy;
+
+ /// Offsets - Keeps track of methods and their offsets.
+ // FIXME: This should be a real map and not a vector.
+ llvm::SmallVector<MethodAndOffsetPairTy, 16> Offsets;
+
+ /// MethodsCanShareVCallOffset - Returns whether two virtual member functions
+ /// can share the same vcall offset.
+ static bool MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS);
+
+public:
+ /// AddVCallOffset - Adds a vcall offset to the map. Returns true if the
+ /// add was successful, or false if there was already a member function with
+ /// the same signature in the map.
+ bool AddVCallOffset(const CXXMethodDecl *MD, int64_t OffsetOffset);
+
+ /// getVCallOffsetOffset - Returns the vcall offset offset (relative to the
+ /// vtable address point) for the given virtual member function.
+ int64_t getVCallOffsetOffset(const CXXMethodDecl *MD);
+
+ /// empty - Return whether the offset map is empty or not.
+ bool empty() const { return Offsets.empty(); }
+};
+
+static bool HasSameVirtualSignature(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ ASTContext &C = LHS->getASTContext(); // TODO: thread this down
+ CanQual<FunctionProtoType>
+ LT = C.getCanonicalType(LHS->getType()).getAs<FunctionProtoType>(),
+ RT = C.getCanonicalType(RHS->getType()).getAs<FunctionProtoType>();
+
+ // Fast-path matches in the canonical types.
+ if (LT == RT) return true;
+
+ // Force the signatures to match. We can't rely on the overrides
+ // list here because there isn't necessarily an inheritance
+ // relationship between the two methods.
+ if (LT.getQualifiers() != RT.getQualifiers() ||
+ LT->getNumArgs() != RT->getNumArgs())
+ return false;
+ for (unsigned I = 0, E = LT->getNumArgs(); I != E; ++I)
+ if (LT->getArgType(I) != RT->getArgType(I))
+ return false;
+ return true;
+}
+
+bool VCallOffsetMap::MethodsCanShareVCallOffset(const CXXMethodDecl *LHS,
+ const CXXMethodDecl *RHS) {
+ assert(LHS->isVirtual() && "LHS must be virtual!");
+ assert(RHS->isVirtual() && "RHS must be virtual!");
+
+ // A destructor can share a vcall offset with another destructor.
+ if (isa<CXXDestructorDecl>(LHS))
+ return isa<CXXDestructorDecl>(RHS);
+
+ // FIXME: We need to check more things here.
+
+ // The methods must have the same name.
+ DeclarationName LHSName = LHS->getDeclName();
+ DeclarationName RHSName = RHS->getDeclName();
+ if (LHSName != RHSName)
+ return false;
+
+ // And the same signatures.
+ return HasSameVirtualSignature(LHS, RHS);
+}
+
+bool VCallOffsetMap::AddVCallOffset(const CXXMethodDecl *MD,
+ int64_t OffsetOffset) {
+ // Check if we can reuse an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return false;
+ }
+
+ // Add the offset.
+ Offsets.push_back(MethodAndOffsetPairTy(MD, OffsetOffset));
+ return true;
+}
+
+int64_t VCallOffsetMap::getVCallOffsetOffset(const CXXMethodDecl *MD) {
+ // Look for an offset.
+ for (unsigned I = 0, E = Offsets.size(); I != E; ++I) {
+ if (MethodsCanShareVCallOffset(Offsets[I].first, MD))
+ return Offsets[I].second;
+ }
+
+ assert(false && "Should always find a vcall offset offset!");
+ return 0;
+}
+
+/// VCallAndVBaseOffsetBuilder - Class for building vcall and vbase offsets.
+class VCallAndVBaseOffsetBuilder {
+ /// MostDerivedClass - The most derived class for which we're building vcall
+ /// and vbase offsets.
+ const CXXRecordDecl *MostDerivedClass;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
+ /// Context - The ASTContext which we will use for layout information.
+ ASTContext &Context;
+
+ /// Components - The vcall and vbase offset components.
+ typedef llvm::SmallVector<VtableComponent, 64> VtableComponentVectorTy;
+ VtableComponentVectorTy Components;
+
+ /// VisitedVirtualBases - Visited virtual bases.
+ llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
+
+ /// VCallOffsets - Keeps track of vcall offsets.
+ VCallOffsetMap VCallOffsets;
+
+ /// FinalOverriders - The final overriders of the most derived class.
+ /// (Can be null when we're not building a vtable of the most derived class).
+ const FinalOverriders *Overriders;
+
+ /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
+ /// given base subobject.
+ void AddVCallAndVBaseOffsets(BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t RealBaseOffset);
+
+ /// AddVCallOffsets - Add vcall offsets for the given base subobject.
+ void AddVCallOffsets(BaseSubobject Base, uint64_t VBaseOffset);
+
+ /// AddVBaseOffsets - Add vbase offsets for the given class.
+ void AddVBaseOffsets(const CXXRecordDecl *Base, uint64_t OffsetInLayoutClass);
+
+public:
+ VCallAndVBaseOffsetBuilder(const CXXRecordDecl *MostDerivedClass,
+ const CXXRecordDecl *LayoutClass,
+ const FinalOverriders *Overriders,
+ BaseSubobject Base, bool BaseIsVirtual,
+ uint64_t OffsetInLayoutClass)
+ : MostDerivedClass(MostDerivedClass), LayoutClass(LayoutClass),
+ Context(MostDerivedClass->getASTContext()), Overriders(Overriders) {
+
+ // Add vcall and vbase offsets.
+ AddVCallAndVBaseOffsets(Base, BaseIsVirtual, OffsetInLayoutClass);
+ }
+
+ /// Methods for iterating over the components.
+ typedef VtableComponentVectorTy::const_reverse_iterator const_iterator;
+ const_iterator components_begin() const { return Components.rbegin(); }
+ const_iterator components_end() const { return Components.rend(); }
+
+ const VCallOffsetMap& getVCallOffsets() const { return VCallOffsets; }
+};
+
+void
+VCallAndVBaseOffsetBuilder::AddVCallAndVBaseOffsets(BaseSubobject Base,
+ bool BaseIsVirtual,
+ uint64_t RealBaseOffset) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base.getBase());
+
+ // Itanium C++ ABI 2.5.2:
+ // ..in classes sharing a virtual table with a primary base class, the vcall
+ // and vbase offsets added by the derived class all come before the vcall
+ // and vbase offsets required by the base class, so that the latter may be
+ // laid out as required by the base class without regard to additions from
+ // the derived class(es).
+
+ // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
+ // emit them for the primary base first).
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+ bool PrimaryBaseIsVirtual = Layout.getPrimaryBaseWasVirtual();
+
+ uint64_t PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (PrimaryBaseIsVirtual) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallAndVBaseOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ PrimaryBaseIsVirtual, RealBaseOffset);
+ }
+
+ AddVBaseOffsets(Base.getBase(), RealBaseOffset);
+
+ // We only want to add vcall offsets for virtual bases.
+ if (BaseIsVirtual)
+ AddVCallOffsets(Base, RealBaseOffset);
+}
+
+void VCallAndVBaseOffsetBuilder::AddVCallOffsets(BaseSubobject Base,
+ uint64_t VBaseOffset) {
+ const CXXRecordDecl *RD = Base.getBase();
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ // Handle the primary base first.
+ if (PrimaryBase) {
+ uint64_t PrimaryBaseOffset;
+
+ // Get the base offset of the primary base.
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ PrimaryBaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should have a zero offset!");
+
+ PrimaryBaseOffset = Base.getBaseOffset();
+ }
+
+ AddVCallOffsets(BaseSubobject(PrimaryBase, PrimaryBaseOffset),
+ VBaseOffset);
+ }
+
+ // Add the vcall offsets.
+ for (CXXRecordDecl::method_iterator I = RD->method_begin(),
+ E = RD->method_end(); I != E; ++I) {
+ const CXXMethodDecl *MD = *I;
+
+ if (!MD->isVirtual())
+ continue;
+
+ // OffsetIndex is the index of this vcall offset, relative to the vtable
+ // address point. (We subtract 3 to account for the information just
+ // above the address point: the RTTI info, the offset to top, and the
+ // vcall offset itself).
+ int64_t OffsetIndex = -(int64_t)(3 + Components.size());
+
+ // FIXME: We shouldn't use / 8 here.
+ int64_t OffsetOffset = OffsetIndex *
+ (int64_t)Context.Target.getPointerWidth(0) / 8;
+
+ // Don't add a vcall offset if we already have one for this member function
+ // signature.
+ if (!VCallOffsets.AddVCallOffset(MD, OffsetOffset))
+ continue;
+
+ int64_t Offset = 0;
+
+ if (Overriders) {
+ // Get the final overrider.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders->getOverrider(Base, MD);
+
+ /// The vcall offset is the offset from the virtual base to the object
+ /// where the function was overridden.
+ // FIXME: We should not use / 8 here.
+ Offset = (int64_t)(Overrider.Offset - VBaseOffset) / 8;
+ }
+
+ Components.push_back(VtableComponent::MakeVCallOffset(Offset));
+ }
+
+ // And iterate over all non-virtual bases (ignoring the primary base).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+ if (BaseDecl == PrimaryBase)
+ continue;
+
+ // Get the base offset of this base.
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+ AddVCallOffsets(BaseSubobject(BaseDecl, BaseOffset), VBaseOffset);
+ }
+}
+
+void VCallAndVBaseOffsetBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
+ uint64_t OffsetInLayoutClass) {
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Add vbase offsets.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this is a virtual base that we haven't visited before.
+ if (I->isVirtual() && VisitedVirtualBases.insert(BaseDecl)) {
+ // FIXME: We shouldn't use / 8 here.
+ int64_t Offset =
+ (int64_t)(LayoutClassLayout.getVBaseClassOffset(BaseDecl) -
+ OffsetInLayoutClass) / 8;
+
+ Components.push_back(VtableComponent::MakeVBaseOffset(Offset));
+ }
+
+ // Check the base class looking for more vbase offsets.
+ AddVBaseOffsets(BaseDecl, OffsetInLayoutClass);
+ }
+}
+
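A brief illustration (hypothetical, not part of the patch) of what the vcall offsets built above are for: when a class has a virtual base, the vtable fragment for that base carries vcall offsets, and thunks read them to adjust 'this' before jumping to the final overrider; two virtual functions share a slot only when they have the same name and signature, which is the check MethodsCanShareVCallOffset performs.

    // A is a virtual base of B, so the A-in-B (and A-in-C) vtable part gets
    // vcall offsets for A::f and for A's destructor. A call through an A*
    // that really points into a B or C adjusts 'this' by the stored offset
    // before reaching the overriding f.
    struct A { virtual void f(); virtual ~A(); };
    struct B : virtual A { virtual void f(); };
    struct C : B { virtual void f(); };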
/// VtableBuilder - Class for building vtable layout information.
class VtableBuilder {
public:
- /// PrimaryBasesSetTy - A set of direct and indirect primary bases.
- typedef llvm::SmallPtrSet<const CXXRecordDecl *, 8> PrimaryBasesSetTy;
-
+ /// PrimaryBasesSetVectorTy - A set vector of direct and indirect
+ /// primary bases.
+ typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
+ PrimaryBasesSetVectorTy;
+
private:
/// VtableInfo - Global vtable information.
CGVtableInfo &VtableInfo;
@@ -739,15 +1036,28 @@ private:
/// vtable.
const CXXRecordDecl *MostDerivedClass;
+ /// MostDerivedClassOffset - If we're building a construction vtable, this
+ /// holds the offset from the layout class to the most derived class.
+ const uint64_t MostDerivedClassOffset;
+
+ /// MostDerivedClassIsVirtual - Whether the most derived class is a virtual
+ /// base. (This only makes sense when building a construction vtable).
+ bool MostDerivedClassIsVirtual;
+
+ /// LayoutClass - The class we're using for layout information. Will be
+ /// different than the most derived class if we're building a construction
+ /// vtable.
+ const CXXRecordDecl *LayoutClass;
+
/// Context - The ASTContext which we will use for layout information.
ASTContext &Context;
/// FinalOverriders - The final overriders of the most derived class.
- FinalOverriders Overriders;
+ const FinalOverriders Overriders;
- /// VCallAndVBaseOffsets - The vcall and vbase offset, of the vtable we're
- // building (in reverse order).
- llvm::SmallVector<VtableComponent, 64> VCallAndVBaseOffsets;
+ /// VCallOffsetsForVBases - Keeps track of vcall offsets for the virtual
+ /// bases in this vtable.
+ llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
/// Components - The components of the vtable being built.
llvm::SmallVector<VtableComponent, 64> Components;
@@ -761,7 +1071,7 @@ private:
/// nearest virtual base.
int64_t NonVirtual;
- /// VBaseOffsetOffset - The offset, in bytes, relative to the address point
+ /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
/// of the virtual base class offset.
int64_t VBaseOffsetOffset;
@@ -774,72 +1084,159 @@ private:
llvm::SmallVector<std::pair<uint64_t, ReturnAdjustment>, 16>
ReturnAdjustments;
+ /// MethodInfo - Contains information about a method in a vtable.
+ /// (Used for computing 'this' pointer adjustment thunks.)
+ struct MethodInfo {
+ /// BaseOffset - The base offset of this method.
+ const uint64_t BaseOffset;
+
+ /// VtableIndex - The index in the vtable that this method has.
+ /// (For destructors, this is the index of the complete destructor).
+ const uint64_t VtableIndex;
+
+ MethodInfo(uint64_t BaseOffset, uint64_t VtableIndex)
+ : BaseOffset(BaseOffset), VtableIndex(VtableIndex) { }
+
+ MethodInfo() : BaseOffset(0), VtableIndex(0) { }
+ };
+
+ typedef llvm::DenseMap<const CXXMethodDecl *, MethodInfo> MethodInfoMapTy;
+
+ /// MethodInfoMap - The information for all methods in the vtable we're
+ /// currently building.
+ MethodInfoMapTy MethodInfoMap;
+
/// ThisAdjustment - A 'this' pointer adjustment thunk.
struct ThisAdjustment {
/// NonVirtual - The non-virtual adjustment from the derived object to its
/// nearest virtual base.
int64_t NonVirtual;
- /// FIXME: Add VCallOffsetOffset here.
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
- ThisAdjustment() : NonVirtual(0) { }
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
- bool isEmpty() const { return !NonVirtual; }
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
};
/// ThisAdjustments - The 'this' pointer adjustments needed in this vtable.
llvm::SmallVector<std::pair<uint64_t, ThisAdjustment>, 16>
ThisAdjustments;
+
+ /// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
+ /// part of the vtable we're currently building.
+ void ComputeThisAdjustments();
typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- /// AddVCallAndVBaseOffsets - Add vcall offsets and vbase offsets for the
- /// given class.
- void AddVCallAndVBaseOffsets(const CXXRecordDecl *RD, int64_t OffsetToTop,
- VisitedVirtualBasesSetTy &VBases);
-
- /// AddVBaseOffsets - Add vbase offsets for the given class.
- void AddVBaseOffsets(const CXXRecordDecl *RD, int64_t OffsetToTop,
- VisitedVirtualBasesSetTy &VBases);
+ /// PrimaryVirtualBases - All known virtual bases that are a primary base of
+ /// some other base.
+ VisitedVirtualBasesSetTy PrimaryVirtualBases;
/// ComputeReturnAdjustment - Compute the return adjustment given a return
/// adjustment base offset.
- ReturnAdjustment ComputeReturnAdjustment(FinalOverriders::BaseOffset Offset);
+ ReturnAdjustment ComputeReturnAdjustment(BaseOffset Offset);
- /// ComputeThisAdjustment - Compute the 'this' pointer adjustment given a
- /// 'this' pointer adjustment base offset.
- ThisAdjustment ComputeThisAdjustment(FinalOverriders::BaseOffset Offset);
+ /// ComputeThisAdjustmentBaseOffset - Compute the base offset for adjusting
+ /// the 'this' pointer from the base subobject to the derived subobject.
+ BaseOffset ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const;
+
+ /// ComputeThisAdjustment - Compute the 'this' pointer adjustment for the
+ /// given virtual member function and the 'this' pointer adjustment base
+ /// offset.
+ ThisAdjustment ComputeThisAdjustment(const CXXMethodDecl *MD,
+ BaseOffset Offset);
/// AddMethod - Add a single virtual member function to the vtable
/// components vector.
- void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment,
- ThisAdjustment ThisAdjustment);
+ void AddMethod(const CXXMethodDecl *MD, ReturnAdjustment ReturnAdjustment);
+ /// IsOverriderUsed - Returns whether the overrider will ever be used in this
+ /// part of the vtable.
+ ///
+ /// Itanium C++ ABI 2.5.2:
+ ///
+ /// struct A { virtual void f(); };
+ /// struct B : virtual public A { int i; };
+ /// struct C : virtual public A { int j; };
+ /// struct D : public B, public C {};
+ ///
+ /// When B and C are declared, A is a primary base in each case, so although
+ /// vcall offsets are allocated in the A-in-B and A-in-C vtables, no this
+ /// adjustment is required and no thunk is generated. However, inside D
+ /// objects, A is no longer a primary base of C, so if we allowed calls to
+ /// C::f() to use the copy of A's vtable in the C subobject, we would need
+ /// to adjust this from C* to B::A*, which would require a third-party
+ /// thunk. Since we require that a call to C::f() first convert to A*,
+ /// C-in-D's copy of A's vtable is never referenced, so this is not
+ /// necessary.
+ bool IsOverriderUsed(BaseSubobject Base,
+ BaseSubobject FirstBaseInPrimaryBaseChain,
+ uint64_t OffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) const;
+
/// AddMethods - Add the methods of this base subobject and all its
/// primary bases to the vtable components vector.
- void AddMethods(BaseSubobject Base, PrimaryBasesSetTy &PrimaryBases);
-
- /// LayoutVtable - Layout a vtable and all its secondary vtables.
- void LayoutVtable(BaseSubobject Base);
+ void AddMethods(BaseSubobject Base, BaseSubobject FirstBaseInPrimaryBaseChain,
+ uint64_t OffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases);
+
+ /// LayoutVtable - Lay out the vtable for the most derived class, including
+ /// its secondary vtables and any vtables for virtual bases.
+ void LayoutVtable();
+
+ /// LayoutPrimaryAndSecondaryVtables - Layout the primary vtable for the
+ /// given base subobject, as well as all its secondary vtables.
+ void LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
+ bool BaseIsVirtual,
+ uint64_t OffsetInLayoutClass);
+ /// LayoutSecondaryVtables - Layout the secondary vtables for the given base
+ /// subobject.
+ void LayoutSecondaryVtables(BaseSubobject Base, uint64_t OffsetInLayoutClass);
+
+ /// DeterminePrimaryVirtualBases - Determine the primary virtual bases in this
+ /// class hierarchy.
+ void DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVtablesForVirtualBases - Layout vtables for all virtual bases of the
+ /// given base (excluding any primary bases).
+ void LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// isBuildingConstructorVtable - Return whether this vtable builder is
+ /// building a construction vtable.
+ bool isBuildingConstructorVtable() const {
+ return MostDerivedClass != LayoutClass;
+ }
+
public:
- VtableBuilder(CGVtableInfo &VtableInfo, const CXXRecordDecl *MostDerivedClass)
- : VtableInfo(VtableInfo), MostDerivedClass(MostDerivedClass),
- Context(MostDerivedClass->getASTContext()), Overriders(MostDerivedClass) {
+ VtableBuilder(CGVtableInfo &VtableInfo, const CXXRecordDecl *MostDerivedClass,
+ uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
+ const CXXRecordDecl *LayoutClass)
+ : VtableInfo(VtableInfo), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassOffset(MostDerivedClassOffset),
+ MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
+ LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
+ Overriders(MostDerivedClass) {
- LayoutVtable(BaseSubobject(MostDerivedClass, 0));
+ LayoutVtable();
}
/// dumpLayout - Dump the vtable layout.
void dumpLayout(llvm::raw_ostream&);
};
-/// OverridesMethodInPrimaryBase - Checks whether whether this virtual member
-/// function overrides a member function in a direct or indirect primary base.
+/// OverridesMethodInBases - Checks whether this virtual member
+/// function overrides a member function in any of the given bases.
/// Returns the overridden member function, or null if none was found.
static const CXXMethodDecl *
-OverridesMethodInPrimaryBase(const CXXMethodDecl *MD,
- VtableBuilder::PrimaryBasesSetTy &PrimaryBases) {
+OverridesMethodInBases(const CXXMethodDecl *MD,
+ VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
E = MD->end_overridden_methods(); I != E; ++I) {
const CXXMethodDecl *OverriddenMD = *I;
@@ -847,15 +1244,77 @@ OverridesMethodInPrimaryBase(const CXXMethodDecl *MD,
assert(OverriddenMD->isCanonicalDecl() &&
"Should have the canonical decl of the overridden RD!");
- if (PrimaryBases.count(OverriddenRD))
+ if (Bases.count(OverriddenRD))
return OverriddenMD;
}
return 0;
}
+void VtableBuilder::ComputeThisAdjustments() {
+ std::map<uint64_t, ThisAdjustment> SortedThisAdjustments;
+
+ // Now go through the method info map and see if any of the methods need
+ // 'this' pointer adjustments.
+ for (MethodInfoMapTy::const_iterator I = MethodInfoMap.begin(),
+ E = MethodInfoMap.end(); I != E; ++I) {
+ const CXXMethodDecl *MD = I->first;
+ const MethodInfo &MethodInfo = I->second;
+
+ BaseSubobject OverriddenBaseSubobject(MD->getParent(),
+ MethodInfo.BaseOffset);
+
+ // Get the final overrider for this method.
+ FinalOverriders::OverriderInfo Overrider =
+ Overriders.getOverrider(OverriddenBaseSubobject, MD);
+
+ // Check if we need an adjustment.
+ if (Overrider.Offset == (int64_t)MethodInfo.BaseOffset)
+ continue;
+
+ uint64_t VtableIndex = MethodInfo.VtableIndex;
+
+ // Ignore adjustments for pure virtual member functions.
+ if (Overrider.Method->isPure())
+ continue;
+
+ // Ignore adjustments for unused function pointers.
+ if (Components[VtableIndex].getKind() ==
+ VtableComponent::CK_UnusedFunctionPointer)
+ continue;
+
+ BaseSubobject OverriderBaseSubobject(Overrider.Method->getParent(),
+ Overrider.Offset);
+
+ // Compute the adjustment offset.
+ BaseOffset ThisAdjustmentOffset =
+ ComputeThisAdjustmentBaseOffset(OverriddenBaseSubobject,
+ OverriderBaseSubobject);
+
+ // Then compute the adjustment itself.
+ ThisAdjustment ThisAdjustment = ComputeThisAdjustment(Overrider.Method,
+ ThisAdjustmentOffset);
+
+ // Add it.
+ SortedThisAdjustments.insert(std::make_pair(VtableIndex, ThisAdjustment));
+
+ if (isa<CXXDestructorDecl>(MD)) {
+ // Add an adjustment for the deleting destructor as well.
+ SortedThisAdjustments.insert(std::make_pair(VtableIndex + 1,
+ ThisAdjustment));
+ }
+ }
+
+ /// Clear the method info map.
+ MethodInfoMap.clear();
+
+ // Add the sorted elements.
+ ThisAdjustments.append(SortedThisAdjustments.begin(),
+ SortedThisAdjustments.end());
+}
+
VtableBuilder::ReturnAdjustment
-VtableBuilder::ComputeReturnAdjustment(FinalOverriders::BaseOffset Offset) {
+VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
ReturnAdjustment Adjustment;
if (!Offset.isEmpty()) {
@@ -876,79 +1335,96 @@ VtableBuilder::ComputeReturnAdjustment(FinalOverriders::BaseOffset Offset) {
return Adjustment;
}
-VtableBuilder::ThisAdjustment
-VtableBuilder::ComputeThisAdjustment(FinalOverriders::BaseOffset Offset) {
- ThisAdjustment Adjustment;
+BaseOffset
+VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
+ BaseSubobject Derived) const {
+ const CXXRecordDecl *BaseRD = Base.getBase();
+ const CXXRecordDecl *DerivedRD = Derived.getBase();
- if (!Offset.isEmpty()) {
- assert(!Offset.VirtualBase && "FIXME: Handle virtual bases!");
- Adjustment.NonVirtual = Offset.NonVirtualOffset;
+ CXXBasePaths Paths(/*FindAmbiguities=*/true,
+ /*RecordPaths=*/true, /*DetectVirtual=*/true);
+
+ if (!const_cast<CXXRecordDecl *>(DerivedRD)->
+ isDerivedFrom(const_cast<CXXRecordDecl *>(BaseRD), Paths)) {
+ assert(false && "Class must be derived from the passed in base class!");
+ return BaseOffset();
}
-
- return Adjustment;
-}
-void
-VtableBuilder::AddVCallAndVBaseOffsets(const CXXRecordDecl *RD,
- int64_t OffsetToTop,
- VisitedVirtualBasesSetTy &VBases) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ // We have to go through all the paths, and see which one leads us to the
+ // right base subobject.
+ for (CXXBasePaths::const_paths_iterator I = Paths.begin(), E = Paths.end();
+ I != E; ++I) {
+ BaseOffset Offset = ComputeBaseOffset(Context, DerivedRD, *I);
+
+ // FIXME: Should not use * 8 here.
+ uint64_t OffsetToBaseSubobject = Offset.NonVirtualOffset * 8;
+
+ if (Offset.VirtualBase) {
+ // If we have a virtual base class, the non-virtual offset is relative
+ // to the virtual base class offset.
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ /// Get the virtual base offset, relative to the most derived class
+ /// layout.
+ OffsetToBaseSubobject +=
+ MostDerivedClassLayout.getVBaseClassOffset(Offset.VirtualBase);
+ } else {
+ // Otherwise, the non-virtual offset is relative to the derived class
+ // offset.
+ OffsetToBaseSubobject += Derived.getBaseOffset();
+ }
+
+ // Check if this path gives us the right base subobject.
+ if (OffsetToBaseSubobject == Base.getBaseOffset()) {
+ // Since we're going from the base class _to_ the derived class, we'll
+ // invert the non-virtual offset here.
+ Offset.NonVirtualOffset = -Offset.NonVirtualOffset;
+ return Offset;
+ }
+ }
- // Itanium C++ ABI 2.5.2:
- // ..in classes sharing a virtual table with a primary base class, the vcall
- // and vbase offsets added by the derived class all come before the vcall
- // and vbase offsets required by the base class, so that the latter may be
- // laid out as required by the base class without regard to additions from
- // the derived class(es).
-
- // (Since we're emitting the vcall and vbase offsets in reverse order, we'll
- // emit them for the primary base first).
- if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase())
- AddVCallAndVBaseOffsets(PrimaryBase, OffsetToTop, VBases);
-
- AddVBaseOffsets(RD, OffsetToTop, VBases);
+ return BaseOffset();
}
+
-void VtableBuilder::AddVBaseOffsets(const CXXRecordDecl *RD,
- int64_t OffsetToTop,
- VisitedVirtualBasesSetTy &VBases) {
- const ASTRecordLayout &MostDerivedClassLayout =
- Context.getASTRecordLayout(MostDerivedClass);
-
- // Add vbase offsets.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- // Check if this is a virtual base that we haven't visited before.
- if (I->isVirtual() && VBases.insert(BaseDecl)) {
- // FIXME: We shouldn't use / 8 here.
- uint64_t Offset =
- OffsetToTop + MostDerivedClassLayout.getVBaseClassOffset(BaseDecl) / 8;
-
- VCallAndVBaseOffsets.push_back(VtableComponent::MakeVBaseOffset(Offset));
+VtableBuilder::ThisAdjustment
+VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
+ BaseOffset Offset) {
+ ThisAdjustment Adjustment;
+
+ if (!Offset.isEmpty()) {
+ if (Offset.VirtualBase) {
+ // Get the vcall offset map for this virtual base.
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Offset.VirtualBase];
+
+ if (VCallOffsets.empty()) {
+ // We don't have vcall offsets for this virtual base, go ahead and
+ // build them.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, MostDerivedClass,
+ /*FinalOverriders=*/0,
+ BaseSubobject(Offset.VirtualBase, 0),
+ /*BaseIsVirtual=*/true,
+ /*OffsetInLayoutClass=*/0);
+
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
+ Adjustment.VCallOffsetOffset = VCallOffsets.getVCallOffsetOffset(MD);
}
-
- // Check the base class looking for more vbase offsets.
- AddVBaseOffsets(BaseDecl, OffsetToTop, VBases);
+
+ Adjustment.NonVirtual = Offset.NonVirtualOffset;
}
+
+ return Adjustment;
}
void
VtableBuilder::AddMethod(const CXXMethodDecl *MD,
- ReturnAdjustment ReturnAdjustment,
- ThisAdjustment ThisAdjustment) {
+ ReturnAdjustment ReturnAdjustment) {
if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
assert(ReturnAdjustment.isEmpty() &&
"Destructor can't have return adjustment!");
- // Add the 'this' pointer adjustments if necessary.
- if (!ThisAdjustment.isEmpty()) {
- ThisAdjustments.push_back(std::make_pair(Components.size(),
- ThisAdjustment));
- ThisAdjustments.push_back(std::make_pair(Components.size() + 1,
- ThisAdjustment));
- }
// Add both the complete destructor and the deleting destructor.
Components.push_back(VtableComponent::MakeCompleteDtor(DD));
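Two hypothetical hierarchies (illustration only) showing the two kinds of 'this' adjustment the code above computes:

    // Non-virtual adjustment: C::g lands in the B-in-C secondary vtable, so
    // its thunk only needs the constant NonVirtual delta from B* back to C*.
    struct A { virtual void f(); };
    struct B { virtual void g(); };
    struct C : A, B { virtual void g(); };

    // Virtual adjustment: E overrides f from its virtual base V, so the thunk
    // also needs a vcall offset (the VCallOffsetOffset above), because the
    // distance from V* to the overriding object is not fixed once E is
    // further derived from.
    struct V { virtual void f(); };
    struct E : virtual V { virtual void f(); };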
@@ -959,30 +1435,157 @@ VtableBuilder::AddMethod(const CXXMethodDecl *MD,
ReturnAdjustments.push_back(std::make_pair(Components.size(),
ReturnAdjustment));
- // Add the 'this' pointer adjustment if necessary.
- if (!ThisAdjustment.isEmpty())
- ThisAdjustments.push_back(std::make_pair(Components.size(),
- ThisAdjustment));
-
// Add the function.
Components.push_back(VtableComponent::MakeFunction(MD));
}
}
+/// OverridesIndirectMethodInBases - Return whether the given member function
+/// overrides any methods in the set of given bases.
+/// Unlike OverridesMethodInBases, this checks "overriders of overriders".
+/// For example, if we have:
+///
+/// struct A { virtual void f(); };
+/// struct B : A { virtual void f(); };
+/// struct C : B { virtual void f(); };
+///
+/// OverridesIndirectMethodInBases will return true if given C::f as the method
+/// and { A } as the set of bases.
+static bool
+OverridesIndirectMethodInBases(const CXXMethodDecl *MD,
+ VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+ const CXXRecordDecl *OverriddenRD = OverriddenMD->getParent();
+ assert(OverriddenMD->isCanonicalDecl() &&
+ "Should have the canonical decl of the overridden RD!");
+
+ if (Bases.count(OverriddenRD))
+ return true;
+
+ // Check "indirect overriders".
+ if (OverridesIndirectMethodInBases(OverriddenMD, Bases))
+ return true;
+ }
+
+ return false;
+}
+
+bool
+VtableBuilder::IsOverriderUsed(BaseSubobject Base,
+ BaseSubobject FirstBaseInPrimaryBaseChain,
+ uint64_t OffsetInLayoutClass,
+ FinalOverriders::OverriderInfo Overrider) const {
+ // If the base and the first base in the primary base chain have the same
+ // offsets, then this overrider will be used.
+ if (Base.getBaseOffset() == OffsetInLayoutClass)
+ return true;
+
+ // We know now that Base (or a direct or indirect base of it) is a primary
+ // base in part of the class hierarchy, but not a primary base in the most
+ // derived class.
+
+ // If the overrider is the first base in the primary base chain, we know
+ // that the overrider will be used.
+ if (Overrider.Method->getParent() == FirstBaseInPrimaryBaseChain.getBase())
+ return true;
+
+ VtableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
+
+ const CXXRecordDecl *RD = FirstBaseInPrimaryBaseChain.getBase();
+ PrimaryBases.insert(RD);
+
+ // Now traverse the base chain, starting with the first base, until we find
+ // the base that is no longer a primary base.
+ while (true) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
+
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ // Now check if this is the primary base that is not a primary base in the
+ // most derived class.
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We found it, stop walking the chain.
+ break;
+ }
+ } else {
+ assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary base should always be at offset 0!");
+ }
+
+ if (!PrimaryBases.insert(PrimaryBase))
+ assert(false && "Found a duplicate primary base!");
+
+ RD = PrimaryBase;
+ }
+
+ // If the final overrider is an override of one of the primary bases,
+ // then we know that it will be used.
+ return OverridesIndirectMethodInBases(Overrider.Method, PrimaryBases);
+}
+
+/// FindNearestOverriddenMethod - Given a method, returns the overridden method
+/// from the nearest base. Returns null if no method was found.
+static const CXXMethodDecl *
+FindNearestOverriddenMethod(const CXXMethodDecl *MD,
+ VtableBuilder::PrimaryBasesSetVectorTy &Bases) {
+ for (int I = Bases.size(), E = 0; I != E; --I) {
+ const CXXRecordDecl *PrimaryBase = Bases[I - 1];
+
+ // Now check the overridden methods.
+ for (CXXMethodDecl::method_iterator I = MD->begin_overridden_methods(),
+ E = MD->end_overridden_methods(); I != E; ++I) {
+ const CXXMethodDecl *OverriddenMD = *I;
+
+ // We found our overridden method.
+ if (OverriddenMD->getParent() == PrimaryBase)
+ return OverriddenMD;
+ }
+ }
+
+ return 0;
+}
+
void
-VtableBuilder::AddMethods(BaseSubobject Base, PrimaryBasesSetTy &PrimaryBases) {
+VtableBuilder::AddMethods(BaseSubobject Base,
+ BaseSubobject FirstBaseInPrimaryBaseChain,
+ uint64_t OffsetInLayoutClass,
+ PrimaryBasesSetVectorTy &PrimaryBases) {
const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
- if (Layout.getPrimaryBaseWasVirtual())
- assert(false && "FIXME: Handle vbases here.");
- else
+ uint64_t BaseOffset;
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
+ "Primary vbase should have a zero offset!");
+
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(PrimaryBase);
+ } else {
assert(Layout.getBaseClassOffset(PrimaryBase) == 0 &&
"Primary base should have a zero offset!");
-
- AddMethods(BaseSubobject(PrimaryBase, Base.getBaseOffset()), PrimaryBases);
+
+ BaseOffset = Base.getBaseOffset();
+ }
+
+ // FIXME: OffsetInLayoutClass is not right here.
+ AddMethods(BaseSubobject(PrimaryBase, BaseOffset),
+ FirstBaseInPrimaryBaseChain, OffsetInLayoutClass, PrimaryBases);
if (!PrimaryBases.insert(PrimaryBase))
assert(false && "Found a duplicate primary base!");
@@ -1004,47 +1607,90 @@ VtableBuilder::AddMethods(BaseSubobject Base, PrimaryBasesSetTy &PrimaryBases) {
// base. If this is the case, and the return type doesn't require adjustment
// then we can just use the member function from the primary base.
if (const CXXMethodDecl *OverriddenMD =
- OverridesMethodInPrimaryBase(MD, PrimaryBases)) {
+ FindNearestOverriddenMethod(MD, PrimaryBases)) {
if (ComputeReturnAdjustmentBaseOffset(Context, MD,
- OverriddenMD).isEmpty())
+ OverriddenMD).isEmpty()) {
+ // Replace the method info of the overridden method with our own
+ // method.
+ assert(MethodInfoMap.count(OverriddenMD) &&
+ "Did not find the overridden method!");
+ MethodInfo &OverriddenMethodInfo = MethodInfoMap[OverriddenMD];
+
+ MethodInfo MethodInfo(Base.getBaseOffset(),
+ OverriddenMethodInfo.VtableIndex);
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+ MethodInfoMap.erase(OverriddenMD);
continue;
+ }
}
+ // Insert the method info for this method.
+ MethodInfo MethodInfo(Base.getBaseOffset(), Components.size());
+
+ assert(!MethodInfoMap.count(MD) &&
+ "Should not have method info for this method yet!");
+ MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
+
+ // Check if this overrider is going to be used.
+ if (!IsOverriderUsed(Base, FirstBaseInPrimaryBaseChain, OffsetInLayoutClass,
+ Overrider)) {
+ const CXXMethodDecl *OverriderMD = Overrider.Method;
+ Components.push_back(VtableComponent::MakeUnusedFunction(OverriderMD));
+ continue;
+ }
+
// Check if this overrider needs a return adjustment.
- FinalOverriders::BaseOffset ReturnAdjustmentOffset =
+ BaseOffset ReturnAdjustmentOffset =
Overriders.getReturnAdjustmentOffset(Base, MD);
ReturnAdjustment ReturnAdjustment =
ComputeReturnAdjustment(ReturnAdjustmentOffset);
- // Check if this overrider needs a 'this' pointer adjustment.
- FinalOverriders::BaseOffset ThisAdjustmentOffset =
- Overriders.getThisAdjustmentOffset(Base, MD);
-
- ThisAdjustment ThisAdjustment = ComputeThisAdjustment(ThisAdjustmentOffset);
-
- AddMethod(Overrider.Method, ReturnAdjustment, ThisAdjustment);
+ AddMethod(Overrider.Method, ReturnAdjustment);
}
}
-void VtableBuilder::LayoutVtable(BaseSubobject Base) {
- const CXXRecordDecl *RD = Base.getBase();
- assert(RD->isDynamicClass() && "class does not have a vtable!");
-
- int64_t OffsetToTop = -(int64_t)Base.getBaseOffset() / 8;
-
- // Add vcall and vbase offsets for this vtable.
+void VtableBuilder::LayoutVtable() {
+ LayoutPrimaryAndSecondaryVtables(BaseSubobject(MostDerivedClass, 0),
+ MostDerivedClassIsVirtual,
+ MostDerivedClassOffset);
+
VisitedVirtualBasesSetTy VBases;
- AddVCallAndVBaseOffsets(RD, OffsetToTop, VBases);
+
+ // Determine the primary virtual bases.
+ DeterminePrimaryVirtualBases(MostDerivedClass, VBases);
+ VBases.clear();
+
+ LayoutVtablesForVirtualBases(MostDerivedClass, VBases);
+}
+
+void
+VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
+ bool BaseIsVirtual,
+ uint64_t OffsetInLayoutClass) {
+ assert(Base.getBase()->isDynamicClass() && "class does not have a vtable!");
- // Reverse them and add them to the vtable components.
- std::reverse(VCallAndVBaseOffsets.begin(), VCallAndVBaseOffsets.end());
- Components.append(VCallAndVBaseOffsets.begin(), VCallAndVBaseOffsets.end());
- VCallAndVBaseOffsets.clear();
+ // Add vcall and vbase offsets for this vtable.
+ VCallAndVBaseOffsetBuilder Builder(MostDerivedClass, LayoutClass, &Overriders,
+ Base, BaseIsVirtual, OffsetInLayoutClass);
+ Components.append(Builder.components_begin(), Builder.components_end());
+ // Check if we need to add these vcall offsets.
+ if (BaseIsVirtual && !Builder.getVCallOffsets().empty()) {
+ VCallOffsetMap &VCallOffsets = VCallOffsetsForVBases[Base.getBase()];
+
+ if (VCallOffsets.empty())
+ VCallOffsets = Builder.getVCallOffsets();
+ }
+
// Add the offset to top.
- // FIXME: This is not going to be right for construction vtables.
// FIXME: We should not use / 8 here.
+ int64_t OffsetToTop = -(int64_t)(OffsetInLayoutClass -
+ MostDerivedClassOffset) / 8;
Components.push_back(VtableComponent::MakeOffsetToTop(OffsetToTop));
// Next, add the RTTI.
@@ -1053,59 +1699,166 @@ void VtableBuilder::LayoutVtable(BaseSubobject Base) {
uint64_t AddressPoint = Components.size();
// Now go through all virtual member functions and add them.
- PrimaryBasesSetTy PrimaryBases;
- AddMethods(Base, PrimaryBases);
+ PrimaryBasesSetVectorTy PrimaryBases;
+ AddMethods(Base, Base, OffsetInLayoutClass, PrimaryBases);
+
+ // Compute 'this' pointer adjustments.
+ ComputeThisAdjustments();
// Record the address point.
- AddressPoints.insert(std::make_pair(Base, AddressPoint));
+ AddressPoints.insert(std::make_pair(BaseSubobject(Base.getBase(),
+ OffsetInLayoutClass),
+ AddressPoint));
// Record the address points for all primary bases.
- for (PrimaryBasesSetTy::const_iterator I = PrimaryBases.begin(),
+ for (PrimaryBasesSetVectorTy::const_iterator I = PrimaryBases.begin(),
E = PrimaryBases.end(); I != E; ++I) {
const CXXRecordDecl *BaseDecl = *I;
// We know that all the primary bases have the same offset as the base
// subobject.
- BaseSubobject PrimaryBase(BaseDecl, Base.getBaseOffset());
+ BaseSubobject PrimaryBase(BaseDecl, OffsetInLayoutClass);
AddressPoints.insert(std::make_pair(PrimaryBase, AddressPoint));
}
+ // Layout secondary vtables.
+ LayoutSecondaryVtables(Base, OffsetInLayoutClass);
+}
+
+void VtableBuilder::LayoutSecondaryVtables(BaseSubobject Base,
+ uint64_t OffsetInLayoutClass) {
+ // Itanium C++ ABI 2.5.2:
+ // Following the primary virtual table of a derived class are secondary
+ // virtual tables for each of its proper base classes, except any primary
+ // base(s) with which it shares its primary virtual table.
+
+ const CXXRecordDecl *RD = Base.getBase();
const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- // Layout secondary vtables.
+
for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
E = RD->bases_end(); I != E; ++I) {
+ // Ignore virtual bases, we'll emit them later.
+ if (I->isVirtual())
+ continue;
+
const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// Ignore bases that don't have a vtable.
if (!BaseDecl->isDynamicClass())
continue;
+
+ // Get the base offset of this base.
+ uint64_t RelativeBaseOffset = Layout.getBaseClassOffset(BaseDecl);
+ uint64_t BaseOffset = Base.getBaseOffset() + RelativeBaseOffset;
- // Ignore the primary base.
- if (BaseDecl == PrimaryBase)
+ uint64_t BaseOffsetInLayoutClass = OffsetInLayoutClass + RelativeBaseOffset;
+
+ // Don't emit a secondary vtable for a primary base. We might however want
+ // to emit secondary vtables for other bases of this base.
+ if (BaseDecl == PrimaryBase) {
+ LayoutSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
+ BaseOffsetInLayoutClass);
continue;
+ }
- // Ignore virtual bases, we'll emit them later.
- if (I->isVirtual())
+ // Layout the primary vtable (and any secondary vtables) for this base.
+ LayoutPrimaryAndSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsVirtual=*/false,
+ BaseOffsetInLayoutClass);
+ }
+}
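
As a hypothetical illustration of the rule quoted above (the classes below are invented, not taken from the patch):

    struct A { virtual void f() {} };
    struct B { virtual void g() {} };
    struct C : A, B { virtual void f() {} virtual void g() {} };

    // A is C's primary base, so the A-in-C subobject shares C's primary
    // vtable and gets no secondary vtable.  B is a proper, non-primary base,
    // so a secondary vtable for B-in-C is emitted right after C's primary
    // vtable; its entry for C::g needs a 'this' adjustment from B* to C*.
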
+
+void
+VtableBuilder::DeterminePrimaryVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Check if this base has a primary base.
+ if (const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase()) {
+
+ // Check if it's virtual.
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ bool IsPrimaryVirtualBase = true;
+
+ if (isBuildingConstructorVtable()) {
+ // Check if the base is actually a primary base in the class we use for
+ // layout.
+ // FIXME: Is this check enough?
+ if (MostDerivedClassOffset != 0)
+ IsPrimaryVirtualBase = false;
+ }
+
+ if (IsPrimaryVirtualBase)
+ PrimaryVirtualBases.insert(PrimaryBase);
+ }
+ }
+
+ // Traverse bases, looking for more primary virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ if (I->isVirtual() && !VBases.insert(BaseDecl))
continue;
-
- // Get the base offset of this base.
- uint64_t BaseOffset = Base.getBaseOffset() +
- Layout.getBaseClassOffset(BaseDecl);
- // Layout this secondary vtable.
- LayoutVtable(BaseSubobject(BaseDecl, BaseOffset));
+ DeterminePrimaryVirtualBases(BaseDecl, VBases);
+ }
+}
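
A minimal, hypothetical case of a primary virtual base, to show what ends up in PrimaryVirtualBases (illustration only, not part of the patch):

    struct V { virtual void v() {} };              // nearly-empty: vptr only
    struct A : virtual V { virtual void a() {} };

    // A has no non-virtual dynamic base, so the nearly-empty virtual base V
    // is chosen as A's primary base.  V is therefore recorded in
    // PrimaryVirtualBases, and LayoutVtablesForVirtualBases below will skip
    // it: V shares A's primary vtable instead of getting one of its own.
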
+
+void
+VtableBuilder::LayoutVtablesForVirtualBases(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ // Itanium C++ ABI 2.5.2:
+ // Then come the virtual base virtual tables, also in inheritance graph
+ // order, and again excluding primary bases (which share virtual tables with
+ // the classes for which they are primary).
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Check if this base needs a vtable. (If it's virtual, not a primary base
+ // of some other class, and we haven't visited it before).
+ if (I->isVirtual() && BaseDecl->isDynamicClass() &&
+ !PrimaryVirtualBases.count(BaseDecl) && VBases.insert(BaseDecl)) {
+ const ASTRecordLayout &MostDerivedClassLayout =
+ Context.getASTRecordLayout(MostDerivedClass);
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+ uint64_t BaseOffsetInLayoutClass =
+ LayoutClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutPrimaryAndSecondaryVtables(BaseSubobject(BaseDecl, BaseOffset),
+ /*BaseIsVirtual=*/true,
+ BaseOffsetInLayoutClass);
+ }
+
+ // We only need to check the base for virtual base vtables if it actually
+ // has virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVtablesForVirtualBases(BaseDecl, VBases);
}
-
- // FIXME: Emit vtables for virtual bases here.
}
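
Conversely, here is a hypothetical diamond in which the virtual base is never primary, so it does get its own vtable at the end of the group (illustration only):

    struct V { virtual void v() {} int x; };       // data member: not nearly-empty
    struct A : virtual V { virtual void a() {} };
    struct B : virtual V { virtual void b() {} };
    struct C : A, B {};

    // For C, the vtable group is laid out as: the primary vtable for C
    // (shared with A), the secondary vtable for B-in-C, and then, per the
    // rule quoted above, the virtual-base vtable for V-in-C.
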
/// dumpLayout - Dump the vtable layout.
void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
-
- Out << "Vtable for '" << MostDerivedClass->getQualifiedNameAsString();
+
+ if (MostDerivedClass == LayoutClass) {
+ Out << "Vtable for '";
+ Out << MostDerivedClass->getQualifiedNameAsString();
+ } else {
+ Out << "Construction vtable for ('";
+ Out << MostDerivedClass->getQualifiedNameAsString() << "', ";
+ // FIXME: Don't use / 8 .
+ Out << MostDerivedClassOffset / 8 << ") in '";
+ Out << LayoutClass->getQualifiedNameAsString();
+ }
Out << "' (" << Components.size() << " entries).\n";
// Iterate through the address points and insert them into a new map where
@@ -1125,37 +1878,6 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
unsigned NextThisAdjustmentIndex = 0;
for (unsigned I = 0, E = Components.size(); I != E; ++I) {
uint64_t Index = I;
-
- if (AddressPointsByIndex.count(I)) {
- if (AddressPointsByIndex.count(Index) == 1) {
- const BaseSubobject &Base = AddressPointsByIndex.find(Index)->second;
-
- // FIXME: Instead of dividing by 8, we should be using CharUnits.
- Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
- Out << ", " << Base.getBaseOffset() / 8 << ") vtable address --\n";
- } else {
- uint64_t BaseOffset =
- AddressPointsByIndex.lower_bound(Index)->second.getBaseOffset();
-
- // We store the class names in a set to get a stable order.
- std::set<std::string> ClassNames;
- for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
- AddressPointsByIndex.lower_bound(Index), E =
- AddressPointsByIndex.upper_bound(Index); I != E; ++I) {
- assert(I->second.getBaseOffset() == BaseOffset &&
- "Invalid base offset!");
- const CXXRecordDecl *RD = I->second.getBase();
- ClassNames.insert(RD->getQualifiedNameAsString());
- }
-
- for (std::set<std::string>::const_iterator I = ClassNames.begin(),
- E = ClassNames.end(); I != E; ++I) {
- // FIXME: Instead of dividing by 8, we should be using CharUnits.
- Out << " -- (" << *I;
- Out << ", " << BaseOffset / 8 << ") vtable address --\n";
- }
- }
- }
Out << llvm::format("%4d | ", I);
@@ -1163,9 +1885,9 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
// Dump the component.
switch (Component.getKind()) {
- // FIXME: Remove this default case.
- default:
- assert(false && "Unhandled component kind!");
+
+ case VtableComponent::CK_VCallOffset:
+ Out << "vcall_offset (" << Component.getVCallOffset() << ")";
break;
case VtableComponent::CK_VBaseOffset:
@@ -1201,6 +1923,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (Adjustment.VBaseOffsetOffset)
Out << ", " << Adjustment.VBaseOffsetOffset << " vbase offset offset";
+
Out << ']';
NextReturnAdjustmentIndex++;
@@ -1214,7 +1937,10 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << "\n [this adjustment: ";
Out << Adjustment.NonVirtual << " non-virtual";
-
+
+ if (Adjustment.VCallOffsetOffset)
+ Out << ", " << Adjustment.VCallOffsetOffset << " vcall offset offset";
+
Out << ']';
NextThisAdjustmentIndex++;
@@ -1223,31 +1949,94 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
break;
}
- case VtableComponent::CK_CompleteDtorPointer: {
+ case VtableComponent::CK_CompleteDtorPointer:
+ case VtableComponent::CK_DeletingDtorPointer: {
+ bool IsComplete =
+ Component.getKind() == VtableComponent::CK_CompleteDtorPointer;
+
const CXXDestructorDecl *DD = Component.getDestructorDecl();
- Out << DD->getQualifiedNameAsString() << "() [complete]";
+ Out << DD->getQualifiedNameAsString();
+ if (IsComplete)
+ Out << "() [complete]";
+ else
+ Out << "() [deleting]";
+
if (DD->isPure())
Out << " [pure]";
+ // If this destructor has a 'this' pointer adjustment, dump it.
+ if (NextThisAdjustmentIndex < ThisAdjustments.size() &&
+ ThisAdjustments[NextThisAdjustmentIndex].first == I) {
+ const ThisAdjustment Adjustment =
+ ThisAdjustments[NextThisAdjustmentIndex].second;
+
+ Out << "\n [this adjustment: ";
+ Out << Adjustment.NonVirtual << " non-virtual";
+
+ if (Adjustment.VCallOffsetOffset)
+ Out << ", " << Adjustment.VCallOffsetOffset << " vcall offset offset";
+
+ Out << ']';
+
+ NextThisAdjustmentIndex++;
+ }
+
+
break;
}
- case VtableComponent::CK_DeletingDtorPointer: {
- const CXXDestructorDecl *DD = Component.getDestructorDecl();
-
- Out << DD->getQualifiedNameAsString() << "() [deleting]";
- if (DD->isPure())
- Out << " [pure]";
+ case VtableComponent::CK_UnusedFunctionPointer: {
+ const CXXMethodDecl *MD = Component.getUnusedFunctionDecl();
- break;
+ std::string Str =
+ PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
+ MD);
+ Out << "[unused] " << Str;
+ if (MD->isPure())
+ Out << " [pure]";
}
}
Out << '\n';
+
+ // Dump the next address point.
+ uint64_t NextIndex = Index + 1;
+ if (AddressPointsByIndex.count(NextIndex)) {
+ if (AddressPointsByIndex.count(NextIndex) == 1) {
+ const BaseSubobject &Base =
+ AddressPointsByIndex.find(NextIndex)->second;
+
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << Base.getBase()->getQualifiedNameAsString();
+ Out << ", " << Base.getBaseOffset() / 8 << ") vtable address --\n";
+ } else {
+ uint64_t BaseOffset =
+ AddressPointsByIndex.lower_bound(NextIndex)->second.getBaseOffset();
+
+ // We store the class names in a set to get a stable order.
+ std::set<std::string> ClassNames;
+ for (std::multimap<uint64_t, BaseSubobject>::const_iterator I =
+ AddressPointsByIndex.lower_bound(NextIndex), E =
+ AddressPointsByIndex.upper_bound(NextIndex); I != E; ++I) {
+ assert(I->second.getBaseOffset() == BaseOffset &&
+ "Invalid base offset!");
+ const CXXRecordDecl *RD = I->second.getBase();
+ ClassNames.insert(RD->getQualifiedNameAsString());
+ }
+
+ for (std::set<std::string>::const_iterator I = ClassNames.begin(),
+ E = ClassNames.end(); I != E; ++I) {
+ // FIXME: Instead of dividing by 8, we should be using CharUnits.
+ Out << " -- (" << *I;
+ Out << ", " << BaseOffset / 8 << ") vtable address --\n";
+ }
+ }
+ }
}
-
+
+ Out << '\n';
}
}
@@ -1500,10 +2289,10 @@ private:
// If already set, note the two sets as the same
if (0)
printf("%s::%s same as %s::%s\n",
- PrevU->getParent()->getNameAsCString(),
- PrevU->getNameAsCString(),
- U->getParent()->getNameAsCString(),
- U->getNameAsCString());
+ PrevU->getParent()->getNameAsString().c_str(),
+ PrevU->getNameAsString().c_str(),
+ U->getParent()->getNameAsString().c_str(),
+ U->getNameAsString().c_str());
ForwardUnique[PrevU] = U;
return;
}
@@ -1511,11 +2300,11 @@ private:
// Not set, set it now
if (0)
printf("marking %s::%s %p override as %s::%s\n",
- MD->getParent()->getNameAsCString(),
- MD->getNameAsCString(),
+ MD->getParent()->getNameAsString().c_str(),
+ MD->getNameAsString().c_str(),
(void*)MD,
- U->getParent()->getNameAsCString(),
- U->getNameAsCString());
+ U->getParent()->getNameAsString().c_str(),
+ U->getNameAsString().c_str());
UniqueOverrider[MD] = U;
for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(),
@@ -1537,8 +2326,8 @@ private:
BuildUniqueOverrider(MD, MD);
if (0)
printf("top set is %s::%s %p\n",
- MD->getParent()->getNameAsCString(),
- MD->getNameAsCString(),
+ MD->getParent()->getNameAsString().c_str(),
+ MD->getNameAsString().c_str(),
(void*)MD);
ForwardUnique[MD] = MD;
}
@@ -1573,7 +2362,7 @@ private:
A_t::iterator J = I;
while (++J != E && DclCmp(I, J) == 0)
if (DclIsSame(*I, *J)) {
- if (0) printf("connecting %s\n", (*I)->getNameAsCString());
+ if (0) printf("connecting %s\n", (*I)->getNameAsString().c_str());
ForwardUnique[*J] = *I;
}
}
@@ -1657,7 +2446,7 @@ public:
}
//#define D1(x)
-#define D1(X) do { if (getenv("DEBUG")) { X; } } while (0)
+#define D1(X) do { if (getenv("CLANG_VTABLE_DEBUG")) { X; } } while (0)
void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
bool updateVBIndex, Index_t current_vbindex) {
@@ -1801,7 +2590,7 @@ public:
return;
D1(printf(" vfn for %s at %d\n",
- dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
+ dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(),
(int)Methods.size()));
// We didn't find an entry in the vtable that we could use, add a new
@@ -1824,7 +2613,7 @@ public:
idx = VCalls.size()+1;
VCalls.push_back(Offset/8 - CurrentVBaseOffset/8);
D1(printf(" vcall for %s at %d with delta %d\n",
- dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsCString(),
+ dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(),
(int)-VCalls.size()-3, (int)VCalls[idx-1]));
}
}
@@ -2352,7 +3141,7 @@ void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// Collect all the primary bases, so we can check whether methods override
// a method from the base.
- VtableBuilder::PrimaryBasesSetTy PrimaryBases;
+ VtableBuilder::PrimaryBasesSetVectorTy PrimaryBases;
for (ASTRecordLayout::primary_base_info_iterator
I = Layout.primary_base_begin(), E = Layout.primary_base_end();
I != E; ++I)
@@ -2370,7 +3159,7 @@ void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// Check if this method overrides a method in the primary base.
if (const CXXMethodDecl *OverriddenMD =
- OverridesMethodInPrimaryBase(MD, PrimaryBases)) {
+ OverridesMethodInBases(MD, PrimaryBases)) {
// Check if converting from the return type of the method to the
// return type of the overridden method requires conversion.
if (ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
@@ -2535,12 +3324,25 @@ CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition,
const CXXRecordDecl *LayoutClass,
const CXXRecordDecl *RD, uint64_t Offset,
+ bool IsVirtual,
AddressPointsMapTy& AddressPoints) {
- if (GenerateDefinition && CGM.getLangOptions().DumpVtableLayouts &&
- LayoutClass == RD) {
- VtableBuilder Builder(*this, RD);
-
- Builder.dumpLayout(llvm::errs());
+ if (GenerateDefinition) {
+ if (LayoutClass == RD) {
+ assert(!IsVirtual &&
+ "Can't only have a virtual base in construction vtables!");
+ VtableBuilder Builder(*this, RD, Offset,
+ /*MostDerivedClassIsVirtual=*/false,
+ LayoutClass);
+
+ if (CGM.getLangOptions().DumpVtableLayouts)
+ Builder.dumpLayout(llvm::errs());
+ } else if (CGM.getLangOptions().DumpVtableLayouts) {
+ // We only build construction vtables when dumping vtable layouts for now.
+ VtableBuilder Builder(*this, RD, Offset,
+ /*MostDerivedClassIsVirtual=*/IsVirtual,
+ LayoutClass);
+ Builder.dumpLayout(llvm::errs());
+ }
}
llvm::SmallString<256> OutName;
@@ -2601,6 +3403,7 @@ void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
AddressPointsMapTy AddressPoints;
Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0,
+ /*IsVirtual=*/false,
AddressPoints);
GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
}
@@ -2612,7 +3415,7 @@ llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) {
AddressPointsMapTy AddressPoints;
Vtable = GenerateVtable(llvm::GlobalValue::ExternalLinkage,
/*GenerateDefinition=*/false, RD, RD, 0,
- AddressPoints);
+ /*IsVirtual=*/false, AddressPoints);
}
return Vtable;
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h
index 471d6384d6b2..6ccb011985fd 100644
--- a/lib/CodeGen/CGVtable.h
+++ b/lib/CodeGen/CGVtable.h
@@ -185,7 +185,7 @@ private:
llvm::GlobalVariable *
GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition, const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset,
+ const CXXRecordDecl *RD, uint64_t Offset, bool IsVirtual,
AddressPointsMapTy& AddressPoints);
llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
@@ -239,7 +239,8 @@ public:
};
CtorVtableInfo getCtorVtable(const CXXRecordDecl *RD,
- const BaseSubobject &Base);
+ const BaseSubobject &Base,
+ bool BaseIsVirtual);
llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index 5a4f94e3e092..f45582705618 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -30,7 +30,7 @@ CodeGenFunction::CodeGenFunction(CodeGenModule &cgm)
Builder(cgm.getModule().getContext()),
DebugInfo(0), IndirectBranch(0),
SwitchInsn(0), CaseRangeBlock(0), InvokeDest(0),
- CXXThisDecl(0), CXXVTTDecl(0),
+ CXXThisDecl(0), CXXThisValue(0), CXXVTTDecl(0), CXXVTTValue(0),
ConditionalBranchLevel(0), TerminateHandler(0), TrapBB(0),
UniqueAggrDestructorCount(0) {
LLVMIntTy = ConvertType(getContext().IntTy);
@@ -197,7 +197,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
Builder.SetInsertPoint(EntryBB);
- QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0);
+ QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
+ false, false, 0, 0,
+ /*FIXME?*/false,
+ /*FIXME?*/CC_Default);
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -216,15 +219,20 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
} else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
hasAggregateLLVMType(CurFnInfo->getReturnType())) {
// Indirect aggregate return; emit returned value directly into sret slot.
- // This reduces code size, and is also affects correctness in C++.
+ // This reduces code size, and affects correctness in C++.
ReturnValue = CurFn->arg_begin();
} else {
- ReturnValue = CreateTempAlloca(ConvertType(RetTy), "retval");
+ ReturnValue = CreateIRTemp(RetTy, "retval");
}
EmitStartEHSpec(CurCodeDecl);
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
+ if (CXXThisDecl)
+ CXXThisValue = Builder.CreateLoad(LocalDeclMap[CXXThisDecl], "this");
+ if (CXXVTTDecl)
+ CXXVTTValue = Builder.CreateLoad(LocalDeclMap[CXXVTTDecl], "vtt");
+
// If any of the arguments have a variably modified type, make sure to
// emit the type size.
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
@@ -236,6 +244,19 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
}
+void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args) {
+ const FunctionDecl *FD = cast<FunctionDecl>(CurGD.getDecl());
+
+ Stmt *Body = FD->getBody();
+ if (Body)
+ EmitStmt(Body);
+ else {
+ assert(FD->isImplicit() && "non-implicit function def has no body");
+ assert(FD->isCopyAssignment() && "implicit function not copy assignment");
+ SynthesizeCXXCopyAssignment(Args);
+ }
+}
+
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
@@ -246,13 +267,13 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
FunctionArgList Args;
CurGD = GD;
- OuterTryBlock = 0;
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
if (MD->isInstance()) {
// Create the implicit 'this' decl.
// FIXME: I'm not entirely sure I like using a fake decl just for code
// generation. Maybe we can come up with a better way?
- CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
+ FD->getLocation(),
&getContext().Idents.get("this"),
MD->getThisType(getContext()));
Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
@@ -262,7 +283,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
// FIXME: The comment about using a fake decl above applies here too.
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
CXXVTTDecl =
- ImplicitParamDecl::Create(getContext(), 0, SourceLocation(),
+ ImplicitParamDecl::Create(getContext(), 0, FD->getLocation(),
&getContext().Idents.get("vtt"), T);
Args.push_back(std::make_pair(CXXVTTDecl, CXXVTTDecl->getType()));
}
@@ -278,80 +299,22 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
FProto->getArgType(i)));
}
- if (const CompoundStmt *S = FD->getCompoundBody()) {
- StartFunction(GD, FD->getResultType(), Fn, Args, S->getLBracLoc());
-
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- EmitCtorPrologue(CD, GD.getCtorType());
- EmitStmt(S);
-
- // If any of the member initializers are temporaries bound to references
- // make sure to emit their destructors.
- EmitCleanupBlocks(0);
-
- } else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
- llvm::BasicBlock *DtorEpilogue = createBasicBlock("dtor.epilogue");
- PushCleanupBlock(DtorEpilogue);
+ SourceRange BodyRange;
+ if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
- InitializeVtablePtrs(DD->getParent());
+ // Emit the standard function prologue.
+ StartFunction(GD, FD->getResultType(), Fn, Args, BodyRange.getBegin());
- EmitStmt(S);
-
- CleanupBlockInfo Info = PopCleanupBlock();
+ // Generate the body of the function.
+ if (isa<CXXDestructorDecl>(FD))
+ EmitDestructorBody(Args);
+ else if (isa<CXXConstructorDecl>(FD))
+ EmitConstructorBody(Args);
+ else
+ EmitFunctionBody(Args);
- assert(Info.CleanupBlock == DtorEpilogue && "Block mismatch!");
- EmitBlock(DtorEpilogue);
- EmitDtorEpilogue(DD, GD.getDtorType());
-
- if (Info.SwitchBlock)
- EmitBlock(Info.SwitchBlock);
- if (Info.EndBlock)
- EmitBlock(Info.EndBlock);
- } else {
- // Just a regular function, emit its body.
- EmitStmt(S);
- }
-
- FinishFunction(S->getRBracLoc());
- } else if (FD->isImplicit()) {
- const CXXRecordDecl *ClassDecl =
- cast<CXXRecordDecl>(FD->getDeclContext());
- (void) ClassDecl;
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- // FIXME: For C++0x, we want to look for implicit *definitions* of
- // these special member functions, rather than implicit *declarations*.
- if (CD->isCopyConstructor()) {
- assert(!ClassDecl->hasUserDeclaredCopyConstructor() &&
- "Cannot synthesize a non-implicit copy constructor");
- SynthesizeCXXCopyConstructor(CD, GD.getCtorType(), Fn, Args);
- } else if (CD->isDefaultConstructor()) {
- assert(!ClassDecl->hasUserDeclaredConstructor() &&
- "Cannot synthesize a non-implicit default constructor.");
- SynthesizeDefaultConstructor(CD, GD.getCtorType(), Fn, Args);
- } else {
- assert(false && "Implicit constructor cannot be synthesized");
- }
- } else if (const CXXDestructorDecl *CD = dyn_cast<CXXDestructorDecl>(FD)) {
- assert(!ClassDecl->hasUserDeclaredDestructor() &&
- "Cannot synthesize a non-implicit destructor");
- SynthesizeDefaultDestructor(CD, GD.getDtorType(), Fn, Args);
- } else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- assert(MD->isCopyAssignment() &&
- !ClassDecl->hasUserDeclaredCopyAssignment() &&
- "Cannot synthesize a method that is not an implicit-defined "
- "copy constructor");
- SynthesizeCXXCopyAssignment(MD, Fn, Args);
- } else {
- assert(false && "Cannot synthesize unknown implicit function");
- }
- } else if (const Stmt *S = FD->getBody()) {
- if (const CXXTryStmt *TS = dyn_cast<CXXTryStmt>(S)) {
- OuterTryBlock = TS;
- StartFunction(GD, FD->getResultType(), Fn, Args, TS->getTryLoc());
- EmitStmt(TS);
- FinishFunction(TS->getEndLoc());
- }
- }
+ // Emit the standard function epilogue.
+ FinishFunction(BodyRange.getEnd());
// Destroy the 'this' declaration.
if (CXXThisDecl)
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index fb2e5fee7309..d582c0def346 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -55,6 +55,7 @@ namespace clang {
class ObjCImplementationDecl;
class ObjCPropertyImplDecl;
class TargetInfo;
+ class TargetCodeGenInfo;
class VarDecl;
class ObjCForCollectionStmt;
class ObjCAtTryStmt;
@@ -62,7 +63,6 @@ namespace clang {
class ObjCAtSynchronizedStmt;
namespace CodeGen {
- class CodeGenModule;
class CodeGenTypes;
class CGDebugInfo;
class CGFunctionInfo;
@@ -91,9 +91,6 @@ public:
/// CurGD - The GlobalDecl for the current function being compiled.
GlobalDecl CurGD;
- /// OuterTryBlock - This is the address of the outter most try block, 0
- /// otherwise.
- const Stmt *OuterTryBlock;
/// ReturnBlock - Unified return block.
llvm::BasicBlock *ReturnBlock;
@@ -382,11 +379,13 @@ private:
/// CXXThisDecl - When generating code for a C++ member function,
/// this will hold the implicit 'this' declaration.
ImplicitParamDecl *CXXThisDecl;
+ llvm::Value *CXXThisValue;
/// CXXVTTDecl - When generating code for a base object constructor or
/// base object destructor with virtual bases, this will hold the implicit
/// VTT parameter.
ImplicitParamDecl *CXXVTTDecl;
+ llvm::Value *CXXVTTValue;
/// CXXLiveTemporaryInfo - Holds information about a live C++ temporary.
struct CXXLiveTemporaryInfo {
@@ -466,7 +465,8 @@ public:
//===--------------------------------------------------------------------===//
llvm::Value *BuildBlockLiteralTmp(const BlockExpr *);
- llvm::Constant *BuildDescriptorBlockDecl(bool BlockHasCopyDispose,
+ llvm::Constant *BuildDescriptorBlockDecl(const BlockExpr *,
+ bool BlockHasCopyDispose,
CharUnits Size,
const llvm::StructType *,
std::vector<HelperInfo> *);
@@ -492,6 +492,10 @@ public:
const FunctionArgList &Args,
SourceLocation StartLoc);
+ void EmitConstructorBody(FunctionArgList &Args);
+ void EmitDestructorBody(FunctionArgList &Args);
+ void EmitFunctionBody(FunctionArgList &Args);
+
/// EmitReturnBlock - Emit the unified return block, trying to avoid its
/// emission when possible.
void EmitReturnBlock();
@@ -525,24 +529,8 @@ public:
llvm::Value *ThisPtr,
uint64_t Offset);
- void SynthesizeCXXCopyConstructor(const CXXConstructorDecl *Ctor,
- CXXCtorType Type,
- llvm::Function *Fn,
- const FunctionArgList &Args);
-
- void SynthesizeCXXCopyAssignment(const CXXMethodDecl *CD,
- llvm::Function *Fn,
- const FunctionArgList &Args);
-
- void SynthesizeDefaultConstructor(const CXXConstructorDecl *Ctor,
- CXXCtorType Type,
- llvm::Function *Fn,
- const FunctionArgList &Args);
-
- void SynthesizeDefaultDestructor(const CXXDestructorDecl *Dtor,
- CXXDtorType Type,
- llvm::Function *Fn,
- const FunctionArgList &Args);
+ void SynthesizeCXXCopyConstructor(const FunctionArgList &Args);
+ void SynthesizeCXXCopyAssignment(const FunctionArgList &Args);
/// EmitDtorEpilogue - Emit all code that comes at the end of class's
/// destructor. This is to call destructors on members and base classes in
@@ -663,6 +651,13 @@ public:
llvm::AllocaInst *CreateTempAlloca(const llvm::Type *Ty,
const llvm::Twine &Name = "tmp");
+ /// CreateIRTemp - Create a temporary IR object of the given type, with
+ /// appropriate alignment. This routine should only be used when an temporary
+ /// value needs to be stored into an alloca (for example, to avoid explicit
+ /// PHI construction), but the type is the IR type, not the type appropriate
+ /// for storing in memory.
+ llvm::Value *CreateIRTemp(QualType T, const llvm::Twine &Name = "tmp");
+
/// CreateMemTemp - Create a temporary memory object of the given type, with
/// appropriate alignment.
llvm::Value *CreateMemTemp(QualType T, const llvm::Twine &Name = "tmp");
@@ -738,11 +733,17 @@ public:
/// LoadCXXThis - Load the value of 'this'. This function is only valid while
/// generating code for an C++ member function.
- llvm::Value *LoadCXXThis();
+ llvm::Value *LoadCXXThis() {
+ assert(CXXThisValue && "no 'this' value for this function");
+ return CXXThisValue;
+ }
/// LoadCXXVTT - Load the VTT parameter of base constructors/destructors that
/// have virtual bases.
- llvm::Value *LoadCXXVTT();
+ llvm::Value *LoadCXXVTT() {
+ assert(CXXVTTValue && "no VTT value for this function");
+ return CXXVTTValue;
+ }
/// GetAddressOfBaseOfCompleteClass - Convert the given pointer to a
/// complete class down to one of its virtual bases.
@@ -789,6 +790,9 @@ public:
const CXXRecordDecl *BaseClassDecl,
QualType Ty);
+ void EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
+ CXXCtorType CtorType,
+ const FunctionArgList &Args);
void EmitCXXConstructorCall(const CXXConstructorDecl *D, CXXCtorType Type,
llvm::Value *This,
CallExpr::const_arg_iterator ArgBeg,
@@ -916,6 +920,14 @@ public:
void EmitObjCAtThrowStmt(const ObjCAtThrowStmt &S);
void EmitObjCAtSynchronizedStmt(const ObjCAtSynchronizedStmt &S);
+ struct CXXTryStmtInfo {
+ llvm::BasicBlock *SavedLandingPad;
+ llvm::BasicBlock *HandlerBlock;
+ llvm::BasicBlock *FinallyBlock;
+ };
+ CXXTryStmtInfo EnterCXXTryStmt(const CXXTryStmt &S);
+ void ExitCXXTryStmt(const CXXTryStmt &S, CXXTryStmtInfo Info);
+
void EmitCXXTryStmt(const CXXTryStmt &S);
//===--------------------------------------------------------------------===//
@@ -1326,6 +1338,10 @@ private:
ArgType));
}
}
+
+ const TargetCodeGenInfo &getTargetHooks() const {
+ return CGM.getTargetCodeGenInfo();
+ }
};
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index 5a552c490ac6..91c7322c6767 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -316,24 +316,20 @@ GetLinkageForFunction(ASTContext &Context, const FunctionDecl *FD,
return CodeGenModule::GVA_CXXInline;
}
-/// SetFunctionDefinitionAttributes - Set attributes for a global.
-///
-/// FIXME: This is currently only done for aliases and functions, but not for
-/// variables (these details are set in EmitGlobalVarDefinition for variables).
-void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
- llvm::GlobalValue *GV) {
+llvm::GlobalValue::LinkageTypes
+CodeGenModule::getFunctionLinkage(const FunctionDecl *D) {
GVALinkage Linkage = GetLinkageForFunction(getContext(), D, Features);
if (Linkage == GVA_Internal) {
- GV->setLinkage(llvm::Function::InternalLinkage);
+ return llvm::Function::InternalLinkage;
} else if (D->hasAttr<DLLExportAttr>()) {
- GV->setLinkage(llvm::Function::DLLExportLinkage);
+ return llvm::Function::DLLExportLinkage;
} else if (D->hasAttr<WeakAttr>()) {
- GV->setLinkage(llvm::Function::WeakAnyLinkage);
+ return llvm::Function::WeakAnyLinkage;
} else if (Linkage == GVA_C99Inline) {
// In C99 mode, 'inline' functions are guaranteed to have a strong
// definition somewhere else, so we can use available_externally linkage.
- GV->setLinkage(llvm::Function::AvailableExternallyLinkage);
+ return llvm::Function::AvailableExternallyLinkage;
} else if (Linkage == GVA_CXXInline || Linkage == GVA_TemplateInstantiation) {
// In C++, the compiler has to emit a definition in every translation unit
// that references the function. We should use linkonce_odr because
@@ -341,13 +337,22 @@ void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
// don't need to codegen it. b) if the function persists, it needs to be
// merged with other definitions. c) C++ has the ODR, so we know the
// definition is dependable.
- GV->setLinkage(llvm::Function::LinkOnceODRLinkage);
+ return llvm::Function::LinkOnceODRLinkage;
} else {
assert(Linkage == GVA_StrongExternal);
// Otherwise, we have strong external linkage.
- GV->setLinkage(llvm::Function::ExternalLinkage);
+ return llvm::Function::ExternalLinkage;
}
+}
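
A rough source-level mapping for the cases handled above, assuming no dllexport/weak attributes (a sketch, not an exhaustive table):

    static int f() { return 0; }               // GVA_Internal       -> internal
    inline int g() { return 0; }               // C++: GVA_CXXInline -> linkonce_odr
                                               // C99: GVA_C99Inline -> available_externally
    template <class T> T h(T x) { return x; }  // instantiations     -> linkonce_odr
    int k() { return 0; }                      // GVA_StrongExternal -> external
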
+
+/// SetFunctionDefinitionAttributes - Set attributes for a global.
+///
+/// FIXME: This is currently only done for aliases and functions, but not for
+/// variables (these details are set in EmitGlobalVarDefinition for variables).
+void CodeGenModule::SetFunctionDefinitionAttributes(const FunctionDecl *D,
+ llvm::GlobalValue *GV) {
+ GV->setLinkage(getFunctionLinkage(D));
SetCommonAttributes(D, GV);
}
@@ -747,14 +752,20 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(const char *MangledName,
// A called constructor which has no definition or declaration needs to be
// synthesized.
else if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD)) {
- if (CD->isImplicit())
+ if (CD->isImplicit()) {
+ assert (CD->isUsed());
DeferredDeclsToEmit.push_back(D);
+ }
} else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD)) {
- if (DD->isImplicit())
+ if (DD->isImplicit()) {
+ assert (DD->isUsed());
DeferredDeclsToEmit.push_back(D);
+ }
} else if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
- if (MD->isCopyAssignment() && MD->isImplicit())
+ if (MD->isCopyAssignment() && MD->isImplicit()) {
+ assert (MD->isUsed());
DeferredDeclsToEmit.push_back(D);
+ }
}
}
@@ -1190,28 +1201,8 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
void CodeGenModule::EmitGlobalFunctionDefinition(GlobalDecl GD) {
- const llvm::FunctionType *Ty;
const FunctionDecl *D = cast<FunctionDecl>(GD.getDecl());
-
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- bool isVariadic = D->getType()->getAs<FunctionProtoType>()->isVariadic();
-
- Ty = getTypes().GetFunctionType(getTypes().getFunctionInfo(MD), isVariadic);
- } else {
- Ty = cast<llvm::FunctionType>(getTypes().ConvertType(D->getType()));
-
- // As a special case, make sure that definitions of K&R function
- // "type foo()" aren't declared as varargs (which forces the backend
- // to do unnecessary work).
- if (D->getType()->isFunctionNoProtoType()) {
- assert(Ty->isVarArg() && "Didn't lower type as expected");
- // Due to stret, the lowered function could have arguments.
- // Just create the same type as was lowered by ConvertType
- // but strip off the varargs bit.
- std::vector<const llvm::Type*> Args(Ty->param_begin(), Ty->param_end());
- Ty = llvm::FunctionType::get(Ty->getReturnType(), Args, false);
- }
- }
+ const llvm::FunctionType *Ty = getTypes().GetFunctionType(GD);
// Get or create the prototype for the function.
llvm::Constant *Entry = GetAddrOfFunction(GD, Ty);
@@ -1342,6 +1333,7 @@ void CodeGenModule::EmitAliasDefinition(const ValueDecl *D) {
GA->setLinkage(llvm::Function::DLLExportLinkage);
}
} else if (D->hasAttr<WeakAttr>() ||
+ D->hasAttr<WeakRefAttr>() ||
D->hasAttr<WeakImportAttr>()) {
GA->setLinkage(llvm::Function::WeakAnyLinkage);
}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index 8280766c7035..ac8332647b77 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -206,6 +206,19 @@ public:
/// GlobalValue.
void setGlobalVisibility(llvm::GlobalValue *GV, const Decl *D) const;
+ llvm::Constant *GetAddrOfGlobal(GlobalDecl GD) {
+ if (isa<CXXConstructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXConstructor(cast<CXXConstructorDecl>(GD.getDecl()),
+ GD.getCtorType());
+ else if (isa<CXXDestructorDecl>(GD.getDecl()))
+ return GetAddrOfCXXDestructor(cast<CXXDestructorDecl>(GD.getDecl()),
+ GD.getDtorType());
+ else if (isa<FunctionDecl>(GD.getDecl()))
+ return GetAddrOfFunction(GD);
+ else
+ return GetAddrOfGlobalVar(cast<VarDecl>(GD.getDecl()));
+ }
+
/// GetAddrOfGlobalVar - Return the llvm::Constant for the address of the
/// given global variable. If Ty is non-null and if the global doesn't exist,
/// then it will be created with the specified type instead of whatever the
@@ -291,13 +304,13 @@ public:
/// GetAddrOfCXXConstructor - Return the address of the constructor of the
/// given type.
- llvm::Function *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
- CXXCtorType Type);
+ llvm::GlobalValue *GetAddrOfCXXConstructor(const CXXConstructorDecl *D,
+ CXXCtorType Type);
/// GetAddrOfCXXDestructor - Return the address of the constructor of the
/// given type.
- llvm::Function *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
- CXXDtorType Type);
+ llvm::GlobalValue *GetAddrOfCXXDestructor(const CXXDestructorDecl *D,
+ CXXDtorType Type);
/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
@@ -417,6 +430,9 @@ public:
GVA_TemplateInstantiation
};
+ llvm::GlobalVariable::LinkageTypes
+ getFunctionLinkage(const FunctionDecl *FD);
+
/// getVtableLinkage - Return the appropriate linkage for the vtable, VTT,
/// and type information of the given class.
static llvm::GlobalVariable::LinkageTypes
@@ -468,6 +484,9 @@ private:
// C++ related functions.
+ bool TryEmitDefinitionAsAlias(GlobalDecl Alias, GlobalDecl Target);
+ bool TryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D);
+
void EmitNamespace(const NamespaceDecl *D);
void EmitLinkageSpec(const LinkageSpecDecl *D);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 3c20934baf25..4feca4dd7d3d 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -190,6 +190,7 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base) case Type::Class:
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
+#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base) case Type::Class:
#include "clang/AST/TypeNodes.def"
assert(false && "Non-canonical or dependent types aren't possible.");
break;
@@ -313,10 +314,14 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
// The function type can be built; call the appropriate routines to
// build it.
if (const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(&Ty))
- return GetFunctionType(getFunctionInfo(FPT), FPT->isVariadic());
+ return GetFunctionType(getFunctionInfo(
+ CanQual<FunctionProtoType>::CreateUnsafe(QualType(FPT,0))),
+ FPT->isVariadic());
const FunctionNoProtoType *FNPT = cast<FunctionNoProtoType>(&Ty);
- return GetFunctionType(getFunctionInfo(FNPT), true);
+ return GetFunctionType(getFunctionInfo(
+ CanQual<FunctionNoProtoType>::CreateUnsafe(QualType(FNPT,0))),
+ true);
}
case Type::ObjCInterface: {
@@ -386,9 +391,6 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
NULL);
return PtrDiffTy;
}
-
- case Type::TemplateSpecialization:
- assert(false && "Dependent types can't get here");
}
// FIXME: implement.
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index 87ba0bcfba1d..b2912efb3402 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -35,6 +35,7 @@ namespace llvm {
namespace clang {
class ABIInfo;
class ASTContext;
+ template <typename> class CanQual;
class CXXConstructorDecl;
class CXXDestructorDecl;
class CXXMethodDecl;
@@ -48,6 +49,7 @@ namespace clang {
class TagDecl;
class TargetInfo;
class Type;
+ typedef CanQual<Type> CanQualType;
namespace CodeGen {
class CodeGenTypes;
@@ -168,6 +170,8 @@ public:
const llvm::FunctionType *GetFunctionType(const CGFunctionInfo &Info,
bool IsVariadic);
+ const llvm::FunctionType *GetFunctionType(GlobalDecl GD);
+
/// GetFunctionTypeForVtable - Get the LLVM function type for use in a vtable,
/// given a CXXMethodDecl. If the method has an incomplete return type,
@@ -184,11 +188,6 @@ public:
/// replace the 'opaque' type we previously made for it if applicable.
void UpdateCompletedType(const TagDecl *TD);
-private:
- const CGFunctionInfo &getFunctionInfo(const FunctionNoProtoType *FTNP);
- const CGFunctionInfo &getFunctionInfo(const FunctionProtoType *FTP);
-
-public:
/// getFunctionInfo - Get the function info for the specified function decl.
const CGFunctionInfo &getFunctionInfo(GlobalDecl GD);
@@ -205,6 +204,8 @@ public:
return getFunctionInfo(Ty->getResultType(), Args,
Ty->getCallConv(), Ty->getNoReturnAttr());
}
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
+ const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
// getFunctionInfo - Get the function info for a member function.
const CGFunctionInfo &getFunctionInfo(const CXXRecordDecl *RD,
@@ -221,8 +222,12 @@ public:
const FunctionArgList &Args,
CallingConv CC,
bool NoReturn);
- const CGFunctionInfo &getFunctionInfo(QualType RetTy,
- const llvm::SmallVector<QualType, 16> &ArgTys,
+
+ /// Retrieves the ABI information for the given function signature.
+ ///
+ /// \param ArgTys - must all actually be canonical as params
+ const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
+ const llvm::SmallVectorImpl<CanQualType> &ArgTys,
CallingConv CC,
bool NoReturn);
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index a302225c7f77..64743c7696f7 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -455,7 +455,9 @@ void CXXNameMangler::mangleUnresolvedScope(NestedNameSpecifier *Qualifier) {
mangleType(QualType(Qualifier->getAsType(), 0));
break;
case NestedNameSpecifier::Identifier:
- mangleUnresolvedScope(Qualifier->getPrefix());
+ // Member expressions can have these without prefixes.
+ if (Qualifier->getPrefix())
+ mangleUnresolvedScope(Qualifier->getPrefix());
mangleSourceName(Qualifier->getAsIdentifier());
break;
}
@@ -1123,6 +1125,42 @@ void CXXNameMangler::mangleType(const TypenameType *T) {
Out << 'E';
}
+void CXXNameMangler::mangleType(const TypeOfType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const TypeOfExprType *T) {
+ // FIXME: this is pretty unsatisfactory, but there isn't an obvious
+ // "extension with parameters" mangling.
+ Out << "u6typeof";
+}
+
+void CXXNameMangler::mangleType(const DecltypeType *T) {
+ Expr *E = T->getUnderlyingExpr();
+
+ // type ::= Dt <expression> E # decltype of an id-expression
+ // # or class member access
+ // ::= DT <expression> E # decltype of an expression
+
+ // This purports to be an exhaustive list of id-expressions and
+ // class member accesses. Note that we do not ignore parentheses;
+ // parentheses change the semantics of decltype for these
+ // expressions (and cause the mangler to use the other form).
+ if (isa<DeclRefExpr>(E) ||
+ isa<MemberExpr>(E) ||
+ isa<UnresolvedLookupExpr>(E) ||
+ isa<DependentScopeDeclRefExpr>(E) ||
+ isa<CXXDependentScopeMemberExpr>(E) ||
+ isa<UnresolvedMemberExpr>(E))
+ Out << "Dt";
+ else
+ Out << "DT";
+ mangleExpression(E);
+ Out << 'E';
+}
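
A small, hypothetical pair of signatures showing when each form is chosen (the function names are invented; only the Dt/DT distinction comes from the code above):

    template <typename T> auto ident(T x) -> decltype(x)     { return x; }
    // 'x' is an id-expression, so its decltype mangles with the 'Dt ... E' form.
    template <typename T> auto twice(T x) -> decltype(x + x) { return x + x; }
    // 'x + x' is a general expression, so the 'DT ... E' form is used instead.
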
+
void CXXNameMangler::mangleIntegerLiteral(QualType T,
const llvm::APSInt &Value) {
// <expr-primary> ::= L <type> <value number> E # integer literal
@@ -1163,20 +1201,14 @@ void CXXNameMangler::mangleCalledExpression(const Expr *E, unsigned Arity) {
/// Mangles a member expression. Implicit accesses are not handled,
/// but that should be okay, because you shouldn't be able to
/// make an implicit access in a function template declaration.
-///
-/// The standard ABI does not describe how member expressions should
-/// be mangled, so this is very unstandardized. We mangle as if it
-/// were a binary operator, except that the RHS is mangled as an
-/// abstract name.
-///
-/// The standard ABI also does not assign a mangling to the dot
-/// operator, so we arbitrarily select 'me'.
void CXXNameMangler::mangleMemberExpr(const Expr *Base,
bool IsArrow,
NestedNameSpecifier *Qualifier,
DeclarationName Member,
unsigned Arity) {
- Out << (IsArrow ? "pt" : "me");
+ // gcc-4.4 uses 'dt' for dot expressions, which is reasonable.
+ // OTOH, gcc also mangles the name as an expression.
+ Out << (IsArrow ? "pt" : "dt");
mangleExpression(Base);
mangleUnresolvedName(Qualifier, Member, Arity);
}
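
A hypothetical example of dependent member accesses that reach this mangler; the names are invented, and only the dt/pt choice is taken from the code above:

    struct S { int val; };
    template <typename T> auto getv(T t)  -> decltype(t.val)  { return t.val; }
    template <typename T> auto getp(T *t) -> decltype(t->val) { return t->val; }
    // Instantiating getv<S> mangles the '.' access with "dt"; getp<S> mangles
    // the '->' access with "pt".
    int use(S s) { return getv(s) + getp(&s); }
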
@@ -1346,10 +1378,16 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
break;
case Expr::DeclRefExprClass: {
- const Decl *D = cast<DeclRefExpr>(E)->getDecl();
+ const NamedDecl *D = cast<DeclRefExpr>(E)->getDecl();
switch (D->getKind()) {
- default: assert(false && "Unhandled decl kind!");
+ default:
+ // <expr-primary> ::= L <mangled-name> E # external name
+ Out << 'L';
+ mangle(D, "_Z");
+ Out << 'E';
+ break;
+
case Decl::NonTypeTemplateParm: {
const NonTypeTemplateParmDecl *PD = cast<NonTypeTemplateParmDecl>(D);
mangleTemplateParameter(PD->getIndex());
@@ -1363,7 +1401,18 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
case Expr::DependentScopeDeclRefExprClass: {
const DependentScopeDeclRefExpr *DRE = cast<DependentScopeDeclRefExpr>(E);
- const Type *QTy = DRE->getQualifier()->getAsType();
+ NestedNameSpecifier *NNS = DRE->getQualifier();
+ const Type *QTy = NNS->getAsType();
+
+ // When we're dealing with a nested-name-specifier that has just a
+ // dependent identifier in it, mangle that as a typename. FIXME:
+ // It isn't clear that we ever actually want to have such a
+ // nested-name-specifier; why not just represent it as a typename type?
+ if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
+ QTy = getASTContext().getTypenameType(NNS->getPrefix(),
+ NNS->getAsIdentifier())
+ .getTypePtr();
+ }
assert(QTy && "Qualifier was not type!");
// ::= sr <type> <unqualified-name> # dependent name
@@ -1648,6 +1697,9 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
if (const ClassTemplateSpecializationDecl *SD =
dyn_cast<ClassTemplateSpecializationDecl>(ND)) {
+ if (!isStdNamespace(SD->getDeclContext()))
+ return false;
+
// <substitution> ::= Ss # ::std::basic_string<char,
// ::std::char_traits<char>,
// ::std::allocator<char> >
diff --git a/lib/CodeGen/TargetInfo.cpp b/lib/CodeGen/TargetInfo.cpp
index a7c0caa299e7..f4ec914a4e05 100644
--- a/lib/CodeGen/TargetInfo.cpp
+++ b/lib/CodeGen/TargetInfo.cpp
@@ -972,12 +972,11 @@ ABIArgInfo X86_64ABIInfo::getCoerceResult(QualType Ty,
return (Ty->isPromotableIntegerType() ?
ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
} else if (CoerceTo == llvm::Type::getDoubleTy(CoerceTo->getContext())) {
- // FIXME: It would probably be better to make CGFunctionInfo only map using
- // canonical types than to canonize here.
- QualType CTy = Context.getCanonicalType(Ty);
+ assert(Ty.isCanonical() && "should always have a canonical type here");
+ assert(!Ty.hasQualifiers() && "should never have a qualified type here");
// Float and double end up in a single SSE reg.
- if (CTy == Context.FloatTy || CTy == Context.DoubleTy)
+ if (Ty == Context.FloatTy || Ty == Context.DoubleTy)
return ABIArgInfo::getDirect();
}
@@ -1497,9 +1496,29 @@ ABIArgInfo PIC16ABIInfo::classifyArgumentType(QualType Ty,
llvm::Value *PIC16ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
- return 0;
+ const llvm::Type *BP =
+ llvm::PointerType::getUnqual(llvm::Type::getInt8Ty(CGF.getLLVMContext()));
+ const llvm::Type *BPP = llvm::PointerType::getUnqual(BP);
+
+ CGBuilderTy &Builder = CGF.Builder;
+ llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
+ "ap");
+ llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
+ llvm::Type *PTy =
+ llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
+ llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);
+
+ uint64_t Offset = CGF.getContext().getTypeSize(Ty) / 8;
+
+ llvm::Value *NextAddr =
+ Builder.CreateGEP(Addr, llvm::ConstantInt::get(
+ llvm::Type::getInt32Ty(CGF.getLLVMContext()), Offset),
+ "ap.next");
+ Builder.CreateStore(NextAddr, VAListAddrAsBPP);
+
+ return AddrTyped;
}
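
The IR built above amounts to a simple pointer bump through the va_list; a rough C++-level sketch of the equivalent logic, with invented names and assuming the va_list is just a byte pointer (alignment is ignored):

    template <typename T>
    T next_vararg(char *&ap) {               // 'ap' stands in for the va_list slot
      T *cur = reinterpret_cast<T *>(ap);    // "ap.cur": the current argument
      ap += sizeof(T);                       // "ap.next": step past it
      return *cur;
    }
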
+
// ARM ABI Implementation
namespace {
diff --git a/lib/CodeGen/TargetInfo.h b/lib/CodeGen/TargetInfo.h
index 58b7b79224fd..9e80081429cc 100644
--- a/lib/CodeGen/TargetInfo.h
+++ b/lib/CodeGen/TargetInfo.h
@@ -17,6 +17,7 @@
namespace llvm {
class GlobalValue;
+ class Value;
}
namespace clang {
@@ -25,6 +26,7 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
+ class CodeGenFunction;
}
/// TargetCodeGenInfo - This class organizes various target-specific
@@ -44,6 +46,35 @@ namespace clang {
/// target-specific attributes for the given global.
virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
CodeGen::CodeGenModule &M) const { }
+
+ /// Controls whether __builtin_extend_pointer should sign-extend
+ /// pointers to uint64_t or zero-extend them (the default). Has
+ /// no effect for targets:
+ /// - that have 64-bit pointers, or
+ /// - that cannot address through registers larger than pointers, or
+ /// - that implicitly ignore/truncate the top bits when addressing
+ /// through such registers.
+ virtual bool extendPointerWithSExt() const { return false; }
+
+ /// Performs the code-generation required to convert a return
+ /// address as stored by the system into the actual address of the
+ /// next instruction that will be executed.
+ ///
+ /// Used by __builtin_extract_return_addr().
+ virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
+
+ /// Performs the code-generation required to convert the address
+ /// of an instruction into a return address suitable for storage
+ /// by the system in a return slot.
+ ///
+ /// Used by __builtin_frob_return_addr().
+ virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
+ llvm::Value *Address) const {
+ return Address;
+ }
};
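
For context, these hooks back __builtin_extend_pointer, __builtin_extract_return_addr and __builtin_frob_return_addr; a minimal usage sketch (on most targets the decode/encode hooks are the identity, as the defaults above show):

    void *caller_pc() {
      // Map the raw stored return address to the address of the next
      // instruction that will execute (decodeReturnAddress above).
      return __builtin_extract_return_addr(__builtin_return_address(0));
    }
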
}
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index 15df767d9707..ec8227efb332 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -49,6 +49,7 @@ Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
: Opts(createDriverOptTable()), Diags(_Diags),
Name(_Name), Dir(_Dir), DefaultHostTriple(_DefaultHostTriple),
DefaultImageName(_DefaultImageName),
+ DriverTitle("clang \"gcc-compatible\" driver"),
Host(0),
CCCGenericGCCName("gcc"), CCCIsCXX(false), CCCEcho(false),
CCCPrintBindings(false), CheckInputsExist(true), CCCUseClang(true),
@@ -273,8 +274,8 @@ void Driver::PrintOptions(const ArgList &Args) const {
// FIXME: Move -ccc options to real options in the .td file (or eliminate), and
// then move to using OptTable::PrintHelp.
void Driver::PrintHelp(bool ShowHidden) const {
- getOpts().PrintHelp(llvm::outs(), Name.c_str(),
- "clang \"gcc-compatible\" driver", ShowHidden);
+ getOpts().PrintHelp(llvm::outs(), Name.c_str(), DriverTitle.c_str(),
+ ShowHidden);
}
void Driver::PrintVersion(const Compilation &C, llvm::raw_ostream &OS) const {
@@ -558,6 +559,17 @@ void Driver::BuildActions(const ArgList &Args, ActionList &Actions) const {
if (Ty == types::TY_INVALID)
Ty = types::TY_Object;
+
+ // If the driver is invoked as a C++ compiler (like clang++ or c++), it
+ // should autodetect some input files as C++ for g++ compatibility.
+ if (CCCIsCXX) {
+ types::ID OldTy = Ty;
+ Ty = types::lookupCXXTypeForCType(Ty);
+
+ if (Ty != OldTy)
+ Diag(clang::diag::warn_drv_treating_input_as_cxx)
+ << getTypeName(OldTy) << getTypeName(Ty);
+ }
}
// -ObjC and -ObjC++ override the default language, but only for "source
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index aff70bc7ba0d..de9bdcc18816 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -480,6 +480,65 @@ void Clang::AddARMTargetArgs(const ArgList &Args,
}
}
+void Clang::AddMIPSTargetArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ const Driver &D = getToolChain().getDriver();
+
+ // Select the ABI to use.
+ const char *ABIName = 0;
+ if (Arg *A = Args.getLastArg(options::OPT_mabi_EQ)) {
+ ABIName = A->getValue(Args);
+ } else {
+ ABIName = "o32";
+ }
+
+ CmdArgs.push_back("-target-abi");
+ CmdArgs.push_back(ABIName);
+
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ llvm::StringRef MArch = A->getValue(Args);
+ CmdArgs.push_back("-target-cpu");
+
+ if ((MArch == "r2000") || (MArch == "r3000"))
+ CmdArgs.push_back("mips1");
+ else if (MArch == "r6000")
+ CmdArgs.push_back("mips2");
+ else
+ CmdArgs.push_back(MArch.str().c_str());
+ }
+
+ // Select the float ABI as determined by -msoft-float and -mhard-float.
+ llvm::StringRef FloatABI;
+ if (Arg *A = Args.getLastArg(options::OPT_msoft_float,
+ options::OPT_mhard_float)) {
+ if (A->getOption().matches(options::OPT_msoft_float))
+ FloatABI = "soft";
+ else if (A->getOption().matches(options::OPT_mhard_float))
+ FloatABI = "hard";
+ }
+
+ // If unspecified, choose the default based on the platform.
+ if (FloatABI.empty()) {
+ switch (getToolChain().getTriple().getOS()) {
+ default:
+ // Assume "soft", but warn the user we are guessing.
+ FloatABI = "soft";
+ D.Diag(clang::diag::warn_drv_assuming_mfloat_abi_is) << "soft";
+ break;
+ }
+ }
+
+ if (FloatABI == "soft") {
+ // Floating point operations and argument passing are soft.
+ //
+ // FIXME: This changes CPP defines, we need -target-soft-float.
+ CmdArgs.push_back("-msoft-float");
+ } else {
+ assert(FloatABI == "hard" && "Invalid float abi!");
+ CmdArgs.push_back("-mhard-float");
+ }
+}
+
void Clang::AddX86TargetArgs(const ArgList &Args,
ArgStringList &CmdArgs) const {
if (!Args.hasFlag(options::OPT_mred_zone,
@@ -799,6 +858,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("Arguments");
}
+ // Enable -mconstructor-aliases except on darwin, where we have to
+ // work around a linker bug; see <rdar://problem/7651567>.
+ if (getToolChain().getTriple().getOS() != llvm::Triple::Darwin)
+ CmdArgs.push_back("-mconstructor-aliases");
+
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
@@ -834,6 +898,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
AddARMTargetArgs(Args, CmdArgs);
break;
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ AddMIPSTargetArgs(Args, CmdArgs);
+ break;
+
case llvm::Triple::x86:
case llvm::Triple::x86_64:
AddX86TargetArgs(Args, CmdArgs);
@@ -1006,7 +1075,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fblocks=0 is default.
if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
getToolChain().IsBlocksDefault())) {
- Args.AddLastArg(CmdArgs, options::OPT_fblock_introspection);
CmdArgs.push_back("-fblocks");
}
@@ -2542,6 +2610,13 @@ void freebsd::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
if (getToolChain().getArchName() == "i386")
CmdArgs.push_back("--32");
+
+ // Set byte order explicitly
+ if (getToolChain().getArchName() == "mips")
+ CmdArgs.push_back("-EB");
+ else if (getToolChain().getArchName() == "mipsel")
+ CmdArgs.push_back("-EL");
+
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA,
options::OPT_Xassembler);
@@ -2637,11 +2712,13 @@ void freebsd::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (D.CCCIsCXX) {
+ CmdArgs.push_back("-lstdc++");
+ CmdArgs.push_back("-lm");
+ }
// FIXME: For some reason GCC passes -lgcc and -lgcc_s before adding
// the default system libraries. Just mimic this for now.
CmdArgs.push_back("-lgcc");
- if (D.CCCIsCXX)
- CmdArgs.push_back("-lstdc++");
if (Args.hasArg(options::OPT_static)) {
CmdArgs.push_back("-lgcc_eh");
} else {
diff --git a/lib/Driver/Tools.h b/lib/Driver/Tools.h
index db596417a9d2..7a8f1b7cb703 100644
--- a/lib/Driver/Tools.h
+++ b/lib/Driver/Tools.h
@@ -34,6 +34,7 @@ namespace tools {
const InputInfoList &Inputs) const;
void AddARMTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
+ void AddMIPSTargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
void AddX86TargetArgs(const ArgList &Args, ArgStringList &CmdArgs) const;
public:
diff --git a/lib/Driver/Types.cpp b/lib/Driver/Types.cpp
index 60d86a62a3a0..8857fb16a304 100644
--- a/lib/Driver/Types.cpp
+++ b/lib/Driver/Types.cpp
@@ -213,3 +213,19 @@ phases::ID types::getCompilationPhase(ID Id, unsigned N) {
return phases::Link;
}
+
+ID types::lookupCXXTypeForCType(ID Id) {
+ switch (Id) {
+ default:
+ return Id;
+
+ case types::TY_C:
+ return types::TY_CXX;
+ case types::TY_PP_C:
+ return types::TY_PP_CXX;
+ case types::TY_CHeader:
+ return types::TY_CXXHeader;
+ case types::TY_PP_CHeader:
+ return types::TY_PP_CXXHeader;
+ }
+}
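A small usage sketch of the new helper (illustrative only; the driver code that actually calls it is shown in Driver.cpp above):

    // Inputs with a C type are re-typed as C++ when the driver runs as clang++.
    types::ID Ty  = types::lookupCXXTypeForCType(types::TY_C);       // TY_CXX
    types::ID Hdr = types::lookupCXXTypeForCType(types::TY_CHeader); // TY_CXXHeader
    // Types with no C++ counterpart are returned unchanged.
    types::ID Asm = types::lookupCXXTypeForCType(types::TY_Asm);     // TY_Asm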
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index a0c4889c1631..ef14df10345e 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -36,11 +36,11 @@
using namespace clang;
ASTUnit::ASTUnit(bool _MainFileIsAST)
- : tempFile(false), MainFileIsAST(_MainFileIsAST) {
+ : MainFileIsAST(_MainFileIsAST) {
}
ASTUnit::~ASTUnit() {
- if (tempFile)
- llvm::sys::Path(getPCHFileName()).eraseFromDisk();
+ for (unsigned I = 0, N = TemporaryFiles.size(); I != N; ++I)
+ TemporaryFiles[I].eraseFromDisk();
}
namespace {
@@ -90,8 +90,46 @@ public:
}
};
+class StoredDiagnosticClient : public DiagnosticClient {
+ llvm::SmallVectorImpl<StoredDiagnostic> &StoredDiags;
+
+public:
+ explicit StoredDiagnosticClient(
+ llvm::SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : StoredDiags(StoredDiags) { }
+
+ virtual void HandleDiagnostic(Diagnostic::Level Level,
+ const DiagnosticInfo &Info);
+};
+
+/// \brief RAII object that captures diagnostics when capture is requested,
+/// or when there is no diagnostic client installed to receive them.
+class CaptureDroppedDiagnostics {
+ Diagnostic &Diags;
+ StoredDiagnosticClient Client;
+ DiagnosticClient *PreviousClient;
+
+public:
+ CaptureDroppedDiagnostics(bool RequestCapture, Diagnostic &Diags,
+ llvm::SmallVectorImpl<StoredDiagnostic> &StoredDiags)
+ : Diags(Diags), Client(StoredDiags), PreviousClient(Diags.getClient())
+ {
+ if (RequestCapture || Diags.getClient() == 0)
+ Diags.setClient(&Client);
+ }
+
+ ~CaptureDroppedDiagnostics() {
+ Diags.setClient(PreviousClient);
+ }
+};
+
} // anonymous namespace
+void StoredDiagnosticClient::HandleDiagnostic(Diagnostic::Level Level,
+ const DiagnosticInfo &Info) {
+ StoredDiags.push_back(StoredDiagnostic(Level, Info));
+}
+
const std::string &ASTUnit::getOriginalSourceFileName() {
return OriginalSourceFile;
}
@@ -105,11 +143,16 @@ ASTUnit *ASTUnit::LoadFromPCHFile(const std::string &Filename,
Diagnostic &Diags,
bool OnlyLocalDecls,
RemappedFile *RemappedFiles,
- unsigned NumRemappedFiles) {
+ unsigned NumRemappedFiles,
+ bool CaptureDiagnostics) {
llvm::OwningPtr<ASTUnit> AST(new ASTUnit(true));
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->HeaderInfo.reset(new HeaderSearch(AST->getFileManager()));
+ // If requested, capture diagnostics in the ASTUnit.
+ CaptureDroppedDiagnostics Capture(CaptureDiagnostics, Diags,
+ AST->Diagnostics);
+
for (unsigned I = 0; I != NumRemappedFiles; ++I) {
// Create the file entry for the file that we're mapping from.
const FileEntry *FromFile
@@ -119,6 +162,7 @@ ASTUnit *ASTUnit::LoadFromPCHFile(const std::string &Filename,
if (!FromFile) {
Diags.Report(diag::err_fe_remap_missing_from_file)
<< RemappedFiles[I].first;
+ delete RemappedFiles[I].second;
continue;
}
@@ -231,7 +275,8 @@ public:
ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
Diagnostic &Diags,
- bool OnlyLocalDecls) {
+ bool OnlyLocalDecls,
+ bool CaptureDiagnostics) {
// Create the compiler instance to use for building the AST.
CompilerInstance Clang;
llvm::OwningPtr<ASTUnit> AST;
@@ -245,8 +290,13 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
// Create the target instance.
Clang.setTarget(TargetInfo::CreateTargetInfo(Clang.getDiagnostics(),
Clang.getTargetOpts()));
- if (!Clang.hasTarget())
- goto error;
+ if (!Clang.hasTarget()) {
+ Clang.takeSourceManager();
+ Clang.takeFileManager();
+ Clang.takeDiagnosticClient();
+ Clang.takeDiagnostics();
+ return 0;
+ }
// Inform the target of the language options.
//
@@ -261,10 +311,14 @@ ASTUnit *ASTUnit::LoadFromCompilerInvocation(CompilerInvocation *CI,
// Create the AST unit.
AST.reset(new ASTUnit(false));
-
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->OriginalSourceFile = Clang.getFrontendOpts().Inputs[0].second;
+ // Capture any diagnostics that would otherwise be dropped.
+ CaptureDroppedDiagnostics Capture(CaptureDiagnostics,
+ Clang.getDiagnostics(),
+ AST->Diagnostics);
+
// Create a file manager object to provide access to and cache the filesystem.
Clang.setFileManager(&AST->getFileManager());
@@ -312,7 +366,8 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
llvm::StringRef ResourceFilesPath,
bool OnlyLocalDecls,
RemappedFile *RemappedFiles,
- unsigned NumRemappedFiles) {
+ unsigned NumRemappedFiles,
+ bool CaptureDiagnostics) {
llvm::SmallVector<const char *, 16> Args;
Args.push_back("<clang>"); // FIXME: Remove dummy argument.
Args.insert(Args.end(), ArgBegin, ArgEnd);
@@ -363,5 +418,6 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
CI->getHeaderSearchOpts().ResourceDir = ResourceFilesPath;
CI->getFrontendOpts().DisableFree = true;
- return LoadFromCompilerInvocation(CI.take(), Diags, OnlyLocalDecls);
+ return LoadFromCompilerInvocation(CI.take(), Diags, OnlyLocalDecls,
+ CaptureDiagnostics);
}
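A sketch of how a client might opt into the new diagnostic capture; the argument values are placeholders for whatever the caller already has on hand:

    // Build an ASTUnit and keep diagnostics that would otherwise be dropped
    // because no DiagnosticClient is installed.
    llvm::OwningPtr<ASTUnit> Unit(
      ASTUnit::LoadFromCommandLine(ArgBegin, ArgEnd, Diags, ResourceDir,
                                   /*OnlyLocalDecls=*/false,
                                   /*RemappedFiles=*/0,
                                   /*NumRemappedFiles=*/0,
                                   /*CaptureDiagnostics=*/true));
    // The captured StoredDiagnostics live in the returned ASTUnit.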
diff --git a/lib/Frontend/CMakeLists.txt b/lib/Frontend/CMakeLists.txt
index 1d0b5c12041a..b69ad9740d2a 100644
--- a/lib/Frontend/CMakeLists.txt
+++ b/lib/Frontend/CMakeLists.txt
@@ -5,8 +5,8 @@ add_clang_library(clangFrontend
ASTMerge.cpp
ASTUnit.cpp
AnalysisConsumer.cpp
- Backend.cpp
CacheTokens.cpp
+ CodeGenAction.cpp
CompilerInstance.cpp
CompilerInvocation.cpp
DeclXML.cpp
diff --git a/lib/Frontend/CacheTokens.cpp b/lib/Frontend/CacheTokens.cpp
index 7326937e520a..09b5b458e93a 100644
--- a/lib/Frontend/CacheTokens.cpp
+++ b/lib/Frontend/CacheTokens.cpp
@@ -190,12 +190,7 @@ class PTHWriter {
void Emit16(uint32_t V) { ::Emit16(Out, V); }
- void Emit24(uint32_t V) {
- Out << (unsigned char)(V);
- Out << (unsigned char)(V >> 8);
- Out << (unsigned char)(V >> 16);
- assert((V >> 24) == 0);
- }
+ void Emit24(uint32_t V) { ::Emit24(Out, V); }
void Emit32(uint32_t V) { ::Emit32(Out, V); }
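The removed member body is folded into a file-static helper next to the existing Emit16/Emit32 ones; judging from the code it replaces, that helper presumably looks like:

    static void Emit24(llvm::raw_ostream &Out, uint32_t V) {
      Out << (unsigned char)(V);
      Out << (unsigned char)(V >> 8);
      Out << (unsigned char)(V >> 16);
      assert((V >> 24) == 0);
    }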
diff --git a/lib/Frontend/Backend.cpp b/lib/Frontend/CodeGenAction.cpp
index f5291a9525e7..b1795a3aa3b5 100644
--- a/lib/Frontend/Backend.cpp
+++ b/lib/Frontend/CodeGenAction.cpp
@@ -1,4 +1,4 @@
-//===--- Backend.cpp - Interface to LLVM backend technologies -------------===//
+//===--- CodeGenAction.cpp - LLVM Code Generation Frontend Action ---------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,7 +7,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/CodeGenAction.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclGroup.h"
@@ -15,6 +15,8 @@
#include "clang/Basic/TargetOptions.h"
#include "clang/CodeGen/CodeGenOptions.h"
#include "clang/CodeGen/ModuleBuilder.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/Module.h"
#include "llvm/PassManager.h"
@@ -37,6 +39,14 @@ using namespace clang;
using namespace llvm;
namespace {
+ enum BackendAction {
+ Backend_EmitAssembly, ///< Emit native assembly files
+ Backend_EmitBC, ///< Emit LLVM bitcode files
+ Backend_EmitLL, ///< Emit human-readable LLVM assembly
+ Backend_EmitNothing, ///< Don't emit anything (benchmarking mode)
+ Backend_EmitObj ///< Emit native object files
+ };
+
class BackendConsumer : public ASTConsumer {
Diagnostic &Diags;
BackendAction Action;
@@ -52,7 +62,7 @@ namespace {
llvm::OwningPtr<CodeGenerator> Gen;
- llvm::Module *TheModule;
+ llvm::OwningPtr<llvm::Module> TheModule;
llvm::TargetData *TheTargetData;
mutable FunctionPassManager *CodeGenPasses;
@@ -87,7 +97,7 @@ namespace {
LLVMIRGeneration("LLVM IR Generation Time"),
CodeGenerationTime("Code Generation Time"),
Gen(CreateLLVMCodeGen(Diags, infile, compopts, C)),
- TheModule(0), TheTargetData(0),
+ TheTargetData(0),
CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {
if (AsmOutStream)
@@ -99,12 +109,13 @@ namespace {
~BackendConsumer() {
delete TheTargetData;
- delete TheModule;
delete CodeGenPasses;
delete PerModulePasses;
delete PerFunctionPasses;
}
+ llvm::Module *takeModule() { return TheModule.take(); }
+
virtual void Initialize(ASTContext &Ctx) {
Context = &Ctx;
@@ -113,7 +124,7 @@ namespace {
Gen->Initialize(Ctx);
- TheModule = Gen->GetModule();
+ TheModule.reset(Gen->GetModule());
TheTargetData = new llvm::TargetData(Ctx.Target.getTargetDescription());
if (llvm::TimePassesIsEnabled)
@@ -169,7 +180,7 @@ namespace {
FunctionPassManager *BackendConsumer::getCodeGenPasses() const {
if (!CodeGenPasses) {
- CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses = new FunctionPassManager(&*TheModule);
CodeGenPasses->add(new TargetData(*TheTargetData));
}
@@ -187,7 +198,7 @@ PassManager *BackendConsumer::getPerModulePasses() const {
FunctionPassManager *BackendConsumer::getPerFunctionPasses() const {
if (!PerFunctionPasses) {
- PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses = new FunctionPassManager(&*TheModule);
PerFunctionPasses->add(new TargetData(*TheTargetData));
}
@@ -303,12 +314,21 @@ bool BackendConsumer::AddEmitPasses() {
case 3: OptLevel = CodeGenOpt::Aggressive; break;
}
+ // Request that addPassesToEmitFile run the Verifier after running
+ // passes which modify the IR.
+#ifndef NDEBUG
+ bool DisableVerify = false;
+#else
+ bool DisableVerify = true;
+#endif
+
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
if (Action == Backend_EmitObj)
CGFT = TargetMachine::CGFT_ObjectFile;
- if (TM->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, OptLevel)) {
+ if (TM->addPassesToEmitFile(*PM, FormattedOutStream, CGFT, OptLevel,
+ DisableVerify)) {
Diags.Report(diag::err_fe_unable_to_interface_with_target);
return false;
}
@@ -381,11 +401,12 @@ void BackendConsumer::EmitAssembly() {
if (!M) {
// The module has been released by IR gen on failures, do not
// double free.
- TheModule = 0;
+ TheModule.take();
return;
}
- assert(TheModule == M && "Unexpected module change during IR generation");
+ assert(TheModule.get() == M &&
+ "Unexpected module change during IR generation");
CreatePasses();
if (!AddEmitPasses())
@@ -419,15 +440,64 @@ void BackendConsumer::EmitAssembly() {
}
}
-ASTConsumer *clang::CreateBackendConsumer(BackendAction Action,
- Diagnostic &Diags,
- const LangOptions &LangOpts,
- const CodeGenOptions &CodeGenOpts,
- const TargetOptions &TargetOpts,
- bool TimePasses,
- const std::string& InFile,
- llvm::raw_ostream* OS,
- LLVMContext& C) {
- return new BackendConsumer(Action, Diags, LangOpts, CodeGenOpts,
- TargetOpts, TimePasses, InFile, OS, C);
+//
+
+CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
+
+CodeGenAction::~CodeGenAction() {}
+
+void CodeGenAction::EndSourceFileAction() {
+ // If the consumer creation failed, do nothing.
+ if (!getCompilerInstance().hasASTConsumer())
+ return;
+
+ // Steal the module from the consumer.
+ BackendConsumer *Consumer = static_cast<BackendConsumer*>(
+ &getCompilerInstance().getASTConsumer());
+
+ TheModule.reset(Consumer->takeModule());
+}
+
+llvm::Module *CodeGenAction::takeModule() {
+ return TheModule.take();
}
+
+ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ BackendAction BA = static_cast<BackendAction>(Act);
+ llvm::OwningPtr<llvm::raw_ostream> OS;
+ switch (BA) {
+ case Backend_EmitAssembly:
+ OS.reset(CI.createDefaultOutputFile(false, InFile, "s"));
+ break;
+ case Backend_EmitLL:
+ OS.reset(CI.createDefaultOutputFile(false, InFile, "ll"));
+ break;
+ case Backend_EmitBC:
+ OS.reset(CI.createDefaultOutputFile(true, InFile, "bc"));
+ break;
+ case Backend_EmitNothing:
+ break;
+ case Backend_EmitObj:
+ OS.reset(CI.createDefaultOutputFile(true, InFile, "o"));
+ break;
+ }
+ if (BA != Backend_EmitNothing && !OS)
+ return 0;
+
+ return new BackendConsumer(BA, CI.getDiagnostics(), CI.getLangOpts(),
+ CI.getCodeGenOpts(), CI.getTargetOpts(),
+ CI.getFrontendOpts().ShowTimers, InFile, OS.take(),
+ CI.getLLVMContext());
+}
+
+EmitAssemblyAction::EmitAssemblyAction()
+ : CodeGenAction(Backend_EmitAssembly) {}
+
+EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
+
+EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
+
+EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
+
+EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
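With the consumer now behind a FrontendAction, IR can be generated in-process and handed back to the caller; a rough usage sketch (setup of the CompilerInstance is elided and assumed):

    CompilerInstance Clang;
    // ... install invocation, diagnostics, LLVMContext, etc. ...
    EmitLLVMOnlyAction Act;
    if (Clang.ExecuteAction(Act)) {
      // The action owns the module until it is taken.
      llvm::OwningPtr<llvm::Module> M(Act.takeModule());
      // ... use M ...
    }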
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 917cbd711ad3..1831ca532beb 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -102,7 +102,7 @@ namespace {
void BinaryDiagnosticSerializer::HandleDiagnostic(Diagnostic::Level DiagLevel,
const DiagnosticInfo &Info) {
- Info.Serialize(DiagLevel, OS);
+ StoredDiagnostic(DiagLevel, Info).Serialize(OS);
}
static void SetUpBuildDumpLog(const DiagnosticOptions &DiagOpts,
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index a193ac870307..64a42bc0ec40 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -180,6 +180,8 @@ static void CodeGenOptsToArgs(const CodeGenOptions &Opts,
Res.push_back("-mrelocation-model");
Res.push_back(Opts.RelocationModel);
}
+ if (Opts.CXXCtorDtorAliases)
+ Res.push_back("-mconstructor-aliases");
if (!Opts.VerifyModule)
Res.push_back("-disable-llvm-verifier");
}
@@ -789,6 +791,7 @@ static void ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
Opts.SoftFloat = Args.hasArg(OPT_msoft_float);
Opts.UnwindTables = Args.hasArg(OPT_munwind_tables);
Opts.RelocationModel = getLastArgValue(Args, OPT_mrelocation_model, "pic");
+ Opts.CXXCtorDtorAliases = Args.hasArg(OPT_mconstructor_aliases);
Opts.MainFileName = getLastArgValue(Args, OPT_main_file_name);
Opts.VerifyModule = !Args.hasArg(OPT_disable_llvm_verifier);
diff --git a/lib/Frontend/FrontendActions.cpp b/lib/Frontend/FrontendActions.cpp
index 1c958a7087a9..1e210b42e6d1 100644
--- a/lib/Frontend/FrontendActions.cpp
+++ b/lib/Frontend/FrontendActions.cpp
@@ -159,48 +159,6 @@ ASTConsumer *SyntaxOnlyAction::CreateASTConsumer(CompilerInstance &CI,
return new ASTConsumer();
}
-CodeGenAction::CodeGenAction(unsigned _Act) : Act(_Act) {}
-
-ASTConsumer *CodeGenAction::CreateASTConsumer(CompilerInstance &CI,
- llvm::StringRef InFile) {
- BackendAction BA = static_cast<BackendAction>(Act);
- llvm::OwningPtr<llvm::raw_ostream> OS;
- switch (BA) {
- case Backend_EmitAssembly:
- OS.reset(CI.createDefaultOutputFile(false, InFile, "s"));
- break;
- case Backend_EmitLL:
- OS.reset(CI.createDefaultOutputFile(false, InFile, "ll"));
- break;
- case Backend_EmitBC:
- OS.reset(CI.createDefaultOutputFile(true, InFile, "bc"));
- break;
- case Backend_EmitNothing:
- break;
- case Backend_EmitObj:
- OS.reset(CI.createDefaultOutputFile(true, InFile, "o"));
- break;
- }
- if (BA != Backend_EmitNothing && !OS)
- return 0;
-
- return CreateBackendConsumer(BA, CI.getDiagnostics(), CI.getLangOpts(),
- CI.getCodeGenOpts(), CI.getTargetOpts(),
- CI.getFrontendOpts().ShowTimers, InFile,
- OS.take(), CI.getLLVMContext());
-}
-
-EmitAssemblyAction::EmitAssemblyAction()
- : CodeGenAction(Backend_EmitAssembly) {}
-
-EmitBCAction::EmitBCAction() : CodeGenAction(Backend_EmitBC) {}
-
-EmitLLVMAction::EmitLLVMAction() : CodeGenAction(Backend_EmitLL) {}
-
-EmitLLVMOnlyAction::EmitLLVMOnlyAction() : CodeGenAction(Backend_EmitNothing) {}
-
-EmitObjAction::EmitObjAction() : CodeGenAction(Backend_EmitObj) {}
-
//===----------------------------------------------------------------------===//
// Preprocessor Actions
//===----------------------------------------------------------------------===//
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index 2e0b4bdbfce3..34cb9ec3b82a 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -489,8 +489,10 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(const llvm::Triple &tripl
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.3",
"i686-pc-linux-gnu", "", "", triple);
// Debian sid
- AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4.2",
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
"x86_64-linux-gnu", "32", "", triple);
+ AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.4",
+ "i486-linux-gnu", "64", "", triple);
// Ubuntu 7.10 - Gutsy Gibbon
AddGnuCPlusPlusIncludePaths("/usr/include/c++/4.1.3",
"i486-linux-gnu", "", "", triple);
diff --git a/lib/Frontend/InitPreprocessor.cpp b/lib/Frontend/InitPreprocessor.cpp
index b7ab3d8cd452..8bcd3a83c00e 100644
--- a/lib/Frontend/InitPreprocessor.cpp
+++ b/lib/Frontend/InitPreprocessor.cpp
@@ -439,6 +439,7 @@ static void InitializeFileRemapping(Diagnostic &Diags,
if (!FromFile) {
Diags.Report(diag::err_fe_remap_missing_from_file)
<< Remap->first;
+ delete Remap->second;
continue;
}
@@ -477,7 +478,7 @@ static void InitializeFileRemapping(Diagnostic &Diags,
= llvm::MemoryBuffer::getFile(ToFile->getName(), &ErrorStr);
if (!Buffer) {
Diags.Report(diag::err_fe_error_opening)
- << Remap->second << ErrorStr;
+ << Remap->second << ErrorStr;
continue;
}
diff --git a/lib/Frontend/PCHReaderDecl.cpp b/lib/Frontend/PCHReaderDecl.cpp
index 625997cac232..356bd0726e52 100644
--- a/lib/Frontend/PCHReaderDecl.cpp
+++ b/lib/Frontend/PCHReaderDecl.cpp
@@ -39,6 +39,7 @@ namespace {
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *TU);
void VisitNamedDecl(NamedDecl *ND);
+ void VisitNamespaceDecl(NamespaceDecl *D);
void VisitTypeDecl(TypeDecl *TD);
void VisitTypedefDecl(TypedefDecl *TD);
void VisitTagDecl(TagDecl *TD);
@@ -96,6 +97,18 @@ void PCHDeclReader::VisitNamedDecl(NamedDecl *ND) {
ND->setDeclName(Reader.ReadDeclarationName(Record, Idx));
}
+void PCHDeclReader::VisitNamespaceDecl(NamespaceDecl *D) {
+ VisitNamedDecl(D);
+ D->setLBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setRBracLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
+ D->setNextNamespace(
+ cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setOriginalNamespace(
+ cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
+ D->setAnonymousNamespace(
+ cast_or_null<NamespaceDecl>(Reader.GetDecl(Record[Idx++])));
+}
+
void PCHDeclReader::VisitTypeDecl(TypeDecl *TD) {
VisitNamedDecl(TD);
TD->setTypeForDecl(Reader.GetType(Record[Idx++]).getTypePtr());
@@ -235,7 +248,6 @@ void PCHDeclReader::VisitObjCInterfaceDecl(ObjCInterfaceDecl *ID) {
IVars.reserve(NumIvars);
for (unsigned I = 0; I != NumIvars; ++I)
IVars.push_back(cast<ObjCIvarDecl>(Reader.GetDecl(Record[Idx++])));
- ID->setIVarList(IVars.data(), NumIvars, *Reader.getContext());
ID->setCategoryList(
cast_or_null<ObjCCategoryDecl>(Reader.GetDecl(Record[Idx++])));
ID->setForwardDecl(Record[Idx++]);
@@ -517,6 +529,10 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(GNUInline);
SIMPLE_ATTR(Hiding);
+ case Attr::IBActionKind:
+ New = ::new (*Context) IBActionAttr();
+ break;
+
case Attr::IBOutletKind:
New = ::new (*Context) IBOutletAttr();
break;
@@ -546,7 +562,9 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(ObjCException);
SIMPLE_ATTR(ObjCNSObject);
+ SIMPLE_ATTR(CFReturnsNotRetained);
SIMPLE_ATTR(CFReturnsRetained);
+ SIMPLE_ATTR(NSReturnsNotRetained);
SIMPLE_ATTR(NSReturnsRetained);
SIMPLE_ATTR(Overloadable);
SIMPLE_ATTR(Override);
@@ -568,6 +586,7 @@ Attr *PCHReader::ReadAttributes() {
SIMPLE_ATTR(WarnUnusedResult);
SIMPLE_ATTR(Weak);
+ SIMPLE_ATTR(WeakRef);
SIMPLE_ATTR(WeakImport);
}
@@ -738,6 +757,10 @@ Decl *PCHReader::ReadDeclRecord(uint64_t Offset, unsigned Index) {
case pch::DECL_BLOCK:
D = BlockDecl::Create(*Context, 0, SourceLocation());
break;
+
+ case pch::DECL_NAMESPACE:
+ D = NamespaceDecl::Create(*Context, 0, SourceLocation(), 0);
+ break;
}
assert(D && "Unknown declaration reading PCH file");
diff --git a/lib/Frontend/PCHWriter.cpp b/lib/Frontend/PCHWriter.cpp
index 4c99dbe24504..93af75468382 100644
--- a/lib/Frontend/PCHWriter.cpp
+++ b/lib/Frontend/PCHWriter.cpp
@@ -1852,12 +1852,13 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
case Attr::GNUInline:
case Attr::Hiding:
+ case Attr::IBActionKind:
case Attr::IBOutletKind:
case Attr::Malloc:
case Attr::NoDebug:
+ case Attr::NoInline:
case Attr::NoReturn:
case Attr::NoThrow:
- case Attr::NoInline:
break;
case Attr::NonNull: {
@@ -1867,10 +1868,12 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
break;
}
- case Attr::ObjCException:
- case Attr::ObjCNSObject:
+ case Attr::CFReturnsNotRetained:
case Attr::CFReturnsRetained:
+ case Attr::NSReturnsNotRetained:
case Attr::NSReturnsRetained:
+ case Attr::ObjCException:
+ case Attr::ObjCNSObject:
case Attr::Overloadable:
case Attr::Override:
break;
@@ -1913,6 +1916,7 @@ void PCHWriter::WriteAttributeRecord(const Attr *Attr) {
case Attr::WarnUnusedResult:
case Attr::Weak:
+ case Attr::WeakRef:
case Attr::WeakImport:
break;
}
@@ -2332,4 +2336,3 @@ void PCHWriter::AddDeclarationName(DeclarationName Name, RecordData &Record) {
break;
}
}
-
diff --git a/lib/Frontend/PCHWriterDecl.cpp b/lib/Frontend/PCHWriterDecl.cpp
index d105382b4354..e776d32454d2 100644
--- a/lib/Frontend/PCHWriterDecl.cpp
+++ b/lib/Frontend/PCHWriterDecl.cpp
@@ -42,6 +42,7 @@ namespace {
void VisitDecl(Decl *D);
void VisitTranslationUnitDecl(TranslationUnitDecl *D);
void VisitNamedDecl(NamedDecl *D);
+ void VisitNamespaceDecl(NamespaceDecl *D);
void VisitTypeDecl(TypeDecl *D);
void VisitTypedefDecl(TypedefDecl *D);
void VisitTagDecl(TagDecl *D);
@@ -99,6 +100,16 @@ void PCHDeclWriter::VisitNamedDecl(NamedDecl *D) {
Writer.AddDeclarationName(D->getDeclName(), Record);
}
+void PCHDeclWriter::VisitNamespaceDecl(NamespaceDecl *D) {
+ VisitNamedDecl(D);
+ Writer.AddSourceLocation(D->getLBracLoc(), Record);
+ Writer.AddSourceLocation(D->getRBracLoc(), Record);
+ Writer.AddDeclRef(D->getNextNamespace(), Record);
+ Writer.AddDeclRef(D->getOriginalNamespace(), Record);
+ Writer.AddDeclRef(D->getAnonymousNamespace(), Record);
+ Code = pch::DECL_NAMESPACE;
+}
+
void PCHDeclWriter::VisitTypeDecl(TypeDecl *D) {
VisitNamedDecl(D);
Writer.AddTypeRef(QualType(D->getTypeForDecl(), 0), Record);
diff --git a/lib/Frontend/PrintPreprocessedOutput.cpp b/lib/Frontend/PrintPreprocessedOutput.cpp
index 43deaee8c1db..774372c86934 100644
--- a/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -67,12 +67,7 @@ static void PrintMacroDefinition(const IdentifierInfo &II, const MacroInfo &MI,
if (I->hasLeadingSpace())
OS << ' ';
- // Make sure we have enough space in the spelling buffer.
- if (I->getLength() > SpellingBuffer.size())
- SpellingBuffer.resize(I->getLength());
- const char *Buffer = SpellingBuffer.data();
- unsigned SpellingLen = PP.getSpelling(*I, Buffer);
- OS.write(Buffer, SpellingLen);
+ OS << PP.getSpelling(*I, SpellingBuffer);
}
}
diff --git a/lib/Frontend/RewriteObjC.cpp b/lib/Frontend/RewriteObjC.cpp
index 9dade66d4ab4..a13bccbb91ec 100644
--- a/lib/Frontend/RewriteObjC.cpp
+++ b/lib/Frontend/RewriteObjC.cpp
@@ -26,6 +26,7 @@
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/DenseSet.h"
+
using namespace clang;
using llvm::utostr;
@@ -120,8 +121,10 @@ namespace {
// Block expressions.
llvm::SmallVector<BlockExpr *, 32> Blocks;
+ llvm::SmallVector<int, 32> InnerDeclRefsCount;
+ llvm::SmallVector<BlockDeclRefExpr *, 32> InnerDeclRefs;
+
llvm::SmallVector<BlockDeclRefExpr *, 32> BlockDeclRefs;
- llvm::DenseMap<BlockDeclRefExpr *, CallExpr *> BlockCallExprs;
// Block related declarations.
llvm::SmallVector<ValueDecl *, 8> BlockByCopyDecls;
@@ -253,6 +256,8 @@ namespace {
void RewriteInterfaceDecl(ObjCInterfaceDecl *Dcl);
void RewriteImplementationDecl(Decl *Dcl);
void RewriteObjCMethodDecl(ObjCMethodDecl *MDecl, std::string &ResultStr);
+ void RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType);
void RewriteByRefString(std::string &ResultStr, const std::string &Name,
ValueDecl *VD);
void RewriteCategoryDecl(ObjCCategoryDecl *Dcl);
@@ -301,8 +306,12 @@ namespace {
Stmt *RewriteObjCForCollectionStmt(ObjCForCollectionStmt *S,
SourceLocation OrigEnd);
CallExpr *SynthesizeCallToFunctionDecl(FunctionDecl *FD,
- Expr **args, unsigned nargs);
- Stmt *SynthMessageExpr(ObjCMessageExpr *Exp);
+ Expr **args, unsigned nargs,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
+ Stmt *SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc=SourceLocation(),
+ SourceLocation EndLoc=SourceLocation());
Stmt *RewriteBreakStmt(BreakStmt *S);
Stmt *RewriteContinueStmt(ContinueStmt *S);
void SynthCountByEnumWithState(std::string &buf);
@@ -342,7 +351,7 @@ namespace {
std::string &Result);
void SynthesizeObjCInternalStruct(ObjCInterfaceDecl *CDecl,
std::string &Result);
- void SynthesizeIvarOffsetComputation(ObjCImplementationDecl *IDecl,
+ void SynthesizeIvarOffsetComputation(ObjCContainerDecl *IDecl,
ObjCIvarDecl *ivar,
std::string &Result);
void RewriteImplementations();
@@ -379,8 +388,10 @@ namespace {
void RewriteRecordBody(RecordDecl *RD);
void CollectBlockDeclRefInfo(BlockExpr *Exp);
- void GetBlockCallExprs(Stmt *S);
void GetBlockDeclRefExprs(Stmt *S);
+ void GetInnerBlockDeclRefExprs(Stmt *S,
+ llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts);
// We avoid calling Type::isBlockPointerType(), since it operates on the
// canonical type. We only care if the top-level type is a closure pointer.
@@ -412,7 +423,8 @@ namespace {
void RewriteCastExpr(CStyleCastExpr *CE);
FunctionDecl *SynthBlockInitFunctionDecl(const char *name);
- Stmt *SynthBlockInitExpr(BlockExpr *Exp);
+ Stmt *SynthBlockInitExpr(BlockExpr *Exp,
+ const llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs);
void QuoteDoublequotes(std::string &From, std::string &To) {
for (unsigned i = 0; i < From.length(); i++) {
@@ -547,7 +559,7 @@ void RewriteObjC::Initialize(ASTContext &context) {
Preamble += "#define __OBJC_RW_DLLIMPORT extern \"C\" __declspec(dllimport)\n";
Preamble += "#define __OBJC_RW_STATICIMPORT extern \"C\"\n";
} else
- Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
+ Preamble += "#define __OBJC_RW_DLLIMPORT extern\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSend";
Preamble += "(struct objc_object *, struct objc_selector *, ...);\n";
Preamble += "__OBJC_RW_DLLIMPORT struct objc_object *objc_msgSendSuper";
@@ -606,7 +618,8 @@ void RewriteObjC::Initialize(ASTContext &context) {
Preamble += "};\n";
Preamble += "// Runtime copy/destroy helper functions (from Block_private.h)\n";
Preamble += "#ifdef __OBJC_EXPORT_BLOCKS\n";
- Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_assign(void *, const void *, const int);\n";
+ Preamble += "extern \"C\" __declspec(dllexport) "
+ "void _Block_object_assign(void *, const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void _Block_object_dispose(const void *, const int);\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteGlobalBlock[32];\n";
Preamble += "extern \"C\" __declspec(dllexport) void *_NSConcreteStackBlock[32];\n";
@@ -627,6 +640,9 @@ void RewriteObjC::Initialize(ASTContext &context) {
Preamble += "#define __block\n";
Preamble += "#define __weak\n";
}
+ // NOTE: Windows uses LLP64 in 64-bit mode, so cast the pointer to long long;
+ // this avoids warnings in both the 64-bit and 32-bit compilation models.
+ Preamble += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long long) &((TYPE *)0)->MEMBER)\n";
}
@@ -750,6 +766,8 @@ static std::string getIvarAccessString(ObjCInterfaceDecl *ClassDecl,
void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
ObjCImplementationDecl *IMD,
ObjCCategoryImplDecl *CID) {
+ static bool objcGetPropertyDefined = false;
+ static bool objcSetPropertyDefined = false;
SourceLocation startLoc = PID->getLocStart();
InsertText(startLoc, "// ");
const char *startBuf = SM->getCharacterData(startLoc);
@@ -769,15 +787,55 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
if (!OID)
return;
-
+ unsigned Attributes = PD->getPropertyAttributes();
+ bool GenGetProperty = !(Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic) &&
+ (Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy));
std::string Getr;
+ if (GenGetProperty && !objcGetPropertyDefined) {
+ objcGetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Getr = "\nextern \"C\" __declspec(dllimport) "
+ "id objc_getProperty(id, SEL, long, bool);\n";
+ }
RewriteObjCMethodDecl(PD->getGetterMethodDecl(), Getr);
Getr += "{ ";
// Synthesize an explicit cast to gain access to the ivar.
- // FIXME: deal with code generation implications for various property
- // attributes (copy, retain, nonatomic).
// See objc-act.c:objc_synthesize_new_getter() for details.
- Getr += "return " + getIvarAccessString(ClassDecl, OID);
+ if (GenGetProperty) {
+ // return objc_getProperty(self, _cmd, offsetof(ClassDecl, OID), 1)
+ Getr += "typedef ";
+ const FunctionType *FPRetType = 0;
+ RewriteTypeIntoString(PD->getGetterMethodDecl()->getResultType(), Getr,
+ FPRetType);
+ Getr += " _TYPE";
+ if (FPRetType) {
+ Getr += ")"; // close the precedence "scope" for "*".
+
+ // Now, emit the argument types (if any).
+ if (const FunctionProtoType *FT = dyn_cast<FunctionProtoType>(FPRetType)) {
+ Getr += "(";
+ for (unsigned i = 0, e = FT->getNumArgs(); i != e; ++i) {
+ if (i) Getr += ", ";
+ std::string ParamStr = FT->getArgType(i).getAsString();
+ Getr += ParamStr;
+ }
+ if (FT->isVariadic()) {
+ if (FT->getNumArgs()) Getr += ", ";
+ Getr += "...";
+ }
+ Getr += ")";
+ } else
+ Getr += "()";
+ }
+ Getr += ";\n";
+ Getr += "return (_TYPE)";
+ Getr += "objc_getProperty(self, _cmd, ";
+ SynthesizeIvarOffsetComputation(ClassDecl, OID, Getr);
+ Getr += ", 1)";
+ }
+ else
+ Getr += "return " + getIvarAccessString(ClassDecl, OID);
Getr += "; }";
InsertText(onePastSemiLoc, Getr);
if (PD->isReadOnly())
@@ -785,14 +843,38 @@ void RewriteObjC::RewritePropertyImplDecl(ObjCPropertyImplDecl *PID,
// Generate the 'setter' function.
std::string Setr;
+ bool GenSetProperty = Attributes & (ObjCPropertyDecl::OBJC_PR_retain |
+ ObjCPropertyDecl::OBJC_PR_copy);
+ if (GenSetProperty && !objcSetPropertyDefined) {
+ objcSetPropertyDefined = true;
+ // FIXME. Is this attribute correct in all cases?
+ Setr = "\nextern \"C\" __declspec(dllimport) "
+ "void objc_setProperty (id, SEL, long, id, bool, bool);\n";
+ }
+
RewriteObjCMethodDecl(PD->getSetterMethodDecl(), Setr);
Setr += "{ ";
// Synthesize an explicit cast to initialize the ivar.
- // FIXME: deal with code generation implications for various property
- // attributes (copy, retain, nonatomic).
// See objc-act.c:objc_synthesize_new_setter() for details.
- Setr += getIvarAccessString(ClassDecl, OID) + " = ";
- Setr += PD->getNameAsCString();
+ if (GenSetProperty) {
+ Setr += "objc_setProperty (self, _cmd, ";
+ SynthesizeIvarOffsetComputation(ClassDecl, OID, Setr);
+ Setr += ", (id)";
+ Setr += PD->getNameAsCString();
+ Setr += ", ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_nonatomic)
+ Setr += "0, ";
+ else
+ Setr += "1, ";
+ if (Attributes & ObjCPropertyDecl::OBJC_PR_copy)
+ Setr += "1)";
+ else
+ Setr += "0)";
+ }
+ else {
+ Setr += getIvarAccessString(ClassDecl, OID) + " = ";
+ Setr += PD->getNameAsCString();
+ }
Setr += "; }";
InsertText(onePastSemiLoc, Setr);
}
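For an atomic retain or copy property, the rewritten accessors now route through the runtime helpers instead of touching the ivar directly; the synthesized code looks approximately like this (class, property, and ivar names are illustrative):

    extern "C" __declspec(dllimport) id objc_getProperty(id, SEL, long, bool);
    extern "C" __declspec(dllimport)
    void objc_setProperty (id, SEL, long, id, bool, bool);

    // Approximate shape for @property(retain) id name; on class MyClass.
    static id _I_MyClass_name(MyClass * self, SEL _cmd) {
      typedef id _TYPE;
      return (_TYPE)objc_getProperty(self, _cmd,
                                     __OFFSETOFIVAR__(MyClass, name), 1);
    }
    static void _I_MyClass_setName_(MyClass * self, SEL _cmd, id name) {
      objc_setProperty (self, _cmd, __OFFSETOFIVAR__(MyClass, name),
                        (id)name, /*atomic=*/1, /*copy=*/0);
    }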
@@ -929,18 +1011,15 @@ void RewriteObjC::RewriteForwardProtocolDecl(ObjCForwardProtocolDecl *PDecl) {
ReplaceText(LocStart, 0, "// ");
}
-void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
- std::string &ResultStr) {
- //fprintf(stderr,"In RewriteObjCMethodDecl\n");
- const FunctionType *FPRetType = 0;
- ResultStr += "\nstatic ";
- if (OMD->getResultType()->isObjCQualifiedIdType())
+void RewriteObjC::RewriteTypeIntoString(QualType T, std::string &ResultStr,
+ const FunctionType *&FPRetType) {
+ if (T->isObjCQualifiedIdType())
ResultStr += "id";
- else if (OMD->getResultType()->isFunctionPointerType() ||
- OMD->getResultType()->isBlockPointerType()) {
+ else if (T->isFunctionPointerType() ||
+ T->isBlockPointerType()) {
// needs special handling, since pointer-to-functions have special
// syntax (where a declaration models use).
- QualType retType = OMD->getResultType();
+ QualType retType = T;
QualType PointeeTy;
if (const PointerType* PT = retType->getAs<PointerType>())
PointeeTy = PT->getPointeeType();
@@ -951,7 +1030,15 @@ void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
ResultStr += "(*";
}
} else
- ResultStr += OMD->getResultType().getAsString();
+ ResultStr += T.getAsString();
+}
+
+void RewriteObjC::RewriteObjCMethodDecl(ObjCMethodDecl *OMD,
+ std::string &ResultStr) {
+ //fprintf(stderr,"In RewriteObjCMethodDecl\n");
+ const FunctionType *FPRetType = 0;
+ ResultStr += "\nstatic ";
+ RewriteTypeIntoString(OMD->getResultType(), ResultStr, FPRetType);
ResultStr += " ";
// Unique method name
@@ -1952,7 +2039,8 @@ Stmt *RewriteObjC::RewriteAtSelector(ObjCSelectorExpr *Exp) {
}
CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
- FunctionDecl *FD, Expr **args, unsigned nargs) {
+ FunctionDecl *FD, Expr **args, unsigned nargs, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
// Get the type, we will need to reference it in a couple spots.
QualType msgSendType = FD->getType();
@@ -1968,8 +2056,10 @@ CallExpr *RewriteObjC::SynthesizeCallToFunctionDecl(
const FunctionType *FT = msgSendType->getAs<FunctionType>();
- return new (Context) CallExpr(*Context, ICE, args, nargs, FT->getResultType(),
- SourceLocation());
+ CallExpr *Exp =
+ new (Context) CallExpr(*Context, ICE, args, nargs, FT->getResultType(),
+ EndLoc);
+ return Exp;
}
static bool scanForProtocolRefs(const char *startBuf, const char *endBuf,
@@ -2165,8 +2255,10 @@ void RewriteObjC::SynthSelGetUidFunctionDecl() {
llvm::SmallVector<QualType, 16> ArgTys;
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getFuncType = Context->getFunctionType(Context->getObjCSelType(),
- &ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0);
+ &ArgTys[0], ArgTys.size(),
+ false /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SelGetUidIdent, getFuncType, 0,
@@ -2196,6 +2288,36 @@ static void RewriteBlockPointerType(std::string& Str, QualType Type) {
}
}
+// FIXME. Consolidate this routine with RewriteBlockPointerType.
+static void RewriteBlockPointerTypeVariable(std::string& Str, ValueDecl *VD) {
+ QualType Type = VD->getType();
+ std::string TypeString(Type.getAsString());
+ const char *argPtr = TypeString.c_str();
+ int paren = 0;
+ while (*argPtr) {
+ switch (*argPtr) {
+ case '(':
+ Str += *argPtr;
+ paren++;
+ break;
+ case ')':
+ Str += *argPtr;
+ paren--;
+ break;
+ case '^':
+ Str += '*';
+ if (paren == 1)
+ Str += VD->getNameAsString();
+ break;
+ default:
+ Str += *argPtr;
+ break;
+ }
+ argPtr++;
+ }
+}
+
+
void RewriteObjC::RewriteBlockLiteralFunctionDecl(FunctionDecl *FD) {
SourceLocation FunLocStart = FD->getTypeSpecStartLoc();
const FunctionType *funcType = FD->getType()->getAs<FunctionType>();
@@ -2231,7 +2353,9 @@ void RewriteObjC::SynthSuperContructorFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- false, 0);
+ false, 0,
+ false, false, 0, 0, false,
+ CC_Default);
SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2250,7 +2374,9 @@ void RewriteObjC::SynthMsgSendFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0);
+ true /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2272,7 +2398,9 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0);
+ true /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2291,7 +2419,9 @@ void RewriteObjC::SynthMsgSendStretFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0);
+ true /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2315,7 +2445,9 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0);
+ true /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2334,7 +2466,9 @@ void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
ArgTys.push_back(argT);
QualType msgSendType = Context->getFunctionType(Context->DoubleTy,
&ArgTys[0], ArgTys.size(),
- true /*isVariadic*/, 0);
+ true /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2348,7 +2482,9 @@ void RewriteObjC::SynthGetClassFunctionDecl() {
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0);
+ false /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2362,7 +2498,9 @@ void RewriteObjC::SynthGetMetaClassFunctionDecl() {
ArgTys.push_back(Context->getPointerType(Context->CharTy.withConst()));
QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
- false /*isVariadic*/, 0);
+ false /*isVariadic*/, 0,
+ false, false, 0, 0, false,
+ CC_Default);
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2485,7 +2623,9 @@ QualType RewriteObjC::getConstantStringStructType() {
return Context->getTagDeclType(ConstantStringDecl);
}
-Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
+Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
+ SourceLocation StartLoc,
+ SourceLocation EndLoc) {
if (!SelGetUidFunctionDecl)
SynthSelGetUidFunctionDecl();
if (!MsgSendFunctionDecl)
@@ -2551,7 +2691,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
false, argType, SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetMetaClassFunctionDecl,
&ClsExprs[0],
- ClsExprs.size());
+ ClsExprs.size(),
+ StartLoc,
+ EndLoc);
// To turn off a warning, type-cast to 'id'
InitExprs.push_back( // set 'super class', using objc_getClass().
NoTypeInfoCStyleCastExpr(Context,
@@ -2606,7 +2748,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
- ClsExprs.size());
+ ClsExprs.size(),
+ StartLoc, EndLoc);
MsgExprs.push_back(Cls);
}
} else { // instance message.
@@ -2636,7 +2779,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
false, argType, SourceLocation()));
CallExpr *Cls = SynthesizeCallToFunctionDecl(GetClassFunctionDecl,
&ClsExprs[0],
- ClsExprs.size());
+ ClsExprs.size(),
+ StartLoc, EndLoc);
// To turn off a warning, type-cast to 'id'
InitExprs.push_back(
// set 'super class', using objc_getClass().
@@ -2695,7 +2839,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
Exp->getSelector().getAsString().size(),
false, argType, SourceLocation()));
CallExpr *SelExp = SynthesizeCallToFunctionDecl(SelGetUidFunctionDecl,
- &SelExprs[0], SelExprs.size());
+ &SelExprs[0], SelExprs.size(),
+ StartLoc,
+ EndLoc);
MsgExprs.push_back(SelExp);
// Now push any user supplied arguments.
@@ -2752,6 +2898,10 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
}
returnType = OMD->getResultType()->isObjCQualifiedIdType()
? Context->getObjCIdType() : OMD->getResultType();
+ if (isTopLevelBlockPointerType(returnType)) {
+ const BlockPointerType *BPT = returnType->getAs<BlockPointerType>();
+ returnType = Context->getPointerType(BPT->getPointeeType());
+ }
} else {
returnType = Context->getObjCIdType();
}
@@ -2774,18 +2924,20 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
QualType castType = Context->getFunctionType(returnType,
&ArgTypes[0], ArgTypes.size(),
// If we don't have a method decl, force a variadic cast.
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true, 0);
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true, 0,
+ false, false, 0, 0, false,
+ CC_Default);
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CastExpr::CK_Unknown,
cast);
// Don't forget the parens to enforce the proper binding.
- ParenExpr *PE = new (Context) ParenExpr(SourceLocation(), SourceLocation(), cast);
+ ParenExpr *PE = new (Context) ParenExpr(StartLoc, EndLoc, cast);
const FunctionType *FT = msgSendType->getAs<FunctionType>();
CallExpr *CE = new (Context) CallExpr(*Context, PE, &MsgExprs[0],
MsgExprs.size(),
- FT->getResultType(), SourceLocation());
+ FT->getResultType(), EndLoc);
Stmt *ReplacingStmt = CE;
if (MsgSendStretFlavor) {
// We have the method which returns a struct/union. Must also generate
@@ -2803,7 +2955,9 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
// Now do the "normal" pointer to function cast.
castType = Context->getFunctionType(returnType,
&ArgTypes[0], ArgTypes.size(),
- Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false, 0);
+ Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false, 0,
+ false, false, 0, 0, false,
+ CC_Default);
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CastExpr::CK_Unknown,
cast);
@@ -2846,7 +3000,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp) {
}
Stmt *RewriteObjC::RewriteMessageExpr(ObjCMessageExpr *Exp) {
- Stmt *ReplacingStmt = SynthMessageExpr(Exp);
+ Stmt *ReplacingStmt = SynthMessageExpr(Exp, Exp->getLocStart(),
+ Exp->getLocEnd());
// Now do the actual rewrite.
ReplaceStmt(Exp, ReplacingStmt);
@@ -3430,7 +3585,7 @@ void RewriteObjC::RewriteObjCCategoryImplDecl(ObjCCategoryImplDecl *IDecl,
/// SynthesizeIvarOffsetComputation - This routine synthesizes computation of
/// ivar offset.
-void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCImplementationDecl *IDecl,
+void RewriteObjC::SynthesizeIvarOffsetComputation(ObjCContainerDecl *IDecl,
ObjCIvarDecl *ivar,
std::string &Result) {
if (ivar->isBitField()) {
@@ -3504,12 +3659,12 @@ void RewriteObjC::RewriteObjCClassMetaData(ObjCImplementationDecl *IDecl,
ObjCInterfaceDecl::ivar_iterator IVI, IVE;
llvm::SmallVector<ObjCIvarDecl *, 8> IVars;
if (!IDecl->ivar_empty()) {
- for (ObjCImplementationDecl::ivar_iterator
+ for (ObjCInterfaceDecl::ivar_iterator
IV = IDecl->ivar_begin(), IVEnd = IDecl->ivar_end();
IV != IVEnd; ++IV)
IVars.push_back(*IV);
- IVI = IVars.begin();
- IVE = IVars.end();
+ IVI = IDecl->ivar_begin();
+ IVE = IDecl->ivar_end();
} else {
IVI = CDecl->ivar_begin();
IVE = CDecl->ivar_end();
@@ -3728,9 +3883,7 @@ void RewriteObjC::RewriteImplementations() {
void RewriteObjC::SynthesizeMetaDataIntoBuffer(std::string &Result) {
int ClsDefCount = ClassImplementation.size();
int CatDefCount = CategoryImplementation.size();
-
- // This is needed for determining instance variable offsets.
- Result += "\n#define __OFFSETOFIVAR__(TYPE, MEMBER) ((long) &((TYPE *)0)->MEMBER)\n";
+
// For each implemented class, write out all its meta data.
for (int i = 0; i < ClsDefCount; i++)
RewriteObjCClassMetaData(ClassImplementation[i], Result);
@@ -3888,7 +4041,6 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
for (llvm::SmallVector<ValueDecl*,8>::iterator I = BlockByCopyDecls.begin(),
E = BlockByCopyDecls.end(); I != E; ++I) {
S += " ";
- std::string Name = (*I)->getNameAsString();
// Handle nested closure invocation. For example:
//
// void (^myImportedClosure)(void);
@@ -3899,11 +4051,19 @@ std::string RewriteObjC::SynthesizeBlockFunc(BlockExpr *CE, int i,
// myImportedClosure(); // import and invoke the closure
// };
//
- if (isTopLevelBlockPointerType((*I)->getType()))
- S += "struct __block_impl *";
- else
+ if (isTopLevelBlockPointerType((*I)->getType())) {
+ RewriteBlockPointerTypeVariable(S, (*I));
+ S += " = (";
+ RewriteBlockPointerType(S, (*I)->getType());
+ S += ")";
+ S += "__cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ }
+ else {
+ std::string Name = (*I)->getNameAsString();
(*I)->getType().getAsStringInternal(Name, Context->PrintingPolicy);
- S += Name + " = __cself->" + (*I)->getNameAsString() + "; // bound by copy\n";
+ S += Name + " = __cself->" +
+ (*I)->getNameAsString() + "; // bound by copy\n";
+ }
}
std::string RewrittenStr = RewrittenBlockExprs[CE];
const char *cstr = RewrittenStr.c_str();
@@ -4107,9 +4267,23 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
if (CurFunctionDeclToDeclareForBlock && !Blocks.empty())
RewriteBlockLiteralFunctionDecl(CurFunctionDeclToDeclareForBlock);
// Insert closures that were part of the function.
- for (unsigned i = 0; i < Blocks.size(); i++) {
-
+ for (unsigned i = 0, count=0; i < Blocks.size(); i++) {
CollectBlockDeclRefInfo(Blocks[i]);
+ // Also copy in the variables captured by inner blocks, even when they are
+ // not used directly in this block.
+ for (int j = 0; j < InnerDeclRefsCount[i]; j++) {
+ BlockDeclRefExpr *Exp = InnerDeclRefs[count++];
+ ValueDecl *VD = Exp->getDecl();
+ BlockDeclRefs.push_back(Exp);
+ if (!Exp->isByRef() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (Exp->isByRef() && !BlockByRefDeclsPtrSet.count(VD)) {
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
std::string ImplTag = "__" + std::string(FunName) + "_block_impl_" + utostr(i);
std::string DescTag = "__" + std::string(FunName) + "_block_desc_" + utostr(i);
@@ -4135,10 +4309,11 @@ void RewriteObjC::SynthesizeBlockLiterals(SourceLocation FunLocStart,
BlockByRefDeclsPtrSet.clear();
BlockByCopyDecls.clear();
BlockByCopyDeclsPtrSet.clear();
- BlockCallExprs.clear();
ImportedBlockDecls.clear();
}
Blocks.clear();
+ InnerDeclRefsCount.clear();
+ InnerDeclRefs.clear();
RewrittenBlockExprs.clear();
}
@@ -4186,21 +4361,30 @@ void RewriteObjC::GetBlockDeclRefExprs(Stmt *S) {
return;
}
-void RewriteObjC::GetBlockCallExprs(Stmt *S) {
+void RewriteObjC::GetInnerBlockDeclRefExprs(Stmt *S,
+ llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs,
+ llvm::SmallPtrSet<const DeclContext *, 8> &InnerContexts) {
for (Stmt::child_iterator CI = S->child_begin(), E = S->child_end();
CI != E; ++CI)
if (*CI) {
- if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI))
- GetBlockCallExprs(CBE->getBody());
+ if (BlockExpr *CBE = dyn_cast<BlockExpr>(*CI)) {
+ InnerContexts.insert(cast<DeclContext>(CBE->getBlockDecl()));
+ GetInnerBlockDeclRefExprs(CBE->getBody(),
+ InnerBlockDeclRefs,
+ InnerContexts);
+ }
else
- GetBlockCallExprs(*CI);
- }
+ GetInnerBlockDeclRefExprs(*CI,
+ InnerBlockDeclRefs,
+ InnerContexts);
- if (CallExpr *CE = dyn_cast<CallExpr>(S)) {
- if (CE->getCallee()->getType()->isBlockPointerType()) {
- BlockCallExprs[dyn_cast<BlockDeclRefExpr>(CE->getCallee())] = CE;
}
- }
+ // Record block decl refs whose declarations come from outside the nested blocks.
+ if (BlockDeclRefExpr *CDRE = dyn_cast<BlockDeclRefExpr>(S))
+ if (!isa<FunctionDecl>(CDRE->getDecl()) &&
+ !InnerContexts.count(CDRE->getDecl()->getDeclContext()))
+ InnerBlockDeclRefs.push_back(CDRE);
+
return;
}
@@ -4269,7 +4453,9 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
}
// Now do the pointer to function cast.
QualType PtrToFuncCastType = Context->getFunctionType(Exp->getType(),
- &ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0);
+ &ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0,
+ false, false, 0, 0,
+ false, CC_Default);
PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
@@ -4379,7 +4565,7 @@ void RewriteObjC::RewriteCastExpr(CStyleCastExpr *CE) {
const TypeOfExprType *TypeOfExprTypePtr = cast<TypeOfExprType>(TypePtr);
QT = TypeOfExprTypePtr->getUnderlyingExpr()->getType();
std::string TypeAsString = "(";
- TypeAsString += QT.getAsString();
+ RewriteBlockPointerType(TypeAsString, QT);
TypeAsString += ")";
ReplaceText(LocStart, endBuf-startBuf+1, TypeAsString);
return;
@@ -4605,6 +4791,10 @@ void RewriteObjC::RewriteByRefVar(VarDecl *ND) {
int flag = 0;
int isa = 0;
SourceLocation DeclLoc = ND->getTypeSpecStartLoc();
+ if (DeclLoc.isInvalid())
+ // If the type location is missing, the type itself was missing (which only
+ // produced a warning), so fall back to the variable's location.
+ DeclLoc = ND->getLocation();
const char *startBuf = SM->getCharacterData(DeclLoc);
SourceLocation X = ND->getLocEnd();
X = SM->getInstantiationLoc(X);
@@ -4758,10 +4948,8 @@ void RewriteObjC::CollectBlockDeclRefInfo(BlockExpr *Exp) {
for (unsigned i = 0; i < BlockDeclRefs.size(); i++)
if (BlockDeclRefs[i]->isByRef() ||
BlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
- BlockDeclRefs[i]->getType()->isBlockPointerType()) {
- GetBlockCallExprs(BlockDeclRefs[i]);
+ BlockDeclRefs[i]->getType()->isBlockPointerType())
ImportedBlockDecls.insert(BlockDeclRefs[i]->getDecl());
- }
}
}
@@ -4773,10 +4961,43 @@ FunctionDecl *RewriteObjC::SynthBlockInitFunctionDecl(const char *name) {
false);
}
-Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp) {
+Stmt *RewriteObjC::SynthBlockInitExpr(BlockExpr *Exp,
+ const llvm::SmallVector<BlockDeclRefExpr *, 8> &InnerBlockDeclRefs) {
Blocks.push_back(Exp);
CollectBlockDeclRefInfo(Exp);
+
+ // Add inner imported variables now used in current block.
+ int countOfInnerDecls = 0;
+ if (!InnerBlockDeclRefs.empty()) {
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++) {
+ BlockDeclRefExpr *Exp = InnerBlockDeclRefs[i];
+ ValueDecl *VD = Exp->getDecl();
+ if (!Exp->isByRef() && !BlockByCopyDeclsPtrSet.count(VD)) {
+ // We need to save the variables copied in by nested blocks because they
+ // are needed later for some of the API generation.
+ // See SynthesizeBlockLiterals routine.
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByCopyDeclsPtrSet.insert(VD);
+ BlockByCopyDecls.push_back(VD);
+ }
+ if (Exp->isByRef() && !BlockByRefDeclsPtrSet.count(VD)) {
+ InnerDeclRefs.push_back(Exp); countOfInnerDecls++;
+ BlockDeclRefs.push_back(Exp);
+ BlockByRefDeclsPtrSet.insert(VD);
+ BlockByRefDecls.push_back(VD);
+ }
+ }
+ // Find any imported blocks...they will need special attention.
+ for (unsigned i = 0; i < InnerBlockDeclRefs.size(); i++)
+ if (InnerBlockDeclRefs[i]->isByRef() ||
+ InnerBlockDeclRefs[i]->getType()->isObjCObjectPointerType() ||
+ InnerBlockDeclRefs[i]->getType()->isBlockPointerType())
+ ImportedBlockDecls.insert(InnerBlockDeclRefs[i]->getDecl());
+ }
+ InnerDeclRefsCount.push_back(countOfInnerDecls);
+
std::string FuncName;
if (CurFunctionDef)
@@ -4955,6 +5176,11 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
}
if (BlockExpr *BE = dyn_cast<BlockExpr>(S)) {
+ llvm::SmallVector<BlockDeclRefExpr *, 8> InnerBlockDeclRefs;
+ llvm::SmallPtrSet<const DeclContext *, 8> InnerContexts;
+ InnerContexts.insert(BE->getBlockDecl());
+ GetInnerBlockDeclRefExprs(BE->getBody(),
+ InnerBlockDeclRefs, InnerContexts);
// Rewrite the block body in place.
RewriteFunctionBodyOrGlobalInitializer(BE->getBody());
@@ -4962,7 +5188,8 @@ Stmt *RewriteObjC::RewriteFunctionBodyOrGlobalInitializer(Stmt *S) {
std::string Str = Rewrite.getRewrittenText(BE->getSourceRange());
RewrittenBlockExprs[BE] = Str;
- Stmt *blockTranscribed = SynthBlockInitExpr(BE);
+ Stmt *blockTranscribed = SynthBlockInitExpr(BE, InnerBlockDeclRefs);
+
//blockTranscribed->dump();
ReplaceStmt(S, blockTranscribed);
return blockTranscribed;
@@ -5281,11 +5508,6 @@ void RewriteObjC::HandleDeclInMainFile(Decl *D) {
RewriteBlockPointerDecl(TD);
else if (TD->getUnderlyingType()->isFunctionPointerType())
CheckFunctionPointerDecl(TD->getUnderlyingType(), TD);
- else if (TD->getUnderlyingType()->isRecordType()) {
- RecordDecl *RD = TD->getUnderlyingType()->getAs<RecordType>()->getDecl();
- if (RD->isDefinition())
- RewriteRecordBody(RD);
- }
return;
}
if (RecordDecl *RD = dyn_cast<RecordDecl>(D)) {
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index 9ec5ffe1c353..d2aa5480b4b1 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -31,7 +31,7 @@ static const enum llvm::raw_ostream::Colors warningColor =
llvm::raw_ostream::MAGENTA;
static const enum llvm::raw_ostream::Colors errorColor = llvm::raw_ostream::RED;
static const enum llvm::raw_ostream::Colors fatalColor = llvm::raw_ostream::RED;
-// used for changing only the bold attribute
+// Used for changing only the bold attribute.
static const enum llvm::raw_ostream::Colors savedColor =
llvm::raw_ostream::SAVEDCOLOR;
@@ -682,6 +682,9 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
// file+line+column number prefix is.
uint64_t StartOfLocationInfo = OS.tell();
+ if (!Prefix.empty())
+ OS << Prefix << ": ";
+
// If the location is specified, print out a file/line/col and include trace
// if enabled.
if (Info.getLocation().isValid()) {
@@ -786,12 +789,15 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
llvm::SmallString<100> OutStr;
Info.FormatDiagnostic(OutStr);
- if (DiagOpts->ShowOptionNames)
+ if (DiagOpts->ShowOptionNames) {
if (const char *Opt = Diagnostic::getWarningOptionForDiag(Info.getID())) {
OutStr += " [-W";
OutStr += Opt;
OutStr += ']';
+ } else if (Diagnostic::isBuiltinExtensionDiag(Info.getID())) {
+ OutStr += " [-pedantic]";
}
+ }
if (DiagOpts->ShowColors) {
// Print warnings, errors and fatal errors in bold, no color
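In rough terms, the hunk above appends a controlling-flag hint to the formatted message: the "-W" option name when one exists, otherwise "[-pedantic]" for built-in extension diagnostics. A minimal sketch of that logic, using std::string and an illustrative helper name:

    // Sketch only: mirrors the two Diagnostic calls used in the patch.
    static void appendOptionHint(std::string &OutStr, unsigned DiagID) {
      if (const char *Opt = Diagnostic::getWarningOptionForDiag(DiagID)) {
        OutStr += " [-W";          // diagnostic is controlled by a -W flag
        OutStr += Opt;
        OutStr += ']';
      } else if (Diagnostic::isBuiltinExtensionDiag(DiagID)) {
        OutStr += " [-pedantic]";  // extension diagnostics map to -pedantic
      }
    }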
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index b59c7e824bf1..2f3888bebc76 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -397,6 +397,12 @@ _mm_cvtss_si32(__m128 a)
return __builtin_ia32_cvtss2si(a);
}
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvt_ss2si(__m128 a)
+{
+ return _mm_cvtss_si32(a);
+}
+
#ifdef __x86_64__
static inline long long __attribute__((__always_inline__, __nodebug__))
@@ -419,6 +425,12 @@ _mm_cvttss_si32(__m128 a)
return a[0];
}
+static inline int __attribute__((__always_inline__, __nodebug__))
+_mm_cvtt_ss2si(__m128 a)
+{
+ return _mm_cvttss_si32(a);
+}
+
static inline long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si64(__m128 a)
{
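For illustration, a small self-contained usage sketch of the two compatibility aliases added above; the function name convert_low_float is made up for the example.

    #include <xmmintrin.h>

    // Both aliases simply forward to the existing conversions.
    int convert_low_float(__m128 v, int *truncated) {
      *truncated = _mm_cvtt_ss2si(v);  // same as _mm_cvttss_si32(v): truncate
      return _mm_cvt_ss2si(v);         // same as _mm_cvtss_si32(v): round per MXCSR
    }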
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 4803c5ab85d5..976c94eda364 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -1024,13 +1024,9 @@ void Preprocessor::HandleIncludeDirective(Token &IncludeTok,
return;
case tok::angle_string_literal:
- case tok::string_literal: {
- FilenameBuffer.resize(FilenameTok.getLength());
- const char *FilenameStart = &FilenameBuffer[0];
- unsigned Len = getSpelling(FilenameTok, FilenameStart);
- Filename = llvm::StringRef(FilenameStart, Len);
+ case tok::string_literal:
+ Filename = getSpelling(FilenameTok, FilenameBuffer);
break;
- }
case tok::less:
// This could be a <foo/bar.h> file coming from a macro expansion. In this
diff --git a/lib/Lex/PPExpressions.cpp b/lib/Lex/PPExpressions.cpp
index 2a6b2a729417..ede129edcb6f 100644
--- a/lib/Lex/PPExpressions.cpp
+++ b/lib/Lex/PPExpressions.cpp
@@ -106,7 +106,7 @@ static bool EvaluateDefined(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Consume identifier.
Result.setEnd(PeekTok.getLocation());
- PP.LexNonComment(PeekTok);
+ PP.LexUnexpandedToken(PeekTok);
// If we are in parens, ensure we have a trailing ).
if (LParenLoc.isValid()) {
@@ -170,10 +170,8 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
return true;
case tok::numeric_constant: {
llvm::SmallString<64> IntegerBuffer;
- IntegerBuffer.resize(PeekTok.getLength());
- const char *ThisTokBegin = &IntegerBuffer[0];
- unsigned ActualLength = PP.getSpelling(PeekTok, ThisTokBegin);
- NumericLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ llvm::StringRef Spelling = PP.getSpelling(PeekTok, IntegerBuffer);
+ NumericLiteralParser Literal(Spelling.begin(), Spelling.end(),
PeekTok.getLocation(), PP);
if (Literal.hadError)
return true; // a diagnostic was already reported.
@@ -218,10 +216,9 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
}
case tok::char_constant: { // 'x'
llvm::SmallString<32> CharBuffer;
- CharBuffer.resize(PeekTok.getLength());
- const char *ThisTokBegin = &CharBuffer[0];
- unsigned ActualLength = PP.getSpelling(PeekTok, ThisTokBegin);
- CharLiteralParser Literal(ThisTokBegin, ThisTokBegin+ActualLength,
+ llvm::StringRef ThisTok = PP.getSpelling(PeekTok, CharBuffer);
+
+ CharLiteralParser Literal(ThisTok.begin(), ThisTok.end(),
PeekTok.getLocation(), PP);
if (Literal.hadError())
return true; // A diagnostic was already emitted.
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index b97ab2485d3d..d60cf0804f53 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -501,8 +501,10 @@ static bool HasFeature(const Preprocessor &PP, const IdentifierInfo *II) {
//.Case("cxx_variadic_templates", false)
.Case("attribute_ext_vector_type", true)
.Case("attribute_analyzer_noreturn", true)
- .Case("attribute_ns_returns_retained", true)
+ .Case("attribute_cf_returns_not_retained", true)
.Case("attribute_cf_returns_retained", true)
+ .Case("attribute_ns_returns_not_retained", true)
+ .Case("attribute_ns_returns_retained", true)
.Default(false);
}
@@ -539,13 +541,9 @@ static bool EvaluateHasIncludeCommon(bool &Result, Token &Tok,
return false;
case tok::angle_string_literal:
- case tok::string_literal: {
- FilenameBuffer.resize(Tok.getLength());
- const char *FilenameStart = &FilenameBuffer[0];
- unsigned Len = PP.getSpelling(Tok, FilenameStart);
- Filename = llvm::StringRef(FilenameStart, Len);
+ case tok::string_literal:
+ Filename = PP.getSpelling(Tok, FilenameBuffer);
break;
- }
case tok::less:
// This could be a <foo/bar.h> file coming from a macro expansion. In this
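A short illustration of the new __has_feature entries above; the test only checks feature advertisement and assumes nothing about how the attributes are used:

    #if __has_feature(attribute_ns_returns_not_retained) && \
        __has_feature(attribute_cf_returns_not_retained)
      // Both *_returns_not_retained attribute spellings are advertised.
    #endif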
diff --git a/lib/Lex/Pragma.cpp b/lib/Lex/Pragma.cpp
index 63b23b6d5c47..654d4606a959 100644
--- a/lib/Lex/Pragma.cpp
+++ b/lib/Lex/Pragma.cpp
@@ -287,11 +287,8 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
// Reserve a buffer to get the spelling.
llvm::SmallString<128> FilenameBuffer;
- FilenameBuffer.resize(FilenameTok.getLength());
+ llvm::StringRef Filename = getSpelling(FilenameTok, FilenameBuffer);
- const char *FilenameStart = &FilenameBuffer[0];
- unsigned Len = getSpelling(FilenameTok, FilenameStart);
- llvm::StringRef Filename(FilenameStart, Len);
bool isAngled =
GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index df0e702ab447..2c6ad6ee462c 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -40,7 +40,6 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
-#include <cstdio>
using namespace clang;
//===----------------------------------------------------------------------===//
@@ -365,6 +364,24 @@ unsigned Preprocessor::getSpelling(const Token &Tok,
return OutBuf-Buffer;
}
+/// getSpelling - This method is used to get the spelling of a token into a
+/// SmallVector. Note that the returned StringRef may not point to the
+/// supplied buffer if a copy can be avoided.
+llvm::StringRef Preprocessor::getSpelling(const Token &Tok,
+ llvm::SmallVectorImpl<char> &Buffer) const {
+ // Try the fast path.
+ if (const IdentifierInfo *II = Tok.getIdentifierInfo())
+ return II->getName();
+
+ // Resize the buffer if we need to copy into it.
+ if (Tok.needsCleaning())
+ Buffer.resize(Tok.getLength());
+
+ const char *Ptr = Buffer.data();
+ unsigned Len = getSpelling(Tok, Ptr);
+ return llvm::StringRef(Ptr, Len);
+}
+
/// CreateString - Plop the specified string into a scratch buffer and return a
/// location for it. If specified, the source location provides a source
/// location for the token.
@@ -503,10 +520,8 @@ IdentifierInfo *Preprocessor::LookUpIdentifierInfo(Token &Identifier,
} else {
// Cleaning needed, alloca a buffer, clean into it, then use the buffer.
llvm::SmallVector<char, 64> IdentifierBuffer;
- IdentifierBuffer.resize(Identifier.getLength());
- const char *TmpBuf = &IdentifierBuffer[0];
- unsigned Size = getSpelling(Identifier, TmpBuf);
- II = getIdentifierInfo(llvm::StringRef(TmpBuf, Size));
+ llvm::StringRef CleanedStr = getSpelling(Identifier, IdentifierBuffer);
+ II = getIdentifierInfo(CleanedStr);
}
Identifier.setIdentifierInfo(II);
return II;
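The call sites updated above (PPDirectives.cpp, PPExpressions.cpp, Pragma.cpp) all move to the same pattern; a minimal sketch, with PP, Tok, and Buffer as illustrative names:

    // Before: resize the buffer, pass a raw pointer, track the length by hand.
    // After: let getSpelling choose the cheapest source for the characters;
    // the returned StringRef may bypass Buffer entirely (e.g. for identifiers).
    llvm::SmallString<64> Buffer;
    llvm::StringRef Spelling = PP.getSpelling(Tok, Buffer);
    NumericLiteralParser Literal(Spelling.begin(), Spelling.end(),
                                 Tok.getLocation(), PP);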
diff --git a/lib/Parse/AttributeList.cpp b/lib/Parse/AttributeList.cpp
index df48e3a7861f..b96dff573dfa 100644
--- a/lib/Parse/AttributeList.cpp
+++ b/lib/Parse/AttributeList.cpp
@@ -57,6 +57,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
// FIXME: Hand generating this is neither smart nor efficient.
return llvm::StringSwitch<AttributeList::Kind>(AttrName)
.Case("weak", AT_weak)
+ .Case("weakref", AT_weakref)
.Case("pure", AT_pure)
.Case("mode", AT_mode)
.Case("used", AT_used)
@@ -82,6 +83,7 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("stdcall", AT_stdcall)
.Case("annotate", AT_annotate)
.Case("fastcall", AT_fastcall)
+ .Case("ibaction", AT_IBAction)
.Case("iboutlet", AT_IBOutlet)
.Case("noreturn", AT_noreturn)
.Case("noinline", AT_noinline)
@@ -111,7 +113,9 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("analyzer_noreturn", AT_analyzer_noreturn)
.Case("warn_unused_result", AT_warn_unused_result)
.Case("carries_dependency", AT_carries_dependency)
+ .Case("ns_returns_not_retained", AT_ns_returns_not_retained)
.Case("ns_returns_retained", AT_ns_returns_retained)
+ .Case("cf_returns_not_retained", AT_cf_returns_not_retained)
.Case("cf_returns_retained", AT_cf_returns_retained)
.Case("reqd_work_group_size", AT_reqd_wg_size)
.Case("no_instrument_function", AT_no_instrument_function)
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index 8aa69363beee..12c5b6c70483 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -564,10 +564,10 @@ Parser::DeclPtrTy Parser::ParseDeclarationAfterDeclarator(Declarator &D,
}
if (Init.isInvalid()) {
- SkipUntil(tok::semi, true, true);
- return DeclPtrTy();
- }
- Actions.AddInitializerToDecl(ThisDecl, move(Init));
+ SkipUntil(tok::comma, true, true);
+ Actions.ActOnInitializerError(ThisDecl);
+ } else
+ Actions.AddInitializerToDecl(ThisDecl, move(Init));
}
} else if (Tok.is(tok::l_paren)) {
// Parse C++ direct initializer: '(' expression-list ')'
@@ -738,7 +738,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// Parse this as a tag as if the missing tag were present.
if (TagKind == tok::kw_enum)
- ParseEnumSpecifier(Loc, DS, AS);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS);
else
ParseClassSpecifier(TagKind, Loc, DS, TemplateInfo, AS);
return true;
@@ -859,10 +859,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
return;
case tok::coloncolon: // ::foo::bar
- // Annotate C++ scope specifiers. If we get one, loop.
- if (TryAnnotateCXXScopeToken(true))
- continue;
- goto DoneWithDeclSpec;
+ // C++ scope specifier. Annotate and loop, or bail out on error.
+ if (TryAnnotateCXXScopeToken(true)) {
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (Tok.is(tok::coloncolon)) // ::new or ::delete
+ goto DoneWithDeclSpec;
+ continue;
case tok::annot_cxxscope: {
if (DS.hasTypeSpecifier())
@@ -1020,8 +1025,15 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
case tok::identifier: {
// In C++, check to see if this is a scope specifier like foo::bar::, if
// so handle it as such. This is important for ctor parsing.
- if (getLang().CPlusPlus && TryAnnotateCXXScopeToken(true))
- continue;
+ if (getLang().CPlusPlus) {
+ if (TryAnnotateCXXScopeToken(true)) {
+ if (!DS.hasTypeSpecifier())
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::identifier))
+ continue;
+ }
// This identifier can only be a typedef name if we haven't already seen
// a type-specifier. Without this check we misparse:
@@ -1294,7 +1306,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// enum-specifier:
case tok::kw_enum:
ConsumeToken();
- ParseEnumSpecifier(Loc, DS, AS);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS);
continue;
// cv-qualifier:
@@ -1313,7 +1325,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// C++ typename-specifier:
case tok::kw_typename:
- if (TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken()) {
+ DS.SetTypeSpecError();
+ goto DoneWithDeclSpec;
+ }
+ if (!Tok.is(tok::kw_typename))
continue;
break;
@@ -1423,10 +1439,11 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- TemplateInfo, SuppressDeclarations);
- // Otherwise, not a type specifier.
- return false;
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+ return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
+ TemplateInfo, SuppressDeclarations);
case tok::coloncolon: // ::foo::bar
if (NextToken().is(tok::kw_new) || // ::new
NextToken().is(tok::kw_delete)) // ::delete
@@ -1435,10 +1452,9 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
- TemplateInfo, SuppressDeclarations);
- // Otherwise, not a type specifier.
- return false;
+ return true;
+ return ParseOptionalTypeSpecifier(DS, isInvalid, PrevSpec, DiagID,
+ TemplateInfo, SuppressDeclarations);
// simple-type-specifier:
case tok::annot_typename: {
@@ -1556,7 +1572,7 @@ bool Parser::ParseOptionalTypeSpecifier(DeclSpec &DS, bool& isInvalid,
// enum-specifier:
case tok::kw_enum:
ConsumeToken();
- ParseEnumSpecifier(Loc, DS);
+ ParseEnumSpecifier(Loc, DS, TemplateInfo, AS_none);
return true;
// cv-qualifier:
@@ -1834,6 +1850,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
/// [C++] 'enum' '::'[opt] nested-name-specifier[opt] identifier
///
void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
+ const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS) {
// Parse the tag portion of this.
if (Tok.is(tok::code_completion)) {
@@ -1848,8 +1865,11 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Attr.reset(ParseGNUAttributes());
CXXScopeSpec SS;
- if (getLang().CPlusPlus && ParseOptionalCXXScopeSpecifier(SS, 0, false)) {
- if (Tok.isNot(tok::identifier)) {
+ if (getLang().CPlusPlus) {
+ if (ParseOptionalCXXScopeSpecifier(SS, 0, false))
+ return;
+
+ if (SS.isSet() && Tok.isNot(tok::identifier)) {
Diag(Tok, diag::err_expected_ident);
if (Tok.isNot(tok::l_brace)) {
// Has no name and is not a definition.
@@ -1869,6 +1889,15 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
+ // enums cannot be templates.
+ if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
+ Diag(Tok, diag::err_enum_template);
+
+ // Skip the rest of this declarator, up until the comma or semicolon.
+ SkipUntil(tok::comma, true);
+ return;
+ }
+
// If an identifier is present, consume and remember it.
IdentifierInfo *Name = 0;
SourceLocation NameLoc;
@@ -2002,6 +2031,47 @@ bool Parser::isTypeQualifier() const {
}
}
+/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
+/// is definitely a type-specifier. Return false if it isn't part of a type
+/// specifier or if we're not sure.
+bool Parser::isKnownToBeTypeSpecifier(const Token &Tok) const {
+ switch (Tok.getKind()) {
+ default: return false;
+ // type-specifiers
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw__Complex:
+ case tok::kw__Imaginary:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_wchar_t:
+ case tok::kw_char16_t:
+ case tok::kw_char32_t:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw__Bool:
+ case tok::kw__Decimal32:
+ case tok::kw__Decimal64:
+ case tok::kw__Decimal128:
+ case tok::kw___vector:
+
+ // struct-or-union-specifier (C99) or class-specifier (C++)
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ // enum-specifier
+ case tok::kw_enum:
+
+ // typedef-name
+ case tok::annot_typename:
+ return true;
+ }
+}
+
/// isTypeSpecifierQualifier - Return true if the current token could be the
/// start of a specifier-qualifier-list.
bool Parser::isTypeSpecifierQualifier() {
@@ -2016,21 +2086,19 @@ bool Parser::isTypeSpecifierQualifier() {
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isTypeSpecifierQualifier();
- // Otherwise, not a type specifier.
- return false;
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+ return isTypeSpecifierQualifier();
case tok::coloncolon: // ::foo::bar
if (NextToken().is(tok::kw_new) || // ::new
NextToken().is(tok::kw_delete)) // ::delete
return false;
- // Annotate typenames and C++ scope specifiers. If we get one, just
- // recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isTypeSpecifierQualifier();
- // Otherwise, not a type specifier.
- return false;
+ return true;
+ return isTypeSpecifierQualifier();
// GNU attributes support.
case tok::kw___attribute:
@@ -2101,14 +2169,15 @@ bool Parser::isDeclarationSpecifier() {
if (TryAltiVecVectorToken())
return true;
// Fall through.
-
case tok::kw_typename: // typename T::type
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isDeclarationSpecifier();
- // Otherwise, not a declaration specifier.
- return false;
+ return true;
+ if (Tok.is(tok::identifier))
+ return false;
+ return isDeclarationSpecifier();
+
case tok::coloncolon: // ::foo::bar
if (NextToken().is(tok::kw_new) || // ::new
NextToken().is(tok::kw_delete)) // ::delete
@@ -2117,9 +2186,8 @@ bool Parser::isDeclarationSpecifier() {
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isDeclarationSpecifier();
- // Otherwise, not a declaration specifier.
- return false;
+ return true;
+ return isDeclarationSpecifier();
// storage-class-specifier
case tok::kw_typedef:
@@ -2200,7 +2268,10 @@ bool Parser::isConstructorDeclarator() {
// Parse the C++ scope specifier.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, 0, true);
+ if (ParseOptionalCXXScopeSpecifier(SS, 0, true)) {
+ TPA.Revert();
+ return false;
+ }
// Parse the constructor name.
if (Tok.is(tok::identifier) || Tok.is(tok::annot_template_id)) {
@@ -2351,7 +2422,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
(Tok.is(tok::coloncolon) || Tok.is(tok::identifier) ||
Tok.is(tok::annot_cxxscope))) {
CXXScopeSpec SS;
- if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, true)) {
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, true); // ignore fail
+
+ if (SS.isSet()) {
if (Tok.isNot(tok::star)) {
// The scope spec really belongs to the direct-declarator.
D.getCXXScopeSpec() = SS;
@@ -2507,9 +2580,13 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
if (getLang().CPlusPlus && D.mayHaveIdentifier()) {
// ParseDeclaratorInternal might already have parsed the scope.
- bool afterCXXScope = D.getCXXScopeSpec().isSet() ||
+ bool afterCXXScope = D.getCXXScopeSpec().isSet();
+ if (!afterCXXScope) {
ParseOptionalCXXScopeSpecifier(D.getCXXScopeSpec(), /*ObjectType=*/0,
true);
+ afterCXXScope = D.getCXXScopeSpec().isSet();
+ }
+
if (afterCXXScope) {
if (Actions.ShouldEnterDeclaratorScope(CurScope, D.getCXXScopeSpec()))
// Change the declaration context for name lookup, until this function
@@ -2588,7 +2665,7 @@ void Parser::ParseDirectDeclarator(Declarator &D) {
"Haven't past the location of the identifier yet?");
// Don't parse attributes unless we have an identifier.
- if (D.getIdentifier() && getLang().CPlusPlus
+ if (D.getIdentifier() && getLang().CPlusPlus0x
&& isCXX0XAttributeSpecifier(true)) {
SourceLocation AttrEndLoc;
CXX0XAttributeList Attr = ParseCXX0XAttributes();
@@ -2799,7 +2876,7 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// K&R-style function: void foo(a,b,c)
if (!getLang().CPlusPlus && Tok.is(tok::identifier)
&& !TryAltiVecVectorToken()) {
- if (!TryAnnotateTypeOrScopeToken()) {
+ if (TryAnnotateTypeOrScopeToken() || !Tok.is(tok::annot_typename)) {
// K&R identifier lists can't have typedefs as identifiers, per
// C99 6.7.5.3p11.
if (RequiresArg) {
@@ -3238,3 +3315,69 @@ void Parser::ParseTypeofSpecifier(DeclSpec &DS) {
DiagID, Operand.release()))
Diag(StartLoc, DiagID) << PrevSpec;
}
+
+
+/// TryAltiVecVectorTokenOutOfLine - Out of line body that should only be called
+/// from TryAltiVecVectorToken.
+bool Parser::TryAltiVecVectorTokenOutOfLine() {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ default: return false;
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___pixel:
+ Tok.setKind(tok::kw___vector);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ Tok.setKind(tok::kw___vector);
+ return true;
+ }
+ return false;
+ }
+}
+
+bool Parser::TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
+ const char *&PrevSpec, unsigned &DiagID,
+ bool &isInvalid) {
+ if (Tok.getIdentifierInfo() == Ident_vector) {
+ Token Next = NextToken();
+ switch (Next.getKind()) {
+ case tok::kw_short:
+ case tok::kw_long:
+ case tok::kw_signed:
+ case tok::kw_unsigned:
+ case tok::kw_void:
+ case tok::kw_char:
+ case tok::kw_int:
+ case tok::kw_float:
+ case tok::kw_double:
+ case tok::kw_bool:
+ case tok::kw___pixel:
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+ return true;
+ case tok::identifier:
+ if (Next.getIdentifierInfo() == Ident_pixel) {
+ isInvalid = DS.SetTypeAltiVecVector(true, Loc, PrevSpec, DiagID);
+ return true;
+ }
+ break;
+ default:
+ break;
+ }
+ } else if (Tok.getIdentifierInfo() == Ident_pixel &&
+ DS.isTypeAltiVecVector()) {
+ isInvalid = DS.SetTypeAltiVecPixel(true, Loc, PrevSpec, DiagID);
+ return true;
+ }
+ return false;
+}
+
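An illustrative input for the enum-template check added to ParseEnumSpecifier above; the exact wording of err_enum_template is defined elsewhere and not shown in this patch:

    // Now rejected up front instead of being mis-parsed:
    template<typename T> enum Color { Red, Green };  // error: err_enum_template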
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 51ee6a443488..bfb75d2dd3d4 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -167,9 +167,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS,
assert(Tok.is(tok::string_literal) && "Not a string literal!");
llvm::SmallVector<char, 8> LangBuffer;
// LangBuffer is guaranteed to be big enough.
- LangBuffer.resize(Tok.getLength());
- const char *LangBufPtr = &LangBuffer[0];
- unsigned StrSize = PP.getSpelling(Tok, LangBufPtr);
+ llvm::StringRef Lang = PP.getSpelling(Tok, LangBuffer);
SourceLocation Loc = ConsumeStringToken();
@@ -177,7 +175,7 @@ Parser::DeclPtrTy Parser::ParseLinkage(ParsingDeclSpec &DS,
DeclPtrTy LinkageSpec
= Actions.ActOnStartLinkageSpecification(CurScope,
/*FIXME: */SourceLocation(),
- Loc, LangBufPtr, StrSize,
+ Loc, Lang.data(), Lang.size(),
Tok.is(tok::l_brace)? Tok.getLocation()
: SourceLocation());
@@ -464,8 +462,7 @@ void Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
/// simple-template-id
///
Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
- const CXXScopeSpec *SS,
- bool DestrExpected) {
+ const CXXScopeSpec *SS) {
// Check whether we have a template-id that names a type.
if (Tok.is(tok::annot_template_id)) {
TemplateIdAnnotation *TemplateId
@@ -536,8 +533,7 @@ Parser::TypeResult Parser::ParseClassName(SourceLocation &EndLocation,
// We have an identifier; check whether it is actually a type.
TypeTy *Type = Actions.getTypeName(*Id, IdLoc, CurScope, SS, true);
if (!Type) {
- Diag(IdLoc, DestrExpected ? diag::err_destructor_class_name
- : diag::err_expected_class_name);
+ Diag(IdLoc, diag::err_expected_class_name);
return true;
}
@@ -647,7 +643,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// "FOO : BAR" is not a potential typo for "FOO::BAR".
ColonProtectionRAIIObject X(*this);
- if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, true))
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, true);
+ if (SS.isSet())
if (Tok.isNot(tok::identifier) && Tok.isNot(tok::annot_template_id))
Diag(Tok, diag::err_expected_ident);
}
@@ -943,7 +940,9 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
// This switch enumerates the valid "follow" set for definition.
if (TUK == Action::TUK_Definition) {
+ bool ExpectedSemi = true;
switch (Tok.getKind()) {
+ default: break;
case tok::semi: // struct foo {...} ;
case tok::star: // struct foo {...} * P;
case tok::amp: // struct foo {...} & R = ...
@@ -954,24 +953,46 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
case tok::annot_template_id: // struct foo {...} a<int> ::b;
case tok::l_paren: // struct foo {...} ( x);
case tok::comma: // __builtin_offsetof(struct foo{...} ,
+ ExpectedSemi = false;
+ break;
+ // Type qualifiers
+ case tok::kw_const: // struct foo {...} const x;
+ case tok::kw_volatile: // struct foo {...} volatile x;
+ case tok::kw_restrict: // struct foo {...} restrict x;
+ case tok::kw_inline: // struct foo {...} inline foo() {};
// Storage-class specifiers
case tok::kw_static: // struct foo {...} static x;
case tok::kw_extern: // struct foo {...} extern x;
case tok::kw_typedef: // struct foo {...} typedef x;
case tok::kw_register: // struct foo {...} register x;
case tok::kw_auto: // struct foo {...} auto x;
- // Type qualifiers
- case tok::kw_const: // struct foo {...} const x;
- case tok::kw_volatile: // struct foo {...} volatile x;
- case tok::kw_restrict: // struct foo {...} restrict x;
- case tok::kw_inline: // struct foo {...} inline foo() {};
+ // As shown above, type qualifiers and storage class specifiers absolutely
+ // can occur after class specifiers according to the grammar. However,
+ // almost no one actually writes code like this. If we see one of these,
+ // it is much more likely that someone missed a semicolon and the
+ // type/storage class specifier we're seeing is part of the *next*
+ // intended declaration, as in:
+ //
+ // struct foo { ... }
+ // typedef int X;
+ //
+ // We'd really like to emit a missing semicolon error instead of emitting
+ // an error on the 'int' saying that you can't have two type specifiers in
+ // the same declaration of X. Because of this, we look ahead past this
+ // token to see if it's a type specifier. If so, we know the code is
+ // otherwise invalid, so we can produce the expected missing-semicolon error.
+ if (!isKnownToBeTypeSpecifier(NextToken()))
+ ExpectedSemi = false;
break;
case tok::r_brace: // struct bar { struct foo {...} }
// Missing ';' at end of struct is accepted as an extension in C mode.
- if (!getLang().CPlusPlus) break;
- // FALL THROUGH.
- default:
+ if (!getLang().CPlusPlus)
+ ExpectedSemi = false;
+ break;
+ }
+
+ if (ExpectedSemi) {
ExpectAndConsume(tok::semi, diag::err_expected_semi_after_tagdecl,
TagType == DeclSpec::TST_class ? "class"
: TagType == DeclSpec::TST_struct? "struct" : "union");
@@ -980,7 +1001,6 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// ';' after the definition.
PP.EnterToken(Tok);
Tok.setKind(tok::semi);
- break;
}
}
}
@@ -1064,7 +1084,8 @@ Parser::BaseResult Parser::ParseBaseSpecifier(DeclPtrTy ClassDecl) {
// Parse optional '::' and optional nested-name-specifier.
CXXScopeSpec SS;
- ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, true);
+ ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0,
+ /*EnteringContext=*/false);
// The location of the base class itself.
SourceLocation BaseLoc = Tok.getLocation();
@@ -1122,7 +1143,7 @@ void Parser::HandleMemberFunctionDefaultArgs(Declarator& DeclaratorInfo,
LateMethod->DefaultArgs.reserve(FTI.NumArgs);
for (unsigned I = 0; I < ParamIdx; ++I)
LateMethod->DefaultArgs.push_back(
- LateParsedDefaultArgument(FTI.ArgInfo[ParamIdx].Param));
+ LateParsedDefaultArgument(FTI.ArgInfo[I].Param));
}
// Add this parameter to the list of parameters (it or may
@@ -1165,7 +1186,7 @@ void Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
// Access declarations.
if (!TemplateInfo.Kind &&
(Tok.is(tok::identifier) || Tok.is(tok::coloncolon)) &&
- TryAnnotateCXXScopeToken() &&
+ !TryAnnotateCXXScopeToken() &&
Tok.is(tok::annot_cxxscope)) {
bool isAccessDecl = false;
if (NextToken().is(tok::identifier))
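The source pattern the look-ahead heuristic above targets, taken from the comment in the hunk; with the change the parser reports the missing ';' after the class body instead of a confusing error on the next declaration:

    struct foo { int x; }   // expected here: missing ';' after the definition
    typedef int X;          // previously blamed for having two type specifiers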
diff --git a/lib/Parse/ParseExpr.cpp b/lib/Parse/ParseExpr.cpp
index c763c2c6f65f..af91021d33ce 100644
--- a/lib/Parse/ParseExpr.cpp
+++ b/lib/Parse/ParseExpr.cpp
@@ -626,6 +626,8 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
Next.is(tok::l_paren)) {
// If TryAnnotateTypeOrScopeToken annotates the token, tail recurse.
if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::identifier))
return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
}
}
@@ -790,7 +792,7 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
if (SavedKind == tok::kw_typename) {
// postfix-expression: typename-specifier '(' expression-list[opt] ')'
- if (!TryAnnotateTypeOrScopeToken())
+ if (TryAnnotateTypeOrScopeToken())
return ExprError();
}
@@ -852,6 +854,8 @@ Parser::OwningExprResult Parser::ParseCastExpression(bool isUnaryExpression,
// ::foo::bar -> global qualified name etc. If TryAnnotateTypeOrScopeToken
// annotates the token, tail recurse.
if (TryAnnotateTypeOrScopeToken())
+ return ExprError();
+ if (!Tok.is(tok::coloncolon))
return ParseCastExpression(isUnaryExpression, isAddressOfOperand);
// ::new -> [C++] new-expression
@@ -996,12 +1000,16 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
CXXScopeSpec SS;
Action::TypeTy *ObjectType = 0;
+ bool MayBePseudoDestructor = false;
if (getLang().CPlusPlus && !LHS.isInvalid()) {
LHS = Actions.ActOnStartCXXMemberReference(CurScope, move(LHS),
- OpLoc, OpKind, ObjectType);
+ OpLoc, OpKind, ObjectType,
+ MayBePseudoDestructor);
if (LHS.isInvalid())
break;
- ParseOptionalCXXScopeSpecifier(SS, ObjectType, false);
+
+ ParseOptionalCXXScopeSpecifier(SS, ObjectType, false,
+ &MayBePseudoDestructor);
}
if (Tok.is(tok::code_completion)) {
@@ -1012,6 +1020,17 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
ConsumeToken();
}
+ if (MayBePseudoDestructor) {
+ LHS = ParseCXXPseudoDestructor(move(LHS), OpLoc, OpKind, SS,
+ ObjectType);
+ break;
+ }
+
+ // Either the action has told us that this cannot be a
+ // pseudo-destructor expression (based on the type of the base
+ // expression), or we didn't see a '~' in the right place. We
+ // can still parse a destructor name here, but in that case it
+ // names a real destructor.
UnqualifiedId Name;
if (ParseUnqualifiedId(SS,
/*EnteringContext=*/false,
@@ -1022,10 +1041,9 @@ Parser::ParsePostfixExpressionSuffix(OwningExprResult LHS) {
return ExprError();
if (!LHS.isInvalid())
- LHS = Actions.ActOnMemberAccessExpr(CurScope, move(LHS), OpLoc, OpKind,
- SS, Name, ObjCImpDecl,
+ LHS = Actions.ActOnMemberAccessExpr(CurScope, move(LHS), OpLoc,
+ OpKind, SS, Name, ObjCImpDecl,
Tok.is(tok::l_paren));
-
break;
}
case tok::plusplus: // postfix-expression: postfix-expression '++'
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index 0dbe1ea83890..f1e989f4a7b0 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -45,10 +45,21 @@ using namespace clang;
/// \param EnteringContext whether we will be entering into the context of
/// the nested-name-specifier after parsing it.
///
-/// \returns true if a scope specifier was parsed.
+/// \param MayBePseudoDestructor When non-NULL, points to a flag that
+/// indicates whether this nested-name-specifier may be part of a
+/// pseudo-destructor name. In this case, the flag will be set false
+/// if we don't actually end up parsing a destructor name. Moreover,
+/// if we do end up determining that we are parsing a destructor name,
+/// the last component of the nested-name-specifier is not parsed as
+/// part of the scope specifier in a member access expression, e.g.,
+/// the \p T:: in \p p->T::m.
+///
+/// \returns true if there was an error parsing a scope specifier.
bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
Action::TypeTy *ObjectType,
- bool EnteringContext) {
+ bool EnteringContext,
+ bool *MayBePseudoDestructor) {
assert(getLang().CPlusPlus &&
"Call sites of this function should be guarded by checking for C++");
@@ -56,7 +67,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
SS.setScopeRep(Tok.getAnnotationValue());
SS.setRange(Tok.getAnnotationRange());
ConsumeToken();
- return true;
+ return false;
}
bool HasScopeSpecifier = false;
@@ -75,6 +86,12 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
HasScopeSpecifier = true;
}
+ bool CheckForDestructor = false;
+ if (MayBePseudoDestructor && *MayBePseudoDestructor) {
+ CheckForDestructor = true;
+ *MayBePseudoDestructor = false;
+ }
+
while (true) {
if (HasScopeSpecifier) {
// C++ [basic.lookup.classref]p5:
@@ -151,10 +168,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
= Actions.ActOnDependentTemplateName(TemplateKWLoc, SS, TemplateName,
ObjectType, EnteringContext);
if (!Template)
- break;
+ return true;
if (AnnotateTemplateIdToken(Template, TNK_Dependent_template_name,
&SS, TemplateName, TemplateKWLoc, false))
- break;
+ return true;
continue;
}
@@ -169,6 +186,10 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// convert it into a type within the nested-name-specifier.
TemplateIdAnnotation *TemplateId
= static_cast<TemplateIdAnnotation *>(Tok.getAnnotationValue());
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
if (TemplateId->Kind == TNK_Type_template ||
TemplateId->Kind == TNK_Dependent_template_name) {
@@ -217,21 +238,29 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// If we get foo:bar, this is almost certainly a typo for foo::bar. Recover
// and emit a fixit hint for it.
- if (Next.is(tok::colon) && !ColonIsSacred &&
- Actions.IsInvalidUnlessNestedName(CurScope, SS, II, ObjectType,
- EnteringContext) &&
- // If the token after the colon isn't an identifier, it's still an
- // error, but they probably meant something else strange so don't
- // recover like this.
- PP.LookAhead(1).is(tok::identifier)) {
- Diag(Next, diag::err_unexected_colon_in_nested_name_spec)
- << CodeModificationHint::CreateReplacement(Next.getLocation(), "::");
-
- // Recover as if the user wrote '::'.
- Next.setKind(tok::coloncolon);
+ if (Next.is(tok::colon) && !ColonIsSacred) {
+ if (Actions.IsInvalidUnlessNestedName(CurScope, SS, II, ObjectType,
+ EnteringContext) &&
+ // If the token after the colon isn't an identifier, it's still an
+ // error, but they probably meant something else strange so don't
+ // recover like this.
+ PP.LookAhead(1).is(tok::identifier)) {
+ Diag(Next, diag::err_unexected_colon_in_nested_name_spec)
+ << CodeModificationHint::CreateReplacement(Next.getLocation(), "::");
+
+ // Recover as if the user wrote '::'.
+ Next.setKind(tok::coloncolon);
+ }
}
if (Next.is(tok::coloncolon)) {
+ if (CheckForDestructor && GetLookAheadToken(2).is(tok::tilde) &&
+ !Actions.isNonTypeNestedNameSpecifier(CurScope, SS, Tok.getLocation(),
+ II, ObjectType)) {
+ *MayBePseudoDestructor = true;
+ return false;
+ }
+
+ // We have an identifier followed by a '::'. Look up this name
// as the name in a nested-name-specifier.
SourceLocation IdLoc = ConsumeToken();
@@ -274,7 +303,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
ConsumeToken();
if (AnnotateTemplateIdToken(Template, TNK, &SS, TemplateName,
SourceLocation(), false))
- break;
+ return true;
continue;
}
}
@@ -284,7 +313,13 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
break;
}
- return HasScopeSpecifier;
+ // Even if we didn't see any pieces of a nested-name-specifier, we
+ // still check whether there is a tilde in this position, which
+ // indicates a potential pseudo-destructor.
+ if (CheckForDestructor && Tok.is(tok::tilde))
+ *MayBePseudoDestructor = true;
+
+ return false;
}
/// ParseCXXIdExpression - Handle id-expression.
@@ -479,6 +514,77 @@ Parser::OwningExprResult Parser::ParseCXXTypeid() {
return move(Result);
}
+/// \brief Parse a C++ pseudo-destructor expression after the base,
+/// . or -> operator, and nested-name-specifier have already been
+/// parsed.
+///
+/// postfix-expression: [C++ 5.2]
+/// postfix-expression . pseudo-destructor-name
+/// postfix-expression -> pseudo-destructor-name
+///
+/// pseudo-destructor-name:
+/// ::[opt] nested-name-specifier[opt] type-name :: ~type-name
+/// ::[opt] nested-name-specifier template simple-template-id ::
+/// ~type-name
+/// ::[opt] nested-name-specifier[opt] ~type-name
+///
+Parser::OwningExprResult
+Parser::ParseCXXPseudoDestructor(ExprArg Base, SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ CXXScopeSpec &SS,
+ Action::TypeTy *ObjectType) {
+ // We're parsing either a pseudo-destructor-name or a dependent
+ // member access that has the same form as a
+ // pseudo-destructor-name. We parse both in the same way and let
+ // the action model sort them out.
+ //
+ // Note that the ::[opt] nested-name-specifier[opt] has already
+ // been parsed, and if there was a simple-template-id, it has
+ // been coalesced into a template-id annotation token.
+ UnqualifiedId FirstTypeName;
+ SourceLocation CCLoc;
+ if (Tok.is(tok::identifier)) {
+ FirstTypeName.setIdentifier(Tok.getIdentifierInfo(), Tok.getLocation());
+ ConsumeToken();
+ assert(Tok.is(tok::coloncolon) &&"ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else if (Tok.is(tok::annot_template_id)) {
+ FirstTypeName.setTemplateId(
+ (TemplateIdAnnotation *)Tok.getAnnotationValue());
+ ConsumeToken();
+ assert(Tok.is(tok::coloncolon) &&"ParseOptionalCXXScopeSpecifier fail");
+ CCLoc = ConsumeToken();
+ } else {
+ FirstTypeName.setIdentifier(0, SourceLocation());
+ }
+
+ // Parse the tilde.
+ assert(Tok.is(tok::tilde) && "ParseOptionalCXXScopeSpecifier fail");
+ SourceLocation TildeLoc = ConsumeToken();
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok, diag::err_destructor_tilde_identifier);
+ return ExprError();
+ }
+
+ // Parse the second type.
+ UnqualifiedId SecondTypeName;
+ IdentifierInfo *Name = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = ConsumeToken();
+ SecondTypeName.setIdentifier(Name, NameLoc);
+
+ // If there is a '<', the second type name is a template-id. Parse
+ // it as such.
+ if (Tok.is(tok::less) &&
+ ParseUnqualifiedIdTemplateId(SS, Name, NameLoc, false, ObjectType,
+ SecondTypeName, /*AssumeTemplateName=*/true))
+ return ExprError();
+
+ return Actions.ActOnPseudoDestructorExpr(CurScope, move(Base), OpLoc, OpKind,
+ SS, FirstTypeName, CCLoc,
+ TildeLoc, SecondTypeName,
+ Tok.is(tok::l_paren));
+}
+
/// ParseCXXBoolLiteral - This handles the C++ Boolean literals.
///
/// boolean-literal: [C++ 2.13.5]
@@ -773,6 +879,7 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
ParsedTemplateInfo(), /*SuppressDeclarations*/true))
{}
+ DS.Finish(Diags, PP);
return false;
}
@@ -804,13 +911,17 @@ bool Parser::ParseCXXTypeSpecifierSeq(DeclSpec &DS) {
/// that precedes the '<'. If template arguments were parsed successfully,
/// will be updated with the template-id.
///
+/// \param AssumeTemplateId When true, this routine will assume that the name
+/// refers to a template without performing name lookup to verify.
+///
/// \returns true if a parse error occurred, false otherwise.
bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
TypeTy *ObjectType,
- UnqualifiedId &Id) {
+ UnqualifiedId &Id,
+ bool AssumeTemplateId) {
assert(Tok.is(tok::less) && "Expected '<' to finish parsing a template-id");
TemplateTy Template;
@@ -819,8 +930,16 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
case UnqualifiedId::IK_Identifier:
case UnqualifiedId::IK_OperatorFunctionId:
case UnqualifiedId::IK_LiteralOperatorId:
- TNK = Actions.isTemplateName(CurScope, SS, Id, ObjectType, EnteringContext,
- Template);
+ if (AssumeTemplateId) {
+ Template = Actions.ActOnDependentTemplateName(SourceLocation(), SS,
+ Id, ObjectType,
+ EnteringContext);
+ TNK = TNK_Dependent_template_name;
+ if (!Template.get())
+ return true;
+ } else
+ TNK = Actions.isTemplateName(CurScope, SS, Id, ObjectType,
+ EnteringContext, Template);
break;
case UnqualifiedId::IK_ConstructorName: {
@@ -846,13 +965,8 @@ bool Parser::ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
EnteringContext, Template);
if (TNK == TNK_Non_template && Id.DestructorName == 0) {
- // The identifier following the destructor did not refer to a template
- // or to a type. Complain.
- if (ObjectType)
- Diag(NameLoc, diag::err_ident_in_pseudo_dtor_not_a_type)
- << Name;
- else
- Diag(NameLoc, diag::err_destructor_class_name);
+ Diag(NameLoc, diag::err_destructor_template_id)
+ << Name << SS.getRange();
return true;
}
}
@@ -1258,7 +1372,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
// Parse the class-name.
if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_destructor_class_name);
+ Diag(Tok, diag::err_destructor_tilde_identifier);
return true;
}
@@ -1273,17 +1387,13 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
}
// Note that this is a destructor name.
- Action::TypeTy *Ty = Actions.getTypeName(*ClassName, ClassNameLoc,
- CurScope, &SS, false, ObjectType);
- if (!Ty) {
- if (ObjectType)
- Diag(ClassNameLoc, diag::err_ident_in_pseudo_dtor_not_a_type)
- << ClassName;
- else
- Diag(ClassNameLoc, diag::err_destructor_class_name);
+ Action::TypeTy *Ty = Actions.getDestructorName(TildeLoc, *ClassName,
+ ClassNameLoc, CurScope,
+ SS, ObjectType,
+ EnteringContext);
+ if (!Ty)
return true;
- }
-
+
Result.setDestructorName(TildeLoc, Ty, ClassNameLoc);
return false;
}
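A self-contained example of the pseudo-destructor forms handled by the new ParseCXXPseudoDestructor path, following the grammar comment above:

    typedef int Int;

    void destroy(Int *p) {
      p->~Int();        // ~type-name with no nested-name-specifier
      p->Int::~Int();   // type-name :: ~type-name
    }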
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index d1c9be233fe0..7ab0e71dc235 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -188,7 +188,10 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
ProtocolRefs.size(),
ProtocolLocs.data(),
EndProtoLoc);
-
+ if (Tok.is(tok::l_brace))
+ ParseObjCClassInstanceVariables(CategoryType, tok::objc_private,
+ atLoc);
+
ParseObjCInterfaceDeclList(CategoryType, tok::objc_not_keyword);
return CategoryType;
}
@@ -229,7 +232,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtInterfaceDeclaration(
EndProtoLoc, attrList);
if (Tok.is(tok::l_brace))
- ParseObjCClassInstanceVariables(ClsType, atLoc);
+ ParseObjCClassInstanceVariables(ClsType, tok::objc_protected, atLoc);
ParseObjCInterfaceDeclList(ClsType, tok::objc_interface);
return ClsType;
@@ -772,6 +775,12 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
if (Tok.is(tok::l_paren))
ReturnType = ParseObjCTypeName(DSRet);
+ // If attributes exist before the method, parse them.
+ llvm::OwningPtr<AttributeList> MethodAttrs;
+ if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
+ MethodAttrs.reset(ParseGNUAttributes());
+
+ // Now parse the selector.
SourceLocation selLoc;
IdentifierInfo *SelIdent = ParseObjCSelectorPiece(selLoc);
@@ -787,9 +796,9 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
llvm::SmallVector<Declarator, 8> CargNames;
if (Tok.isNot(tok::colon)) {
// If attributes exist after the method, parse them.
- llvm::OwningPtr<AttributeList> MethodAttrs;
if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- MethodAttrs.reset(ParseGNUAttributes());
+ MethodAttrs.reset(addAttributeLists(MethodAttrs.take(),
+ ParseGNUAttributes()));
Selector Sel = PP.getSelectorTable().getNullarySelector(SelIdent);
DeclPtrTy Result
@@ -863,9 +872,9 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDecl(SourceLocation mLoc,
  // FIXME: Add support for optional parameter list...
// If attributes exist after the method, parse them.
- llvm::OwningPtr<AttributeList> MethodAttrs;
if (getLang().ObjC2 && Tok.is(tok::kw___attribute))
- MethodAttrs.reset(ParseGNUAttributes());
+ MethodAttrs.reset(addAttributeLists(MethodAttrs.take(),
+ ParseGNUAttributes()));
if (KeyIdents.size() == 0)
return DeclPtrTy();
@@ -959,6 +968,7 @@ ParseObjCProtocolReferences(llvm::SmallVectorImpl<Action::DeclPtrTy> &Protocols,
/// struct-declaration
///
void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
+ tok::ObjCKeywordKind visibility,
SourceLocation atLoc) {
assert(Tok.is(tok::l_brace) && "expected {");
llvm::SmallVector<DeclPtrTy, 32> AllIvarDecls;
@@ -967,7 +977,6 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
SourceLocation LBraceLoc = ConsumeBrace(); // the "{"
- tok::ObjCKeywordKind visibility = tok::objc_protected;
// While we still have something to read, read the instance variables.
while (Tok.isNot(tok::r_brace) && Tok.isNot(tok::eof)) {
// Each iteration of this loop reads one objc-instance-variable-decl.
@@ -1222,7 +1231,8 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration(
superClassId, superClassLoc);
if (Tok.is(tok::l_brace)) // we have ivars
- ParseObjCClassInstanceVariables(ImplClsType/*FIXME*/, atLoc);
+ ParseObjCClassInstanceVariables(ImplClsType/*FIXME*/,
+ tok::objc_protected, atLoc);
ObjCImpDecl = ImplClsType;
PendingObjCImpDecl.push_back(ObjCImpDecl);
diff --git a/lib/Parse/ParseTentative.cpp b/lib/Parse/ParseTentative.cpp
index 6251a2f36754..516a9a620b62 100644
--- a/lib/Parse/ParseTentative.cpp
+++ b/lib/Parse/ParseTentative.cpp
@@ -491,7 +491,8 @@ Parser::TPResult Parser::TryParseDeclarator(bool mayBeAbstract,
while (1) {
if (Tok.is(tok::coloncolon) || Tok.is(tok::identifier))
- TryAnnotateCXXScopeToken(true);
+ if (TryAnnotateCXXScopeToken(true))
+ return TPResult::Error();
if (Tok.is(tok::star) || Tok.is(tok::amp) || Tok.is(tok::caret) ||
(Tok.is(tok::annot_cxxscope) && NextToken().is(tok::star))) {
@@ -681,9 +682,10 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isCXXDeclarationSpecifier();
- // Otherwise, not a typename.
- return TPResult::False();
+ return TPResult::Error();
+ if (Tok.is(tok::identifier))
+ return TPResult::False();
+ return isCXXDeclarationSpecifier();
case tok::coloncolon: { // ::foo::bar
const Token &Next = NextToken();
@@ -694,9 +696,8 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
// Annotate typenames and C++ scope specifiers. If we get one, just
// recurse to handle whatever we get.
if (TryAnnotateTypeOrScopeToken())
- return isCXXDeclarationSpecifier();
- // Otherwise, not a typename.
- return TPResult::False();
+ return TPResult::Error();
+ return isCXXDeclarationSpecifier();
}
// decl-specifier:
@@ -762,7 +763,9 @@ Parser::TPResult Parser::isCXXDeclarationSpecifier() {
case tok::annot_cxxscope: // foo::bar or ::foo::bar, but already parsed
// We've already annotated a scope; try to annotate a type.
- if (!(TryAnnotateTypeOrScopeToken() && Tok.is(tok::annot_typename)))
+ if (TryAnnotateTypeOrScopeToken())
+ return TPResult::Error();
+ if (!Tok.is(tok::annot_typename))
return TPResult::False();
// If that succeeded, fallthrough into the generic simple-type-id case.
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index 30899c5dddb5..e7a771edda44 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -274,6 +274,7 @@ void Parser::EnterScope(unsigned ScopeFlags) {
} else {
CurScope = new Scope(CurScope, ScopeFlags);
}
+ CurScope->setNumErrorsAtStart(Diags.getNumErrors());
}
/// ExitScope - Pop a scope off the scope stack.
@@ -743,10 +744,11 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
// Handle the full declarator list.
while (1) {
// If attributes are present, parse them.
- llvm::OwningPtr<AttributeList> AttrList;
- if (Tok.is(tok::kw___attribute))
- // FIXME: attach attributes too.
- AttrList.reset(ParseGNUAttributes());
+ if (Tok.is(tok::kw___attribute)) {
+ SourceLocation Loc;
+ AttributeList *AttrList = ParseGNUAttributes(&Loc);
+ ParmDeclarator.AddAttributes(AttrList, Loc);
+ }
// Ask the actions module to compute the type for this declarator.
Action::DeclPtrTy Param =
@@ -890,8 +892,7 @@ Parser::OwningExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
/// specifier, and another one to get the actual type inside
/// ParseDeclarationSpecifiers).
///
-/// This returns true if the token was annotated or an unrecoverable error
-/// occurs.
+/// This returns true if an error occurred.
///
/// Note that this routine emits an error if you call it with ::new or ::delete
/// as the current tokens, so only call it in contexts where these are invalid.
@@ -909,11 +910,11 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// simple-template-id
SourceLocation TypenameLoc = ConsumeToken();
CXXScopeSpec SS;
- bool HadNestedNameSpecifier
- = ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, false);
- if (!HadNestedNameSpecifier) {
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, false))
+ return true;
+ if (!SS.isSet()) {
Diag(Tok.getLocation(), diag::err_expected_qualified_after_typename);
- return false;
+ return true;
}
TypeResult Ty;
@@ -927,7 +928,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
if (TemplateId->Kind == TNK_Function_template) {
Diag(Tok, diag::err_typename_refers_to_non_type_template)
<< Tok.getAnnotationRange();
- return false;
+ return true;
}
AnnotateTemplateIdTokenAsType(0);
@@ -941,7 +942,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
} else {
Diag(Tok, diag::err_expected_type_name_after_typename)
<< SS.getRange();
- return false;
+ return true;
}
SourceLocation EndLoc = Tok.getLastLoc();
@@ -950,7 +951,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
Tok.setAnnotationEndLoc(EndLoc);
Tok.setLocation(TypenameLoc);
PP.AnnotateCachedTokens(Tok);
- return true;
+ return false;
}
// Remembers whether the token was originally a scope annotation.
@@ -958,7 +959,8 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
CXXScopeSpec SS;
if (getLang().CPlusPlus)
- ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, EnteringContext);
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, EnteringContext))
+ return true;
if (Tok.is(tok::identifier)) {
// Determine whether the identifier is a type name.
@@ -975,7 +977,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// In case the tokens were cached, have Preprocessor replace
// them with the annotation token.
PP.AnnotateCachedTokens(Tok);
- return true;
+ return false;
}
if (!getLang().CPlusPlus) {
@@ -1000,7 +1002,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// If an unrecoverable error occurred, we need to return true here,
// because the token stream is in a damaged state. We may not return
// a valid identifier.
- return Tok.isNot(tok::identifier);
+ return true;
}
}
}
@@ -1020,12 +1022,12 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// to produce a type annotation token. Update the template-id
// annotation token to a type annotation token now.
AnnotateTemplateIdTokenAsType(&SS);
- return true;
+ return false;
}
}
if (SS.isEmpty())
- return Tok.isNot(tok::identifier) && Tok.isNot(tok::coloncolon);
+ return false;
// A C++ scope specifier that isn't followed by a typename.
// Push the current token back into the token stream (or revert it if it is
@@ -1043,7 +1045,7 @@ bool Parser::TryAnnotateTypeOrScopeToken(bool EnteringContext) {
// just reverted back to the state we were in before being called.
if (!wasScopeAnnotation)
PP.AnnotateCachedTokens(Tok);
- return true;
+ return false;
}
/// TryAnnotateScopeToken - Like TryAnnotateTypeOrScopeToken but only
@@ -1060,10 +1062,10 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
"Cannot be a type or scope token!");
CXXScopeSpec SS;
- if (!ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, EnteringContext))
- // If the token left behind is not an identifier, we either had an error or
- // successfully turned it into an annotation token.
- return Tok.isNot(tok::identifier);
+ if (ParseOptionalCXXScopeSpecifier(SS, /*ObjectType=*/0, EnteringContext))
+ return true;
+ if (!SS.isSet())
+ return false;
// Push the current token back into the token stream (or revert it if it is
// cached) and use an annotation scope token for current token.
@@ -1078,7 +1080,7 @@ bool Parser::TryAnnotateCXXScopeToken(bool EnteringContext) {
// In case the tokens were cached, have Preprocessor replace them with the
// annotation token.
PP.AnnotateCachedTokens(Tok);
- return true;
+ return false;
}
// Anchor the Parser::FieldCallback vtable to this translation unit.
diff --git a/lib/Sema/JumpDiagnostics.cpp b/lib/Sema/JumpDiagnostics.cpp
index 2b37e9df2c0e..7cf207f77aa8 100644
--- a/lib/Sema/JumpDiagnostics.cpp
+++ b/lib/Sema/JumpDiagnostics.cpp
@@ -77,7 +77,7 @@ JumpScopeChecker::JumpScopeChecker(Stmt *Body, Sema &s) : S(s) {
/// GetDiagForGotoScopeDecl - If this decl induces a new goto scope, return a
/// diagnostic that should be emitted if control goes over it. If not, return 0.
-static unsigned GetDiagForGotoScopeDecl(const Decl *D) {
+static unsigned GetDiagForGotoScopeDecl(const Decl *D, bool isCPlusPlus) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->getType()->isVariablyModifiedType())
return diag::note_protected_by_vla;
@@ -85,6 +85,9 @@ static unsigned GetDiagForGotoScopeDecl(const Decl *D) {
return diag::note_protected_by_cleanup;
if (VD->hasAttr<BlocksAttr>())
return diag::note_protected_by___block;
+ if (isCPlusPlus && VD->hasLocalStorage() && VD->hasInit())
+ return diag::note_protected_by_variable_init;
+
} else if (const TypedefDecl *TD = dyn_cast<TypedefDecl>(D)) {
if (TD->getUnderlyingType()->isVariablyModifiedType())
return diag::note_protected_by_vla_typedef;
@@ -116,18 +119,17 @@ void JumpScopeChecker::BuildScopeInformation(Stmt *S, unsigned ParentScope) {
Stmt *SubStmt = *CI;
if (SubStmt == 0) continue;
- // FIXME: diagnose jumps past initialization: required in C++, warning in C.
- // goto L; int X = 4; L: ;
+ bool isCPlusPlus = this->S.getLangOptions().CPlusPlus;
// If this is a declstmt with a VLA definition, it defines a scope from here
// to the end of the containing context.
if (DeclStmt *DS = dyn_cast<DeclStmt>(SubStmt)) {
- // The decl statement creates a scope if any of the decls in it are VLAs or
- // have the cleanup attribute.
+ // The decl statement creates a scope if any of the decls in it are VLAs
+ // or have the cleanup attribute.
for (DeclStmt::decl_iterator I = DS->decl_begin(), E = DS->decl_end();
I != E; ++I) {
// If this decl causes a new scope, push and switch to it.
- if (unsigned Diag = GetDiagForGotoScopeDecl(*I)) {
+ if (unsigned Diag = GetDiagForGotoScopeDecl(*I, isCPlusPlus)) {
Scopes.push_back(GotoScope(ParentScope, Diag, (*I)->getLocation()));
ParentScope = Scopes.size()-1;
}
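The case the new note covers, reusing the example from the FIXME this hunk removes; only the C++ side is handled here (the variable has local storage and an initializer):

    void f() {
      goto L;       // the jump bypasses the initialization of X
      int X = 4;    // in C++, note_protected_by_variable_init attaches here
    L: ;
    }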
diff --git a/lib/Sema/Sema.cpp b/lib/Sema/Sema.cpp
index 38c842eede57..3b4afef70b53 100644
--- a/lib/Sema/Sema.cpp
+++ b/lib/Sema/Sema.cpp
@@ -26,7 +26,18 @@
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
using namespace clang;
-
+
+FunctionScopeInfo::~FunctionScopeInfo() { }
+
+void FunctionScopeInfo::Clear(unsigned NumErrors) {
+ NeedsScopeChecking = false;
+ LabelMap.clear();
+ SwitchStack.clear();
+ NumErrorsAtStartOfFunction = NumErrors;
+}
+
+BlockScopeInfo::~BlockScopeInfo() { }
+
static inline RecordDecl *CreateStructDecl(ASTContext &C, const char *Name) {
if (C.getLangOptions().CPlusPlus)
return CXXRecordDecl::Create(C, TagDecl::TK_struct,
@@ -116,7 +127,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
LangOpts(pp.getLangOptions()), PP(pp), Context(ctxt), Consumer(consumer),
Diags(PP.getDiagnostics()), SourceMgr(PP.getSourceManager()),
ExternalSource(0), CodeCompleter(CodeCompleter), CurContext(0),
- CurBlock(0), PackContext(0), ParsingDeclDepth(0),
+ PackContext(0), TopFunctionScope(0), ParsingDeclDepth(0),
IdResolver(pp.getLangOptions()), StdNamespace(0), StdBadAlloc(0),
GlobalNewDeleteDeclared(false),
CompleteTranslationUnit(CompleteTranslationUnit),
@@ -138,6 +149,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
Sema::~Sema() {
if (PackContext) FreePackedContext();
delete TheTargetAttributesSema;
+ while (!FunctionScopes.empty())
+ PopFunctionOrBlockScope();
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit cast.
@@ -342,6 +355,51 @@ Sema::Diag(SourceLocation Loc, const PartialDiagnostic& PD) {
return Builder;
}
+
+/// \brief Enter a new function scope
+void Sema::PushFunctionScope() {
+ if (FunctionScopes.empty()) {
+ // Use the "top" function scope rather than having to allocate memory for
+ // a new scope.
+ TopFunctionScope.Clear(getDiagnostics().getNumErrors());
+ FunctionScopes.push_back(&TopFunctionScope);
+ return;
+ }
+
+ FunctionScopes.push_back(
+ new FunctionScopeInfo(getDiagnostics().getNumErrors()));
+}
+
+void Sema::PushBlockScope(Scope *BlockScope, BlockDecl *Block) {
+ FunctionScopes.push_back(new BlockScopeInfo(getDiagnostics().getNumErrors(),
+ BlockScope, Block));
+}
+
+void Sema::PopFunctionOrBlockScope() {
+ if (FunctionScopes.back() != &TopFunctionScope)
+ delete FunctionScopes.back();
+ else
+ TopFunctionScope.Clear(getDiagnostics().getNumErrors());
+
+ FunctionScopes.pop_back();
+}
+
+/// \brief Determine whether any errors occurred within this function/method/
+/// block.
+bool Sema::hasAnyErrorsInThisFunction() const {
+ unsigned NumErrors = TopFunctionScope.NumErrorsAtStartOfFunction;
+ if (!FunctionScopes.empty())
+ NumErrors = FunctionScopes.back()->NumErrorsAtStartOfFunction;
+ return NumErrors != getDiagnostics().getNumErrors();
+}
+
+BlockScopeInfo *Sema::getCurBlock() {
+ if (FunctionScopes.empty())
+ return 0;
+
+ return dyn_cast<BlockScopeInfo>(FunctionScopes.back());
+}
+
void Sema::ActOnComment(SourceRange Comment) {
Context.Comments.push_back(Comment);
}
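
The CurBlock pointer and the per-function label/switch members are being folded into a single stack of FunctionScopeInfo objects, with a preallocated TopFunctionScope reused so the common case of a non-nested function body allocates nothing. A standalone sketch of the same pattern, using only standard C++ (a simplified model, not clang's actual types):

    #include <cassert>
    #include <vector>

    // Simplified stand-in for FunctionScopeInfo.
    struct ScopeInfo {
      unsigned ErrorsAtStart;
      bool NeedsScopeChecking;
      explicit ScopeInfo(unsigned Errors = 0)
          : ErrorsAtStart(Errors), NeedsScopeChecking(false) {}
      virtual ~ScopeInfo() {}
      void Clear(unsigned Errors) {
        ErrorsAtStart = Errors;
        NeedsScopeChecking = false;
      }
    };

    class ScopeStack {
      ScopeInfo Top;                   // cached scope, reused for the outermost body
      std::vector<ScopeInfo *> Scopes; // innermost active scope is Scopes.back()
    public:
      void Push(unsigned CurrentErrors) {
        if (Scopes.empty()) {          // common case: no allocation needed
          Top.Clear(CurrentErrors);
          Scopes.push_back(&Top);
          return;
        }
        Scopes.push_back(new ScopeInfo(CurrentErrors));
      }
      void Pop(unsigned CurrentErrors) {
        assert(!Scopes.empty() && "popping an empty scope stack");
        if (Scopes.back() != &Top)
          delete Scopes.back();
        else
          Top.Clear(CurrentErrors);
        Scopes.pop_back();
      }
      ScopeInfo &Current() { return Scopes.empty() ? Top : *Scopes.back(); }
      ~ScopeStack() {
        while (!Scopes.empty())
          Pop(0);
      }
    };

With this shape, getCurBlock() reduces to a dyn_cast of the innermost entry, and hasAnyErrorsInThisFunction() compares the recorded error count at scope entry against the current count.
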
diff --git a/lib/Sema/Sema.h b/lib/Sema/Sema.h
index 3c7492af6108..efd04e8eddd0 100644
--- a/lib/Sema/Sema.h
+++ b/lib/Sema/Sema.h
@@ -95,6 +95,7 @@ namespace clang {
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCContainerDecl;
+ class PseudoDestructorTypeStorage;
class FunctionProtoType;
class CXXBasePath;
class CXXBasePaths;
@@ -107,9 +108,47 @@ namespace clang {
class TargetAttributesSema;
class ADLResult;
-/// BlockSemaInfo - When a block is being parsed, this contains information
-/// about the block. It is pointed to from Sema::CurBlock.
-struct BlockSemaInfo {
+/// \brief Retains information about a function, method, or block that is
+/// currently being parsed.
+struct FunctionScopeInfo {
+ /// \brief Whether this scope information structure defines information
+ /// for a block.
+ bool IsBlockInfo;
+
+ /// \brief Set true when a function or method contains a VLA or ObjC try
+ /// block, which introduce scopes that need to be checked for goto conditions.
+ /// If a function contains neither, the jump checker need not be run on it.
+ bool NeedsScopeChecking;
+
+ /// \brief The number of errors that had occurred before starting this
+ /// function or block.
+ unsigned NumErrorsAtStartOfFunction;
+
+ /// LabelMap - This is a mapping from label identifiers to the LabelStmt for
+ /// it (which acts like the label decl in some ways). Forward referenced
+ /// labels have a LabelStmt created for them with a null location & SubStmt.
+ llvm::DenseMap<IdentifierInfo*, LabelStmt*> LabelMap;
+
+ /// SwitchStack - This is the current set of active switch statements in the
+ /// block.
+ llvm::SmallVector<SwitchStmt*, 8> SwitchStack;
+
+ FunctionScopeInfo(unsigned NumErrors)
+ : IsBlockInfo(false), NeedsScopeChecking(false),
+ NumErrorsAtStartOfFunction(NumErrors) { }
+
+ virtual ~FunctionScopeInfo();
+
+ /// \brief Clear out the information in this function scope, making it
+ /// suitable for reuse.
+ void Clear(unsigned NumErrors);
+
+ static bool classof(const FunctionScopeInfo *FSI) { return true; }
+};
+
+
+/// \brief Retains information about a block that is currently being parsed.
+struct BlockScopeInfo : FunctionScopeInfo {
llvm::SmallVector<ParmVarDecl*, 8> Params;
bool hasPrototype;
bool isVariadic;
@@ -125,22 +164,17 @@ struct BlockSemaInfo {
/// return types, if any, in the block body.
QualType ReturnType;
- /// LabelMap - This is a mapping from label identifiers to the LabelStmt for
- /// it (which acts like the label decl in some ways). Forward referenced
- /// labels have a LabelStmt created for them with a null location & SubStmt.
- llvm::DenseMap<IdentifierInfo*, LabelStmt*> LabelMap;
-
- /// SwitchStack - This is the current set of active switch statements in the
- /// block.
- llvm::SmallVector<SwitchStmt*, 8> SwitchStack;
+ BlockScopeInfo(unsigned NumErrors, Scope *BlockScope, BlockDecl *Block)
+ : FunctionScopeInfo(NumErrors), hasPrototype(false), isVariadic(false),
+ hasBlockDeclRefExprs(false), TheDecl(Block), TheScope(BlockScope)
+ {
+ IsBlockInfo = true;
+ }
- /// SavedFunctionNeedsScopeChecking - This is the value of
- /// CurFunctionNeedsScopeChecking at the point when the block started.
- bool SavedFunctionNeedsScopeChecking;
+ virtual ~BlockScopeInfo();
- /// PrevBlockInfo - If this is nested inside another block, this points
- /// to the outer block.
- BlockSemaInfo *PrevBlockInfo;
+ static bool classof(const FunctionScopeInfo *FSI) { return FSI->IsBlockInfo; }
+ static bool classof(const BlockScopeInfo *BSI) { return true; }
};
/// \brief Holds a QualType and a TypeSourceInfo* that came out of a declarator
@@ -199,38 +233,25 @@ public:
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
- /// CurBlock - If inside of a block definition, this contains a pointer to
- /// the active block object that represents it.
- BlockSemaInfo *CurBlock;
-
/// PackContext - Manages the stack for #pragma pack. An alignment
/// of 0 indicates default alignment.
void *PackContext; // Really a "PragmaPackStack*"
- /// FunctionLabelMap - This is a mapping from label identifiers to the
- /// LabelStmt for it (which acts like the label decl in some ways). Forward
- /// referenced labels have a LabelStmt created for them with a null location &
- /// SubStmt.
+ /// \brief Stack containing information about each of the nested function,
+ /// block, and method scopes that are currently active.
+ llvm::SmallVector<FunctionScopeInfo *, 4> FunctionScopes;
+
+ /// \brief Cached function scope object used for the top function scope
+ /// and when there is no function scope (in error cases).
///
- /// Note that this should always be accessed through getLabelMap() in order
- /// to handle blocks properly.
- llvm::DenseMap<IdentifierInfo*, LabelStmt*> FunctionLabelMap;
-
- /// FunctionSwitchStack - This is the current set of active switch statements
- /// in the top level function. Clients should always use getSwitchStack() to
- /// handle the case when they are in a block.
- llvm::SmallVector<SwitchStmt*, 8> FunctionSwitchStack;
-
+ /// This should never be accessed directly; rather, its address will be
+ /// pushed into \c FunctionScopes when we want to re-use it.
+ FunctionScopeInfo TopFunctionScope;
+
/// ExprTemporaries - This is the stack of temporaries that are created by
/// the current full expression.
llvm::SmallVector<CXXTemporary*, 8> ExprTemporaries;
- /// CurFunctionNeedsScopeChecking - This is set to true when a function or
- /// ObjC method body contains a VLA or an ObjC try block, which introduce
- /// scopes that need to be checked for goto conditions. If a function does
- /// not contain this, then it need not have the jump checker run on it.
- bool CurFunctionNeedsScopeChecking;
-
/// ExtVectorDecls - This is a list all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
@@ -606,18 +627,42 @@ public:
virtual void ActOnEndOfTranslationUnit();
+ void PushFunctionScope();
+ void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
+ void PopFunctionOrBlockScope();
+
/// getLabelMap() - Return the current label map. If we're in a block, we
/// return it.
llvm::DenseMap<IdentifierInfo*, LabelStmt*> &getLabelMap() {
- return CurBlock ? CurBlock->LabelMap : FunctionLabelMap;
+ if (FunctionScopes.empty())
+ return TopFunctionScope.LabelMap;
+
+ return FunctionScopes.back()->LabelMap;
}
/// getSwitchStack - This returns the switch stack for the current block or
/// function.
llvm::SmallVector<SwitchStmt*,8> &getSwitchStack() {
- return CurBlock ? CurBlock->SwitchStack : FunctionSwitchStack;
+ if (FunctionScopes.empty())
+ return TopFunctionScope.SwitchStack;
+
+ return FunctionScopes.back()->SwitchStack;
}
+ /// \brief Determine whether the current function or block needs scope
+ /// checking.
+ bool &FunctionNeedsScopeChecking() {
+ if (FunctionScopes.empty())
+ return TopFunctionScope.NeedsScopeChecking;
+
+ return FunctionScopes.back()->NeedsScopeChecking;
+ }
+
+ bool hasAnyErrorsInThisFunction() const;
+
+ /// \brief Retrieve the current block, if any.
+ BlockScopeInfo *getCurBlock();
+
/// WeakTopLevelDeclDecls - access to #pragma weak-generated Decls
llvm::SmallVector<Decl*,2> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
@@ -1440,6 +1485,8 @@ public:
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl);
+ void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
+
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
@@ -2056,6 +2103,12 @@ public:
SourceLocation Loc,
ASTOwningVector<&ActionBase::DeleteExpr> &ConvertedArgs);
+ virtual TypeTy *getDestructorName(SourceLocation TildeLoc,
+ IdentifierInfo &II, SourceLocation NameLoc,
+ Scope *S, const CXXScopeSpec &SS,
+ TypeTy *ObjectType,
+ bool EnteringContext);
+
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
virtual OwningExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
@@ -2167,8 +2220,32 @@ public:
ExprArg Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
- TypeTy *&ObjectType);
+ TypeTy *&ObjectType,
+ bool &MayBePseudoDestructor);
+ OwningExprResult DiagnoseDtorReference(SourceLocation NameLoc,
+ ExprArg MemExpr);
+
+ OwningExprResult BuildPseudoDestructorExpr(ExprArg Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ TypeSourceInfo *ScopeType,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ PseudoDestructorTypeStorage DestroyedType,
+ bool HasTrailingLParen);
+
+ virtual OwningExprResult ActOnPseudoDestructorExpr(Scope *S, ExprArg Base,
+ SourceLocation OpLoc,
+ tok::TokenKind OpKind,
+ const CXXScopeSpec &SS,
+ UnqualifiedId &FirstTypeName,
+ SourceLocation CCLoc,
+ SourceLocation TildeLoc,
+ UnqualifiedId &SecondTypeName,
+ bool HasTrailingLParen);
+
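
These new entry points (getDestructorName, BuildPseudoDestructorExpr, ActOnPseudoDestructorExpr) back pseudo-destructor expressions: destructor syntax applied to a non-class type, which does nothing but must parse and type-check so that generic code can destroy any T uniformly. A hypothetical example of the construct:

    // p->~T() is a pseudo-destructor call when T is a scalar type such as int:
    // it has no effect, but it lets this template compile for class and
    // non-class types alike.
    template <typename T>
    void destroy(T *p) {
      p->~T();
    }

    int main() {
      int i = 42;
      destroy(&i);   // T = int: accepted, generates no destructor call
    }
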
/// MaybeCreateCXXExprWithTemporaries - If the list of temporaries is
/// non-empty, will create a new CXXExprWithTemporaries expression.
/// Otherwise, just returns the passed-in expression.
@@ -2195,7 +2272,11 @@ public:
bool isAcceptableNestedNameSpecifier(NamedDecl *SD);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
-
+ virtual bool isNonTypeNestedNameSpecifier(Scope *S, const CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ IdentifierInfo &II,
+ TypeTy *ObjectType);
+
CXXScopeTy *BuildCXXNestedNameSpecifier(Scope *S,
const CXXScopeSpec &SS,
SourceLocation IdLoc,
@@ -3376,7 +3457,8 @@ public:
Decl *getInstantiationOf(const Decl *D) {
Decl *Result = LocalDecls[D];
- assert(Result && "declaration was not instantiated in this scope!");
+ assert((Result || D->isInvalidDecl()) &&
+ "declaration was not instantiated in this scope!");
return Result;
}
@@ -3395,7 +3477,7 @@ public:
void InstantiatedLocal(const Decl *D, Decl *Inst) {
Decl *&Stored = LocalDecls[D];
- assert(!Stored && "Already instantiated this local");
+ assert((!Stored || Stored == Inst) && "Already instantiated this local");
Stored = Inst;
}
};
@@ -3502,9 +3584,9 @@ public:
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
- NamedDecl *FindInstantiatedDecl(NamedDecl *D,
+ NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs);
- DeclContext *FindInstantiatedContext(DeclContext *DC,
+ DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
@@ -3910,7 +3992,8 @@ public:
Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
Expr *&cond, Expr *&lhs, Expr *&rhs, SourceLocation questionLoc);
- QualType FindCompositePointerType(Expr *&E1, Expr *&E2); // C++ 5.9
+ QualType FindCompositePointerType(Expr *&E1, Expr *&E2,
+ bool *NonStandardCompositeType = 0);
QualType FindCompositeObjCPointerType(Expr *&LHS, Expr *&RHS,
SourceLocation questionLoc);
@@ -4129,7 +4212,7 @@ private:
CallExpr *TheCall);
bool SemaBuiltinVAStart(CallExpr *TheCall);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
- bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned LastArg=1);
+ bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinStackAddress(CallExpr *TheCall);
public:
diff --git a/lib/Sema/SemaCXXScopeSpec.cpp b/lib/Sema/SemaCXXScopeSpec.cpp
index 52e9e9bc87ef..971b78c489e8 100644
--- a/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/lib/Sema/SemaCXXScopeSpec.cpp
@@ -327,6 +327,54 @@ NamedDecl *Sema::FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS) {
return 0;
}
+bool Sema::isNonTypeNestedNameSpecifier(Scope *S, const CXXScopeSpec &SS,
+ SourceLocation IdLoc,
+ IdentifierInfo &II,
+ TypeTy *ObjectTypePtr) {
+ QualType ObjectType = GetTypeFromParser(ObjectTypePtr);
+ LookupResult Found(*this, &II, IdLoc, LookupNestedNameSpecifierName);
+
+ // Determine where to perform name lookup
+ DeclContext *LookupCtx = 0;
+ bool isDependent = false;
+ if (!ObjectType.isNull()) {
+ // This nested-name-specifier occurs in a member access expression, e.g.,
+ // x->B::f, and we are looking into the type of the object.
+ assert(!SS.isSet() && "ObjectType and scope specifier cannot coexist");
+ LookupCtx = computeDeclContext(ObjectType);
+ isDependent = ObjectType->isDependentType();
+ } else if (SS.isSet()) {
+ // This nested-name-specifier occurs after another nested-name-specifier,
+ // so look into the context associated with the prior nested-name-specifier.
+ LookupCtx = computeDeclContext(SS, false);
+ isDependent = isDependentScopeSpecifier(SS);
+ Found.setContextRange(SS.getRange());
+ }
+
+ if (LookupCtx) {
+ // Perform "qualified" name lookup into the declaration context we
+ // computed, which is either the type of the base of a member access
+ // expression or the declaration context associated with a prior
+ // nested-name-specifier.
+
+ // The declaration context must be complete.
+ if (!LookupCtx->isDependentContext() && RequireCompleteDeclContext(SS))
+ return false;
+
+ LookupQualifiedName(Found, LookupCtx);
+ } else if (isDependent) {
+ return false;
+ } else {
+ LookupName(Found, S);
+ }
+ Found.suppressDiagnostics();
+
+ if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
+ return isa<NamespaceDecl>(ND) || isa<NamespaceAliasDecl>(ND);
+
+ return false;
+}
+
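
The lookup above reports whether the name written before '::' resolves to a namespace (or namespace alias) rather than a type, which the parser can use to decide how to treat a qualified name. A hypothetical illustration of the two cases being distinguished:

    namespace N { struct A { static void f() {} }; }
    struct B { void f() {} };

    void g(B *p) {
      p->B::f();   // the 'B::' qualifier names a type
      N::A::f();   // the 'N::' qualifier names a namespace
    }
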
/// \brief Build a new nested-name-specifier for "identifier::", as described
/// by ActOnCXXNestedNameSpecifier.
///
diff --git a/lib/Sema/SemaChecking.cpp b/lib/Sema/SemaChecking.cpp
index b62cd19a0b25..30a6ab465538 100644
--- a/lib/Sema/SemaChecking.cpp
+++ b/lib/Sema/SemaChecking.cpp
@@ -13,8 +13,9 @@
//===----------------------------------------------------------------------===//
#include "Sema.h"
-#include "clang/Analysis/CFG.h"
#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
+#include "clang/Analysis/Analyses/ReachableCode.h"
#include "clang/Analysis/Analyses/PrintfFormatString.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/CharUnits.h"
@@ -109,7 +110,8 @@ bool Sema::CheckablePrintfAttr(const FormatAttr *Format, CallExpr *TheCall) {
}
if (format_idx < TheCall->getNumArgs()) {
Expr *Format = TheCall->getArg(format_idx)->IgnoreParenCasts();
- if (!Format->isNullPointerConstant(Context, Expr::NPC_ValueDependentIsNull))
+ if (!Format->isNullPointerConstant(Context,
+ Expr::NPC_ValueDependentIsNull))
return true;
}
}
@@ -150,7 +152,7 @@ Sema::CheckBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall) {
case Builtin::BI__builtin_isinf_sign:
case Builtin::BI__builtin_isnan:
case Builtin::BI__builtin_isnormal:
- if (SemaBuiltinFPClassification(TheCall))
+ if (SemaBuiltinFPClassification(TheCall, 1))
return ExprError();
break;
case Builtin::BI__builtin_return_address:
@@ -504,6 +506,7 @@ bool Sema::SemaBuiltinVAStart(CallExpr *TheCall) {
}
// Determine whether the current function is variadic or not.
+ BlockScopeInfo *CurBlock = getCurBlock();
bool isVariadic;
if (CurBlock)
isVariadic = CurBlock->isVariadic;
@@ -590,19 +593,20 @@ bool Sema::SemaBuiltinUnorderedCompare(CallExpr *TheCall) {
/// SemaBuiltinFPClassification - Handle functions like
/// __builtin_isnan and friends. This is declared to take (...), so we have
-/// to check everything.
-bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned LastArg) {
- if (TheCall->getNumArgs() < LastArg)
+/// to check everything. We expect the last argument to be a floating point
+/// value.
+bool Sema::SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs) {
+ if (TheCall->getNumArgs() < NumArgs)
return Diag(TheCall->getLocEnd(), diag::err_typecheck_call_too_few_args)
<< 0 /*function call*/;
- if (TheCall->getNumArgs() > LastArg)
- return Diag(TheCall->getArg(LastArg)->getLocStart(),
+ if (TheCall->getNumArgs() > NumArgs)
+ return Diag(TheCall->getArg(NumArgs)->getLocStart(),
diag::err_typecheck_call_too_many_args)
<< 0 /*function call*/
- << SourceRange(TheCall->getArg(LastArg)->getLocStart(),
+ << SourceRange(TheCall->getArg(NumArgs)->getLocStart(),
(*(TheCall->arg_end()-1))->getLocEnd());
- Expr *OrigArg = TheCall->getArg(LastArg-1);
+ Expr *OrigArg = TheCall->getArg(NumArgs-1);
if (OrigArg->isTypeDependent())
return false;
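
The rename from LastArg (defaulting to 1) to an explicit NumArgs reflects that these builtins take different argument counts: the predicates above pass 1, while __builtin_fpclassify takes five classification values plus the operand. Hypothetical usage of both forms (the fpclassify call site is not in this hunk; its check presumably passes NumArgs == 6):

    #include <cmath>   // FP_NAN, FP_INFINITE, FP_NORMAL, FP_SUBNORMAL, FP_ZERO

    int classify(double d) {
      if (__builtin_isnan(d))   // single-operand form, checked with NumArgs == 1
        return -1;
      // Five classification results plus the floating-point value.
      return __builtin_fpclassify(FP_NAN, FP_INFINITE, FP_NORMAL,
                                  FP_SUBNORMAL, FP_ZERO, d);
    }
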
@@ -953,6 +957,7 @@ Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
/// FormatGuard: Automatic Protection From printf Format String
/// Vulnerabilities, Proceedings of the 10th USENIX Security Symposium, 2001.
///
+/// TODO:
/// Functionality implemented:
///
/// We can statically check the following properties for string
@@ -963,7 +968,7 @@ Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
/// data arguments?
///
/// (2) Does each format conversion correctly match the type of the
-/// corresponding data argument? (TODO)
+/// corresponding data argument?
///
/// Moreover, for all printf functions we can:
///
@@ -982,7 +987,6 @@ Sema::CheckNonNullArguments(const NonNullAttr *NonNull,
///
/// All of these checks can be done by parsing the format string.
///
-/// For now, we ONLY do (1), (3), (5), (6), (7), and (8).
void
Sema::CheckPrintfArguments(const CallExpr *TheCall, bool HasVAListArg,
unsigned format_idx, unsigned firstDataArg) {
@@ -1042,13 +1046,15 @@ class CheckPrintfHandler : public analyze_printf::FormatStringHandler {
Sema &S;
const StringLiteral *FExpr;
const Expr *OrigFormatExpr;
- unsigned NumConversions;
const unsigned NumDataArgs;
const bool IsObjCLiteral;
const char *Beg; // Start of format string.
const bool HasVAListArg;
const CallExpr *TheCall;
unsigned FormatIdx;
+ llvm::BitVector CoveredArgs;
+ bool usesPositionalArgs;
+ bool atFirstArg;
public:
CheckPrintfHandler(Sema &s, const StringLiteral *fexpr,
const Expr *origFormatExpr,
@@ -1056,21 +1062,31 @@ public:
const char *beg, bool hasVAListArg,
const CallExpr *theCall, unsigned formatIdx)
: S(s), FExpr(fexpr), OrigFormatExpr(origFormatExpr),
- NumConversions(0), NumDataArgs(numDataArgs),
+ NumDataArgs(numDataArgs),
IsObjCLiteral(isObjCLiteral), Beg(beg),
HasVAListArg(hasVAListArg),
- TheCall(theCall), FormatIdx(formatIdx) {}
+ TheCall(theCall), FormatIdx(formatIdx),
+ usesPositionalArgs(false), atFirstArg(true) {
+ CoveredArgs.resize(numDataArgs);
+ CoveredArgs.reset();
+ }
void DoneProcessing();
void HandleIncompleteFormatSpecifier(const char *startSpecifier,
unsigned specifierLen);
- void
+ bool
HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen);
+ virtual void HandleInvalidPosition(const char *startSpecifier,
+ unsigned specifierLen,
+ analyze_printf::PositionContext p);
+
+ virtual void HandleZeroPosition(const char *startPos, unsigned posLen);
+
void HandleNullChar(const char *nullCharacter);
bool HandleFormatSpecifier(const analyze_printf::FormatSpecifier &FS,
@@ -1082,9 +1098,8 @@ private:
unsigned specifierLen);
SourceLocation getLocationOfByte(const char *x);
- bool HandleAmount(const analyze_printf::OptionalAmount &Amt,
- unsigned MissingArgDiag, unsigned BadTypeDiag,
- const char *startSpecifier, unsigned specifierLen);
+ bool HandleAmount(const analyze_printf::OptionalAmount &Amt, unsigned k,
+ const char *startSpecifier, unsigned specifierLen);
void HandleFlags(const analyze_printf::FormatSpecifier &FS,
llvm::StringRef flag, llvm::StringRef cspec,
const char *startSpecifier, unsigned specifierLen);
@@ -1115,18 +1130,50 @@ HandleIncompleteFormatSpecifier(const char *startSpecifier,
<< getFormatSpecifierRange(startSpecifier, specifierLen);
}
-void CheckPrintfHandler::
+void
+CheckPrintfHandler::HandleInvalidPosition(const char *startPos, unsigned posLen,
+ analyze_printf::PositionContext p) {
+ SourceLocation Loc = getLocationOfByte(startPos);
+ S.Diag(Loc, diag::warn_printf_invalid_positional_specifier)
+ << (unsigned) p << getFormatSpecifierRange(startPos, posLen);
+}
+
+void CheckPrintfHandler::HandleZeroPosition(const char *startPos,
+ unsigned posLen) {
+ SourceLocation Loc = getLocationOfByte(startPos);
+ S.Diag(Loc, diag::warn_printf_zero_positional_specifier)
+ << getFormatSpecifierRange(startPos, posLen);
+}
+
+bool CheckPrintfHandler::
HandleInvalidConversionSpecifier(const analyze_printf::FormatSpecifier &FS,
const char *startSpecifier,
unsigned specifierLen) {
- ++NumConversions;
+ unsigned argIndex = FS.getArgIndex();
+ bool keepGoing = true;
+ if (argIndex < NumDataArgs) {
+ // Consider the argument covered, even though the specifier doesn't
+ // make sense.
+ CoveredArgs.set(argIndex);
+ }
+ else {
+ // If argIndex exceeds the number of data arguments, we don't issue a
+ // warning, because that would just produce a cascade of warnings (and
+ // they may have intended '%%' anyway). We don't want to continue
+ // processing the format string after this point, however, as we will
+ // likely just get gibberish when trying to match arguments.
+ keepGoing = false;
+ }
+
const analyze_printf::ConversionSpecifier &CS =
FS.getConversionSpecifier();
SourceLocation Loc = getLocationOfByte(CS.getStart());
S.Diag(Loc, diag::warn_printf_invalid_conversion)
<< llvm::StringRef(CS.getStart(), CS.getLength())
<< getFormatSpecifierRange(startSpecifier, specifierLen);
+
+ return keepGoing;
}
void CheckPrintfHandler::HandleNullChar(const char *nullCharacter) {
@@ -1137,7 +1184,7 @@ void CheckPrintfHandler::HandleNullChar(const char *nullCharacter) {
}
const Expr *CheckPrintfHandler::getDataArg(unsigned i) const {
- return TheCall->getArg(FormatIdx + i);
+ return TheCall->getArg(FormatIdx + i + 1);
}
@@ -1154,17 +1201,16 @@ void CheckPrintfHandler::HandleFlags(const analyze_printf::FormatSpecifier &FS,
bool
CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
- unsigned MissingArgDiag,
- unsigned BadTypeDiag,
- const char *startSpecifier,
+ unsigned k, const char *startSpecifier,
unsigned specifierLen) {
if (Amt.hasDataArgument()) {
- ++NumConversions;
if (!HasVAListArg) {
- if (NumConversions > NumDataArgs) {
- S.Diag(getLocationOfByte(Amt.getStart()), MissingArgDiag)
- << getFormatSpecifierRange(startSpecifier, specifierLen);
+ unsigned argIndex = Amt.getArgIndex();
+ if (argIndex >= NumDataArgs) {
+ S.Diag(getLocationOfByte(Amt.getStart()),
+ diag::warn_printf_asterisk_missing_arg)
+ << k << getFormatSpecifierRange(startSpecifier, specifierLen);
// Don't do any more checking. We will just emit
// spurious errors.
return false;
@@ -1174,14 +1220,17 @@ CheckPrintfHandler::HandleAmount(const analyze_printf::OptionalAmount &Amt,
// Although not in conformance with C99, we also allow the argument to be
// an 'unsigned int' as that is a reasonably safe case. GCC also
// doesn't emit a warning for that case.
- const Expr *Arg = getDataArg(NumConversions);
+ CoveredArgs.set(argIndex);
+ const Expr *Arg = getDataArg(argIndex);
QualType T = Arg->getType();
const analyze_printf::ArgTypeResult &ATR = Amt.getArgType(S.Context);
assert(ATR.isValid());
if (!ATR.matchesType(S.Context, T)) {
- S.Diag(getLocationOfByte(Amt.getStart()), BadTypeDiag)
+ S.Diag(getLocationOfByte(Amt.getStart()),
+ diag::warn_printf_asterisk_wrong_type)
+ << k
<< ATR.getRepresentativeType(S.Context) << T
<< getFormatSpecifierRange(startSpecifier, specifierLen)
<< Arg->getSourceRange();
@@ -1200,32 +1249,31 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
const char *startSpecifier,
unsigned specifierLen) {
- using namespace analyze_printf;
+ using namespace analyze_printf;
const ConversionSpecifier &CS = FS.getConversionSpecifier();
- // First check if the field width, precision, and conversion specifier
- // have matching data arguments.
- if (!HandleAmount(FS.getFieldWidth(),
- diag::warn_printf_asterisk_width_missing_arg,
- diag::warn_printf_asterisk_width_wrong_type,
- startSpecifier, specifierLen)) {
+ if (atFirstArg) {
+ atFirstArg = false;
+ usesPositionalArgs = FS.usesPositionalArg();
+ }
+ else if (usesPositionalArgs != FS.usesPositionalArg()) {
+ // Cannot mix-and-match positional and non-positional arguments.
+ S.Diag(getLocationOfByte(CS.getStart()),
+ diag::warn_printf_mix_positional_nonpositional_args)
+ << getFormatSpecifierRange(startSpecifier, specifierLen);
return false;
}
- if (!HandleAmount(FS.getPrecision(),
- diag::warn_printf_asterisk_precision_missing_arg,
- diag::warn_printf_asterisk_precision_wrong_type,
- startSpecifier, specifierLen)) {
+ // First check if the field width, precision, and conversion specifier
+ // have matching data arguments.
+ if (!HandleAmount(FS.getFieldWidth(), /* field width */ 0,
+ startSpecifier, specifierLen)) {
return false;
}
- // Check for using an Objective-C specific conversion specifier
- // in a non-ObjC literal.
- if (!IsObjCLiteral && CS.isObjCArg()) {
- HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen);
-
- // Continue checking the other format specifiers.
- return true;
+ if (!HandleAmount(FS.getPrecision(), /* precision */ 1,
+ startSpecifier, specifierLen)) {
+ return false;
}
if (!CS.consumesDataArgument()) {
@@ -1234,7 +1282,20 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
return true;
}
- ++NumConversions;
+ // Consume the argument.
+ unsigned argIndex = FS.getArgIndex();
+ if (argIndex < NumDataArgs) {
+ // The check to see if the argIndex is valid will come later.
+ // We set the bit here because we may exit early from this
+ // function if we encounter some other error.
+ CoveredArgs.set(argIndex);
+ }
+
+ // Check for using an Objective-C specific conversion specifier
+ // in a non-ObjC literal.
+ if (!IsObjCLiteral && CS.isObjCArg()) {
+ return HandleInvalidConversionSpecifier(FS, startSpecifier, specifierLen);
+ }
// Are we using '%n'? Issue a warning about this being
// a possible security issue.
@@ -1268,7 +1329,7 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
if (HasVAListArg)
return true;
- if (NumConversions > NumDataArgs) {
+ if (argIndex >= NumDataArgs) {
S.Diag(getLocationOfByte(CS.getStart()),
diag::warn_printf_insufficient_data_args)
<< getFormatSpecifierRange(startSpecifier, specifierLen);
@@ -1278,7 +1339,7 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
// Now type check the data expression that matches the
// format specifier.
- const Expr *Ex = getDataArg(NumConversions);
+ const Expr *Ex = getDataArg(argIndex);
const analyze_printf::ArgTypeResult &ATR = FS.getArgType(S.Context);
if (ATR.isValid() && !ATR.matchesType(S.Context, Ex->getType())) {
// Check if we didn't match because of an implicit cast from a 'char'
@@ -1302,10 +1363,17 @@ CheckPrintfHandler::HandleFormatSpecifier(const analyze_printf::FormatSpecifier
void CheckPrintfHandler::DoneProcessing() {
// Does the number of data arguments exceed the number of
// format conversions in the format string?
- if (!HasVAListArg && NumConversions < NumDataArgs)
- S.Diag(getDataArg(NumConversions+1)->getLocStart(),
- diag::warn_printf_too_many_data_args)
- << getFormatStringRange();
+ if (!HasVAListArg) {
+ // Find any arguments that weren't covered.
+ CoveredArgs.flip();
+ signed notCoveredArg = CoveredArgs.find_first();
+ if (notCoveredArg >= 0) {
+ assert((unsigned)notCoveredArg < NumDataArgs);
+ S.Diag(getDataArg((unsigned) notCoveredArg)->getLocStart(),
+ diag::warn_printf_data_arg_not_used)
+ << getFormatStringRange();
+ }
+ }
}
void Sema::CheckPrintfString(const StringLiteral *FExpr,
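
The checker now records, per data argument, whether some specifier consumed it (including '*' width/precision arguments and positional %N$ specifiers), instead of merely counting conversions; that is what drives the new mixed-positional and data-argument-not-used diagnostics. A few hypothetical calls showing the cases involved (diagnostic wording approximate):

    #include <cstdio>

    void demo(int i, unsigned u, const char *s) {
      std::printf("%d %s\n", i, s);      // ok: every data argument is covered
      std::printf("%*d\n", (int)u, i);   // the '*' width consumes an int argument
      std::printf("%2$s %1$d\n", i, s);  // positional specifiers may reorder arguments
      std::printf("%1$d %s\n", i, s);    // warning: cannot mix positional and
                                         // non-positional specifiers
      std::printf("%d\n", i, u);         // warning: data argument not used by format
    }
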
@@ -1680,13 +1748,13 @@ struct IntRange {
}
// Returns the supremum of two ranges: i.e. their conservative merge.
- static IntRange join(const IntRange &L, const IntRange &R) {
+ static IntRange join(IntRange L, IntRange R) {
return IntRange(std::max(L.Width, R.Width),
L.NonNegative && R.NonNegative);
}
// Returns the infimum of two ranges: i.e. their aggressive merge.
- static IntRange meet(const IntRange &L, const IntRange &R) {
+ static IntRange meet(IntRange L, IntRange R) {
return IntRange(std::min(L.Width, R.Width),
L.NonNegative || R.NonNegative);
}
@@ -1804,6 +1872,15 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
case BinaryOperator::NE:
return IntRange::forBoolType();
+ // The type of these compound assignments is the type of the LHS,
+ // so the RHS is not necessarily an integer.
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::RemAssign:
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ return IntRange::forType(C, E->getType());
+
// Operations with opaque sources are black-listed.
case BinaryOperator::PtrMemD:
case BinaryOperator::PtrMemI:
@@ -1811,15 +1888,18 @@ IntRange GetExprRange(ASTContext &C, Expr *E, unsigned MaxWidth) {
// Bitwise-and uses the *infinum* of the two source ranges.
case BinaryOperator::And:
+ case BinaryOperator::AndAssign:
return IntRange::meet(GetExprRange(C, BO->getLHS(), MaxWidth),
GetExprRange(C, BO->getRHS(), MaxWidth));
// Left shift gets black-listed based on a judgement call.
case BinaryOperator::Shl:
+ case BinaryOperator::ShlAssign:
return IntRange::forType(C, E->getType());
// Right shift by a constant can narrow its left argument.
- case BinaryOperator::Shr: {
+ case BinaryOperator::Shr:
+ case BinaryOperator::ShrAssign: {
IntRange L = GetExprRange(C, BO->getLHS(), MaxWidth);
// If the shift amount is a positive constant, drop the width by
@@ -2103,250 +2183,30 @@ void Sema::CheckImplicitConversion(Expr *E, QualType T) {
return;
}
-// MarkLive - Mark all the blocks reachable from e as live. Returns the total
-// number of blocks just marked live.
-static unsigned MarkLive(CFGBlock *e, llvm::BitVector &live) {
- unsigned count = 0;
- std::queue<CFGBlock*> workq;
- // Prep work queue
- live.set(e->getBlockID());
- ++count;
- workq.push(e);
- // Solve
- while (!workq.empty()) {
- CFGBlock *item = workq.front();
- workq.pop();
- for (CFGBlock::succ_iterator I=item->succ_begin(),
- E=item->succ_end();
- I != E;
- ++I) {
- if ((*I) && !live[(*I)->getBlockID()]) {
- live.set((*I)->getBlockID());
- ++count;
- workq.push(*I);
- }
- }
- }
- return count;
-}
-static SourceLocation GetUnreachableLoc(CFGBlock &b, SourceRange &R1,
- SourceRange &R2) {
- Stmt *S;
- unsigned sn = 0;
- R1 = R2 = SourceRange();
-
- top:
- if (sn < b.size())
- S = b[sn].getStmt();
- else if (b.getTerminator())
- S = b.getTerminator();
- else
- return SourceLocation();
-
- switch (S->getStmtClass()) {
- case Expr::BinaryOperatorClass: {
- BinaryOperator *BO = cast<BinaryOperator>(S);
- if (BO->getOpcode() == BinaryOperator::Comma) {
- if (sn+1 < b.size())
- return b[sn+1].getStmt()->getLocStart();
- CFGBlock *n = &b;
- while (1) {
- if (n->getTerminator())
- return n->getTerminator()->getLocStart();
- if (n->succ_size() != 1)
- return SourceLocation();
- n = n[0].succ_begin()[0];
- if (n->pred_size() != 1)
- return SourceLocation();
- if (!n->empty())
- return n[0][0].getStmt()->getLocStart();
- }
- }
- R1 = BO->getLHS()->getSourceRange();
- R2 = BO->getRHS()->getSourceRange();
- return BO->getOperatorLoc();
- }
- case Expr::UnaryOperatorClass: {
- const UnaryOperator *UO = cast<UnaryOperator>(S);
- R1 = UO->getSubExpr()->getSourceRange();
- return UO->getOperatorLoc();
- }
- case Expr::CompoundAssignOperatorClass: {
- const CompoundAssignOperator *CAO = cast<CompoundAssignOperator>(S);
- R1 = CAO->getLHS()->getSourceRange();
- R2 = CAO->getRHS()->getSourceRange();
- return CAO->getOperatorLoc();
- }
- case Expr::ConditionalOperatorClass: {
- const ConditionalOperator *CO = cast<ConditionalOperator>(S);
- return CO->getQuestionLoc();
- }
- case Expr::MemberExprClass: {
- const MemberExpr *ME = cast<MemberExpr>(S);
- R1 = ME->getSourceRange();
- return ME->getMemberLoc();
- }
- case Expr::ArraySubscriptExprClass: {
- const ArraySubscriptExpr *ASE = cast<ArraySubscriptExpr>(S);
- R1 = ASE->getLHS()->getSourceRange();
- R2 = ASE->getRHS()->getSourceRange();
- return ASE->getRBracketLoc();
- }
- case Expr::CStyleCastExprClass: {
- const CStyleCastExpr *CSC = cast<CStyleCastExpr>(S);
- R1 = CSC->getSubExpr()->getSourceRange();
- return CSC->getLParenLoc();
- }
- case Expr::CXXFunctionalCastExprClass: {
- const CXXFunctionalCastExpr *CE = cast <CXXFunctionalCastExpr>(S);
- R1 = CE->getSubExpr()->getSourceRange();
- return CE->getTypeBeginLoc();
- }
- case Expr::ImplicitCastExprClass:
- ++sn;
- goto top;
- case Stmt::CXXTryStmtClass: {
- return cast<CXXTryStmt>(S)->getHandler(0)->getCatchLoc();
- }
- default: ;
- }
- R1 = S->getSourceRange();
- return S->getLocStart();
-}
-
-static SourceLocation MarkLiveTop(CFGBlock *e, llvm::BitVector &live,
- SourceManager &SM) {
- std::queue<CFGBlock*> workq;
- // Prep work queue
- workq.push(e);
- SourceRange R1, R2;
- SourceLocation top = GetUnreachableLoc(*e, R1, R2);
- bool FromMainFile = false;
- bool FromSystemHeader = false;
- bool TopValid = false;
- if (top.isValid()) {
- FromMainFile = SM.isFromMainFile(top);
- FromSystemHeader = SM.isInSystemHeader(top);
- TopValid = true;
- }
- // Solve
- while (!workq.empty()) {
- CFGBlock *item = workq.front();
- workq.pop();
- SourceLocation c = GetUnreachableLoc(*item, R1, R2);
- if (c.isValid()
- && (!TopValid
- || (SM.isFromMainFile(c) && !FromMainFile)
- || (FromSystemHeader && !SM.isInSystemHeader(c))
- || SM.isBeforeInTranslationUnit(c, top))) {
- top = c;
- FromMainFile = SM.isFromMainFile(top);
- FromSystemHeader = SM.isInSystemHeader(top);
- }
- live.set(item->getBlockID());
- for (CFGBlock::succ_iterator I=item->succ_begin(),
- E=item->succ_end();
- I != E;
- ++I) {
- if ((*I) && !live[(*I)->getBlockID()]) {
- live.set((*I)->getBlockID());
- workq.push(*I);
- }
- }
- }
- return top;
-}
-
-static int LineCmp(const void *p1, const void *p2) {
- SourceLocation *Line1 = (SourceLocation *)p1;
- SourceLocation *Line2 = (SourceLocation *)p2;
- return !(*Line1 < *Line2);
-}
namespace {
- struct ErrLoc {
- SourceLocation Loc;
- SourceRange R1;
- SourceRange R2;
- ErrLoc(SourceLocation l, SourceRange r1, SourceRange r2)
- : Loc(l), R1(r1), R2(r2) { }
- };
+class UnreachableCodeHandler : public reachable_code::Callback {
+ Sema &S;
+public:
+ UnreachableCodeHandler(Sema *s) : S(*s) {}
+
+ void HandleUnreachable(SourceLocation L, SourceRange R1, SourceRange R2) {
+ S.Diag(L, diag::warn_unreachable) << R1 << R2;
+ }
+};
}
/// CheckUnreachable - Check for unreachable code.
void Sema::CheckUnreachable(AnalysisContext &AC) {
- unsigned count;
// We avoid checking when there are errors, as the CFG won't faithfully match
// the user's code.
- if (getDiagnostics().hasErrorOccurred())
- return;
- if (Diags.getDiagnosticLevel(diag::warn_unreachable) == Diagnostic::Ignored)
- return;
-
- CFG *cfg = AC.getCFG();
- if (cfg == 0)
+ if (getDiagnostics().hasErrorOccurred() ||
+ Diags.getDiagnosticLevel(diag::warn_unreachable) == Diagnostic::Ignored)
return;
- llvm::BitVector live(cfg->getNumBlockIDs());
- // Mark all live things first.
- count = MarkLive(&cfg->getEntry(), live);
-
- if (count == cfg->getNumBlockIDs())
- // If there are no dead blocks, we're done.
- return;
-
- SourceRange R1, R2;
-
- llvm::SmallVector<ErrLoc, 24> lines;
- bool AddEHEdges = AC.getAddEHEdges();
- // First, give warnings for blocks with no predecessors, as they
- // can't be part of a loop.
- for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
- CFGBlock &b = **I;
- if (!live[b.getBlockID()]) {
- if (b.pred_begin() == b.pred_end()) {
- if (!AddEHEdges && b.getTerminator()
- && isa<CXXTryStmt>(b.getTerminator())) {
- // When not adding EH edges from calls, catch clauses
- // can otherwise seem dead. Avoid noting them as dead.
- count += MarkLive(&b, live);
- continue;
- }
- SourceLocation c = GetUnreachableLoc(b, R1, R2);
- if (!c.isValid()) {
- // Blocks without a location can't produce a warning, so don't mark
- // reachable blocks from here as live.
- live.set(b.getBlockID());
- ++count;
- continue;
- }
- lines.push_back(ErrLoc(c, R1, R2));
- // Avoid excessive errors by marking everything reachable from here
- count += MarkLive(&b, live);
- }
- }
- }
-
- if (count < cfg->getNumBlockIDs()) {
- // And then give warnings for the tops of loops.
- for (CFG::iterator I = cfg->begin(), E = cfg->end(); I != E; ++I) {
- CFGBlock &b = **I;
- if (!live[b.getBlockID()])
- // Avoid excessive errors by marking everything reachable from here
- lines.push_back(ErrLoc(MarkLiveTop(&b, live,
- Context.getSourceManager()),
- SourceRange(), SourceRange()));
- }
- }
-
- llvm::array_pod_sort(lines.begin(), lines.end(), LineCmp);
- for (llvm::SmallVector<ErrLoc, 24>::iterator I = lines.begin(),
- E = lines.end();
- I != E;
- ++I)
- if (I->Loc.isValid())
- Diag(I->Loc, diag::warn_unreachable) << I->R1 << I->R2;
+ UnreachableCodeHandler UC(this);
+ reachable_code::FindUnreachableCode(AC, UC);
}
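
The block-level reachability walk that used to live here (MarkLive, GetUnreachableLoc, MarkLiveTop) has moved into libAnalysis as reachable_code::FindUnreachableCode, and Sema now only supplies a callback that turns each finding into a diagnostic. The shape of that split, reduced to a standalone sketch (a simplified model, not clang's actual interfaces):

    #include <cstdio>
    #include <vector>

    // The analysis walks some representation of the function and hands each
    // unreachable location to a client-provided callback.
    struct UnreachableCallback {
      virtual ~UnreachableCallback() {}
      virtual void HandleUnreachable(unsigned Line) = 0;
    };

    // Stand-in for reachable_code::FindUnreachableCode: here the "analysis"
    // is just a precomputed list of unreachable lines.
    void FindUnreachableCode(const std::vector<unsigned> &DeadLines,
                             UnreachableCallback &CB) {
      for (unsigned Line : DeadLines)
        CB.HandleUnreachable(Line);
    }

    // The client side only decides how to report; compare UnreachableCodeHandler.
    struct PrintHandler : UnreachableCallback {
      void HandleUnreachable(unsigned Line) override {
        std::printf("warning: code at line %u will never be executed\n", Line);
      }
    };

    int main() {
      std::vector<unsigned> dead = {12, 47};
      PrintHandler H;
      FindUnreachableCode(dead, H);
    }
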
/// CheckFallThrough - Check that we don't fall off the end of a
@@ -2368,7 +2228,8 @@ Sema::ControlFlowKind Sema::CheckFallThrough(AnalysisContext &AC) {
// confuse us, so we mark all live things first.
std::queue<CFGBlock*> workq;
llvm::BitVector live(cfg->getNumBlockIDs());
- unsigned count = MarkLive(&cfg->getEntry(), live);
+ unsigned count = reachable_code::ScanReachableFromBlock(cfg->getEntry(),
+ live);
bool AddEHEdges = AC.getAddEHEdges();
if (!AddEHEdges && count != cfg->getNumBlockIDs())
@@ -2382,7 +2243,7 @@ Sema::ControlFlowKind Sema::CheckFallThrough(AnalysisContext &AC) {
if (b.getTerminator() && isa<CXXTryStmt>(b.getTerminator()))
// When not adding EH edges from calls, catch clauses
// can otherwise seem dead. Avoid noting them as dead.
- count += MarkLive(&b, live);
+ count += reachable_code::ScanReachableFromBlock(b, live);
continue;
}
}
@@ -2489,22 +2350,20 @@ void Sema::CheckFallThroughForFunctionDef(Decl *D, Stmt *Body,
bool ReturnsVoid = false;
bool HasNoReturn = false;
+
if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
// For function templates, class templates and member function templates
// we'll do the analysis at instantiation time.
if (FD->isDependentContext())
return;
- if (FD->getResultType()->isVoidType())
- ReturnsVoid = true;
- if (FD->hasAttr<NoReturnAttr>() ||
- FD->getType()->getAs<FunctionType>()->getNoReturnAttr())
- HasNoReturn = true;
+ ReturnsVoid = FD->getResultType()->isVoidType();
+ HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
+ FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
+
} else if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
- if (MD->getResultType()->isVoidType())
- ReturnsVoid = true;
- if (MD->hasAttr<NoReturnAttr>())
- HasNoReturn = true;
+ ReturnsVoid = MD->getResultType()->isVoidType();
+ HasNoReturn = MD->hasAttr<NoReturnAttr>();
}
// Short circuit for compilation speed.
diff --git a/lib/Sema/SemaCodeComplete.cpp b/lib/Sema/SemaCodeComplete.cpp
index a86294971861..edf1bc51eb92 100644
--- a/lib/Sema/SemaCodeComplete.cpp
+++ b/lib/Sema/SemaCodeComplete.cpp
@@ -1137,12 +1137,15 @@ static void AddOrdinaryNameResults(Action::CodeCompletionContext CCC,
else if (ObjCMethodDecl *Method
= dyn_cast<ObjCMethodDecl>(SemaRef.CurContext))
isVoid = Method->getResultType()->isVoidType();
- else if (SemaRef.CurBlock && !SemaRef.CurBlock->ReturnType.isNull())
- isVoid = SemaRef.CurBlock->ReturnType->isVoidType();
+ else if (SemaRef.getCurBlock() &&
+ !SemaRef.getCurBlock()->ReturnType.isNull())
+ isVoid = SemaRef.getCurBlock()->ReturnType->isVoidType();
Pattern = new CodeCompletionString;
Pattern->AddTypedTextChunk("return");
- if (!isVoid)
+ if (!isVoid) {
+ Pattern->AddChunk(CodeCompletionString::CK_HorizontalSpace);
Pattern->AddPlaceholderChunk("expression");
+ }
Pattern->AddChunk(CodeCompletionString::CK_SemiColon);
Results.AddResult(Result(Pattern));
diff --git a/lib/Sema/SemaDecl.cpp b/lib/Sema/SemaDecl.cpp
index 1fc08ce03197..ec1939e5ece9 100644
--- a/lib/Sema/SemaDecl.cpp
+++ b/lib/Sema/SemaDecl.cpp
@@ -436,14 +436,15 @@ void Sema::PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext) {
if (AddToContext)
CurContext->addDecl(D);
- // Out-of-line function and variable definitions should not be pushed into
- // scope.
- if ((isa<FunctionTemplateDecl>(D) &&
- cast<FunctionTemplateDecl>(D)->getTemplatedDecl()->isOutOfLine()) ||
- (isa<FunctionDecl>(D) &&
- (cast<FunctionDecl>(D)->isFunctionTemplateSpecialization() ||
- cast<FunctionDecl>(D)->isOutOfLine())) ||
- (isa<VarDecl>(D) && cast<VarDecl>(D)->isOutOfLine()))
+ // Out-of-line definitions shouldn't be pushed into scope in C++.
+ // Out-of-line variable and function definitions shouldn't be pushed even in C.
+ if ((getLangOptions().CPlusPlus || isa<VarDecl>(D) || isa<FunctionDecl>(D)) &&
+ D->isOutOfLine())
+ return;
+
+ // Template instantiations should also not be pushed into scope.
+ if (isa<FunctionDecl>(D) &&
+ cast<FunctionDecl>(D)->isFunctionTemplateSpecialization())
return;
// If this replaces anything in the current scope,
@@ -552,7 +553,8 @@ void Sema::ActOnPopScope(SourceLocation Loc, Scope *S) {
if (!D->getDeclName()) continue;
// Diagnose unused variables in this scope.
- if (ShouldDiagnoseUnusedDecl(D))
+ if (ShouldDiagnoseUnusedDecl(D) &&
+ S->getNumErrorsAtStart() == getDiagnostics().getNumErrors())
Diag(D->getLocation(), diag::warn_unused_variable) << D->getDeclName();
// Remove this name from our lexical scope.
@@ -849,7 +851,7 @@ void Sema::MergeTypeDefDecl(TypedefDecl *New, LookupResult &OldDecls) {
// is normally mapped to an error, but can be controlled with
// -Wtypedef-redefinition. If either the original or the redefinition is
// in a system header, don't emit this for compatibility with GCC.
- if (PP.getDiagnostics().getSuppressSystemWarnings() &&
+ if (getDiagnostics().getSuppressSystemWarnings() &&
(Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
Context.getSourceManager().isInSystemHeader(New->getLocation())))
return;
@@ -908,6 +910,16 @@ static Sema::CXXSpecialMember getSpecialMember(ASTContext &Ctx,
return Sema::CXXCopyAssignment;
}
+/// canRedefineFunction - checks if a function can be redefined. Currently,
+/// only extern inline functions can be redefined, and even then only in
+/// GNU89 mode.
+static bool canRedefineFunction(const FunctionDecl *FD,
+ const LangOptions& LangOpts) {
+ return (LangOpts.GNUMode && !LangOpts.C99 && !LangOpts.CPlusPlus &&
+ FD->isInlineSpecified() &&
+ FD->getStorageClass() == FunctionDecl::Extern);
+}
+
/// MergeFunctionDecl - We just parsed a function 'New' from
/// declarator D which has the same name and scope as a previous
/// declaration 'Old'. Figure out how to resolve this situation,
@@ -956,9 +968,12 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
QualType OldQType = Context.getCanonicalType(Old->getType());
QualType NewQType = Context.getCanonicalType(New->getType());
+ // Don't complain about this if we're in GNU89 mode and the old function
+ // is an extern inline function.
if (!isa<CXXMethodDecl>(New) && !isa<CXXMethodDecl>(Old) &&
New->getStorageClass() == FunctionDecl::Static &&
- Old->getStorageClass() != FunctionDecl::Static) {
+ Old->getStorageClass() != FunctionDecl::Static &&
+ !canRedefineFunction(Old, getLangOptions())) {
Diag(New->getLocation(), diag::err_static_non_static)
<< New;
Diag(Old->getLocation(), PrevDiag);
@@ -1089,7 +1104,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
NewQType = Context.getFunctionType(NewFuncType->getResultType(),
ParamTypes.data(), ParamTypes.size(),
OldProto->isVariadic(),
- OldProto->getTypeQuals());
+ OldProto->getTypeQuals(),
+ false, false, 0, 0,
+ OldProto->getNoReturnAttr(),
+ OldProto->getCallConv());
New->setType(NewQType);
New->setHasInheritedPrototype();
@@ -1168,7 +1186,10 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, Decl *OldD) {
New->setType(Context.getFunctionType(MergedReturn, &ArgTypes[0],
ArgTypes.size(),
- OldProto->isVariadic(), 0));
+ OldProto->isVariadic(), 0,
+ false, false, 0, 0,
+ OldProto->getNoReturnAttr(),
+ OldProto->getCallConv()));
return MergeCompatibleFunctionDecls(New, Old);
}
@@ -2159,7 +2180,7 @@ Sema::ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// then it shall have block scope.
QualType T = NewTD->getUnderlyingType();
if (T->isVariablyModifiedType()) {
- CurFunctionNeedsScopeChecking = true;
+ FunctionNeedsScopeChecking() = true;
if (S->getFnParent() == 0) {
bool SizeIsNegative;
@@ -2480,8 +2501,13 @@ void Sema::CheckVariableDeclaration(VarDecl *NewVD,
bool isVM = T->isVariablyModifiedType();
if (isVM || NewVD->hasAttr<CleanupAttr>() ||
- NewVD->hasAttr<BlocksAttr>())
- CurFunctionNeedsScopeChecking = true;
+ NewVD->hasAttr<BlocksAttr>() ||
+ // FIXME: We need to diagnose jumps past initialized variables in C++.
+ // However, this turns on the scope checker for everything with a variable
+ // which may impact compile time. See if we can find a better solution
+ // to this, perhaps only checking functions that contain gotos in C++?
+ (LangOpts.CPlusPlus && NewVD->hasLocalStorage()))
+ FunctionNeedsScopeChecking() = true;
if ((isVM && NewVD->hasLinkage()) ||
(T->isVariableArrayType() && NewVD->hasGlobalStorage())) {
@@ -3135,7 +3161,7 @@ Sema::ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
// FIXME: Also include static functions declared but not defined.
if (!NewFD->isInvalidDecl() && IsFunctionDefinition
&& !NewFD->isInlined() && NewFD->getLinkage() == InternalLinkage
- && !NewFD->isUsed())
+ && !NewFD->isUsed() && !NewFD->hasAttr<UnusedAttr>())
UnusedStaticFuncs.push_back(NewFD);
return NewFD;
@@ -3212,7 +3238,7 @@ void Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
// Turn this into a variadic function with no parameters.
QualType R = Context.getFunctionType(
NewFD->getType()->getAs<FunctionType>()->getResultType(),
- 0, 0, true, 0);
+ 0, 0, true, 0, false, false, 0, 0, false, CC_Default);
NewFD->setType(R);
return NewFD->setInvalidDecl();
}
@@ -3858,9 +3884,7 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
<< Context.getTypeDeclType(OwnedDecl);
}
- // TODO: CHECK FOR CONFLICTS, multiple decls with same name in one scope.
- // Can this happen for params? We already checked that they don't conflict
- // among each other. Here they can only shadow globals, which is ok.
+ // Check for redeclaration of parameters, e.g. int foo(int x, int x);
IdentifierInfo *II = D.getIdentifier();
if (II) {
if (NamedDecl *PrevDecl = LookupSingleName(S, II, LookupOrdinaryName)) {
@@ -3871,6 +3895,7 @@ Sema::ActOnParamDeclarator(Scope *S, Declarator &D) {
PrevDecl = 0;
} else if (S->isDeclScope(DeclPtrTy::make(PrevDecl))) {
Diag(D.getIdentifierLoc(), diag::err_param_redefinition) << II;
+ Diag(PrevDecl->getLocation(), diag::note_previous_declaration);
// Recover by removing the name
II = 0;
@@ -4059,11 +4084,15 @@ Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) {
else
FD = cast<FunctionDecl>(D.getAs<Decl>());
- CurFunctionNeedsScopeChecking = false;
+ // Enter a new function scope
+ PushFunctionScope();
// See if this is a redefinition.
+ // But don't complain if we're in GNU89 mode and the previous definition
+ // was an extern inline function.
const FunctionDecl *Definition;
- if (FD->getBody(Definition)) {
+ if (FD->getBody(Definition) &&
+ !canRedefineFunction(Definition, getLangOptions())) {
Diag(FD->getLocation(), diag::err_redefinition) << FD->getDeclName();
Diag(Definition->getLocation(), diag::note_previous_definition);
}
@@ -4120,7 +4149,11 @@ Sema::DeclPtrTy Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, DeclPtrTy D) {
<< "dllimport";
FD->setInvalidDecl();
return DeclPtrTy::make(FD);
- } else {
+ }
+
+ // Visual C++ does not appear to treat this as an issue, so we only issue
+ // a warning when Microsoft extensions are disabled.
+ if (!LangOpts.Microsoft) {
// If a symbol previously declared dllimport is later defined, the
// attribute is ignored in subsequent references, and a warning is
// emitted.
@@ -4184,11 +4217,9 @@ Sema::DeclPtrTy Sema::ActOnFinishFunctionBody(DeclPtrTy D, StmtArg BodyArg,
// Verify and clean out per-function state.
- assert(&getLabelMap() == &FunctionLabelMap && "Didn't pop block right?");
-
// Check goto/label use.
for (llvm::DenseMap<IdentifierInfo*, LabelStmt*>::iterator
- I = FunctionLabelMap.begin(), E = FunctionLabelMap.end(); I != E; ++I) {
+ I = getLabelMap().begin(), E = getLabelMap().end(); I != E; ++I) {
LabelStmt *L = I->second;
// Verify that we have no forward references left. If so, there was a goto
@@ -4224,32 +4255,41 @@ Sema::DeclPtrTy Sema::ActOnFinishFunctionBody(DeclPtrTy D, StmtArg BodyArg,
Elements.push_back(L);
Compound->setStmts(Context, &Elements[0], Elements.size());
}
- FunctionLabelMap.clear();
- if (!Body) return D;
-
- CheckUnreachable(AC);
+ if (Body) {
+ CheckUnreachable(AC);
+ // C++ constructors that have function-try-blocks can't have return
+ // statements in the handlers of that block. (C++ [except.handle]p14)
+ // Verify this.
+ if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
+ DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+
// Verify that gotos and switch cases don't jump into scopes illegally.
- if (CurFunctionNeedsScopeChecking)
- DiagnoseInvalidJumps(Body);
-
- // C++ constructors that have function-try-blocks can't have return
- // statements in the handlers of that block. (C++ [except.handle]p14)
- // Verify this.
- if (FD && isa<CXXConstructorDecl>(FD) && isa<CXXTryStmt>(Body))
- DiagnoseReturnInConstructorExceptionHandler(cast<CXXTryStmt>(Body));
+ if (FunctionNeedsScopeChecking() && !hasAnyErrorsInThisFunction())
+ DiagnoseInvalidJumps(Body);
- if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl))
- MarkBaseAndMemberDestructorsReferenced(Destructor);
+ if (CXXDestructorDecl *Destructor = dyn_cast<CXXDestructorDecl>(dcl))
+ MarkBaseAndMemberDestructorsReferenced(Destructor);
+
+ // If any errors have occurred, clear out any temporaries that may have
+ // been leftover. This ensures that these temporaries won't be picked up for
+ // deletion in some later function.
+ if (PP.getDiagnostics().hasErrorOccurred())
+ ExprTemporaries.clear();
+
+ assert(ExprTemporaries.empty() && "Leftover temporaries in function");
+ }
+
+ PopFunctionOrBlockScope();
// If any errors have occurred, clear out any temporaries that may have
// been leftover. This ensures that these temporaries won't be picked up for
// deletion in some later function.
- if (PP.getDiagnostics().hasErrorOccurred())
+ if (getDiagnostics().hasErrorOccurred())
ExprTemporaries.clear();
- assert(ExprTemporaries.empty() && "Leftover temporaries in function");
return D;
}
@@ -4501,8 +4541,9 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
bool isStdBadAlloc = false;
bool Invalid = false;
- RedeclarationKind Redecl = (TUK != TUK_Reference ? ForRedeclaration
- : NotForRedeclaration);
+ RedeclarationKind Redecl = ForRedeclaration;
+ if (TUK == TUK_Friend || TUK == TUK_Reference)
+ Redecl = NotForRedeclaration;
LookupResult Previous(*this, Name, NameLoc, LookupTagName, Redecl);
@@ -4752,12 +4793,15 @@ Sema::DeclPtrTy Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// If a friend declaration in a non-local class first declares a
// class or function, the friend class or function is a member of
// the innermost enclosing namespace.
- while (!SearchDC->isFileContext())
- SearchDC = SearchDC->getParent();
+ SearchDC = SearchDC->getEnclosingNamespaceContext();
- // The entity of a decl scope is a DeclContext; see PushDeclContext.
- while (S->getEntity() != SearchDC)
+ // Look up through our scopes until we find one with an entity which
+ // matches our declaration context.
+ while (S->getEntity() &&
+ ((DeclContext *)S->getEntity())->getPrimaryContext() != SearchDC) {
S = S->getParent();
+ assert(S && "No enclosing scope matching the enclosing namespace.");
+ }
}
CreateNewDecl:
@@ -5625,7 +5669,6 @@ void Sema::ActOnFields(Scope* S,
ObjCIvarDecl **ClsFields =
reinterpret_cast<ObjCIvarDecl**>(RecFields.data());
if (ObjCInterfaceDecl *ID = dyn_cast<ObjCInterfaceDecl>(EnclosingDecl)) {
- ID->setIVarList(ClsFields, RecFields.size(), Context);
ID->setLocEnd(RBrac);
// Add ivar's to class's DeclContext.
for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
@@ -5634,21 +5677,8 @@ void Sema::ActOnFields(Scope* S,
}
// Must enforce the rule that ivars in the base classes may not be
// duplicates.
- if (ID->getSuperClass()) {
- for (ObjCInterfaceDecl::ivar_iterator IVI = ID->ivar_begin(),
- IVE = ID->ivar_end(); IVI != IVE; ++IVI) {
- ObjCIvarDecl* Ivar = (*IVI);
-
- if (IdentifierInfo *II = Ivar->getIdentifier()) {
- ObjCIvarDecl* prevIvar =
- ID->getSuperClass()->lookupInstanceVariable(II);
- if (prevIvar) {
- Diag(Ivar->getLocation(), diag::err_duplicate_member) << II;
- Diag(prevIvar->getLocation(), diag::note_previous_declaration);
- }
- }
- }
- }
+ if (ID->getSuperClass())
+ DiagnoseDuplicateIvars(ID, ID->getSuperClass());
} else if (ObjCImplementationDecl *IMPDecl =
dyn_cast<ObjCImplementationDecl>(EnclosingDecl)) {
assert(IMPDecl && "ActOnFields - missing ObjCImplementationDecl");
@@ -5657,6 +5687,19 @@ void Sema::ActOnFields(Scope* S,
// Only it is in implementation's lexical context.
ClsFields[I]->setLexicalDeclContext(IMPDecl);
CheckImplementationIvars(IMPDecl, ClsFields, RecFields.size(), RBrac);
+ } else if (ObjCCategoryDecl *CDecl =
+ dyn_cast<ObjCCategoryDecl>(EnclosingDecl)) {
+ if (!LangOpts.ObjCNonFragileABI2 || !CDecl->IsClassExtension())
+ Diag(LBrac, diag::err_misplaced_ivar);
+ else {
+ // FIXME. Class extension does not have a LocEnd field.
+ // CDecl->setLocEnd(RBrac);
+ // Add ivar's to class extension's DeclContext.
+ for (unsigned i = 0, e = RecFields.size(); i != e; ++i) {
+ ClsFields[i]->setLexicalDeclContext(CDecl);
+ CDecl->addDecl(ClsFields[i]);
+ }
+ }
}
}
@@ -5714,12 +5757,13 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
llvm::APSInt EnumVal(IntWidth);
QualType EltTy;
if (Val) {
- if (Enum->isDependentType())
+ if (Enum->isDependentType() || Val->isTypeDependent())
EltTy = Context.DependentTy;
else {
// C99 6.7.2.2p2: Make sure we have an integer constant expression.
SourceLocation ExpLoc;
- if (VerifyIntegerConstantExpression(Val, &EnumVal)) {
+ if (!Val->isValueDependent() &&
+ VerifyIntegerConstantExpression(Val, &EnumVal)) {
Val = 0;
} else {
if (!getLangOptions().CPlusPlus) {
@@ -5732,7 +5776,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
if (!isRepresentableIntegerValue(Context, EnumVal, Context.IntTy))
Diag(IdLoc, diag::ext_enum_value_not_int)
<< EnumVal.toString(10) << Val->getSourceRange()
- << EnumVal.isNonNegative();
+ << (EnumVal.isUnsigned() || EnumVal.isNonNegative());
else if (!Context.hasSameType(Val->getType(), Context.IntTy)) {
// Force the type of the expression to 'int'.
ImpCastExprToType(Val, Context.IntTy, CastExpr::CK_IntegralCast);
@@ -5821,7 +5865,7 @@ EnumConstantDecl *Sema::CheckEnumConstant(EnumDecl *Enum,
}
}
- if (!Enum->isDependentType()) {
+ if (!EltTy->isDependentType()) {
// Make the enumerator value match the signedness and size of the
// enumerator's type.
EnumVal.zextOrTrunc(Context.getTypeSize(EltTy));
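The Val->isTypeDependent()/isValueDependent() checks added above defer the integer-constant-expression verification for enumerators whose initializer depends on a template parameter, and the final EltTy->isDependentType() test skips the size/signedness adjustment for them. A minimal C++ sketch of such an enumerator (hypothetical names):

template <int N>
struct Holder {
  enum Tag { Value = N };   // N is value-dependent, so the ICE check and the
                            // zext/trunc adjustment happen only at instantiation.
};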
diff --git a/lib/Sema/SemaDeclAttr.cpp b/lib/Sema/SemaDeclAttr.cpp
index cba1e9e1cd50..242d66fa521f 100644
--- a/lib/Sema/SemaDeclAttr.cpp
+++ b/lib/Sema/SemaDeclAttr.cpp
@@ -225,19 +225,38 @@ static void HandlePackedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << Attr.getName();
}
-static void HandleIBOutletAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+static void HandleIBAction(Decl *d, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() > 0) {
S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
return;
}
- // The IBOutlet attribute only applies to instance variables of Objective-C
- // classes.
- if (isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d))
+ // The IBAction attribute only applies to instance methods.
+ if (ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(d))
+ if (MD->isInstanceMethod()) {
+ d->addAttr(::new (S.Context) IBActionAttr());
+ return;
+ }
+
+ S.Diag(Attr.getLoc(), diag::err_attribute_ibaction) << Attr.getName();
+}
+
+static void HandleIBOutlet(Decl *d, const AttributeList &Attr, Sema &S) {
+ // check the attribute arguments.
+ if (Attr.getNumArgs() > 0) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
+ return;
+ }
+
+ // The IBOutlet attribute only applies to instance variables of
+ // Objective-C classes.
+ if (isa<ObjCIvarDecl>(d) || isa<ObjCPropertyDecl>(d)) {
d->addAttr(::new (S.Context) IBOutletAttr());
- else
- S.Diag(Attr.getLoc(), diag::err_attribute_iboutlet);
+ return;
+ }
+
+ S.Diag(Attr.getLoc(), diag::err_attribute_iboutlet) << Attr.getName();
}
static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
@@ -310,6 +329,86 @@ static void HandleNonNullAttr(Decl *d, const AttributeList &Attr, Sema &S) {
d->addAttr(::new (S.Context) NonNullAttr(S.Context, start, size));
}
+static bool isStaticVarOrStaticFunction(Decl *D) {
+ if (VarDecl *VD = dyn_cast<VarDecl>(D))
+ return VD->getStorageClass() == VarDecl::Static;
+ if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getStorageClass() == FunctionDecl::Static;
+ return false;
+}
+
+static void HandleWeakRefAttr(Decl *d, const AttributeList &Attr, Sema &S) {
+ // Check the attribute arguments.
+ if (Attr.getNumArgs() > 1) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 1;
+ return;
+ }
+
+ // gcc rejects
+ // class c {
+ // static int a __attribute__((weakref ("v2")));
+ // static int b() __attribute__((weakref ("f3")));
+ // };
+ // and ignores the attributes of
+ // void f(void) {
+ // static int a __attribute__((weakref ("v2")));
+ // }
+ // We reject both.
+ if (const DeclContext *Ctx = d->getDeclContext()) {
+ Ctx = Ctx->getLookupContext();
+ if (!isa<TranslationUnitDecl>(Ctx) && !isa<NamespaceDecl>(Ctx) ) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_global_context) <<
+ dyn_cast<NamedDecl>(d)->getNameAsString();
+ return;
+ }
+ }
+
+ // The GCC manual says
+ //
+ // At present, a declaration to which `weakref' is attached can only
+ // be `static'.
+ //
+ // It also says
+ //
+ // Without a TARGET,
+ // given as an argument to `weakref' or to `alias', `weakref' is
+ // equivalent to `weak'.
+ //
+ // gcc 4.4.1 will accept
+ // int a7 __attribute__((weakref));
+ // as
+ // int a7 __attribute__((weak));
+ // This looks like a bug in gcc. We reject that for now. We should revisit
+ // it if this behaviour is actually used.
+
+ if (!isStaticVarOrStaticFunction(d)) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_weakref_not_static) <<
+ dyn_cast<NamedDecl>(d)->getNameAsString();
+ return;
+ }
+
+ // GCC rejects
+ //   static int a __attribute__((alias("y"), weakref));
+ // Should we? How do we check whether weakref comes before or after alias?
+
+ if (Attr.getNumArgs() == 1) {
+ Expr *Arg = static_cast<Expr*>(Attr.getArg(0));
+ Arg = Arg->IgnoreParenCasts();
+ StringLiteral *Str = dyn_cast<StringLiteral>(Arg);
+
+ if (Str == 0 || Str->isWide()) {
+ S.Diag(Attr.getLoc(), diag::err_attribute_argument_n_not_string)
+ << "weakref" << 1;
+ return;
+ }
+ // GCC will accept anything as the argument of weakref. Should we
+ // check for an existing decl?
+ d->addAttr(::new (S.Context) AliasAttr(S.Context, Str->getString()));
+ }
+
+ d->addAttr(::new (S.Context) WeakRefAttr());
+}
+
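As the comments in HandleWeakRefAttr spell out, the attribute is accepted only on static declarations at translation-unit or namespace scope, and a string argument is turned into an alias to that target. A small C++ sketch of what this handler accepts and rejects (symbol names invented):

extern "C" void real_impl(void);
// Accepted: static, at file scope, with a target string (adds AliasAttr + WeakRefAttr).
static void impl_ref(void) __attribute__((weakref("real_impl")));

// Rejected by the checks above: weakref at class scope, mirroring the GCC example
// quoted in the handler's comments.
struct S { static int a __attribute__((weakref("v2"))); };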
static void HandleAliasAttr(Decl *d, const AttributeList &Attr, Sema &S) {
// check the attribute arguments.
if (Attr.getNumArgs() != 1) {
@@ -422,7 +521,7 @@ static void HandleUnusedAttr(Decl *d, const AttributeList &Attr, Sema &S) {
return;
}
- if (!isa<VarDecl>(d) && !isFunctionOrMethod(d)) {
+ if (!isa<VarDecl>(d) && !isa<ObjCIvarDecl>(d) && !isFunctionOrMethod(d)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << 2 /*variable and function*/;
return;
@@ -735,7 +834,7 @@ static void HandleWarnUnusedResult(Decl *D, const AttributeList &Attr, Sema &S)
return;
}
- if (!isFunctionOrMethod(D)) {
+ if (!isFunction(D)) {
S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
<< Attr.getName() << 0 /*function*/;
return;
@@ -758,13 +857,7 @@ static void HandleWeakAttr(Decl *D, const AttributeList &Attr, Sema &S) {
}
/* weak only applies to non-static declarations */
- bool isStatic = false;
- if (VarDecl *VD = dyn_cast<VarDecl>(D)) {
- isStatic = VD->getStorageClass() == VarDecl::Static;
- } else if (FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- isStatic = FD->getStorageClass() == FunctionDecl::Static;
- }
- if (isStatic) {
+ if (isStaticVarOrStaticFunction(D)) {
S.Diag(Attr.getLoc(), diag::err_attribute_weak_static) <<
dyn_cast<NamedDecl>(D)->getNameAsString();
return;
@@ -813,82 +906,6 @@ static void HandleWeakImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
D->addAttr(::new (S.Context) WeakImportAttr());
}
-static void HandleDLLImportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
- // check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
-
- // Attribute can be applied only to functions or variables.
- if (isa<VarDecl>(D)) {
- D->addAttr(::new (S.Context) DLLImportAttr());
- return;
- }
-
- FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 2 /*variable and function*/;
- return;
- }
-
- // Currently, the dllimport attribute is ignored for inlined functions.
- // Warning is emitted.
- if (FD->isInlineSpecified()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
- return;
- }
-
- // The attribute is also overridden by a subsequent declaration as dllexport.
- // Warning is emitted.
- for (AttributeList *nextAttr = Attr.getNext(); nextAttr;
- nextAttr = nextAttr->getNext()) {
- if (nextAttr->getKind() == AttributeList::AT_dllexport) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
- return;
- }
- }
-
- if (D->getAttr<DLLExportAttr>()) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllimport";
- return;
- }
-
- D->addAttr(::new (S.Context) DLLImportAttr());
-}
-
-static void HandleDLLExportAttr(Decl *D, const AttributeList &Attr, Sema &S) {
- // check the attribute arguments.
- if (Attr.getNumArgs() != 0) {
- S.Diag(Attr.getLoc(), diag::err_attribute_wrong_number_arguments) << 0;
- return;
- }
-
- // Attribute can be applied only to functions or variables.
- if (isa<VarDecl>(D)) {
- D->addAttr(::new (S.Context) DLLExportAttr());
- return;
- }
-
- FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
- if (!FD) {
- S.Diag(Attr.getLoc(), diag::warn_attribute_wrong_decl_type)
- << Attr.getName() << 2 /*variable and function*/;
- return;
- }
-
- // Currently, the dllexport attribute is ignored for inlined functions, unless
- // the -fkeep-inline-functions flag has been used. Warning is emitted;
- if (FD->isInlineSpecified()) {
- // FIXME: ... unless the -fkeep-inline-functions flag has been used.
- S.Diag(Attr.getLoc(), diag::warn_attribute_ignored) << "dllexport";
- return;
- }
-
- D->addAttr(::new (S.Context) DLLExportAttr());
-}
-
static void HandleReqdWorkGroupSize(Decl *D, const AttributeList &Attr,
Sema &S) {
// Attribute has 3 arguments.
@@ -1777,6 +1794,12 @@ static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &Attr,
default:
assert(0 && "invalid ownership attribute");
return;
+ case AttributeList::AT_cf_returns_not_retained:
+ d->addAttr(::new (S.Context) CFReturnsNotRetainedAttr());
+ return;
+ case AttributeList::AT_ns_returns_not_retained:
+ d->addAttr(::new (S.Context) NSReturnsNotRetainedAttr());
+ return;
case AttributeList::AT_cf_returns_retained:
d->addAttr(::new (S.Context) CFReturnsRetainedAttr());
return;
@@ -1786,6 +1809,11 @@ static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &Attr,
};
}
+static bool isKnownDeclSpecAttr(const AttributeList &Attr) {
+ return Attr.getKind() == AttributeList::AT_dllimport ||
+ Attr.getKind() == AttributeList::AT_dllexport;
+}
+
//===----------------------------------------------------------------------===//
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
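The two new attribute kinds routed into HandleNSReturnsRetainedAttr above correspond to the source-level spellings ns_returns_not_retained and cf_returns_not_retained, which are checker-specific annotations for return-value ownership. A minimal sketch of how they might appear in user code (the CF typedef and function name are illustrative only):

typedef const struct __CFString *CFStringRef;
// Annotates that the caller does not own the returned reference, despite the
// "Copy" naming convention suggesting otherwise.
CFStringRef CopyCachedName(void) __attribute__((cf_returns_not_retained));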
@@ -1796,11 +1824,12 @@ static void HandleNSReturnsRetainedAttr(Decl *d, const AttributeList &Attr,
/// the wrong thing is illegal (C++0x [dcl.attr.grammar]/4).
static void ProcessDeclAttribute(Scope *scope, Decl *D,
const AttributeList &Attr, Sema &S) {
- if (Attr.isDeclspecAttribute())
- // FIXME: Try to deal with __declspec attributes!
+ if (Attr.isDeclspecAttribute() && !isKnownDeclSpecAttr(Attr))
+ // FIXME: Try to deal with other __declspec attributes!
return;
switch (Attr.getKind()) {
- case AttributeList::AT_IBOutlet: HandleIBOutletAttr (D, Attr, S); break;
+ case AttributeList::AT_IBAction: HandleIBAction(D, Attr, S); break;
+ case AttributeList::AT_IBOutlet: HandleIBOutlet(D, Attr, S); break;
case AttributeList::AT_address_space:
case AttributeList::AT_objc_gc:
case AttributeList::AT_vector_size:
@@ -1820,8 +1849,6 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_constructor: HandleConstructorAttr (D, Attr, S); break;
case AttributeList::AT_deprecated: HandleDeprecatedAttr (D, Attr, S); break;
case AttributeList::AT_destructor: HandleDestructorAttr (D, Attr, S); break;
- case AttributeList::AT_dllexport: HandleDLLExportAttr (D, Attr, S); break;
- case AttributeList::AT_dllimport: HandleDLLImportAttr (D, Attr, S); break;
case AttributeList::AT_ext_vector_type:
HandleExtVectorTypeAttr(scope, D, Attr, S);
break;
@@ -1838,6 +1865,8 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_override: HandleOverrideAttr (D, Attr, S); break;
// Checker-specific.
+ case AttributeList::AT_ns_returns_not_retained:
+ case AttributeList::AT_cf_returns_not_retained:
case AttributeList::AT_ns_returns_retained:
case AttributeList::AT_cf_returns_retained:
HandleNSReturnsRetainedAttr(D, Attr, S); break;
@@ -1854,6 +1883,7 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
case AttributeList::AT_warn_unused_result: HandleWarnUnusedResult(D,Attr,S);
break;
case AttributeList::AT_weak: HandleWeakAttr (D, Attr, S); break;
+ case AttributeList::AT_weakref: HandleWeakRefAttr (D, Attr, S); break;
case AttributeList::AT_weak_import: HandleWeakImportAttr (D, Attr, S); break;
case AttributeList::AT_transparent_union:
HandleTransparentUnionAttr(D, Attr, S);
@@ -1892,9 +1922,17 @@ static void ProcessDeclAttribute(Scope *scope, Decl *D,
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
/// attribute list to the specified decl, ignoring any type attributes.
void Sema::ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AttrList) {
- while (AttrList) {
- ProcessDeclAttribute(S, D, *AttrList, *this);
- AttrList = AttrList->getNext();
+ for (const AttributeList* l = AttrList; l; l = l->getNext()) {
+ ProcessDeclAttribute(S, D, *l, *this);
+ }
+
+ // GCC accepts
+ // static int a9 __attribute__((weakref));
+ // but that looks really pointless. We reject it.
+ if (D->hasAttr<WeakRefAttr>() && !D->hasAttr<AliasAttr>()) {
+ Diag(AttrList->getLoc(), diag::err_attribute_weakref_without_alias) <<
+ dyn_cast<NamedDecl>(D)->getNameAsString();
+ return;
}
}
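The check appended to ProcessDeclAttributeList enforces the policy stated in its comment: a weakref with neither a target string nor an accompanying alias attribute is diagnosed instead of being silently treated as plain weak. In user code (illustrative names):

static int a9 __attribute__((weakref));               // rejected: weakref without alias
static int a10 __attribute__((weakref("real_a10")));  // fine: the target supplies the alias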
diff --git a/lib/Sema/SemaDeclCXX.cpp b/lib/Sema/SemaDeclCXX.cpp
index 9defcca7e565..574b22502789 100644
--- a/lib/Sema/SemaDeclCXX.cpp
+++ b/lib/Sema/SemaDeclCXX.cpp
@@ -586,7 +586,10 @@ Sema::ActOnBaseSpecifier(DeclPtrTy classdecl, SourceRange SpecifierRange,
return true;
AdjustDeclIfTemplate(classdecl);
- CXXRecordDecl *Class = cast<CXXRecordDecl>(classdecl.getAs<Decl>());
+ CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(classdecl.getAs<Decl>());
+ if (!Class)
+ return true;
+
QualType BaseType = GetTypeFromParser(basetype);
if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
Virtual, Access,
@@ -1635,8 +1638,22 @@ Sema::SetBaseOrMemberInitializers(CXXConstructorDecl *Constructor,
new (Context) CXXBaseOrMemberInitializer*[NumInitializers];
Constructor->setBaseOrMemberInitializers(baseOrMemberInitializers);
- for (unsigned Idx = 0; Idx < NumInitializers; ++Idx)
- baseOrMemberInitializers[Idx] = AllToInit[Idx];
+ for (unsigned Idx = 0; Idx < NumInitializers; ++Idx) {
+ CXXBaseOrMemberInitializer *Member = AllToInit[Idx];
+ baseOrMemberInitializers[Idx] = Member;
+ if (!Member->isBaseInitializer())
+ continue;
+ const Type *BaseType = Member->getBaseClass();
+ const RecordType *RT = BaseType->getAs<RecordType>();
+ if (!RT)
+ continue;
+ CXXRecordDecl *BaseClassDecl =
+ cast<CXXRecordDecl>(RT->getDecl());
+ if (BaseClassDecl->hasTrivialDestructor())
+ continue;
+ CXXDestructorDecl *DD = BaseClassDecl->getDestructor(Context);
+ MarkDeclarationReferenced(Constructor->getLocation(), DD);
+ }
}
return HadError;
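The extra work in the loop above walks each base-class initializer and marks the base's destructor as referenced when it is non-trivial; a plausible reading is that cleanup of already-constructed bases (for example when a later subobject constructor throws) needs those destructors to be instantiated and emitted. A small C++ sketch of a class layout that exercises this path (names invented):

struct Base { ~Base(); };          // non-trivial destructor
struct Widget : Base {
  Widget();                        // building this constructor's initializers now
};                                 // also marks Base::~Base() as referenced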
@@ -2174,7 +2191,10 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
CXXConstructorDecl::Create(Context, ClassDecl,
ClassDecl->getLocation(), Name,
Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0),
+ 0, 0, false, 0,
+ /*FIXME*/false, false,
+ 0, 0, false,
+ CC_Default),
/*TInfo=*/0,
/*isExplicit=*/false,
/*isInline=*/true,
@@ -2246,7 +2266,10 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
ClassDecl->getLocation(), Name,
Context.getFunctionType(Context.VoidTy,
&ArgType, 1,
- false, 0),
+ false, 0,
+ /*FIXME:*/false,
+ false, 0, 0, false,
+ CC_Default),
/*TInfo=*/0,
/*isExplicit=*/false,
/*isInline=*/true,
@@ -2332,7 +2355,10 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
CXXMethodDecl *CopyAssignment =
CXXMethodDecl::Create(Context, ClassDecl, ClassDecl->getLocation(), Name,
Context.getFunctionType(RetType, &ArgType, 1,
- false, 0),
+ false, 0,
+ /*FIXME:*/false,
+ false, 0, 0, false,
+ CC_Default),
/*TInfo=*/0, /*isStatic=*/false, /*isInline=*/true);
CopyAssignment->setAccess(AS_public);
CopyAssignment->setImplicit();
@@ -2364,7 +2390,10 @@ void Sema::AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl) {
= CXXDestructorDecl::Create(Context, ClassDecl,
ClassDecl->getLocation(), Name,
Context.getFunctionType(Context.VoidTy,
- 0, 0, false, 0),
+ 0, 0, false, 0,
+ /*FIXME:*/false,
+ false, 0, 0, false,
+ CC_Default),
/*isInline=*/true,
/*isImplicitlyDeclared=*/true);
Destructor->setAccess(AS_public);
@@ -2523,7 +2552,13 @@ QualType Sema::CheckConstructorDeclarator(Declarator &D, QualType R,
const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
return Context.getFunctionType(Context.VoidTy, Proto->arg_type_begin(),
Proto->getNumArgs(),
- Proto->isVariadic(), 0);
+ Proto->isVariadic(), 0,
+ Proto->hasExceptionSpec(),
+ Proto->hasAnyExceptionSpec(),
+ Proto->getNumExceptions(),
+ Proto->exception_begin(),
+ Proto->getNoReturnAttr(),
+ Proto->getCallConv());
}
/// CheckConstructor - Checks a fully-formed constructor for
@@ -2680,7 +2715,9 @@ QualType Sema::CheckDestructorDeclarator(Declarator &D,
// "void" as the return type, since destructors don't have return
// types. We *always* have to do this, because GetTypeForDeclarator
// will put in a result type of "int" when none was specified.
- return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0);
+ // FIXME: Exceptions!
+ return Context.getFunctionType(Context.VoidTy, 0, 0, false, 0,
+ false, false, 0, 0, false, CC_Default);
}
/// CheckConversionDeclarator - Called by ActOnDeclarator to check the
@@ -2749,8 +2786,15 @@ void Sema::CheckConversionDeclarator(Declarator &D, QualType &R,
// Rebuild the function type "R" without any parameters (in case any
// of the errors above fired) and with the conversion type as the
// return type.
+ const FunctionProtoType *Proto = R->getAs<FunctionProtoType>();
R = Context.getFunctionType(ConvType, 0, 0, false,
- R->getAs<FunctionProtoType>()->getTypeQuals());
+ Proto->getTypeQuals(),
+ Proto->hasExceptionSpec(),
+ Proto->hasAnyExceptionSpec(),
+ Proto->getNumExceptions(),
+ Proto->exception_begin(),
+ Proto->getNoReturnAttr(),
+ Proto->getCallConv());
// C++0x explicit conversion operators.
if (D.getDeclSpec().isExplicitSpecified() && !getLangOptions().CPlusPlus0x)
@@ -3996,7 +4040,8 @@ bool Sema::InitializeVarWithConstructor(VarDecl *VD,
void Sema::FinalizeVarWithDestructor(VarDecl *VD, const RecordType *Record) {
CXXRecordDecl *ClassDecl = cast<CXXRecordDecl>(Record->getDecl());
- if (!ClassDecl->hasTrivialDestructor()) {
+ if (!ClassDecl->isInvalidDecl() && !VD->isInvalidDecl() &&
+ !ClassDecl->hasTrivialDestructor()) {
CXXDestructorDecl *Destructor = ClassDecl->getDestructor(Context);
MarkDeclarationReferenced(VD->getLocation(), Destructor);
CheckDestructorAccess(VD->getLocation(), Record);
@@ -4368,8 +4413,7 @@ Sema::CheckReferenceInit(Expr *&Init, QualType DeclType,
// Most pa