author    Roman Divacky <rdivacky@FreeBSD.org>    2010-07-13 17:21:42 +0000
committer Roman Divacky <rdivacky@FreeBSD.org>    2010-07-13 17:21:42 +0000
commit    4ba675006b5a8edfc48b6a9bd3dcf54a70cc08f2 (patch)
tree      48b44512b5db8ced345df4a1a56b5065cf2a14d9 /lib
parent    d7279c4c177bca357ef96ff1379fd9bc420bfe83 (diff)
download  src-4ba675006b5a8edfc48b6a9bd3dcf54a70cc08f2.tar.gz
          src-4ba675006b5a8edfc48b6a9bd3dcf54a70cc08f2.zip
Update clang to r108243. (vendor/clang/clang-r108243)
Notes:
    svn path=/vendor/clang/dist/; revision=210008
    svn path=/vendor/clang/clang-r108243/; revision=210076; tag=vendor/clang/clang-r108243
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 569
-rw-r--r--  lib/AST/ASTImporter.cpp | 93
-rw-r--r--  lib/AST/AttrImpl.cpp | 9
-rw-r--r--  lib/AST/CMakeLists.txt | 4
-rw-r--r--  lib/AST/CXXInheritance.cpp | 38
-rw-r--r--  lib/AST/Decl.cpp | 161
-rw-r--r--  lib/AST/DeclBase.cpp | 282
-rw-r--r--  lib/AST/DeclCXX.cpp | 124
-rw-r--r--  lib/AST/DeclFriend.cpp | 4
-rw-r--r--  lib/AST/DeclObjC.cpp | 23
-rw-r--r--  lib/AST/DeclPrinter.cpp | 47
-rw-r--r--  lib/AST/DeclTemplate.cpp | 117
-rw-r--r--  lib/AST/Expr.cpp | 427
-rw-r--r--  lib/AST/ExprCXX.cpp | 101
-rw-r--r--  lib/AST/ExprClassification.cpp | 471
-rw-r--r--  lib/AST/ExprConstant.cpp | 88
-rw-r--r--  lib/AST/Makefile | 6
-rw-r--r--  lib/AST/RecordLayoutBuilder.cpp | 972
-rw-r--r--  lib/AST/Stmt.cpp | 132
-rw-r--r--  lib/AST/StmtPrinter.cpp | 34
-rw-r--r--  lib/AST/StmtProfile.cpp | 13
-rw-r--r--  lib/AST/TemplateBase.cpp | 27
-rw-r--r--  lib/AST/TemplateName.cpp | 11
-rw-r--r--  lib/AST/Type.cpp | 133
-rw-r--r--  lib/AST/TypePrinter.cpp | 51
-rw-r--r--  lib/Analysis/CFG.cpp | 29
-rw-r--r--  lib/Analysis/CMakeLists.txt | 3
-rw-r--r--  lib/Analysis/LiveVariables.cpp | 22
-rw-r--r--  lib/Analysis/Makefile | 6
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp | 672
-rw-r--r--  lib/Basic/CMakeLists.txt | 3
-rw-r--r--  lib/Basic/Diagnostic.cpp | 19
-rw-r--r--  lib/Basic/Makefile | 9
-rw-r--r--  lib/Basic/TargetInfo.cpp | 6
-rw-r--r--  lib/Basic/Targets.cpp | 166
-rw-r--r--  lib/Checker/AnalysisConsumer.cpp (renamed from lib/Frontend/AnalysisConsumer.cpp) | 37
-rw-r--r--  lib/Checker/AttrNonNullChecker.cpp | 5
-rw-r--r--  lib/Checker/BasicConstraintManager.cpp | 124
-rw-r--r--  lib/Checker/BasicObjCFoundationChecks.cpp | 93
-rw-r--r--  lib/Checker/BasicObjCFoundationChecks.h | 3
-rw-r--r--  lib/Checker/BasicStore.cpp | 99
-rw-r--r--  lib/Checker/BugReporter.cpp | 4
-rw-r--r--  lib/Checker/BuiltinFunctionChecker.cpp | 15
-rw-r--r--  lib/Checker/CFRefCount.cpp | 201
-rw-r--r--  lib/Checker/CMakeLists.txt | 30
-rw-r--r--  lib/Checker/CStringChecker.cpp | 525
-rw-r--r--  lib/Checker/CallInliner.cpp | 2
-rw-r--r--  lib/Checker/CastSizeChecker.cpp | 21
-rw-r--r--  lib/Checker/CheckSecuritySyntaxOnly.cpp | 4
-rw-r--r--  lib/Checker/Environment.cpp | 4
-rw-r--r--  lib/Checker/FlatStore.cpp | 18
-rw-r--r--  lib/Checker/FrontendActions.cpp | 21
-rw-r--r--  lib/Checker/GRCoreEngine.cpp | 6
-rw-r--r--  lib/Checker/GRExprEngine.cpp | 176
-rw-r--r--  lib/Checker/GRExprEngineExperimentalChecks.cpp | 3
-rw-r--r--  lib/Checker/GRExprEngineExperimentalChecks.h | 3
-rw-r--r--  lib/Checker/GRExprEngineInternalChecks.h | 2
-rw-r--r--  lib/Checker/GRState.cpp | 31
-rw-r--r--  lib/Checker/HTMLDiagnostics.cpp (renamed from lib/Frontend/HTMLDiagnostics.cpp) | 6
-rw-r--r--  lib/Checker/IdempotentOperationChecker.cpp | 454
-rw-r--r--  lib/Checker/LLVMConventionsChecker.cpp | 50
-rw-r--r--  lib/Checker/Makefile | 6
-rw-r--r--  lib/Checker/MallocChecker.cpp | 256
-rw-r--r--  lib/Checker/MemRegion.cpp | 179
-rw-r--r--  lib/Checker/OSAtomicChecker.cpp | 8
-rw-r--r--  lib/Checker/PathDiagnostic.cpp | 13
-rw-r--r--  lib/Checker/PlistDiagnostics.cpp (renamed from lib/Frontend/PlistDiagnostics.cpp) | 2
-rw-r--r--  lib/Checker/RangeConstraintManager.cpp | 301
-rw-r--r--  lib/Checker/RegionStore.cpp | 339
-rw-r--r--  lib/Checker/ReturnStackAddressChecker.cpp | 125
-rw-r--r--  lib/Checker/SVals.cpp | 10
-rw-r--r--  lib/Checker/SValuator.cpp | 8
-rw-r--r--  lib/Checker/SimpleConstraintManager.cpp | 166
-rw-r--r--  lib/Checker/SimpleConstraintManager.h | 26
-rw-r--r--  lib/Checker/SimpleSValuator.cpp | 619
-rw-r--r--  lib/Checker/StackAddrLeakChecker.cpp | 204
-rw-r--r--  lib/Checker/Store.cpp | 14
-rw-r--r--  lib/Checker/StreamChecker.cpp | 287
-rw-r--r--  lib/Checker/SymbolManager.cpp | 38
-rw-r--r--  lib/Checker/VLASizeChecker.cpp | 49
-rw-r--r--  lib/CodeGen/ABIInfo.h | 12
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 339
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 72
-rw-r--r--  lib/CodeGen/CGBlocks.h | 7
-rw-r--r--  lib/CodeGen/CGBuilder.h | 8
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 924
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 77
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 1
-rw-r--r--  lib/CodeGen/CGCall.cpp | 458
-rw-r--r--  lib/CodeGen/CGCall.h | 8
-rw-r--r--  lib/CodeGen/CGClass.cpp | 156
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 43
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 148
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 195
-rw-r--r--  lib/CodeGen/CGException.cpp | 1525
-rw-r--r--  lib/CodeGen/CGException.h | 342
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 447
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 114
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 111
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 76
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 102
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 496
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 29
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 378
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 791
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 10
-rw-r--r--  lib/CodeGen/CGRTTI.cpp | 22
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 275
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp | 144
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 427
-rw-r--r--  lib/CodeGen/CGVTables.h | 17
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 6
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 348
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 640
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 632
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 370
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 52
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 82
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 34
-rw-r--r--  lib/CodeGen/GlobalDecl.h | 10
-rw-r--r--  lib/CodeGen/Makefile | 9
-rw-r--r--  lib/CodeGen/Mangle.cpp | 197
-rw-r--r--  lib/CodeGen/Mangle.h | 12
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 1191
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 2
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 393
-rw-r--r--  lib/Driver/Action.cpp | 5
-rw-r--r--  lib/Driver/Arg.cpp | 189
-rw-r--r--  lib/Driver/ArgList.cpp | 92
-rw-r--r--  lib/Driver/CMakeLists.txt | 4
-rw-r--r--  lib/Driver/Compilation.cpp | 19
-rw-r--r--  lib/Driver/Driver.cpp | 130
-rw-r--r--  lib/Driver/HostInfo.cpp | 58
-rw-r--r--  lib/Driver/Makefile | 6
-rw-r--r--  lib/Driver/OptTable.cpp | 13
-rw-r--r--  lib/Driver/Option.cpp | 106
-rw-r--r--  lib/Driver/ToolChains.cpp | 186
-rw-r--r--  lib/Driver/ToolChains.h | 18
-rw-r--r--  lib/Driver/Tools.cpp | 486
-rw-r--r--  lib/Driver/Tools.h | 52
-rw-r--r--  lib/Driver/Types.cpp | 18
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 21
-rw-r--r--  lib/Frontend/ASTMerge.cpp | 7
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 10
-rw-r--r--  lib/Frontend/BoostConAction.cpp | 10
-rw-r--r--  lib/Frontend/CMakeLists.txt | 12
-rw-r--r--  lib/Frontend/CodeGenAction.cpp | 593
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 32
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 235
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 54
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 96
-rw-r--r--  lib/Frontend/FrontendOptions.cpp | 4
-rw-r--r--  lib/Frontend/GeneratePCH.cpp | 11
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 45
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 37
-rw-r--r--  lib/Frontend/Makefile | 6
-rw-r--r--  lib/Frontend/PCHReader.cpp | 443
-rw-r--r--  lib/Frontend/PCHReaderDecl.cpp | 609
-rw-r--r--  lib/Frontend/PCHReaderStmt.cpp | 1064
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 510
-rw-r--r--  lib/Frontend/PCHWriterDecl.cpp | 423
-rw-r--r--  lib/Frontend/PCHWriterStmt.cpp | 488
-rw-r--r--  lib/Frontend/PrintParserCallbacks.cpp | 3
-rw-r--r--  lib/Frontend/PrintPreprocessedOutput.cpp | 63
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 63
-rw-r--r--  lib/Frontend/Warnings.cpp | 2
-rw-r--r--  lib/Headers/CMakeLists.txt | 13
-rw-r--r--  lib/Headers/Makefile | 21
-rw-r--r--  lib/Headers/altivec.h | 5317
-rw-r--r--  lib/Headers/arm_neon.td | 341
-rw-r--r--  lib/Headers/emmintrin.h | 7
-rw-r--r--  lib/Headers/smmintrin.h | 16
-rw-r--r--  lib/Headers/stddef.h | 7
-rw-r--r--  lib/Headers/stdint.h | 4
-rw-r--r--  lib/Headers/xmmintrin.h | 6
-rw-r--r--  lib/Index/CallGraph.cpp | 2
-rw-r--r--  lib/Index/Entity.cpp | 77
-rw-r--r--  lib/Index/EntityImpl.h | 1
-rw-r--r--  lib/Index/Indexer.cpp | 21
-rw-r--r--  lib/Index/Makefile | 12
-rw-r--r--  lib/Lex/Lexer.cpp | 56
-rw-r--r--  lib/Lex/LiteralSupport.cpp | 17
-rw-r--r--  lib/Lex/Makefile | 8
-rw-r--r--  lib/Lex/PPCaching.cpp | 12
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 2
-rw-r--r--  lib/Lex/Pragma.cpp | 209
-rw-r--r--  lib/Lex/Preprocessor.cpp | 10
-rwxr-xr-x  lib/Makefile | 6
-rw-r--r--  lib/Parse/AttributeList.cpp | 1
-rw-r--r--  lib/Parse/CMakeLists.txt | 2
-rw-r--r--  lib/Parse/DeclSpec.cpp | 55
-rw-r--r--  lib/Parse/Makefile | 6
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 52
-rw-r--r--  lib/Parse/ParseDecl.cpp | 123
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 186
-rw-r--r--  lib/Parse/ParseExpr.cpp | 89
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 144
-rw-r--r--  lib/Parse/ParseInit.cpp | 4
-rw-r--r--  lib/Parse/ParseObjc.cpp | 144
-rw-r--r--  lib/Parse/ParsePragma.cpp | 12
-rw-r--r--  lib/Parse/ParsePragma.h | 16
-rw-r--r--  lib/Parse/ParseStmt.cpp | 35
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 134
-rw-r--r--  lib/Parse/Parser.cpp | 114
-rw-r--r--  lib/Parse/RAIIObjectsForParser.h | 17
-rw-r--r--  lib/Rewrite/CMakeLists.txt | 6
-rw-r--r--  lib/Rewrite/FixItRewriter.cpp (renamed from lib/Frontend/FixItRewriter.cpp) | 2
-rw-r--r--  lib/Rewrite/FrontendActions.cpp | 106
-rw-r--r--  lib/Rewrite/HTMLPrint.cpp (renamed from lib/Frontend/HTMLPrint.cpp) | 2
-rw-r--r--  lib/Rewrite/Makefile | 6
-rw-r--r--  lib/Rewrite/RewriteMacros.cpp (renamed from lib/Frontend/RewriteMacros.cpp) | 2
-rw-r--r--  lib/Rewrite/RewriteObjC.cpp (renamed from lib/Frontend/RewriteObjC.cpp) | 47
-rw-r--r--  lib/Rewrite/RewriteRope.cpp | 2
-rw-r--r--  lib/Rewrite/RewriteTest.cpp (renamed from lib/Frontend/RewriteTest.cpp) | 2
-rw-r--r--  lib/Rewrite/Rewriter.cpp | 12
-rw-r--r--  lib/Runtime/Makefile | 101
-rw-r--r--  lib/Sema/CMakeLists.txt | 3
-rw-r--r--  lib/Sema/JumpDiagnostics.cpp | 70
-rw-r--r--  lib/Sema/Lookup.h | 5
-rw-r--r--  lib/Sema/Makefile | 6
-rw-r--r--  lib/Sema/Sema.cpp | 45
-rw-r--r--  lib/Sema/Sema.h | 239
-rw-r--r--  lib/Sema/SemaAccess.cpp | 30
-rw-r--r--  lib/Sema/SemaAttr.cpp | 13
-rw-r--r--  lib/Sema/SemaCXXCast.cpp | 62
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 6
-rw-r--r--  lib/Sema/SemaChecking.cpp | 458
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 1026
-rw-r--r--  lib/Sema/SemaDecl.cpp | 298
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 114
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 1385
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 52
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 9
-rw-r--r--  lib/Sema/SemaExpr.cpp | 689
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 255
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 14
-rw-r--r--  lib/Sema/SemaInit.cpp | 187
-rw-r--r--  lib/Sema/SemaInit.h | 10
-rw-r--r--  lib/Sema/SemaLookup.cpp | 446
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 79
-rw-r--r--  lib/Sema/SemaOverload.cpp | 662
-rw-r--r--  lib/Sema/SemaStmt.cpp | 170
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 644
-rw-r--r--  lib/Sema/SemaTemplate.h | 31
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 12
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 26
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 87
-rw-r--r--  lib/Sema/SemaType.cpp | 372
-rw-r--r--  lib/Sema/TreeTransform.h | 301
249 files changed, 30710 insertions, 12097 deletions
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 851f8d1c68bf..d41051f5dcad 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -31,25 +31,128 @@
using namespace clang;
+unsigned ASTContext::NumImplicitDefaultConstructors;
+unsigned ASTContext::NumImplicitDefaultConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyConstructors;
+unsigned ASTContext::NumImplicitCopyConstructorsDeclared;
+unsigned ASTContext::NumImplicitCopyAssignmentOperators;
+unsigned ASTContext::NumImplicitCopyAssignmentOperatorsDeclared;
+unsigned ASTContext::NumImplicitDestructors;
+unsigned ASTContext::NumImplicitDestructorsDeclared;
+
enum FloatingRank {
FloatRank, DoubleRank, LongDoubleRank
};
+void
+ASTContext::CanonicalTemplateTemplateParm::Profile(llvm::FoldingSetNodeID &ID,
+ TemplateTemplateParmDecl *Parm) {
+ ID.AddInteger(Parm->getDepth());
+ ID.AddInteger(Parm->getPosition());
+ // FIXME: Parameter pack
+
+ TemplateParameterList *Params = Parm->getTemplateParameters();
+ ID.AddInteger(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P)) {
+ ID.AddInteger(0);
+ ID.AddBoolean(TTP->isParameterPack());
+ continue;
+ }
+
+ if (NonTypeTemplateParmDecl *NTTP = dyn_cast<NonTypeTemplateParmDecl>(*P)) {
+ ID.AddInteger(1);
+ // FIXME: Parameter pack
+ ID.AddPointer(NTTP->getType().getAsOpaquePtr());
+ continue;
+ }
+
+ TemplateTemplateParmDecl *TTP = cast<TemplateTemplateParmDecl>(*P);
+ ID.AddInteger(2);
+ Profile(ID, TTP);
+ }
+}
+
+TemplateTemplateParmDecl *
+ASTContext::getCanonicalTemplateTemplateParmDecl(
+ TemplateTemplateParmDecl *TTP) {
+ // Check if we already have a canonical template template parameter.
+ llvm::FoldingSetNodeID ID;
+ CanonicalTemplateTemplateParm::Profile(ID, TTP);
+ void *InsertPos = 0;
+ CanonicalTemplateTemplateParm *Canonical
+ = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ if (Canonical)
+ return Canonical->getParam();
+
+ // Build a canonical template parameter list.
+ TemplateParameterList *Params = TTP->getTemplateParameters();
+ llvm::SmallVector<NamedDecl *, 4> CanonParams;
+ CanonParams.reserve(Params->size());
+ for (TemplateParameterList::const_iterator P = Params->begin(),
+ PEnd = Params->end();
+ P != PEnd; ++P) {
+ if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*P))
+ CanonParams.push_back(
+ TemplateTypeParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getIndex(), 0, false,
+ TTP->isParameterPack()));
+ else if (NonTypeTemplateParmDecl *NTTP
+ = dyn_cast<NonTypeTemplateParmDecl>(*P))
+ CanonParams.push_back(
+ NonTypeTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), NTTP->getDepth(),
+ NTTP->getPosition(), 0,
+ getCanonicalType(NTTP->getType()),
+ 0));
+ else
+ CanonParams.push_back(getCanonicalTemplateTemplateParmDecl(
+ cast<TemplateTemplateParmDecl>(*P)));
+ }
+
+ TemplateTemplateParmDecl *CanonTTP
+ = TemplateTemplateParmDecl::Create(*this, getTranslationUnitDecl(),
+ SourceLocation(), TTP->getDepth(),
+ TTP->getPosition(), 0,
+ TemplateParameterList::Create(*this, SourceLocation(),
+ SourceLocation(),
+ CanonParams.data(),
+ CanonParams.size(),
+ SourceLocation()));
+
+ // Get the new insert position for the node we care about.
+ Canonical = CanonTemplateTemplateParms.FindNodeOrInsertPos(ID, InsertPos);
+ assert(Canonical == 0 && "Shouldn't be in the map!");
+ (void)Canonical;
+
+ // Create the canonical template template parameter entry.
+ Canonical = new (*this) CanonicalTemplateTemplateParm(CanonTTP);
+ CanonTemplateTemplateParms.InsertNode(Canonical, InsertPos);
+ return CanonTTP;
+}
+
ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
const TargetInfo &t,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins,
bool FreeMem, unsigned size_reserve) :
- GlobalNestedNameSpecifier(0), CFConstantStringTypeDecl(0),
- NSConstantStringTypeDecl(0),
+ TemplateSpecializationTypes(this_()),
+ DependentTemplateSpecializationTypes(this_()),
+ GlobalNestedNameSpecifier(0), IsInt128Installed(false),
+ CFConstantStringTypeDecl(0), NSConstantStringTypeDecl(0),
ObjCFastEnumerationStateTypeDecl(0), FILEDecl(0), jmp_bufDecl(0),
sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
+ NullTypeSourceInfo(QualType()),
SourceMgr(SM), LangOpts(LOpts), FreeMemory(FreeMem), Target(t),
Idents(idents), Selectors(sels),
BuiltinInfo(builtins),
DeclarationNames(*this),
ExternalSource(0), PrintingPolicy(LOpts),
- LastSDM(0, 0) {
+ LastSDM(0, 0),
+ UniqueBlockByRefTypeID(0), UniqueBlockParmTypeID(0) {
ObjCIdRedefinitionType = QualType();
ObjCClassRedefinitionType = QualType();
ObjCSelRedefinitionType = QualType();
@@ -88,13 +191,6 @@ ASTContext::~ASTContext() {
Deallocate(&*I++);
}
- for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
- I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
- // Increment in loop to prevent using deallocated memory.
- if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
- R->Destroy(*this);
- }
-
for (llvm::DenseMap<const ObjCContainerDecl*,
const ASTRecordLayout*>::iterator
I = ObjCLayouts.begin(), E = ObjCLayouts.end(); I != E; ) {
@@ -104,6 +200,16 @@ ASTContext::~ASTContext() {
}
}
+ // ASTRecordLayout objects in ASTRecordLayouts must always be destroyed
+ // even when using the BumpPtrAllocator because they can contain
+ // DenseMaps.
+ for (llvm::DenseMap<const RecordDecl*, const ASTRecordLayout*>::iterator
+ I = ASTRecordLayouts.begin(), E = ASTRecordLayouts.end(); I != E; ) {
+ // Increment in loop to prevent using deallocated memory.
+ if (ASTRecordLayout *R = const_cast<ASTRecordLayout*>((I++)->second))
+ R->Destroy(*this);
+ }
+
// Destroy nested-name-specifiers.
for (llvm::FoldingSet<NestedNameSpecifier>::iterator
NNS = NestedNameSpecifiers.begin(),
@@ -155,11 +261,30 @@ void ASTContext::PrintStats() const {
#include "clang/AST/TypeNodes.def"
fprintf(stderr, "Total bytes = %d\n", int(TotalBytes));
-
+
+ // Implicit special member functions.
+ fprintf(stderr, " %u/%u implicit default constructors created\n",
+ NumImplicitDefaultConstructorsDeclared,
+ NumImplicitDefaultConstructors);
+ fprintf(stderr, " %u/%u implicit copy constructors created\n",
+ NumImplicitCopyConstructorsDeclared,
+ NumImplicitCopyConstructors);
+ fprintf(stderr, " %u/%u implicit copy assignment operators created\n",
+ NumImplicitCopyAssignmentOperatorsDeclared,
+ NumImplicitCopyAssignmentOperators);
+ fprintf(stderr, " %u/%u implicit destructors created\n",
+ NumImplicitDestructorsDeclared, NumImplicitDestructors);
+
+ if (!FreeMemory)
+ BumpAlloc.PrintStats();
+
if (ExternalSource.get()) {
fprintf(stderr, "\n");
ExternalSource->PrintStats();
}
+
+ if (!FreeMemory)
+ BumpAlloc.PrintStats();
}
@@ -273,13 +398,14 @@ ASTContext::getInstantiatedFromStaticDataMember(const VarDecl *Var) {
void
ASTContext::setInstantiatedFromStaticDataMember(VarDecl *Inst, VarDecl *Tmpl,
- TemplateSpecializationKind TSK) {
+ TemplateSpecializationKind TSK,
+ SourceLocation PointOfInstantiation) {
assert(Inst->isStaticDataMember() && "Not a static data member");
assert(Tmpl->isStaticDataMember() && "Not a static data member");
assert(!InstantiatedFromStaticDataMember[Inst] &&
"Already noted what static data member was instantiated from");
InstantiatedFromStaticDataMember[Inst]
- = new (*this) MemberSpecializationInfo(Tmpl, TSK);
+ = new (*this) MemberSpecializationInfo(Tmpl, TSK, PointOfInstantiation);
}
NamedDecl *
@@ -358,6 +484,16 @@ ASTContext::overridden_methods_end(const CXXMethodDecl *Method) const {
return Pos->second.end();
}
+unsigned
+ASTContext::overridden_methods_size(const CXXMethodDecl *Method) const {
+ llvm::DenseMap<const CXXMethodDecl *, CXXMethodVector>::const_iterator Pos
+ = OverriddenMethods.find(Method);
+ if (Pos == OverriddenMethods.end())
+ return 0;
+
+ return Pos->second.size();
+}
+
void ASTContext::addOverriddenMethod(const CXXMethodDecl *Method,
const CXXMethodDecl *Overridden) {
OverriddenMethods[Method].push_back(Overridden);
@@ -414,6 +550,15 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool RefAsPointee) {
T = getPointerType(RT->getPointeeType());
}
if (!T->isIncompleteType() && !T->isFunctionType()) {
+ unsigned MinWidth = Target.getLargeArrayMinWidth();
+ unsigned ArrayAlign = Target.getLargeArrayAlign();
+ if (isa<VariableArrayType>(T) && MinWidth != 0)
+ Align = std::max(Align, ArrayAlign);
+ if (ConstantArrayType *CT = dyn_cast<ConstantArrayType>(T)) {
+ unsigned Size = getTypeSize(CT);
+ if (MinWidth != 0 && MinWidth <= Size)
+ Align = std::max(Align, ArrayAlign);
+ }
// Incomplete or function types default to 1.
while (isa<VariableArrayType>(T) || isa<IncompleteArrayType>(T))
T = cast<ArrayType>(T)->getElementType();
@@ -762,7 +907,8 @@ void ASTContext::ShallowCollectObjCIvars(const ObjCInterfaceDecl *OI,
void ASTContext::CollectNonClassIvars(const ObjCInterfaceDecl *OI,
llvm::SmallVectorImpl<ObjCIvarDecl*> &Ivars) {
// Find ivars declared in class extension.
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) {
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension()) {
for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
E = CDecl->ivar_end(); I != E; ++I) {
Ivars.push_back(*I);
@@ -827,7 +973,8 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) {
unsigned count = 0;
// Count ivars declared in class extension.
- if (const ObjCCategoryDecl *CDecl = OI->getClassExtension())
+ for (const ObjCCategoryDecl *CDecl = OI->getFirstClassExtension(); CDecl;
+ CDecl = CDecl->getNextClassExtension())
count += CDecl->ivar_size();
// Count ivar defined in this class's implementation. This
@@ -1406,7 +1553,7 @@ QualType ASTContext::getIncompleteArrayType(QualType EltTy,
/// getVectorType - Return the unique reference to a vector type of
/// the specified element type and size. VectorType must be a built-in type.
QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
- bool IsAltiVec, bool IsPixel) {
+ VectorType::AltiVecSpecific AltiVecSpec) {
BuiltinType *baseType;
baseType = dyn_cast<BuiltinType>(getCanonicalType(vecType).getTypePtr());
@@ -1414,8 +1561,8 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
- VectorType::Profile(ID, vecType, NumElts, Type::Vector,
- IsAltiVec, IsPixel);
+ VectorType::Profile(ID, vecType, NumElts, Type::Vector, AltiVecSpec);
+
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -1423,16 +1570,19 @@ QualType ASTContext::getVectorType(QualType vecType, unsigned NumElts,
// If the element type isn't canonical, this won't be a canonical type either,
// so fill in the canonical type field.
QualType Canonical;
- if (!vecType.isCanonical() || IsAltiVec || IsPixel) {
- Canonical = getVectorType(getCanonicalType(vecType),
- NumElts, false, false);
+ if (!vecType.isCanonical() || (AltiVecSpec == VectorType::AltiVec)) {
+ // pass VectorType::NotAltiVec for AltiVecSpec to make AltiVec canonical
+ // vector type (except 'vector bool ...' and 'vector Pixel') the same as
+ // the equivalent GCC vector types
+ Canonical = getVectorType(getCanonicalType(vecType), NumElts,
+ VectorType::NotAltiVec);
// Get the new insert position for the node we care about.
VectorType *NewIP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(NewIP == 0 && "Shouldn't be in the map!"); NewIP = NewIP;
}
VectorType *New = new (*this, TypeAlignment)
- VectorType(vecType, NumElts, Canonical, IsAltiVec, IsPixel);
+ VectorType(vecType, NumElts, Canonical, AltiVecSpec);
VectorTypes.InsertNode(New, InsertPos);
Types.push_back(New);
return QualType(New, 0);
@@ -1448,7 +1598,8 @@ QualType ASTContext::getExtVectorType(QualType vecType, unsigned NumElts) {
// Check if we've already instantiated a vector of this type.
llvm::FoldingSetNodeID ID;
- VectorType::Profile(ID, vecType, NumElts, Type::ExtVector, false, false);
+ VectorType::Profile(ID, vecType, NumElts, Type::ExtVector,
+ VectorType::NotAltiVec);
void *InsertPos = 0;
if (VectorType *VTP = VectorTypes.FindNodeOrInsertPos(ID, InsertPos))
return QualType(VTP, 0);
@@ -1629,8 +1780,7 @@ QualType ASTContext::getInjectedClassNameType(CXXRecordDecl *Decl,
assert(NeedsInjectedClassNameType(Decl));
if (Decl->TypeForDecl) {
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
- } else if (CXXRecordDecl *PrevDecl
- = cast_or_null<CXXRecordDecl>(Decl->getPreviousDeclaration())) {
+ } else if (CXXRecordDecl *PrevDecl = Decl->getPreviousDeclaration()) {
assert(PrevDecl->TypeForDecl && "previous declaration has no type");
Decl->TypeForDecl = PrevDecl->TypeForDecl;
assert(isa<InjectedClassNameType>(Decl->TypeForDecl));
@@ -1658,11 +1808,11 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
assert(!Record->getPreviousDeclaration() &&
"struct/union has previous declaration");
assert(!NeedsInjectedClassNameType(Record));
- Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Record);
+ return getRecordType(Record);
} else if (const EnumDecl *Enum = dyn_cast<EnumDecl>(Decl)) {
assert(!Enum->getPreviousDeclaration() &&
"enum has previous declaration");
- Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Enum);
+ return getEnumType(Enum);
} else if (const UnresolvedUsingTypenameDecl *Using =
dyn_cast<UnresolvedUsingTypenameDecl>(Decl)) {
Decl->TypeForDecl = new (*this, TypeAlignment) UnresolvedUsingType(Using);
@@ -1675,16 +1825,42 @@ QualType ASTContext::getTypeDeclTypeSlow(const TypeDecl *Decl) {
/// getTypedefType - Return the unique reference to the type for the
/// specified typename decl.
-QualType ASTContext::getTypedefType(const TypedefDecl *Decl) {
+QualType
+ASTContext::getTypedefType(const TypedefDecl *Decl, QualType Canonical) {
if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
- QualType Canonical = getCanonicalType(Decl->getUnderlyingType());
+ if (Canonical.isNull())
+ Canonical = getCanonicalType(Decl->getUnderlyingType());
Decl->TypeForDecl = new(*this, TypeAlignment)
TypedefType(Type::Typedef, Decl, Canonical);
Types.push_back(Decl->TypeForDecl);
return QualType(Decl->TypeForDecl, 0);
}
+QualType ASTContext::getRecordType(const RecordDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const RecordDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ Decl->TypeForDecl = new (*this, TypeAlignment) RecordType(Decl);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
+QualType ASTContext::getEnumType(const EnumDecl *Decl) {
+ if (Decl->TypeForDecl) return QualType(Decl->TypeForDecl, 0);
+
+ if (const EnumDecl *PrevDecl = Decl->getPreviousDeclaration())
+ if (PrevDecl->TypeForDecl)
+ return QualType(Decl->TypeForDecl = PrevDecl->TypeForDecl, 0);
+
+ Decl->TypeForDecl = new (*this, TypeAlignment) EnumType(Decl);
+ Types.push_back(Decl->TypeForDecl);
+ return QualType(Decl->TypeForDecl, 0);
+}
+
/// \brief Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
@@ -1763,8 +1939,7 @@ ASTContext::getTemplateSpecializationTypeInfo(TemplateName Name,
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgumentListInfo &Args,
- QualType Canon,
- bool IsCurrentInstantiation) {
+ QualType Canon) {
unsigned NumArgs = Args.size();
llvm::SmallVector<TemplateArgument, 4> ArgVec;
@@ -1773,56 +1948,18 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
ArgVec.push_back(Args[i].getArgument());
return getTemplateSpecializationType(Template, ArgVec.data(), NumArgs,
- Canon, IsCurrentInstantiation);
+ Canon);
}
QualType
ASTContext::getTemplateSpecializationType(TemplateName Template,
const TemplateArgument *Args,
unsigned NumArgs,
- QualType Canon,
- bool IsCurrentInstantiation) {
+ QualType Canon) {
if (!Canon.isNull())
Canon = getCanonicalType(Canon);
- else {
- assert(!IsCurrentInstantiation &&
- "current-instantiation specializations should always "
- "have a canonical type");
-
- // Build the canonical template specialization type.
- TemplateName CanonTemplate = getCanonicalTemplateName(Template);
- llvm::SmallVector<TemplateArgument, 4> CanonArgs;
- CanonArgs.reserve(NumArgs);
- for (unsigned I = 0; I != NumArgs; ++I)
- CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
-
- // Determine whether this canonical template specialization type already
- // exists.
- llvm::FoldingSetNodeID ID;
- TemplateSpecializationType::Profile(ID, CanonTemplate, false,
- CanonArgs.data(), NumArgs, *this);
-
- void *InsertPos = 0;
- TemplateSpecializationType *Spec
- = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
-
- if (!Spec) {
- // Allocate a new canonical template specialization type.
- void *Mem = Allocate((sizeof(TemplateSpecializationType) +
- sizeof(TemplateArgument) * NumArgs),
- TypeAlignment);
- Spec = new (Mem) TemplateSpecializationType(*this, CanonTemplate, false,
- CanonArgs.data(), NumArgs,
- Canon);
- Types.push_back(Spec);
- TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
- }
-
- if (Canon.isNull())
- Canon = QualType(Spec, 0);
- assert(Canon->isDependentType() &&
- "Non-dependent template-id type must have a canonical type");
- }
+ else
+ Canon = getCanonicalTemplateSpecializationType(Template, Args, NumArgs);
// Allocate the (non-canonical) template specialization type, but don't
// try to unique it: these types typically have location information that
@@ -1831,8 +1968,7 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
sizeof(TemplateArgument) * NumArgs),
TypeAlignment);
TemplateSpecializationType *Spec
- = new (Mem) TemplateSpecializationType(*this, Template,
- IsCurrentInstantiation,
+ = new (Mem) TemplateSpecializationType(Template,
Args, NumArgs,
Canon);
@@ -1841,6 +1977,44 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
}
QualType
+ASTContext::getCanonicalTemplateSpecializationType(TemplateName Template,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+ // Build the canonical template specialization type.
+ TemplateName CanonTemplate = getCanonicalTemplateName(Template);
+ llvm::SmallVector<TemplateArgument, 4> CanonArgs;
+ CanonArgs.reserve(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I)
+ CanonArgs.push_back(getCanonicalTemplateArgument(Args[I]));
+
+ // Determine whether this canonical template specialization type already
+ // exists.
+ llvm::FoldingSetNodeID ID;
+ TemplateSpecializationType::Profile(ID, CanonTemplate,
+ CanonArgs.data(), NumArgs, *this);
+
+ void *InsertPos = 0;
+ TemplateSpecializationType *Spec
+ = TemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+
+ if (!Spec) {
+ // Allocate a new canonical template specialization type.
+ void *Mem = Allocate((sizeof(TemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ Spec = new (Mem) TemplateSpecializationType(CanonTemplate,
+ CanonArgs.data(), NumArgs,
+ QualType());
+ Types.push_back(Spec);
+ TemplateSpecializationTypes.InsertNode(Spec, InsertPos);
+ }
+
+ assert(Spec->isDependentType() &&
+ "Non-dependent template-id type must have a canonical type");
+ return QualType(Spec, 0);
+}
+
+QualType
ASTContext::getElaboratedType(ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
QualType NamedType) {
@@ -1898,44 +2072,69 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
}
QualType
-ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
NestedNameSpecifier *NNS,
- const TemplateSpecializationType *TemplateId,
- QualType Canon) {
+ const IdentifierInfo *Name,
+ const TemplateArgumentListInfo &Args) {
+ // TODO: avoid this copy
+ llvm::SmallVector<TemplateArgument, 16> ArgCopy;
+ for (unsigned I = 0, E = Args.size(); I != E; ++I)
+ ArgCopy.push_back(Args[I].getArgument());
+ return getDependentTemplateSpecializationType(Keyword, NNS, Name,
+ ArgCopy.size(),
+ ArgCopy.data());
+}
+
+QualType
+ASTContext::getDependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
llvm::FoldingSetNodeID ID;
- DependentNameType::Profile(ID, Keyword, NNS, TemplateId);
+ DependentTemplateSpecializationType::Profile(ID, *this, Keyword, NNS,
+ Name, NumArgs, Args);
void *InsertPos = 0;
- DependentNameType *T
- = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentTemplateSpecializationType *T
+ = DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
- if (Canon.isNull()) {
- NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
- QualType CanonType = getCanonicalType(QualType(TemplateId, 0));
- ElaboratedTypeKeyword CanonKeyword = Keyword;
- if (Keyword == ETK_None)
- CanonKeyword = ETK_Typename;
- if (CanonNNS != NNS || CanonKeyword != Keyword ||
- CanonType != QualType(TemplateId, 0)) {
- const TemplateSpecializationType *CanonTemplateId
- = CanonType->getAs<TemplateSpecializationType>();
- assert(CanonTemplateId &&
- "Canonical type must also be a template specialization type");
- Canon = getDependentNameType(CanonKeyword, CanonNNS, CanonTemplateId);
- }
+ NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
+
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None) CanonKeyword = ETK_Typename;
- DependentNameType *CheckT
- = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
- assert(!CheckT && "Typename canonical type is broken"); (void)CheckT;
+ bool AnyNonCanonArgs = false;
+ llvm::SmallVector<TemplateArgument, 16> CanonArgs(NumArgs);
+ for (unsigned I = 0; I != NumArgs; ++I) {
+ CanonArgs[I] = getCanonicalTemplateArgument(Args[I]);
+ if (!CanonArgs[I].structurallyEquals(Args[I]))
+ AnyNonCanonArgs = true;
}
- T = new (*this) DependentNameType(Keyword, NNS, TemplateId, Canon);
+ QualType Canon;
+ if (AnyNonCanonArgs || CanonNNS != NNS || CanonKeyword != Keyword) {
+ Canon = getDependentTemplateSpecializationType(CanonKeyword, CanonNNS,
+ Name, NumArgs,
+ CanonArgs.data());
+
+ // Find the insert position again.
+ DependentTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos);
+ }
+
+ void *Mem = Allocate((sizeof(DependentTemplateSpecializationType) +
+ sizeof(TemplateArgument) * NumArgs),
+ TypeAlignment);
+ T = new (Mem) DependentTemplateSpecializationType(Keyword, NNS,
+ Name, NumArgs, Args, Canon);
Types.push_back(T);
- DependentNameTypes.InsertNode(T, InsertPos);
+ DependentTemplateSpecializationTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
@@ -2326,6 +2525,48 @@ QualType ASTContext::getUnqualifiedArrayType(QualType T,
SourceRange());
}
+/// UnwrapSimilarPointerTypes - If T1 and T2 are pointer types that
+/// may be similar (C++ 4.4), replaces T1 and T2 with the type that
+/// they point to and return true. If T1 and T2 aren't pointer types
+/// or pointer-to-member types, or if they are not similar at this
+/// level, returns false and leaves T1 and T2 unchanged. Top-level
+/// qualifiers on T1 and T2 are ignored. This function will typically
+/// be called in a loop that successively "unwraps" pointer and
+/// pointer-to-member types to compare them at each level.
+bool ASTContext::UnwrapSimilarPointerTypes(QualType &T1, QualType &T2) {
+ const PointerType *T1PtrType = T1->getAs<PointerType>(),
+ *T2PtrType = T2->getAs<PointerType>();
+ if (T1PtrType && T2PtrType) {
+ T1 = T1PtrType->getPointeeType();
+ T2 = T2PtrType->getPointeeType();
+ return true;
+ }
+
+ const MemberPointerType *T1MPType = T1->getAs<MemberPointerType>(),
+ *T2MPType = T2->getAs<MemberPointerType>();
+ if (T1MPType && T2MPType &&
+ hasSameUnqualifiedType(QualType(T1MPType->getClass(), 0),
+ QualType(T2MPType->getClass(), 0))) {
+ T1 = T1MPType->getPointeeType();
+ T2 = T2MPType->getPointeeType();
+ return true;
+ }
+
+ if (getLangOptions().ObjC1) {
+ const ObjCObjectPointerType *T1OPType = T1->getAs<ObjCObjectPointerType>(),
+ *T2OPType = T2->getAs<ObjCObjectPointerType>();
+ if (T1OPType && T2OPType) {
+ T1 = T1OPType->getPointeeType();
+ T2 = T2OPType->getPointeeType();
+ return true;
+ }
+ }
+
+ // FIXME: Block pointers, too?
+
+ return false;
+}
+
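
For illustration only, not part of this commit: the doc comment above says UnwrapSimilarPointerTypes is typically called in a loop that unwraps one pointer level at a time. A minimal sketch of that calling pattern, with a hypothetical helper name and a deliberately simplified qualifier check, might look like this:

    #include "clang/AST/ASTContext.h"
    using namespace clang;

    // Hypothetical illustration of the "unwrap in a loop" pattern; real
    // callers (e.g. the C++ qualification-conversion check in Sema) compare
    // qualifiers with more care at each level.
    static bool pointeesSimilar(ASTContext &Ctx, QualType T1, QualType T2) {
      while (Ctx.UnwrapSimilarPointerTypes(T1, T2)) {
        // T1 and T2 now name the pointee types at this level.
        if (T1.getCVRQualifiers() != T2.getCVRQualifiers())
          return false;
      }
      // Similar all the way down if the remaining types match ignoring quals.
      return Ctx.hasSameUnqualifiedType(T1, T2);
    }
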
DeclarationName ASTContext::getNameForTemplate(TemplateName Name) {
if (TemplateDecl *TD = Name.getAsTemplateDecl())
return TD->getDeclName();
@@ -2344,10 +2585,14 @@ DeclarationName ASTContext::getNameForTemplate(TemplateName Name) {
}
TemplateName ASTContext::getCanonicalTemplateName(TemplateName Name) {
- // If this template name refers to a template, the canonical
- // template name merely stores the template itself.
- if (TemplateDecl *Template = Name.getAsTemplateDecl())
+ if (TemplateDecl *Template = Name.getAsTemplateDecl()) {
+ if (TemplateTemplateParmDecl *TTP
+ = dyn_cast<TemplateTemplateParmDecl>(Template))
+ Template = getCanonicalTemplateTemplateParmDecl(TTP);
+
+ // The canonical template name is the canonical template declaration.
return TemplateName(cast<TemplateDecl>(Template->getCanonicalDecl()));
+ }
assert(!Name.getAsOverloadedTemplate());
@@ -2856,6 +3101,10 @@ QualType ASTContext::getObjCFastEnumerationStateType() {
Field->setAccess(AS_public);
ObjCFastEnumerationStateTypeDecl->addDecl(Field);
}
+ if (getLangOptions().CPlusPlus)
+ if (CXXRecordDecl *CXXRD =
+ dyn_cast<CXXRecordDecl>(ObjCFastEnumerationStateTypeDecl))
+ CXXRD->setEmpty(false);
ObjCFastEnumerationStateTypeDecl->completeDefinition();
}
@@ -2981,7 +3230,6 @@ QualType ASTContext::BuildByRefType(const char *DeclName, QualType Ty) {
bool HasCopyAndDispose = BlockRequiresCopying(Ty);
// FIXME: Move up
- static unsigned int UniqueBlockByRefTypeID = 0;
llvm::SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__Block_byref_" <<
++UniqueBlockByRefTypeID << '_' << DeclName;
@@ -3033,7 +3281,6 @@ QualType ASTContext::getBlockParmType(
llvm::SmallVectorImpl<const Expr *> &Layout) {
// FIXME: Move up
- static unsigned int UniqueBlockParmTypeID = 0;
llvm::SmallString<36> Name;
llvm::raw_svector_ostream(Name) << "__block_literal_"
<< ++UniqueBlockParmTypeID;
@@ -3122,7 +3369,7 @@ CharUnits ASTContext::getObjCEncodingTypeSize(QualType type) {
CharUnits sz = getTypeSizeInChars(type);
// Make all integer and enum types at least as large as an int
- if (sz.isPositive() && type->isIntegralType())
+ if (sz.isPositive() && type->isIntegralOrEnumerationType())
sz = std::max(sz, getTypeSizeInChars(IntTy));
// Treat arrays as pointers, since that's how they're passed in.
else if (type->isArrayType())
@@ -3143,7 +3390,7 @@ void ASTContext::getObjCEncodingForBlock(const BlockExpr *Expr,
QualType BlockTy =
Expr->getType()->getAs<BlockPointerType>()->getPointeeType();
// Encode result type.
- getObjCEncodingForType(cast<FunctionType>(BlockTy)->getResultType(), S);
+ getObjCEncodingForType(BlockTy->getAs<FunctionType>()->getResultType(), S);
// Compute size of all parameters.
// Start with computing size of a pointer in number of bytes.
// FIXME: There might(should) be a better way of doing this computation!
@@ -3376,13 +3623,74 @@ void ASTContext::getObjCEncodingForType(QualType T, std::string& S,
true /* outermost type */);
}
+static char ObjCEncodingForPrimitiveKind(const ASTContext *C, QualType T) {
+ switch (T->getAs<BuiltinType>()->getKind()) {
+ default: assert(0 && "Unhandled builtin type kind");
+ case BuiltinType::Void: return 'v';
+ case BuiltinType::Bool: return 'B';
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar: return 'C';
+ case BuiltinType::UShort: return 'S';
+ case BuiltinType::UInt: return 'I';
+ case BuiltinType::ULong:
+ return
+ (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'L' : 'Q';
+ case BuiltinType::UInt128: return 'T';
+ case BuiltinType::ULongLong: return 'Q';
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar: return 'c';
+ case BuiltinType::Short: return 's';
+ case BuiltinType::WChar:
+ case BuiltinType::Int: return 'i';
+ case BuiltinType::Long:
+ return
+ (const_cast<ASTContext *>(C))->getIntWidth(T) == 32 ? 'l' : 'q';
+ case BuiltinType::LongLong: return 'q';
+ case BuiltinType::Int128: return 't';
+ case BuiltinType::Float: return 'f';
+ case BuiltinType::Double: return 'd';
+ case BuiltinType::LongDouble: return 'd';
+ }
+}
+
static void EncodeBitField(const ASTContext *Context, std::string& S,
- const FieldDecl *FD) {
+ QualType T, const FieldDecl *FD) {
const Expr *E = FD->getBitWidth();
assert(E && "bitfield width not there - getObjCEncodingForTypeImpl");
ASTContext *Ctx = const_cast<ASTContext*>(Context);
- unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
S += 'b';
+ // The NeXT runtime encodes bit fields as b followed by the number of bits.
+ // The GNU runtime requires more information; bitfields are encoded as b,
+ // then the offset (in bits) of the first element, then the type of the
+ // bitfield, then the size in bits. For example, in this structure:
+ //
+ // struct
+ // {
+ // int integer;
+ // int flags:2;
+ // };
+ // On a 32-bit system, the encoding for flags would be b2 for the NeXT
+ // runtime, but b32i2 for the GNU runtime. The reason for this extra
+ // information is not especially sensible, but we're stuck with it for
+ // compatibility with GCC, although providing it breaks anything that
+ // actually uses runtime introspection and wants to work on both runtimes...
+ if (!Ctx->getLangOptions().NeXTRuntime) {
+ const RecordDecl *RD = FD->getParent();
+ const ASTRecordLayout &RL = Ctx->getASTRecordLayout(RD);
+ // FIXME: This same linear search is also used in ExprConstant - it might
+ // be better if the FieldDecl stored its offset. We'd be increasing the
+ // size of the object slightly, but saving some time every time it is used.
+ unsigned i = 0;
+ for (RecordDecl::field_iterator Field = RD->field_begin(),
+ FieldEnd = RD->field_end();
+ Field != FieldEnd; (void)++Field, ++i) {
+ if (*Field == FD)
+ break;
+ }
+ S += llvm::utostr(RL.getFieldOffset(i));
+ S += ObjCEncodingForPrimitiveKind(Context, T);
+ }
+ unsigned N = E->EvaluateAsInt(*Ctx).getZExtValue();
S += llvm::utostr(N);
}
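
For illustration only, not part of this commit: the comment above encodes "struct { int integer; int flags:2; }" as "b2" for the NeXT runtime and "b32i2" for the GNU runtime. A standalone sketch of how the GNU form is composed (the helper name is hypothetical; the real code above appends the pieces directly to S with llvm::utostr):

    #include <string>

    // 'b', then the bit offset of the field within its record, then the
    // primitive type code, then the width in bits.
    static std::string gnuBitFieldEncoding(unsigned BitOffset, char TypeCode,
                                           unsigned WidthInBits) {
      return "b" + std::to_string(BitOffset) + TypeCode +
             std::to_string(WidthInBits);
    }
    // gnuBitFieldEncoding(32, 'i', 2) == "b32i2"; the NeXT runtime emits "b2".
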
@@ -3393,40 +3701,10 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
const FieldDecl *FD,
bool OutermostType,
bool EncodingProperty) {
- if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
+ if (T->getAs<BuiltinType>()) {
if (FD && FD->isBitField())
- return EncodeBitField(this, S, FD);
- char encoding;
- switch (BT->getKind()) {
- default: assert(0 && "Unhandled builtin type kind");
- case BuiltinType::Void: encoding = 'v'; break;
- case BuiltinType::Bool: encoding = 'B'; break;
- case BuiltinType::Char_U:
- case BuiltinType::UChar: encoding = 'C'; break;
- case BuiltinType::UShort: encoding = 'S'; break;
- case BuiltinType::UInt: encoding = 'I'; break;
- case BuiltinType::ULong:
- encoding =
- (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'L' : 'Q';
- break;
- case BuiltinType::UInt128: encoding = 'T'; break;
- case BuiltinType::ULongLong: encoding = 'Q'; break;
- case BuiltinType::Char_S:
- case BuiltinType::SChar: encoding = 'c'; break;
- case BuiltinType::Short: encoding = 's'; break;
- case BuiltinType::Int: encoding = 'i'; break;
- case BuiltinType::Long:
- encoding =
- (const_cast<ASTContext *>(this))->getIntWidth(T) == 32 ? 'l' : 'q';
- break;
- case BuiltinType::LongLong: encoding = 'q'; break;
- case BuiltinType::Int128: encoding = 't'; break;
- case BuiltinType::Float: encoding = 'f'; break;
- case BuiltinType::Double: encoding = 'd'; break;
- case BuiltinType::LongDouble: encoding = 'd'; break;
- }
-
- S += encoding;
+ return EncodeBitField(this, S, T, FD);
+ S += ObjCEncodingForPrimitiveKind(this, T);
return;
}
@@ -3585,7 +3863,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
if (T->isEnumeralType()) {
if (FD && FD->isBitField())
- EncodeBitField(this, S, FD);
+ EncodeBitField(this, S, T, FD);
else
S += 'i';
return;
@@ -4728,7 +5006,7 @@ QualType ASTContext::getCorrespondingUnsignedType(QualType T) {
// Turn <4 x signed int> -> <4 x unsigned int>
if (const VectorType *VTy = T->getAs<VectorType>())
return getVectorType(getCorrespondingUnsignedType(VTy->getElementType()),
- VTy->getNumElements(), VTy->isAltiVec(), VTy->isPixel());
+ VTy->getNumElements(), VTy->getAltiVecSpecific());
// For enums, we return the unsigned version of the base type.
if (const EnumType *ETy = T->getAs<EnumType>())
@@ -4886,7 +5164,8 @@ static QualType DecodeTypeFromStr(const char *&Str, ASTContext &Context,
QualType ElementType = DecodeTypeFromStr(Str, Context, Error, false);
// FIXME: Don't know what to do about AltiVec.
- Type = Context.getVectorType(ElementType, NumElements, false, false);
+ Type = Context.getVectorType(ElementType, NumElements,
+ VectorType::NotAltiVec);
break;
}
case 'X': {
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 6ed08d1e1e29..8d347d171665 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -73,6 +73,7 @@ namespace {
// FIXME: TemplateSpecializationType
QualType VisitElaboratedType(ElaboratedType *T);
// FIXME: DependentNameType
+ // FIXME: DependentTemplateSpecializationType
QualType VisitObjCInterfaceType(ObjCInterfaceType *T);
QualType VisitObjCObjectType(ObjCObjectType *T);
QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T);
@@ -439,9 +440,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
if (Vec1->getNumElements() != Vec2->getNumElements())
return false;
- if (Vec1->isAltiVec() != Vec2->isAltiVec())
- return false;
- if (Vec1->isPixel() != Vec2->isPixel())
+ if (Vec1->getAltiVecSpecific() != Vec2->getAltiVecSpecific())
return false;
break;
}
@@ -619,14 +618,32 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
if (!IsStructurallyEquivalent(Typename1->getIdentifier(),
Typename2->getIdentifier()))
return false;
- if (!IsStructurallyEquivalent(Context,
- QualType(Typename1->getTemplateId(), 0),
- QualType(Typename2->getTemplateId(), 0)))
- return false;
break;
}
+ case Type::DependentTemplateSpecialization: {
+ const DependentTemplateSpecializationType *Spec1 =
+ cast<DependentTemplateSpecializationType>(T1);
+ const DependentTemplateSpecializationType *Spec2 =
+ cast<DependentTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getQualifier(),
+ Spec2->getQualifier()))
+ return false;
+ if (!IsStructurallyEquivalent(Spec1->getIdentifier(),
+ Spec2->getIdentifier()))
+ return false;
+ if (Spec1->getNumArgs() != Spec2->getNumArgs())
+ return false;
+ for (unsigned I = 0, N = Spec1->getNumArgs(); I != N; ++I) {
+ if (!IsStructurallyEquivalent(Context,
+ Spec1->getArg(I), Spec2->getArg(I)))
+ return false;
+ }
+ break;
+ }
+
case Type::ObjCInterface: {
const ObjCInterfaceType *Iface1 = cast<ObjCInterfaceType>(T1);
const ObjCInterfaceType *Iface2 = cast<ObjCInterfaceType>(T2);
@@ -1172,8 +1189,7 @@ QualType ASTNodeImporter::VisitVectorType(VectorType *T) {
return Importer.getToContext().getVectorType(ToElementType,
T->getNumElements(),
- T->isAltiVec(),
- T->isPixel());
+ T->getAltiVecSpecific());
}
QualType ASTNodeImporter::VisitExtVectorType(ExtVectorType *T) {
@@ -1687,7 +1703,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
// Create the record declaration.
RecordDecl *D2 = AdoptDecl;
if (!D2) {
- if (CXXRecordDecl *D1CXX = dyn_cast<CXXRecordDecl>(D)) {
+ if (isa<CXXRecordDecl>(D)) {
CXXRecordDecl *D2CXX = CXXRecordDecl::Create(Importer.getToContext(),
D->getTagKind(),
DC, Loc,
@@ -1695,30 +1711,6 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
Importer.Import(D->getTagKeywordLoc()));
D2 = D2CXX;
D2->setAccess(D->getAccess());
-
- if (D->isDefinition()) {
- // Add base classes.
- llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
- for (CXXRecordDecl::base_class_iterator
- Base1 = D1CXX->bases_begin(),
- FromBaseEnd = D1CXX->bases_end();
- Base1 != FromBaseEnd;
- ++Base1) {
- QualType T = Importer.Import(Base1->getType());
- if (T.isNull())
- return 0;
-
- Bases.push_back(
- new (Importer.getToContext())
- CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
- Base1->isVirtual(),
- Base1->isBaseOfClass(),
- Base1->getAccessSpecifierAsWritten(),
- T));
- }
- if (!Bases.empty())
- D2CXX->setBases(Bases.data(), Bases.size());
- }
} else {
D2 = RecordDecl::Create(Importer.getToContext(), D->getTagKind(),
DC, Loc,
@@ -1739,6 +1731,33 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (D->isDefinition()) {
D2->startDefinition();
+
+ // Add base classes.
+ if (CXXRecordDecl *D2CXX = dyn_cast<CXXRecordDecl>(D2)) {
+ CXXRecordDecl *D1CXX = cast<CXXRecordDecl>(D);
+
+ llvm::SmallVector<CXXBaseSpecifier *, 4> Bases;
+ for (CXXRecordDecl::base_class_iterator
+ Base1 = D1CXX->bases_begin(),
+ FromBaseEnd = D1CXX->bases_end();
+ Base1 != FromBaseEnd;
+ ++Base1) {
+ QualType T = Importer.Import(Base1->getType());
+ if (T.isNull())
+ return 0;
+
+ Bases.push_back(
+ new (Importer.getToContext())
+ CXXBaseSpecifier(Importer.Import(Base1->getSourceRange()),
+ Base1->isVirtual(),
+ Base1->isBaseOfClass(),
+ Base1->getAccessSpecifierAsWritten(),
+ T));
+ }
+ if (!Bases.empty())
+ D2CXX->setBases(Bases.data(), Bases.size());
+ }
+
ImportDeclContext(D);
D2->completeDefinition();
}
@@ -2598,8 +2617,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
}
// Import the type.
- QualType T = Importer.Import(D->getType());
- if (T.isNull())
+ TypeSourceInfo *T = Importer.Import(D->getTypeSourceInfo());
+ if (!T)
return 0;
// Create the new property.
@@ -2614,6 +2633,8 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
LexicalDC->addDecl(ToProperty);
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
+ ToProperty->setPropertyAttributesAsWritten(
+ D->getPropertyAttributesAsWritten());
ToProperty->setGetterName(Importer.Import(D->getGetterName()));
ToProperty->setSetterName(Importer.Import(D->getSetterName()));
ToProperty->setGetterMethodDecl(
diff --git a/lib/AST/AttrImpl.cpp b/lib/AST/AttrImpl.cpp
index 0fab22caced8..b09ba895c019 100644
--- a/lib/AST/AttrImpl.cpp
+++ b/lib/AST/AttrImpl.cpp
@@ -24,7 +24,7 @@ void Attr::Destroy(ASTContext &C) {
C.Deallocate((void*)this);
}
-AttrWithString::AttrWithString(Attr::Kind AK, ASTContext &C, llvm::StringRef s)
+AttrWithString::AttrWithString(attr::Kind AK, ASTContext &C, llvm::StringRef s)
: Attr(AK) {
assert(!s.empty());
StrLen = s.size();
@@ -51,7 +51,7 @@ void FormatAttr::setType(ASTContext &C, llvm::StringRef type) {
}
NonNullAttr::NonNullAttr(ASTContext &C, unsigned* arg_nums, unsigned size)
- : Attr(NonNull), ArgNums(0), Size(0) {
+ : Attr(attr::NonNull), ArgNums(0), Size(0) {
if (size == 0)
return;
assert(arg_nums);
@@ -93,6 +93,7 @@ DEF_SIMPLE_ATTR_CLONE(NSReturnsNotRetained)
DEF_SIMPLE_ATTR_CLONE(NSReturnsRetained)
DEF_SIMPLE_ATTR_CLONE(NoDebug)
DEF_SIMPLE_ATTR_CLONE(NoInline)
+DEF_SIMPLE_ATTR_CLONE(NoInstrumentFunction)
DEF_SIMPLE_ATTR_CLONE(NoReturn)
DEF_SIMPLE_ATTR_CLONE(NoThrow)
DEF_SIMPLE_ATTR_CLONE(ObjCException)
@@ -200,6 +201,10 @@ Attr *ReqdWorkGroupSizeAttr::clone(ASTContext &C) const {
return ::new (C) ReqdWorkGroupSizeAttr(X, Y, Z);
}
+Attr *InitPriorityAttr::clone(ASTContext &C) const {
+ return ::new (C) InitPriorityAttr(Priority);
+}
+
Attr *MSP430InterruptAttr::clone(ASTContext &C) const {
return ::new (C) MSP430InterruptAttr(Number);
}
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index bce3646feedb..407ed95f3ee1 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -18,6 +18,7 @@ add_clang_library(clangAST
DeclPrinter.cpp
DeclTemplate.cpp
Expr.cpp
+ ExprClassification.cpp
ExprConstant.cpp
ExprCXX.cpp
FullExpr.cpp
@@ -39,4 +40,5 @@ add_clang_library(clangAST
TypePrinter.cpp
)
-add_dependencies(clangAST ClangDiagnosticAST ClangStmtNodes)
+add_dependencies(clangAST ClangARMNeon ClangAttrClasses ClangAttrList
+ ClangDiagnosticAST ClangDeclNodes ClangStmtNodes)
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index d616e42e0076..c563c37d58f4 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -90,6 +90,9 @@ bool CXXRecordDecl::isDerivedFrom(CXXRecordDecl *Base, CXXBasePaths &Paths) cons
}
bool CXXRecordDecl::isVirtuallyDerivedFrom(CXXRecordDecl *Base) const {
+ if (!getNumVBases())
+ return false;
+
CXXBasePaths Paths(/*FindAmbiguities=*/false, /*RecordPaths=*/false,
/*DetectVirtual=*/false);
@@ -559,22 +562,23 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
const CXXMethodDecl *CanonOM
= cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+
if (CanonOM->begin_overridden_methods()
- == CanonOM->end_overridden_methods()) {
- // C++ [class.virtual]p2:
- // A virtual member function C::vf of a class object S is
- // a final overrider unless the most derived class (1.8)
- // of which S is a base class subobject (if any) declares
- // or inherits another member function that overrides vf.
- //
- // Treating this object like the most derived class, we
- // replace any overrides from base classes with this
- // overriding virtual function.
- Overriders[CanonOM].replaceAll(
- UniqueVirtualMethod(CanonM, SubobjectNumber,
- InVirtualSubobject));
+ == CanonOM->end_overridden_methods())
continue;
- }
// Continue recursion to the methods that this virtual method
// overrides.
@@ -582,6 +586,12 @@ void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
CanonOM->end_overridden_methods()));
}
}
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
}
}
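
For illustration only, not part of this commit: a small hierarchy showing the [class.virtual]p2 wording quoted in the hunks above.

    struct A { virtual void f(); };     // A::f
    struct B : A { virtual void f(); }; // B::f overrides A::f
    struct C : B { };                   // final overrider of f in C is B::f
    struct D : B { virtual void f(); }; // final overrider of f in D is D::f
    // The added Overriders[CanonM].add(...) call also records A::f, B::f and
    // D::f as overriding themselves ("any virtual function overrides itself").
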
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index ffdcb471d082..149938fc5c86 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -523,6 +523,14 @@ bool NamedDecl::isCXXInstanceMember() const {
// DeclaratorDecl Implementation
//===----------------------------------------------------------------------===//
+template <typename DeclT>
+static SourceLocation getTemplateOrInnerLocStart(const DeclT *decl) {
+ if (decl->getNumTemplateParameterLists() > 0)
+ return decl->getTemplateParameterList(0)->getTemplateLoc();
+ else
+ return decl->getInnerLocStart();
+}
+
DeclaratorDecl::~DeclaratorDecl() {}
void DeclaratorDecl::Destroy(ASTContext &C) {
if (hasExtInfo())
@@ -531,15 +539,8 @@ void DeclaratorDecl::Destroy(ASTContext &C) {
}
SourceLocation DeclaratorDecl::getTypeSpecStartLoc() const {
- if (DeclInfo) {
- TypeLoc TL = getTypeSourceInfo()->getTypeLoc();
- while (true) {
- TypeLoc NextTL = TL.getNextTypeLoc();
- if (!NextTL)
- return TL.getLocalSourceRange().getBegin();
- TL = NextTL;
- }
- }
+ TypeSourceInfo *TSI = getTypeSourceInfo();
+ if (TSI) return TSI->getTypeLoc().getBeginLoc();
return SourceLocation();
}
@@ -573,6 +574,40 @@ void DeclaratorDecl::setQualifierInfo(NestedNameSpecifier *Qualifier,
}
}
+SourceLocation DeclaratorDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
+void
+QualifierInfo::setTemplateParameterListsInfo(ASTContext &Context,
+ unsigned NumTPLists,
+ TemplateParameterList **TPLists) {
+ assert((NumTPLists == 0 || TPLists != 0) &&
+ "Empty array of template parameters with positive size!");
+ assert((NumTPLists == 0 || NNS) &&
+ "Nonempty array of template parameters with no qualifier!");
+
+ // Free previous template parameters (if any).
+ if (NumTemplParamLists > 0) {
+ Context.Deallocate(TemplParamLists);
+ TemplParamLists = 0;
+ NumTemplParamLists = 0;
+ }
+ // Set info on matched template parameter lists (if any).
+ if (NumTPLists > 0) {
+ TemplParamLists = new (Context) TemplateParameterList*[NumTPLists];
+ NumTemplParamLists = NumTPLists;
+ for (unsigned i = NumTPLists; i-- > 0; )
+ TemplParamLists[i] = TPLists[i];
+ }
+}
+
+void QualifierInfo::Destroy(ASTContext &Context) {
+ // FIXME: Deallocate template parameter lists themselves!
+ if (TemplParamLists)
+ Context.Deallocate(TemplParamLists);
+}
+
//===----------------------------------------------------------------------===//
// VarDecl Implementation
//===----------------------------------------------------------------------===//
@@ -613,14 +648,17 @@ void VarDecl::Destroy(ASTContext& C) {
VarDecl::~VarDecl() {
}
-SourceRange VarDecl::getSourceRange() const {
+SourceLocation VarDecl::getInnerLocStart() const {
SourceLocation Start = getTypeSpecStartLoc();
if (Start.isInvalid())
Start = getLocation();
-
+ return Start;
+}
+
+SourceRange VarDecl::getSourceRange() const {
if (getInit())
- return SourceRange(Start, getInit()->getLocEnd());
- return SourceRange(Start, getLocation());
+ return SourceRange(getOuterLocStart(), getInit()->getLocEnd());
+ return SourceRange(getOuterLocStart(), getLocation());
}
bool VarDecl::isExternC() const {
@@ -678,7 +716,15 @@ VarDecl::DefinitionKind VarDecl::isThisDeclarationADefinition() const {
// AST for 'extern "C" int foo;' is annotated with 'extern'.
if (hasExternalStorage())
return DeclarationOnly;
-
+
+ if (getStorageClassAsWritten() == Extern ||
+ getStorageClassAsWritten() == PrivateExtern) {
+ for (const VarDecl *PrevVar = getPreviousDeclaration();
+ PrevVar; PrevVar = PrevVar->getPreviousDeclaration()) {
+ if (PrevVar->getLinkage() == InternalLinkage && PrevVar->hasInit())
+ return DeclarationOnly;
+ }
+ }
// C99 6.9.2p2:
// A declaration of an object that has file scope without an initializer,
// and without a storage class specifier or the scs 'static', constitutes
@@ -697,7 +743,7 @@ VarDecl *VarDecl::getActingDefinition() {
if (Kind != TentativeDefinition)
return 0;
- VarDecl *LastTentative = false;
+ VarDecl *LastTentative = 0;
VarDecl *First = getFirstDeclaration();
for (redecl_iterator I = First->redecls_begin(), E = First->redecls_end();
I != E; ++I) {
@@ -907,6 +953,17 @@ bool FunctionDecl::isVariadic() const {
return false;
}
+bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
+ if (I->Body) {
+ Definition = *I;
+ return true;
+ }
+ }
+
+ return false;
+}
+
Stmt *FunctionDecl::getBody(const FunctionDecl *&Definition) const {
for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
if (I->Body) {
@@ -1107,11 +1164,11 @@ bool FunctionDecl::isInlined() const {
}
const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
- Stmt *Pattern = 0;
+ bool HasPattern = false;
if (PatternDecl)
- Pattern = PatternDecl->getBody(PatternDecl);
+ HasPattern = PatternDecl->hasBody(PatternDecl);
- if (Pattern && PatternDecl)
+ if (HasPattern && PatternDecl)
return PatternDecl->isInlined();
return false;
@@ -1197,6 +1254,23 @@ const IdentifierInfo *FunctionDecl::getLiteralIdentifier() const {
return 0;
}
+FunctionDecl::TemplatedKind FunctionDecl::getTemplatedKind() const {
+ if (TemplateOrSpecialization.isNull())
+ return TK_NonTemplate;
+ if (TemplateOrSpecialization.is<FunctionTemplateDecl *>())
+ return TK_FunctionTemplate;
+ if (TemplateOrSpecialization.is<MemberSpecializationInfo *>())
+ return TK_MemberSpecialization;
+ if (TemplateOrSpecialization.is<FunctionTemplateSpecializationInfo *>())
+ return TK_FunctionTemplateSpecialization;
+ if (TemplateOrSpecialization.is
+ <DependentFunctionTemplateSpecializationInfo*>())
+ return TK_DependentFunctionTemplateSpecialization;
+
+ assert(false && "Did we miss a TemplateOrSpecialization type?");
+ return TK_NonTemplate;
+}
+
FunctionDecl *FunctionDecl::getInstantiatedFromMemberFunction() const {
if (MemberSpecializationInfo *Info = getMemberSpecializationInfo())
return cast<FunctionDecl>(Info->getInstantiatedFrom());
@@ -1239,15 +1313,15 @@ bool FunctionDecl::isImplicitlyInstantiable() const {
// Find the actual template from which we will instantiate.
const FunctionDecl *PatternDecl = getTemplateInstantiationPattern();
- Stmt *Pattern = 0;
+ bool HasPattern = false;
if (PatternDecl)
- Pattern = PatternDecl->getBody(PatternDecl);
+ HasPattern = PatternDecl->hasBody(PatternDecl);
// C++0x [temp.explicit]p9:
// Except for inline functions, other explicit instantiation declarations
// have the effect of suppressing the implicit instantiation of the entity
// to which they refer.
- if (!Pattern || !PatternDecl)
+ if (!HasPattern || !PatternDecl)
return true;
return PatternDecl->isInlined();
@@ -1304,7 +1378,8 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
const TemplateArgumentList *TemplateArgs,
void *InsertPos,
TemplateSpecializationKind TSK,
- const TemplateArgumentListInfo *TemplateArgsAsWritten) {
+ const TemplateArgumentListInfo *TemplateArgsAsWritten,
+ SourceLocation PointOfInstantiation) {
assert(TSK != TSK_Undeclared &&
"Must specify the type of function template specialization");
FunctionTemplateSpecializationInfo *Info
@@ -1317,6 +1392,7 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
Info->Template.setInt(TSK - 1);
Info->TemplateArguments = TemplateArgs;
Info->TemplateArgumentsAsWritten = TemplateArgsAsWritten;
+ Info->PointOfInstantiation = PointOfInstantiation;
TemplateOrSpecialization = Info;
// Insert this function template specialization into the set of known
@@ -1336,6 +1412,28 @@ FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
}
void
+FunctionDecl::setFunctionTemplateSpecialization(FunctionTemplateDecl *Template,
+ unsigned NumTemplateArgs,
+ const TemplateArgument *TemplateArgs,
+ TemplateSpecializationKind TSK,
+ unsigned NumTemplateArgsAsWritten,
+ TemplateArgumentLoc *TemplateArgsAsWritten,
+ SourceLocation LAngleLoc,
+ SourceLocation RAngleLoc,
+ SourceLocation PointOfInstantiation) {
+ ASTContext &Ctx = getASTContext();
+ TemplateArgumentList *TemplArgs
+ = new (Ctx) TemplateArgumentList(Ctx, TemplateArgs, NumTemplateArgs);
+ TemplateArgumentListInfo *TemplArgsInfo
+ = new (Ctx) TemplateArgumentListInfo(LAngleLoc, RAngleLoc);
+ for (unsigned i=0; i != NumTemplateArgsAsWritten; ++i)
+ TemplArgsInfo->addArgument(TemplateArgsAsWritten[i]);
+
+ setFunctionTemplateSpecialization(Template, TemplArgs, /*InsertPos=*/0, TSK,
+ TemplArgsInfo, PointOfInstantiation);
+}
+
+void
FunctionDecl::setDependentTemplateSpecialization(ASTContext &Context,
const UnresolvedSetImpl &Templates,
const TemplateArgumentListInfo &TemplateArgs) {
@@ -1427,7 +1525,7 @@ bool FunctionDecl::isOutOfLine() const {
// class template, check whether that member function was defined out-of-line.
if (FunctionDecl *FD = getInstantiatedFromMemberFunction()) {
const FunctionDecl *Definition;
- if (FD->getBody(Definition))
+ if (FD->hasBody(Definition))
return Definition->isOutOfLine();
}
@@ -1435,7 +1533,7 @@ bool FunctionDecl::isOutOfLine() const {
// check whether that function template was defined out-of-line.
if (FunctionTemplateDecl *FunTmpl = getPrimaryTemplate()) {
const FunctionDecl *Definition;
- if (FunTmpl->getTemplatedDecl()->getBody(Definition))
+ if (FunTmpl->getTemplatedDecl()->hasBody(Definition))
return Definition->isOutOfLine();
}
@@ -1472,9 +1570,13 @@ void TagDecl::Destroy(ASTContext &C) {
TypeDecl::Destroy(C);
}
+SourceLocation TagDecl::getOuterLocStart() const {
+ return getTemplateOrInnerLocStart(this);
+}
+
SourceRange TagDecl::getSourceRange() const {
SourceLocation E = RBraceLoc.isValid() ? RBraceLoc : getLocation();
- return SourceRange(TagKeywordLoc, E);
+ return SourceRange(getOuterLocStart(), E);
}
TagDecl* TagDecl::getCanonicalDecl() {
@@ -1569,6 +1671,10 @@ EnumDecl *EnumDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation L,
return Enum;
}
+EnumDecl *EnumDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) EnumDecl(0, SourceLocation(), 0, 0, SourceLocation());
+}
+
void EnumDecl::Destroy(ASTContext& C) {
TagDecl::Destroy(C);
}
@@ -1608,6 +1714,11 @@ RecordDecl *RecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
+RecordDecl *RecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) RecordDecl(Record, TTK_Struct, 0, SourceLocation(), 0, 0,
+ SourceLocation());
+}
+
RecordDecl::~RecordDecl() {
}
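
For reference, a minimal sketch of how a caller might use the hasBody(const FunctionDecl *&) overload added above to find the defining redeclaration without forcing the body to be deserialized from a PCH. This is not part of the patch; it assumes the clang headers of this revision and an AST that has already been built.

#include "clang/AST/Decl.h"

// Return the redeclaration that actually owns the body, or 0 if every
// redeclaration in the chain is a declaration only.
static const clang::FunctionDecl *findDefinition(const clang::FunctionDecl *FD) {
  const clang::FunctionDecl *Definition = 0; // filled in by hasBody()
  if (FD->hasBody(Definition))
    return Definition;
  return 0;
}
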
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 42a372632099..d4f997de7669 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -35,16 +35,18 @@ using namespace clang;
// Statistics
//===----------------------------------------------------------------------===//
-#define DECL(Derived, Base) static int n##Derived##s = 0;
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
static bool StatSwitch = false;
const char *Decl::getDeclKindName() const {
switch (DeclKind) {
- default: assert(0 && "Declaration not in DeclNodes.def!");
-#define DECL(Derived, Base) case Derived: return #Derived;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
@@ -60,9 +62,10 @@ void Decl::setInvalidDecl(bool Invalid) {
const char *DeclContext::getDeclKindName() const {
switch (DeclKind) {
- default: assert(0 && "Declaration context not in DeclNodes.def!");
-#define DECL(Derived, Base) case Decl::Derived: return #Derived;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration context not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case Decl::DERIVED: return #DERIVED;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
@@ -75,28 +78,31 @@ void Decl::PrintStats() {
fprintf(stderr, "*** Decl Stats:\n");
int totalDecls = 0;
-#define DECL(Derived, Base) totalDecls += n##Derived##s;
-#include "clang/AST/DeclNodes.def"
+#define DECL(DERIVED, BASE) totalDecls += n##DERIVED##s;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
fprintf(stderr, " %d decls total.\n", totalDecls);
int totalBytes = 0;
-#define DECL(Derived, Base) \
- if (n##Derived##s > 0) { \
- totalBytes += (int)(n##Derived##s * sizeof(Derived##Decl)); \
- fprintf(stderr, " %d " #Derived " decls, %d each (%d bytes)\n", \
- n##Derived##s, (int)sizeof(Derived##Decl), \
- (int)(n##Derived##s * sizeof(Derived##Decl))); \
+#define DECL(DERIVED, BASE) \
+ if (n##DERIVED##s > 0) { \
+ totalBytes += (int)(n##DERIVED##s * sizeof(DERIVED##Decl)); \
+ fprintf(stderr, " %d " #DERIVED " decls, %d each (%d bytes)\n", \
+ n##DERIVED##s, (int)sizeof(DERIVED##Decl), \
+ (int)(n##DERIVED##s * sizeof(DERIVED##Decl))); \
}
-#include "clang/AST/DeclNodes.def"
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
fprintf(stderr, "Total bytes = %d\n", totalBytes);
}
-void Decl::addDeclKind(Kind k) {
+void Decl::add(Kind k) {
switch (k) {
- default: assert(0 && "Declaration not in DeclNodes.def!");
-#define DECL(Derived, Base) case Derived: ++n##Derived##s; break;
-#include "clang/AST/DeclNodes.def"
+ default: assert(0 && "Declaration not in DeclNodes.inc!");
+#define DECL(DERIVED, BASE) case DERIVED: ++n##DERIVED##s; break;
+#define ABSTRACT_DECL(DECL)
+#include "clang/AST/DeclNodes.inc"
}
}
@@ -206,17 +212,17 @@ ASTContext &Decl::getASTContext() const {
return getTranslationUnitDecl()->getASTContext();
}
-bool Decl::isUsed() const {
+bool Decl::isUsed(bool CheckUsedAttr) const {
if (Used)
return true;
// Check for used attribute.
- if (hasAttr<UsedAttr>())
+ if (CheckUsedAttr && hasAttr<UsedAttr>())
return true;
// Check redeclarations for used attribute.
for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I) {
- if (I->hasAttr<UsedAttr>() || I->Used)
+ if ((CheckUsedAttr && I->hasAttr<UsedAttr>()) || I->Used)
return true;
}
@@ -285,6 +291,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
// Never have names.
case Friend:
case FriendTemplate:
+ case AccessSpec:
case LinkageSpec:
case FileScopeAsm:
case StaticAssert:
@@ -307,9 +314,20 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
return 0;
}
+void Decl::initAttrs(Attr *attrs) {
+ assert(!HasAttrs && "Decl already contains attrs.");
+
+ Attr *&AttrBlank = getASTContext().getDeclAttrs(this);
+ assert(AttrBlank == 0 && "HasAttrs was wrong?");
+
+ AttrBlank = attrs;
+ HasAttrs = true;
+}
+
void Decl::addAttr(Attr *NewAttr) {
Attr *&ExistingAttr = getASTContext().getDeclAttrs(this);
+ assert(NewAttr->getNext() == 0 && "Chain of attributes will be truncated!");
NewAttr->setNext(ExistingAttr);
ExistingAttr = NewAttr;
@@ -354,7 +372,6 @@ void Decl::swapAttrs(Decl *RHS) {
RHS->HasAttrs = true;
}
-
void Decl::Destroy(ASTContext &C) {
// Free attributes for this decl.
if (HasAttrs) {
@@ -392,16 +409,18 @@ void Decl::Destroy(ASTContext &C) {
Decl *Decl::castFromDeclContext (const DeclContext *D) {
Decl::Kind DK = D->getDeclKind();
switch(DK) {
-#define DECL_CONTEXT(Name) \
- case Decl::Name: \
- return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
- return static_cast<Name##Decl*>(const_cast<DeclContext*>(D));
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<DeclContext*>(D));
+#include "clang/AST/DeclNodes.inc"
assert(false && "a decl that inherits DeclContext isn't handled");
return 0;
}
@@ -410,46 +429,51 @@ Decl *Decl::castFromDeclContext (const DeclContext *D) {
DeclContext *Decl::castToDeclContext(const Decl *D) {
Decl::Kind DK = D->getKind();
switch(DK) {
-#define DECL_CONTEXT(Name) \
- case Decl::Name: \
- return static_cast<Name##Decl*>(const_cast<Decl*>(D));
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) \
+ case Decl::NAME: \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (DK >= Decl::Name##First && DK <= Decl::Name##Last) \
- return static_cast<Name##Decl*>(const_cast<Decl*>(D));
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (DK >= first##NAME && DK <= last##NAME) \
+ return static_cast<NAME##Decl*>(const_cast<Decl*>(D));
+#include "clang/AST/DeclNodes.inc"
assert(false && "a decl that inherits DeclContext isn't handled");
return 0;
}
}
-CompoundStmt* Decl::getCompoundBody() const {
- return dyn_cast_or_null<CompoundStmt>(getBody());
-}
-
SourceLocation Decl::getBodyRBrace() const {
- Stmt *Body = getBody();
- if (!Body)
+ // Special handling of FunctionDecl to avoid de-serializing the body from PCH.
+ // FunctionDecl stores EndRangeLoc for this purpose.
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(this)) {
+ const FunctionDecl *Definition;
+ if (FD->hasBody(Definition))
+ return Definition->getSourceRange().getEnd();
return SourceLocation();
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
- return CS->getRBracLoc();
- assert(isa<CXXTryStmt>(Body) &&
- "Body can only be CompoundStmt or CXXTryStmt");
- return cast<CXXTryStmt>(Body)->getSourceRange().getEnd();
+ }
+
+ if (Stmt *Body = getBody())
+ return Body->getSourceRange().getEnd();
+
+ return SourceLocation();
}
#ifndef NDEBUG
void Decl::CheckAccessDeclContext() const {
+ // FIXME: Disable this until rdar://8146294 "access specifier for inner class
+ // templates is not set or checked" is fixed.
+ return;
// Suppress this check if any of the following hold:
// 1. this is the translation unit (and thus has no parent)
// 2. this is a template parameter (and thus doesn't belong to its context)
- // 3. this is a ParmVarDecl (which can be in a record context during
- // the brief period between its creation and the creation of the
- // FunctionDecl)
- // 4. the context is not a record
+ // 3. the context is not a record
+ // 4. it's invalid
if (isa<TranslationUnitDecl>(this) ||
+ isa<TemplateTypeParmDecl>(this) ||
!isa<CXXRecordDecl>(getDeclContext()) ||
isInvalidDecl())
return;
@@ -466,16 +490,18 @@ void Decl::CheckAccessDeclContext() const {
bool DeclContext::classof(const Decl *D) {
switch (D->getKind()) {
-#define DECL_CONTEXT(Name) case Decl::Name:
-#define DECL_CONTEXT_BASE(Name)
-#include "clang/AST/DeclNodes.def"
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT(NAME) case Decl::NAME:
+#define DECL_CONTEXT_BASE(NAME)
+#include "clang/AST/DeclNodes.inc"
return true;
default:
-#define DECL_CONTEXT_BASE(Name) \
- if (D->getKind() >= Decl::Name##First && \
- D->getKind() <= Decl::Name##Last) \
+#define DECL(NAME, BASE)
+#define DECL_CONTEXT_BASE(NAME) \
+ if (D->getKind() >= Decl::first##NAME && \
+ D->getKind() <= Decl::last##NAME) \
return true;
-#include "clang/AST/DeclNodes.def"
+#include "clang/AST/DeclNodes.inc"
return false;
}
}
@@ -537,7 +563,7 @@ bool DeclContext::isTransparentContext() const {
return true; // FIXME: Check for C++0x scoped enums
else if (DeclKind == Decl::LinkageSpec)
return true;
- else if (DeclKind >= Decl::RecordFirst && DeclKind <= Decl::RecordLast)
+ else if (DeclKind >= Decl::firstRecord && DeclKind <= Decl::lastRecord)
return cast<RecordDecl>(this)->isAnonymousStructOrUnion();
else if (DeclKind == Decl::Namespace)
return false; // FIXME: Check for C++0x inline namespaces
@@ -581,7 +607,7 @@ DeclContext *DeclContext::getPrimaryContext() {
return this;
default:
- if (DeclKind >= Decl::TagFirst && DeclKind <= Decl::TagLast) {
+ if (DeclKind >= Decl::firstTag && DeclKind <= Decl::lastTag) {
// If this is a tag type that has a definition or is currently
// being defined, that definition is our primary context.
TagDecl *Tag = cast<TagDecl>(this);
@@ -602,7 +628,7 @@ DeclContext *DeclContext::getPrimaryContext() {
return Tag;
}
- assert(DeclKind >= Decl::FunctionFirst && DeclKind <= Decl::FunctionLast &&
+ assert(DeclKind >= Decl::firstFunction && DeclKind <= Decl::lastFunction &&
"Unknown DeclContext kind");
return this;
}
@@ -626,9 +652,8 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
ExternalASTSource *Source = getParentASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
- llvm::SmallVector<uint32_t, 64> Decls;
- if (Source->ReadDeclsLexicallyInContext(const_cast<DeclContext *>(this),
- Decls))
+ llvm::SmallVector<Decl*, 64> Decls;
+ if (Source->FindExternalLexicalDecls(this, Decls))
return;
// There is no longer any lexical storage in this context
@@ -642,7 +667,7 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
Decl *FirstNewDecl = 0;
Decl *PrevDecl = 0;
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
- Decl *D = Source->GetDecl(Decls[I]);
+ Decl *D = Decls[I];
if (PrevDecl)
PrevDecl->NextDeclInContext = D;
else
@@ -659,28 +684,83 @@ DeclContext::LoadLexicalDeclsFromExternalStorage() const {
LastDecl = PrevDecl;
}
-void
-DeclContext::LoadVisibleDeclsFromExternalStorage() const {
- DeclContext *This = const_cast<DeclContext *>(this);
- ExternalASTSource *Source = getParentASTContext().getExternalSource();
- assert(hasExternalVisibleStorage() && Source && "No external storage?");
+DeclContext::lookup_result
+ExternalASTSource::SetNoExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ assert(List.isNull());
+ (void) List;
+
+ return DeclContext::lookup_result();
+}
- llvm::SmallVector<VisibleDeclaration, 64> Decls;
- if (Source->ReadDeclsVisibleInContext(This, Decls))
- return;
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ const VisibleDeclaration &VD) {
+ ASTContext &Context = DC->getParentASTContext();
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[VD.Name];
+ List.setFromDeclIDs(VD.Declarations);
+ return List.getLookupResult(Context);
+}
- // There is no longer any visible storage in this context
- ExternalVisibleStorage = false;
+DeclContext::lookup_result
+ExternalASTSource::SetExternalVisibleDeclsForName(const DeclContext *DC,
+ DeclarationName Name,
+ llvm::SmallVectorImpl<NamedDecl*> &Decls) {
+  ASTContext &Context = DC->getParentASTContext();
- // Load the declaration IDs for all of the names visible in this
- // context.
- assert(!LookupPtr && "Have a lookup map before de-serialization?");
- StoredDeclsMap *Map = CreateStoredDeclsMap(getParentASTContext());
+ StoredDeclsMap *Map;
+ if (!(Map = DC->LookupPtr))
+ Map = DC->CreateStoredDeclsMap(Context);
+
+ StoredDeclsList &List = (*Map)[Name];
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ if (List.isNull())
+ List.setOnlyValue(Decls[I]);
+ else
+ List.AddSubsequentDecl(Decls[I]);
+ }
+
+ return List.getLookupResult(Context);
+}
+
+void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<VisibleDeclaration> &Decls) {
+ // There is no longer any visible storage in this context.
+ DC->ExternalVisibleStorage = false;
+
+ assert(!DC->LookupPtr && "Have a lookup map before de-serialization?");
+ StoredDeclsMap *Map = DC->CreateStoredDeclsMap(DC->getParentASTContext());
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
(*Map)[Decls[I].Name].setFromDeclIDs(Decls[I].Declarations);
}
}
+void ExternalASTSource::SetExternalVisibleDecls(const DeclContext *DC,
+ const llvm::SmallVectorImpl<NamedDecl*> &Decls) {
+ // There is no longer any visible storage in this context.
+ DC->ExternalVisibleStorage = false;
+
+ assert(!DC->LookupPtr && "Have a lookup map before de-serialization?");
+ StoredDeclsMap &Map = *DC->CreateStoredDeclsMap(DC->getParentASTContext());
+ for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
+ StoredDeclsList &List = Map[Decls[I]->getDeclName()];
+ if (List.isNull())
+ List.setOnlyValue(Decls[I]);
+ else
+ List.AddSubsequentDecl(Decls[I]);
+ }
+}
+
DeclContext::decl_iterator DeclContext::decls_begin() const {
if (hasExternalLexicalStorage())
LoadLexicalDeclsFromExternalStorage();
@@ -801,8 +881,17 @@ DeclContext::lookup(DeclarationName Name) {
if (PrimaryContext != this)
return PrimaryContext->lookup(Name);
- if (hasExternalVisibleStorage())
- LoadVisibleDeclsFromExternalStorage();
+ if (hasExternalVisibleStorage()) {
+ // Check to see if we've already cached the lookup results.
+ if (LookupPtr) {
+ StoredDeclsMap::iterator I = LookupPtr->find(Name);
+ if (I != LookupPtr->end())
+ return I->second.getLookupResult(getParentASTContext());
+ }
+
+ ExternalASTSource *Source = getParentASTContext().getExternalSource();
+ return Source->FindExternalVisibleDeclsByName(this, Name);
+ }
/// If there is no lookup data structure, build one now by walking
/// all of the linked DeclContexts (in declaration order!) and
@@ -858,9 +947,10 @@ void DeclContext::makeDeclVisibleInContext(NamedDecl *D, bool Recoverable) {
}
// If we already have a lookup data structure, perform the insertion
- // into it. Otherwise, be lazy and don't build that structure until
- // someone asks for it.
- if (LookupPtr || !Recoverable)
+ // into it. If we haven't deserialized externally stored decls, deserialize
+ // them so we can add the decl. Otherwise, be lazy and don't build that
+ // structure until someone asks for it.
+ if (LookupPtr || !Recoverable || hasExternalVisibleStorage())
makeDeclVisibleInContextImpl(D);
// If we are a transparent context, insert into our parent context,
@@ -880,6 +970,12 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
if (isa<ClassTemplateSpecializationDecl>(D))
return;
+ // If there is an external AST source, load any declarations it knows about
+ // with this declaration's name.
+ if (ExternalASTSource *Source = getParentASTContext().getExternalSource())
+ if (hasExternalVisibleStorage())
+ Source->FindExternalVisibleDeclsByName(this, D->getDeclName());
+
ASTContext *C = 0;
if (!LookupPtr) {
C = &getParentASTContext();
@@ -932,7 +1028,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) {
ExternalASTSource *Source = Context.getExternalSource();
assert(Source && "No external AST source available!");
- Data = reinterpret_cast<uintptr_t>(Source->GetDecl(DeclID));
+ Data = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(DeclID));
break;
}
@@ -944,7 +1040,7 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) {
assert(Source && "No external AST source available!");
for (unsigned I = 0, N = Vector.size(); I != N; ++I)
- Vector[I] = reinterpret_cast<uintptr_t>(Source->GetDecl(Vector[I]));
+ Vector[I] = reinterpret_cast<uintptr_t>(Source->GetExternalDecl(Vector[I]));
Data = (Data & ~0x03) | DK_Decl_Vector;
break;
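
As a standalone illustration of the DeclNodes.inc pattern the hunks above switch to: consumers define DECL(DERIVED, BASE) once per concrete node for whatever they need, and define ABSTRACT_DECL(DECL) to nothing to drop abstract nodes. The node names below are hypothetical, and the list is inlined as a macro purely to keep the sketch self-contained, whereas clang keeps it in a generated .inc file that is re-included.

#include <cstdio>

// A tiny stand-in for what the generated DeclNodes.inc provides.
#define MY_DECL_NODES           \
  DECL(Function, Named)         \
  DECL(Var, Named)              \
  ABSTRACT_DECL(DECL(Named, Decl))

// Consumer 1: one static counter per concrete node, abstract nodes dropped.
#define DECL(DERIVED, BASE) static int n##DERIVED##s = 0;
#define ABSTRACT_DECL(DECL)
MY_DECL_NODES
#undef ABSTRACT_DECL
#undef DECL

int main() {
  nFunctions = 2; nVars = 3;    // pretend some nodes were created

  // Consumer 2: print the per-node totals, again skipping abstract nodes.
#define DECL(DERIVED, BASE) std::printf("  %d " #DERIVED " decls\n", n##DERIVED##s);
#define ABSTRACT_DECL(DECL)
  MY_DECL_NODES
#undef ABSTRACT_DECL
#undef DECL
  return 0;
}
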
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index cd7afd98b63d..dd0fe08979f0 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -32,6 +32,8 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
Abstract(false), HasTrivialConstructor(true),
HasTrivialCopyConstructor(true), HasTrivialCopyAssignment(true),
HasTrivialDestructor(true), ComputedVisibleConversions(false),
+ DeclaredDefaultConstructor(false), DeclaredCopyConstructor(false),
+ DeclaredCopyAssignment(false), DeclaredDestructor(false),
Bases(0), NumBases(0), VBases(0), NumVBases(0),
Definition(D), FirstFriend(0) {
}
@@ -58,6 +60,11 @@ CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, TagKind TK, DeclContext *DC,
return R;
}
+CXXRecordDecl *CXXRecordDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) CXXRecordDecl(CXXRecord, TTK_Struct, 0, SourceLocation(), 0, 0,
+ SourceLocation());
+}
+
CXXRecordDecl::~CXXRecordDecl() {
}
@@ -159,6 +166,29 @@ bool CXXRecordDecl::hasConstCopyConstructor(ASTContext &Context) const {
return getCopyConstructor(Context, Qualifiers::Const) != 0;
}
+/// \brief Perform a simplistic form of overload resolution that only considers
+/// cv-qualifiers on a single parameter, and return the best overload candidate
+/// (if there is one).
+static CXXMethodDecl *
+GetBestOverloadCandidateSimple(
+ const llvm::SmallVectorImpl<std::pair<CXXMethodDecl *, Qualifiers> > &Cands) {
+ if (Cands.empty())
+ return 0;
+ if (Cands.size() == 1)
+ return Cands[0].first;
+
+ unsigned Best = 0, N = Cands.size();
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.isSupersetOf(Cands[I].second))
+ Best = I;
+
+ for (unsigned I = 1; I != N; ++I)
+ if (Cands[Best].second.isSupersetOf(Cands[I].second))
+ return 0;
+
+ return Cands[Best].first;
+}
+
CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
unsigned TypeQuals) const{
QualType ClassType
@@ -167,6 +197,7 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
= Context.DeclarationNames.getCXXConstructorName(
Context.getCanonicalType(ClassType));
unsigned FoundTQs;
+ llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
DeclContext::lookup_const_iterator Con, ConEnd;
for (llvm::tie(Con, ConEnd) = this->lookup(ConstructorName);
Con != ConEnd; ++Con) {
@@ -175,61 +206,68 @@ CXXConstructorDecl *CXXRecordDecl::getCopyConstructor(ASTContext &Context,
if (isa<FunctionTemplateDecl>(*Con))
continue;
- if (cast<CXXConstructorDecl>(*Con)->isCopyConstructor(FoundTQs)) {
+ CXXConstructorDecl *Constructor = cast<CXXConstructorDecl>(*Con);
+ if (Constructor->isCopyConstructor(FoundTQs)) {
if (((TypeQuals & Qualifiers::Const) == (FoundTQs & Qualifiers::Const)) ||
(!(TypeQuals & Qualifiers::Const) && (FoundTQs & Qualifiers::Const)))
- return cast<CXXConstructorDecl>(*Con);
-
+ Found.push_back(std::make_pair(
+ const_cast<CXXConstructorDecl *>(Constructor),
+ Qualifiers::fromCVRMask(FoundTQs)));
}
}
- return 0;
+
+ return cast_or_null<CXXConstructorDecl>(
+ GetBestOverloadCandidateSimple(Found));
}
-bool CXXRecordDecl::hasConstCopyAssignment(ASTContext &Context,
- const CXXMethodDecl *& MD) const {
- QualType ClassType = Context.getCanonicalType(Context.getTypeDeclType(
- const_cast<CXXRecordDecl*>(this)));
- DeclarationName OpName =Context.DeclarationNames.getCXXOperatorName(OO_Equal);
-
+CXXMethodDecl *CXXRecordDecl::getCopyAssignmentOperator(bool ArgIsConst) const {
+ ASTContext &Context = getASTContext();
+ QualType Class = Context.getTypeDeclType(const_cast<CXXRecordDecl *>(this));
+ DeclarationName Name = Context.DeclarationNames.getCXXOperatorName(OO_Equal);
+
+ llvm::SmallVector<std::pair<CXXMethodDecl *, Qualifiers>, 4> Found;
DeclContext::lookup_const_iterator Op, OpEnd;
- for (llvm::tie(Op, OpEnd) = this->lookup(OpName);
- Op != OpEnd; ++Op) {
+ for (llvm::tie(Op, OpEnd) = this->lookup(Name); Op != OpEnd; ++Op) {
// C++ [class.copy]p9:
// A user-declared copy assignment operator is a non-static non-template
// member function of class X with exactly one parameter of type X, X&,
// const X&, volatile X& or const volatile X&.
const CXXMethodDecl* Method = dyn_cast<CXXMethodDecl>(*Op);
- if (!Method)
+ if (!Method || Method->isStatic() || Method->getPrimaryTemplate())
continue;
-
- if (Method->isStatic())
- continue;
- if (Method->getPrimaryTemplate())
- continue;
- const FunctionProtoType *FnType =
- Method->getType()->getAs<FunctionProtoType>();
+
+ const FunctionProtoType *FnType
+ = Method->getType()->getAs<FunctionProtoType>();
assert(FnType && "Overloaded operator has no prototype.");
// Don't assert on this; an invalid decl might have been left in the AST.
if (FnType->getNumArgs() != 1 || FnType->isVariadic())
continue;
- bool AcceptsConst = true;
+
QualType ArgType = FnType->getArgType(0);
+ Qualifiers Quals;
if (const LValueReferenceType *Ref = ArgType->getAs<LValueReferenceType>()) {
ArgType = Ref->getPointeeType();
- // Is it a non-const lvalue reference?
- if (!ArgType.isConstQualified())
- AcceptsConst = false;
+ // If we have a const argument and we have a reference to a non-const,
+ // this function does not match.
+ if (ArgIsConst && !ArgType.isConstQualified())
+ continue;
+
+ Quals = ArgType.getQualifiers();
+ } else {
+ // By-value copy-assignment operators are treated like const X&
+ // copy-assignment operators.
+ Quals = Qualifiers::fromCVRMask(Qualifiers::Const);
}
- if (!Context.hasSameUnqualifiedType(ArgType, ClassType))
+
+ if (!Context.hasSameUnqualifiedType(ArgType, Class))
continue;
- MD = Method;
- // We have a single argument of type cv X or cv X&, i.e. we've found the
- // copy assignment operator. Return whether it accepts const arguments.
- return AcceptsConst;
+
+ // Save this copy-assignment operator. It might be "the one".
+ Found.push_back(std::make_pair(const_cast<CXXMethodDecl *>(Method), Quals));
}
- assert(isInvalidDecl() &&
- "No copy assignment operator declared in valid code.");
- return false;
+
+ // Use a simplistic form of overload resolution to find the candidate.
+ return GetBestOverloadCandidateSimple(Found);
}
void
@@ -239,6 +277,9 @@ CXXRecordDecl::addedConstructor(ASTContext &Context,
// Note that we have a user-declared constructor.
data().UserDeclaredConstructor = true;
+ // Note that we have no need of an implicitly-declared default constructor.
+ data().DeclaredDefaultConstructor = true;
+
// C++ [dcl.init.aggr]p1:
// An aggregate is an array or a class (clause 9) with no
// user-declared constructors (12.1) [...].
@@ -258,11 +299,13 @@ CXXRecordDecl::addedConstructor(ASTContext &Context,
// suppress the implicit declaration of a copy constructor.
if (ConDecl->isCopyConstructor()) {
data().UserDeclaredCopyConstructor = true;
-
+ data().DeclaredCopyConstructor = true;
+
// C++ [class.copy]p6:
// A copy constructor is trivial if it is implicitly declared.
// FIXME: C++0x: don't do this for "= default" copy constructors.
data().HasTrivialCopyConstructor = false;
+
}
}
@@ -294,7 +337,8 @@ void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context,
// Suppress the implicit declaration of a copy constructor.
data().UserDeclaredCopyAssignment = true;
-
+ data().DeclaredCopyAssignment = true;
+
// C++ [class.copy]p11:
// A copy assignment operator is trivial if it is implicitly declared.
// FIXME: C++0x: don't do this for "= default" copy operators.
@@ -546,7 +590,8 @@ CXXRecordDecl::setTemplateSpecializationKind(TemplateSpecializationKind TSK) {
}
CXXConstructorDecl *
-CXXRecordDecl::getDefaultConstructor(ASTContext &Context) {
+CXXRecordDecl::getDefaultConstructor() {
+ ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
DeclarationName ConstructorName
= Context.DeclarationNames.getCXXConstructorName(
@@ -566,7 +611,8 @@ CXXRecordDecl::getDefaultConstructor(ASTContext &Context) {
return 0;
}
-CXXDestructorDecl *CXXRecordDecl::getDestructor(ASTContext &Context) const {
+CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
+ ASTContext &Context = getASTContext();
QualType ClassType = Context.getTypeDeclType(this);
DeclarationName Name
@@ -670,6 +716,10 @@ CXXMethodDecl::method_iterator CXXMethodDecl::end_overridden_methods() const {
return getASTContext().overridden_methods_end(this);
}
+unsigned CXXMethodDecl::size_overridden_methods() const {
+ return getASTContext().overridden_methods_size(this);
+}
+
QualType CXXMethodDecl::getThisType(ASTContext &C) const {
// C++ 9.3.2p1: The type of this in a member function of a class X is X*.
// If the member function is declared const, the type of this is const X*,
@@ -693,7 +743,7 @@ bool CXXMethodDecl::hasInlineBody() const {
CheckFn = this;
const FunctionDecl *fn;
- return CheckFn->getBody(fn) && !fn->isOutOfLine();
+ return CheckFn->hasBody(fn) && !fn->isOutOfLine();
}
CXXBaseOrMemberInitializer::
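
For reference, a small sketch of querying the getCopyAssignmentOperator() accessor introduced above; it is not part of the patch and assumes the clang headers of this revision.

#include "clang/AST/DeclCXX.h"

// True if the class has a copy-assignment operator whose parameter is
// const-qualified, e.g. operator=(const X&). Passing false instead would
// look for the non-const flavour, operator=(X&).
static bool hasConstCopyAssignment(const clang::CXXRecordDecl *RD) {
  return RD->getCopyAssignmentOperator(/*ArgIsConst=*/true) != 0;
}
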
diff --git a/lib/AST/DeclFriend.cpp b/lib/AST/DeclFriend.cpp
index ab3552db28e0..99bfe40c31f4 100644
--- a/lib/AST/DeclFriend.cpp
+++ b/lib/AST/DeclFriend.cpp
@@ -39,3 +39,7 @@ FriendDecl *FriendDecl::Create(ASTContext &C, DeclContext *DC,
cast<CXXRecordDecl>(DC)->pushFriendDecl(FD);
return FD;
}
+
+FriendDecl *FriendDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) FriendDecl(Empty);
+}
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index dc4aacdb515c..adb0e7d08345 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -223,17 +223,24 @@ void ObjCInterfaceDecl::mergeClassExtensionProtocolList(
setProtocolList(ProtocolRefs.data(), NumProtoRefs, ProtocolLocs.data(), C);
}
-/// getClassExtension - Find class extension of the given class.
-// FIXME. can speed it up, if need be.
-ObjCCategoryDecl* ObjCInterfaceDecl::getClassExtension() const {
- const ObjCInterfaceDecl* ClassDecl = this;
- for (ObjCCategoryDecl *CDecl = ClassDecl->getCategoryList(); CDecl;
+/// getFirstClassExtension - Find first class extension of the given class.
+ObjCCategoryDecl* ObjCInterfaceDecl::getFirstClassExtension() const {
+ for (ObjCCategoryDecl *CDecl = getCategoryList(); CDecl;
CDecl = CDecl->getNextClassCategory())
if (CDecl->IsClassExtension())
return CDecl;
return 0;
}
+/// getNextClassExtension - Find next class extension in list of categories.
+const ObjCCategoryDecl* ObjCCategoryDecl::getNextClassExtension() const {
+ for (const ObjCCategoryDecl *CDecl = getNextClassCategory(); CDecl;
+ CDecl = CDecl->getNextClassCategory())
+ if (CDecl->IsClassExtension())
+ return CDecl;
+ return 0;
+}
+
ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
ObjCInterfaceDecl *&clsDeclared) {
ObjCInterfaceDecl* ClassDecl = this;
@@ -242,11 +249,13 @@ ObjCIvarDecl *ObjCInterfaceDecl::lookupInstanceVariable(IdentifierInfo *ID,
clsDeclared = ClassDecl;
return I;
}
- if (const ObjCCategoryDecl *CDecl = ClassDecl->getClassExtension())
+ for (const ObjCCategoryDecl *CDecl = ClassDecl->getFirstClassExtension();
+ CDecl; CDecl = CDecl->getNextClassExtension()) {
if (ObjCIvarDecl *I = CDecl->getIvarDecl(ID)) {
clsDeclared = ClassDecl;
return I;
}
+ }
ClassDecl = ClassDecl->getSuperClass();
}
@@ -887,7 +896,7 @@ ObjCPropertyDecl *ObjCPropertyDecl::Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
IdentifierInfo *Id,
SourceLocation AtLoc,
- QualType T,
+ TypeSourceInfo *T,
PropertyControl propControl) {
return new (C) ObjCPropertyDecl(DC, L, Id, AtLoc, T);
}
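
A minimal sketch (not part of the patch, assuming this revision's headers) of walking every class extension with the getFirstClassExtension()/getNextClassExtension() pair that replaces the old single getClassExtension() above.

#include "clang/AST/DeclObjC.h"

// Count the class extensions (unnamed categories) of an interface.
static unsigned countClassExtensions(const clang::ObjCInterfaceDecl *ID) {
  unsigned N = 0;
  for (const clang::ObjCCategoryDecl *Ext = ID->getFirstClassExtension();
       Ext; Ext = Ext->getNextClassExtension())
    ++N;
  return N;
}
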
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index 539492479707..765772dd13f9 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -183,7 +183,7 @@ void DeclPrinter::Print(AccessSpecifier AS) {
case AS_none: assert(0 && "No access specifier!"); break;
case AS_public: Out << "public"; break;
case AS_protected: Out << "protected"; break;
- case AS_private: Out << " private"; break;
+ case AS_private: Out << "private"; break;
}
}
@@ -195,9 +195,6 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
if (Indent)
Indentation += Policy.Indentation;
- bool PrintAccess = isa<CXXRecordDecl>(DC);
- AccessSpecifier CurAS = AS_none;
-
llvm::SmallVector<Decl*, 2> Decls;
for (DeclContext::decl_iterator D = DC->decls_begin(), DEnd = DC->decls_end();
D != DEnd; ++D) {
@@ -205,21 +202,14 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
// Skip over implicit declarations in pretty-printing mode.
if (D->isImplicit()) continue;
// FIXME: Ugly hack so we don't pretty-print the builtin declaration
- // of __builtin_va_list. There should be some other way to check that.
- if (isa<NamedDecl>(*D) && cast<NamedDecl>(*D)->getNameAsString() ==
- "__builtin_va_list")
- continue;
- }
-
- if (PrintAccess) {
- AccessSpecifier AS = D->getAccess();
-
- if (AS != CurAS) {
- if (Indent)
- this->Indent(Indentation - Policy.Indentation);
- Print(AS);
- Out << ":\n";
- CurAS = AS;
+ // of __builtin_va_list or __[u]int128_t. There should be some other way
+ // to check that.
+ if (NamedDecl *ND = dyn_cast<NamedDecl>(*D)) {
+ if (IdentifierInfo *II = ND->getIdentifier()) {
+ if (II->isStr("__builtin_va_list") ||
+ II->isStr("__int128_t") || II->isStr("__uint128_t"))
+ continue;
+ }
}
}
@@ -251,6 +241,16 @@ void DeclPrinter::VisitDeclContext(DeclContext *DC, bool Indent) {
Decls.push_back(*D);
continue;
}
+
+ if (isa<AccessSpecDecl>(*D)) {
+ Indentation -= Policy.Indentation;
+ this->Indent();
+ Print(D->getAccess());
+ Out << ":\n";
+ Indentation += Policy.Indentation;
+ continue;
+ }
+
this->Indent();
Visit(*D);
@@ -406,7 +406,8 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
FieldDecl *FD = BMInitializer->getMember();
Out << FD;
} else {
- Out << QualType(BMInitializer->getBaseClass(), 0).getAsString();
+ Out << QualType(BMInitializer->getBaseClass(),
+ 0).getAsString(Policy);
}
Out << "(";
@@ -653,7 +654,11 @@ void DeclPrinter::VisitTemplateDecl(TemplateDecl *D) {
Out << "> ";
- Visit(D->getTemplatedDecl());
+ if (isa<TemplateTemplateParmDecl>(D)) {
+ Out << "class " << D->getName();
+ } else {
+ Visit(D->getTemplatedDecl());
+ }
}
//----------------------------------------------------------------------------
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index 26e291c94f64..f00eb0478a75 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -162,37 +162,19 @@ ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
TemplateParameterList *Params,
NamedDecl *Decl,
ClassTemplateDecl *PrevDecl) {
- Common *CommonPtr;
- if (PrevDecl)
- CommonPtr = PrevDecl->CommonPtr;
- else {
- CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
- }
-
- return new (C) ClassTemplateDecl(DC, L, Name, Params, Decl, PrevDecl,
- CommonPtr);
-}
-
-ClassTemplateDecl::~ClassTemplateDecl() {
- assert(CommonPtr == 0 && "ClassTemplateDecl must be explicitly destroyed");
+ ClassTemplateDecl *New = new (C) ClassTemplateDecl(DC, L, Name, Params, Decl);
+ New->setPreviousDeclaration(PrevDecl);
+ return New;
}
void ClassTemplateDecl::Destroy(ASTContext& C) {
- if (!PreviousDeclaration) {
- CommonPtr->~Common();
- C.Deallocate((void*)CommonPtr);
- }
- CommonPtr = 0;
-
- this->~ClassTemplateDecl();
- C.Deallocate((void*)this);
+ Decl::Destroy(C);
}
void ClassTemplateDecl::getPartialSpecializations(
llvm::SmallVectorImpl<ClassTemplatePartialSpecializationDecl *> &PS) {
llvm::FoldingSet<ClassTemplatePartialSpecializationDecl> &PartialSpecs
- = CommonPtr->PartialSpecializations;
+ = getPartialSpecializations();
PS.clear();
PS.resize(PartialSpecs.size());
for (llvm::FoldingSet<ClassTemplatePartialSpecializationDecl>::iterator
@@ -219,7 +201,8 @@ ClassTemplateDecl::findPartialSpecialization(QualType T) {
}
QualType
-ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
+ClassTemplateDecl::getInjectedClassNameSpecialization() {
+ Common *CommonPtr = getCommonPtr();
if (!CommonPtr->InjectedClassNameType.isNull())
return CommonPtr->InjectedClassNameType;
@@ -227,7 +210,7 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
// corresponding to template parameter packs should be pack
// expansions. We already say that in 14.6.2.1p2, so it would be
// better to fix that redundancy.
-
+ ASTContext &Context = getASTContext();
TemplateParameterList *Params = getTemplateParameters();
llvm::SmallVector<TemplateArgument, 16> TemplateArgs;
TemplateArgs.reserve(Params->size());
@@ -256,6 +239,20 @@ ClassTemplateDecl::getInjectedClassNameSpecialization(ASTContext &Context) {
return CommonPtr->InjectedClassNameType;
}
+ClassTemplateDecl::Common *ClassTemplateDecl::getCommonPtr() {
+ // Find the first declaration of this function template.
+ ClassTemplateDecl *First = this;
+ while (First->getPreviousDeclaration())
+ First = First->getPreviousDeclaration();
+
+ if (First->CommonOrPrev.isNull()) {
+ Common *CommonPtr = new (getASTContext()) Common;
+ getASTContext().AddDeallocation(DeallocateCommon, CommonPtr);
+ First->CommonOrPrev = CommonPtr;
+ }
+ return First->CommonOrPrev.get<Common*>();
+}
+
//===----------------------------------------------------------------------===//
// TemplateTypeParm Allocation/Deallocation Method Implementations
//===----------------------------------------------------------------------===//
@@ -269,6 +266,12 @@ TemplateTypeParmDecl::Create(ASTContext &C, DeclContext *DC,
return new (C) TemplateTypeParmDecl(DC, L, Id, Typename, Type, ParameterPack);
}
+TemplateTypeParmDecl *
+TemplateTypeParmDecl::Create(ASTContext &C, EmptyShell Empty) {
+ return new (C) TemplateTypeParmDecl(0, SourceLocation(), 0, false,
+ QualType(), false);
+}
+
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
return DefaultArgument->getTypeLoc().getSourceRange().getBegin();
}
@@ -294,8 +297,9 @@ NonTypeTemplateParmDecl::Create(ASTContext &C, DeclContext *DC,
}
SourceLocation NonTypeTemplateParmDecl::getDefaultArgumentLoc() const {
- return DefaultArgument? DefaultArgument->getSourceRange().getBegin()
- : SourceLocation();
+ return hasDefaultArgument()
+ ? getDefaultArgument()->getSourceRange().getBegin()
+ : SourceLocation();
}
//===----------------------------------------------------------------------===//
@@ -393,6 +397,13 @@ TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
}
}
+TemplateArgumentList::TemplateArgumentList(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs)
+ : NumFlatArguments(0), NumStructuredArguments(0) {
+ init(Context, Args, NumArgs);
+}
+
/// Produces a shallow copy of the given template argument list. This
/// assumes that the input argument list outlives it. This takes the list as
/// a pointer to avoid looking like a copy constructor, since this really
@@ -403,6 +414,23 @@ TemplateArgumentList::TemplateArgumentList(const TemplateArgumentList *Other)
StructuredArguments(Other->StructuredArguments.getPointer(), false),
NumStructuredArguments(Other->NumStructuredArguments) { }
+void TemplateArgumentList::init(ASTContext &Context,
+ const TemplateArgument *Args,
+ unsigned NumArgs) {
+  assert(NumFlatArguments == 0 && NumStructuredArguments == 0 &&
+         "Already initialized!");
+
+  NumFlatArguments = NumStructuredArguments = NumArgs;
+  TemplateArgument *NewArgs = new (Context) TemplateArgument[NumArgs];
+  std::copy(Args, Args+NumArgs, NewArgs);
+  FlatArguments.setPointer(NewArgs);
+  FlatArguments.setInt(1); // Owns the pointer.
+
+  // Just reuse the flat arguments array.
+  StructuredArguments.setPointer(NewArgs);
+  StructuredArguments.setInt(0); // Doesn't own the pointer.
+}
+
void TemplateArgumentList::Destroy(ASTContext &C) {
if (FlatArguments.getInt())
C.Deallocate((void*)FlatArguments.getPointer());
@@ -425,11 +453,17 @@ ClassTemplateSpecializationDecl(ASTContext &Context, Kind DK, TagKind TK,
SpecializedTemplate->getIdentifier(),
PrevDecl),
SpecializedTemplate(SpecializedTemplate),
- TypeAsWritten(0),
+ ExplicitInfo(0),
TemplateArgs(Context, Builder, /*TakeArgs=*/true),
SpecializationKind(TSK_Undeclared) {
}
+ClassTemplateSpecializationDecl::ClassTemplateSpecializationDecl(Kind DK)
+ : CXXRecordDecl(DK, TTK_Struct, 0, SourceLocation(), 0, 0),
+ ExplicitInfo(0),
+ SpecializationKind(TSK_Undeclared) {
+}
+
ClassTemplateSpecializationDecl *
ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
DeclContext *DC, SourceLocation L,
@@ -447,7 +481,15 @@ ClassTemplateSpecializationDecl::Create(ASTContext &Context, TagKind TK,
return Result;
}
+ClassTemplateSpecializationDecl *
+ClassTemplateSpecializationDecl::Create(ASTContext &Context, EmptyShell Empty) {
+ return
+ new (Context)ClassTemplateSpecializationDecl(ClassTemplateSpecialization);
+}
+
void ClassTemplateSpecializationDecl::Destroy(ASTContext &C) {
+ delete ExplicitInfo;
+
if (SpecializedPartialSpecialization *PartialSpec
= SpecializedTemplate.dyn_cast<SpecializedPartialSpecialization*>())
C.Deallocate(PartialSpec);
@@ -508,6 +550,25 @@ Create(ASTContext &Context, TagKind TK,DeclContext *DC, SourceLocation L,
return Result;
}
+ClassTemplatePartialSpecializationDecl *
+ClassTemplatePartialSpecializationDecl::Create(ASTContext &Context,
+ EmptyShell Empty) {
+ return new (Context)ClassTemplatePartialSpecializationDecl();
+}
+
+void ClassTemplatePartialSpecializationDecl::
+initTemplateArgsAsWritten(const TemplateArgumentListInfo &ArgInfos) {
+ assert(ArgsAsWritten == 0 && "ArgsAsWritten already set");
+ unsigned N = ArgInfos.size();
+ TemplateArgumentLoc *ClonedArgs
+ = new (getASTContext()) TemplateArgumentLoc[N];
+ for (unsigned I = 0; I != N; ++I)
+ ClonedArgs[I] = ArgInfos[I];
+
+ ArgsAsWritten = ClonedArgs;
+ NumArgsAsWritten = N;
+}
+
//===----------------------------------------------------------------------===//
// FriendTemplateDecl Implementation
//===----------------------------------------------------------------------===//
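
The ClassTemplateDecl changes above replace an eagerly shared Common pointer with one that is found by walking back to the first redeclaration and allocated lazily on demand. Below is a standalone analogue of that pattern, using hypothetical types rather than clang's.

// Shared state for a whole redeclaration chain, created only when needed.
struct Common {
  unsigned NumPartialSpecs;
  Common() : NumPartialSpecs(0) {}
};

struct TemplateDeclLike {
  TemplateDeclLike *Prev; // previous redeclaration, 0 for the first one
  Common *Com;            // only ever non-null on the first declaration

  TemplateDeclLike() : Prev(0), Com(0) {}

  Common *getCommonPtr() {
    TemplateDeclLike *First = this;
    while (First->Prev)   // hop back to the first declaration in the chain
      First = First->Prev;
    if (!First->Com)      // allocate the shared block on first use
      First->Com = new Common();
    return First->Com;
  }
};
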
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index c38cec32c3b2..bd97b886fe1e 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -37,7 +37,7 @@ bool Expr::isKnownToHaveBooleanValue() const {
// If this value has _Bool type, it is obvious 0/1.
if (getType()->isBooleanType()) return true;
// If this is a non-scalar-integer type, we don't care enough to try.
- if (!getType()->isIntegralType()) return false;
+ if (!getType()->isIntegralOrEnumerationType()) return false;
if (const ParenExpr *PE = dyn_cast<ParenExpr>(this))
return PE->getSubExpr()->isKnownToHaveBooleanValue();
@@ -52,7 +52,9 @@ bool Expr::isKnownToHaveBooleanValue() const {
}
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(this))
+ // Only look through implicit casts. If the user writes
+ // '(int) (a && b)' treat it as an arbitrary int.
+ if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(this))
return CE->getSubExpr()->isKnownToHaveBooleanValue();
if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(this)) {
@@ -111,10 +113,14 @@ void ExplicitTemplateArgumentList::copyInto(
Info.addArgument(getTemplateArgs()[I]);
}
+std::size_t ExplicitTemplateArgumentList::sizeFor(unsigned NumTemplateArgs) {
+ return sizeof(ExplicitTemplateArgumentList) +
+ sizeof(TemplateArgumentLoc) * NumTemplateArgs;
+}
+
std::size_t ExplicitTemplateArgumentList::sizeFor(
const TemplateArgumentListInfo &Info) {
- return sizeof(ExplicitTemplateArgumentList) +
- sizeof(TemplateArgumentLoc) * Info.size();
+ return sizeFor(Info.size());
}
void DeclRefExpr::computeDependence() {
@@ -158,7 +164,7 @@ void DeclRefExpr::computeDependence() {
// (VD) - a constant with integral or enumeration type and is
// initialized with an expression that is value-dependent.
else if (VarDecl *Var = dyn_cast<VarDecl>(D)) {
- if (Var->getType()->isIntegralType() &&
+ if (Var->getType()->isIntegralOrEnumerationType() &&
Var->getType().getCVRQualifiers() == Qualifiers::Const) {
if (const Expr *Init = Var->getAnyInitializer())
if (Init->isValueDependent())
@@ -222,6 +228,19 @@ DeclRefExpr *DeclRefExpr::Create(ASTContext &Context,
TemplateArgs, T);
}
+DeclRefExpr *DeclRefExpr::CreateEmpty(ASTContext &Context, bool HasQualifier,
+ unsigned NumTemplateArgs) {
+ std::size_t Size = sizeof(DeclRefExpr);
+ if (HasQualifier)
+ Size += sizeof(NameQualifier);
+
+ if (NumTemplateArgs)
+ Size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = Context.Allocate(Size, llvm::alignof<DeclRefExpr>());
+ return new (Mem) DeclRefExpr(EmptyShell());
+}
+
SourceRange DeclRefExpr::getSourceRange() const {
// FIXME: Does not handle multi-token names well, e.g., operator[].
SourceRange R(Loc);
@@ -557,7 +576,10 @@ QualType CallExpr::getCallReturnType() const {
CalleeType = FnTypePtr->getPointeeType();
else if (const BlockPointerType *BPT = CalleeType->getAs<BlockPointerType>())
CalleeType = BPT->getPointeeType();
-
+ else if (const MemberPointerType *MPT
+ = CalleeType->getAs<MemberPointerType>())
+ CalleeType = MPT->getPointeeType();
+
const FunctionType *FnType = CalleeType->getAs<FunctionType>();
return FnType->getResultType();
}
@@ -968,7 +990,8 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
switch (BO->getOpcode()) {
default:
break;
- // Consider ',', '||', '&&' to have side effects if the LHS or RHS does.
+ // Consider the RHS of comma for side effects. LHS was checked by
+ // Sema::CheckCommaOperands.
case BinaryOperator::Comma:
// ((foo = <blah>), 0) is an idiom for hiding the result (and
// lvalue-ness) of an assignment written in a macro.
@@ -976,10 +999,14 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
dyn_cast<IntegerLiteral>(BO->getRHS()->IgnoreParens()))
if (IE->getValue() == 0)
return false;
+ return BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx);
+ // Consider '||', '&&' to have side effects if the LHS or RHS does.
case BinaryOperator::LAnd:
case BinaryOperator::LOr:
- return (BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
- BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx));
+ if (!BO->getLHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx) ||
+ !BO->getRHS()->isUnusedResultAWarning(Loc, R1, R2, Ctx))
+ return false;
+ break;
}
if (BO->isAssignmentOp())
return false;
@@ -1140,378 +1167,6 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
}
}
-/// DeclCanBeLvalue - Determine whether the given declaration can be
-/// an lvalue. This is a helper routine for isLvalue.
-static bool DeclCanBeLvalue(const NamedDecl *Decl, ASTContext &Ctx) {
- // C++ [temp.param]p6:
- // A non-type non-reference template-parameter is not an lvalue.
- if (const NonTypeTemplateParmDecl *NTTParm
- = dyn_cast<NonTypeTemplateParmDecl>(Decl))
- return NTTParm->getType()->isReferenceType();
-
- return isa<VarDecl>(Decl) || isa<FieldDecl>(Decl) ||
- // C++ 3.10p2: An lvalue refers to an object or function.
- (Ctx.getLangOptions().CPlusPlus &&
- (isa<FunctionDecl>(Decl) || isa<FunctionTemplateDecl>(Decl)));
-}
-
-/// isLvalue - C99 6.3.2.1: an lvalue is an expression with an object type or an
-/// incomplete type other than void. Nonarray expressions that can be lvalues:
-/// - name, where name must be a variable
-/// - e[i]
-/// - (e), where e must be an lvalue
-/// - e.name, where e must be an lvalue
-/// - e->name
-/// - *e, the type of e cannot be a function type
-/// - string-constant
-/// - (__real__ e) and (__imag__ e) where e is an lvalue [GNU extension]
-/// - reference type [C++ [expr]]
-///
-Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
- assert(!TR->isReferenceType() && "Expressions can't have reference type.");
-
- isLvalueResult Res = isLvalueInternal(Ctx);
- if (Res != LV_Valid || Ctx.getLangOptions().CPlusPlus)
- return Res;
-
- // first, check the type (C99 6.3.2.1). Expressions with function
- // type in C are not lvalues, but they can be lvalues in C++.
- if (TR->isFunctionType() || TR == Ctx.OverloadTy)
- return LV_NotObjectType;
-
- // Allow qualified void which is an incomplete type other than void (yuck).
- if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers())
- return LV_IncompleteVoidType;
-
- return LV_Valid;
-}
-
-// Check whether the expression can be sanely treated like an l-value
-Expr::isLvalueResult Expr::isLvalueInternal(ASTContext &Ctx) const {
- switch (getStmtClass()) {
- case ObjCIsaExprClass:
- case StringLiteralClass: // C99 6.5.1p4
- case ObjCEncodeExprClass: // @encode behaves like its string in every way.
- return LV_Valid;
- case ArraySubscriptExprClass: // C99 6.5.3p4 (e1[e2] == (*((e1)+(e2))))
- // For vectors, make sure base is an lvalue (i.e. not a function call).
- if (cast<ArraySubscriptExpr>(this)->getBase()->getType()->isVectorType())
- return cast<ArraySubscriptExpr>(this)->getBase()->isLvalue(Ctx);
- return LV_Valid;
- case DeclRefExprClass: { // C99 6.5.1p2
- const NamedDecl *RefdDecl = cast<DeclRefExpr>(this)->getDecl();
- if (DeclCanBeLvalue(RefdDecl, Ctx))
- return LV_Valid;
- break;
- }
- case BlockDeclRefExprClass: {
- const BlockDeclRefExpr *BDR = cast<BlockDeclRefExpr>(this);
- if (isa<VarDecl>(BDR->getDecl()))
- return LV_Valid;
- break;
- }
- case MemberExprClass: {
- const MemberExpr *m = cast<MemberExpr>(this);
- if (Ctx.getLangOptions().CPlusPlus) { // C++ [expr.ref]p4:
- NamedDecl *Member = m->getMemberDecl();
- // C++ [expr.ref]p4:
- // If E2 is declared to have type "reference to T", then E1.E2
- // is an lvalue.
- if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
- if (Value->getType()->isReferenceType())
- return LV_Valid;
-
- // -- If E2 is a static data member [...] then E1.E2 is an lvalue.
- if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
- return LV_Valid;
-
- // -- If E2 is a non-static data member [...]. If E1 is an
- // lvalue, then E1.E2 is an lvalue.
- if (isa<FieldDecl>(Member)) {
- if (m->isArrow())
- return LV_Valid;
- return m->getBase()->isLvalue(Ctx);
- }
-
- // -- If it refers to a static member function [...], then
- // E1.E2 is an lvalue.
- // -- Otherwise, if E1.E2 refers to a non-static member
- // function [...], then E1.E2 is not an lvalue.
- if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
- return Method->isStatic()? LV_Valid : LV_MemberFunction;
-
- // -- If E2 is a member enumerator [...], the expression E1.E2
- // is not an lvalue.
- if (isa<EnumConstantDecl>(Member))
- return LV_InvalidExpression;
-
- // Not an lvalue.
- return LV_InvalidExpression;
- }
-
- // C99 6.5.2.3p4
- if (m->isArrow())
- return LV_Valid;
- Expr *BaseExp = m->getBase();
- if (BaseExp->getStmtClass() == ObjCPropertyRefExprClass ||
- BaseExp->getStmtClass() == ObjCImplicitSetterGetterRefExprClass)
- return LV_SubObjCPropertySetting;
- return
- BaseExp->isLvalue(Ctx);
- }
- case UnaryOperatorClass:
- if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Deref)
- return LV_Valid; // C99 6.5.3p4
-
- if (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Real ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Imag ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::Extension)
- return cast<UnaryOperator>(this)->getSubExpr()->isLvalue(Ctx); // GNU.
-
- if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.pre.incr]p1
- (cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreInc ||
- cast<UnaryOperator>(this)->getOpcode() == UnaryOperator::PreDec))
- return LV_Valid;
- break;
- case ImplicitCastExprClass:
- if (cast<ImplicitCastExpr>(this)->isLvalueCast())
- return LV_Valid;
-
- // If this is a conversion to a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus && getType()->isRecordType())
- return LV_ClassTemporary;
-
- break;
- case ParenExprClass: // C99 6.5.1p5
- return cast<ParenExpr>(this)->getSubExpr()->isLvalue(Ctx);
- case BinaryOperatorClass:
- case CompoundAssignOperatorClass: {
- const BinaryOperator *BinOp = cast<BinaryOperator>(this);
-
- if (Ctx.getLangOptions().CPlusPlus && // C++ [expr.comma]p1
- BinOp->getOpcode() == BinaryOperator::Comma)
- return BinOp->getRHS()->isLvalue(Ctx);
-
- // C++ [expr.mptr.oper]p6
- // The result of a .* expression is an lvalue only if its first operand is
- // an lvalue and its second operand is a pointer to data member.
- if (BinOp->getOpcode() == BinaryOperator::PtrMemD &&
- !BinOp->getType()->isFunctionType())
- return BinOp->getLHS()->isLvalue(Ctx);
-
- // The result of an ->* expression is an lvalue only if its second operand
- // is a pointer to data member.
- if (BinOp->getOpcode() == BinaryOperator::PtrMemI &&
- !BinOp->getType()->isFunctionType()) {
- QualType Ty = BinOp->getRHS()->getType();
- if (Ty->isMemberPointerType() && !Ty->isMemberFunctionPointerType())
- return LV_Valid;
- }
-
- if (!BinOp->isAssignmentOp())
- return LV_InvalidExpression;
-
- if (Ctx.getLangOptions().CPlusPlus)
- // C++ [expr.ass]p1:
- // The result of an assignment operation [...] is an lvalue.
- return LV_Valid;
-
-
- // C99 6.5.16:
- // An assignment expression [...] is not an lvalue.
- return LV_InvalidExpression;
- }
- case CallExprClass:
- case CXXOperatorCallExprClass:
- case CXXMemberCallExprClass: {
- // C++0x [expr.call]p10
- // A function call is an lvalue if and only if the result type
- // is an lvalue reference.
- QualType ReturnType = cast<CallExpr>(this)->getCallReturnType();
- if (ReturnType->isLValueReferenceType())
- return LV_Valid;
-
- // If the function is returning a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus && ReturnType->isRecordType())
- return LV_ClassTemporary;
-
- break;
- }
- case CompoundLiteralExprClass: // C99 6.5.2.5p5
- // FIXME: Is this what we want in C++?
- return LV_Valid;
- case ChooseExprClass:
- // __builtin_choose_expr is an lvalue if the selected operand is.
- return cast<ChooseExpr>(this)->getChosenSubExpr(Ctx)->isLvalue(Ctx);
- case ExtVectorElementExprClass:
- if (cast<ExtVectorElementExpr>(this)->containsDuplicateElements())
- return LV_DuplicateVectorComponents;
- return LV_Valid;
- case ObjCIvarRefExprClass: // ObjC instance variables are lvalues.
- return LV_Valid;
- case ObjCPropertyRefExprClass: // FIXME: check if read-only property.
- return LV_Valid;
- case ObjCImplicitSetterGetterRefExprClass:
- // FIXME: check if read-only property.
- return LV_Valid;
- case PredefinedExprClass:
- return LV_Valid;
- case UnresolvedLookupExprClass:
- case UnresolvedMemberExprClass:
- return LV_Valid;
- case CXXDefaultArgExprClass:
- return cast<CXXDefaultArgExpr>(this)->getExpr()->isLvalue(Ctx);
- case CStyleCastExprClass:
- case CXXFunctionalCastExprClass:
- case CXXStaticCastExprClass:
- case CXXDynamicCastExprClass:
- case CXXReinterpretCastExprClass:
- case CXXConstCastExprClass:
- // The result of an explicit cast is an lvalue if the type we are
- // casting to is an lvalue reference type. See C++ [expr.cast]p1,
- // C++ [expr.static.cast]p2, C++ [expr.dynamic.cast]p2,
- // C++ [expr.reinterpret.cast]p1, C++ [expr.const.cast]p1.
- if (cast<ExplicitCastExpr>(this)->getTypeAsWritten()->
- isLValueReferenceType())
- return LV_Valid;
-
- // If this is a conversion to a class temporary, make a note of
- // that.
- if (Ctx.getLangOptions().CPlusPlus &&
- cast<ExplicitCastExpr>(this)->getTypeAsWritten()->isRecordType())
- return LV_ClassTemporary;
-
- break;
- case CXXTypeidExprClass:
- // C++ 5.2.8p1: The result of a typeid expression is an lvalue of ...
- return LV_Valid;
- case CXXBindTemporaryExprClass:
- return cast<CXXBindTemporaryExpr>(this)->getSubExpr()->
- isLvalueInternal(Ctx);
- case CXXBindReferenceExprClass:
- // Something that's bound to a reference is always an lvalue.
- return LV_Valid;
- case ConditionalOperatorClass: {
- // Complicated handling is only for C++.
- if (!Ctx.getLangOptions().CPlusPlus)
- return LV_InvalidExpression;
-
- // Sema should have taken care to ensure that a CXXTemporaryObjectExpr is
- // everywhere there's an object converted to an rvalue. Also, any other
- // casts should be wrapped by ImplicitCastExprs. There's just the special
- // case involving throws to work out.
- const ConditionalOperator *Cond = cast<ConditionalOperator>(this);
- Expr *True = Cond->getTrueExpr();
- Expr *False = Cond->getFalseExpr();
- // C++0x 5.16p2
- // If either the second or the third operand has type (cv) void, [...]
- // the result [...] is an rvalue.
- if (True->getType()->isVoidType() || False->getType()->isVoidType())
- return LV_InvalidExpression;
-
- // Both sides must be lvalues for the result to be an lvalue.
- if (True->isLvalue(Ctx) != LV_Valid || False->isLvalue(Ctx) != LV_Valid)
- return LV_InvalidExpression;
-
- // That's it.
- return LV_Valid;
- }
-
- case Expr::CXXExprWithTemporariesClass:
- return cast<CXXExprWithTemporaries>(this)->getSubExpr()->isLvalue(Ctx);
-
- case Expr::ObjCMessageExprClass:
- if (const ObjCMethodDecl *Method
- = cast<ObjCMessageExpr>(this)->getMethodDecl())
- if (Method->getResultType()->isLValueReferenceType())
- return LV_Valid;
- break;
-
- case Expr::CXXConstructExprClass:
- case Expr::CXXTemporaryObjectExprClass:
- case Expr::CXXZeroInitValueExprClass:
- return LV_ClassTemporary;
-
- default:
- break;
- }
- return LV_InvalidExpression;
-}
-
-/// isModifiableLvalue - C99 6.3.2.1: an lvalue that does not have array type,
-/// does not have an incomplete type, does not have a const-qualified type, and
-/// if it is a structure or union, does not have any member (including,
-/// recursively, any member or element of all contained aggregates or unions)
-/// with a const-qualified type.
-Expr::isModifiableLvalueResult
-Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
- isLvalueResult lvalResult = isLvalue(Ctx);
-
- switch (lvalResult) {
- case LV_Valid:
- // C++ 3.10p11: Functions cannot be modified, but pointers to
- // functions can be modifiable.
- if (Ctx.getLangOptions().CPlusPlus && TR->isFunctionType())
- return MLV_NotObjectType;
- break;
-
- case LV_NotObjectType: return MLV_NotObjectType;
- case LV_IncompleteVoidType: return MLV_IncompleteVoidType;
- case LV_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
- case LV_InvalidExpression:
- // If the top level is a C-style cast, and the subexpression is a valid
- // lvalue, then this is probably a use of the old-school "cast as lvalue"
- // GCC extension. We don't support it, but we want to produce good
- // diagnostics when it happens so that the user knows why.
- if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(IgnoreParens())) {
- if (CE->getSubExpr()->isLvalue(Ctx) == LV_Valid) {
- if (Loc)
- *Loc = CE->getLParenLoc();
- return MLV_LValueCast;
- }
- }
- return MLV_InvalidExpression;
- case LV_MemberFunction: return MLV_MemberFunction;
- case LV_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
- case LV_ClassTemporary:
- return MLV_ClassTemporary;
- }
-
- // The following is illegal:
- // void takeclosure(void (^C)(void));
- // void func() { int x = 1; takeclosure(^{ x = 7; }); }
- //
- if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(this)) {
- if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
- return MLV_NotBlockQualified;
- }
-
- // Assigning to an 'implicit' property?
- if (const ObjCImplicitSetterGetterRefExpr* Expr =
- dyn_cast<ObjCImplicitSetterGetterRefExpr>(this)) {
- if (Expr->getSetterMethod() == 0)
- return MLV_NoSetterProperty;
- }
-
- QualType CT = Ctx.getCanonicalType(getType());
-
- if (CT.isConstQualified())
- return MLV_ConstQualified;
- if (CT->isArrayType())
- return MLV_ArrayType;
- if (CT->isIncompleteType())
- return MLV_IncompleteType;
-
- if (const RecordType *r = CT->getAs<RecordType>()) {
- if (r->hasConstFields())
- return MLV_ConstQualified;
- }
-
- return MLV_Valid;
-}
-
/// isOBJCGCCandidate - Check if an expression is objc gc'able.
/// returns true, if it is; false otherwise.
bool Expr::isOBJCGCCandidate(ASTContext &Ctx) const {
@@ -1596,7 +1251,7 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
if (CastExpr *P = dyn_cast<CastExpr>(E)) {
// We ignore integer <-> casts that are of the same width, ptr<->ptr and
- // ptr<->int casts of the same width. We also ignore all identify casts.
+ // ptr<->int casts of the same width. We also ignore all identity casts.
Expr *SE = P->getSubExpr();
if (Ctx.hasSameUnqualifiedType(E->getType(), SE->getType())) {
@@ -1604,8 +1259,10 @@ Expr *Expr::IgnoreParenNoopCasts(ASTContext &Ctx) {
continue;
}
- if ((E->getType()->isPointerType() || E->getType()->isIntegralType()) &&
- (SE->getType()->isPointerType() || SE->getType()->isIntegralType()) &&
+ if ((E->getType()->isPointerType() ||
+ E->getType()->isIntegralType(Ctx)) &&
+ (SE->getType()->isPointerType() ||
+ SE->getType()->isIntegralType(Ctx)) &&
Ctx.getTypeSize(E->getType()) == Ctx.getTypeSize(SE->getType())) {
E = SE;
continue;
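
For intuition, the width-preserving casts skipped here are the kind whose round trip preserves the value; a standalone sketch (assumes intptr_t is pointer-sized, as it is on the usual targets):

    #include <stdint.h>

    int main() {
      int x = 7;
      intptr_t ip = reinterpret_cast<intptr_t>(&x); // pointer -> same-width integer
      int *p = reinterpret_cast<int *>(ip);         // integer -> pointer
      return (p == &x && *p == 7) ? 0 : 1;          // the value survives the round trip
    }
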
@@ -1795,7 +1452,7 @@ bool Expr::isNullPointerConstant(ASTContext &Ctx,
// If the unthinkable happens, fall through to the safest alternative.
case NPC_ValueDependentIsNull:
- return isTypeDependent() || getType()->isIntegralType();
+ return isTypeDependent() || getType()->isIntegralType(Ctx);
case NPC_ValueDependentIsNotNull:
return false;
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index d1a2b261f26b..c2548eca659e 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -74,27 +74,27 @@ Stmt::child_iterator CXXDefaultArgExpr::child_end() {
return child_iterator();
}
-// CXXZeroInitValueExpr
-Stmt::child_iterator CXXZeroInitValueExpr::child_begin() {
+// CXXScalarValueInitExpr
+Stmt::child_iterator CXXScalarValueInitExpr::child_begin() {
return child_iterator();
}
-Stmt::child_iterator CXXZeroInitValueExpr::child_end() {
+Stmt::child_iterator CXXScalarValueInitExpr::child_end() {
return child_iterator();
}
// CXXNewExpr
CXXNewExpr::CXXNewExpr(ASTContext &C, bool globalNew, FunctionDecl *operatorNew,
Expr **placementArgs, unsigned numPlaceArgs,
- bool parenTypeId, Expr *arraySize,
+ SourceRange TypeIdParens, Expr *arraySize,
CXXConstructorDecl *constructor, bool initializer,
Expr **constructorArgs, unsigned numConsArgs,
FunctionDecl *operatorDelete, QualType ty,
SourceLocation startLoc, SourceLocation endLoc)
: Expr(CXXNewExprClass, ty, ty->isDependentType(), ty->isDependentType()),
- GlobalNew(globalNew), ParenTypeId(parenTypeId),
+ GlobalNew(globalNew),
Initializer(initializer), SubExprs(0), OperatorNew(operatorNew),
OperatorDelete(operatorDelete), Constructor(constructor),
- StartLoc(startLoc), EndLoc(endLoc) {
+ TypeIdParens(TypeIdParens), StartLoc(startLoc), EndLoc(endLoc) {
AllocateArgsArray(C, arraySize != 0, numPlaceArgs, numConsArgs);
unsigned i = 0;
@@ -190,6 +190,18 @@ UnresolvedLookupExpr::Create(ASTContext &C, bool Dependent,
return ULE;
}
+UnresolvedLookupExpr *
+UnresolvedLookupExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedLookupExpr);
+ if (NumTemplateArgs != 0)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignof<UnresolvedLookupExpr>());
+ UnresolvedLookupExpr *E = new (Mem) UnresolvedLookupExpr(EmptyShell());
+ E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ return E;
+}
+
OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
bool Dependent, NestedNameSpecifier *Qualifier,
SourceRange QRange, DeclarationName Name,
@@ -197,19 +209,28 @@ OverloadExpr::OverloadExpr(StmtClass K, ASTContext &C, QualType T,
UnresolvedSetIterator Begin,
UnresolvedSetIterator End)
: Expr(K, T, Dependent, Dependent),
- Results(0), NumResults(End - Begin), Name(Name), Qualifier(Qualifier),
+ Results(0), NumResults(0), Name(Name), Qualifier(Qualifier),
QualifierRange(QRange), NameLoc(NameLoc),
HasExplicitTemplateArgs(HasTemplateArgs)
{
+ initializeResults(C, Begin, End);
+}
+
+void OverloadExpr::initializeResults(ASTContext &C,
+ UnresolvedSetIterator Begin,
+ UnresolvedSetIterator End) {
+ assert(Results == 0 && "Results already initialized!");
+ NumResults = End - Begin;
if (NumResults) {
Results = static_cast<DeclAccessPair *>(
C.Allocate(sizeof(DeclAccessPair) * NumResults,
llvm::alignof<DeclAccessPair>()));
memcpy(Results, &*Begin.getIterator(),
- (End - Begin) * sizeof(DeclAccessPair));
+ NumResults * sizeof(DeclAccessPair));
}
}
+
bool OverloadExpr::ComputeDependence(UnresolvedSetIterator Begin,
UnresolvedSetIterator End,
const TemplateArgumentListInfo *Args) {
@@ -269,6 +290,19 @@ DependentScopeDeclRefExpr::Create(ASTContext &C,
return DRE;
}
+DependentScopeDeclRefExpr *
+DependentScopeDeclRefExpr::CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(DependentScopeDeclRefExpr);
+ if (NumTemplateArgs)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size);
+
+ return new (Mem) DependentScopeDeclRefExpr(QualType(), 0, SourceRange(),
+ DeclarationName(),SourceLocation(),
+ NumTemplateArgs != 0);
+}
+
StmtIterator DependentScopeDeclRefExpr::child_begin() {
return child_iterator();
}
@@ -535,14 +569,6 @@ CXXConstructExpr::CXXConstructExpr(ASTContext &C, StmtClass SC, QualType T,
}
}
-CXXConstructExpr::CXXConstructExpr(EmptyShell Empty, ASTContext &C,
- unsigned numargs)
- : Expr(CXXConstructExprClass, Empty), Args(0), NumArgs(numargs)
-{
- if (NumArgs)
- Args = new (C) Stmt*[NumArgs];
-}
-
void CXXConstructExpr::DoDestroy(ASTContext &C) {
DestroyChildren(C);
if (Args)
@@ -656,6 +682,14 @@ CXXUnresolvedConstructExpr::Create(ASTContext &C,
Args, NumArgs, RParenLoc);
}
+CXXUnresolvedConstructExpr *
+CXXUnresolvedConstructExpr::CreateEmpty(ASTContext &C, unsigned NumArgs) {
+ Stmt::EmptyShell Empty;
+ void *Mem = C.Allocate(sizeof(CXXUnresolvedConstructExpr) +
+ sizeof(Expr *) * NumArgs);
+ return new (Mem) CXXUnresolvedConstructExpr(Empty, NumArgs);
+}
+
Stmt::child_iterator CXXUnresolvedConstructExpr::child_begin() {
return child_iterator(reinterpret_cast<Stmt **>(this + 1));
}
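
The CreateEmpty() factories above all rely on the same allocation pattern: the node and its variable-length tail (template arguments or argument expressions) live in one buffer, and the tail is reached through "this + 1", as in child_begin() above. A minimal standalone sketch of that pattern, with invented names and no clang dependencies:

    #include <cassert>
    #include <cstdlib>
    #include <new>

    struct Node {
      unsigned NumArgs;
      explicit Node(unsigned N) : NumArgs(N) {}
      // The trailing array lives immediately after the object itself.
      int *args() { return reinterpret_cast<int *>(this + 1); }
    };

    int main() {
      unsigned N = 4;
      // One allocation holds the node plus its trailing argument storage.
      void *Mem = std::malloc(sizeof(Node) + N * sizeof(int));
      Node *node = new (Mem) Node(N);
      for (unsigned i = 0; i != N; ++i)
        node->args()[i] = static_cast<int>(i);
      assert(node->NumArgs == 4 && node->args()[3] == 3);
      node->~Node();
      std::free(Mem);
      return 0;
    }
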
@@ -714,6 +748,29 @@ CXXDependentScopeMemberExpr::Create(ASTContext &C,
Member, MemberLoc, TemplateArgs);
}
+CXXDependentScopeMemberExpr *
+CXXDependentScopeMemberExpr::CreateEmpty(ASTContext &C,
+ unsigned NumTemplateArgs) {
+ if (NumTemplateArgs == 0)
+ return new (C) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(), 0,
+ SourceRange(), 0,
+ DeclarationName(),
+ SourceLocation());
+
+ std::size_t size = sizeof(CXXDependentScopeMemberExpr) +
+ ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+ void *Mem = C.Allocate(size, llvm::alignof<CXXDependentScopeMemberExpr>());
+ CXXDependentScopeMemberExpr *E
+ = new (Mem) CXXDependentScopeMemberExpr(C, 0, QualType(),
+ 0, SourceLocation(), 0,
+ SourceRange(), 0,
+ DeclarationName(),
+ SourceLocation(), 0);
+ E->HasExplicitTemplateArgs = true;
+ return E;
+}
+
Stmt::child_iterator CXXDependentScopeMemberExpr::child_begin() {
return child_iterator(&Base);
}
@@ -770,6 +827,18 @@ UnresolvedMemberExpr::Create(ASTContext &C, bool Dependent,
Member, MemberLoc, TemplateArgs, Begin, End);
}
+UnresolvedMemberExpr *
+UnresolvedMemberExpr::CreateEmpty(ASTContext &C, unsigned NumTemplateArgs) {
+ std::size_t size = sizeof(UnresolvedMemberExpr);
+ if (NumTemplateArgs != 0)
+ size += ExplicitTemplateArgumentList::sizeFor(NumTemplateArgs);
+
+ void *Mem = C.Allocate(size, llvm::alignof<UnresolvedMemberExpr>());
+ UnresolvedMemberExpr *E = new (Mem) UnresolvedMemberExpr(EmptyShell());
+ E->HasExplicitTemplateArgs = NumTemplateArgs != 0;
+ return E;
+}
+
CXXRecordDecl *UnresolvedMemberExpr::getNamingClass() const {
// Unlike for UnresolvedLookupExpr, it is very easy to re-derive this.
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
new file mode 100644
index 000000000000..60ac347c50fb
--- /dev/null
+++ b/lib/AST/ExprClassification.cpp
@@ -0,0 +1,471 @@
+//===--- ExprClassification.cpp - Expression AST Node Implementation ------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements Expr::classify.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Support/ErrorHandling.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ExprObjC.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
+using namespace clang;
+
+typedef Expr::Classification Cl;
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E);
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D);
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T);
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E);
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E);
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const ConditionalOperator *E);
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc);
+
+Cl Expr::ClassifyImpl(ASTContext &Ctx, SourceLocation *Loc) const {
+ assert(!TR->isReferenceType() && "Expressions can't have reference type.");
+
+ Cl::Kinds kind = ClassifyInternal(Ctx, this);
+ // C99 6.3.2.1: An lvalue is an expression with an object type or an
+ // incomplete type other than void.
+ if (!Ctx.getLangOptions().CPlusPlus) {
+ // Thus, no functions.
+ if (TR->isFunctionType() || TR == Ctx.OverloadTy)
+ kind = Cl::CL_Function;
+ // No void either, but qualified void is OK because it is "other than void".
+ else if (TR->isVoidType() && !Ctx.getCanonicalType(TR).hasQualifiers())
+ kind = Cl::CL_Void;
+ }
+
+ Cl::ModifiableType modifiable = Cl::CM_Untested;
+ if (Loc)
+ modifiable = IsModifiable(Ctx, this, kind, *Loc);
+ return Classification(kind, modifiable);
+}
+
+static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
+ // This function takes the first stab at classifying expressions.
+ const LangOptions &Lang = Ctx.getLangOptions();
+
+ switch (E->getStmtClass()) {
+ // First come the expressions that are always lvalues, unconditionally.
+
+ case Expr::ObjCIsaExprClass:
+ // C++ [expr.prim.general]p1: A string literal is an lvalue.
+ case Expr::StringLiteralClass:
+ // @encode is equivalent to its string
+ case Expr::ObjCEncodeExprClass:
+ // __func__ and friends are too.
+ case Expr::PredefinedExprClass:
+ // Property references are lvalues
+ case Expr::ObjCPropertyRefExprClass:
+ case Expr::ObjCImplicitSetterGetterRefExprClass:
+ // C++ [expr.typeid]p1: The result of a typeid expression is an lvalue of...
+ case Expr::CXXTypeidExprClass:
+ // Unresolved lookups get classified as lvalues.
+ // FIXME: Is this wise? Should they get their own kind?
+ case Expr::UnresolvedLookupExprClass:
+ case Expr::UnresolvedMemberExprClass:
+ // ObjC instance variables are lvalues
+ // FIXME: ObjC++0x might have different rules
+ case Expr::ObjCIvarRefExprClass:
+ // C99 6.5.2.5p5 says that compound literals are lvalues.
+ // FIXME: C++ might have a different opinion.
+ case Expr::CompoundLiteralExprClass:
+ return Cl::CL_LValue;
+
+ // Next come the complicated cases.
+
+ // C++ [expr.sub]p1: The result is an lvalue of type "T".
+ // However, subscripting vector types is more like member access.
+ case Expr::ArraySubscriptExprClass:
+ if (cast<ArraySubscriptExpr>(E)->getBase()->getType()->isVectorType())
+ return ClassifyInternal(Ctx, cast<ArraySubscriptExpr>(E)->getBase());
+ return Cl::CL_LValue;
+
+ // C++ [expr.prim.general]p3: The result is an lvalue if the entity is a
+ // function or variable and a prvalue otherwise.
+ case Expr::DeclRefExprClass:
+ return ClassifyDecl(Ctx, cast<DeclRefExpr>(E)->getDecl());
+ // We deal with names referenced from blocks the same way.
+ case Expr::BlockDeclRefExprClass:
+ return ClassifyDecl(Ctx, cast<BlockDeclRefExpr>(E)->getDecl());
+
+ // Member access is complex.
+ case Expr::MemberExprClass:
+ return ClassifyMemberExpr(Ctx, cast<MemberExpr>(E));
+
+ case Expr::UnaryOperatorClass:
+ switch (cast<UnaryOperator>(E)->getOpcode()) {
+ // C++ [expr.unary.op]p1: The unary * operator performs indirection:
+ // [...] the result is an lvalue referring to the object or function
+ // to which the expression points.
+ case UnaryOperator::Deref:
+ return Cl::CL_LValue;
+
+ // GNU extensions, simply look through them.
+ case UnaryOperator::Real:
+ case UnaryOperator::Imag:
+ case UnaryOperator::Extension:
+ return ClassifyInternal(Ctx, cast<UnaryOperator>(E)->getSubExpr());
+
+ // C++ [expr.pre.incr]p1: The result is the updated operand; it is an
+ // lvalue, [...]
+ // Not so in C.
+ case UnaryOperator::PreInc:
+ case UnaryOperator::PreDec:
+ return Lang.CPlusPlus ? Cl::CL_LValue : Cl::CL_PRValue;
+
+ default:
+ return Cl::CL_PRValue;
+ }
+
+ // Implicit casts are lvalues if they're lvalue casts. Other than that, we
+ // only specifically record class temporaries.
+ case Expr::ImplicitCastExprClass:
+ if (cast<ImplicitCastExpr>(E)->isLvalueCast())
+ return Cl::CL_LValue;
+ return Lang.CPlusPlus && E->getType()->isRecordType() ?
+ Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ // C++ [expr.prim.general]p4: The presence of parentheses does not affect
+ // whether the expression is an lvalue.
+ case Expr::ParenExprClass:
+ return ClassifyInternal(Ctx, cast<ParenExpr>(E)->getSubExpr());
+
+ case Expr::BinaryOperatorClass:
+ case Expr::CompoundAssignOperatorClass:
+ // C doesn't have any binary expressions that are lvalues.
+ if (Lang.CPlusPlus)
+ return ClassifyBinaryOp(Ctx, cast<BinaryOperator>(E));
+ return Cl::CL_PRValue;
+
+ case Expr::CallExprClass:
+ case Expr::CXXOperatorCallExprClass:
+ case Expr::CXXMemberCallExprClass:
+ return ClassifyUnnamed(Ctx, cast<CallExpr>(E)->getCallReturnType());
+
+ // __builtin_choose_expr is equivalent to the chosen expression.
+ case Expr::ChooseExprClass:
+ return ClassifyInternal(Ctx, cast<ChooseExpr>(E)->getChosenSubExpr(Ctx));
+
+ // Extended vector element access is an lvalue unless there are duplicates
+ // in the shuffle expression.
+ case Expr::ExtVectorElementExprClass:
+ return cast<ExtVectorElementExpr>(E)->containsDuplicateElements() ?
+ Cl::CL_DuplicateVectorComponents : Cl::CL_LValue;
+
+ // Simply look at the actual default argument.
+ case Expr::CXXDefaultArgExprClass:
+ return ClassifyInternal(Ctx, cast<CXXDefaultArgExpr>(E)->getExpr());
+
+ // Same idea for temporary binding.
+ case Expr::CXXBindTemporaryExprClass:
+ return ClassifyInternal(Ctx, cast<CXXBindTemporaryExpr>(E)->getSubExpr());
+
+ // And the temporary lifetime guard.
+ case Expr::CXXExprWithTemporariesClass:
+ return ClassifyInternal(Ctx, cast<CXXExprWithTemporaries>(E)->getSubExpr());
+
+ // Casts depend completely on the target type. All casts work the same.
+ case Expr::CStyleCastExprClass:
+ case Expr::CXXFunctionalCastExprClass:
+ case Expr::CXXStaticCastExprClass:
+ case Expr::CXXDynamicCastExprClass:
+ case Expr::CXXReinterpretCastExprClass:
+ case Expr::CXXConstCastExprClass:
+ // Only in C++ can casts be interesting at all.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyUnnamed(Ctx, cast<ExplicitCastExpr>(E)->getTypeAsWritten());
+
+ case Expr::ConditionalOperatorClass:
+ // Once again, only C++ is interesting.
+ if (!Lang.CPlusPlus) return Cl::CL_PRValue;
+ return ClassifyConditional(Ctx, cast<ConditionalOperator>(E));
+
+ // ObjC message sends are effectively function calls, if the target function
+ // is known.
+ case Expr::ObjCMessageExprClass:
+ if (const ObjCMethodDecl *Method =
+ cast<ObjCMessageExpr>(E)->getMethodDecl()) {
+ return ClassifyUnnamed(Ctx, Method->getResultType());
+ }
+
+ // Some C++ expressions are always class temporaries.
+ case Expr::CXXConstructExprClass:
+ case Expr::CXXTemporaryObjectExprClass:
+ case Expr::CXXScalarValueInitExprClass:
+ return Cl::CL_ClassTemporary;
+
+ // Everything we haven't handled is a prvalue.
+ default:
+ return Cl::CL_PRValue;
+ }
+}
+
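
The pre-increment/pre-decrement difference between C and C++ noted above is directly observable; a standalone C++ snippet illustrating the language rule (not clang API usage):

    #include <cassert>

    int main() {
      int x = 0;
      (++x) = 5;        // OK in C++: ++x is an lvalue; a constraint violation in C
      assert(x == 5);
      return 0;
    }
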
+/// ClassifyDecl - Return the classification of an expression referencing the
+/// given declaration.
+static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
+ // C++ [expr.prim.general]p6: The result is an lvalue if the entity is a
+ // function, variable, or data member and a prvalue otherwise.
+ // In C, functions are not lvalues.
+ // In addition, NonTypeTemplateParmDecl derives from VarDecl but isn't an
+ // lvalue unless it's a reference type (C++ [temp.param]p6), so we need to
+ // special-case this.
+ bool islvalue;
+ if (const NonTypeTemplateParmDecl *NTTParm =
+ dyn_cast<NonTypeTemplateParmDecl>(D))
+ islvalue = NTTParm->getType()->isReferenceType();
+ else
+ islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
+ (Ctx.getLangOptions().CPlusPlus &&
+ (isa<FunctionDecl>(D) || isa<FunctionTemplateDecl>(D)));
+
+ return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
+}
+
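
As a standalone illustration of the declaration rule (plain C++, nothing clang-specific): binding a reference requires an lvalue, and in C++ both variable names and function names qualify:

    #include <cassert>

    int f() { return 7; }

    int main() {
      int (&fr)() = f;  // a function name is an lvalue in C++
      int x = 1;
      int &xr = x;      // a variable name is an lvalue as well
      assert(fr() == 7 && xr == 1);
      return 0;
    }
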
+/// ClassifyUnnamed - Return the classification of an expression yielding an
+/// unnamed value of the given type. This applies in particular to function
+/// calls and casts.
+static Cl::Kinds ClassifyUnnamed(ASTContext &Ctx, QualType T) {
+ // In C, function calls are always rvalues.
+ if (!Ctx.getLangOptions().CPlusPlus) return Cl::CL_PRValue;
+
+ // C++ [expr.call]p10: A function call is an lvalue if the result type is an
+ // lvalue reference type or an rvalue reference to function type, an xvalue
+  // if the result type is an rvalue reference to object type, and a prvalue
+ // otherwise.
+ if (T->isLValueReferenceType())
+ return Cl::CL_LValue;
+ const RValueReferenceType *RV = T->getAs<RValueReferenceType>();
+ if (!RV) // Could still be a class temporary, though.
+ return T->isRecordType() ? Cl::CL_ClassTemporary : Cl::CL_PRValue;
+
+ return RV->getPointeeType()->isFunctionType() ? Cl::CL_LValue : Cl::CL_XValue;
+}
+
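
The call rule above in standalone form (plain C++; the functions are made up for illustration):

    #include <cassert>

    int g = 0;
    int &byRef() { return g; }  // lvalue reference result type
    int byVal() { return g; }   // non-reference result type

    int main() {
      byRef() = 5;              // the call expression is an lvalue
      // byVal() = 6;           // error: the call expression is a prvalue
      assert(g == 5);
      return 0;
    }
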
+static Cl::Kinds ClassifyMemberExpr(ASTContext &Ctx, const MemberExpr *E) {
+ // Handle C first, it's easier.
+ if (!Ctx.getLangOptions().CPlusPlus) {
+ // C99 6.5.2.3p3
+ // For dot access, the expression is an lvalue if the first part is. For
+ // arrow access, it always is an lvalue.
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ // ObjC property accesses are not lvalues, but get special treatment.
+ Expr *Base = E->getBase();
+ if (isa<ObjCPropertyRefExpr>(Base) ||
+ isa<ObjCImplicitSetterGetterRefExpr>(Base))
+ return Cl::CL_SubObjCPropertySetting;
+ return ClassifyInternal(Ctx, Base);
+ }
+
+ NamedDecl *Member = E->getMemberDecl();
+ // C++ [expr.ref]p3: E1->E2 is converted to the equivalent form (*(E1)).E2.
+ // C++ [expr.ref]p4: If E2 is declared to have type "reference to T", then
+ // E1.E2 is an lvalue.
+ if (ValueDecl *Value = dyn_cast<ValueDecl>(Member))
+ if (Value->getType()->isReferenceType())
+ return Cl::CL_LValue;
+
+ // Otherwise, one of the following rules applies.
+ // -- If E2 is a static member [...] then E1.E2 is an lvalue.
+ if (isa<VarDecl>(Member) && Member->getDeclContext()->isRecord())
+ return Cl::CL_LValue;
+
+ // -- If E2 is a non-static data member [...]. If E1 is an lvalue, then
+ // E1.E2 is an lvalue; if E1 is an xvalue, then E1.E2 is an xvalue;
+ // otherwise, it is a prvalue.
+ if (isa<FieldDecl>(Member)) {
+ // *E1 is an lvalue
+ if (E->isArrow())
+ return Cl::CL_LValue;
+ return ClassifyInternal(Ctx, E->getBase());
+ }
+
+ // -- If E2 is a [...] member function, [...]
+ // -- If it refers to a static member function [...], then E1.E2 is an
+ // lvalue; [...]
+ // -- Otherwise [...] E1.E2 is a prvalue.
+ if (CXXMethodDecl *Method = dyn_cast<CXXMethodDecl>(Member))
+ return Method->isStatic() ? Cl::CL_LValue : Cl::CL_MemberFunction;
+
+ // -- If E2 is a member enumerator [...], the expression E1.E2 is a prvalue.
+ // So is everything else we haven't handled yet.
+ return Cl::CL_PRValue;
+}
+
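
The non-static data member case can be seen without any clang machinery (standalone C++03-style snippet, names invented):

    #include <cassert>

    struct S { int m; };
    S make() { S t; t.m = 3; return t; }

    int main() {
      S s;
      s.m = 2;            // E1 is an lvalue, so E1.E2 is an lvalue
      // make().m = 4;    // error: E1 is not an lvalue, so neither is E1.E2
      assert(s.m == 2 && make().m == 3);
      return 0;
    }
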
+static Cl::Kinds ClassifyBinaryOp(ASTContext &Ctx, const BinaryOperator *E) {
+ assert(Ctx.getLangOptions().CPlusPlus &&
+ "This is only relevant for C++.");
+ // C++ [expr.ass]p1: All [...] return an lvalue referring to the left operand.
+ if (E->isAssignmentOp())
+ return Cl::CL_LValue;
+
+ // C++ [expr.comma]p1: the result is of the same value category as its right
+ // operand, [...].
+ if (E->getOpcode() == BinaryOperator::Comma)
+ return ClassifyInternal(Ctx, E->getRHS());
+
+ // C++ [expr.mptr.oper]p6: The result of a .* expression whose second operand
+ // is a pointer to a data member is of the same value category as its first
+ // operand.
+ if (E->getOpcode() == BinaryOperator::PtrMemD)
+ return E->getType()->isFunctionType() ? Cl::CL_MemberFunction :
+ ClassifyInternal(Ctx, E->getLHS());
+
+ // C++ [expr.mptr.oper]p6: The result of an ->* expression is an lvalue if its
+ // second operand is a pointer to data member and a prvalue otherwise.
+ if (E->getOpcode() == BinaryOperator::PtrMemI)
+ return E->getType()->isFunctionType() ?
+ Cl::CL_MemberFunction : Cl::CL_LValue;
+
+ // All other binary operations are prvalues.
+ return Cl::CL_PRValue;
+}
+
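
Both the assignment and the comma rules cited above are observable in standalone C++:

    #include <cassert>

    int main() {
      int a = 0, b = 0;
      int &r = (a = 1);   // [expr.ass]p1: the assignment yields an lvalue (refers to a)
      r = 2;
      (a, b) = 3;         // [expr.comma]p1: the result follows the right operand
      assert(a == 2 && b == 3);
      return 0;
    }
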
+static Cl::Kinds ClassifyConditional(ASTContext &Ctx,
+ const ConditionalOperator *E) {
+ assert(Ctx.getLangOptions().CPlusPlus &&
+ "This is only relevant for C++.");
+
+ Expr *True = E->getTrueExpr();
+ Expr *False = E->getFalseExpr();
+ // C++ [expr.cond]p2
+ // If either the second or the third operand has type (cv) void, [...]
+ // the result [...] is a prvalue.
+ if (True->getType()->isVoidType() || False->getType()->isVoidType())
+ return Cl::CL_PRValue;
+
+ // Note that at this point, we have already performed all conversions
+ // according to [expr.cond]p3.
+ // C++ [expr.cond]p4: If the second and third operands are glvalues of the
+ // same value category [...], the result is of that [...] value category.
+ // C++ [expr.cond]p5: Otherwise, the result is a prvalue.
+ Cl::Kinds LCl = ClassifyInternal(Ctx, True),
+ RCl = ClassifyInternal(Ctx, False);
+ return LCl == RCl ? LCl : Cl::CL_PRValue;
+}
+
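
The glvalue branch of the conditional-operator rule, in standalone C++:

    #include <cassert>

    int main() {
      int a = 1, b = 2;
      bool pick_a = true;
      (pick_a ? a : b) = 42;  // both arms are lvalues, so the result is an lvalue
      assert(a == 42 && b == 2);
      return 0;
    }
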
+static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
+ Cl::Kinds Kind, SourceLocation &Loc) {
+ // As a general rule, we only care about lvalues. But there are some rvalues
+ // for which we want to generate special results.
+ if (Kind == Cl::CL_PRValue) {
+ // For the sake of better diagnostics, we want to specifically recognize
+ // use of the GCC cast-as-lvalue extension.
+ if (const CStyleCastExpr *CE = dyn_cast<CStyleCastExpr>(E->IgnoreParens())){
+ if (CE->getSubExpr()->Classify(Ctx).isLValue()) {
+ Loc = CE->getLParenLoc();
+ return Cl::CM_LValueCast;
+ }
+ }
+ }
+ if (Kind != Cl::CL_LValue)
+ return Cl::CM_RValue;
+
+ // This is the lvalue case.
+ // Functions are lvalues in C++, but not modifiable. (C++ [basic.lval]p6)
+ if (Ctx.getLangOptions().CPlusPlus && E->getType()->isFunctionType())
+ return Cl::CM_Function;
+
+ // You cannot assign to a variable outside a block from within the block if
+ // it is not marked __block, e.g.
+ // void takeclosure(void (^C)(void));
+ // void func() { int x = 1; takeclosure(^{ x = 7; }); }
+ if (const BlockDeclRefExpr *BDR = dyn_cast<BlockDeclRefExpr>(E)) {
+ if (!BDR->isByRef() && isa<VarDecl>(BDR->getDecl()))
+ return Cl::CM_NotBlockQualified;
+ }
+
+ // Assignment to a property in ObjC is an implicit setter access. But a
+ // setter might not exist.
+ if (const ObjCImplicitSetterGetterRefExpr *Expr =
+ dyn_cast<ObjCImplicitSetterGetterRefExpr>(E)) {
+ if (Expr->getSetterMethod() == 0)
+ return Cl::CM_NoSetterProperty;
+ }
+
+ CanQualType CT = Ctx.getCanonicalType(E->getType());
+ // Const stuff is obviously not modifiable.
+ if (CT.isConstQualified())
+ return Cl::CM_ConstQualified;
+  // Arrays are not modifiable; only their elements are.
+ if (CT->isArrayType())
+ return Cl::CM_ArrayType;
+ // Incomplete types are not modifiable.
+ if (CT->isIncompleteType())
+ return Cl::CM_IncompleteType;
+
+ // Records with any const fields (recursively) are not modifiable.
+ if (const RecordType *R = CT->getAs<RecordType>()) {
+ assert(!Ctx.getLangOptions().CPlusPlus &&
+ "C++ struct assignment should be resolved by the "
+ "copy assignment operator.");
+ if (R->hasConstFields())
+ return Cl::CM_ConstQualified;
+ }
+
+ return Cl::CM_Modifiable;
+}
+
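
A standalone sketch of the const and array cases rejected above (the commented-out lines would not compile):

    int main() {
      const int c = 1;
      int arr[3] = { 0, 1, 2 };
      // c = 2;              // error: const-qualified lvalue is not modifiable
      // arr = arr;          // error: an array lvalue is not assignable
      arr[0] = 5;            // but an element is a modifiable lvalue
      return arr[0] + c - 6; // 0
    }
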
+Expr::isLvalueResult Expr::isLvalue(ASTContext &Ctx) const {
+ Classification VC = Classify(Ctx);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: return LV_Valid;
+ case Cl::CL_XValue: return LV_InvalidExpression;
+ case Cl::CL_Function: return LV_NotObjectType;
+ case Cl::CL_Void: return LV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return LV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return LV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return LV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return LV_ClassTemporary;
+ case Cl::CL_PRValue: return LV_InvalidExpression;
+ }
+ llvm_unreachable("Unhandled kind");
+}
+
+Expr::isModifiableLvalueResult
+Expr::isModifiableLvalue(ASTContext &Ctx, SourceLocation *Loc) const {
+ SourceLocation dummy;
+ Classification VC = ClassifyModifiable(Ctx, Loc ? *Loc : dummy);
+ switch (VC.getKind()) {
+ case Cl::CL_LValue: break;
+ case Cl::CL_XValue: return MLV_InvalidExpression;
+ case Cl::CL_Function: return MLV_NotObjectType;
+ case Cl::CL_Void: return MLV_IncompleteVoidType;
+ case Cl::CL_DuplicateVectorComponents: return MLV_DuplicateVectorComponents;
+ case Cl::CL_MemberFunction: return MLV_MemberFunction;
+ case Cl::CL_SubObjCPropertySetting: return MLV_SubObjCPropertySetting;
+ case Cl::CL_ClassTemporary: return MLV_ClassTemporary;
+ case Cl::CL_PRValue:
+ return VC.getModifiable() == Cl::CM_LValueCast ?
+ MLV_LValueCast : MLV_InvalidExpression;
+ }
+ assert(VC.getKind() == Cl::CL_LValue && "Unhandled kind");
+ switch (VC.getModifiable()) {
+ case Cl::CM_Untested: llvm_unreachable("Did not test modifiability");
+ case Cl::CM_Modifiable: return MLV_Valid;
+ case Cl::CM_RValue: llvm_unreachable("CM_RValue and CL_LValue don't match");
+ case Cl::CM_Function: return MLV_NotObjectType;
+ case Cl::CM_LValueCast:
+ llvm_unreachable("CM_LValueCast and CL_LValue don't match");
+ case Cl::CM_NotBlockQualified: return MLV_NotBlockQualified;
+ case Cl::CM_NoSetterProperty: return MLV_NoSetterProperty;
+ case Cl::CM_ConstQualified: return MLV_ConstQualified;
+ case Cl::CM_ArrayType: return MLV_ArrayType;
+ case Cl::CM_IncompleteType: return MLV_IncompleteType;
+ }
+ llvm_unreachable("Unhandled modifiable type");
+}
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index dc614018ec2b..a963182ae88b 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -157,7 +157,7 @@ static bool EvalPointerValueAsBool(LValue& Value, bool& Result) {
static bool HandleConversionToBool(const Expr* E, bool& Result,
EvalInfo &Info) {
- if (E->getType()->isIntegralType()) {
+ if (E->getType()->isIntegralOrEnumerationType()) {
APSInt IntResult;
if (!EvaluateInteger(E, IntResult, Info))
return false;
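
For intuition, values of enumeration type routinely appear where integral constant evaluation is required, which is what the isIntegralOrEnumerationType() checks admit; a standalone example:

    enum Color { Red = 1, Green = 2 };
    int table[Red + Green];  // enumeration operands in an integral constant expression

    int main() {
      return sizeof(table) / sizeof(table[0]);  // 3
    }
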
@@ -542,7 +542,7 @@ bool PointerExprEvaluator::VisitCastExpr(CastExpr* E) {
SubExpr->getType()->isBlockPointerType())
return Visit(SubExpr);
- if (SubExpr->getType()->isIntegralType()) {
+ if (SubExpr->getType()->isIntegralOrEnumerationType()) {
APValue Value;
if (!EvaluateIntegerOrLValue(SubExpr, Value, Info))
break;
@@ -746,25 +746,46 @@ VectorExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
QualType EltTy = VT->getElementType();
llvm::SmallVector<APValue, 4> Elements;
- for (unsigned i = 0; i < NumElements; i++) {
+ // If a vector is initialized with a single element, that value
+ // becomes every element of the vector, not just the first.
+ // This is the behavior described in the IBM AltiVec documentation.
+ if (NumInits == 1) {
+ APValue InitValue;
if (EltTy->isIntegerType()) {
llvm::APSInt sInt(32);
- if (i < NumInits) {
- if (!EvaluateInteger(E->getInit(i), sInt, Info))
- return APValue();
- } else {
- sInt = Info.Ctx.MakeIntValue(0, EltTy);
- }
- Elements.push_back(APValue(sInt));
+ if (!EvaluateInteger(E->getInit(0), sInt, Info))
+ return APValue();
+ InitValue = APValue(sInt);
} else {
llvm::APFloat f(0.0);
- if (i < NumInits) {
- if (!EvaluateFloat(E->getInit(i), f, Info))
- return APValue();
+ if (!EvaluateFloat(E->getInit(0), f, Info))
+ return APValue();
+ InitValue = APValue(f);
+ }
+ for (unsigned i = 0; i < NumElements; i++) {
+ Elements.push_back(InitValue);
+ }
+ } else {
+ for (unsigned i = 0; i < NumElements; i++) {
+ if (EltTy->isIntegerType()) {
+ llvm::APSInt sInt(32);
+ if (i < NumInits) {
+ if (!EvaluateInteger(E->getInit(i), sInt, Info))
+ return APValue();
+ } else {
+ sInt = Info.Ctx.MakeIntValue(0, EltTy);
+ }
+ Elements.push_back(APValue(sInt));
} else {
- f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ llvm::APFloat f(0.0);
+ if (i < NumInits) {
+ if (!EvaluateFloat(E->getInit(i), f, Info))
+ return APValue();
+ } else {
+ f = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(EltTy));
+ }
+ Elements.push_back(APValue(f));
}
- Elements.push_back(APValue(f));
}
}
return APValue(&Elements[0], Elements.size());
@@ -818,7 +839,8 @@ public:
: Info(info), Result(result) {}
bool Success(const llvm::APSInt &SI, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
assert(SI.isSigned() == E->getType()->isSignedIntegerType() &&
"Invalid evaluation result.");
assert(SI.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
@@ -828,7 +850,8 @@ public:
}
bool Success(const llvm::APInt &I, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
assert(I.getBitWidth() == Info.Ctx.getIntWidth(E->getType()) &&
"Invalid evaluation result.");
Result = APValue(APSInt(I));
@@ -837,7 +860,8 @@ public:
}
bool Success(uint64_t Value, const Expr *E) {
- assert(E->getType()->isIntegralType() && "Invalid evaluation result.");
+ assert(E->getType()->isIntegralOrEnumerationType() &&
+ "Invalid evaluation result.");
Result = APValue(Info.Ctx.MakeIntValue(Value, E->getType()));
return true;
}
@@ -914,7 +938,7 @@ public:
return Success(0, E);
}
- bool VisitCXXZeroInitValueExpr(const CXXZeroInitValueExpr *E) {
+ bool VisitCXXScalarValueInitExpr(const CXXScalarValueInitExpr *E) {
return Success(0, E);
}
@@ -943,12 +967,12 @@ private:
} // end anonymous namespace
static bool EvaluateIntegerOrLValue(const Expr* E, APValue &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralType());
+ assert(E->getType()->isIntegralOrEnumerationType());
return IntExprEvaluator(Info, Result).Visit(const_cast<Expr*>(E));
}
static bool EvaluateInteger(const Expr* E, APSInt &Result, EvalInfo &Info) {
- assert(E->getType()->isIntegralType());
+ assert(E->getType()->isIntegralOrEnumerationType());
APValue Val;
if (!EvaluateIntegerOrLValue(E, Val, Info) || !Val.isInt())
@@ -1314,8 +1338,8 @@ bool IntExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
return Success(Result, E);
}
}
- if (!LHSTy->isIntegralType() ||
- !RHSTy->isIntegralType()) {
+ if (!LHSTy->isIntegralOrEnumerationType() ||
+ !RHSTy->isIntegralOrEnumerationType()) {
// We can't continue from here for non-integral types, and they
// could potentially confuse the following operations.
return false;
@@ -1570,7 +1594,7 @@ bool IntExprEvaluator::VisitUnaryOperator(const UnaryOperator *E) {
}
// Only handle integral operations...
- if (!E->getSubExpr()->getType()->isIntegralType())
+ if (!E->getSubExpr()->getType()->isIntegralOrEnumerationType())
return false;
// Get the operand value into 'Result'.
@@ -1613,7 +1637,7 @@ bool IntExprEvaluator::VisitCastExpr(CastExpr *E) {
}
// Handle simple integer->integer casts.
- if (SrcType->isIntegralType()) {
+ if (SrcType->isIntegralOrEnumerationType()) {
if (!Visit(SubExpr))
return false;
@@ -1732,7 +1756,7 @@ public:
bool VisitBinaryOperator(const BinaryOperator *E);
bool VisitFloatingLiteral(const FloatingLiteral *E);
bool VisitCastExpr(CastExpr *E);
- bool VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E);
+ bool VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E);
bool VisitConditionalOperator(ConditionalOperator *E);
bool VisitChooseExpr(const ChooseExpr *E)
@@ -1908,7 +1932,7 @@ bool FloatExprEvaluator::VisitFloatingLiteral(const FloatingLiteral *E) {
bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
Expr* SubExpr = E->getSubExpr();
- if (SubExpr->getType()->isIntegralType()) {
+ if (SubExpr->getType()->isIntegralOrEnumerationType()) {
APSInt IntResult;
if (!EvaluateInteger(SubExpr, IntResult, Info))
return false;
@@ -1928,7 +1952,7 @@ bool FloatExprEvaluator::VisitCastExpr(CastExpr *E) {
return false;
}
-bool FloatExprEvaluator::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *E) {
+bool FloatExprEvaluator::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
Result = APFloat::getZero(Info.Ctx.getFloatTypeSemantics(E->getType()));
return true;
}
@@ -2186,6 +2210,8 @@ bool Expr::Evaluate(EvalResult &Result, ASTContext &Ctx) const {
} else if (E->getType()->isIntegerType()) {
if (!IntExprEvaluator(Info, Info.EvalResult.Val).Visit(const_cast<Expr*>(E)))
return false;
+ if (Result.Val.isLValue() && !IsGlobalLValue(Result.Val.getLValueBase()))
+ return false;
} else if (E->getType()->hasPointerRepresentation()) {
LValue LV;
if (!EvaluatePointer(E, LV, Info))
@@ -2316,7 +2342,7 @@ static ICEDiag CheckEvalInICE(const Expr* E, ASTContext &Ctx) {
static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
assert(!E->isValueDependent() && "Should not see value dependent exprs!");
- if (!E->getType()->isIntegralType()) {
+ if (!E->getType()->isIntegralOrEnumerationType()) {
return ICEDiag(2, E->getLocStart());
}
@@ -2384,7 +2410,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::IntegerLiteralClass:
case Expr::CharacterLiteralClass:
case Expr::CXXBoolLiteralExprClass:
- case Expr::CXXZeroInitValueExprClass:
+ case Expr::CXXScalarValueInitExprClass:
case Expr::TypesCompatibleExprClass:
case Expr::UnaryTypeTraitExprClass:
return NoDiag();
@@ -2579,7 +2605,7 @@ static ICEDiag CheckICE(const Expr* E, ASTContext &Ctx) {
case Expr::CXXReinterpretCastExprClass:
case Expr::CXXConstCastExprClass: {
const Expr *SubExpr = cast<CastExpr>(E)->getSubExpr();
- if (SubExpr->getType()->isIntegralType())
+ if (SubExpr->getType()->isIntegralOrEnumerationType())
return CheckICE(SubExpr, Ctx);
if (isa<FloatingLiteral>(SubExpr->IgnoreParens()))
return NoDiag();
diff --git a/lib/AST/Makefile b/lib/AST/Makefile
index ede25777c90b..7a1672b81728 100644
--- a/lib/AST/Makefile
+++ b/lib/AST/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangAST
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/lib/AST/RecordLayoutBuilder.cpp b/lib/AST/RecordLayoutBuilder.cpp
index 983a2874a735..88d71ce04287 100644
--- a/lib/AST/RecordLayoutBuilder.cpp
+++ b/lib/AST/RecordLayoutBuilder.cpp
@@ -23,6 +23,35 @@ using namespace clang;
namespace {
+/// BaseSubobjectInfo - Represents a single base subobject in a complete class.
+/// For a class hierarchy like
+///
+/// class A { };
+/// class B : A { };
+/// class C : A, B { };
+///
+/// The BaseSubobjectInfo graph for C will have three BaseSubobjectInfo
+/// instances, one for B and two for A.
+///
+/// If a base is virtual, it will only have one BaseSubobjectInfo allocated.
+struct BaseSubobjectInfo {
+ /// Class - The class for this base info.
+ const CXXRecordDecl *Class;
+
+ /// IsVirtual - Whether the BaseInfo represents a virtual base or not.
+ bool IsVirtual;
+
+ /// Bases - Information about the base subobjects.
+ llvm::SmallVector<BaseSubobjectInfo*, 4> Bases;
+
+ /// PrimaryVirtualBaseInfo - Holds the base info for the primary virtual base
+ /// of this base info (if one exists).
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo;
+
+ // FIXME: Document.
+ const BaseSubobjectInfo *Derived;
+};
+
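
The bookkeeping that follows exists because distinct subobjects of the same type must have distinct addresses even when the type is empty; a standalone illustration of the constraint the layout builder enforces:

    #include <cassert>

    struct Empty {};
    struct A : Empty {  // empty base class
      Empty e;          // member of the same empty type
    };

    int main() {
      A a;
      // The base subobject and the member may not share an offset.
      assert(static_cast<void *>(static_cast<Empty *>(&a)) !=
             static_cast<void *>(&a.e));
      return 0;
    }
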
/// EmptySubobjectMap - Keeps track of which empty subobjects exist at different
/// offsets while laying out a C++ class.
class EmptySubobjectMap {
@@ -36,30 +65,41 @@ class EmptySubobjectMap {
typedef llvm::DenseMap<uint64_t, ClassVectorTy> EmptyClassOffsetsMapTy;
EmptyClassOffsetsMapTy EmptyClassOffsets;
+ /// MaxEmptyClassOffset - The highest offset known to contain an empty
+ /// base subobject.
+ uint64_t MaxEmptyClassOffset;
+
/// ComputeEmptySubobjectSizes - Compute the size of the largest base or
/// member subobject that is empty.
void ComputeEmptySubobjectSizes();
+
+ bool CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) const;
- struct BaseInfo {
- const CXXRecordDecl *Class;
- bool IsVirtual;
-
- const CXXRecordDecl *PrimaryVirtualBase;
-
- llvm::SmallVector<BaseInfo*, 4> Bases;
- const BaseInfo *Derived;
- };
+ void AddSubobjectAtOffset(const CXXRecordDecl *RD, uint64_t Offset);
- llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> VirtualBaseInfo;
- llvm::DenseMap<const CXXRecordDecl *, BaseInfo *> NonVirtualBaseInfo;
+ bool CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
+ uint64_t Offset);
+ void UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ uint64_t Offset, bool PlacingEmptyBase);
- BaseInfo *ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
- const BaseInfo *Derived);
- void ComputeBaseInfo();
+ bool CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) const;
+ bool CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ uint64_t Offset) const;
- bool CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info, uint64_t Offset);
- void UpdateEmptyBaseSubobjects(const BaseInfo *Info, uint64_t Offset);
+ void UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset);
+ void UpdateEmptyFieldSubobjects(const FieldDecl *FD, uint64_t Offset);
+ /// AnyEmptySubobjectsBeyondOffset - Returns whether there are any empty
+ /// subobjects beyond the given offset.
+ bool AnyEmptySubobjectsBeyondOffset(uint64_t Offset) const {
+ return Offset <= MaxEmptyClassOffset;
+ }
+
public:
/// This holds the size of the largest empty subobject (either a base
/// or a member). Will be zero if the record being built doesn't contain
@@ -67,18 +107,21 @@ public:
uint64_t SizeOfLargestEmptySubobject;
EmptySubobjectMap(ASTContext &Context, const CXXRecordDecl *Class)
- : Context(Context), Class(Class), SizeOfLargestEmptySubobject(0) {
+ : Context(Context), Class(Class), MaxEmptyClassOffset(0),
+ SizeOfLargestEmptySubobject(0) {
ComputeEmptySubobjectSizes();
-
- ComputeBaseInfo();
}
/// CanPlaceBaseAtOffset - Return whether the given base class can be placed
/// at the given offset.
/// Returns false if placing the record will result in two components
/// (direct or indirect) of the same type having the same offset.
- bool CanPlaceBaseAtOffset(const CXXRecordDecl *RD, bool BaseIsVirtual,
+ bool CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset);
+
+ /// CanPlaceFieldAtOffset - Return whether a field can be placed at the given
+ /// offset.
+ bool CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset);
};
void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
@@ -130,93 +173,67 @@ void EmptySubobjectMap::ComputeEmptySubobjectSizes() {
}
}
-EmptySubobjectMap::BaseInfo *
-EmptySubobjectMap::ComputeBaseInfo(const CXXRecordDecl *RD, bool IsVirtual,
- const BaseInfo *Derived) {
- BaseInfo *Info;
-
- if (IsVirtual) {
- BaseInfo *&InfoSlot = VirtualBaseInfo[RD];
- if (InfoSlot) {
- assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
- return InfoSlot;
- }
+bool
+EmptySubobjectMap::CanPlaceSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) const {
+ // We only need to check empty bases.
+ if (!RD->isEmpty())
+ return true;
- InfoSlot = new (Context) BaseInfo;
- Info = InfoSlot;
- } else {
- Info = new (Context) BaseInfo;
- }
-
- Info->Class = RD;
- Info->IsVirtual = IsVirtual;
- Info->Derived = Derived;
- Info->PrimaryVirtualBase = 0;
+ EmptyClassOffsetsMapTy::const_iterator I = EmptyClassOffsets.find(Offset);
+ if (I == EmptyClassOffsets.end())
+ return true;
- if (RD->getNumVBases()) {
- // Check if this class has a primary virtual base.
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- if (Layout.getPrimaryBaseWasVirtual()) {
- Info->PrimaryVirtualBase = Layout.getPrimaryBase();
- assert(Info->PrimaryVirtualBase &&
- "Didn't have a primary virtual base!");
- }
- }
+ const ClassVectorTy& Classes = I->second;
+ if (std::find(Classes.begin(), Classes.end(), RD) == Classes.end())
+ return true;
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- Info->Bases.push_back(ComputeBaseInfo(BaseDecl, IsVirtual, Info));
- }
-
- return Info;
+ // There is already an empty class of the same type at this offset.
+ return false;
}
+
+void EmptySubobjectMap::AddSubobjectAtOffset(const CXXRecordDecl *RD,
+ uint64_t Offset) {
+ // We only care about empty bases.
+ if (!RD->isEmpty())
+ return;
-void EmptySubobjectMap::ComputeBaseInfo() {
- for (CXXRecordDecl::base_class_const_iterator I = Class->bases_begin(),
- E = Class->bases_end(); I != E; ++I) {
- bool IsVirtual = I->isVirtual();
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- BaseInfo *Info = ComputeBaseInfo(BaseDecl, IsVirtual, /*Derived=*/0);
- if (IsVirtual) {
- // ComputeBaseInfo has already added this base for us.
- continue;
- }
+ ClassVectorTy& Classes = EmptyClassOffsets[Offset];
+ assert(std::find(Classes.begin(), Classes.end(), RD) == Classes.end() &&
+ "Duplicate empty class detected!");
- // Add the base info to the map of non-virtual bases.
- assert(!NonVirtualBaseInfo.count(BaseDecl) &&
- "Non-virtual base already exists!");
- NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
- }
+ Classes.push_back(RD);
+
+ // Update the empty class offset.
+ MaxEmptyClassOffset = std::max(MaxEmptyClassOffset, Offset);
}
bool
-EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info,
+EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(Info->Class, Offset))
+ return false;
+
// Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
- BaseInfo* Base = Info->Bases[I];
+ BaseSubobjectInfo* Base = Info->Bases[I];
if (Base->IsVirtual)
continue;
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
if (!CanPlaceBaseSubobjectAtOffset(Base, BaseOffset))
return false;
}
- if (Info->PrimaryVirtualBase) {
- BaseInfo *PrimaryVirtualBaseInfo =
- VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
- assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
if (Info == PrimaryVirtualBaseInfo->Derived) {
if (!CanPlaceBaseSubobjectAtOffset(PrimaryVirtualBaseInfo, Offset))
@@ -224,62 +241,277 @@ EmptySubobjectMap::CanPlaceBaseSubobjectAtOffset(const BaseInfo *Info,
}
}
- // FIXME: Member variables.
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
return true;
}
-void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseInfo *Info,
- uint64_t Offset) {
- if (Info->Class->isEmpty()) {
- // FIXME: Record that there is an empty class at this offset.
+void EmptySubobjectMap::UpdateEmptyBaseSubobjects(const BaseSubobjectInfo *Info,
+ uint64_t Offset,
+ bool PlacingEmptyBase) {
+ if (!PlacingEmptyBase && Offset >= SizeOfLargestEmptySubobject) {
+ // We know that the only empty subobjects that can conflict with empty
+    // subobjects of non-empty bases are empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty base
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ return;
}
-
+
+ AddSubobjectAtOffset(Info->Class, Offset);
+
// Traverse all non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
- BaseInfo* Base = Info->Bases[I];
+ BaseSubobjectInfo* Base = Info->Bases[I];
if (Base->IsVirtual)
continue;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+
uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
-
- UpdateEmptyBaseSubobjects(Base, BaseOffset);
+ UpdateEmptyBaseSubobjects(Base, BaseOffset, PlacingEmptyBase);
}
- if (Info->PrimaryVirtualBase) {
- BaseInfo *PrimaryVirtualBaseInfo =
- VirtualBaseInfo.lookup(Info->PrimaryVirtualBase);
- assert(PrimaryVirtualBaseInfo && "Didn't find base info!");
+ if (Info->PrimaryVirtualBaseInfo) {
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = Info->PrimaryVirtualBaseInfo;
if (Info == PrimaryVirtualBaseInfo->Derived)
- UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset);
+ UpdateEmptyBaseSubobjects(PrimaryVirtualBaseInfo, Offset,
+ PlacingEmptyBase);
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = Info->Class->field_begin(),
+ E = Info->Class->field_end(); I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
}
-
- // FIXME: Member variables.
}
-bool EmptySubobjectMap::CanPlaceBaseAtOffset(const CXXRecordDecl *RD,
- bool BaseIsVirtual,
+bool EmptySubobjectMap::CanPlaceBaseAtOffset(const BaseSubobjectInfo *Info,
uint64_t Offset) {
// If we know this class doesn't have any empty subobjects we don't need to
// bother checking.
if (!SizeOfLargestEmptySubobject)
return true;
- BaseInfo *Info;
+ if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ return false;
+
+ // We are able to place the base at this offset. Make sure to update the
+ // empty base subobject map.
+ UpdateEmptyBaseSubobjects(Info, Offset, Info->Class->isEmpty());
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
+
+ if (!CanPlaceSubobjectAtOffset(RD, Offset))
+ return false;
- if (BaseIsVirtual)
- Info = VirtualBaseInfo.lookup(RD);
- else
- Info = NonVirtualBaseInfo.lookup(RD);
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(BaseDecl, Class, BaseOffset))
+ return false;
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ if (!CanPlaceFieldSubobjectAtOffset(VBaseDecl, Class, VBaseOffset))
+ return false;
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+
+ if (!CanPlaceFieldSubobjectAtOffset(FD, FieldOffset))
+ return false;
+ }
+
+ return true;
+}
+
+bool EmptySubobjectMap::CanPlaceFieldSubobjectAtOffset(const FieldDecl *FD,
+ uint64_t Offset) const {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(Offset))
+ return true;
- if (!CanPlaceBaseSubobjectAtOffset(Info, Offset))
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ return CanPlaceFieldSubobjectAtOffset(RD, RD, Offset);
+ }
+
+ // If we have an array type we need to look at every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return true;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ uint64_t ElementOffset = Offset;
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We don't have to keep looking past the maximum offset that's known to
+ // contain an empty class.
+ if (!AnyEmptySubobjectsBeyondOffset(ElementOffset))
+ return true;
+
+ if (!CanPlaceFieldSubobjectAtOffset(RD, RD, ElementOffset))
+ return false;
+
+ ElementOffset += Layout.getSize();
+ }
+ }
+
+ return true;
+}
+
+bool
+EmptySubobjectMap::CanPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) {
+ if (!CanPlaceFieldSubobjectAtOffset(FD, Offset))
return false;
- UpdateEmptyBaseSubobjects(Info, Offset);
+ // We are able to place the member variable at this offset.
+ // Make sure to update the empty base subobject map.
+ UpdateEmptyFieldSubobjects(FD, Offset);
return true;
}
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Class,
+ uint64_t Offset) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at offset
+ // zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (Offset >= SizeOfLargestEmptySubobject)
+ return;
+
+ AddSubobjectAtOffset(RD, Offset);
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ // Traverse all non-virtual bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ if (I->isVirtual())
+ continue;
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
+ UpdateEmptyFieldSubobjects(BaseDecl, Class, BaseOffset);
+ }
+
+ if (RD == Class) {
+ // This is the most derived class, traverse virtual bases as well.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->vbases_begin(),
+ E = RD->vbases_end(); I != E; ++I) {
+ const CXXRecordDecl *VBaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ uint64_t VBaseOffset = Offset + Layout.getVBaseClassOffset(VBaseDecl);
+ UpdateEmptyFieldSubobjects(VBaseDecl, Class, VBaseOffset);
+ }
+ }
+
+ // Traverse all member variables.
+ unsigned FieldNo = 0;
+ for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
+ I != E; ++I, ++FieldNo) {
+ const FieldDecl *FD = *I;
+
+ uint64_t FieldOffset = Offset + Layout.getFieldOffset(FieldNo);
+
+ UpdateEmptyFieldSubobjects(FD, FieldOffset);
+ }
+}
+
+void EmptySubobjectMap::UpdateEmptyFieldSubobjects(const FieldDecl *FD,
+ uint64_t Offset) {
+ QualType T = FD->getType();
+ if (const RecordType *RT = T->getAs<RecordType>()) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ UpdateEmptyFieldSubobjects(RD, RD, Offset);
+ return;
+ }
+
+ // If we have an array type we need to update every element.
+ if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
+ QualType ElemTy = Context.getBaseElementType(AT);
+ const RecordType *RT = ElemTy->getAs<RecordType>();
+ if (!RT)
+ return;
+
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+
+ uint64_t NumElements = Context.getConstantArrayElementCount(AT);
+ uint64_t ElementOffset = Offset;
+
+ for (uint64_t I = 0; I != NumElements; ++I) {
+ // We know that the only empty subobjects that can conflict with empty
+ // field subobjects are subobjects of empty bases that can be placed at
+ // offset zero. Because of this, we only need to keep track of empty field
+ // subobjects with offsets less than the size of the largest empty
+ // subobject for our class.
+ if (ElementOffset >= SizeOfLargestEmptySubobject)
+ return;
+
+ UpdateEmptyFieldSubobjects(RD, RD, ElementOffset);
+ ElementOffset += Layout.getSize();
+ }
+ }
+}
+
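
As a rough illustration of the rule the empty subobject map enforces (a sketch, not part of the patch; the types Empty, A and B are made up): two distinct subobjects of the same empty class type may never share an offset, which is what pushes otherwise-empty members away from offset 0.

struct Empty { };
struct A : Empty { };
struct B : Empty {
  A a;  // A's Empty subobject would collide with B's own Empty base at offset 0,
        // so 'a' is placed at offset 1 and sizeof(B) becomes 2.
};
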
class RecordLayoutBuilder {
// FIXME: Remove this and make the appropriate fields public.
friend class clang::ASTContext;
@@ -346,10 +578,6 @@ class RecordLayoutBuilder {
/// avoid visiting virtual bases more than once.
llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBases;
- /// EmptyClassOffsets - A map from offsets to empty record decls.
- typedef std::multimap<uint64_t, const CXXRecordDecl *> EmptyClassOffsetsTy;
- EmptyClassOffsetsTy EmptyClassOffsets;
-
RecordLayoutBuilder(ASTContext &Context, EmptySubobjectMap *EmptySubobjects)
: Context(Context), EmptySubobjects(EmptySubobjects), Size(0), Alignment(8),
Packed(false), IsUnion(false), IsMac68kAlign(false),
@@ -366,9 +594,29 @@ class RecordLayoutBuilder {
void LayoutWideBitField(uint64_t FieldSize, uint64_t TypeSize);
void LayoutBitField(const FieldDecl *D);
- /// ComputeEmptySubobjectSizes - Compute the size of the largest base or
- /// member subobject that is empty.
- void ComputeEmptySubobjectSizes(const CXXRecordDecl *RD);
+ /// BaseSubobjectInfoAllocator - Allocator for BaseSubobjectInfo objects.
+ llvm::SpecificBumpPtrAllocator<BaseSubobjectInfo> BaseSubobjectInfoAllocator;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, BaseSubobjectInfo *>
+ BaseSubobjectInfoMapTy;
+
+ /// VirtualBaseInfo - Map from all the (direct or indirect) virtual bases
+ /// of the class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy VirtualBaseInfo;
+
+ /// NonVirtualBaseInfo - Map from all the direct non-virtual bases of the
+ /// class we're laying out to their base subobject info.
+ BaseSubobjectInfoMapTy NonVirtualBaseInfo;
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for the
+ /// bases of the given class.
+ void ComputeBaseSubobjectInfo(const CXXRecordDecl *RD);
+
+ /// ComputeBaseSubobjectInfo - Compute the base subobject information for a
+ /// single class and all of its base classes.
+ BaseSubobjectInfo *ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived);
/// DeterminePrimaryBase - Determine the primary base of the given class.
void DeterminePrimaryBase(const CXXRecordDecl *RD);
@@ -387,43 +635,21 @@ class RecordLayoutBuilder {
void LayoutNonVirtualBases(const CXXRecordDecl *RD);
/// LayoutNonVirtualBase - Lays out a single non-virtual base.
- void LayoutNonVirtualBase(const CXXRecordDecl *Base);
+ void LayoutNonVirtualBase(const BaseSubobjectInfo *Base);
- void AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- const CXXRecordDecl *MostDerivedClass);
+ void AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ uint64_t Offset);
/// LayoutVirtualBases - Lays out all the virtual bases.
void LayoutVirtualBases(const CXXRecordDecl *RD,
const CXXRecordDecl *MostDerivedClass);
/// LayoutVirtualBase - Lays out a single virtual base.
- void LayoutVirtualBase(const CXXRecordDecl *Base);
+ void LayoutVirtualBase(const BaseSubobjectInfo *Base);
/// LayoutBase - Will lay out a base and return the offset where it was
/// placed, in bits.
- uint64_t LayoutBase(const CXXRecordDecl *Base, bool BaseIsVirtual);
-
- /// canPlaceRecordAtOffset - Return whether a record (either a base class
- /// or a field) can be placed at the given offset.
- /// Returns false if placing the record will result in two components
- /// (direct or indirect) of the same type having the same offset.
- bool canPlaceRecordAtOffset(const CXXRecordDecl *RD, uint64_t Offset,
- bool CheckVBases) const;
-
- /// canPlaceFieldAtOffset - Return whether a field can be placed at the given
- /// offset.
- bool canPlaceFieldAtOffset(const FieldDecl *FD, uint64_t Offset) const;
-
- /// UpdateEmptyClassOffsets - Called after a record (either a base class
- /// or a field) has been placed at the given offset. Will update the
- /// EmptyClassOffsets map if the class is empty or has any empty bases or
- /// fields.
- void UpdateEmptyClassOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- bool UpdateVBases);
-
- /// UpdateEmptyClassOffsets - Called after a field has been placed at the
- /// given offset.
- void UpdateEmptyClassOffsets(const FieldDecl *FD, uint64_t Offset);
+ uint64_t LayoutBase(const BaseSubobjectInfo *Base);
/// InitializeLayout - Initialize record layout for the given record decl.
void InitializeLayout(const Decl *D);
@@ -575,14 +801,127 @@ void RecordLayoutBuilder::DeterminePrimaryBase(const CXXRecordDecl *RD) {
UpdateAlignment(Context.Target.getPointerAlign(0));
}
+BaseSubobjectInfo *
+RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD,
+ bool IsVirtual,
+ BaseSubobjectInfo *Derived) {
+ BaseSubobjectInfo *Info;
+
+ if (IsVirtual) {
+ // Check if we already have info about this virtual base.
+ BaseSubobjectInfo *&InfoSlot = VirtualBaseInfo[RD];
+ if (InfoSlot) {
+ assert(InfoSlot->Class == RD && "Wrong class for virtual base info!");
+ return InfoSlot;
+ }
+
+ // We don't, create it.
+ InfoSlot = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ Info = InfoSlot;
+ } else {
+ Info = new (BaseSubobjectInfoAllocator.Allocate()) BaseSubobjectInfo;
+ }
+
+ Info->Class = RD;
+ Info->IsVirtual = IsVirtual;
+ Info->Derived = 0;
+ Info->PrimaryVirtualBaseInfo = 0;
+
+ const CXXRecordDecl *PrimaryVirtualBase = 0;
+ BaseSubobjectInfo *PrimaryVirtualBaseInfo = 0;
+
+ // Check if this base has a primary virtual base.
+ if (RD->getNumVBases()) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ // This base does have a primary virtual base.
+ PrimaryVirtualBase = Layout.getPrimaryBase();
+ assert(PrimaryVirtualBase && "Didn't have a primary virtual base!");
+
+ // Now check if we have base subobject info about this primary base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+
+ if (PrimaryVirtualBaseInfo) {
+ if (PrimaryVirtualBaseInfo->Derived) {
+ // We did have info about this primary base, and it turns out that it
+ // has already been claimed as a primary virtual base for another
+ // base.
+ PrimaryVirtualBase = 0;
+ } else {
+ // We can claim this base as our primary base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+ }
+ }
+ }
+
+ // Now go through all direct bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ Info->Bases.push_back(ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, Info));
+ }
+
+ if (PrimaryVirtualBase && !PrimaryVirtualBaseInfo) {
+ // Traversing the bases must have created the base info for our primary
+ // virtual base.
+ PrimaryVirtualBaseInfo = VirtualBaseInfo.lookup(PrimaryVirtualBase);
+ assert(PrimaryVirtualBaseInfo &&
+ "Did not create a primary virtual base!");
+
+ // Claim the primary virtual base as our primary virtual base.
+ Info->PrimaryVirtualBaseInfo = PrimaryVirtualBaseInfo;
+ PrimaryVirtualBaseInfo->Derived = Info;
+ }
+
+ return Info;
+}
+
+void RecordLayoutBuilder::ComputeBaseSubobjectInfo(const CXXRecordDecl *RD) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ bool IsVirtual = I->isVirtual();
+
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Compute the base subobject info for this base.
+ BaseSubobjectInfo *Info = ComputeBaseSubobjectInfo(BaseDecl, IsVirtual, 0);
+
+ if (IsVirtual) {
+ // ComputeBaseSubobjectInfo has already added this base for us.
+ assert(VirtualBaseInfo.count(BaseDecl) &&
+ "Did not add virtual base!");
+ } else {
+ // Add the base info to the map of non-virtual bases.
+ assert(!NonVirtualBaseInfo.count(BaseDecl) &&
+ "Non-virtual base already exists!");
+ NonVirtualBaseInfo.insert(std::make_pair(BaseDecl, Info));
+ }
+ }
+}
+
void
RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
- // First, determine the primary base class.
+ // Then, determine the primary base class.
DeterminePrimaryBase(RD);
+ // Compute base subobject info.
+ ComputeBaseSubobjectInfo(RD);
+
// If we have a primary base class, lay it out.
if (PrimaryBase) {
if (PrimaryBaseIsVirtual) {
+ // If the primary virtual base was a primary virtual base of some other
+ // base class we'll have to steal it.
+ BaseSubobjectInfo *PrimaryBaseInfo = VirtualBaseInfo.lookup(PrimaryBase);
+ PrimaryBaseInfo->Derived = 0;
+
// We have a virtual primary base, insert it as an indirect primary base.
IndirectPrimaryBases.insert(PrimaryBase);
@@ -590,9 +929,15 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
"vbase already visited!");
VisitedVirtualBases.insert(PrimaryBase);
- LayoutVirtualBase(PrimaryBase);
- } else
- LayoutNonVirtualBase(PrimaryBase);
+ LayoutVirtualBase(PrimaryBaseInfo);
+ } else {
+ BaseSubobjectInfo *PrimaryBaseInfo =
+ NonVirtualBaseInfo.lookup(PrimaryBase);
+ assert(PrimaryBaseInfo &&
+ "Did not find base info for non-virtual primary base!");
+
+ LayoutNonVirtualBase(PrimaryBaseInfo);
+ }
}
// Now lay out the non-virtual bases.
@@ -603,81 +948,64 @@ RecordLayoutBuilder::LayoutNonVirtualBases(const CXXRecordDecl *RD) {
if (I->isVirtual())
continue;
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
// Skip the primary base.
- if (Base == PrimaryBase && !PrimaryBaseIsVirtual)
+ if (BaseDecl == PrimaryBase && !PrimaryBaseIsVirtual)
continue;
// Lay out the base.
- LayoutNonVirtualBase(Base);
+ BaseSubobjectInfo *BaseInfo = NonVirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find base info for non-virtual base!");
+
+ LayoutNonVirtualBase(BaseInfo);
}
}
-void RecordLayoutBuilder::LayoutNonVirtualBase(const CXXRecordDecl *Base) {
+void RecordLayoutBuilder::LayoutNonVirtualBase(const BaseSubobjectInfo *Base) {
// Layout the base.
- uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/false);
+ uint64_t Offset = LayoutBase(Base);
// Add its base class offset.
- if (!Bases.insert(std::make_pair(Base, Offset)).second)
- assert(false && "Added same base offset more than once!");
+ assert(!Bases.count(Base->Class) && "base offset already exists!");
+ Bases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
}
void
-RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const CXXRecordDecl *RD,
- uint64_t Offset,
- const CXXRecordDecl *MostDerivedClass) {
- // We already have the offset for the primary base of the most derived class.
- if (RD != MostDerivedClass) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- // If this is a primary virtual base and we haven't seen it before, add it.
- if (PrimaryBase && Layout.getPrimaryBaseWasVirtual() &&
- !VBases.count(PrimaryBase))
- VBases.insert(std::make_pair(PrimaryBase, Offset));
+RecordLayoutBuilder::AddPrimaryVirtualBaseOffsets(const BaseSubobjectInfo *Info,
+ uint64_t Offset) {
+ // This base isn't interesting, it has no virtual bases.
+ if (!Info->Class->getNumVBases())
+ return;
+
+ // First, check if we have a virtual primary base to add offsets for.
+ if (Info->PrimaryVirtualBaseInfo) {
+ assert(Info->PrimaryVirtualBaseInfo->IsVirtual &&
+ "Primary virtual base is not virtual!");
+ if (Info->PrimaryVirtualBaseInfo->Derived == Info) {
+ // Add the offset.
+ assert(!VBases.count(Info->PrimaryVirtualBaseInfo->Class) &&
+ "primary vbase offset already exists!");
+ VBases.insert(std::make_pair(Info->PrimaryVirtualBaseInfo->Class,
+ Offset));
+
+ // Traverse the primary virtual base.
+ AddPrimaryVirtualBaseOffsets(Info->PrimaryVirtualBaseInfo, Offset);
+ }
}
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- if (!BaseDecl->getNumVBases()) {
- // This base isn't interesting since it doesn't have any virtual bases.
+ // Now go through all direct non-virtual bases.
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Info->Class);
+ for (unsigned I = 0, E = Info->Bases.size(); I != E; ++I) {
+ const BaseSubobjectInfo *Base = Info->Bases[I];
+ if (Base->IsVirtual)
continue;
- }
-
- // Compute the offset of this base.
- uint64_t BaseOffset;
-
- if (I->isVirtual()) {
- // If we don't know this vbase yet, don't visit it. It will be visited
- // later.
- if (!VBases.count(BaseDecl)) {
- continue;
- }
-
- // Check if we've already visited this base.
- if (!VisitedVirtualBases.insert(BaseDecl))
- continue;
- // We want the vbase offset from the class we're currently laying out.
- BaseOffset = VBases[BaseDecl];
- } else if (RD == MostDerivedClass) {
- // We want the base offset from the class we're currently laying out.
- assert(Bases.count(BaseDecl) && "Did not find base!");
- BaseOffset = Bases[BaseDecl];
- } else {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
- BaseOffset = Offset + Layout.getBaseClassOffset(BaseDecl);
- }
-
- AddPrimaryVirtualBaseOffsets(BaseDecl, BaseOffset, MostDerivedClass);
+ uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base->Class);
+ AddPrimaryVirtualBaseOffsets(Base, BaseOffset);
}
}
@@ -701,53 +1029,54 @@ RecordLayoutBuilder::LayoutVirtualBases(const CXXRecordDecl *RD,
assert(!I->getType()->isDependentType() &&
"Cannot layout class with dependent bases.");
- const CXXRecordDecl *Base =
+ const CXXRecordDecl *BaseDecl =
cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
if (I->isVirtual()) {
- if (PrimaryBase != Base || !PrimaryBaseIsVirtual) {
- bool IndirectPrimaryBase = IndirectPrimaryBases.count(Base);
+ if (PrimaryBase != BaseDecl || !PrimaryBaseIsVirtual) {
+ bool IndirectPrimaryBase = IndirectPrimaryBases.count(BaseDecl);
// Only lay out the virtual base if it's not an indirect primary base.
if (!IndirectPrimaryBase) {
// Only visit virtual bases once.
- if (!VisitedVirtualBases.insert(Base))
+ if (!VisitedVirtualBases.insert(BaseDecl))
continue;
- LayoutVirtualBase(Base);
+ const BaseSubobjectInfo *BaseInfo = VirtualBaseInfo.lookup(BaseDecl);
+ assert(BaseInfo && "Did not find virtual base info!");
+ LayoutVirtualBase(BaseInfo);
}
}
}
- if (!Base->getNumVBases()) {
+ if (!BaseDecl->getNumVBases()) {
// This base isn't interesting since it doesn't have any virtual bases.
continue;
}
- LayoutVirtualBases(Base, MostDerivedClass);
+ LayoutVirtualBases(BaseDecl, MostDerivedClass);
}
}
-void RecordLayoutBuilder::LayoutVirtualBase(const CXXRecordDecl *Base) {
+void RecordLayoutBuilder::LayoutVirtualBase(const BaseSubobjectInfo *Base) {
+ assert(!Base->Derived && "Trying to lay out a primary virtual base!");
+
// Layout the base.
- uint64_t Offset = LayoutBase(Base, /*BaseIsVirtual=*/true);
+ uint64_t Offset = LayoutBase(Base);
// Add its base class offset.
- if (!VBases.insert(std::make_pair(Base, Offset)).second)
- assert(false && "Added same vbase offset more than once!");
+ assert(!VBases.count(Base->Class) && "vbase offset already exists!");
+ VBases.insert(std::make_pair(Base->Class, Offset));
+
+ AddPrimaryVirtualBaseOffsets(Base, Offset);
}
-uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
- bool BaseIsVirtual) {
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base);
+uint64_t RecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(Base->Class);
// If we have an empty base class, try to place it at offset 0.
- if (Base->isEmpty() &&
- EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, 0) &&
- canPlaceRecordAtOffset(Base, 0, /*CheckVBases=*/false)) {
- // We were able to place the class at offset 0.
- UpdateEmptyClassOffsets(Base, 0, /*UpdateVBases=*/false);
-
+ if (Base->Class->isEmpty() &&
+ EmptySubobjects->CanPlaceBaseAtOffset(Base, 0)) {
Size = std::max(Size, Layout.getSize());
return 0;
@@ -759,15 +1088,10 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
uint64_t Offset = llvm::RoundUpToAlignment(DataSize, BaseAlign);
// Try to place the base.
- while (true) {
- if (EmptySubobjects->CanPlaceBaseAtOffset(Base, BaseIsVirtual, Offset) &&
- canPlaceRecordAtOffset(Base, Offset, /*CheckVBases=*/false))
- break;
-
+ while (!EmptySubobjects->CanPlaceBaseAtOffset(Base, Offset))
Offset += BaseAlign;
- }
- if (!Base->isEmpty()) {
+ if (!Base->Class->isEmpty()) {
// Update the data size.
DataSize = Offset + Layout.getNonVirtualSize();
@@ -778,173 +1102,9 @@ uint64_t RecordLayoutBuilder::LayoutBase(const CXXRecordDecl *Base,
// Remember max struct/class alignment.
UpdateAlignment(BaseAlign);
- UpdateEmptyClassOffsets(Base, Offset, /*UpdateVBases=*/false);
return Offset;
}
-bool
-RecordLayoutBuilder::canPlaceRecordAtOffset(const CXXRecordDecl *RD,
- uint64_t Offset,
- bool CheckVBases) const {
- // Look for an empty class with the same type at the same offset.
- for (EmptyClassOffsetsTy::const_iterator I =
- EmptyClassOffsets.lower_bound(Offset),
- E = EmptyClassOffsets.upper_bound(Offset); I != E; ++I) {
-
- if (I->second == RD)
- return false;
- }
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Check bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *BaseDecl =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- uint64_t BaseOffset = Layout.getBaseClassOffset(BaseDecl);
-
- if (!canPlaceRecordAtOffset(BaseDecl, Offset + BaseOffset,
- /*CheckVBases=*/false))
- return false;
- }
-
- // Check fields.
- unsigned FieldNo = 0;
- for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
-
- if (!canPlaceFieldAtOffset(FD, Offset + FieldOffset))
- return false;
- }
-
- if (CheckVBases) {
- // FIXME: virtual bases.
- }
-
- return true;
-}
-
-bool RecordLayoutBuilder::canPlaceFieldAtOffset(const FieldDecl *FD,
- uint64_t Offset) const {
- QualType T = FD->getType();
- if (const RecordType *RT = T->getAs<RecordType>()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl()))
- return canPlaceRecordAtOffset(RD, Offset, /*CheckVBases=*/true);
- }
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
- QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
- return true;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return true;
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
- for (uint64_t I = 0; I != NumElements; ++I) {
- if (!canPlaceRecordAtOffset(RD, ElementOffset, /*CheckVBases=*/true))
- return false;
-
- ElementOffset += Layout.getSize();
- }
- }
-
- return true;
-}
-
-void RecordLayoutBuilder::UpdateEmptyClassOffsets(const CXXRecordDecl *RD,
- uint64_t Offset,
- bool UpdateVBases) {
- if (RD->isEmpty())
- EmptyClassOffsets.insert(std::make_pair(Offset, RD));
-
- const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
-
- // Update bases.
- for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
- E = RD->bases_end(); I != E; ++I) {
- assert(!I->getType()->isDependentType() &&
- "Cannot layout class with dependent bases.");
- if (I->isVirtual())
- continue;
-
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
-
- uint64_t BaseClassOffset = Layout.getBaseClassOffset(Base);
- UpdateEmptyClassOffsets(Base, Offset + BaseClassOffset,
- /*UpdateVBases=*/false);
- }
-
- // Update fields.
- unsigned FieldNo = 0;
- for (CXXRecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
- I != E; ++I, ++FieldNo) {
- const FieldDecl *FD = *I;
-
- uint64_t FieldOffset = Layout.getFieldOffset(FieldNo);
- UpdateEmptyClassOffsets(FD, Offset + FieldOffset);
- }
-
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
-
- if (UpdateVBases) {
- // FIXME: Update virtual bases.
- } else if (PrimaryBase && Layout.getPrimaryBaseWasVirtual()) {
- // We always want to update the offsets of a primary virtual base.
- assert(Layout.getVBaseClassOffset(PrimaryBase) == 0 &&
- "primary base class offset must always be 0!");
- UpdateEmptyClassOffsets(PrimaryBase, Offset, /*UpdateVBases=*/false);
- }
-}
-
-void
-RecordLayoutBuilder::UpdateEmptyClassOffsets(const FieldDecl *FD,
- uint64_t Offset) {
- QualType T = FD->getType();
-
- if (const RecordType *RT = T->getAs<RecordType>()) {
- if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
- UpdateEmptyClassOffsets(RD, Offset, /*UpdateVBases=*/true);
- return;
- }
- }
-
- if (const ConstantArrayType *AT = Context.getAsConstantArrayType(T)) {
- QualType ElemTy = Context.getBaseElementType(AT);
- const RecordType *RT = ElemTy->getAs<RecordType>();
- if (!RT)
- return;
- const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return;
-
- const ASTRecordLayout &Info = Context.getASTRecordLayout(RD);
-
- uint64_t NumElements = Context.getConstantArrayElementCount(AT);
- uint64_t ElementOffset = Offset;
-
- for (uint64_t I = 0; I != NumElements; ++I) {
- UpdateEmptyClassOffsets(RD, ElementOffset, /*UpdateVBases=*/true);
- ElementOffset += Info.getSize();
- }
- }
-}
-
void RecordLayoutBuilder::InitializeLayout(const Decl *D) {
if (const RecordDecl *RD = dyn_cast<RecordDecl>(D))
IsUnion = RD->isUnion();
@@ -992,7 +1152,6 @@ void RecordLayoutBuilder::Layout(const CXXRecordDecl *RD) {
LayoutVirtualBases(RD, RD);
VisitedVirtualBases.clear();
- AddPrimaryVirtualBaseOffsets(RD, 0, RD);
// Finally, round the size of the total struct up to the alignment of the
// struct itself.
@@ -1137,7 +1296,7 @@ void RecordLayoutBuilder::LayoutBitField(const FieldDecl *D) {
// Check if we need to add padding to give the field the correct alignment.
if (FieldSize == 0 || (FieldOffset & (FieldAlign-1)) + FieldSize > TypeSize)
- FieldOffset = (FieldOffset + (FieldAlign-1)) & ~(FieldAlign-1);
+ FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
// Padding members don't affect overall alignment.
if (!D->getIdentifier())
@@ -1208,17 +1367,12 @@ void RecordLayoutBuilder::LayoutField(const FieldDecl *D) {
// Round up the current record size to the field's alignment boundary.
FieldOffset = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);
- if (!IsUnion) {
- while (true) {
- // Check if we can place the field at this offset.
- if (canPlaceFieldAtOffset(D, FieldOffset))
- break;
-
+ if (!IsUnion && EmptySubobjects) {
+ // Check if we can place the field at this offset.
+ while (!EmptySubobjects->CanPlaceFieldAtOffset(D, FieldOffset)) {
// We couldn't place the field at the offset. Try again at a new offset.
FieldOffset += FieldAlign;
}
-
- UpdateEmptyClassOffsets(D, FieldOffset);
}
// Place this field at the current location.
@@ -1261,8 +1415,6 @@ void RecordLayoutBuilder::UpdateAlignment(unsigned NewAlignment) {
const CXXMethodDecl *
RecordLayoutBuilder::ComputeKeyFunction(const CXXRecordDecl *RD) {
- assert(RD->isDynamicClass() && "Class does not have any virtual methods!");
-
// If a class isn't polymorphic it doesn't have a key function.
if (!RD->isPolymorphic())
return 0;
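
For the LayoutBitField hunk above, a quick sanity check (illustrative only; checkRounding is a made-up helper, and it assumes llvm::RoundUpToAlignment from llvm/Support/MathExtras.h of this era) that the helper matches the old mask expression for power-of-two alignments:

#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>

static void checkRounding() {
  uint64_t FieldOffset = 9, FieldAlign = 8;
  uint64_t Masked  = (FieldOffset + (FieldAlign - 1)) & ~(FieldAlign - 1); // 16
  uint64_t Rounded = llvm::RoundUpToAlignment(FieldOffset, FieldAlign);    // 16
  assert(Masked == Rounded);
}
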
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 80f5695e42ad..6dbe8f4d18c1 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -499,14 +499,101 @@ void DeclStmt::DoDestroy(ASTContext &C) {
DG.getDeclGroup().Destroy(C);
}
+IfStmt::IfStmt(ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
+ Stmt *then, SourceLocation EL, Stmt *elsev)
+ : Stmt(IfStmtClass), IfLoc(IL), ElseLoc(EL)
+{
+ setConditionVariable(C, var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[THEN] = then;
+ SubExprs[ELSE] = elsev;
+}
+
+VarDecl *IfStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void IfStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void IfStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+ForStmt::ForStmt(ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
+ Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
+ SourceLocation RP)
+ : Stmt(ForStmtClass), ForLoc(FL), LParenLoc(LP), RParenLoc(RP)
+{
+ SubExprs[INIT] = Init;
+ setConditionVariable(C, condVar);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(Cond);
+ SubExprs[INC] = reinterpret_cast<Stmt*>(Inc);
+ SubExprs[BODY] = Body;
+}
+
+VarDecl *ForStmt::getConditionVariable() const {
+ if (!SubExprs[CONDVAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[CONDVAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void ForStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[CONDVAR] = 0;
+ return;
+ }
+
+ SubExprs[CONDVAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void ForStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+SwitchStmt::SwitchStmt(ASTContext &C, VarDecl *Var, Expr *cond)
+ : Stmt(SwitchStmtClass), FirstCase(0)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = NULL;
+}
+
+VarDecl *SwitchStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void SwitchStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void SwitchStmt::DoDestroy(ASTContext &C) {
// Destroy the SwitchCase statements in this switch. In the normal
// case, this loop will merely decrement the reference counts from
@@ -521,6 +608,35 @@ void SwitchStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
+WhileStmt::WhileStmt(ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
+ SourceLocation WL)
+: Stmt(WhileStmtClass)
+{
+ setConditionVariable(C, Var);
+ SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
+ SubExprs[BODY] = body;
+ WhileLoc = WL;
+}
+
+VarDecl *WhileStmt::getConditionVariable() const {
+ if (!SubExprs[VAR])
+ return 0;
+
+ DeclStmt *DS = cast<DeclStmt>(SubExprs[VAR]);
+ return cast<VarDecl>(DS->getSingleDecl());
+}
+
+void WhileStmt::setConditionVariable(ASTContext &C, VarDecl *V) {
+ if (!V) {
+ SubExprs[VAR] = 0;
+ return;
+ }
+
+ SubExprs[VAR] = new (C) DeclStmt(DeclGroupRef(V),
+ V->getSourceRange().getBegin(),
+ V->getSourceRange().getEnd());
+}
+
void WhileStmt::DoDestroy(ASTContext &C) {
BranchDestroy(C, this, SubExprs, END_EXPR);
}
@@ -572,26 +688,26 @@ Stmt::child_iterator LabelStmt::child_end() { return &SubStmt+1; }
// IfStmt
Stmt::child_iterator IfStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator IfStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// SwitchStmt
Stmt::child_iterator SwitchStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator SwitchStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// WhileStmt
Stmt::child_iterator WhileStmt::child_begin() {
- return child_iterator(Var, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator WhileStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// DoStmt
@@ -600,10 +716,10 @@ Stmt::child_iterator DoStmt::child_end() { return &SubExprs[0]+END_EXPR; }
// ForStmt
Stmt::child_iterator ForStmt::child_begin() {
- return child_iterator(CondVar, &SubExprs[0]);
+ return &SubExprs[0];
}
Stmt::child_iterator ForStmt::child_end() {
- return child_iterator(0, &SubExprs[0]+END_EXPR);
+ return &SubExprs[0]+END_EXPR;
}
// ObjCForCollectionStmt
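
The constructors above move the condition variable into a DeclStmt slot instead of a separate member; a rough usage sketch of the new interface (not part of the patch, buildIf is a hypothetical caller):

#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
using namespace clang;

Stmt *buildIf(ASTContext &Context, SourceLocation IfLoc, VarDecl *CondVar,
              Expr *Cond, Stmt *Then, SourceLocation ElseLoc, Stmt *Else) {
  // CondVar may be null; a non-null variable is wrapped in a DeclStmt and
  // stored in SubExprs[VAR] by setConditionVariable().
  IfStmt *IS = new (Context) IfStmt(Context, IfLoc, CondVar, Cond, Then,
                                    ElseLoc, Else);
  // getConditionVariable() unwraps the DeclStmt again, or returns 0.
  return IS;
}
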
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 9bef49c3984d..7043c3551628 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -682,7 +682,7 @@ void StmtPrinter::VisitUnaryOperator(UnaryOperator *Node) {
bool StmtPrinter::PrintOffsetOfDesignator(Expr *E) {
if (isa<UnaryOperator>(E)) {
// Base case, print the type and comma.
- OS << E->getType().getAsString() << ", ";
+ OS << E->getType().getAsString(Policy) << ", ";
return true;
} else if (ArraySubscriptExpr *ASE = dyn_cast<ArraySubscriptExpr>(E)) {
PrintOffsetOfDesignator(ASE->getLHS());
@@ -706,7 +706,7 @@ void StmtPrinter::VisitUnaryOffsetOf(UnaryOperator *Node) {
void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
OS << "__builtin_offsetof(";
- OS << Node->getTypeSourceInfo()->getType().getAsString() << ", ";
+ OS << Node->getTypeSourceInfo()->getType().getAsString(Policy) << ", ";
bool PrintedSomething = false;
for (unsigned i = 0, n = Node->getNumComponents(); i < n; ++i) {
OffsetOfExpr::OffsetOfNode ON = Node->getComponent(i);
@@ -740,7 +740,7 @@ void StmtPrinter::VisitOffsetOfExpr(OffsetOfExpr *Node) {
void StmtPrinter::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr *Node) {
OS << (Node->isSizeOf() ? "sizeof" : "__alignof");
if (Node->isArgumentType())
- OS << "(" << Node->getArgumentType().getAsString() << ")";
+ OS << "(" << Node->getArgumentType().getAsString(Policy) << ")";
else {
OS << " ";
PrintExpr(Node->getArgumentExpr());
@@ -802,11 +802,11 @@ void StmtPrinter::VisitExplicitCastExpr(ExplicitCastExpr *) {
assert(0 && "ExplicitCastExpr is an abstract class");
}
void StmtPrinter::VisitCStyleCastExpr(CStyleCastExpr *Node) {
- OS << "(" << Node->getType().getAsString() << ")";
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
PrintExpr(Node->getSubExpr());
}
void StmtPrinter::VisitCompoundLiteralExpr(CompoundLiteralExpr *Node) {
- OS << "(" << Node->getType().getAsString() << ")";
+ OS << "(" << Node->getType().getAsString(Policy) << ")";
PrintExpr(Node->getInitializer());
}
void StmtPrinter::VisitImplicitCastExpr(ImplicitCastExpr *Node) {
@@ -852,8 +852,8 @@ void StmtPrinter::VisitStmtExpr(StmtExpr *E) {
void StmtPrinter::VisitTypesCompatibleExpr(TypesCompatibleExpr *Node) {
OS << "__builtin_types_compatible_p(";
- OS << Node->getArgType1().getAsString() << ",";
- OS << Node->getArgType2().getAsString() << ")";
+ OS << Node->getArgType1().getAsString(Policy) << ",";
+ OS << Node->getArgType2().getAsString(Policy) << ")";
}
void StmtPrinter::VisitChooseExpr(ChooseExpr *Node) {
@@ -947,7 +947,7 @@ void StmtPrinter::VisitVAArgExpr(VAArgExpr *Node) {
OS << "__builtin_va_arg(";
PrintExpr(Node->getSubExpr());
OS << ", ";
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << ")";
}
@@ -1002,7 +1002,7 @@ void StmtPrinter::VisitCXXMemberCallExpr(CXXMemberCallExpr *Node) {
void StmtPrinter::VisitCXXNamedCastExpr(CXXNamedCastExpr *Node) {
OS << Node->getCastName() << '<';
- OS << Node->getTypeAsWritten().getAsString() << ">(";
+ OS << Node->getTypeAsWritten().getAsString(Policy) << ">(";
PrintExpr(Node->getSubExpr());
OS << ")";
}
@@ -1026,7 +1026,7 @@ void StmtPrinter::VisitCXXConstCastExpr(CXXConstCastExpr *Node) {
void StmtPrinter::VisitCXXTypeidExpr(CXXTypeidExpr *Node) {
OS << "typeid(";
if (Node->isTypeOperand()) {
- OS << Node->getTypeOperand().getAsString();
+ OS << Node->getTypeOperand().getAsString(Policy);
} else {
PrintExpr(Node->getExprOperand());
}
@@ -1059,7 +1059,7 @@ void StmtPrinter::VisitCXXDefaultArgExpr(CXXDefaultArgExpr *Node) {
}
void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << "(";
PrintExpr(Node->getSubExpr());
OS << ")";
@@ -1074,7 +1074,7 @@ void StmtPrinter::VisitCXXBindReferenceExpr(CXXBindReferenceExpr *Node) {
}
void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
- OS << Node->getType().getAsString();
+ OS << Node->getType().getAsString(Policy);
OS << "(";
for (CXXTemporaryObjectExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
@@ -1086,8 +1086,8 @@ void StmtPrinter::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *Node) {
OS << ")";
}
-void StmtPrinter::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *Node) {
- OS << Node->getType().getAsString() << "()";
+void StmtPrinter::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *Node) {
+ OS << Node->getType().getAsString(Policy) << "()";
}
void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
@@ -1177,7 +1177,7 @@ void StmtPrinter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
void
StmtPrinter::VisitCXXUnresolvedConstructExpr(
CXXUnresolvedConstructExpr *Node) {
- OS << Node->getTypeAsWritten().getAsString();
+ OS << Node->getTypeAsWritten().getAsString(Policy);
OS << "(";
for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
ArgEnd = Node->arg_end();
@@ -1254,7 +1254,7 @@ static const char *getTypeTraitName(UnaryTypeTrait UTT) {
void StmtPrinter::VisitUnaryTypeTraitExpr(UnaryTypeTraitExpr *E) {
OS << getTypeTraitName(E->getTrait()) << "("
- << E->getQueriedType().getAsString() << ")";
+ << E->getQueriedType().getAsString(Policy) << ")";
}
// Obj-C
@@ -1265,7 +1265,7 @@ void StmtPrinter::VisitObjCStringLiteral(ObjCStringLiteral *Node) {
}
void StmtPrinter::VisitObjCEncodeExpr(ObjCEncodeExpr *Node) {
- OS << "@encode(" << Node->getEncodedType().getAsString() << ')';
+ OS << "@encode(" << Node->getEncodedType().getAsString(Policy) << ')';
}
void StmtPrinter::VisitObjCSelectorExpr(ObjCSelectorExpr *Node) {
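
The getAsString(Policy) changes above matter because the printing policy carries the language options of the translation unit; a small sketch (not part of the patch, printType is a hypothetical helper):

#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/Type.h"
#include "clang/Basic/LangOptions.h"
#include <string>
using namespace clang;

std::string printType(QualType T, const LangOptions &LangOpts) {
  // Built from the unit's options, so for example a boolean type can print
  // as "_Bool" under C options and "bool" under C++ options.
  PrintingPolicy Policy(LangOpts);
  return T.getAsString(Policy);
}
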
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index ac3a9eeda66f..cff86a4e1cec 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -211,9 +211,11 @@ void StmtProfiler::VisitExpr(Expr *S) {
void StmtProfiler::VisitDeclRefExpr(DeclRefExpr *S) {
VisitExpr(S);
- VisitNestedNameSpecifier(S->getQualifier());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
VisitDecl(S->getDecl());
- VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
+ if (!Canonical)
+ VisitTemplateArguments(S->getTemplateArgs(), S->getNumTemplateArgs());
}
void StmtProfiler::VisitPredefinedExpr(PredefinedExpr *S) {
@@ -307,7 +309,8 @@ void StmtProfiler::VisitCallExpr(CallExpr *S) {
void StmtProfiler::VisitMemberExpr(MemberExpr *S) {
VisitExpr(S);
VisitDecl(S->getMemberDecl());
- VisitNestedNameSpecifier(S->getQualifier());
+ if (!Canonical)
+ VisitNestedNameSpecifier(S->getQualifier());
ID.AddBoolean(S->isArrow());
}
@@ -428,6 +431,8 @@ void StmtProfiler::VisitBlockDeclRefExpr(BlockDeclRefExpr *S) {
VisitDecl(S->getDecl());
ID.AddBoolean(S->isByRef());
ID.AddBoolean(S->isConstQualAdded());
+ if (S->getCopyConstructorExpr())
+ Visit(S->getCopyConstructorExpr());
}
static Stmt::StmtClass DecodeOperatorCall(CXXOperatorCallExpr *S,
@@ -719,7 +724,7 @@ void StmtProfiler::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *S) {
VisitCXXConstructExpr(S);
}
-void StmtProfiler::VisitCXXZeroInitValueExpr(CXXZeroInitValueExpr *S) {
+void StmtProfiler::VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *S) {
VisitExpr(S);
}
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index 1c775efe5777..02e648879ad7 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -90,6 +90,33 @@ void TemplateArgument::Profile(llvm::FoldingSetNodeID &ID,
}
}
+bool TemplateArgument::structurallyEquals(const TemplateArgument &Other) const {
+ if (getKind() != Other.getKind()) return false;
+
+ switch (getKind()) {
+ case Null:
+ case Type:
+ case Declaration:
+ case Template:
+ case Expression:
+ return TypeOrValue == Other.TypeOrValue;
+
+ case Integral:
+ return getIntegralType() == Other.getIntegralType() &&
+ *getAsIntegral() == *Other.getAsIntegral();
+
+ case Pack:
+ if (Args.NumArgs != Other.Args.NumArgs) return false;
+ for (unsigned I = 0, E = Args.NumArgs; I != E; ++I)
+ if (!Args.Args[I].structurallyEquals(Other.Args.Args[I]))
+ return false;
+ return true;
+ }
+
+ // Suppress warnings.
+ return false;
+}
+
//===----------------------------------------------------------------------===//
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
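
A note on what structurallyEquals compares (sketch, not part of the patch; sameIntTypeArgument is a made-up helper): for the Type, Declaration, Template and Expression kinds it is pointer identity of the stored value, while Integral arguments compare both the value and the written type.

#include "clang/AST/ASTContext.h"
#include "clang/AST/TemplateBase.h"
using namespace clang;

bool sameIntTypeArgument(ASTContext &Ctx) {
  TemplateArgument A(Ctx.IntTy), B(Ctx.IntTy);
  // Both arguments hold the same canonical QualType, so the TypeOrValue
  // comparison in structurallyEquals yields true.
  return A.structurallyEquals(B);
}
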
diff --git a/lib/AST/TemplateName.cpp b/lib/AST/TemplateName.cpp
index 14722f70398c..ef7b315314d4 100644
--- a/lib/AST/TemplateName.cpp
+++ b/lib/AST/TemplateName.cpp
@@ -21,6 +21,17 @@
using namespace clang;
using namespace llvm;
+TemplateName::NameKind TemplateName::getKind() const {
+ if (Storage.is<TemplateDecl *>())
+ return Template;
+ if (Storage.is<OverloadedTemplateStorage *>())
+ return OverloadedTemplate;
+ if (Storage.is<QualifiedTemplateName *>())
+ return QualifiedTemplate;
+ assert(Storage.is<DependentTemplateName *>() && "There's a case unhandled!");
+ return DependentTemplate;
+}
+
TemplateDecl *TemplateName::getAsTemplateDecl() const {
if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
return Template;
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 1aab65ebec55..ab64eafbee21 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -439,17 +439,49 @@ bool Type::isIntegerType() const {
return false;
}
-bool Type::isIntegralType() const {
+/// \brief Determine whether this type is an integral type.
+///
+/// This routine determines whether the given type is an integral type per
+/// C++ [basic.fundamental]p7. Although the C standard does not define the
+/// term "integral type", it has a similar term "integer type", and in C++
+/// the two terms are equivalent. However, C's "integer type" includes
+/// enumeration types, while C++'s "integer type" does not. The \c ASTContext
+/// parameter is used to determine whether we should be following the C or
+/// C++ rules when determining whether this type is an integral/integer type.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// type", use this routine.
+///
+/// For cases where C permits "an integer type" and C++ permits "an integral
+/// or enumeration type", use \c isIntegralOrEnumerationType() instead.
+///
+/// \param Ctx The context in which this type occurs.
+///
+/// \returns true if the type is considered an integral type, false otherwise.
+bool Type::isIntegralType(ASTContext &Ctx) const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->getKind() >= BuiltinType::Bool &&
BT->getKind() <= BuiltinType::Int128;
- if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
- if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
- return true; // Complete enum types are integral.
- // FIXME: In C++, enum types are never integral.
+
+ if (!Ctx.getLangOptions().CPlusPlus)
+ if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
+ if (TT->getDecl()->isEnum() && TT->getDecl()->isDefinition())
+ return true; // Complete enum types are integral in C.
+
return false;
}
+bool Type::isIntegralOrEnumerationType() const {
+ if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
+ return BT->getKind() >= BuiltinType::Bool &&
+ BT->getKind() <= BuiltinType::Int128;
+
+ if (isa<EnumType>(CanonicalType))
+ return true;
+
+ return false;
+}
+
bool Type::isEnumeralType() const {
if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
return TT->getDecl()->isEnum();
@@ -531,16 +563,19 @@ bool Type::isFloatingType() const {
BT->getKind() <= BuiltinType::LongDouble;
if (const ComplexType *CT = dyn_cast<ComplexType>(CanonicalType))
return CT->getElementType()->isFloatingType();
+ return false;
+}
+
+bool Type::hasFloatingRepresentation() const {
if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isFloatingType();
- return false;
+ else
+ return isFloatingType();
}
bool Type::isRealFloatingType() const {
if (const BuiltinType *BT = dyn_cast<BuiltinType>(CanonicalType))
return BT->isFloatingPoint();
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
- return VT->getElementType()->isRealFloatingType();
return false;
}
@@ -550,8 +585,6 @@ bool Type::isRealType() const {
BT->getKind() <= BuiltinType::LongDouble;
if (const TagType *TT = dyn_cast<TagType>(CanonicalType))
return TT->getDecl()->isEnum() && TT->getDecl()->isDefinition();
- if (const VectorType *VT = dyn_cast<VectorType>(CanonicalType))
- return VT->getElementType()->isRealType();
return false;
}
@@ -563,7 +596,7 @@ bool Type::isArithmeticType() const {
// GCC allows forward declaration of enum types (forbidden by C99 6.7.2.3p2).
// If a body isn't seen by the time we get here, return false.
return ET->getDecl()->isDefinition();
- return isa<ComplexType>(CanonicalType) || isa<VectorType>(CanonicalType);
+ return isa<ComplexType>(CanonicalType);
}
bool Type::isScalarType() const {
@@ -768,6 +801,7 @@ bool Type::isSpecifierType() const {
case TemplateSpecialization:
case Elaborated:
case DependentName:
+ case DependentTemplateSpecialization:
case ObjCInterface:
case ObjCObject:
case ObjCObjectPointer: // FIXME: object pointers aren't really specifiers
@@ -856,12 +890,56 @@ TypeWithKeyword::getKeywordName(ElaboratedTypeKeyword Keyword) {
}
}
+ElaboratedType::~ElaboratedType() {}
+DependentNameType::~DependentNameType() {}
+DependentTemplateSpecializationType::~DependentTemplateSpecializationType() {}
+
+void DependentTemplateSpecializationType::Destroy(ASTContext &C) {
+ for (unsigned Arg = 0; Arg < NumArgs; ++Arg) {
+ // FIXME: Not all expressions get cloned, so we can't yet perform
+ // this destruction.
+ // if (Expr *E = getArg(Arg).getAsExpr())
+ // E->Destroy(C);
+ }
+}
+
+DependentTemplateSpecializationType::DependentTemplateSpecializationType(
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS, const IdentifierInfo *Name,
+ unsigned NumArgs, const TemplateArgument *Args,
+ QualType Canon)
+ : TypeWithKeyword(Keyword, DependentTemplateSpecialization, Canon, true),
+ NNS(NNS), Name(Name), NumArgs(NumArgs) {
+ assert(NNS && NNS->isDependent() &&
+ "DependentTemplateSpecializatonType requires dependent qualifier");
+ for (unsigned I = 0; I != NumArgs; ++I)
+ new (&getArgBuffer()[I]) TemplateArgument(Args[I]);
+}
+
+void
+DependentTemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
+ ASTContext &Context,
+ ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *Qualifier,
+ const IdentifierInfo *Name,
+ unsigned NumArgs,
+ const TemplateArgument *Args) {
+ ID.AddInteger(Keyword);
+ ID.AddPointer(Qualifier);
+ ID.AddPointer(Name);
+ for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
+ Args[Idx].Profile(ID, Context);
+}
+
bool Type::isElaboratedTypeSpecifier() const {
ElaboratedTypeKeyword Keyword;
if (const ElaboratedType *Elab = dyn_cast<ElaboratedType>(this))
Keyword = Elab->getKeyword();
else if (const DependentNameType *DepName = dyn_cast<DependentNameType>(this))
Keyword = DepName->getKeyword();
+ else if (const DependentTemplateSpecializationType *DepTST =
+ dyn_cast<DependentTemplateSpecializationType>(this))
+ Keyword = DepTST->getKeyword();
else
return false;
@@ -914,6 +992,22 @@ const char *BuiltinType::getName(const LangOptions &LO) const {
void FunctionType::ANCHOR() {} // Key function for FunctionType.
+QualType QualType::getCallResultType(ASTContext &Context) const {
+ if (const ReferenceType *RefType = getTypePtr()->getAs<ReferenceType>())
+ return RefType->getPointeeType();
+
+ // C++0x [basic.lval]:
+ // Class prvalues can have cv-qualified types; non-class prvalues always
+ // have cv-unqualified types.
+ //
+ // See also C99 6.3.2.1p2.
+ if (!Context.getLangOptions().CPlusPlus ||
+ !getTypePtr()->isDependentType() && !getTypePtr()->isRecordType())
+ return getUnqualifiedType();
+
+ return *this;
+}
+
llvm::StringRef FunctionType::getNameForCallConv(CallingConv CC) {
switch (CC) {
case CC_Default: llvm_unreachable("no name for default cc");
@@ -1085,14 +1179,12 @@ anyDependentTemplateArguments(const TemplateArgument *Args, unsigned N) {
}
TemplateSpecializationType::
-TemplateSpecializationType(ASTContext &Context, TemplateName T,
- bool IsCurrentInstantiation,
+TemplateSpecializationType(TemplateName T,
const TemplateArgument *Args,
unsigned NumArgs, QualType Canon)
: Type(TemplateSpecialization,
Canon.isNull()? QualType(this, 0) : Canon,
T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)),
- ContextAndCurrentInstantiation(&Context, IsCurrentInstantiation),
Template(T), NumArgs(NumArgs) {
assert((!Canon.isNull() ||
T.isDependent() || anyDependentTemplateArguments(Args, NumArgs)) &&
@@ -1113,25 +1205,12 @@ void TemplateSpecializationType::Destroy(ASTContext& C) {
}
}
-TemplateSpecializationType::iterator
-TemplateSpecializationType::end() const {
- return begin() + getNumArgs();
-}
-
-const TemplateArgument &
-TemplateSpecializationType::getArg(unsigned Idx) const {
- assert(Idx < getNumArgs() && "Template argument out of range");
- return getArgs()[Idx];
-}
-
void
TemplateSpecializationType::Profile(llvm::FoldingSetNodeID &ID,
TemplateName T,
- bool IsCurrentInstantiation,
const TemplateArgument *Args,
unsigned NumArgs,
ASTContext &Context) {
- ID.AddBoolean(IsCurrentInstantiation);
T.Profile(ID);
for (unsigned Idx = 0; Idx < NumArgs; ++Idx)
Args[Idx].Profile(ID, Context);
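
The new ASTContext parameter to isIntegralType exists for one concrete case (illustration, not part of the patch; Color is a made-up enum):

enum Color { Red, Green };
// For the complete type 'Color':
//   C:    Type::isIntegralType(Ctx)            -> true   (enums are integer types)
//   C++:  Type::isIntegralType(Ctx)            -> false
//         Type::isIntegralOrEnumerationType()  -> true
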
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 35a7e096994d..a08ee1ae695d 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -227,12 +227,13 @@ void TypePrinter::PrintDependentSizedExtVector(
}
void TypePrinter::PrintVector(const VectorType *T, std::string &S) {
- if (T->isAltiVec()) {
- if (T->isPixel())
+ if (T->getAltiVecSpecific() != VectorType::NotAltiVec) {
+ if (T->getAltiVecSpecific() == VectorType::Pixel)
S = "__vector __pixel " + S;
else {
Print(T->getElementType(), S);
- S = "__vector " + S;
+ S = ((T->getAltiVecSpecific() == VectorType::Bool)
+ ? "__vector __bool " : "__vector ") + S;
}
} else {
// FIXME: We prefer to print the size directly here, but have no way
@@ -452,11 +453,13 @@ void TypePrinter::PrintTag(TagDecl *D, std::string &InnerString) {
if (!HasKindDecoration)
OS << " " << D->getKindName();
- PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
- D->getLocation());
- OS << " at " << PLoc.getFilename()
- << ':' << PLoc.getLine()
- << ':' << PLoc.getColumn();
+ if (D->getLocation().isValid()) {
+ PresumedLoc PLoc = D->getASTContext().getSourceManager().getPresumedLoc(
+ D->getLocation());
+ OS << " at " << PLoc.getFilename()
+ << ':' << PLoc.getLine()
+ << ':' << PLoc.getColumn();
+ }
}
OS << '>';
@@ -578,15 +581,31 @@ void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S)
T->getQualifier()->print(OS, Policy);
- if (const IdentifierInfo *Ident = T->getIdentifier())
- OS << Ident->getName();
- else if (const TemplateSpecializationType *Spec = T->getTemplateId()) {
- Spec->getTemplateName().print(OS, Policy, true);
- OS << TemplateSpecializationType::PrintTemplateArgumentList(
- Spec->getArgs(),
- Spec->getNumArgs(),
+ OS << T->getIdentifier()->getName();
+ }
+
+ if (S.empty())
+ S.swap(MyString);
+ else
+ S = MyString + ' ' + S;
+}
+
+void TypePrinter::PrintDependentTemplateSpecialization(
+ const DependentTemplateSpecializationType *T, std::string &S) {
+ std::string MyString;
+ {
+ llvm::raw_string_ostream OS(MyString);
+
+ OS << TypeWithKeyword::getKeywordName(T->getKeyword());
+ if (T->getKeyword() != ETK_None)
+ OS << " ";
+
+ T->getQualifier()->print(OS, Policy);
+ OS << T->getIdentifier()->getName();
+ OS << TemplateSpecializationType::PrintTemplateArgumentList(
+ T->getArgs(),
+ T->getNumArgs(),
Policy);
- }
}
if (S.empty())
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 6f2cb41a6e40..08543aacba5c 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -171,8 +171,8 @@ private:
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
bool FinishBlock(CFGBlock* B);
- CFGBlock *addStmt(Stmt *S, AddStmtChoice asc = AddStmtChoice::AlwaysAdd) {
- return Visit(S, asc);
+ CFGBlock *addStmt(Stmt *S) {
+ return Visit(S, AddStmtChoice::AlwaysAdd);
}
void AppendStmt(CFGBlock *B, Stmt *S,
@@ -538,6 +538,15 @@ CFGBlock *CFGBuilder::VisitBinaryOperator(BinaryOperator *B,
addStmt(B->getRHS());
return addStmt(B->getLHS());
}
+ else if (B->isAssignmentOp()) {
+ if (asc.alwaysAdd()) {
+ autoCreateBlock();
+ AppendStmt(Block, B, asc);
+ }
+
+ Visit(B->getRHS());
+ return Visit(B->getLHS(), AddStmtChoice::AsLValueNotAlwaysAdd);
+ }
return VisitStmt(B, asc);
}
@@ -612,8 +621,12 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!CanThrow(C->getCallee()))
AddEHEdge = false;
- if (!NoReturn && !AddEHEdge)
- return VisitStmt(C, AddStmtChoice::AlwaysAdd);
+ if (!NoReturn && !AddEHEdge) {
+ if (asc.asLValue())
+ return VisitStmt(C, AddStmtChoice::AlwaysAddAsLValue);
+ else
+ return VisitStmt(C, AddStmtChoice::AlwaysAdd);
+ }
if (Block) {
Succ = Block;
@@ -651,13 +664,13 @@ CFGBlock *CFGBuilder::VisitChooseExpr(ChooseExpr *C,
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* LHSBlock = addStmt(C->getLHS(), asc);
+ CFGBlock* LHSBlock = Visit(C->getLHS(), asc);
if (!FinishBlock(LHSBlock))
return 0;
Succ = ConfluenceBlock;
Block = NULL;
- CFGBlock* RHSBlock = addStmt(C->getRHS(), asc);
+ CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
if (!FinishBlock(RHSBlock))
return 0;
@@ -709,7 +722,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
Block = NULL;
CFGBlock* LHSBlock = NULL;
if (C->getLHS()) {
- LHSBlock = addStmt(C->getLHS(), asc);
+ LHSBlock = Visit(C->getLHS(), asc);
if (!FinishBlock(LHSBlock))
return 0;
Block = NULL;
@@ -717,7 +730,7 @@ CFGBlock *CFGBuilder::VisitConditionalOperator(ConditionalOperator *C,
// Create the block for the RHS expression.
Succ = ConfluenceBlock;
- CFGBlock* RHSBlock = addStmt(C->getRHS(), asc);
+ CFGBlock* RHSBlock = Visit(C->getRHS(), asc);
if (!FinishBlock(RHSBlock))
return 0;
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index a8e37087c09c..f2916c2068e1 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -9,4 +9,5 @@ add_clang_library(clangAnalysis
UninitializedValues.cpp
)
-add_dependencies(clangAnalysis ClangDiagnosticAnalysis ClangStmtNodes)
+add_dependencies(clangAnalysis ClangAttrClasses ClangAttrList
+ ClangDiagnosticAnalysis ClangDeclNodes ClangStmtNodes)
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 01a36a1074e8..4efe25ea1e0e 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -256,17 +256,21 @@ void TransferFuncs::VisitAssign(BinaryOperator* B) {
// Assigning to a variable?
if (DeclRefExpr* DR = dyn_cast<DeclRefExpr>(LHS->IgnoreParens())) {
+ // Assignments to references don't kill the ref's address
+ if (DR->getDecl()->getType()->isReferenceType()) {
+ VisitDeclRefExpr(DR);
+ } else {
+ // Update liveness information.
+ unsigned bit = AD.getIdx(DR->getDecl());
+ LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
- // Update liveness inforamtion.
- unsigned bit = AD.getIdx(DR->getDecl());
- LiveState.getDeclBit(bit) = Dead | AD.AlwaysLive.getDeclBit(bit);
-
- if (AD.Observer) { AD.Observer->ObserverKill(DR); }
+ if (AD.Observer) { AD.Observer->ObserverKill(DR); }
- // Handle things like +=, etc., which also generate "uses"
- // of a variable. Do this just by visiting the subexpression.
- if (B->getOpcode() != BinaryOperator::Assign)
- VisitDeclRefExpr(DR);
+ // Handle things like +=, etc., which also generate "uses"
+ // of a variable. Do this just by visiting the subexpression.
+ if (B->getOpcode() != BinaryOperator::Assign)
+ VisitDeclRefExpr(DR);
+ }
}
else // Not assigning to a variable. Process LHS as usual.
Visit(LHS);
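
A minimal case for the reference check added above (sketch, not part of the patch; store is a made-up function): writing through a reference uses the binding rather than replacing it, so the assignment must not kill 'r' in the liveness sets.

void store(int &r) {
  r = 0;  // 'r' has reference type, so VisitAssign now records a use of 'r'
          // instead of a kill.
}
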
diff --git a/lib/Analysis/Makefile b/lib/Analysis/Makefile
index 9b473803fa92..03bf7a6712f3 100644
--- a/lib/Analysis/Makefile
+++ b/lib/Analysis/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangAnalysis
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index c38aae34764c..558d38af0c6e 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -14,12 +14,16 @@
#include "clang/Analysis/Analyses/PrintfFormatString.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Type.h"
+#include "llvm/Support/raw_ostream.h"
using clang::analyze_printf::ArgTypeResult;
using clang::analyze_printf::FormatSpecifier;
using clang::analyze_printf::FormatStringHandler;
using clang::analyze_printf::OptionalAmount;
using clang::analyze_printf::PositionContext;
+using clang::analyze_printf::ConversionSpecifier;
+using clang::analyze_printf::LengthModifier;
using namespace clang;
@@ -35,7 +39,6 @@ public:
const FormatSpecifier &fs)
: FS(fs), Start(start), Stop(false) {}
-
const char *getStart() const { return Start; }
bool shouldStop() const { return Stop; }
bool hasValue() const { return Start != 0; }
@@ -80,7 +83,8 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) {
}
if (hasDigits)
- return OptionalAmount(OptionalAmount::Constant, accumulator, Beg);
+ return OptionalAmount(OptionalAmount::Constant, accumulator, Beg, I - Beg,
+ false);
break;
}
@@ -92,7 +96,7 @@ static OptionalAmount ParseNonPositionAmount(const char *&Beg, const char *E,
unsigned &argIndex) {
if (*Beg == '*') {
++Beg;
- return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg);
+ return OptionalAmount(OptionalAmount::Arg, argIndex++, Beg, 0, false);
}
return ParseAmount(Beg, E);
@@ -120,6 +124,8 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H,
assert(Amt.getHowSpecified() == OptionalAmount::Constant);
if (*I == '$') {
+ // Handle positional arguments
+
// Special case: '*0$', since this is an easy mistake.
if (Amt.getConstantAmount() == 0) {
H.HandleZeroPosition(Beg, I - Beg + 1);
@@ -130,7 +136,7 @@ static OptionalAmount ParsePositionAmount(FormatStringHandler &H,
Beg = ++I;
return OptionalAmount(OptionalAmount::Arg, Amt.getConstantAmount() - 1,
- Tmp);
+ Tmp, 0, true);
}
H.HandleInvalidPosition(Beg, I - Beg, p);
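For reference, the positional-amount syntax being parsed here, assuming a POSIX printf (example input only):

#include <cstdio>

void positional_width(int width, int value) {
  // Conversion uses argument 2; the field width comes from argument 1 ("*1$").
  std::printf("%2$*1$d\n", width, value);
  // Positions are 1-based, so a width drawn from "*0$" is the easy mistake
  // reported through HandleZeroPosition():
  //   std::printf("%1$*0$d\n", value);
}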
@@ -173,7 +179,6 @@ static bool ParseFieldWidth(FormatStringHandler &H, FormatSpecifier &FS,
return false;
}
-
static bool ParseArgPosition(FormatStringHandler &H,
FormatSpecifier &FS, const char *Start,
const char *&Beg, const char *E) {
@@ -257,11 +262,11 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
for ( ; I != E; ++I) {
switch (*I) {
default: hasMore = false; break;
- case '-': FS.setIsLeftJustified(); break;
- case '+': FS.setHasPlusPrefix(); break;
- case ' ': FS.setHasSpacePrefix(); break;
- case '#': FS.setHasAlternativeForm(); break;
- case '0': FS.setHasLeadingZeros(); break;
+ case '-': FS.setIsLeftJustified(I); break;
+ case '+': FS.setHasPlusPrefix(I); break;
+ case ' ': FS.setHasSpacePrefix(I); break;
+ case '#': FS.setHasAlternativeForm(I); break;
+ case '0': FS.setHasLeadingZeros(I); break;
}
if (!hasMore)
break;
@@ -304,24 +309,28 @@ static FormatSpecifierResult ParseFormatSpecifier(FormatStringHandler &H,
}
// Look for the length modifier.
- LengthModifier lm = None;
+ LengthModifier::Kind lmKind = LengthModifier::None;
+ const char *lmPosition = I;
switch (*I) {
default:
break;
case 'h':
++I;
- lm = (I != E && *I == 'h') ? ++I, AsChar : AsShort;
+ lmKind = (I != E && *I == 'h') ?
+ ++I, LengthModifier::AsChar : LengthModifier::AsShort;
break;
case 'l':
++I;
- lm = (I != E && *I == 'l') ? ++I, AsLongLong : AsLong;
+ lmKind = (I != E && *I == 'l') ?
+ ++I, LengthModifier::AsLongLong : LengthModifier::AsLong;
break;
- case 'j': lm = AsIntMax; ++I; break;
- case 'z': lm = AsSizeT; ++I; break;
- case 't': lm = AsPtrDiff; ++I; break;
- case 'L': lm = AsLongDouble; ++I; break;
- case 'q': lm = AsLongLong; ++I; break;
+ case 'j': lmKind = LengthModifier::AsIntMax; ++I; break;
+ case 'z': lmKind = LengthModifier::AsSizeT; ++I; break;
+ case 't': lmKind = LengthModifier::AsPtrDiff; ++I; break;
+ case 'L': lmKind = LengthModifier::AsLongDouble; ++I; break;
+ case 'q': lmKind = LengthModifier::AsLongLong; ++I; break;
}
+ LengthModifier lm(lmPosition, lmKind);
FS.setLengthModifier(lm);
if (I == E) {
@@ -414,95 +423,111 @@ FormatStringHandler::~FormatStringHandler() {}
//===----------------------------------------------------------------------===//
bool ArgTypeResult::matchesType(ASTContext &C, QualType argTy) const {
- assert(isValid());
-
- if (K == UnknownTy)
- return true;
-
- if (K == SpecificTy) {
- argTy = C.getCanonicalType(argTy).getUnqualifiedType();
-
- if (T == argTy)
+ switch (K) {
+ case InvalidTy:
+ assert(false && "ArgTypeResult must be valid");
return true;
- if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
- switch (BT->getKind()) {
- default:
- break;
- case BuiltinType::Char_S:
- case BuiltinType::SChar:
- return T == C.UnsignedCharTy;
- case BuiltinType::Char_U:
- case BuiltinType::UChar:
- return T == C.SignedCharTy;
- case BuiltinType::Short:
- return T == C.UnsignedShortTy;
- case BuiltinType::UShort:
- return T == C.ShortTy;
- case BuiltinType::Int:
- return T == C.UnsignedIntTy;
- case BuiltinType::UInt:
- return T == C.IntTy;
- case BuiltinType::Long:
- return T == C.UnsignedLongTy;
- case BuiltinType::ULong:
- return T == C.LongTy;
- case BuiltinType::LongLong:
- return T == C.UnsignedLongLongTy;
- case BuiltinType::ULongLong:
- return T == C.LongLongTy;
- }
-
- return false;
- }
+ case UnknownTy:
+ return true;
- if (K == CStrTy) {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
+ case SpecificTy: {
+ argTy = C.getCanonicalType(argTy).getUnqualifiedType();
+ if (T == argTy)
+ return true;
+ if (const BuiltinType *BT = argTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ default:
+ break;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return T == C.UnsignedCharTy;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ return T == C.SignedCharTy;
+ case BuiltinType::Short:
+ return T == C.UnsignedShortTy;
+ case BuiltinType::UShort:
+ return T == C.ShortTy;
+ case BuiltinType::Int:
+ return T == C.UnsignedIntTy;
+ case BuiltinType::UInt:
+ return T == C.IntTy;
+ case BuiltinType::Long:
+ return T == C.UnsignedLongTy;
+ case BuiltinType::ULong:
+ return T == C.LongTy;
+ case BuiltinType::LongLong:
+ return T == C.UnsignedLongLongTy;
+ case BuiltinType::ULongLong:
+ return T == C.LongLongTy;
+ }
return false;
+ }
- QualType pointeeTy = PT->getPointeeType();
-
- if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
- switch (BT->getKind()) {
- case BuiltinType::Void:
- case BuiltinType::Char_U:
- case BuiltinType::UChar:
- case BuiltinType::Char_S:
- case BuiltinType::SChar:
- return true;
- default:
- break;
- }
-
- return false;
- }
+ case CStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy = PT->getPointeeType();
+ if (const BuiltinType *BT = pointeeTy->getAs<BuiltinType>())
+ switch (BT->getKind()) {
+ case BuiltinType::Void:
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ return true;
+ default:
+ break;
+ }
- if (K == WCStrTy) {
- const PointerType *PT = argTy->getAs<PointerType>();
- if (!PT)
return false;
+ }
+
+ case WCStrTy: {
+ const PointerType *PT = argTy->getAs<PointerType>();
+ if (!PT)
+ return false;
+ QualType pointeeTy =
+ C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ return pointeeTy == C.getWCharType();
+ }
- QualType pointeeTy =
- C.getCanonicalType(PT->getPointeeType()).getUnqualifiedType();
+ case CPointerTy:
+ return argTy->getAs<PointerType>() != NULL ||
+ argTy->getAs<ObjCObjectPointerType>() != NULL;
- return pointeeTy == C.getWCharType();
+ case ObjCPointerTy:
+ return argTy->getAs<ObjCObjectPointerType>() != NULL;
}
+ // FIXME: Should be unreachable, but Clang is currently emitting
+ // a warning.
return false;
}
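Call sites illustrating the leniency encoded in the SpecificTy case above, where the specifier's expected type and the argument differ only in signedness (a sketch; whether a warning is emitted is up to the Sema format checks that consume this result):

#include <cstdio>

void signedness_pairs(int i, unsigned u) {
  std::printf("%d\n", u);   // expects int, given unsigned int: matches
  std::printf("%u\n", i);   // expects unsigned int, given int: matches
  std::printf("%d\n", 1.5); // expects int, given double: no match
}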
QualType ArgTypeResult::getRepresentativeType(ASTContext &C) const {
- assert(isValid());
- if (K == SpecificTy)
- return T;
- if (K == CStrTy)
- return C.getPointerType(C.CharTy);
- if (K == WCStrTy)
- return C.getPointerType(C.getWCharType());
- if (K == ObjCPointerTy)
- return C.ObjCBuiltinIdTy;
+ switch (K) {
+ case InvalidTy:
+ assert(false && "No representative type for Invalid ArgTypeResult");
+ // Fall-through.
+ case UnknownTy:
+ return QualType();
+ case SpecificTy:
+ return T;
+ case CStrTy:
+ return C.getPointerType(C.CharTy);
+ case WCStrTy:
+ return C.getPointerType(C.getWCharType());
+ case ObjCPointerTy:
+ return C.ObjCBuiltinIdTy;
+ case CPointerTy:
+ return C.VoidPtrTy;
+ }
+ // FIXME: Should be unreachable, but Clang is currently emitting
+ // a warning.
return QualType();
}
@@ -515,6 +540,98 @@ ArgTypeResult OptionalAmount::getArgType(ASTContext &Ctx) const {
}
//===----------------------------------------------------------------------===//
+// Methods on ConversionSpecifier.
+//===----------------------------------------------------------------------===//
+const char *ConversionSpecifier::toString() const {
+ switch (kind) {
+ case dArg: return "d";
+ case iArg: return "i";
+ case oArg: return "o";
+ case uArg: return "u";
+ case xArg: return "x";
+ case XArg: return "X";
+ case fArg: return "f";
+ case FArg: return "F";
+ case eArg: return "e";
+ case EArg: return "E";
+ case gArg: return "g";
+ case GArg: return "G";
+ case aArg: return "a";
+ case AArg: return "A";
+ case IntAsCharArg: return "c";
+ case CStrArg: return "s";
+ case VoidPtrArg: return "p";
+ case OutIntPtrArg: return "n";
+ case PercentArg: return "%";
+ case InvalidSpecifier: return NULL;
+
+ // MacOS X unicode extensions.
+ case CArg: return "C";
+ case UnicodeStrArg: return "S";
+
+ // Objective-C specific specifiers.
+ case ObjCObjArg: return "@";
+
+ // GlibC specific specifiers.
+ case PrintErrno: return "m";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on LengthModifier.
+//===----------------------------------------------------------------------===//
+
+const char *LengthModifier::toString() const {
+ switch (kind) {
+ case AsChar:
+ return "hh";
+ case AsShort:
+ return "h";
+ case AsLong: // or AsWideChar
+ return "l";
+ case AsLongLong:
+ return "ll";
+ case AsIntMax:
+ return "j";
+ case AsSizeT:
+ return "z";
+ case AsPtrDiff:
+ return "t";
+ case AsLongDouble:
+ return "L";
+ case None:
+ return "";
+ }
+ return NULL;
+}
+
+//===----------------------------------------------------------------------===//
+// Methods on OptionalAmount.
+//===----------------------------------------------------------------------===//
+
+void OptionalAmount::toString(llvm::raw_ostream &os) const {
+ switch (hs) {
+ case Invalid:
+ case NotSpecified:
+ return;
+ case Arg:
+ if (UsesDotPrefix)
+ os << ".";
+ if (usesPositionalArg())
+ os << "*" << getPositionalArgIndex() << "$";
+ else
+ os << "*";
+ break;
+ case Constant:
+ if (UsesDotPrefix)
+ os << ".";
+ os << amt;
+ break;
+ }
+}
+
+//===----------------------------------------------------------------------===//
// Methods on FormatSpecifier.
//===----------------------------------------------------------------------===//
@@ -523,57 +640,60 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const {
return ArgTypeResult::Invalid();
if (CS.isIntArg())
- switch (LM) {
- case AsLongDouble:
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
return ArgTypeResult::Invalid();
- case None: return Ctx.IntTy;
- case AsChar: return Ctx.SignedCharTy;
- case AsShort: return Ctx.ShortTy;
- case AsLong: return Ctx.LongTy;
- case AsLongLong: return Ctx.LongLongTy;
- case AsIntMax:
+ case LengthModifier::None: return Ctx.IntTy;
+ case LengthModifier::AsChar: return Ctx.SignedCharTy;
+ case LengthModifier::AsShort: return Ctx.ShortTy;
+ case LengthModifier::AsLong: return Ctx.LongTy;
+ case LengthModifier::AsLongLong: return Ctx.LongLongTy;
+ case LengthModifier::AsIntMax:
// FIXME: Return unknown for now.
return ArgTypeResult();
- case AsSizeT: return Ctx.getSizeType();
- case AsPtrDiff: return Ctx.getPointerDiffType();
+ case LengthModifier::AsSizeT: return Ctx.getSizeType();
+ case LengthModifier::AsPtrDiff: return Ctx.getPointerDiffType();
}
if (CS.isUIntArg())
- switch (LM) {
- case AsLongDouble:
+ switch (LM.getKind()) {
+ case LengthModifier::AsLongDouble:
return ArgTypeResult::Invalid();
- case None: return Ctx.UnsignedIntTy;
- case AsChar: return Ctx.UnsignedCharTy;
- case AsShort: return Ctx.UnsignedShortTy;
- case AsLong: return Ctx.UnsignedLongTy;
- case AsLongLong: return Ctx.UnsignedLongLongTy;
- case AsIntMax:
+ case LengthModifier::None: return Ctx.UnsignedIntTy;
+ case LengthModifier::AsChar: return Ctx.UnsignedCharTy;
+ case LengthModifier::AsShort: return Ctx.UnsignedShortTy;
+ case LengthModifier::AsLong: return Ctx.UnsignedLongTy;
+ case LengthModifier::AsLongLong: return Ctx.UnsignedLongLongTy;
+ case LengthModifier::AsIntMax:
// FIXME: Return unknown for now.
return ArgTypeResult();
- case AsSizeT:
+ case LengthModifier::AsSizeT:
// FIXME: How to get the corresponding unsigned
// version of size_t?
return ArgTypeResult();
- case AsPtrDiff:
+ case LengthModifier::AsPtrDiff:
// FIXME: How to get the corresponding unsigned
// version of ptrdiff_t?
return ArgTypeResult();
}
if (CS.isDoubleArg()) {
- if (LM == AsLongDouble)
+ if (LM.getKind() == LengthModifier::AsLongDouble)
return Ctx.LongDoubleTy;
return Ctx.DoubleTy;
}
switch (CS.getKind()) {
case ConversionSpecifier::CStrArg:
- return ArgTypeResult(LM == AsWideChar ? ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
+ return ArgTypeResult(LM.getKind() == LengthModifier::AsWideChar ?
+ ArgTypeResult::WCStrTy : ArgTypeResult::CStrTy);
case ConversionSpecifier::UnicodeStrArg:
// FIXME: This appears to be Mac OS X specific.
return ArgTypeResult::WCStrTy;
case ConversionSpecifier::CArg:
return Ctx.WCharTy;
+ case ConversionSpecifier::VoidPtrArg:
+ return ArgTypeResult::CPointerTy;
default:
break;
}
@@ -582,3 +702,329 @@ ArgTypeResult FormatSpecifier::getArgType(ASTContext &Ctx) const {
return ArgTypeResult();
}
+bool FormatSpecifier::fixType(QualType QT) {
+ // Handle strings first (char *, wchar_t *)
+ if (QT->isPointerType() && (QT->getPointeeType()->isAnyCharacterType())) {
+ CS.setKind(ConversionSpecifier::CStrArg);
+
+ // Disable irrelevant flags
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+
+ // Set the long length modifier for wide characters
+ if (QT->getPointeeType()->isWideCharType())
+ LM.setKind(LengthModifier::AsWideChar);
+
+ return true;
+ }
+
+ // We can only work with builtin types.
+ if (!QT->isBuiltinType())
+ return false;
+
+ // Everything else should be a base type
+ const BuiltinType *BT = QT->getAs<BuiltinType>();
+
+ // Set length modifier
+ switch (BT->getKind()) {
+ default:
+ // The rest of the conversions are either optional or for non-builtin types
+ LM.setKind(LengthModifier::None);
+ break;
+
+ case BuiltinType::WChar:
+ case BuiltinType::Long:
+ case BuiltinType::ULong:
+ LM.setKind(LengthModifier::AsLong);
+ break;
+
+ case BuiltinType::LongLong:
+ case BuiltinType::ULongLong:
+ LM.setKind(LengthModifier::AsLongLong);
+ break;
+
+ case BuiltinType::LongDouble:
+ LM.setKind(LengthModifier::AsLongDouble);
+ break;
+ }
+
+ // Set conversion specifier and disable any flags which do not apply to it.
+ if (QT->isAnyCharacterType()) {
+ CS.setKind(ConversionSpecifier::IntAsCharArg);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+ // Test for Floating type first as LongDouble can pass isUnsignedIntegerType
+ else if (QT->isRealFloatingType()) {
+ CS.setKind(ConversionSpecifier::fArg);
+ }
+ else if (QT->isPointerType()) {
+ CS.setKind(ConversionSpecifier::VoidPtrArg);
+ Precision.setHowSpecified(OptionalAmount::NotSpecified);
+ HasAlternativeForm = 0;
+ HasLeadingZeroes = 0;
+ HasPlusPrefix = 0;
+ }
+ else if (QT->isSignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::dArg);
+ HasAlternativeForm = 0;
+ }
+ else if (QT->isUnsignedIntegerType()) {
+ CS.setKind(ConversionSpecifier::uArg);
+ HasAlternativeForm = 0;
+ HasPlusPrefix = 0;
+ }
+ else {
+ return false;
+ }
+
+ return true;
+}
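Illustrative inputs for the fix-it logic above (a sketch; the replacement text is produced by toString() below and surfaced by Sema's format diagnostics):

#include <cstdio>

void fixit_candidates(long n, const wchar_t *ws, double d) {
  std::printf("%d\n", n);   // fixType(long)           -> "%ld"
  std::printf("%d\n", ws);  // fixType(const wchar_t*) -> "%ls" (AsWideChar)
  std::printf("%i\n", d);   // fixType(double)         -> "%f"
}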
+
+void FormatSpecifier::toString(llvm::raw_ostream &os) const {
+ // Whilst some features have no defined order, we are using the order
+  // appearing in the C99 standard (ISO/IEC 9899:1999 (E) §7.19.6.1)
+ os << "%";
+
+ // Positional args
+ if (usesPositionalArg()) {
+ os << getPositionalArgIndex() << "$";
+ }
+
+ // Conversion flags
+ if (IsLeftJustified) os << "-";
+ if (HasPlusPrefix) os << "+";
+ if (HasSpacePrefix) os << " ";
+ if (HasAlternativeForm) os << "#";
+ if (HasLeadingZeroes) os << "0";
+
+ // Minimum field width
+ FieldWidth.toString(os);
+ // Precision
+ Precision.toString(os);
+ // Length modifier
+ os << LM.toString();
+ // Conversion specifier
+ os << CS.toString();
+}
+
+bool FormatSpecifier::hasValidPlusPrefix() const {
+ if (!HasPlusPrefix)
+ return true;
+
+ // The plus prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidAlternativeForm() const {
+ if (!HasAlternativeForm)
+ return true;
+
+ // Alternate form flag only valid with the oxaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidLeadingZeros() const {
+ if (!HasLeadingZeroes)
+ return true;
+
+ // Leading zeroes flag only valid with the diouxXaAeEfFgG conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidSpacePrefix() const {
+ if (!HasSpacePrefix)
+ return true;
+
+ // The space prefix only makes sense for signed conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+bool FormatSpecifier::hasValidLeftJustified() const {
+ if (!IsLeftJustified)
+ return true;
+
+ // The left justified flag is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::OutIntPtrArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
+
+bool FormatSpecifier::hasValidLengthModifier() const {
+ switch (LM.getKind()) {
+ case LengthModifier::None:
+ return true;
+
+ // Handle most integer flags
+ case LengthModifier::AsChar:
+ case LengthModifier::AsShort:
+ case LengthModifier::AsLongLong:
+ case LengthModifier::AsIntMax:
+ case LengthModifier::AsSizeT:
+ case LengthModifier::AsPtrDiff:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::OutIntPtrArg:
+ return true;
+ default:
+ return false;
+ }
+
+ // Handle 'l' flag
+ case LengthModifier::AsLong:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::OutIntPtrArg:
+ case ConversionSpecifier::IntAsCharArg:
+ case ConversionSpecifier::CStrArg:
+ return true;
+ default:
+ return false;
+ }
+
+ case LengthModifier::AsLongDouble:
+ switch (CS.getKind()) {
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+}
+
+bool FormatSpecifier::hasValidPrecision() const {
+ if (Precision.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // Precision is only valid with the diouxXaAeEfFgGs conversions
+ switch (CS.getKind()) {
+ case ConversionSpecifier::dArg:
+ case ConversionSpecifier::iArg:
+ case ConversionSpecifier::oArg:
+ case ConversionSpecifier::uArg:
+ case ConversionSpecifier::xArg:
+ case ConversionSpecifier::XArg:
+ case ConversionSpecifier::aArg:
+ case ConversionSpecifier::AArg:
+ case ConversionSpecifier::eArg:
+ case ConversionSpecifier::EArg:
+ case ConversionSpecifier::fArg:
+ case ConversionSpecifier::FArg:
+ case ConversionSpecifier::gArg:
+ case ConversionSpecifier::GArg:
+ case ConversionSpecifier::CStrArg:
+ return true;
+
+ default:
+ return false;
+ }
+}
+bool FormatSpecifier::hasValidFieldWidth() const {
+ if (FieldWidth.getHowSpecified() == OptionalAmount::NotSpecified)
+ return true;
+
+ // The field width is valid for all conversions except n
+ switch (CS.getKind()) {
+ case ConversionSpecifier::OutIntPtrArg:
+ return false;
+
+ default:
+ return true;
+ }
+}
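Format strings that the validity predicates above classify, for reference (a sketch; the diagnostics themselves are emitted by the callers in Sema):

#include <cstdio>

void flag_checks(int i, unsigned u, int *p) {
  std::printf("%+d % i\n", i, i);  // '+' and ' ' allowed: signed conversions only
  std::printf("%+u\n", u);         // rejected by hasValidPlusPrefix()
  std::printf("%#o %#x\n", u, u);  // accepted by hasValidAlternativeForm()
  std::printf("%#d\n", i);         // rejected by hasValidAlternativeForm()
  std::printf("%-8.3d\n", i);      // '-', width and precision all valid for 'd'
  std::printf("%8n\n", p);         // field width rejected for 'n' (hasValidFieldWidth)
}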
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index 1a89acc65af3..87bf834c2752 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -25,6 +25,8 @@ if (Subversion_FOUND AND EXISTS "${CLANG_SOURCE_DIR}/.svn")
endif()
add_dependencies(clangBasic
+ ClangARMNeon
+ ClangAttrList
ClangDiagnosticAnalysis
ClangDiagnosticAST
ClangDiagnosticCommon
@@ -34,3 +36,4 @@ add_dependencies(clangBasic
ClangDiagnosticLex
ClangDiagnosticParse
ClangDiagnosticSema)
+
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 2fd985f53803..641d87bb9afa 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -250,6 +250,7 @@ Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
ErrorsAsFatal = false;
SuppressSystemWarnings = false;
SuppressAllDiagnostics = false;
+ ShowOverloads = Ovl_All;
ExtBehavior = Ext_Ignore;
ErrorOccurred = false;
@@ -1042,8 +1043,7 @@ StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
const DiagnosticInfo &Info)
- : Level(Level), Loc(Info.getLocation())
-{
+ : Level(Level), Loc(Info.getLocation()) {
llvm::SmallString<64> Message;
Info.FormatDiagnostic(Message);
this->Message.assign(Message.begin(), Message.end());
@@ -1130,6 +1130,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
WriteSourceLocation(OS, SM, R->getBegin());
WriteSourceLocation(OS, SM, R->getEnd());
+ WriteUnsigned(OS, R->isTokenRange());
}
}
@@ -1158,6 +1159,7 @@ void StoredDiagnostic::Serialize(llvm::raw_ostream &OS) const {
for (fixit_iterator F = fixit_begin(), FEnd = fixit_end(); F != FEnd; ++F) {
WriteSourceLocation(OS, SM, F->RemoveRange.getBegin());
WriteSourceLocation(OS, SM, F->RemoveRange.getEnd());
+ WriteUnsigned(OS, F->RemoveRange.isTokenRange());
WriteSourceLocation(OS, SM, F->InsertionLoc);
WriteString(OS, F->CodeToInsert);
}
@@ -1271,11 +1273,14 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
return Diag;
for (unsigned I = 0; I != NumSourceRanges; ++I) {
SourceLocation Begin, End;
+ unsigned IsTokenRange;
if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, Begin) ||
- ReadSourceLocation(FM, SM, Memory, MemoryEnd, End))
+ ReadSourceLocation(FM, SM, Memory, MemoryEnd, End) ||
+ ReadUnsigned(Memory, MemoryEnd, IsTokenRange))
return Diag;
- Diag.Ranges.push_back(SourceRange(Begin, End));
+ Diag.Ranges.push_back(CharSourceRange(SourceRange(Begin, End),
+ IsTokenRange));
}
// Read the fix-it hints.
@@ -1284,9 +1289,10 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
return Diag;
for (unsigned I = 0; I != NumFixIts; ++I) {
SourceLocation RemoveBegin, RemoveEnd, InsertionLoc;
- unsigned InsertLen = 0;
+ unsigned InsertLen = 0, RemoveIsTokenRange;
if (ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveBegin) ||
ReadSourceLocation(FM, SM, Memory, MemoryEnd, RemoveEnd) ||
+ ReadUnsigned(Memory, MemoryEnd, RemoveIsTokenRange) ||
ReadSourceLocation(FM, SM, Memory, MemoryEnd, InsertionLoc) ||
ReadUnsigned(Memory, MemoryEnd, InsertLen) ||
Memory + InsertLen > MemoryEnd) {
@@ -1295,7 +1301,8 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
}
FixItHint Hint;
- Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd);
+ Hint.RemoveRange = CharSourceRange(SourceRange(RemoveBegin, RemoveEnd),
+ RemoveIsTokenRange);
Hint.InsertionLoc = InsertionLoc;
Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
Memory += InsertLen;
diff --git a/lib/Basic/Makefile b/lib/Basic/Makefile
index 58ac7eb86e75..51b8ac1b4569 100644
--- a/lib/Basic/Makefile
+++ b/lib/Basic/Makefile
@@ -11,16 +11,11 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangBasic
BUILD_ARCHIVE = 1
-CPPFLAGS += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-ifdef CLANG_VENDOR
-CPPFLAGS += -DCLANG_VENDOR='"$(CLANG_VENDOR) "'
-endif
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
SVN_REVISION := $(shell $(LLVM_SRC_ROOT)/utils/GetSourceVersion $(PROJ_SRC_DIR)/../..)
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index 6692e641f2a4..e4eaf6313780 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -34,6 +34,8 @@ TargetInfo::TargetInfo(const std::string &T) : Triple(T) {
DoubleAlign = 64;
LongDoubleWidth = 64;
LongDoubleAlign = 64;
+ LargeArrayMinWidth = 0;
+ LargeArrayAlign = 0;
SizeType = UnsignedLong;
PtrDiffType = SignedLong;
IntMaxType = SignedLongLong;
@@ -282,6 +284,8 @@ bool TargetInfo::validateOutputConstraint(ConstraintInfo &Info) const {
Info.setAllowsRegister();
Info.setAllowsMemory();
break;
+ case ',': // FIXME: Until we handle multiple alternative constraints,
+ return true; // ignore everything after the first comma.
}
Name++;
@@ -375,6 +379,8 @@ bool TargetInfo::validateInputConstraint(ConstraintInfo *OutputConstraints,
Info.setAllowsRegister();
Info.setAllowsMemory();
break;
+ case ',': // FIXME: Until we handle multiple alternative constraints,
+ return true; // ignore everything after the first comma.
}
Name++;
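The syntax the new ',' case tolerates is GCC's multi-alternative constraint list. A sketch (x86 assembly assumed); per the FIXME, only the first alternative is actually honored for now:

int pick_alternative(int x) {
  int r;
  // "=r,r" / "r,m" gives two columns of alternatives: reg<-reg or reg<-mem.
  // Everything after the first comma is now accepted and ignored by validation.
  __asm__("movl %1, %0" : "=r,r"(r) : "r,m"(x));
  return r;
}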
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 92fd417173bd..8d793163bc0b 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -150,7 +150,7 @@ protected:
public:
DarwinTargetInfo(const std::string& triple) :
OSTargetInfo<Target>(triple) {
- this->TLSSupported = false;
+ this->TLSSupported = llvm::Triple(triple).getDarwinMajorNumber() > 10;
}
virtual std::string isValidSectionSpecifier(llvm::StringRef SR) const {
@@ -160,6 +160,12 @@ public:
return llvm::MCSectionMachO::ParseSectionSpecifier(SR, Segment, Section,
TAA, StubSize);
}
+
+ virtual const char *getStaticInitSectionSpecifier() const {
+ // FIXME: We should return 0 when building kexts.
+ return "__TEXT,__StaticInit,regular,pure_instructions";
+ }
+
};
@@ -206,6 +212,30 @@ public:
}
};
+// Minix Target
+template<typename Target>
+class MinixTargetInfo : public OSTargetInfo<Target> {
+protected:
+ virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const {
+ // Minix defines
+
+ Builder.defineMacro("__minix", "3");
+ Builder.defineMacro("_EM_WSIZE", "4");
+ Builder.defineMacro("_EM_PSIZE", "4");
+ Builder.defineMacro("_EM_SSIZE", "2");
+ Builder.defineMacro("_EM_LSIZE", "4");
+ Builder.defineMacro("_EM_FSIZE", "4");
+ Builder.defineMacro("_EM_DSIZE", "8");
+ DefineStd(Builder, "unix", Opts);
+ }
+public:
+ MinixTargetInfo(const std::string &triple)
+ : OSTargetInfo<Target>(triple) {
+ this->UserLabelPrefix = "";
+ }
+};
+
// Linux target
template<typename Target>
class LinuxTargetInfo : public OSTargetInfo<Target> {
@@ -299,13 +329,20 @@ protected:
Builder.defineMacro("__CELLOS_LV2__");
Builder.defineMacro("__ELF__");
Builder.defineMacro("__LP32__");
+ Builder.defineMacro("_ARCH_PPC64");
+ Builder.defineMacro("__powerpc64__");
}
public:
PS3PPUTargetInfo(const std::string& triple)
: OSTargetInfo<Target>(triple) {
this->UserLabelPrefix = "";
this->LongWidth = this->LongAlign = this->PointerWidth = this->PointerAlign = 32;
+ this->IntMaxType = TargetInfo::SignedLongLong;
+ this->UIntMaxType = TargetInfo::UnsignedLongLong;
+ this->Int64Type = TargetInfo::SignedLongLong;
this->SizeType = TargetInfo::UnsignedInt;
+ this->DescriptionString = "E-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-"
+ "i64:64:64-f32:32:32-f64:64:64-v128:128:128-n32";
}
};
@@ -413,12 +450,98 @@ public:
switch (*Name) {
default: return false;
case 'O': // Zero
- return true;
+ break;
case 'b': // Base register
case 'f': // Floating point register
Info.setAllowsRegister();
- return true;
+ break;
+ // FIXME: The following are added to allow parsing.
+ // I just took a guess at what the actions should be.
+ // Also, is more specific checking needed? I.e. specific registers?
+ case 'd': // Floating point register (containing 64-bit value)
+ case 'v': // Altivec vector register
+ Info.setAllowsRegister();
+ break;
+ case 'w':
+ switch (Name[1]) {
+ case 'd':// VSX vector register to hold vector double data
+ case 'f':// VSX vector register to hold vector float data
+ case 's':// VSX vector register to hold scalar float data
+ case 'a':// Any VSX register
+ break;
+ default:
+ return false;
+ }
+ Info.setAllowsRegister();
+ Name++; // Skip over 'w'.
+ break;
+ case 'h': // `MQ', `CTR', or `LINK' register
+ case 'q': // `MQ' register
+ case 'c': // `CTR' register
+ case 'l': // `LINK' register
+ case 'x': // `CR' register (condition register) number 0
+ case 'y': // `CR' register (condition register)
+ case 'z': // `XER[CA]' carry bit (part of the XER register)
+ Info.setAllowsRegister();
+ break;
+ case 'I': // Signed 16-bit constant
+ case 'J': // Unsigned 16-bit constant shifted left 16 bits
+ // (use `L' instead for SImode constants)
+ case 'K': // Unsigned 16-bit constant
+ case 'L': // Signed 16-bit constant shifted left 16 bits
+ case 'M': // Constant larger than 31
+ case 'N': // Exact power of 2
+ case 'P': // Constant whose negation is a signed 16-bit constant
+ case 'G': // Floating point constant that can be loaded into a
+ // register with one instruction per word
+ case 'H': // Integer/Floating point constant that can be loaded
+ // into a register using three instructions
+ break;
+ case 'm': // Memory operand. Note that on PowerPC targets, m can
+ // include addresses that update the base register. It
+ // is therefore only safe to use `m' in an asm statement
+ // if that asm statement accesses the operand exactly once.
+ // The asm statement must also use `%U<opno>' as a
+ // placeholder for the “update” flag in the corresponding
+ // load or store instruction. For example:
+ // asm ("st%U0 %1,%0" : "=m" (mem) : "r" (val));
+ // is correct but:
+ // asm ("st %1,%0" : "=m" (mem) : "r" (val));
+ // is not. Use es rather than m if you don't want the base
+ // register to be updated.
+ case 'e':
+ if (Name[1] != 's')
+ return false;
+ // es: A “stable” memory operand; that is, one which does not
+ // include any automodification of the base register. Unlike
+ // `m', this constraint can be used in asm statements that
+ // might access the operand several times, or that might not
+ // access it at all.
+ Info.setAllowsMemory();
+ Name++; // Skip over 'e'.
+ break;
+ case 'Q': // Memory operand that is an offset from a register (it is
+ // usually better to use `m' or `es' in asm statements)
+ case 'Z': // Memory operand that is an indexed or indirect from a
+ // register (it is usually better to use `m' or `es' in
+ // asm statements)
+ Info.setAllowsMemory();
+ Info.setAllowsRegister();
+ break;
+ case 'R': // AIX TOC entry
+ case 'a': // Address operand that is an indexed or indirect from a
+ // register (`p' is preferable for asm statements)
+ case 'S': // Constant suitable as a 64-bit mask operand
+ case 'T': // Constant suitable as a 32-bit mask operand
+ case 'U': // System V Release 4 small data area reference
+ case 't': // AND masks that can be performed by two rldic{l, r}
+ // instructions
+ case 'W': // Vector constant that does not require memory
+ case 'j': // Vector constant that is all zeros.
+ break;
+ // End FIXME.
}
+ return true;
}
virtual const char *getClobbers() const {
return "";
@@ -600,6 +723,27 @@ public:
};
} // end anonymous namespace.
+
+namespace {
+class DarwinPPCTargetInfo :
+ public DarwinTargetInfo<PPCTargetInfo> {
+public:
+ DarwinPPCTargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPCTargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ }
+};
+
+class DarwinPPC64TargetInfo :
+ public DarwinTargetInfo<PPC64TargetInfo> {
+public:
+ DarwinPPC64TargetInfo(const std::string& triple)
+ : DarwinTargetInfo<PPC64TargetInfo>(triple) {
+ HasAlignMac68kSupport = true;
+ }
+};
+} // end anonymous namespace.
+
namespace {
// MBlaze abstract base class
class MBlazeTargetInfo : public TargetInfo {
@@ -1257,6 +1401,8 @@ public:
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
LongDoubleWidth = 128;
LongDoubleAlign = 128;
+ LargeArrayMinWidth = 128;
+ LargeArrayAlign = 128;
IntMaxType = SignedLong;
UIntMaxType = UnsignedLong;
Int64Type = SignedLong;
@@ -2294,6 +2440,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::arm:
case llvm::Triple::thumb:
switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<ARMTargetInfo>(T);
case llvm::Triple::Darwin:
return new DarwinARMTargetInfo(T);
case llvm::Triple::FreeBSD:
@@ -2327,14 +2475,14 @@ static TargetInfo *AllocateTarget(const std::string &T) {
case llvm::Triple::ppc:
if (os == llvm::Triple::Darwin)
- return new DarwinTargetInfo<PPCTargetInfo>(T);
+ return new DarwinPPCTargetInfo(T);
else if (os == llvm::Triple::FreeBSD)
return new FreeBSDTargetInfo<PPC32TargetInfo>(T);
return new PPC32TargetInfo(T);
case llvm::Triple::ppc64:
if (os == llvm::Triple::Darwin)
- return new DarwinTargetInfo<PPC64TargetInfo>(T);
+ return new DarwinPPC64TargetInfo(T);
else if (os == llvm::Triple::Lv2)
return new PS3PPUTargetInfo<PPC64TargetInfo>(T);
else if (os == llvm::Triple::FreeBSD)
@@ -2377,6 +2525,8 @@ static TargetInfo *AllocateTarget(const std::string &T) {
return new OpenBSDI386TargetInfo(T);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(T);
+ case llvm::Triple::Minix:
+ return new MinixTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::Solaris:
return new SolarisTargetInfo<X86_32TargetInfo>(T);
case llvm::Triple::Cygwin:
@@ -2444,6 +2594,12 @@ TargetInfo *TargetInfo::CreateTargetInfo(Diagnostic &Diags,
return 0;
}
+ // Set the target C++ ABI.
+ if (!Target->setCXXABI(Opts.CXXABI)) {
+ Diags.Report(diag::err_target_unknown_cxxabi) << Opts.CXXABI;
+ return 0;
+ }
+
// Compute the default target features, we need the target to handle this
// because features may have dependencies on one another.
llvm::StringMap<bool> Features;
diff --git a/lib/Frontend/AnalysisConsumer.cpp b/lib/Checker/AnalysisConsumer.cpp
index 6a4727929e7a..524f37e39662 100644
--- a/lib/Frontend/AnalysisConsumer.cpp
+++ b/lib/Checker/AnalysisConsumer.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/AnalysisConsumer.h"
+#include "clang/Checker/AnalysisConsumer.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
@@ -27,9 +27,11 @@
#include "clang/Checker/BugReporter/BugReporter.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
+#include "clang/Checker/PathDiagnosticClients.h"
+#include "GRExprEngineExperimentalChecks.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Frontend/AnalyzerOptions.h"
#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/System/Path.h"
@@ -79,8 +81,6 @@ public:
const Preprocessor &PP;
const std::string OutDir;
AnalyzerOptions Opts;
- bool declDisplayed;
-
// PD is owned by AnalysisManager.
PathDiagnosticClient *PD;
@@ -94,7 +94,7 @@ public:
const std::string& outdir,
const AnalyzerOptions& opts)
: Ctx(0), PP(pp), OutDir(outdir),
- Opts(opts), declDisplayed(false), PD(0) {
+ Opts(opts), PD(0) {
DigestAnalyzerOptions();
}
@@ -137,10 +137,9 @@ public:
}
void DisplayFunction(const Decl *D) {
- if (!Opts.AnalyzerDisplayProgress || declDisplayed)
+ if (!Opts.AnalyzerDisplayProgress)
return;
- declDisplayed = true;
SourceManager &SM = Mgr->getASTContext().getSourceManager();
PresumedLoc Loc = SM.getPresumedLoc(D->getLocation());
llvm::errs() << "ANALYZE: " << Loc.getFilename();
@@ -181,7 +180,7 @@ public:
}
virtual void HandleTranslationUnit(ASTContext &C);
- void HandleCode(Decl *D, Stmt* Body, Actions& actions);
+ void HandleCode(Decl *D, Actions& actions);
};
} // end anonymous namespace
@@ -209,7 +208,8 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
FD->getDeclName().getAsString() != Opts.AnalyzeSpecificFunction)
break;
- HandleCode(FD, FD->getBody(), FunctionActions);
+ DisplayFunction(FD);
+ HandleCode(FD, FunctionActions);
}
break;
}
@@ -221,14 +221,15 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
Opts.AnalyzeSpecificFunction != MD->getSelector().getAsString())
break;
- HandleCode(MD, MD->getBody(), ObjCMethodActions);
+ DisplayFunction(MD);
+ HandleCode(MD, ObjCMethodActions);
}
break;
}
case Decl::ObjCImplementation: {
ObjCImplementationDecl* ID = cast<ObjCImplementationDecl>(*I);
- HandleCode(ID, 0, ObjCImplementationActions);
+ HandleCode(ID, ObjCImplementationActions);
for (ObjCImplementationDecl::method_iterator MI = ID->meth_begin(),
ME = ID->meth_end(); MI != ME; ++MI) {
@@ -236,7 +237,7 @@ void AnalysisConsumer::HandleTranslationUnit(ASTContext &C) {
if (!Opts.AnalyzeSpecificFunction.empty() &&
Opts.AnalyzeSpecificFunction != (*MI)->getSelector().getAsString())
break;
- HandleCode(*MI, (*MI)->getBody(), ObjCMethodActions);
+ HandleCode(*MI, ObjCMethodActions);
}
}
break;
@@ -269,7 +270,7 @@ static void FindBlocks(DeclContext *D, llvm::SmallVectorImpl<Decl*> &WL) {
FindBlocks(DC, WL);
}
-void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
+void AnalysisConsumer::HandleCode(Decl *D, Actions& actions) {
  // Don't run the actions if an error has occurred with parsing the file.
Diagnostic &Diags = PP.getDiagnostics();
@@ -278,8 +279,9 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
// Don't run the actions on declarations in header files unless
// otherwise specified.
- if (!Opts.AnalyzeAll &&
- !Ctx->getSourceManager().isFromMainFile(D->getLocation()))
+ SourceManager &SM = Ctx->getSourceManager();
+ SourceLocation SL = SM.getInstantiationLoc(D->getLocation());
+ if (!Opts.AnalyzeAll && !SM.isFromMainFile(SL))
return;
// Clear the AnalysisManager of old AnalysisContexts.
@@ -289,7 +291,7 @@ void AnalysisConsumer::HandleCode(Decl *D, Stmt* Body, Actions& actions) {
llvm::SmallVector<Decl*, 10> WL;
WL.push_back(D);
- if (Body && Opts.AnalyzeNestedBlocks)
+ if (D->hasBody() && Opts.AnalyzeNestedBlocks)
FindBlocks(cast<DeclContext>(D), WL);
for (Actions::iterator I = actions.begin(), E = actions.end(); I != E; ++I)
@@ -339,6 +341,9 @@ static void ActionGRExprEngine(AnalysisConsumer &C, AnalysisManager& mgr,
if (C.Opts.EnableExperimentalChecks)
RegisterExperimentalChecks(Eng);
+ if (C.Opts.EnableIdempotentOperationChecker)
+ RegisterIdempotentOperationChecker(Eng);
+
// Set the graph auditor.
llvm::OwningPtr<ExplodedNode::Auditor> Auditor;
if (mgr.shouldVisualizeUbigraph()) {
diff --git a/lib/Checker/AttrNonNullChecker.cpp b/lib/Checker/AttrNonNullChecker.cpp
index 309a74ce544b..d0bccb27b405 100644
--- a/lib/Checker/AttrNonNullChecker.cpp
+++ b/lib/Checker/AttrNonNullChecker.cpp
@@ -60,9 +60,10 @@ void AttrNonNullChecker::PreVisitCallExpr(CheckerContext &C,
if (!Att->isNonNull(idx))
continue;
- const SVal &V = state->getSVal(*I);
- const DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+ SVal V = state->getSVal(*I);
+ DefinedSVal *DV = dyn_cast<DefinedSVal>(&V);
+ // If the value is unknown or undefined, we can't perform this check.
if (!DV)
continue;
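For reference, the calls this checker inspects (a sketch of analyzed input):

__attribute__((nonnull(1)))
void fill(int *dst, int n) {
  for (int i = 0; i < n; ++i)
    dst[i] = 0;
}

void caller(int *maybe_null) {
  fill(0, 4);           // flagged: null passed to a nonnull parameter
  fill(maybe_null, 4);  // not flagged unless this path already constrains it to null
}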
diff --git a/lib/Checker/BasicConstraintManager.cpp b/lib/Checker/BasicConstraintManager.cpp
index e89546ecb018..eee5c5946674 100644
--- a/lib/Checker/BasicConstraintManager.cpp
+++ b/lib/Checker/BasicConstraintManager.cpp
@@ -54,22 +54,28 @@ public:
ISetFactory(statemgr.getAllocator()) {}
const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V);
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment);
const GRState* AddEQ(const GRState* state, SymbolRef sym, const llvm::APSInt& V);
@@ -94,46 +100,52 @@ ConstraintManager* clang::CreateBasicConstraintManager(GRStateManager& statemgr,
return new BasicConstraintManager(statemgr, subengine);
}
+
const GRState*
BasicConstraintManager::AssumeSymNE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) {
- // First, determine if sym == X, where X != V.
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // First, determine if sym == X, where X+Adjustment != V.
+ llvm::APSInt Adjusted = V-Adjustment;
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = (*X != V);
+ bool isFeasible = (*X != Adjusted);
return isFeasible ? state : NULL;
}
- // Second, determine if sym != V.
- if (isNotEqual(state, sym, V))
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
return state;
// If we reach here, sym is not a constant and we don't know if it is != V.
// Make that assumption.
- return AddNE(state, sym, V);
+ return AddNE(state, sym, Adjusted);
}
-const GRState *BasicConstraintManager::AssumeSymEQ(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt &V) {
- // First, determine if sym == X, where X != V.
+const GRState*
+BasicConstraintManager::AssumeSymEQ(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // First, determine if sym == X, where X+Adjustment != V.
+ llvm::APSInt Adjusted = V-Adjustment;
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = *X == V;
+ bool isFeasible = (*X == Adjusted);
return isFeasible ? state : NULL;
}
- // Second, determine if sym != V.
- if (isNotEqual(state, sym, V))
+ // Second, determine if sym+Adjustment != V.
+ if (isNotEqual(state, sym, Adjusted))
return NULL;
// If we reach here, sym is not a constant and we don't know if it is == V.
// Make that assumption.
- return AddEQ(state, sym, V);
+ return AddEQ(state, sym, Adjusted);
}
-// These logic will be handled in another ConstraintManager.
-const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt& V) {
+// The logic for these will be handled in another ConstraintManager.
+const GRState*
+BasicConstraintManager::AssumeSymLT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
// Is 'V' the smallest possible value?
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value less than 'V'. This path is infeasible.
@@ -141,13 +153,13 @@ const GRState *BasicConstraintManager::AssumeSymLT(const GRState *state,
}
// FIXME: For now have assuming x < y be the same as assuming sym != V;
- return AssumeSymNE(state, sym, V);
+ return AssumeSymNE(state, sym, V, Adjustment);
}
-const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt& V) {
-
+const GRState*
+BasicConstraintManager::AssumeSymGT(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
// Is 'V' the largest possible value?
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
// sym cannot be any value greater than 'V'. This path is infeasible.
@@ -155,56 +167,60 @@ const GRState *BasicConstraintManager::AssumeSymGT(const GRState *state,
}
// FIXME: For now have assuming x > y be the same as assuming sym != V;
- return AssumeSymNE(state, sym, V);
+ return AssumeSymNE(state, sym, V, Adjustment);
}
-const GRState *BasicConstraintManager::AssumeSymGE(const GRState *state,
- SymbolRef sym,
- const llvm::APSInt &V) {
-
- // Reject a path if the value of sym is a constant X and !(X >= V).
+const GRState*
+BasicConstraintManager::AssumeSymGE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj >= V).
if (const llvm::APSInt *X = getSymVal(state, sym)) {
- bool isFeasible = *X >= V;
+ bool isFeasible = (*X >= V-Adjustment);
return isFeasible ? state : NULL;
}
// Sym is not a constant, but it is worth looking to see if V is the
// maximum integer value.
if (V == llvm::APSInt::getMaxValue(V.getBitWidth(), V.isUnsigned())) {
- // If we know that sym != V, then this condition is infeasible since
- // there is no other value greater than V.
- bool isFeasible = !isNotEqual(state, sym, V);
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value greater than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
// If the path is still feasible then as a consequence we know that
- // 'sym == V' because we cannot have 'sym > V' (no larger values).
+ // 'sym+Adjustment == V' because there are no larger values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, V) : NULL;
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
}
return state;
}
const GRState*
-BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
- const llvm::APSInt& V) {
-
- // Reject a path if the value of sym is a constant X and !(X <= V).
+BasicConstraintManager::AssumeSymLE(const GRState *state, SymbolRef sym,
+ const llvm::APSInt &V,
+ const llvm::APSInt &Adjustment) {
+ // Reject a path if the value of sym is a constant X and !(X+Adj <= V).
if (const llvm::APSInt* X = getSymVal(state, sym)) {
- bool isFeasible = *X <= V;
+ bool isFeasible = (*X <= V-Adjustment);
return isFeasible ? state : NULL;
}
// Sym is not a constant, but it is worth looking to see if V is the
// minimum integer value.
if (V == llvm::APSInt::getMinValue(V.getBitWidth(), V.isUnsigned())) {
- // If we know that sym != V, then this condition is infeasible since
- // there is no other value less than V.
- bool isFeasible = !isNotEqual(state, sym, V);
+ llvm::APSInt Adjusted = V-Adjustment;
+
+ // If we know that sym != V (after adjustment), then this condition
+ // is infeasible since there is no other value less than V.
+ bool isFeasible = !isNotEqual(state, sym, Adjusted);
// If the path is still feasible then as a consequence we know that
- // 'sym == V' because we cannot have 'sym < V' (no smaller values).
+ // 'sym+Adjustment == V' because there are no smaller values.
// Add this constraint.
- return isFeasible ? AddEQ(state, sym, V) : NULL;
+ return isFeasible ? AddEQ(state, sym, Adjusted) : NULL;
}
return state;
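A worked example of the new Adjustment parameter (a sketch of analyzed input): a branch condition of the form (sym + k) <op> V reaches these methods as sym, V, and Adjustment = k, and the recorded constraint is on sym alone.

void adjusted(int x) {
  // AssumeSymEQ is asked about sym = $x, V = 5, Adjustment = 3;
  // it stores the adjusted fact $x == 5 - 3 == 2 on the true branch
  // and $x != 2 on the false branch.
  if (x + 3 == 5) {
    // the analyzer now knows x == 2 here
  }
}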
@@ -213,7 +229,7 @@ BasicConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
const GRState* BasicConstraintManager::AddEQ(const GRState* state, SymbolRef sym,
const llvm::APSInt& V) {
// Create a new state with the old binding replaced.
- return state->set<ConstEq>(sym, &V);
+ return state->set<ConstEq>(sym, &state->getBasicVals().getValue(V));
}
const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym,
@@ -224,7 +240,7 @@ const GRState* BasicConstraintManager::AddNE(const GRState* state, SymbolRef sym
GRState::IntSetTy S = T ? *T : ISetFactory.GetEmptySet();
// Now add V to the NE set.
- S = ISetFactory.Add(S, &V);
+ S = ISetFactory.Add(S, &state->getBasicVals().getValue(V));
// Create a new state with the old binding replaced.
return state->set<ConstNotEq>(sym, S);
@@ -243,7 +259,7 @@ bool BasicConstraintManager::isNotEqual(const GRState* state, SymbolRef sym,
const ConstNotEqTy::data_type* T = state->get<ConstNotEq>(sym);
// See if V is present in the NE-set.
- return T ? T->contains(&V) : false;
+ return T ? T->contains(&state->getBasicVals().getValue(V)) : false;
}
bool BasicConstraintManager::isEqual(const GRState* state, SymbolRef sym,
diff --git a/lib/Checker/BasicObjCFoundationChecks.cpp b/lib/Checker/BasicObjCFoundationChecks.cpp
index b852e2ad9a48..ecb2d1c4e34b 100644
--- a/lib/Checker/BasicObjCFoundationChecks.cpp
+++ b/lib/Checker/BasicObjCFoundationChecks.cpp
@@ -415,59 +415,72 @@ clang::CreateAuditCFNumberCreate(ASTContext& Ctx, BugReporter& BR) {
}
//===----------------------------------------------------------------------===//
-// CFRetain/CFRelease auditing for null arguments.
+// CFRetain/CFRelease checking for null arguments.
//===----------------------------------------------------------------------===//
namespace {
-class AuditCFRetainRelease : public GRSimpleAPICheck {
+class CFRetainReleaseChecker : public CheckerVisitor<CFRetainReleaseChecker> {
APIMisuse *BT;
-
- // FIXME: Either this should be refactored into GRSimpleAPICheck, or
- // it should always be passed with a call to Audit. The latter
- // approach makes this class more stateless.
- ASTContext& Ctx;
IdentifierInfo *Retain, *Release;
- BugReporter& BR;
public:
- AuditCFRetainRelease(ASTContext& ctx, BugReporter& br)
- : BT(0), Ctx(ctx),
- Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease")),
- BR(br){}
+ CFRetainReleaseChecker(ASTContext& Ctx): BT(NULL),
+ Retain(&Ctx.Idents.get("CFRetain")), Release(&Ctx.Idents.get("CFRelease"))
+ {}
- ~AuditCFRetainRelease() {}
+ static void *getTag() { static int x = 0; return &x; }
- bool Audit(ExplodedNode* N, GRStateManager&);
+ void PreVisitCallExpr(CheckerContext& C, const CallExpr* CE);
};
} // end anonymous namespace
-bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
- const CallExpr* CE = cast<CallExpr>(cast<PostStmt>(N->getLocation()).getStmt());
-
+void CFRetainReleaseChecker::PreVisitCallExpr(CheckerContext& C,
+ const CallExpr* CE) {
// If the CallExpr doesn't have exactly 1 argument just give up checking.
if (CE->getNumArgs() != 1)
- return false;
+ return;
- // Check if we called CFRetain/CFRelease.
- const GRState* state = N->getState();
+ // Get the function declaration of the callee.
+ const GRState* state = C.getState();
SVal X = state->getSVal(CE->getCallee());
const FunctionDecl* FD = X.getAsFunctionDecl();
if (!FD)
- return false;
+ return;
+ // Check if we called CFRetain/CFRelease.
const IdentifierInfo *FuncII = FD->getIdentifier();
if (!(FuncII == Retain || FuncII == Release))
- return false;
+ return;
+
+ // FIXME: The rest of this just checks that the argument is non-null.
+ // It should probably be refactored and combined with AttrNonNullChecker.
+
+ // Get the argument's value.
+ const Expr *Arg = CE->getArg(0);
+ SVal ArgVal = state->getSVal(Arg);
+ DefinedSVal *DefArgVal = dyn_cast<DefinedSVal>(&ArgVal);
+ if (!DefArgVal)
+ return;
+
+ // Get a NULL value.
+ ValueManager &ValMgr = C.getValueManager();
+ DefinedSVal Zero = cast<DefinedSVal>(ValMgr.makeZeroVal(Arg->getType()));
+
+ // Make an expression asserting that they're equal.
+ SValuator &SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ArgIsNull = SVator.EvalEQ(state, Zero, *DefArgVal);
+
+ // Are they equal?
+ const GRState *stateTrue, *stateFalse;
+ llvm::tie(stateTrue, stateFalse) = state->Assume(ArgIsNull);
+
+ if (stateTrue && !stateFalse) {
+ ExplodedNode *N = C.GenerateSink(stateTrue);
+ if (!N)
+ return;
- // Finally, check if the argument is NULL.
- // FIXME: We should be able to bifurcate the state here, as a successful
- // check will result in the value not being NULL afterwards.
- // FIXME: Need a way to register vistors for the BugReporter. Would like
- // to benefit from the same diagnostics that regular null dereference
- // reporting has.
- if (state->getStateManager().isEqual(state, CE->getArg(0), 0)) {
if (!BT)
BT = new APIMisuse("null passed to CFRetain/CFRelease");
@@ -475,19 +488,16 @@ bool AuditCFRetainRelease::Audit(ExplodedNode* N, GRStateManager&) {
? "Null pointer argument in call to CFRetain"
: "Null pointer argument in call to CFRelease";
- RangedBugReport *report = new RangedBugReport(*BT, description, N);
- report->addRange(CE->getArg(0)->getSourceRange());
- BR.EmitReport(report);
- return true;
- }
-
- return false;
-}
+ EnhancedBugReport *report = new EnhancedBugReport(*BT, description, N);
+ report->addRange(Arg->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, Arg);
+ C.EmitReport(report);
+ return;
+ }
-GRSimpleAPICheck*
-clang::CreateAuditCFRetainRelease(ASTContext& Ctx, BugReporter& BR) {
- return new AuditCFRetainRelease(Ctx, BR);
+ // From here on, we know the argument is non-null.
+ C.addTransition(stateFalse);
}
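The pattern the rewritten checker flags, plus the post-condition it now records via addTransition (a sketch assuming a CoreFoundation environment):

#include <CoreFoundation/CoreFoundation.h>

void retain_release(CFTypeRef obj) {
  CFRetain(0);      // "Null pointer argument in call to CFRetain"
  CFRetain(obj);    // not flagged; afterwards 'obj' is assumed non-null,
  CFRelease(obj);   // so this call is not re-examined as possibly null
}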
//===----------------------------------------------------------------------===//
@@ -569,9 +579,10 @@ void clang::RegisterAppleChecks(GRExprEngine& Eng, const Decl &D) {
Eng.AddCheck(CreateBasicObjCFoundationChecks(Ctx, BR),
Stmt::ObjCMessageExprClass);
Eng.AddCheck(CreateAuditCFNumberCreate(Ctx, BR), Stmt::CallExprClass);
- Eng.AddCheck(CreateAuditCFRetainRelease(Ctx, BR), Stmt::CallExprClass);
RegisterNSErrorChecks(BR, Eng, D);
RegisterNSAutoreleasePoolChecks(Eng);
+
+ Eng.registerCheck(new CFRetainReleaseChecker(Ctx));
Eng.registerCheck(new ClassReleaseChecker(Ctx));
}
diff --git a/lib/Checker/BasicObjCFoundationChecks.h b/lib/Checker/BasicObjCFoundationChecks.h
index 679c6dc1df2d..8fb057087086 100644
--- a/lib/Checker/BasicObjCFoundationChecks.h
+++ b/lib/Checker/BasicObjCFoundationChecks.h
@@ -30,9 +30,6 @@ GRSimpleAPICheck *CreateBasicObjCFoundationChecks(ASTContext& Ctx,
GRSimpleAPICheck *CreateAuditCFNumberCreate(ASTContext& Ctx,
BugReporter& BR);
-GRSimpleAPICheck *CreateAuditCFRetainRelease(ASTContext& Ctx,
- BugReporter& BR);
-
void RegisterNSErrorChecks(BugReporter& BR, GRExprEngine &Eng, const Decl &D);
void RegisterNSAutoreleasePoolChecks(GRExprEngine &Eng);
diff --git a/lib/Checker/BasicStore.cpp b/lib/Checker/BasicStore.cpp
index 5be5ca615ed6..62c8d9c248ae 100644
--- a/lib/Checker/BasicStore.cpp
+++ b/lib/Checker/BasicStore.cpp
@@ -46,9 +46,14 @@ public:
SVal Retrieve(Store store, Loc loc, QualType T = QualType());
- Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
+ Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS);
+ Store InvalidateRegions(Store store, const MemRegion * const *Begin,
+ const MemRegion * const *End, const Expr *E,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals);
+
Store scanForIvars(Stmt *B, const Decl* SelfDecl,
const MemRegion *SelfRegion, Store St);
@@ -72,9 +77,9 @@ public:
/// RemoveDeadBindings - Scans a BasicStore of 'state' for dead values.
  /// It updates the GRState object in place with the values removed.
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
- const StackFrameContext *LCtx,
- SymbolReaper& SymReaper,
+ const GRState *RemoveDeadBindings(GRState &state,
+ const StackFrameContext *LCtx,
+ SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
void iterBindings(Store store, BindingsHandler& f);
@@ -144,9 +149,30 @@ SVal BasicStoreManager::LazyRetrieve(Store store, const TypedRegion *R) {
// Globals and parameters start with symbolic values.
// Local variables initially are undefined.
+
+ // Non-static globals may have had their values reset by InvalidateRegions.
+ const MemSpaceRegion *MS = VR->getMemorySpace();
+ if (isa<NonStaticGlobalSpaceRegion>(MS)) {
+ BindingsTy B = GetBindings(store);
+ // FIXME: Copy-and-pasted from RegionStore.cpp.
+ if (BindingsTy::data_type *Val = B.lookup(MS)) {
+ if (SymbolRef parentSym = Val->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (Val->isZeroConstant())
+ return ValMgr.makeZeroVal(T);
+
+ if (Val->isUnknownOrUndef())
+ return *Val;
+
+ assert(0 && "Unknown default value.");
+ }
+ }
+
if (VR->hasGlobalsOrParametersStorage() ||
isa<UnknownSpaceRegion>(VR->getMemorySpace()))
return ValMgr.getRegionValueSymbolVal(R);
+
return UndefinedVal();
}
@@ -194,6 +220,14 @@ Store BasicStoreManager::Bind(Store store, Loc loc, SVal V) {
return store;
const MemRegion* R = cast<loc::MemRegionVal>(loc).getRegion();
+
+ // Special case: a default symbol assigned to the NonStaticGlobalsSpaceRegion
+ // that is used to derive other symbols.
+ if (isa<NonStaticGlobalSpaceRegion>(R)) {
+ BindingsTy B = GetBindings(store);
+ return VBFactory.Add(B, R, V).getRoot();
+ }
+
ASTContext &C = StateMgr.getContext();
// Special case: handle store of pointer values (Loc) to pointers via
@@ -251,7 +285,7 @@ Store BasicStoreManager::Remove(Store store, Loc loc) {
}
}
-const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
@@ -263,14 +297,14 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
// Iterate over the variable bindings.
for (BindingsTy::iterator I=B.begin(), E=B.end(); I!=E ; ++I) {
if (const VarRegion *VR = dyn_cast<VarRegion>(I.getKey())) {
- if (SymReaper.isLive(Loc, VR))
+ if (SymReaper.isLive(VR))
RegionRoots.push_back(VR);
else
continue;
}
- else if (isa<ObjCIvarRegion>(I.getKey())) {
+ else if (isa<ObjCIvarRegion>(I.getKey()) ||
+ isa<NonStaticGlobalSpaceRegion>(I.getKey()))
RegionRoots.push_back(I.getKey());
- }
else
continue;
@@ -292,7 +326,8 @@ const GRState *BasicStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
SymReaper.markLive(SymR->getSymbol());
break;
}
- else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR)) {
+ else if (isa<VarRegion>(MR) || isa<ObjCIvarRegion>(MR) ||
+ isa<NonStaticGlobalSpaceRegion>(MR)) {
if (Marked.count(MR))
break;
@@ -475,7 +510,8 @@ void BasicStoreManager::iterBindings(Store store, BindingsHandler& f) {
BindingsTy B = GetBindings(store);
for (BindingsTy::iterator I=B.begin(), E=B.end(); I != E; ++I)
- f.HandleBinding(*this, store, I.getKey(), I.getData());
+ if (!f.HandleBinding(*this, store, I.getKey(), I.getData()))
+ return;
}
@@ -485,6 +521,49 @@ StoreManager::BindingsHandler::~BindingsHandler() {}
// Binding invalidation.
//===----------------------------------------------------------------------===//
+
+Store BasicStoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *End,
+ const Expr *E, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
+ if (invalidateGlobals) {
+ BindingsTy B = GetBindings(store);
+ for (BindingsTy::iterator I=B.begin(), End=B.end(); I != End; ++I) {
+ const MemRegion *R = I.getKey();
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ store = InvalidateRegion(store, R, E, Count, IS);
+ }
+ }
+
+ for ( ; I != End ; ++I) {
+ const MemRegion *R = *I;
+ // Don't invalidate globals twice.
+ if (invalidateGlobals) {
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ continue;
+ }
+ store = InvalidateRegion(store, *I, E, Count, IS);
+ }
+
+ // FIXME: This is copy-and-paste from RegionStore.cpp.
+ if (invalidateGlobals) {
+ // Bind the non-static globals memory space to a new symbol that we will
+ // use to derive the bindings for all non-static globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
+ SVal V =
+ ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, E,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+
+ store = Bind(store, loc::MemRegionVal(GS), V);
+ }
+
+ return store;
+}
+
+
Store BasicStoreManager::InvalidateRegion(Store store,
const MemRegion *R,
const Expr *E,
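
Seen from the analyzed program, the new invalidateGlobals path in BasicStore means that a call whose effects are unknown wipes out the bindings of non-static globals and rebinds the globals memory space to a fresh conjured symbol, from which later reads are derived. An illustrative example of the code shape this models (plain C-style code, also valid C++):

  int g;                      // non-static global: NonStaticGlobalSpaceRegion
  void unknown(void);         // no body available to the analyzer

  int test(void) {
    g = 1;
    unknown();                // may write to g: globals are invalidated here
    return g;                 // no longer known to be 1; the value is derived
  }                           // from the conjured globals-space symbol
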
diff --git a/lib/Checker/BugReporter.cpp b/lib/Checker/BugReporter.cpp
index 3bcc03f4f29c..0422d80ae26d 100644
--- a/lib/Checker/BugReporter.cpp
+++ b/lib/Checker/BugReporter.cpp
@@ -925,7 +925,7 @@ public:
// statement (if it doesn't already exist).
// FIXME: Should handle CXXTryStmt if analyser starts supporting C++.
if (const CompoundStmt *CS =
- PDB.getCodeDecl().getCompoundBody())
+ dyn_cast_or_null<CompoundStmt>(PDB.getCodeDecl().getBody()))
if (!CS->body_empty()) {
SourceLocation Loc = (*CS->body_begin())->getLocStart();
rawAddEdge(PathDiagnosticLocation(Loc, PDB.getSourceManager()));
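
The hunk above swaps getCompoundBody() for dyn_cast_or_null<CompoundStmt>(getBody()); unlike dyn_cast<>, the _or_null variant tolerates a null input, which covers declarations that have no body at all. A hypothetical usage, with D standing for some const Decl*:

  if (const CompoundStmt *CS =
          llvm::dyn_cast_or_null<CompoundStmt>(D->getBody())) {
    // Reached only when D has a body and that body is a CompoundStmt.
    (void)CS;
  }
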
@@ -1403,7 +1403,7 @@ MakeReportGraph(const ExplodedGraph* G,
// Create a new (third!) graph with a single path. This is the graph
// that will be returned to the caller.
- ExplodedGraph *GNew = new ExplodedGraph(GTrim->getContext());
+ ExplodedGraph *GNew = new ExplodedGraph();
// Sometimes the trimmed graph can contain a cycle. Perform a reverse BFS
// to the root node, and then construct a new graph that contains only
diff --git a/lib/Checker/BuiltinFunctionChecker.cpp b/lib/Checker/BuiltinFunctionChecker.cpp
index 9c8b51657b26..057e474626f6 100644
--- a/lib/Checker/BuiltinFunctionChecker.cpp
+++ b/lib/Checker/BuiltinFunctionChecker.cpp
@@ -57,15 +57,24 @@ bool BuiltinFunctionChecker::EvalCallExpr(CheckerContext &C,const CallExpr *CE){
case Builtin::BI__builtin_alloca: {
// FIXME: Refactor into StoreManager itself?
MemRegionManager& RM = C.getStoreManager().getRegionManager();
- const MemRegion* R =
+ const AllocaRegion* R =
RM.getAllocaRegion(CE, C.getNodeBuilder().getCurrentBlockCount(),
C.getPredecessor()->getLocationContext());
// Set the extent of the region in bytes. This enables us to use the
// SVal of the argument directly. If we save the extent in bits, we
// cannot represent values like symbol*8.
- SVal Extent = state->getSVal(*(CE->arg_begin()));
- state = C.getStoreManager().setExtent(state, R, Extent);
+ DefinedOrUnknownSVal Size =
+ cast<DefinedOrUnknownSVal>(state->getSVal(*(CE->arg_begin())));
+
+ ValueManager& ValMgr = C.getValueManager();
+ DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+
+ SValuator& SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ExtentMatchesSizeArg =
+ SVator.EvalEQ(state, Extent, Size);
+ state = state->Assume(ExtentMatchesSizeArg, true);
+
C.GenerateNode(state->BindExpr(CE, loc::MemRegionVal(R)));
return true;
}
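
Instead of recording the alloca size as a separate extent, the hunk above constrains the AllocaRegion's symbolic extent to equal the size argument, and keeps it in bytes for the reason the comment gives. An illustrative example of what that buys on the analyzed side (uses the same __builtin_alloca named in the hunk):

  void use(unsigned n) {
    char *p = (char *)__builtin_alloca(n);  // extent of p's region == n bytes
    if (n >= 1)
      p[0] = 0;                             // provably in bounds
    // An access like p[n] is one past the end; with the extent expressed as
    // the byte value n (not n * 8), a bounds check can reason about it
    // symbolically.
  }
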
diff --git a/lib/Checker/CFRefCount.cpp b/lib/Checker/CFRefCount.cpp
index 42e6f67f0172..3c74cd8f9b27 100644
--- a/lib/Checker/CFRefCount.cpp
+++ b/lib/Checker/CFRefCount.cpp
@@ -228,111 +228,111 @@ public:
ErrorOverAutorelease,
ErrorReturnedNotOwned
};
-
+
private:
Kind kind;
RetEffect::ObjKind okind;
unsigned Cnt;
unsigned ACnt;
QualType T;
-
+
RefVal(Kind k, RetEffect::ObjKind o, unsigned cnt, unsigned acnt, QualType t)
: kind(k), okind(o), Cnt(cnt), ACnt(acnt), T(t) {}
-
+
RefVal(Kind k, unsigned cnt = 0)
: kind(k), okind(RetEffect::AnyObj), Cnt(cnt), ACnt(0) {}
-
+
public:
Kind getKind() const { return kind; }
-
+
RetEffect::ObjKind getObjKind() const { return okind; }
-
+
unsigned getCount() const { return Cnt; }
unsigned getAutoreleaseCount() const { return ACnt; }
unsigned getCombinedCounts() const { return Cnt + ACnt; }
void clearCounts() { Cnt = 0; ACnt = 0; }
void setCount(unsigned i) { Cnt = i; }
void setAutoreleaseCount(unsigned i) { ACnt = i; }
-
+
QualType getType() const { return T; }
-
+
// Useful predicates.
-
+
static bool isError(Kind k) { return k >= ERROR_START; }
-
+
static bool isLeak(Kind k) { return k >= ERROR_LEAK_START; }
-
+
bool isOwned() const {
return getKind() == Owned;
}
-
+
bool isNotOwned() const {
return getKind() == NotOwned;
}
-
+
bool isReturnedOwned() const {
return getKind() == ReturnedOwned;
}
-
+
bool isReturnedNotOwned() const {
return getKind() == ReturnedNotOwned;
}
-
+
bool isNonLeakError() const {
Kind k = getKind();
return isError(k) && !isLeak(k);
}
-
+
static RefVal makeOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 1) {
return RefVal(Owned, o, Count, 0, t);
}
-
+
static RefVal makeNotOwned(RetEffect::ObjKind o, QualType t,
unsigned Count = 0) {
return RefVal(NotOwned, o, Count, 0, t);
}
-
+
// Comparison, profiling, and pretty-printing.
-
+
bool operator==(const RefVal& X) const {
return kind == X.kind && Cnt == X.Cnt && T == X.T && ACnt == X.ACnt;
}
-
+
RefVal operator-(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() - i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator+(size_t i) const {
return RefVal(getKind(), getObjKind(), getCount() + i,
getAutoreleaseCount(), getType());
}
-
+
RefVal operator^(Kind k) const {
return RefVal(k, getObjKind(), getCount(), getAutoreleaseCount(),
getType());
}
-
+
RefVal autorelease() const {
return RefVal(getKind(), getObjKind(), getCount(), getAutoreleaseCount()+1,
getType());
}
-
+
void Profile(llvm::FoldingSetNodeID& ID) const {
ID.AddInteger((unsigned) kind);
ID.AddInteger(Cnt);
ID.AddInteger(ACnt);
ID.Add(T);
}
-
+
void print(llvm::raw_ostream& Out) const;
};
void RefVal::print(llvm::raw_ostream& Out) const {
if (!T.isNull())
Out << "Tracked Type:" << T.getAsString() << '\n';
-
+
switch (getKind()) {
default: assert(false);
case Owned: {
@@ -341,69 +341,69 @@ void RefVal::print(llvm::raw_ostream& Out) const {
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case NotOwned: {
Out << "NotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedOwned: {
Out << "ReturnedOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case ReturnedNotOwned: {
Out << "ReturnedNotOwned";
unsigned cnt = getCount();
if (cnt) Out << " (+ " << cnt << ")";
break;
}
-
+
case Released:
Out << "Released";
break;
-
+
case ErrorDeallocGC:
Out << "-dealloc (GC)";
break;
-
+
case ErrorDeallocNotOwned:
Out << "-dealloc (not-owned)";
break;
-
+
case ErrorLeak:
Out << "Leaked";
break;
-
+
case ErrorLeakReturned:
Out << "Leaked (Bad naming)";
break;
-
+
case ErrorGCLeakReturned:
Out << "Leaked (GC-ed at return)";
break;
-
+
case ErrorUseAfterRelease:
Out << "Use-After-Release [ERROR]";
break;
-
+
case ErrorReleaseNotOwned:
Out << "Release of Not-Owned [ERROR]";
break;
-
+
case RefVal::ErrorOverAutorelease:
Out << "Over autoreleased";
break;
-
+
case RefVal::ErrorReturnedNotOwned:
Out << "Non-owned object returned instead of owned";
break;
}
-
+
if (ACnt) {
Out << " [ARC +" << ACnt << ']';
}
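
Although these RefVal hunks only strip trailing whitespace, they show the vocabulary the transfer functions work with: '+' and '-' adjust the retain count, '^' rebinds the kind, and autorelease() bumps the autorelease count. A small usage sketch; T stands for some tracked QualType and the sequence is purely illustrative:

  RefVal V = RefVal::makeOwned(RetEffect::CF, T);  // Owned, count == 1
  V = V + 1;                                       // e.g. CFRetain: count == 2
  V = V - 1;                                       // e.g. CFRelease: count == 1
  V = V.autorelease();                             // autorelease count == 1
  RefVal Rel = V ^ RefVal::Released;               // same counts, kind changed
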
@@ -897,7 +897,7 @@ public:
RetainSummary *getInstanceMethodSummary(const ObjCMessageExpr *ME,
const GRState *state,
const LocationContext *LC);
-
+
RetainSummary* getInstanceMethodSummary(const ObjCMessageExpr* ME,
const ObjCInterfaceDecl* ID) {
return getInstanceMethodSummary(ME->getSelector(), 0,
@@ -927,7 +927,7 @@ public:
break;
}
- return getClassMethodSummary(ME->getSelector(),
+ return getClassMethodSummary(ME->getSelector(),
Class? Class->getIdentifier() : 0,
Class,
ME->getMethodDecl(), ME->getType());
@@ -1419,16 +1419,16 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
if (Receiver) {
receiverV = state->getSValAsScalarOrLoc(Receiver);
-
+
// FIXME: Eventually replace the use of state->get<RefBindings> with
// a generic API for reasoning about the Objective-C types of symbolic
// objects.
if (SymbolRef Sym = receiverV.getAsLocSymbol())
if (const RefVal *T = state->get<RefBindings>(Sym))
- if (const ObjCObjectPointerType* PT =
+ if (const ObjCObjectPointerType* PT =
T->getType()->getAs<ObjCObjectPointerType>())
ID = PT->getInterfaceDecl();
-
+
// FIXME: this is a hack. This may or may not be the actual method
// that is called.
if (!ID) {
@@ -1444,7 +1444,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
// FIXME: The receiver could be a reference to a class, meaning that
// we should use the class method.
RetainSummary *Summ = getInstanceMethodSummary(ME, ID);
-
+
    // Special-case: are we sending a message to "self"?
// This is a hack. When we have full-IP this should be removed.
if (isa<ObjCMethodDecl>(LC->getDecl()) && Receiver) {
@@ -1461,7 +1461,7 @@ RetainSummaryManager::getInstanceMethodSummary(const ObjCMessageExpr *ME,
}
}
}
-
+
return Summ ? Summ : getDefaultSummary();
}
@@ -1849,7 +1849,7 @@ public:
GRExprEngine& Engine,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
- Stmt* S, const GRState* state,
+ const GRState* state,
SymbolReaper& SymReaper);
std::pair<ExplodedNode*, const GRState *>
@@ -2619,7 +2619,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
SymbolRef ErrorSym = 0;
llvm::SmallVector<const MemRegion*, 10> RegionsToInvalidate;
-
+
for (ExprIterator I = arg_beg; I != arg_end; ++I, ++idx) {
SVal V = state->getSValAsScalarOrLoc(*I);
SymbolRef Sym = V.getAsLocSymbol();
@@ -2659,7 +2659,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
  // appropriately delegated to the respective StoreManagers while
// still allowing us to do checker-specific logic (e.g.,
// invalidating reference counts), probably via callbacks.
- if (ER->getElementType()->isIntegralType()) {
+ if (ER->getElementType()->isIntegralOrEnumerationType()) {
const MemRegion *superReg = ER->getSuperRegion();
if (isa<VarRegion>(superReg) || isa<FieldRegion>(superReg) ||
isa<ObjCIvarRegion>(superReg))
@@ -2667,7 +2667,7 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
}
// FIXME: What about layers of ElementRegions?
}
-
+
// Mark this region for invalidation. We batch invalidate regions
// below for efficiency.
RegionsToInvalidate.push_back(R);
@@ -2687,37 +2687,39 @@ void CFRefCount::EvalSummary(ExplodedNodeSet& Dst,
goto tryAgain;
}
}
-
+
// Block calls result in all captured values passed-via-reference to be
// invalidated.
if (const BlockDataRegion *BR = dyn_cast_or_null<BlockDataRegion>(Callee)) {
RegionsToInvalidate.push_back(BR);
}
-
+
  // Invalidate the regions we designated for invalidation using the batch
  // invalidation API.
- if (!RegionsToInvalidate.empty()) {
- // FIXME: We can have collisions on the conjured symbol if the
- // expression *I also creates conjured symbols. We probably want
- // to identify conjured symbols by an expression pair: the enclosing
- // expression (the context) and the expression itself. This should
- // disambiguate conjured symbols.
- unsigned Count = Builder.getCurrentBlockCount();
- StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
-
-
- StoreManager::InvalidatedSymbols IS;
- Store store = state->getStore();
- store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(),
- RegionsToInvalidate.data() +
- RegionsToInvalidate.size(),
- Ex, Count, &IS);
- state = state->makeWithStore(store);
- for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
- E = IS.end(); I!=E; ++I) {
- // Remove any existing reference-count binding.
- state = state->remove<RefBindings>(*I);
- }
+
+ // FIXME: We can have collisions on the conjured symbol if the
+ // expression *I also creates conjured symbols. We probably want
+ // to identify conjured symbols by an expression pair: the enclosing
+ // expression (the context) and the expression itself. This should
+ // disambiguate conjured symbols.
+ unsigned Count = Builder.getCurrentBlockCount();
+ StoreManager& StoreMgr = Eng.getStateManager().getStoreManager();
+ StoreManager::InvalidatedSymbols IS;
+ Store store = state->getStore();
+
+ // NOTE: Even if RegionsToInvalidate is empty, we must still invalidate
+ // global variables.
+ store = StoreMgr.InvalidateRegions(store, RegionsToInvalidate.data(),
+ RegionsToInvalidate.data() +
+ RegionsToInvalidate.size(),
+ Ex, Count, &IS,
+ /* invalidateGlobals = */ true);
+
+ state = state->makeWithStore(store);
+ for (StoreManager::InvalidatedSymbols::iterator I = IS.begin(),
+ E = IS.end(); I!=E; ++I) {
+ // Remove any existing reference-count binding.
+ state = state->remove<RefBindings>(*I);
}
// Evaluate the effect on the message receiver.
@@ -2862,7 +2864,7 @@ void CFRefCount::EvalCall(ExplodedNodeSet& Dst,
ExplodedNode* Pred) {
RetainSummary *Summ = 0;
-
+
// FIXME: Better support for blocks. For now we stop tracking anything
// that is passed to blocks.
// FIXME: Need to handle variables that are "captured" by the block.
@@ -3400,10 +3402,9 @@ void CFRefCount::EvalDeadSymbols(ExplodedNodeSet& Dst,
GRExprEngine& Eng,
GRStmtNodeBuilder& Builder,
ExplodedNode* Pred,
- Stmt* S,
const GRState* state,
SymbolReaper& SymReaper) {
-
+ Stmt *S = Builder.getStmt();
RefBindings B = state->get<RefBindings>();
// Update counts from autorelease pools
@@ -3501,7 +3502,7 @@ class RetainReleaseChecker
public:
RetainReleaseChecker(CFRefCount *tf) : TF(tf) {}
static void* getTag() { static int x = 0; return &x; }
-
+
void PostVisitBlockExpr(CheckerContext &C, const BlockExpr *BE);
};
} // end anonymous namespace
@@ -3509,29 +3510,29 @@ public:
void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
const BlockExpr *BE) {
-
+
// Scan the BlockDecRefExprs for any object the retain/release checker
- // may be tracking.
+ // may be tracking.
if (!BE->hasBlockDeclRefExprs())
return;
-
+
const GRState *state = C.getState();
const BlockDataRegion *R =
cast<BlockDataRegion>(state->getSVal(BE).getAsRegion());
-
+
BlockDataRegion::referenced_vars_iterator I = R->referenced_vars_begin(),
E = R->referenced_vars_end();
-
+
if (I == E)
return;
-
+
// FIXME: For now we invalidate the tracking of all symbols passed to blocks
// via captured variables, even though captured variables result in a copy
// and in implicit increment/decrement of a retain count.
llvm::SmallVector<const MemRegion*, 10> Regions;
const LocationContext *LC = C.getPredecessor()->getLocationContext();
MemRegionManager &MemMgr = C.getValueManager().getRegionManager();
-
+
for ( ; I != E; ++I) {
const VarRegion *VR = *I;
if (VR->getSuperRegion() == R) {
@@ -3539,7 +3540,7 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
}
Regions.push_back(VR);
}
-
+
state =
state->scanReachableSymbols<StopTrackingCallback>(Regions.data(),
Regions.data() + Regions.size()).getState();
@@ -3552,28 +3553,28 @@ void RetainReleaseChecker::PostVisitBlockExpr(CheckerContext &C,
void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
BugReporter &BR = Eng.getBugReporter();
-
+
useAfterRelease = new UseAfterRelease(this);
BR.Register(useAfterRelease);
-
+
releaseNotOwned = new BadRelease(this);
BR.Register(releaseNotOwned);
-
+
deallocGC = new DeallocGC(this);
BR.Register(deallocGC);
-
+
deallocNotOwned = new DeallocNotOwned(this);
BR.Register(deallocNotOwned);
-
+
overAutorelease = new OverAutorelease(this);
BR.Register(overAutorelease);
-
+
returnNotOwnedForOwned = new ReturnedNotOwnedForOwned(this);
BR.Register(returnNotOwnedForOwned);
-
+
// First register "return" leaks.
const char* name = 0;
-
+
if (isGCEnabled())
name = "Leak of returned object when using garbage collection";
else if (getLangOptions().getGCMode() == LangOptions::HybridGC)
@@ -3583,12 +3584,12 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak of returned object";
}
-
+
// Leaks should not be reported if they are post-dominated by a sink.
leakAtReturn = new LeakAtReturn(this, name);
leakAtReturn->setSuppressOnSink(true);
BR.Register(leakAtReturn);
-
+
// Second, register leaks within a function/method.
if (isGCEnabled())
name = "Leak of object when using garbage collection";
@@ -3599,15 +3600,15 @@ void CFRefCount::RegisterChecks(GRExprEngine& Eng) {
assert(getLangOptions().getGCMode() == LangOptions::NonGC);
name = "Leak";
}
-
+
// Leaks should not be reported if they are post-dominated by sinks.
leakWithinFunction = new LeakWithinFunction(this, name);
leakWithinFunction->setSuppressOnSink(true);
BR.Register(leakWithinFunction);
-
+
// Save the reference to the BugReporter.
this->BR = &BR;
-
+
// Register the RetainReleaseChecker with the GRExprEngine object.
// Functionality in CFRefCount will be migrated to RetainReleaseChecker
// over time.
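
The EvalSummary change in this file now batch-invalidates on every call, including non-static globals, and drops the RefBindings entry of every invalidated symbol. The motivation is easiest to see from analyzed Core Foundation code; the example below is illustrative and assumes the CoreFoundation headers are available:

  #include <CoreFoundation/CoreFoundation.h>

  extern void opaque(CFTypeRef *slot);   // body unknown to the analyzer

  void test(CFAllocatorRef alloc) {
    CFMutableArrayRef a = CFArrayCreateMutable(alloc, 0, NULL);  // +1, tracked
    opaque((CFTypeRef *)&a);  // storage of 'a' is invalidated; its symbol is
                              // removed from RefBindings, so no spurious leak
                              // or over-release is reported afterwards
    if (a)
      CFRelease(a);
  }
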
diff --git a/lib/Checker/CMakeLists.txt b/lib/Checker/CMakeLists.txt
index 9c6adc6bf2ee..259346a97068 100644
--- a/lib/Checker/CMakeLists.txt
+++ b/lib/Checker/CMakeLists.txt
@@ -3,6 +3,7 @@ set(LLVM_NO_RTTI 1)
add_clang_library(clangChecker
AdjustedReturnValueChecker.cpp
AggExprVisitor.cpp
+ AnalysisConsumer.cpp
ArrayBoundChecker.cpp
AttrNonNullChecker.cpp
BasicConstraintManager.cpp
@@ -12,63 +13,70 @@ add_clang_library(clangChecker
BugReporter.cpp
BugReporterVisitors.cpp
BuiltinFunctionChecker.cpp
+ CFRefCount.cpp
CallAndMessageChecker.cpp
CallInliner.cpp
CastSizeChecker.cpp
CastToStructChecker.cpp
- CFRefCount.cpp
CheckDeadStores.cpp
- Checker.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
CheckSecuritySyntaxOnly.cpp
CheckSizeofPointer.cpp
+ Checker.cpp
CocoaConventions.cpp
+ CStringChecker.cpp
DereferenceChecker.cpp
DivZeroChecker.cpp
Environment.cpp
ExplodedGraph.cpp
FixedAddressChecker.cpp
FlatStore.cpp
+ FrontendActions.cpp
GRBlockCounter.cpp
- GRCoreEngine.cpp
GRCXXExprEngine.cpp
+ GRCoreEngine.cpp
GRExprEngine.cpp
GRExprEngineExperimentalChecks.cpp
GRState.cpp
+ HTMLDiagnostics.cpp
+ IdempotentOperationChecker.cpp
LLVMConventionsChecker.cpp
MacOSXAPIChecker.cpp
MallocChecker.cpp
ManagerRegistry.cpp
MemRegion.cpp
- NoReturnFunctionChecker.cpp
NSAutoreleasePoolChecker.cpp
NSErrorChecker.cpp
- ObjCUnusedIVarsChecker.cpp
+ NoReturnFunctionChecker.cpp
OSAtomicChecker.cpp
+ ObjCUnusedIVarsChecker.cpp
PathDiagnostic.cpp
+ PlistDiagnostics.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
PthreadLockChecker.cpp
RangeConstraintManager.cpp
RegionStore.cpp
ReturnPointerRangeChecker.cpp
- ReturnStackAddressChecker.cpp
ReturnUndefChecker.cpp
+ SVals.cpp
+ SValuator.cpp
SimpleConstraintManager.cpp
SimpleSValuator.cpp
+ StackAddrLeakChecker.cpp
Store.cpp
- SVals.cpp
- SValuator.cpp
+ StreamChecker.cpp
SymbolManager.cpp
UndefBranchChecker.cpp
UndefCapturedBlockVarChecker.cpp
+ UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
- UndefResultChecker.cpp
UnixAPIChecker.cpp
- ValueManager.cpp
VLASizeChecker.cpp
+ ValueManager.cpp
)
-add_dependencies(clangChecker ClangStmtNodes)
+add_dependencies(clangChecker ClangAttrClasses ClangAttrList ClangDeclNodes
+ ClangStmtNodes)
diff --git a/lib/Checker/CStringChecker.cpp b/lib/Checker/CStringChecker.cpp
new file mode 100644
index 000000000000..a92d409703a3
--- /dev/null
+++ b/lib/Checker/CStringChecker.cpp
@@ -0,0 +1,525 @@
+//= CStringChecker.cpp - Checks calls to C string functions --------*- C++ -*-//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines CStringChecker, which is an assortment of checks on calls
+// to functions in <string.h>.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+
+namespace {
+class CStringChecker : public CheckerVisitor<CStringChecker> {
+ BugType *BT_Null, *BT_Bounds, *BT_Overlap;
+public:
+ CStringChecker()
+ : BT_Null(0), BT_Bounds(0), BT_Overlap(0) {}
+ static void *getTag() { static int tag; return &tag; }
+
+ bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+
+ typedef void (CStringChecker::*FnCheck)(CheckerContext &, const CallExpr *);
+
+ void EvalMemcpy(CheckerContext &C, const CallExpr *CE);
+ void EvalMemmove(CheckerContext &C, const CallExpr *CE);
+ void EvalBcopy(CheckerContext &C, const CallExpr *CE);
+ void EvalCopyCommon(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *Source, const Expr *Dest,
+ bool Restricted = false);
+
+ void EvalMemcmp(CheckerContext &C, const CallExpr *CE);
+
+ // Utility methods
+ std::pair<const GRState*, const GRState*>
+ AssumeZero(CheckerContext &C, const GRState *state, SVal V, QualType Ty);
+
+ const GRState *CheckNonNull(CheckerContext &C, const GRState *state,
+ const Expr *S, SVal l);
+ const GRState *CheckLocation(CheckerContext &C, const GRState *state,
+ const Expr *S, SVal l);
+ const GRState *CheckBufferAccess(CheckerContext &C, const GRState *state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf = NULL);
+ const GRState *CheckOverlap(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *First,
+ const Expr *Second);
+ void EmitOverlapBug(CheckerContext &C, const GRState *state,
+ const Stmt *First, const Stmt *Second);
+};
+} //end anonymous namespace
+
+void clang::RegisterCStringChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new CStringChecker());
+}
+
+//===----------------------------------------------------------------------===//
+// Individual checks and utility methods.
+//===----------------------------------------------------------------------===//
+
+std::pair<const GRState*, const GRState*>
+CStringChecker::AssumeZero(CheckerContext &C, const GRState *state, SVal V,
+ QualType Ty) {
+ DefinedSVal *Val = dyn_cast<DefinedSVal>(&V);
+ if (!Val)
+ return std::pair<const GRState*, const GRState *>(state, state);
+
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+
+ DefinedOrUnknownSVal Zero = ValMgr.makeZeroVal(Ty);
+ DefinedOrUnknownSVal ValIsZero = SV.EvalEQ(state, *Val, Zero);
+
+ return state->Assume(ValIsZero);
+}
+
+const GRState *CStringChecker::CheckNonNull(CheckerContext &C,
+ const GRState *state,
+ const Expr *S, SVal l) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ const GRState *stateNull, *stateNonNull;
+ llvm::tie(stateNull, stateNonNull) = AssumeZero(C, state, l, S->getType());
+
+ if (stateNull && !stateNonNull) {
+ ExplodedNode *N = C.GenerateSink(stateNull);
+ if (!N)
+ return NULL;
+
+ if (!BT_Null)
+ BT_Null = new BuiltinBug("API",
+ "Null pointer argument in call to byte string function");
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Null);
+ EnhancedBugReport *report = new EnhancedBugReport(*BT,
+ BT->getDescription(), N);
+
+ report->addRange(S->getSourceRange());
+ report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, S);
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // From here on, assume that the value is non-null.
+ assert(stateNonNull);
+ return stateNonNull;
+}
+
+// FIXME: This was originally copied from ArrayBoundChecker.cpp. Refactor?
+const GRState *CStringChecker::CheckLocation(CheckerContext &C,
+ const GRState *state,
+ const Expr *S, SVal l) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ // Check for out of bound array element access.
+ const MemRegion *R = l.getAsRegion();
+ if (!R)
+ return state;
+
+ const ElementRegion *ER = dyn_cast<ElementRegion>(R);
+ if (!ER)
+ return state;
+
+ assert(ER->getValueType(C.getASTContext()) == C.getASTContext().CharTy &&
+ "CheckLocation should only be called with char* ElementRegions");
+
+ // Get the size of the array.
+ const SubRegion *Super = cast<SubRegion>(ER->getSuperRegion());
+ ValueManager &ValMgr = C.getValueManager();
+ SVal Extent = ValMgr.convertToArrayIndex(Super->getExtent(ValMgr));
+ DefinedOrUnknownSVal Size = cast<DefinedOrUnknownSVal>(Extent);
+
+ // Get the index of the accessed element.
+ DefinedOrUnknownSVal &Idx = cast<DefinedOrUnknownSVal>(ER->getIndex());
+
+ const GRState *StInBound = state->AssumeInBound(Idx, Size, true);
+ const GRState *StOutBound = state->AssumeInBound(Idx, Size, false);
+ if (StOutBound && !StInBound) {
+ ExplodedNode *N = C.GenerateSink(StOutBound);
+ if (!N)
+ return NULL;
+
+ if (!BT_Bounds)
+ BT_Bounds = new BuiltinBug("Out-of-bound array access",
+ "Byte string function accesses out-of-bound array element "
+ "(buffer overflow)");
+
+ // FIXME: It would be nice to eventually make this diagnostic more clear,
+ // e.g., by referencing the original declaration or by saying *why* this
+ // reference is outside the range.
+
+ // Generate a report for this bug.
+ BuiltinBug *BT = static_cast<BuiltinBug*>(BT_Bounds);
+ RangedBugReport *report = new RangedBugReport(*BT, BT->getDescription(), N);
+
+ report->addRange(S->getSourceRange());
+ C.EmitReport(report);
+ return NULL;
+ }
+
+ // Array bound check succeeded. From this point forward the array bound
+ // should always succeed.
+ return StInBound;
+}
+
+const GRState *CStringChecker::CheckBufferAccess(CheckerContext &C,
+ const GRState *state,
+ const Expr *Size,
+ const Expr *FirstBuf,
+ const Expr *SecondBuf) {
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ValueManager &VM = C.getValueManager();
+ SValuator &SV = VM.getSValuator();
+ ASTContext &Ctx = C.getASTContext();
+
+ QualType SizeTy = Ctx.getSizeType();
+ QualType PtrTy = Ctx.getPointerType(Ctx.CharTy);
+
+ // Check that the first buffer is non-null.
+ SVal BufVal = state->getSVal(FirstBuf);
+ state = CheckNonNull(C, state, FirstBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ // Get the access length and make sure it is known.
+ SVal LengthVal = state->getSVal(Size);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Compute the offset of the last element to be accessed: size-1.
+ NonLoc One = cast<NonLoc>(VM.makeIntVal(1, SizeTy));
+ NonLoc LastOffset = cast<NonLoc>(SV.EvalBinOpNN(state, BinaryOperator::Sub,
+ *Length, One, SizeTy));
+
+  // Check that the first buffer is sufficiently long.
+ Loc BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, FirstBuf->getType()));
+ SVal BufEnd
+ = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy);
+ state = CheckLocation(C, state, FirstBuf, BufEnd);
+
+ // If the buffer isn't large enough, abort.
+ if (!state)
+ return NULL;
+
+ // If there's a second buffer, check it as well.
+ if (SecondBuf) {
+ BufVal = state->getSVal(SecondBuf);
+ state = CheckNonNull(C, state, SecondBuf, BufVal);
+ if (!state)
+ return NULL;
+
+ BufStart = cast<Loc>(SV.EvalCast(BufVal, PtrTy, SecondBuf->getType()));
+ BufEnd
+ = SV.EvalBinOpLN(state, BinaryOperator::Add, BufStart, LastOffset, PtrTy);
+ state = CheckLocation(C, state, SecondBuf, BufEnd);
+ }
+
+ // Large enough or not, return this state!
+ return state;
+}
+
+const GRState *CStringChecker::CheckOverlap(CheckerContext &C,
+ const GRState *state,
+ const Expr *Size,
+ const Expr *First,
+ const Expr *Second) {
+ // Do a simple check for overlap: if the two arguments are from the same
+ // buffer, see if the end of the first is greater than the start of the second
+ // or vice versa.
+
+ // If a previous check has failed, propagate the failure.
+ if (!state)
+ return NULL;
+
+ ValueManager &VM = state->getStateManager().getValueManager();
+ SValuator &SV = VM.getSValuator();
+ ASTContext &Ctx = VM.getContext();
+ const GRState *stateTrue, *stateFalse;
+
+ // Get the buffer values and make sure they're known locations.
+ SVal FirstVal = state->getSVal(First);
+ SVal SecondVal = state->getSVal(Second);
+
+ Loc *FirstLoc = dyn_cast<Loc>(&FirstVal);
+ if (!FirstLoc)
+ return state;
+
+ Loc *SecondLoc = dyn_cast<Loc>(&SecondVal);
+ if (!SecondLoc)
+ return state;
+
+ // Are the two values the same?
+ DefinedOrUnknownSVal EqualTest = SV.EvalEQ(state, *FirstLoc, *SecondLoc);
+ llvm::tie(stateTrue, stateFalse) = state->Assume(EqualTest);
+
+ if (stateTrue && !stateFalse) {
+ // If the values are known to be equal, that's automatically an overlap.
+ EmitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // Assume the two expressions are not equal.
+ assert(stateFalse);
+ state = stateFalse;
+
+ // Which value comes first?
+ QualType CmpTy = Ctx.IntTy;
+ SVal Reverse = SV.EvalBinOpLL(state, BinaryOperator::GT,
+ *FirstLoc, *SecondLoc, CmpTy);
+ DefinedOrUnknownSVal *ReverseTest = dyn_cast<DefinedOrUnknownSVal>(&Reverse);
+ if (!ReverseTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->Assume(*ReverseTest);
+
+ if (stateTrue) {
+ if (stateFalse) {
+ // If we don't know which one comes first, we can't perform this test.
+ return state;
+ } else {
+ // Switch the values so that FirstVal is before SecondVal.
+ Loc *tmpLoc = FirstLoc;
+ FirstLoc = SecondLoc;
+ SecondLoc = tmpLoc;
+
+ // Switch the Exprs as well, so that they still correspond.
+ const Expr *tmpExpr = First;
+ First = Second;
+ Second = tmpExpr;
+ }
+ }
+
+ // Get the length, and make sure it too is known.
+ SVal LengthVal = state->getSVal(Size);
+ NonLoc *Length = dyn_cast<NonLoc>(&LengthVal);
+ if (!Length)
+ return state;
+
+ // Convert the first buffer's start address to char*.
+ // Bail out if the cast fails.
+ QualType CharPtrTy = Ctx.getPointerType(Ctx.CharTy);
+ SVal FirstStart = SV.EvalCast(*FirstLoc, CharPtrTy, First->getType());
+ Loc *FirstStartLoc = dyn_cast<Loc>(&FirstStart);
+ if (!FirstStartLoc)
+ return state;
+
+ // Compute the end of the first buffer. Bail out if THAT fails.
+ SVal FirstEnd = SV.EvalBinOpLN(state, BinaryOperator::Add,
+ *FirstStartLoc, *Length, CharPtrTy);
+ Loc *FirstEndLoc = dyn_cast<Loc>(&FirstEnd);
+ if (!FirstEndLoc)
+ return state;
+
+ // Is the end of the first buffer past the start of the second buffer?
+ SVal Overlap = SV.EvalBinOpLL(state, BinaryOperator::GT,
+ *FirstEndLoc, *SecondLoc, CmpTy);
+ DefinedOrUnknownSVal *OverlapTest = dyn_cast<DefinedOrUnknownSVal>(&Overlap);
+ if (!OverlapTest)
+ return state;
+
+ llvm::tie(stateTrue, stateFalse) = state->Assume(*OverlapTest);
+
+ if (stateTrue && !stateFalse) {
+ // Overlap!
+ EmitOverlapBug(C, stateTrue, First, Second);
+ return NULL;
+ }
+
+ // Assume the two expressions don't overlap.
+ assert(stateFalse);
+ return stateFalse;
+}
+
+void CStringChecker::EmitOverlapBug(CheckerContext &C, const GRState *state,
+ const Stmt *First, const Stmt *Second) {
+ ExplodedNode *N = C.GenerateSink(state);
+ if (!N)
+ return;
+
+ if (!BT_Overlap)
+ BT_Overlap = new BugType("Unix API", "Improper arguments");
+
+ // Generate a report for this bug.
+ RangedBugReport *report =
+ new RangedBugReport(*BT_Overlap,
+ "Arguments must not be overlapping buffers", N);
+ report->addRange(First->getSourceRange());
+ report->addRange(Second->getSourceRange());
+
+ C.EmitReport(report);
+}
+
+//===----------------------------------------------------------------------===//
+// Evaluation of individual function calls.
+//===----------------------------------------------------------------------===//
+
+void CStringChecker::EvalCopyCommon(CheckerContext &C, const GRState *state,
+ const Expr *Size, const Expr *Dest,
+ const Expr *Source, bool Restricted) {
+ // See if the size argument is zero.
+ SVal SizeVal = state->getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ const GRState *StZeroSize, *StNonZeroSize;
+ llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+
+ // If the size is zero, there won't be any actual memory access.
+ if (StZeroSize)
+ C.addTransition(StZeroSize);
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (StNonZeroSize) {
+ state = StNonZeroSize;
+ state = CheckBufferAccess(C, state, Size, Dest, Source);
+ if (Restricted)
+ state = CheckOverlap(C, state, Size, Dest, Source);
+ if (state)
+ C.addTransition(state);
+ }
+}
+
+
+void CStringChecker::EvalMemcpy(CheckerContext &C, const CallExpr *CE) {
+ // void *memcpy(void *restrict dst, const void *restrict src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ const GRState *state = C.getState();
+ state = state->BindExpr(CE, state->getSVal(Dest));
+ EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1), true);
+}
+
+void CStringChecker::EvalMemmove(CheckerContext &C, const CallExpr *CE) {
+ // void *memmove(void *dst, const void *src, size_t n);
+ // The return value is the address of the destination buffer.
+ const Expr *Dest = CE->getArg(0);
+ const GRState *state = C.getState();
+ state = state->BindExpr(CE, state->getSVal(Dest));
+ EvalCopyCommon(C, state, CE->getArg(2), Dest, CE->getArg(1));
+}
+
+void CStringChecker::EvalBcopy(CheckerContext &C, const CallExpr *CE) {
+ // void bcopy(const void *src, void *dst, size_t n);
+ EvalCopyCommon(C, C.getState(), CE->getArg(2), CE->getArg(1), CE->getArg(0));
+}
+
+void CStringChecker::EvalMemcmp(CheckerContext &C, const CallExpr *CE) {
+ // int memcmp(const void *s1, const void *s2, size_t n);
+ const Expr *Left = CE->getArg(0);
+ const Expr *Right = CE->getArg(1);
+ const Expr *Size = CE->getArg(2);
+
+ const GRState *state = C.getState();
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+
+ // See if the size argument is zero.
+ SVal SizeVal = state->getSVal(Size);
+ QualType SizeTy = Size->getType();
+
+ const GRState *StZeroSize, *StNonZeroSize;
+ llvm::tie(StZeroSize, StNonZeroSize) = AssumeZero(C, state, SizeVal, SizeTy);
+
+ // If the size can be zero, the result will be 0 in that case, and we don't
+ // have to check either of the buffers.
+ if (StZeroSize) {
+ state = StZeroSize;
+ state = state->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+
+ // If the size can be nonzero, we have to check the other arguments.
+ if (StNonZeroSize) {
+ state = StNonZeroSize;
+
+ // If we know the two buffers are the same, we know the result is 0.
+ // First, get the two buffers' addresses. Another checker will have already
+ // made sure they're not undefined.
+ DefinedOrUnknownSVal LV = cast<DefinedOrUnknownSVal>(state->getSVal(Left));
+ DefinedOrUnknownSVal RV = cast<DefinedOrUnknownSVal>(state->getSVal(Right));
+
+ // See if they are the same.
+ DefinedOrUnknownSVal SameBuf = SV.EvalEQ(state, LV, RV);
+ const GRState *StSameBuf, *StNotSameBuf;
+ llvm::tie(StSameBuf, StNotSameBuf) = state->Assume(SameBuf);
+
+ // If the two arguments might be the same buffer, we know the result is zero,
+ // and we only need to check one size.
+ if (StSameBuf) {
+ state = StSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left);
+ if (state) {
+ state = StSameBuf->BindExpr(CE, ValMgr.makeZeroVal(CE->getType()));
+ C.addTransition(state);
+ }
+ }
+
+ // If the two arguments might be different buffers, we have to check the
+ // size of both of them.
+ if (StNotSameBuf) {
+ state = StNotSameBuf;
+ state = CheckBufferAccess(C, state, Size, Left, Right);
+ if (state) {
+ // The return value is the comparison result, which we don't know.
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ SVal CmpV = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+ state = state->BindExpr(CE, CmpV);
+ C.addTransition(state);
+ }
+ }
+ }
+}
+
+//===----------------------------------------------------------------------===//
+// The driver method.
+//===----------------------------------------------------------------------===//
+
+bool CStringChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ // Get the callee. All the functions we care about are C functions
+ // with simple identifiers.
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ const FunctionDecl *FD = state->getSVal(Callee).getAsFunctionDecl();
+
+ if (!FD)
+ return false;
+
+ // Get the name of the callee. If it's a builtin, strip off the prefix.
+ llvm::StringRef Name = FD->getName();
+ if (Name.startswith("__builtin_"))
+ Name = Name.substr(10);
+
+ FnCheck EvalFunction = llvm::StringSwitch<FnCheck>(Name)
+ .Cases("memcpy", "__memcpy_chk", &CStringChecker::EvalMemcpy)
+ .Cases("memcmp", "bcmp", &CStringChecker::EvalMemcmp)
+ .Cases("memmove", "__memmove_chk", &CStringChecker::EvalMemmove)
+ .Case("bcopy", &CStringChecker::EvalBcopy)
+ .Default(NULL);
+
+ // If the callee isn't a string function, let another checker handle it.
+ if (!EvalFunction)
+ return false;
+
+ // Check and evaluate the call.
+ (this->*EvalFunction)(C, CE);
+ return true;
+}
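
For reference, the shape of code the new CStringChecker is built to flag: the overlap check enforces memcpy's restrict contract, and the buffer-access check compares the access length against each buffer's extent. Illustrative only:

  #include <cstring>

  void bad(char *buf) {               // assume buf points at >= 8 valid bytes
    std::memcpy(buf, buf + 1, 8);     // overlapping src/dst: the pattern
                                      // CheckOverlap looks for
    char small[4];
    std::memcpy(small, buf, 8);       // writes past 'small': the pattern
                                      // CheckBufferAccess / CheckLocation
  }                                   // report as a buffer overflow
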
diff --git a/lib/Checker/CallInliner.cpp b/lib/Checker/CallInliner.cpp
index 88e1a05d1191..c47e06c78fb2 100644
--- a/lib/Checker/CallInliner.cpp
+++ b/lib/Checker/CallInliner.cpp
@@ -42,7 +42,7 @@ bool CallInliner::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
if (!FD)
return false;
- if (!FD->getBody(FD))
+ if (!FD->hasBody(FD))
return false;
// Now we have the definition of the callee, create a CallEnter node.
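
hasBody() only asks whether some redeclaration provides a definition and reports, through its out-parameter, which one, so it is the right call when the body itself is not needed yet. A hypothetical usage, with FD being a const FunctionDecl*:

  const FunctionDecl *Definition = 0;
  if (FD->hasBody(Definition)) {
    // 'Definition' now points at the redeclaration that owns the body.
  }
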
diff --git a/lib/Checker/CastSizeChecker.cpp b/lib/Checker/CastSizeChecker.cpp
index 754d775a65d1..a502c10cac16 100644
--- a/lib/Checker/CastSizeChecker.cpp
+++ b/lib/Checker/CastSizeChecker.cpp
@@ -44,7 +44,8 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
QualType ToPointeeTy = ToPTy->getPointeeType();
- const MemRegion *R = C.getState()->getSVal(E).getAsRegion();
+ const GRState *state = C.getState();
+ const MemRegion *R = state->getSVal(E).getAsRegion();
if (R == 0)
return;
@@ -52,17 +53,21 @@ void CastSizeChecker::PreVisitCastExpr(CheckerContext &C, const CastExpr *CE) {
if (SR == 0)
return;
- llvm::Optional<SVal> V =
- C.getEngine().getStoreManager().getExtent(C.getState(), SR);
- if (!V)
- return;
+ ValueManager &ValMgr = C.getValueManager();
+ SVal Extent = SR->getExtent(ValMgr);
- const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(V);
- if (!CI)
+ SValuator &SVator = ValMgr.getSValuator();
+ const llvm::APSInt *ExtentInt = SVator.getKnownValue(state, Extent);
+ if (!ExtentInt)
return;
- CharUnits RegionSize = CharUnits::fromQuantity(CI->getValue().getSExtValue());
+ CharUnits RegionSize = CharUnits::fromQuantity(ExtentInt->getSExtValue());
CharUnits TypeSize = C.getASTContext().getTypeSizeInChars(ToPointeeTy);
+
+ // Ignore void, and a few other un-sizeable types.
+ if (TypeSize.isZero())
+ return;
+
if (RegionSize % TypeSize != 0) {
if (ExplodedNode *N = C.GenerateSink()) {
if (!BT)
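
CastSizeChecker now derives the region size from the region's own symbolic extent and skips pointee types whose size is zero. The classic pattern it warns about, illustratively:

  #include <cstdlib>

  struct Pair { int a, b; };                    // e.g. 8 bytes

  void alloc() {
    Pair *p = (Pair *)std::malloc(sizeof(Pair) * 2 + 1);  // 17 bytes is not a
    std::free(p);                                         // multiple of 8, so
  }                                                       // the cast is flagged
  // Casts whose pointee has zero size (e.g. to void*) are now skipped by the
  // TypeSize.isZero() guard.
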
diff --git a/lib/Checker/CheckSecuritySyntaxOnly.cpp b/lib/Checker/CheckSecuritySyntaxOnly.cpp
index 74e12b18a81a..af85c2faee3a 100644
--- a/lib/Checker/CheckSecuritySyntaxOnly.cpp
+++ b/lib/Checker/CheckSecuritySyntaxOnly.cpp
@@ -191,8 +191,8 @@ void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
const DeclRefExpr *drRHS = dyn_cast<DeclRefExpr>(B->getRHS()->IgnoreParens());
// Does at least one of the variables have a floating point type?
- drLHS = drLHS && drLHS->getType()->isFloatingType() ? drLHS : NULL;
- drRHS = drRHS && drRHS->getType()->isFloatingType() ? drRHS : NULL;
+ drLHS = drLHS && drLHS->getType()->isRealFloatingType() ? drLHS : NULL;
+ drRHS = drRHS && drRHS->getType()->isRealFloatingType() ? drRHS : NULL;
if (!drLHS && !drRHS)
return;
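
The switch to isRealFloatingType() narrows the loop-counter warning to genuinely real floating-point variables; complex floating types, which also satisfy isFloatingType(), are no longer of interest. The pattern the check is aimed at:

  void count() {
    // Flagged: with a float counter, x may never compare equal to 1.0f,
    // because 0.1f is not exactly representable in binary floating point.
    for (float x = 0.1f; x != 1.0f; x += 0.1f) {
      /* ... */
    }
  }
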
diff --git a/lib/Checker/Environment.cpp b/lib/Checker/Environment.cpp
index addfc21c1805..48152ceb46f0 100644
--- a/lib/Checker/Environment.cpp
+++ b/lib/Checker/Environment.cpp
@@ -125,7 +125,7 @@ static bool isBlockExprInCallers(const Stmt *E, const LocationContext *LC) {
// - Mark the region in DRoots if the binding is a loc::MemRegionVal.
Environment
-EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
+EnvironmentManager::RemoveDeadBindings(Environment Env,
SymbolReaper &SymReaper,
const GRState *ST,
llvm::SmallVectorImpl<const MemRegion*> &DRoots) {
@@ -163,7 +163,7 @@ EnvironmentManager::RemoveDeadBindings(Environment Env, const Stmt *S,
if (!C.isBlkExpr(BlkExpr))
continue;
- if (SymReaper.isLive(S, BlkExpr)) {
+ if (SymReaper.isLive(BlkExpr)) {
// Copy the binding to the new map.
NewEnv.ExprBindings = F.Add(NewEnv.ExprBindings, BlkExpr, X);
diff --git a/lib/Checker/FlatStore.cpp b/lib/Checker/FlatStore.cpp
index 7f1c579c6edb..64575b3c9783 100644
--- a/lib/Checker/FlatStore.cpp
+++ b/lib/Checker/FlatStore.cpp
@@ -44,7 +44,7 @@ public:
}
SVal ArrayToPointer(Loc Array);
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots){
@@ -59,6 +59,11 @@ public:
Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS);
+
+ Store InvalidateRegions(Store store, const MemRegion * const *I,
+ const MemRegion * const *E, const Expr *Ex,
+ unsigned Count, InvalidatedSymbols *IS,
+ bool invalidateGlobals);
void print(Store store, llvm::raw_ostream& Out, const char* nl,
const char *sep);
@@ -141,9 +146,20 @@ Store FlatStoreManager::BindDeclWithNoInit(Store store, const VarRegion *VR) {
return store;
}
+Store FlatStoreManager::InvalidateRegions(Store store,
+ const MemRegion * const *I,
+ const MemRegion * const *E,
+ const Expr *Ex, unsigned Count,
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
+ assert(false && "Not implemented");
+ return store;
+}
+
Store FlatStoreManager::InvalidateRegion(Store store, const MemRegion *R,
const Expr *E, unsigned Count,
InvalidatedSymbols *IS) {
+ assert(false && "Not implemented");
return store;
}
diff --git a/lib/Checker/FrontendActions.cpp b/lib/Checker/FrontendActions.cpp
new file mode 100644
index 000000000000..d9a54a021bc9
--- /dev/null
+++ b/lib/Checker/FrontendActions.cpp
@@ -0,0 +1,21 @@
+//===--- FrontendActions.cpp ----------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/FrontendActions.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Checker/AnalysisConsumer.h"
+using namespace clang;
+
+ASTConsumer *AnalysisAction::CreateASTConsumer(CompilerInstance &CI,
+ llvm::StringRef InFile) {
+ return CreateAnalysisConsumer(CI.getPreprocessor(),
+ CI.getFrontendOpts().OutputFile,
+ CI.getAnalyzerOpts());
+}
+
diff --git a/lib/Checker/GRCoreEngine.cpp b/lib/Checker/GRCoreEngine.cpp
index 23a87d303b4d..a816186a307d 100644
--- a/lib/Checker/GRCoreEngine.cpp
+++ b/lib/Checker/GRCoreEngine.cpp
@@ -221,6 +221,7 @@ bool GRCoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps) {
}
}
+ SubEngine.ProcessEndWorklist(WList->hasWork() || BlockAborted);
return WList->hasWork();
}
@@ -257,7 +258,10 @@ void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
// FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter()))
- GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred);
+ GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()),
+ Pred->State, Pred);
+ else
+ BlockAborted = true;
}
void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
diff --git a/lib/Checker/GRExprEngine.cpp b/lib/Checker/GRExprEngine.cpp
index 24176586728c..4652a4c89ff3 100644
--- a/lib/Checker/GRExprEngine.cpp
+++ b/lib/Checker/GRExprEngine.cpp
@@ -172,15 +172,39 @@ public:
void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
ExplodedNodeSet &Src, bool isPrevisit) {
- if (Checkers.empty()) {
+ // Determine if we already have a cached 'CheckersOrdered' vector
+ // specifically tailored for the provided <Stmt kind, isPrevisit>. This
+ // can reduce the number of checkers actually called.
+ CheckersOrdered *CO = &Checkers;
+ llvm::OwningPtr<CheckersOrdered> NewCO;
+
+ const std::pair<unsigned, unsigned> &K =
+ std::make_pair((unsigned)S->getStmtClass(), isPrevisit ? 1U : 0U);
+
+ CheckersOrdered *& CO_Ref = COCache[K];
+
+ if (!CO_Ref) {
+ // If we have no previously cached CheckersOrdered vector for this
+ // statement kind, then create one.
+ NewCO.reset(new CheckersOrdered);
+ }
+ else {
+ // Use the already cached set.
+ CO = CO_Ref;
+ }
+
+ if (CO->empty()) {
+ // If there are no checkers, return early without doing any
+ // more work.
Dst.insert(Src);
return;
}
ExplodedNodeSet Tmp;
ExplodedNodeSet *PrevSet = &Src;
+ unsigned checkersEvaluated = 0;
- for (CheckersOrdered::iterator I=Checkers.begin(),E=Checkers.end(); I!=E;++I){
+ for (CheckersOrdered::iterator I=CO->begin(), E=CO->end(); I!=E; ++I){
ExplodedNodeSet *CurrSet = 0;
if (I+1 == E)
CurrSet = &Dst;
@@ -190,12 +214,30 @@ void GRExprEngine::CheckerVisit(Stmt *S, ExplodedNodeSet &Dst,
}
void *tag = I->first;
Checker *checker = I->second;
+ bool respondsToCallback = true;
for (ExplodedNodeSet::iterator NI = PrevSet->begin(), NE = PrevSet->end();
- NI != NE; ++NI)
- checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit);
+ NI != NE; ++NI) {
+
+ checker->GR_Visit(*CurrSet, *Builder, *this, S, *NI, tag, isPrevisit,
+ respondsToCallback);
+
+ }
+
PrevSet = CurrSet;
+
+ if (NewCO.get()) {
+ ++checkersEvaluated;
+ if (respondsToCallback)
+ NewCO->push_back(*I);
+ }
}
+
+ // If we built NewCO, check if we called all the checkers. This is important
+ // so that we know that we accurately determined the entire set of checkers
+ // that responds to this callback.
+ if (NewCO.get() && checkersEvaluated == Checkers.size())
+ CO_Ref = NewCO.take();
// Don't autotransition. The CheckerContext objects should do this
// automatically.
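
The caching added to CheckerVisit memoizes, per <statement kind, pre/post-visit> pair, the subset of checkers that actually respond, so later visits of the same kind run only those. The same idea in miniature; the container types are simplified and RunCheckers / RunAllAndRecordResponders are hypothetical helpers, not analyzer APIs:

  #include <map>
  #include <utility>
  #include <vector>

  class Checker;                                   // opaque here
  typedef std::vector<std::pair<void *, Checker *> > CheckersOrdered;
  typedef std::map<std::pair<unsigned, unsigned>, CheckersOrdered *> Cache;

  void RunCheckers(const CheckersOrdered &CO);     // hypothetical
  CheckersOrdered *RunAllAndRecordResponders();    // hypothetical

  static Cache COCache;

  void visit(unsigned StmtClass, bool isPrevisit) {
    CheckersOrdered *&Slot =
        COCache[std::make_pair(StmtClass, isPrevisit ? 1u : 0u)];
    if (Slot) {
      RunCheckers(*Slot);                 // fast path: only responsive checkers
    } else {
      Slot = RunAllAndRecordResponders(); // slow path: runs every checker and
    }                                     // records the ones that responded
  }
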
@@ -312,18 +354,20 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
// automatically. Note that the check itself is owned by the GRExprEngine
// object.
RegisterAdjustedReturnValueChecker(Eng);
- RegisterAttrNonNullChecker(Eng);
+ // CallAndMessageChecker should be registered before AttrNonNullChecker,
+ // where we assume arguments are not undefined.
RegisterCallAndMessageChecker(Eng);
+ RegisterAttrNonNullChecker(Eng);
RegisterDereferenceChecker(Eng);
RegisterVLASizeChecker(Eng);
RegisterDivZeroChecker(Eng);
- RegisterReturnStackAddressChecker(Eng);
RegisterReturnUndefChecker(Eng);
RegisterUndefinedArraySubscriptChecker(Eng);
RegisterUndefinedAssignmentChecker(Eng);
RegisterUndefBranchChecker(Eng);
RegisterUndefCapturedBlockVarChecker(Eng);
RegisterUndefResultChecker(Eng);
+ RegisterStackAddrLeakChecker(Eng);
// This is not a checker yet.
RegisterNoReturnFunctionChecker(Eng);
@@ -335,10 +379,10 @@ static void RegisterInternalChecks(GRExprEngine &Eng) {
GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
: AMgr(mgr),
- CoreEngine(mgr.getASTContext(), *this),
+ CoreEngine(*this),
G(CoreEngine.getGraph()),
Builder(NULL),
- StateMgr(G.getContext(), mgr.getStoreManagerCreator(),
+ StateMgr(getContext(), mgr.getStoreManagerCreator(),
mgr.getConstraintManagerCreator(), G.getAllocator(),
*this),
SymMgr(StateMgr.getSymbolManager()),
@@ -346,7 +390,7 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
SVator(ValMgr.getSValuator()),
CurrentStmt(NULL),
NSExceptionII(NULL), NSExceptionInstanceRaiseSelectors(NULL),
- RaiseSel(GetNullarySelector("raise", G.getContext())),
+ RaiseSel(GetNullarySelector("raise", getContext())),
BR(mgr, *this), TF(tf) {
// Register internal checks.
RegisterInternalChecks(*this);
@@ -359,8 +403,14 @@ GRExprEngine::GRExprEngine(AnalysisManager &mgr, GRTransferFuncs *tf)
GRExprEngine::~GRExprEngine() {
BR.FlushReports();
delete [] NSExceptionInstanceRaiseSelectors;
+
+ // Delete the set of checkers.
for (CheckersOrdered::iterator I=Checkers.begin(), E=Checkers.end(); I!=E;++I)
delete I->second;
+
+ for (CheckersOrderedCache::iterator I=COCache.begin(), E=COCache.end();
+ I!=E;++I)
+ delete I->second;
}
//===----------------------------------------------------------------------===//
@@ -464,6 +514,13 @@ const GRState *GRExprEngine::ProcessAssume(const GRState *state, SVal cond,
return TF->EvalAssume(state, cond, assumption);
}
+void GRExprEngine::ProcessEndWorklist(bool hasWorkRemaining) {
+ for (CheckersOrdered::iterator I = Checkers.begin(), E = Checkers.end();
+ I != E; ++I) {
+ I->second->VisitEndAnalysis(G, BR, hasWorkRemaining);
+ }
+}
+
void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
CurrentStmt = CE.getStmt();
PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
@@ -480,10 +537,10 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
// Create the cleaned state.
const ExplodedNode *BasePred = Builder->getBasePredecessor();
- SymbolReaper SymReaper(BasePred->getLocationContext(), SymMgr);
+ SymbolReaper SymReaper(BasePred->getLocationContext(), CurrentStmt, SymMgr);
CleanedState = AMgr.shouldPurgeDead()
- ? StateMgr.RemoveDeadBindings(EntryNode->getState(), CurrentStmt,
+ ? StateMgr.RemoveDeadBindings(EntryNode->getState(),
BasePred->getLocationContext()->getCurrentStackFrame(),
SymReaper)
: EntryNode->getState();
@@ -502,7 +559,7 @@ void GRExprEngine::ProcessStmt(CFGElement CE, GRStmtNodeBuilder& builder) {
// FIXME: This should soon be removed.
ExplodedNodeSet Tmp2;
- getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode, CurrentStmt,
+ getTF().EvalDeadSymbols(Tmp2, *this, *Builder, EntryNode,
CleanedState, SymReaper);
if (Checkers.empty())
@@ -598,7 +655,7 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
case Stmt::CXXTryStmtClass:
case Stmt::CXXTypeidExprClass:
case Stmt::CXXUnresolvedConstructExprClass:
- case Stmt::CXXZeroInitValueExprClass:
+ case Stmt::CXXScalarValueInitExprClass:
case Stmt::DependentScopeDeclRefExprClass:
case Stmt::UnaryTypeTraitExprClass:
case Stmt::UnresolvedLookupExprClass:
@@ -627,10 +684,14 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
llvm_unreachable("Stmt should not be in analyzer evaluation loop");
break;
+ case Stmt::GNUNullExprClass: {
+ MakeNode(Dst, S, Pred, GetState(Pred)->BindExpr(S, ValMgr.makeNull()));
+ break;
+ }
+
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::ExtVectorElementExprClass:
- case Stmt::GNUNullExprClass:
case Stmt::ImaginaryLiteralClass:
case Stmt::ImplicitValueInitExprClass:
case Stmt::ObjCAtCatchStmtClass:
@@ -901,7 +962,7 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
// C++ stuff we don't support yet.
case Stmt::CXXExprWithTemporariesClass:
case Stmt::CXXMemberCallExprClass:
- case Stmt::CXXZeroInitValueExprClass: {
+ case Stmt::CXXScalarValueInitExprClass: {
SaveAndRestore<bool> OldSink(Builder->BuildSinks);
Builder->BuildSinks = true;
MakeNode(Dst, Ex, Pred, GetState(Pred));
@@ -998,16 +1059,21 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
CreateCXXTemporaryObject(Ex, Pred, Dst);
return;
- default:
+ default: {
// Arbitrary subexpressions can return aggregate temporaries that
// can be used in a lvalue context. We need to enhance our support
// of such temporaries in both the environment and the store, so right
// now we just do a regular visit.
- assert ((Ex->getType()->isAggregateType()) &&
- "Other kinds of expressions with non-aggregate/union types do"
- " not have lvalues.");
+
+ // NOTE: Do not use 'isAggregateType()' here as CXXRecordDecls that
+ // are non-pod are not aggregates.
+ assert ((isa<RecordType>(Ex->getType().getDesugaredType()) ||
+ isa<ArrayType>(Ex->getType().getDesugaredType())) &&
+ "Other kinds of expressions with non-aggregate/union/class types"
+ " do not have lvalues.");
Visit(Ex, Pred, Dst);
+ }
}
}
@@ -1819,7 +1885,7 @@ bool GRExprEngine::InlineCall(ExplodedNodeSet &Dst, const CallExpr *CE,
if (!FD)
return false;
- if (!FD->getBody(FD))
+ if (!FD->hasBody(FD))
return false;
// Now we have the definition of the callee, create a CallEnter node.
@@ -1940,7 +2006,8 @@ void GRExprEngine::VisitCall(CallExpr* CE, ExplodedNode* Pred,
// Finally, perform the post-condition check of the CallExpr and store
// the created nodes in 'Dst'.
-
+ // If the callee returns a reference and we want an rvalue, skip this check
+ // and do the load.
if (!(!asLValue && CalleeReturnsReference(CE))) {
CheckerVisit(CE, Dst, DstTmp3, false);
return;
@@ -2380,7 +2447,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_DerivedToBase:
- case CastExpr::CK_UncheckedDerivedToBase:
+ case CastExpr::CK_UncheckedDerivedToBase: {
// Delegate to SValuator to process.
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
ExplodedNode* N = *I;
@@ -2391,10 +2458,24 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
MakeNode(Dst, CastE, N, state);
}
return;
-
- default:
- llvm::errs() << "Cast kind " << CastE->getCastKind() << " not handled.\n";
- assert(0);
+ }
+
+ // Various C++ casts that are not handled yet.
+ case CastExpr::CK_Dynamic:
+ case CastExpr::CK_ToUnion:
+ case CastExpr::CK_BaseToDerived:
+ case CastExpr::CK_NullToMemberPointer:
+ case CastExpr::CK_BaseToDerivedMemberPointer:
+ case CastExpr::CK_DerivedToBaseMemberPointer:
+ case CastExpr::CK_UserDefinedConversion:
+ case CastExpr::CK_ConstructorConversion:
+ case CastExpr::CK_VectorSplat:
+ case CastExpr::CK_MemberPointerToBoolean: {
+ SaveAndRestore<bool> OldSink(Builder->BuildSinks);
+ Builder->BuildSinks = true;
+ MakeNode(Dst, CastE, Pred, GetState(Pred));
+ return;
+ }
}
}
@@ -2615,9 +2696,38 @@ void GRExprEngine::VisitSizeOfAlignOfExpr(SizeOfAlignOfExpr* Ex,
// sizeof(void) == 1 byte.
amt = CharUnits::One();
}
- else if (!T.getTypePtr()->isConstantSizeType()) {
- // FIXME: Add support for VLAs.
- Dst.Add(Pred);
+ else if (!T->isConstantSizeType()) {
+ assert(T->isVariableArrayType() && "Unknown non-constant-sized type.");
+
+ // FIXME: Add support for VLA type arguments, not just VLA expressions.
+ // When that happens, we should probably refactor VLASizeChecker's code.
+ if (Ex->isArgumentType()) {
+ Dst.Add(Pred);
+ return;
+ }
+
+ // Get the size by getting the extent of the sub-expression.
+ // First, visit the sub-expression to find its region.
+ Expr *Arg = Ex->getArgumentExpr();
+ ExplodedNodeSet Tmp;
+ VisitLValue(Arg, Pred, Tmp);
+
+ for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
+ const GRState* state = GetState(*I);
+ const MemRegion *MR = state->getSVal(Arg).getAsRegion();
+
+ // If the subexpression can't be resolved to a region, we don't know
+ // anything about its size. Just leave the state as is and continue.
+ if (!MR) {
+ Dst.Add(*I);
+ continue;
+ }
+
+ // The result is the extent of the VLA.
+ SVal Extent = cast<SubRegion>(MR)->getExtent(ValMgr);
+ MakeNode(Dst, Ex, *I, state->BindExpr(Ex, Extent));
+ }
+
return;
}
else if (T->getAs<ObjCObjectType>()) {
@@ -2749,7 +2859,7 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
return;
}
- case UnaryOperator::Plus: assert (!asLValue); // FALL-THROUGH.
+ case UnaryOperator::Plus: assert(!asLValue); // FALL-THROUGH.
case UnaryOperator::Extension: {
// Unary "+" is a no-op, similar to a parentheses. We still have places
@@ -2759,7 +2869,11 @@ void GRExprEngine::VisitUnaryOperator(UnaryOperator* U, ExplodedNode* Pred,
Expr* Ex = U->getSubExpr()->IgnoreParens();
ExplodedNodeSet Tmp;
- Visit(Ex, Pred, Tmp);
+
+ if (asLValue)
+ VisitLValue(Ex, Pred, Tmp);
+ else
+ Visit(Ex, Pred, Tmp);
for (ExplodedNodeSet::iterator I=Tmp.begin(), E=Tmp.end(); I!=E; ++I) {
const GRState* state = GetState(*I);
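Note: the new sizeof handling above models sizeof applied to a VLA expression by binding the result to the extent of the array's region (VLA type arguments are still skipped). A minimal C sketch of the situation it covers; the function and variable names are invented for illustration:

    #include <stddef.h>

    void vla_sizeof_example(int n) {
      int buf[n];                  /* variable-length array: size only known at run time */
      size_t bytes = sizeof(buf);  /* now bound to the extent of buf's region */
      (void)bytes;                 /* keep the example self-contained */
    }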
diff --git a/lib/Checker/GRExprEngineExperimentalChecks.cpp b/lib/Checker/GRExprEngineExperimentalChecks.cpp
index 6066a1c74d33..d138e81c4691 100644
--- a/lib/Checker/GRExprEngineExperimentalChecks.cpp
+++ b/lib/Checker/GRExprEngineExperimentalChecks.cpp
@@ -23,6 +23,8 @@ void clang::RegisterExperimentalChecks(GRExprEngine &Eng) {
// within GRExprEngine.
RegisterPthreadLockChecker(Eng);
RegisterMallocChecker(Eng);
+ RegisterStreamChecker(Eng);
+ RegisterCStringChecker(Eng);
}
void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
@@ -38,4 +40,5 @@ void clang::RegisterExperimentalInternalChecks(GRExprEngine &Eng) {
RegisterCastToStructChecker(Eng);
RegisterCastSizeChecker(Eng);
RegisterArrayBoundChecker(Eng);
+
}
diff --git a/lib/Checker/GRExprEngineExperimentalChecks.h b/lib/Checker/GRExprEngineExperimentalChecks.h
index 9a9da32e556e..7d1eb77f9fd6 100644
--- a/lib/Checker/GRExprEngineExperimentalChecks.h
+++ b/lib/Checker/GRExprEngineExperimentalChecks.h
@@ -19,8 +19,11 @@ namespace clang {
class GRExprEngine;
+void RegisterCStringChecker(GRExprEngine &Eng);
void RegisterPthreadLockChecker(GRExprEngine &Eng);
void RegisterMallocChecker(GRExprEngine &Eng);
+void RegisterStreamChecker(GRExprEngine &Eng);
+void RegisterIdempotentOperationChecker(GRExprEngine &Eng);
} // end clang namespace
#endif
diff --git a/lib/Checker/GRExprEngineInternalChecks.h b/lib/Checker/GRExprEngineInternalChecks.h
index 335b85e308e0..f91a759b3299 100644
--- a/lib/Checker/GRExprEngineInternalChecks.h
+++ b/lib/Checker/GRExprEngineInternalChecks.h
@@ -34,8 +34,8 @@ void RegisterNoReturnFunctionChecker(GRExprEngine &Eng);
void RegisterPointerArithChecker(GRExprEngine &Eng);
void RegisterPointerSubChecker(GRExprEngine &Eng);
void RegisterReturnPointerRangeChecker(GRExprEngine &Eng);
-void RegisterReturnStackAddressChecker(GRExprEngine &Eng);
void RegisterReturnUndefChecker(GRExprEngine &Eng);
+void RegisterStackAddrLeakChecker(GRExprEngine &Eng);
void RegisterUndefBranchChecker(GRExprEngine &Eng);
void RegisterUndefCapturedBlockVarChecker(GRExprEngine &Eng);
void RegisterUndefResultChecker(GRExprEngine &Eng);
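Note: the rename above replaces the old return-only registration with the broader stack-address-leak checker. A minimal C sketch of the classic pattern this family of checks reports (illustrative only, not a test from the commit):

    int *dangling_pointer(void) {
      int local = 42;
      return &local;   /* the address of 'local' outlives its stack frame */
    }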
diff --git a/lib/Checker/GRState.cpp b/lib/Checker/GRState.cpp
index b16e922776e5..9e584b56148c 100644
--- a/lib/Checker/GRState.cpp
+++ b/lib/Checker/GRState.cpp
@@ -34,7 +34,7 @@ GRStateManager::~GRStateManager() {
}
const GRState*
-GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
+GRStateManager::RemoveDeadBindings(const GRState* state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper) {
@@ -47,11 +47,11 @@ GRStateManager::RemoveDeadBindings(const GRState* state, Stmt* Loc,
llvm::SmallVector<const MemRegion*, 10> RegionRoots;
GRState NewState = *state;
- NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, Loc, SymReaper,
+ NewState.Env = EnvMgr.RemoveDeadBindings(NewState.Env, SymReaper,
state, RegionRoots);
// Clean up the store.
- const GRState *s = StoreMgr->RemoveDeadBindings(NewState, Loc, LCtx,
+ const GRState *s = StoreMgr->RemoveDeadBindings(NewState, LCtx,
SymReaper, RegionRoots);
return ConstraintMgr->RemoveDeadBindings(s, SymReaper);
@@ -343,28 +343,3 @@ bool GRState::scanReachableSymbols(const MemRegion * const *I,
}
return true;
}
-
-//===----------------------------------------------------------------------===//
-// Queries.
-//===----------------------------------------------------------------------===//
-
-bool GRStateManager::isEqual(const GRState* state, const Expr* Ex,
- const llvm::APSInt& Y) {
-
- SVal V = state->getSVal(Ex);
-
- if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
- return X->getValue() == Y;
-
- if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
- return X->getValue() == Y;
-
- if (SymbolRef Sym = V.getAsSymbol())
- return ConstraintMgr->isEqual(state, Sym, Y);
-
- return false;
-}
-
-bool GRStateManager::isEqual(const GRState* state, const Expr* Ex, uint64_t x) {
- return isEqual(state, Ex, getBasicVals().getValue(x, Ex->getType()));
-}
diff --git a/lib/Frontend/HTMLDiagnostics.cpp b/lib/Checker/HTMLDiagnostics.cpp
index 022a34d0bd4f..ff9867fb5f16 100644
--- a/lib/Frontend/HTMLDiagnostics.cpp
+++ b/lib/Checker/HTMLDiagnostics.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Checker/PathDiagnosticClients.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
@@ -294,8 +294,8 @@ void HTMLDiagnostics::ReportDiag(const PathDiagnostic& D,
llvm::raw_fd_ostream os(H.c_str(), ErrorMsg);
if (!ErrorMsg.empty()) {
- (llvm::errs() << "warning: could not create file '" << F.str()
- << "'\n").flush();
+ llvm::errs() << "warning: could not create file '" << F.str()
+ << "'\n";
return;
}
diff --git a/lib/Checker/IdempotentOperationChecker.cpp b/lib/Checker/IdempotentOperationChecker.cpp
new file mode 100644
index 000000000000..6ed18417a2c2
--- /dev/null
+++ b/lib/Checker/IdempotentOperationChecker.cpp
@@ -0,0 +1,454 @@
+//==- IdempotentOperationChecker.cpp - Idempotent Operations ----*- C++ -*-==//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a set of path-sensitive checks for idempotent and/or
+// tautological operations. Each potential operation is checked along all paths
+// to see if every path results in a pointless operation.
+// +-------------------------------------------+
+// |Table of idempotent/tautological operations|
+// +-------------------------------------------+
+//+--------------------------------------------------------------------------+
+//|Operator | x op x | x op 1 | 1 op x | x op 0 | 0 op x | x op ~0 | ~0 op x |
+//+--------------------------------------------------------------------------+
+// +, += | | | | x | x | |
+// -, -= | | | | x | -x | |
+// *, *= | | x | x | 0 | 0 | |
+// /, /= | 1 | x | | N/A | 0 | |
+// &, &= | x | | | 0 | 0 | x | x
+// |, |= | x | | | x | x | ~0 | ~0
+// ^, ^= | 0 | | | x | x | |
+// <<, <<= | | | | x | 0 | |
+// >>, >>= | | | | x | 0 | |
+// || | 1 | 1 | 1 | x | x | 1 | 1
+// && | 1 | x | x | 0 | 0 | x | x
+// = | x | | | | | |
+// == | 1 | | | | | |
+// >= | 1 | | | | | |
+// <= | 1 | | | | | |
+// > | 0 | | | | | |
+// < | 0 | | | | | |
+// != | 0 | | | | | |
+//===----------------------------------------------------------------------===//
+//
+// Ways to reduce false positives (that need to be implemented):
+// - Don't flag downsizing casts
+// - Improved handling of static/global variables
+// - Per-block marking of incomplete analysis
+// - Handling ~0 values
+// - False positives involving silencing unused variable warnings
+//
+// Other things TODO:
+// - Improved error messages
+// - Handle mixed assumptions (which assumptions can belong together?)
+// - Finer grained false positive control (levels)
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/SVals.h"
+#include "clang/AST/Stmt.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/ErrorHandling.h"
+
+using namespace clang;
+
+namespace {
+class IdempotentOperationChecker
+ : public CheckerVisitor<IdempotentOperationChecker> {
+ public:
+ static void *getTag();
+ void PreVisitBinaryOperator(CheckerContext &C, const BinaryOperator *B);
+ void VisitEndAnalysis(ExplodedGraph &G, BugReporter &B,
+ bool hasWorkRemaining);
+
+ private:
+ // Our assumption about a particular operation.
+ enum Assumption { Possible, Impossible, Equal, LHSis1, RHSis1, LHSis0,
+ RHSis0 };
+
+ void UpdateAssumption(Assumption &A, const Assumption &New);
+
+ /// contains* - Useful recursive methods to see if a statement contains an
+ /// element somewhere. Used in static analysis to reduce false positives.
+ static bool containsMacro(const Stmt *S);
+ static bool containsEnum(const Stmt *S);
+ static bool containsBuiltinOffsetOf(const Stmt *S);
+ static bool containsZeroConstant(const Stmt *S);
+ static bool containsOneConstant(const Stmt *S);
+ template <class T> static bool containsStmt(const Stmt *S) {
+ if (isa<T>(S))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsStmt<T>(child))
+ return true;
+
+ return false;
+ }
+
+ // Hash table
+ typedef llvm::DenseMap<const BinaryOperator *, Assumption> AssumptionMap;
+ AssumptionMap hash;
+};
+}
+
+void *IdempotentOperationChecker::getTag() {
+ static int x = 0;
+ return &x;
+}
+
+void clang::RegisterIdempotentOperationChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new IdempotentOperationChecker());
+}
+
+void IdempotentOperationChecker::PreVisitBinaryOperator(
+ CheckerContext &C,
+ const BinaryOperator *B) {
+ // Find or create an entry in the hash for this BinaryOperator instance
+ AssumptionMap::iterator i = hash.find(B);
+ Assumption &A = i == hash.end() ? hash[B] : i->second;
+
+ // If we had to create an entry, initialise the value to Possible
+ if (i == hash.end())
+ A = Possible;
+
+ // If we already have visited this node on a path that does not contain an
+ // idempotent operation, return immediately.
+ if (A == Impossible)
+ return;
+
+ // Skip binary operators containing common false positives
+ if (containsMacro(B) || containsEnum(B) || containsStmt<SizeOfAlignOfExpr>(B)
+ || containsZeroConstant(B) || containsOneConstant(B)
+ || containsBuiltinOffsetOf(B)) {
+ A = Impossible;
+ return;
+ }
+
+ const Expr *LHS = B->getLHS();
+ const Expr *RHS = B->getRHS();
+
+ const GRState *state = C.getState();
+
+ SVal LHSVal = state->getSVal(LHS);
+ SVal RHSVal = state->getSVal(RHS);
+
+ // If either value is unknown, we can't be 100% sure of all paths.
+ if (LHSVal.isUnknownOrUndef() || RHSVal.isUnknownOrUndef()) {
+ A = Impossible;
+ return;
+ }
+ BinaryOperator::Opcode Op = B->getOpcode();
+
+ // Dereference the LHS SVal if this is an assign operation
+ switch (Op) {
+ default:
+ break;
+
+ // Fall through intentional
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::Assign:
+ // Assign statements have one extra level of indirection
+ if (!isa<Loc>(LHSVal)) {
+ A = Impossible;
+ return;
+ }
+ LHSVal = state->getSVal(cast<Loc>(LHSVal));
+ }
+
+
+ // We now check for various cases which result in an idempotent operation.
+
+ // x op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::Assign:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Div:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (LHSVal != RHSVal)
+ break;
+ UpdateAssumption(A, Equal);
+ return;
+ }
+
+ // x op 1
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!RHSVal.isConstant(1))
+ break;
+ UpdateAssumption(A, RHSis1);
+ return;
+ }
+
+ // 1 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::Mul:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!LHSVal.isConstant(1))
+ break;
+ UpdateAssumption(A, LHSis1);
+ return;
+ }
+
+ // x op 0
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Mul:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!RHSVal.isConstant(0))
+ break;
+ UpdateAssumption(A, RHSis0);
+ return;
+ }
+
+ // 0 op x
+ switch (Op) {
+ default:
+ break; // We don't care about any other operators.
+
+ // Fall through intentional
+ //case BinaryOperator::AddAssign: // Common false positive
+ case BinaryOperator::SubAssign: // Check only if unsigned
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::AndAssign:
+ //case BinaryOperator::OrAssign: // Common false positive
+ //case BinaryOperator::XorAssign: // Common false positive
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Mul:
+ case BinaryOperator::Div:
+ case BinaryOperator::And:
+ case BinaryOperator::Or:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::LOr:
+ case BinaryOperator::LAnd:
+ if (!LHSVal.isConstant(0))
+ break;
+ UpdateAssumption(A, LHSis0);
+ return;
+ }
+
+ // If we get to this point, there has been a valid use of this operation.
+ A = Impossible;
+}
+
+void IdempotentOperationChecker::VisitEndAnalysis(ExplodedGraph &G,
+ BugReporter &B,
+ bool hasWorkRemaining) {
+ // If there is any work remaining we cannot be 100% sure about our warnings
+ if (hasWorkRemaining)
+ return;
+
+ // Iterate over the hash to see if we have any paths with definite
+ // idempotent operations.
+ for (AssumptionMap::const_iterator i =
+ hash.begin(); i != hash.end(); ++i) {
+ if (i->second != Impossible) {
+ // Select the error message.
+ const char *msg = 0;
+ switch (i->second) {
+ case Equal:
+ msg = "idempotent operation; both operands are always equal in value";
+ break;
+ case LHSis1:
+ msg = "idempotent operation; the left operand is always 1";
+ break;
+ case RHSis1:
+ msg = "idempotent operation; the right operand is always 1";
+ break;
+ case LHSis0:
+ msg = "idempotent operation; the left operand is always 0";
+ break;
+ case RHSis0:
+ msg = "idempotent operation; the right operand is always 0";
+ break;
+ case Possible:
+ llvm_unreachable("Operation was never marked with an assumption");
+ case Impossible:
+ llvm_unreachable(0);
+ }
+
+ // Create the SourceRange Arrays
+ SourceRange S[2] = { i->first->getLHS()->getSourceRange(),
+ i->first->getRHS()->getSourceRange() };
+ B.EmitBasicReport("Idempotent operation", msg, i->first->getOperatorLoc(),
+ S, 2);
+ }
+ }
+}
+
+// Updates the current assumption given the new assumption
+inline void IdempotentOperationChecker::UpdateAssumption(Assumption &A,
+ const Assumption &New) {
+ switch (A) {
+ // If we don't currently have an assumption, set it
+ case Possible:
+ A = New;
+ return;
+
+ // If we have determined that a valid state happened, ignore the new
+ // assumption.
+ case Impossible:
+ return;
+
+ // Any other case means that we had a different assumption last time. We don't
+ // currently support mixing assumptions for diagnostic reasons, so we set
+ // our assumption to be impossible.
+ default:
+ A = Impossible;
+ return;
+ }
+}
+
+// Recursively find any substatements containing macros
+bool IdempotentOperationChecker::containsMacro(const Stmt *S) {
+ if (S->getLocStart().isMacroID())
+ return true;
+
+ if (S->getLocEnd().isMacroID())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsMacro(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing enum constants
+bool IdempotentOperationChecker::containsEnum(const Stmt *S) {
+ const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(S);
+
+ if (DR && isa<EnumConstantDecl>(DR->getDecl()))
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsEnum(child))
+ return true;
+
+ return false;
+}
+
+// Recursively find any substatements containing __builtin_offset_of
+bool IdempotentOperationChecker::containsBuiltinOffsetOf(const Stmt *S) {
+ const UnaryOperator *UO = dyn_cast<UnaryOperator>(S);
+
+ if (UO && UO->getOpcode() == UnaryOperator::OffsetOf)
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsBuiltinOffsetOf(child))
+ return true;
+
+ return false;
+}
+
+bool IdempotentOperationChecker::containsZeroConstant(const Stmt *S) {
+ const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S);
+ if (IL && IL->getValue() == 0)
+ return true;
+
+ const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S);
+ if (FL && FL->getValue().isZero())
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsZeroConstant(child))
+ return true;
+
+ return false;
+}
+
+bool IdempotentOperationChecker::containsOneConstant(const Stmt *S) {
+ const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(S);
+ if (IL && IL->getValue() == 1)
+ return true;
+
+ const FloatingLiteral *FL = dyn_cast<FloatingLiteral>(S);
+ const llvm::APFloat one(1.0);
+ if (FL && FL->getValue().compare(one) == llvm::APFloat::cmpEqual)
+ return true;
+
+ for (Stmt::const_child_iterator I = S->child_begin(); I != S->child_end();
+ ++I)
+ if (const Stmt *child = *I)
+ if (containsOneConstant(child))
+ return true;
+
+ return false;
+}
+
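Note: per the operation table in the file header above, the checker warns when an operation is pointless on every analyzed path. A short, illustrative C sketch of expressions it is meant to flag (not a test case from the commit; names are invented):

    int idempotent_examples(int x, int y) {
      y = y;            /* '=' with equal operands: self-assignment */
      x = x + 0;        /* '+' with a zero right operand leaves x unchanged */
      x = x * 1;        /* '*' with a right operand of 1 leaves x unchanged */
      return x & x;     /* '&' with equal operands is just x */
    }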
diff --git a/lib/Checker/LLVMConventionsChecker.cpp b/lib/Checker/LLVMConventionsChecker.cpp
index 39ded431279c..0576f08e4e01 100644
--- a/lib/Checker/LLVMConventionsChecker.cpp
+++ b/lib/Checker/LLVMConventionsChecker.cpp
@@ -34,13 +34,15 @@ static bool IsLLVMStringRef(QualType T) {
"class llvm::StringRef";
}
-static bool InStdNamespace(const Decl *D) {
+/// Check whether the declaration is semantically inside the top-level
+/// namespace named by NS.
+static bool InNamespace(const Decl *D, const llvm::StringRef &NS) {
const DeclContext *DC = D->getDeclContext();
const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
if (!ND)
return false;
const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "std")
+ if (!II || !II->getName().equals(NS))
return false;
DC = ND->getDeclContext();
return isa<TranslationUnitDecl>(DC);
@@ -56,50 +58,26 @@ static bool IsStdString(QualType T) {
const TypedefDecl *TD = TT->getDecl();
- if (!InStdNamespace(TD))
+ if (!InNamespace(TD, "std"))
return false;
return TD->getName() == "string";
}
-static bool InClangNamespace(const Decl *D) {
- const DeclContext *DC = D->getDeclContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
- if (!ND)
- return false;
- const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "clang")
- return false;
- DC = ND->getDeclContext();
- return isa<TranslationUnitDecl>(DC);
-}
-
-static bool InLLVMNamespace(const Decl *D) {
- const DeclContext *DC = D->getDeclContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(D->getDeclContext());
- if (!ND)
- return false;
- const IdentifierInfo *II = ND->getIdentifier();
- if (!II || II->getName() != "llvm")
- return false;
- DC = ND->getDeclContext();
- return isa<TranslationUnitDecl>(DC);
-}
-
static bool IsClangType(const RecordDecl *RD) {
- return RD->getName() == "Type" && InClangNamespace(RD);
+ return RD->getName() == "Type" && InNamespace(RD, "clang");
}
static bool IsClangDecl(const RecordDecl *RD) {
- return RD->getName() == "Decl" && InClangNamespace(RD);
+ return RD->getName() == "Decl" && InNamespace(RD, "clang");
}
static bool IsClangStmt(const RecordDecl *RD) {
- return RD->getName() == "Stmt" && InClangNamespace(RD);
+ return RD->getName() == "Stmt" && InNamespace(RD, "clang");
}
-static bool isClangAttr(const RecordDecl *RD) {
- return RD->getName() == "Attr" && InClangNamespace(RD);
+static bool IsClangAttr(const RecordDecl *RD) {
+ return RD->getName() == "Attr" && InNamespace(RD, "clang");
}
static bool IsStdVector(QualType T) {
@@ -110,7 +88,7 @@ static bool IsStdVector(QualType T) {
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
- if (!TD || !InStdNamespace(TD))
+ if (!TD || !InNamespace(TD, "std"))
return false;
return TD->getName() == "vector";
@@ -124,7 +102,7 @@ static bool IsSmallVector(QualType T) {
TemplateName TM = TS->getTemplateName();
TemplateDecl *TD = TM.getAsTemplateDecl();
- if (!TD || !InLLVMNamespace(TD))
+ if (!TD || !InNamespace(TD, "llvm"))
return false;
return TD->getName() == "SmallVector";
@@ -214,7 +192,7 @@ static bool AllocatesMemory(QualType T) {
// This type checking could be sped up via dynamic programming.
static bool IsPartOfAST(const CXXRecordDecl *R) {
- if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || isClangAttr(R))
+ if (IsClangStmt(R) || IsClangType(R) || IsClangDecl(R) || IsClangAttr(R))
return true;
for (CXXRecordDecl::base_class_const_iterator I = R->bases_begin(),
@@ -316,7 +294,7 @@ static void ScanCodeDecls(DeclContext *DC, BugReporter &BR) {
Decl *D = *I;
- if (D->getBody())
+ if (D->hasBody())
CheckStringRefAssignedTemporary(D, BR);
if (CXXRecordDecl *R = dyn_cast<CXXRecordDecl>(D))
diff --git a/lib/Checker/Makefile b/lib/Checker/Makefile
index c45ab294dec4..1bc652916403 100644
--- a/lib/Checker/Makefile
+++ b/lib/Checker/Makefile
@@ -11,11 +11,9 @@
#
##===----------------------------------------------------------------------===##
-LEVEL = ../../../..
+CLANG_LEVEL := ../..
LIBRARYNAME := clangChecker
BUILD_ARCHIVE = 1
-CPP.Flags += -I$(PROJ_SRC_DIR)/../../include -I$(PROJ_OBJ_DIR)/../../include
-
-include $(LEVEL)/Makefile.common
+include $(CLANG_LEVEL)/Makefile
diff --git a/lib/Checker/MallocChecker.cpp b/lib/Checker/MallocChecker.cpp
index 086dbd8fdd36..dcc21ca38619 100644
--- a/lib/Checker/MallocChecker.cpp
+++ b/lib/Checker/MallocChecker.cpp
@@ -59,15 +59,16 @@ class MallocChecker : public CheckerVisitor<MallocChecker> {
BuiltinBug *BT_DoubleFree;
BuiltinBug *BT_Leak;
BuiltinBug *BT_UseFree;
- IdentifierInfo *II_malloc, *II_free, *II_realloc;
+ BuiltinBug *BT_BadFree;
+ IdentifierInfo *II_malloc, *II_free, *II_realloc, *II_calloc;
public:
MallocChecker()
- : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0),
- II_malloc(0), II_free(0), II_realloc(0) {}
+ : BT_DoubleFree(0), BT_Leak(0), BT_UseFree(0), BT_BadFree(0),
+ II_malloc(0), II_free(0), II_realloc(0), II_calloc(0) {}
static void *getTag();
bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
- void EvalDeadSymbols(CheckerContext &C,const Stmt *S,SymbolReaper &SymReaper);
+ void EvalDeadSymbols(CheckerContext &C, SymbolReaper &SymReaper);
void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *S);
const GRState *EvalAssume(const GRState *state, SVal Cond, bool Assumption);
@@ -76,12 +77,24 @@ public:
private:
void MallocMem(CheckerContext &C, const CallExpr *CE);
const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
- const Expr *SizeEx, const GRState *state);
+ const Expr *SizeEx, SVal Init,
+ const GRState *state) {
+ return MallocMemAux(C, CE, state->getSVal(SizeEx), Init, state);
+ }
+ const GRState *MallocMemAux(CheckerContext &C, const CallExpr *CE,
+ SVal SizeEx, SVal Init,
+ const GRState *state);
+
void FreeMem(CheckerContext &C, const CallExpr *CE);
const GRState *FreeMemAux(CheckerContext &C, const CallExpr *CE,
const GRState *state);
void ReallocMem(CheckerContext &C, const CallExpr *CE);
+ void CallocMem(CheckerContext &C, const CallExpr *CE);
+
+ bool SummarizeValue(llvm::raw_ostream& os, SVal V);
+ bool SummarizeRegion(llvm::raw_ostream& os, const MemRegion *MR);
+ void ReportBadFree(CheckerContext &C, SVal ArgVal, SourceRange range);
};
} // end anonymous namespace
@@ -120,6 +133,8 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
II_free = &Ctx.Idents.get("free");
if (!II_realloc)
II_realloc = &Ctx.Idents.get("realloc");
+ if (!II_calloc)
+ II_calloc = &Ctx.Idents.get("calloc");
if (FD->getIdentifier() == II_malloc) {
MallocMem(C, CE);
@@ -136,30 +151,44 @@ bool MallocChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
return true;
}
+ if (FD->getIdentifier() == II_calloc) {
+ CallocMem(C, CE);
+ return true;
+ }
+
return false;
}
void MallocChecker::MallocMem(CheckerContext &C, const CallExpr *CE) {
- const GRState *state = MallocMemAux(C, CE, CE->getArg(0), C.getState());
+ const GRState *state = MallocMemAux(C, CE, CE->getArg(0), UndefinedVal(),
+ C.getState());
C.addTransition(state);
}
const GRState *MallocChecker::MallocMemAux(CheckerContext &C,
const CallExpr *CE,
- const Expr *SizeEx,
+ SVal Size, SVal Init,
const GRState *state) {
unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
ValueManager &ValMgr = C.getValueManager();
+ // Set the return value.
SVal RetVal = ValMgr.getConjuredSymbolVal(NULL, CE, CE->getType(), Count);
+ state = state->BindExpr(CE, RetVal);
- SVal Size = state->getSVal(SizeEx);
+ // Fill the region with the initialization value.
+ state = state->bindDefault(RetVal, Init);
- state = C.getEngine().getStoreManager().setExtent(state, RetVal.getAsRegion(),
- Size);
+ // Set the region's extent equal to the Size parameter.
+ const SymbolicRegion *R = cast<SymbolicRegion>(RetVal.getAsRegion());
+ DefinedOrUnknownSVal Extent = R->getExtent(ValMgr);
+ DefinedOrUnknownSVal DefinedSize = cast<DefinedOrUnknownSVal>(Size);
+
+ SValuator &SVator = ValMgr.getSValuator();
+ DefinedOrUnknownSVal ExtentMatchesSize =
+ SVator.EvalEQ(state, Extent, DefinedSize);
+ state = state->Assume(ExtentMatchesSize, true);
- state = state->BindExpr(CE, RetVal);
-
SymbolRef Sym = RetVal.getAsLocSymbol();
assert(Sym);
// Set the symbol's state to Allocated.
@@ -175,18 +204,59 @@ void MallocChecker::FreeMem(CheckerContext &C, const CallExpr *CE) {
const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
const GRState *state) {
- SVal ArgVal = state->getSVal(CE->getArg(0));
+ const Expr *ArgExpr = CE->getArg(0);
+ SVal ArgVal = state->getSVal(ArgExpr);
// If ptr is NULL, no operation is performed.
if (ArgVal.isZeroConstant())
return state;
+
+ // Unknown values could easily be okay
+ // Undefined values are handled elsewhere
+ if (ArgVal.isUnknownOrUndef())
+ return state;
- SymbolRef Sym = ArgVal.getAsLocSymbol();
-
+ const MemRegion *R = ArgVal.getAsRegion();
+
+ // Nonlocs can't be freed, of course.
+ // Non-region locations (labels and fixed addresses) also shouldn't be freed.
+ if (!R) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ R = R->StripCasts();
+
+ // Blocks might show up as heap data, but should not be free()d
+ if (isa<BlockDataRegion>(R)) {
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ const MemSpaceRegion *MS = R->getMemorySpace();
+
+ // Parameters, locals, statics, and globals shouldn't be freed.
+ if (!(isa<UnknownSpaceRegion>(MS) || isa<HeapSpaceRegion>(MS))) {
+ // FIXME: at the time this code was written, malloc() regions were
+ // represented by conjured symbols, which are all in UnknownSpaceRegion.
+ // This means that there isn't actually anything from HeapSpaceRegion
+ // that should be freed, even though we allow it here.
+ // Of course, free() can work on memory allocated outside the current
+ // function, so UnknownSpaceRegion is always a possibility.
+ // False negatives are better than false positives.
+
+ ReportBadFree(C, ArgVal, ArgExpr->getSourceRange());
+ return NULL;
+ }
+
+ const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(R);
// Various cases could lead to non-symbol values here.
- if (!Sym)
+ // For now, ignore them.
+ if (!SR)
return state;
+ SymbolRef Sym = SR->getSymbol();
+
const RefState *RS = state->get<RegionState>(Sym);
// If the symbol has not been tracked, return. This is possible when free() is
@@ -214,6 +284,135 @@ const GRState *MallocChecker::FreeMemAux(CheckerContext &C, const CallExpr *CE,
return state->set<RegionState>(Sym, RefState::getReleased(CE));
}
+bool MallocChecker::SummarizeValue(llvm::raw_ostream& os, SVal V) {
+ if (nonloc::ConcreteInt *IntVal = dyn_cast<nonloc::ConcreteInt>(&V))
+ os << "an integer (" << IntVal->getValue() << ")";
+ else if (loc::ConcreteInt *ConstAddr = dyn_cast<loc::ConcreteInt>(&V))
+ os << "a constant address (" << ConstAddr->getValue() << ")";
+ else if (loc::GotoLabel *Label = dyn_cast<loc::GotoLabel>(&V))
+ os << "the address of the label '"
+ << Label->getLabel()->getID()->getName()
+ << "'";
+ else
+ return false;
+
+ return true;
+}
+
+bool MallocChecker::SummarizeRegion(llvm::raw_ostream& os,
+ const MemRegion *MR) {
+ switch (MR->getKind()) {
+ case MemRegion::FunctionTextRegionKind: {
+ const FunctionDecl *FD = cast<FunctionTextRegion>(MR)->getDecl();
+ if (FD)
+ os << "the address of the function '" << FD << "'";
+ else
+ os << "the address of a function";
+ return true;
+ }
+ case MemRegion::BlockTextRegionKind:
+ os << "block text";
+ return true;
+ case MemRegion::BlockDataRegionKind:
+ // FIXME: where the block came from?
+ os << "a block";
+ return true;
+ default: {
+ const MemSpaceRegion *MS = MR->getMemorySpace();
+
+ switch (MS->getKind()) {
+ case MemRegion::StackLocalsSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the local variable '" << VD->getName() << "'";
+ else
+ os << "the address of a local stack variable";
+ return true;
+ }
+ case MemRegion::StackArgumentsSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD)
+ os << "the address of the parameter '" << VD->getName() << "'";
+ else
+ os << "the address of a parameter";
+ return true;
+ }
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind: {
+ const VarRegion *VR = dyn_cast<VarRegion>(MR);
+ const VarDecl *VD;
+ if (VR)
+ VD = VR->getDecl();
+ else
+ VD = NULL;
+
+ if (VD) {
+ if (VD->isStaticLocal())
+ os << "the address of the static variable '" << VD->getName() << "'";
+ else
+ os << "the address of the global variable '" << VD->getName() << "'";
+ } else
+ os << "the address of a global variable";
+ return true;
+ }
+ default:
+ return false;
+ }
+ }
+ }
+}
+
+void MallocChecker::ReportBadFree(CheckerContext &C, SVal ArgVal,
+ SourceRange range) {
+ ExplodedNode *N = C.GenerateSink();
+ if (N) {
+ if (!BT_BadFree)
+ BT_BadFree = new BuiltinBug("Bad free");
+
+ llvm::SmallString<100> buf;
+ llvm::raw_svector_ostream os(buf);
+
+ const MemRegion *MR = ArgVal.getAsRegion();
+ if (MR) {
+ while (const ElementRegion *ER = dyn_cast<ElementRegion>(MR))
+ MR = ER->getSuperRegion();
+
+ // Special case for alloca()
+ if (isa<AllocaRegion>(MR))
+ os << "Argument to free() was allocated by alloca(), not malloc()";
+ else {
+ os << "Argument to free() is ";
+ if (SummarizeRegion(os, MR))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+ } else {
+ os << "Argument to free() is ";
+ if (SummarizeValue(os, ArgVal))
+ os << ", which is not memory allocated by malloc()";
+ else
+ os << "not memory allocated by malloc()";
+ }
+
+ EnhancedBugReport *R = new EnhancedBugReport(*BT_BadFree, os.str(), N);
+ R->addRange(range);
+ C.EmitReport(R);
+ }
+}
+
void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Arg0 = CE->getArg(0);
@@ -234,7 +433,8 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
if (Sym)
stateEqual = stateEqual->set<RegionState>(Sym, RefState::getReleased(CE));
- const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1), stateEqual);
+ const GRState *stateMalloc = MallocMemAux(C, CE, CE->getArg(1),
+ UndefinedVal(), stateEqual);
C.addTransition(stateMalloc);
}
@@ -256,15 +456,31 @@ void MallocChecker::ReallocMem(CheckerContext &C, const CallExpr *CE) {
if (stateFree) {
// FIXME: We should copy the content of the original buffer.
const GRState *stateRealloc = MallocMemAux(C, CE, CE->getArg(1),
- stateFree);
+ UnknownVal(), stateFree);
C.addTransition(stateRealloc);
}
}
}
}
-void MallocChecker::EvalDeadSymbols(CheckerContext &C, const Stmt *S,
- SymbolReaper &SymReaper) {
+void MallocChecker::CallocMem(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SVator = C.getSValuator();
+
+ SVal Count = state->getSVal(CE->getArg(0));
+ SVal EleSize = state->getSVal(CE->getArg(1));
+ SVal TotalSize = SVator.EvalBinOp(state, BinaryOperator::Mul, Count, EleSize,
+ ValMgr.getContext().getSizeType());
+
+ SVal Zero = ValMgr.makeZeroVal(ValMgr.getContext().CharTy);
+
+ state = MallocMemAux(C, CE, TotalSize, Zero, state);
+ C.addTransition(state);
+}
+
+void MallocChecker::EvalDeadSymbols(CheckerContext &C,SymbolReaper &SymReaper) {
for (SymbolReaper::dead_iterator I = SymReaper.dead_begin(),
E = SymReaper.dead_end(); I != E; ++I) {
SymbolRef Sym = *I;
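Note: for context, the calloc() modeling and the new BT_BadFree diagnostics above target C code along these lines; the snippet is an illustrative sketch rather than a test from the commit:

    #include <stdlib.h>

    void malloc_checker_examples(void) {
      int *p = calloc(4, sizeof(int));  /* modeled as a zero-filled region of 4 * sizeof(int) bytes */
      free(p);                          /* fine: releasing memory obtained from the allocator */

      int local = 0;
      free(&local);                     /* reported: argument is the address of a local stack variable */
    }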
diff --git a/lib/Checker/MemRegion.cpp b/lib/Checker/MemRegion.cpp
index 575458c9dc79..9cfeb7ae2b5c 100644
--- a/lib/Checker/MemRegion.cpp
+++ b/lib/Checker/MemRegion.cpp
@@ -14,6 +14,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Checker/PathSensitive/ValueManager.h"
#include "clang/Analysis/AnalysisContext.h"
#include "clang/Analysis/Support/BumpVector.h"
#include "clang/AST/CharUnits.h"
@@ -29,22 +30,22 @@ template<typename RegionTy> struct MemRegionManagerTrait;
template <typename RegionTy, typename A1>
RegionTy* MemRegionManager::getRegion(const A1 a1) {
-
+
const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1);
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
@@ -56,72 +57,72 @@ RegionTy* MemRegionManager::getSubRegion(const A1 a1,
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2>
RegionTy* MemRegionManager::getRegion(const A1 a1, const A2 a2) {
-
+
const typename MemRegionManagerTrait<RegionTy>::SuperRegionTy *superRegion =
MemRegionManagerTrait<RegionTy>::getSuperRegion(*this, a1, a2);
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2>
RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2,
const MemRegion *superRegion) {
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
template <typename RegionTy, typename A1, typename A2, typename A3>
RegionTy* MemRegionManager::getSubRegion(const A1 a1, const A2 a2, const A3 a3,
const MemRegion *superRegion) {
-
+
llvm::FoldingSetNodeID ID;
RegionTy::ProfileRegion(ID, a1, a2, a3, superRegion);
void* InsertPos;
RegionTy* R = cast_or_null<RegionTy>(Regions.FindNodeOrInsertPos(ID,
InsertPos));
-
+
if (!R) {
R = (RegionTy*) A.Allocate<RegionTy>();
new (R) RegionTy(a1, a2, a3, superRegion);
Regions.InsertNode(R, InsertPos);
}
-
+
return R;
}
@@ -171,6 +172,53 @@ const StackFrameContext *VarRegion::getStackFrame() const {
}
//===----------------------------------------------------------------------===//
+// Region extents.
+//===----------------------------------------------------------------------===//
+
+DefinedOrUnknownSVal DeclRegion::getExtent(ValueManager& ValMgr) const {
+ ASTContext& Ctx = ValMgr.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<VariableArrayType>(T))
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+ if (isa<IncompleteArrayType>(T))
+ return UnknownVal();
+
+ CharUnits Size = Ctx.getTypeSizeInChars(T);
+ QualType SizeTy = Ctx.getSizeType();
+ return ValMgr.makeIntVal(Size.getQuantity(), SizeTy);
+}
+
+DefinedOrUnknownSVal FieldRegion::getExtent(ValueManager& ValMgr) const {
+ DefinedOrUnknownSVal Extent = DeclRegion::getExtent(ValMgr);
+
+ // A zero-length array at the end of a struct often stands for dynamically-
+ // allocated extra memory.
+ if (Extent.isZeroConstant()) {
+ ASTContext& Ctx = ValMgr.getContext();
+ QualType T = getDesugaredValueType(Ctx);
+
+ if (isa<ConstantArrayType>(T))
+ return UnknownVal();
+ }
+
+ return Extent;
+}
+
+DefinedOrUnknownSVal AllocaRegion::getExtent(ValueManager& ValMgr) const {
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal SymbolicRegion::getExtent(ValueManager& ValMgr) const {
+ return nonloc::SymbolVal(ValMgr.getSymbolManager().getExtentSymbol(this));
+}
+
+DefinedOrUnknownSVal StringRegion::getExtent(ValueManager& ValMgr) const {
+ QualType SizeTy = ValMgr.getContext().getSizeType();
+ return ValMgr.makeIntVal(getStringLiteral()->getByteLength()+1, SizeTy);
+}
+
+//===----------------------------------------------------------------------===//
// FoldingSet profiling.
//===----------------------------------------------------------------------===//
@@ -183,6 +231,11 @@ void StackSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(getStackFrame());
}
+void StaticGlobalSpaceRegion::Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddInteger((unsigned)getKind());
+ ID.AddPointer(getCodeRegion());
+}
+
void StringRegion::ProfileRegion(llvm::FoldingSetNodeID& ID,
const StringLiteral* Str,
const MemRegion* superRegion) {
@@ -226,7 +279,7 @@ void CXXThisRegion::ProfileRegion(llvm::FoldingSetNodeID &ID,
void CXXThisRegion::Profile(llvm::FoldingSetNodeID &ID) const {
CXXThisRegion::ProfileRegion(ID, ThisPointerTy, superRegion);
}
-
+
void DeclRegion::ProfileRegion(llvm::FoldingSetNodeID& ID, const Decl* D,
const MemRegion* superRegion, Kind k) {
ID.AddInteger((unsigned) k);
@@ -349,7 +402,6 @@ void BlockDataRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "block_data{" << BC << '}';
}
-
void CompoundLiteralRegion::dumpToStream(llvm::raw_ostream& os) const {
// FIXME: More elaborate pretty-printing.
os << "{ " << (void*) CL << " }";
@@ -368,6 +420,10 @@ void FieldRegion::dumpToStream(llvm::raw_ostream& os) const {
os << superRegion << "->" << getDecl();
}
+void NonStaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "NonStaticGlobalSpaceRegion";
+}
+
void ObjCIvarRegion::dumpToStream(llvm::raw_ostream& os) const {
os << "ivar{" << superRegion << ',' << getDecl() << '}';
}
@@ -392,6 +448,10 @@ void RegionRawOffset::dumpToStream(llvm::raw_ostream& os) const {
os << "raw_offset{" << getRegion() << ',' << getByteOffset() << '}';
}
+void StaticGlobalSpaceRegion::dumpToStream(llvm::raw_ostream &os) const {
+ os << "StaticGlobalsMemSpace{" << CR << '}';
+}
+
//===----------------------------------------------------------------------===//
// MemRegionManager methods.
//===----------------------------------------------------------------------===//
@@ -412,7 +472,7 @@ const REG *MemRegionManager::LazyAllocate(REG*& region, ARG a) {
region = (REG*) A.Allocate<REG>();
new (region) REG(this, a);
}
-
+
return region;
}
@@ -442,8 +502,18 @@ MemRegionManager::getStackArgumentsRegion(const StackFrameContext *STC) {
return R;
}
-const GlobalsSpaceRegion *MemRegionManager::getGlobalsRegion() {
- return LazyAllocate(globals);
+const GlobalsSpaceRegion
+*MemRegionManager::getGlobalsRegion(const CodeTextRegion *CR) {
+ if (!CR)
+ return LazyAllocate(globals);
+
+ StaticGlobalSpaceRegion *&R = StaticsGlobalSpaceRegions[CR];
+ if (R)
+ return R;
+
+ R = A.Allocate<StaticGlobalSpaceRegion>();
+ new (R) StaticGlobalSpaceRegion(this, CR);
+ return R;
}
const HeapSpaceRegion *MemRegionManager::getHeapRegion() {
@@ -462,7 +532,7 @@ const MemSpaceRegion *MemRegionManager::getCodeRegion() {
// Constructing regions.
//===----------------------------------------------------------------------===//
-const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str) {
+const StringRegion* MemRegionManager::getStringRegion(const StringLiteral* Str){
return getSubRegion<StringRegion>(Str, getGlobalsRegion());
}
@@ -470,7 +540,9 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
const LocationContext *LC) {
const MemRegion *sReg = 0;
- if (D->hasLocalStorage()) {
+ if (D->hasGlobalStorage() && !D->isStaticLocal())
+ sReg = getGlobalsRegion();
+ else {
// FIXME: Once we implement scope handling, we will need to properly lookup
// 'D' to the proper LocationContext.
const DeclContext *DC = D->getDeclContext();
@@ -479,15 +551,32 @@ const VarRegion* MemRegionManager::getVarRegion(const VarDecl *D,
if (!STC)
sReg = getUnknownRegion();
else {
- sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
- ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
- : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ if (D->hasLocalStorage()) {
+ sReg = isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)
+ ? static_cast<const MemRegion*>(getStackArgumentsRegion(STC))
+ : static_cast<const MemRegion*>(getStackLocalsRegion(STC));
+ }
+ else {
+ assert(D->isStaticLocal());
+ const Decl *D = STC->getDecl();
+ if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D))
+ sReg = getGlobalsRegion(getFunctionTextRegion(FD));
+ else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ const BlockTextRegion *BTR =
+ getBlockTextRegion(BD,
+ C.getCanonicalType(BD->getSignatureAsWritten()->getType()),
+ STC->getAnalysisContext());
+ sReg = getGlobalsRegion(BTR);
+ }
+ else {
+ // FIXME: For ObjC-methods, we need a new CodeTextRegion. For now
+ // just use the main global memspace.
+ sReg = getGlobalsRegion();
+ }
+ }
}
}
- else {
- sReg = getGlobalsRegion();
- }
-
+
return getSubRegion<VarRegion>(D, sReg);
}
@@ -500,10 +589,10 @@ const BlockDataRegion *
MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
const LocationContext *LC) {
const MemRegion *sReg = 0;
-
- if (LC) {
+
+ if (LC) {
// FIXME: Once we implement scope handling, we want the parent region
- // to be the scope.
+ // to be the scope.
const StackFrameContext *STC = LC->getCurrentStackFrame();
assert(STC);
sReg = getStackLocalsRegion(STC);
@@ -520,9 +609,9 @@ MemRegionManager::getBlockDataRegion(const BlockTextRegion *BC,
const CompoundLiteralRegion*
MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
const LocationContext *LC) {
-
+
const MemRegion *sReg = 0;
-
+
if (CL->isFileScope())
sReg = getGlobalsRegion();
else {
@@ -530,7 +619,7 @@ MemRegionManager::getCompoundLiteralRegion(const CompoundLiteralExpr* CL,
assert(STC);
sReg = getStackLocalsRegion(STC);
}
-
+
return getSubRegion<CompoundLiteralRegion>(CL, sReg);
}
@@ -749,24 +838,24 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
AnalysisContext *AC = getCodeRegion()->getAnalysisContext();
AnalysisContext::referenced_decls_iterator I, E;
llvm::tie(I, E) = AC->getReferencedBlockVars(BC->getDecl());
-
+
if (I == E) {
ReferencedVars = (void*) 0x1;
return;
}
-
+
MemRegionManager &MemMgr = *getMemRegionManager();
llvm::BumpPtrAllocator &A = MemMgr.getAllocator();
BumpVectorContext BC(A);
-
+
typedef BumpVector<const MemRegion*> VarVec;
VarVec *BV = (VarVec*) A.Allocate<VarVec>();
new (BV) VarVec(BC, E - I);
-
+
for ( ; I != E; ++I) {
const VarDecl *VD = *I;
const VarRegion *VR = 0;
-
+
if (!VD->getAttr<BlocksAttr>() && VD->hasLocalStorage())
VR = MemMgr.getVarRegion(VD, this);
else {
@@ -776,11 +865,11 @@ void BlockDataRegion::LazyInitializeReferencedVars() {
VR = MemMgr.getVarRegion(VD, MemMgr.getUnknownRegion());
}
}
-
+
assert(VR);
BV->push_back(VR, BC);
}
-
+
ReferencedVars = BV;
}
@@ -790,7 +879,7 @@ BlockDataRegion::referenced_vars_begin() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
-
+
return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
NULL : Vec->begin());
}
@@ -801,7 +890,7 @@ BlockDataRegion::referenced_vars_end() const {
BumpVector<const MemRegion*> *Vec =
static_cast<BumpVector<const MemRegion*>*>(ReferencedVars);
-
+
return BlockDataRegion::referenced_vars_iterator(Vec == (void*) 0x1 ?
NULL : Vec->end());
}
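Note: the StaticGlobalSpaceRegion introduced above separates function-local statics from ordinary globals by keying their memory space on the enclosing code region. A small C sketch of the two cases being distinguished (names are invented):

    int global_counter;            /* placed in the ordinary global memory space */

    int next_id(void) {
      static int static_counter;   /* now placed in a StaticGlobalsMemSpace tied to next_id */
      return ++static_counter + global_counter;
    }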
diff --git a/lib/Checker/OSAtomicChecker.cpp b/lib/Checker/OSAtomicChecker.cpp
index e743528e2399..1ea1bd98d6dc 100644
--- a/lib/Checker/OSAtomicChecker.cpp
+++ b/lib/Checker/OSAtomicChecker.cpp
@@ -100,7 +100,13 @@ bool OSAtomicChecker::EvalOSAtomicCompareAndSwap(CheckerContext &C,
const GRState *state = C.getState();
ExplodedNodeSet Tmp;
SVal location = state->getSVal(theValueExpr);
- // Here we should use the value type of the region as the load type.
+  // Here we should use the value type of the region as the load type, because
+  // we are simulating the semantics of the function itself, not the semantics
+  // of passing an argument; so the type of the theValue expression is not what
+  // we are loading. But usually the type of the VarRegion is not the type we
+  // want either, since we still need to do a CastRetrievedVal in the store
+  // manager. So specifying LoadTy here could actually be omitted, but we keep
+  // it to emphasize the semantics.
QualType LoadTy;
if (const TypedRegion *TR =
dyn_cast_or_null<TypedRegion>(location.getAsRegion())) {
diff --git a/lib/Checker/PathDiagnostic.cpp b/lib/Checker/PathDiagnostic.cpp
index 963923c9ad10..cf05a7df67f3 100644
--- a/lib/Checker/PathDiagnostic.cpp
+++ b/lib/Checker/PathDiagnostic.cpp
@@ -107,7 +107,7 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
new PathDiagnosticEventPiece(Info.getLocation(), StrC.str());
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
- P->addRange(Info.getRange(i));
+ P->addRange(Info.getRange(i).getAsRange());
for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i)
P->addFixItHint(Info.getFixItHint(i));
D->push_front(P);
@@ -181,15 +181,8 @@ PathDiagnosticRange PathDiagnosticLocation::asRange() const {
if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSourceRange();
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
- // FIXME: We would like to always get the function body, even
- // when it needs to be de-serialized, but getting the
- // ASTContext here requires significant changes.
- if (Stmt *Body = FD->getBody()) {
- if (CompoundStmt *CS = dyn_cast<CompoundStmt>(Body))
- return CS->getSourceRange();
- else
- return cast<CXXTryStmt>(Body)->getSourceRange();
- }
+ if (Stmt *Body = FD->getBody())
+ return Body->getSourceRange();
}
else {
SourceLocation L = D->getLocation();
diff --git a/lib/Frontend/PlistDiagnostics.cpp b/lib/Checker/PlistDiagnostics.cpp
index 5706a07e5a0f..13accbbff8c7 100644
--- a/lib/Frontend/PlistDiagnostics.cpp
+++ b/lib/Checker/PlistDiagnostics.cpp
@@ -11,7 +11,7 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Frontend/PathDiagnosticClients.h"
+#include "clang/Checker/PathDiagnosticClients.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/FileManager.h"
diff --git a/lib/Checker/RangeConstraintManager.cpp b/lib/Checker/RangeConstraintManager.cpp
index c904c33e08d2..2a35d326a988 100644
--- a/lib/Checker/RangeConstraintManager.cpp
+++ b/lib/Checker/RangeConstraintManager.cpp
@@ -105,97 +105,69 @@ public:
return ranges.isSingleton() ? ranges.begin()->getConcreteValue() : 0;
}
- /// AddEQ - Create a new RangeSet with the additional constraint that the
- /// value be equal to V.
- RangeSet AddEQ(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- // Search for a range that includes 'V'. If so, return a new RangeSet
- // representing { [V, V] }.
- for (PrimRangeSet::iterator i = begin(), e = end(); i!=e; ++i)
- if (i->Includes(V))
- return RangeSet(F, V, V);
-
- return RangeSet(F);
- }
-
- /// AddNE - Create a new RangeSet with the additional constraint that the
- /// value be not be equal to V.
- RangeSet AddNE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = ranges;
-
- // FIXME: We can perhaps enhance ImmutableSet to do this search for us
- // in log(N) time using the sorted property of the internal AVL tree.
- for (iterator i = begin(), e = end(); i != e; ++i) {
- if (i->Includes(V)) {
- // Remove the old range.
- newRanges = F.Remove(newRanges, *i);
- // Split the old range into possibly one or two ranges.
- if (V != i->From())
- newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
- if (V != i->To())
- newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- // All of the ranges are non-overlapping, so we can stop.
+private:
+ void IntersectInRange(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper,
+ PrimRangeSet &newRanges,
+ PrimRangeSet::iterator &i,
+ PrimRangeSet::iterator &e) const {
+ // There are six cases for each range R in the set:
+ // 1. R is entirely before the intersection range.
+ // 2. R is entirely after the intersection range.
+ // 3. R contains the entire intersection range.
+ // 4. R starts before the intersection range and ends in the middle.
+ // 5. R starts in the middle of the intersection range and ends after it.
+ // 6. R is entirely contained in the intersection range.
+ // These correspond to each of the conditions below.
+ for (/* i = begin(), e = end() */; i != e; ++i) {
+ if (i->To() < Lower) {
+ continue;
+ }
+ if (i->From() > Upper) {
break;
}
- }
-
- return newRanges;
- }
-
- /// AddNE - Create a new RangeSet with the additional constraint that the
- /// value be less than V.
- RangeSet AddLT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
-
- for (iterator i = begin(), e = end() ; i != e ; ++i) {
- if (i->Includes(V) && i->From() < V)
- newRanges = F.Add(newRanges, Range(i->From(), BV.Sub1(V)));
- else if (i->To() < V)
- newRanges = F.Add(newRanges, *i);
- }
-
- return newRanges;
- }
-
- RangeSet AddLE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
- for (iterator i = begin(), e = end(); i != e; ++i) {
- // Strictly we should test for includes *V + 1, but no harm is
- // done by this formulation
- if (i->Includes(V))
- newRanges = F.Add(newRanges, Range(i->From(), V));
- else if (i->To() <= V)
- newRanges = F.Add(newRanges, *i);
- }
-
- return newRanges;
- }
-
- RangeSet AddGT(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
- PrimRangeSet newRanges = F.GetEmptySet();
-
- for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
- if (i->Includes(V) && i->To() > V)
- newRanges = F.Add(newRanges, Range(BV.Add1(V), i->To()));
- else if (i->From() > V)
- newRanges = F.Add(newRanges, *i);
+ if (i->Includes(Lower)) {
+ if (i->Includes(Upper)) {
+ newRanges = F.Add(newRanges, Range(BV.getValue(Lower),
+ BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.Add(newRanges, Range(BV.getValue(Lower), i->To()));
+ } else {
+ if (i->Includes(Upper)) {
+ newRanges = F.Add(newRanges, Range(i->From(), BV.getValue(Upper)));
+ break;
+ } else
+ newRanges = F.Add(newRanges, *i);
+ }
}
-
- return newRanges;
}
- RangeSet AddGE(BasicValueFactory &BV, Factory &F, const llvm::APSInt &V) {
+public:
+ // Returns a set containing the values in the receiving set, intersected with
+ // the closed range [Lower, Upper]. Unlike the Range type, this range uses
+ // modular arithmetic, corresponding to the common treatment of C integer
+ // overflow. Thus, if the Lower bound is greater than the Upper bound, the
+ // range is taken to wrap around. This is equivalent to taking the
+ // intersection with the two ranges [Min, Upper] and [Lower, Max],
+ // or, alternatively, /removing/ all integers between Upper and Lower.
+ RangeSet Intersect(BasicValueFactory &BV, Factory &F,
+ const llvm::APSInt &Lower,
+ const llvm::APSInt &Upper) const {
PrimRangeSet newRanges = F.GetEmptySet();
- for (PrimRangeSet::iterator i = begin(), e = end(); i != e; ++i) {
- // Strictly we should test for includes *V - 1, but no harm is
- // done by this formulation
- if (i->Includes(V))
- newRanges = F.Add(newRanges, Range(V, i->To()));
- else if (i->From() >= V)
- newRanges = F.Add(newRanges, *i);
+ PrimRangeSet::iterator i = begin(), e = end();
+ if (Lower <= Upper)
+ IntersectInRange(BV, F, Lower, Upper, newRanges, i, e);
+ else {
+ // The order of the next two statements is important!
+ // IntersectInRange() does not reset the iteration state for i and e.
+      // Therefore, the lower range must be handled first.
+ IntersectInRange(BV, F, BV.getMinValue(Upper), Upper, newRanges, i, e);
+ IntersectInRange(BV, F, Lower, BV.getMaxValue(Lower), newRanges, i, e);
}
-
return newRanges;
}
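To make the wrap-around behaviour of Intersect() concrete, here is a minimal, self-contained sketch of the same idea over plain std::vector ranges rather than the analyzer's immutable RangeSet. The names Range, RangeVec, clip and intersect are invented for the illustration; the real code shares one iterator across the two passes so the result stays sorted without an explicit merge.

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

// A closed interval [from, to] and a sorted, non-overlapping list of them.
using Range = std::pair<uint64_t, uint64_t>;
using RangeVec = std::vector<Range>;

// Clip every range to the ordinary (non-wrapping) interval [lo, hi].
static RangeVec clip(const RangeVec &rs, uint64_t lo, uint64_t hi) {
  RangeVec out;
  for (const Range &r : rs) {
    if (r.second < lo || r.first > hi)
      continue;                                     // entirely outside [lo, hi]
    out.push_back({std::max(r.first, lo), std::min(r.second, hi)});
  }
  return out;
}

// Wrap-around intersection: when lo > hi the interval wraps past the maximum
// value, so it is handled as [min, hi] followed by [lo, max], lower part first.
static RangeVec intersect(const RangeVec &rs, uint64_t lo, uint64_t hi,
                          uint64_t min = 0, uint64_t max = UINT64_MAX) {
  if (lo <= hi)
    return clip(rs, lo, hi);
  RangeVec out = clip(rs, min, hi);
  RangeVec upper = clip(rs, lo, max);
  out.insert(out.end(), upper.begin(), upper.end());
  return out;
}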
@@ -237,23 +209,29 @@ public:
RangeConstraintManager(GRSubEngine &subengine)
: SimpleConstraintManager(subengine) {}
- const GRState* AssumeSymNE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymEQ(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLT(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymLT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGT(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymGT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymGE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymGE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
- const GRState* AssumeSymLE(const GRState* St, SymbolRef sym,
- const llvm::APSInt& V);
+ const GRState* AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment);
const llvm::APSInt* getSymVal(const GRState* St, SymbolRef sym) const;
@@ -303,10 +281,6 @@ RangeConstraintManager::RemoveDeadBindings(const GRState* state,
return state->set<ConstraintRange>(CR);
}
-//===------------------------------------------------------------------------===
-// AssumeSymX methods: public interface for RangeConstraintManager.
-//===------------------------------------------------------------------------===/
-
RangeSet
RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
if (ConstraintRangeTy::data_type* V = state->get<ConstraintRange>(sym))
@@ -323,20 +297,127 @@ RangeConstraintManager::GetRange(const GRState *state, SymbolRef sym) {
// AssumeSymX methods: public interface for RangeConstraintManager.
//===------------------------------------------------------------------------===/
-#define AssumeX(OP)\
-const GRState*\
-RangeConstraintManager::AssumeSym ## OP(const GRState* state, SymbolRef sym,\
- const llvm::APSInt& V){\
- const RangeSet& R = GetRange(state, sym).Add##OP(state->getBasicVals(), F, V);\
- return !R.isEmpty() ? state->set<ConstraintRange>(sym, R) : NULL;\
+// The syntax for ranges below is mathematical, using [x, y] for closed ranges
+// and (x, y) for open ranges. These ranges are modular, corresponding with
+// a common treatment of C integer overflow. This means that these methods
+// do not have to worry about overflow; RangeSet::Intersect can handle such a
+// "wraparound" range.
+// As an example, the range [UINT_MAX-1, 3) contains five values: UINT_MAX-1,
+// UINT_MAX, 0, 1, and 2.
+
+const GRState*
+RangeConstraintManager::AssumeSymNE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Lower;
+ --Lower;
+ ++Upper;
+
+ // [Int-Adjustment+1, Int-Adjustment-1]
+ // Notice that the lower bound is greater than the upper bound.
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Upper, Lower);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
}
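As a quick sanity check of the range built here, the following standalone snippet (plain uint8_t arithmetic, not the analyzer's APSInt machinery) confirms that the interval [V+1, V-1], whose lower bound exceeds its upper bound, covers every 8-bit value except V itself:

#include <cassert>
#include <cstdint>

int main() {
  // Membership in a wrapped closed interval [lo, hi] with lo > hi
  // is (x >= lo || x <= hi).
  const uint8_t V = 5;
  const uint8_t lo = V + 1, hi = V - 1;
  for (unsigned v = 0; v <= 255; ++v) {
    uint8_t x = static_cast<uint8_t>(v);
    assert((x >= lo || x <= hi) == (x != V));
  }
  return 0;
}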
-AssumeX(EQ)
-AssumeX(NE)
-AssumeX(LT)
-AssumeX(GT)
-AssumeX(LE)
-AssumeX(GE)
+const GRState*
+RangeConstraintManager::AssumeSymEQ(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ // [Int-Adjustment, Int-Adjustment]
+ BasicValueFactory &BV = state->getBasicVals();
+ llvm::APSInt AdjInt = Int-Adjustment;
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, AdjInt, AdjInt);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymLT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always false.
+ if (Int == Min)
+ return NULL;
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+ --Upper;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymGT(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always false.
+ if (Int == Max)
+ return NULL;
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+ ++Lower;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymGE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ // Special case for Int == Min. This is always feasible.
+ if (Int == Min)
+ return state;
+
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ llvm::APSInt Lower = Int-Adjustment;
+ llvm::APSInt Upper = Max-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
+
+const GRState*
+RangeConstraintManager::AssumeSymLE(const GRState* state, SymbolRef sym,
+ const llvm::APSInt& Int,
+ const llvm::APSInt& Adjustment) {
+ BasicValueFactory &BV = state->getBasicVals();
+
+ QualType T = state->getSymbolManager().getType(sym);
+ const llvm::APSInt &Max = BV.getMaxValue(T);
+
+ // Special case for Int == Max. This is always feasible.
+ if (Int == Max)
+ return state;
+
+ const llvm::APSInt &Min = BV.getMinValue(T);
+
+ llvm::APSInt Lower = Min-Adjustment;
+ llvm::APSInt Upper = Int-Adjustment;
+
+ RangeSet New = GetRange(state, sym).Intersect(BV, F, Lower, Upper);
+ return New.isEmpty() ? NULL : state->set<ConstraintRange>(sym, New);
+}
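Taken together, the six methods above map an assumption about a symbol onto a single (possibly wrapping) closed interval. The standalone helper below restates that mapping over plain unsigned integers; Cmp and feasibleInterval are invented names, the Adjustment is taken as zero, and the trivially-false or trivially-true endpoint cases are assumed to be short-circuited before this point, as in the methods themselves. A non-zero Adjustment simply slides both bounds down by that amount.

#include <cstdint>
#include <utility>

enum Cmp { EQ, NE, LT, GT, LE, GE };

// Feasible values of "sym <cmp> v" over an unsigned type whose range is
// [min, max]; a pair whose first element exceeds its second denotes a
// wrap-around interval.
static std::pair<uint64_t, uint64_t>
feasibleInterval(Cmp cmp, uint64_t v, uint64_t min, uint64_t max) {
  switch (cmp) {
  case EQ: return {v, v};
  case NE: return {v + 1, v - 1};   // wraps around: everything except v
  case LT: return {min, v - 1};     // v == min is rejected beforehand
  case LE: return {min, v};
  case GT: return {v + 1, max};     // v == max is rejected beforehand
  case GE: return {v, max};
  }
  return {min, max};                // not reached
}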
//===------------------------------------------------------------------------===
// Pretty-printing.
diff --git a/lib/Checker/RegionStore.cpp b/lib/Checker/RegionStore.cpp
index c4072fd80307..74a7fee04889 100644
--- a/lib/Checker/RegionStore.cpp
+++ b/lib/Checker/RegionStore.cpp
@@ -118,22 +118,6 @@ public:
}
//===----------------------------------------------------------------------===//
-// Region "Extents"
-//===----------------------------------------------------------------------===//
-//
-// MemRegions represent chunks of memory with a size (their "extent"). This
-// GDM entry tracks the extents for regions. Extents are in bytes.
-//
-namespace { class RegionExtents {}; }
-static int RegionExtentsIndex = 0;
-namespace clang {
- template<> struct GRStateTrait<RegionExtents>
- : public GRStatePartialTrait<llvm::ImmutableMap<const MemRegion*, SVal> > {
- static void* GDMIndex() { return &RegionExtentsIndex; }
- };
-}
-
-//===----------------------------------------------------------------------===//
// Utility functions.
//===----------------------------------------------------------------------===//
@@ -244,14 +228,16 @@ public:
Store InvalidateRegion(Store store, const MemRegion *R, const Expr *E,
unsigned Count, InvalidatedSymbols *IS) {
- return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS);
+ return RegionStoreManager::InvalidateRegions(store, &R, &R+1, E, Count, IS,
+ false);
}
Store InvalidateRegions(Store store,
const MemRegion * const *Begin,
const MemRegion * const *End,
const Expr *E, unsigned Count,
- InvalidatedSymbols *IS);
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals);
public: // Made public for helper classes.
@@ -280,6 +266,14 @@ public: // Part of public interface to class.
Store Bind(Store store, Loc LV, SVal V);
+ // BindDefault is only used to initialize a region with a default value.
+ Store BindDefault(Store store, const MemRegion *R, SVal V) {
+ RegionBindings B = GetRegionBindings(store);
+ assert(!Lookup(B, R, BindingKey::Default));
+ assert(!Lookup(B, R, BindingKey::Direct));
+ return Add(B, R, BindingKey::Default, V).getRoot();
+ }
+
Store BindCompoundLiteral(Store store, const CompoundLiteralExpr* CL,
const LocationContext *LC, SVal V);
@@ -339,6 +333,12 @@ public: // Part of public interface to class.
SVal RetrieveArray(Store store, const TypedRegion* R);
+ /// Used to lazily generate derived symbols for bindings that are defined
+ /// implicitly by default bindings in a super region.
+ Optional<SVal> RetrieveDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedRegion *R, QualType Ty);
+
/// Get the state and region whose binding this region R corresponds to.
std::pair<Store, const MemRegion*>
GetLazyBinding(RegionBindings B, const MemRegion *R);
@@ -352,7 +352,7 @@ public: // Part of public interface to class.
/// RemoveDeadBindings - Scans the RegionStore of 'state' for dead values.
/// It returns a new Store with these values removed.
- const GRState *RemoveDeadBindings(GRState &state, Stmt* Loc,
+ const GRState *RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots);
@@ -364,18 +364,7 @@ public: // Part of public interface to class.
// Region "extents".
//===------------------------------------------------------------------===//
- const GRState *setExtent(const GRState *state,const MemRegion* R,SVal Extent){
- return state->set<RegionExtents>(R, Extent);
- }
-
- Optional<SVal> getExtent(const GRState *state, const MemRegion *R) {
- const SVal *V = state->get<RegionExtents>(R);
- if (V)
- return *V;
- else
- return Optional<SVal>();
- }
-
+ // FIXME: This method will soon be eliminated; see the note in Store.h.
DefinedOrUnknownSVal getSizeInElements(const GRState *state,
const MemRegion* R, QualType EleTy);
@@ -391,12 +380,17 @@ public: // Part of public interface to class.
const char *sep);
void iterBindings(Store store, BindingsHandler& f) {
- // FIXME: Implement.
- }
-
- // FIXME: Remove.
- BasicValueFactory& getBasicVals() {
- return StateMgr.getBasicVals();
+ RegionBindings B = GetRegionBindings(store);
+ for (RegionBindings::iterator I=B.begin(), E=B.end(); I!=E; ++I) {
+ const BindingKey &K = I.getKey();
+ if (!K.isDirect())
+ continue;
+ if (const SubRegion *R = dyn_cast<SubRegion>(I.getKey().getRegion())) {
+ // FIXME: Possibly incorporate the offset?
+ if (!f.HandleBinding(*this, store, R, I.getData()))
+ return;
+ }
+ }
}
// FIXME: Remove.
@@ -483,12 +477,13 @@ public:
RegionBindings getRegionBindings() const { return B; }
- void AddToCluster(BindingKey K) {
+ RegionCluster &AddToCluster(BindingKey K) {
const MemRegion *R = K.getRegion();
const MemRegion *baseR = R->getBaseRegion();
RegionCluster &C = getCluster(baseR);
C.push_back(K, BVC);
static_cast<DERIVED*>(this)->VisitAddedToCluster(baseR, C);
+ return C;
}
bool isVisited(const MemRegion *R) {
@@ -504,15 +499,20 @@ public:
return *CRef;
}
- void GenerateClusters() {
+ void GenerateClusters(bool includeGlobals = false) {
// Scan the entire set of bindings and make the region clusters.
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- AddToCluster(RI.getKey());
+ RegionCluster &C = AddToCluster(RI.getKey());
if (const MemRegion *R = RI.getData().getAsRegion()) {
// Generate a cluster, but don't add the region to the cluster
// if there aren't any bindings.
getCluster(R->getBaseRegion());
}
+ if (includeGlobals) {
+ const MemRegion *R = RI.getKey().getRegion();
+ if (isa<NonStaticGlobalSpaceRegion>(R->getMemorySpace()))
+ AddToWorkList(R, C);
+ }
}
}
@@ -615,8 +615,8 @@ void InvalidateRegionsWorker::VisitBinding(SVal V) {
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const MemRegion *baseR = RI.getKey().getRegion();
- if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
VisitBinding(RI.getData());
}
@@ -706,13 +706,14 @@ Store RegionStoreManager::InvalidateRegions(Store store,
const MemRegion * const *I,
const MemRegion * const *E,
const Expr *Ex, unsigned Count,
- InvalidatedSymbols *IS) {
+ InvalidatedSymbols *IS,
+ bool invalidateGlobals) {
InvalidateRegionsWorker W(*this, StateMgr,
RegionStoreManager::GetRegionBindings(store),
Ex, Count, IS);
// Scan the bindings and generate the clusters.
- W.GenerateClusters();
+ W.GenerateClusters(invalidateGlobals);
// Add I .. E to the worklist.
for ( ; I != E; ++I)
@@ -721,7 +722,20 @@ Store RegionStoreManager::InvalidateRegions(Store store,
W.RunWorkList();
// Return the new bindings.
- return W.getRegionBindings().getRoot();
+ RegionBindings B = W.getRegionBindings();
+
+ if (invalidateGlobals) {
+ // Bind the non-static globals memory space to a new symbol that we will
+ // use to derive the bindings for all non-static globals.
+ const GlobalsSpaceRegion *GS = MRMgr.getGlobalsRegion();
+ SVal V =
+ ValMgr.getConjuredSymbolVal(/* SymbolTag = */ (void*) GS, Ex,
+ /* symbol type, doesn't matter */ Ctx.IntTy,
+ Count);
+ B = Add(B, BindingKey::Make(GS, BindingKey::Default), V);
+ }
+
+ return B.getRoot();
}
//===----------------------------------------------------------------------===//
@@ -731,82 +745,19 @@ Store RegionStoreManager::InvalidateRegions(Store store,
DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
const MemRegion *R,
QualType EleTy) {
+ SVal Size = cast<SubRegion>(R)->getExtent(ValMgr);
+ SValuator &SVator = ValMgr.getSValuator();
+ const llvm::APSInt *SizeInt = SVator.getKnownValue(state, Size);
+ if (!SizeInt)
+ return UnknownVal();
- switch (R->getKind()) {
- case MemRegion::CXXThisRegionKind:
- assert(0 && "Cannot get size of 'this' region");
- case MemRegion::GenericMemSpaceRegionKind:
- case MemRegion::StackLocalsSpaceRegionKind:
- case MemRegion::StackArgumentsSpaceRegionKind:
- case MemRegion::HeapSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind:
- case MemRegion::UnknownSpaceRegionKind:
- assert(0 && "Cannot index into a MemSpace");
- return UnknownVal();
-
- case MemRegion::FunctionTextRegionKind:
- case MemRegion::BlockTextRegionKind:
- case MemRegion::BlockDataRegionKind:
- // Technically this can happen if people do funny things with casts.
- return UnknownVal();
-
- // Not yet handled.
- case MemRegion::AllocaRegionKind:
- case MemRegion::CompoundLiteralRegionKind:
- case MemRegion::ElementRegionKind:
- case MemRegion::FieldRegionKind:
- case MemRegion::ObjCIvarRegionKind:
- case MemRegion::CXXObjectRegionKind:
- return UnknownVal();
-
- case MemRegion::SymbolicRegionKind: {
- const SVal *Size = state->get<RegionExtents>(R);
- if (!Size)
- return UnknownVal();
- const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(Size);
- if (!CI)
- return UnknownVal();
-
- CharUnits RegionSize =
- CharUnits::fromQuantity(CI->getValue().getSExtValue());
- CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- assert(RegionSize % EleSize == 0);
-
- return ValMgr.makeIntVal(RegionSize / EleSize, false);
- }
-
- case MemRegion::StringRegionKind: {
- const StringLiteral* Str = cast<StringRegion>(R)->getStringLiteral();
- // We intentionally made the size value signed because it participates in
- // operations with signed indices.
- return ValMgr.makeIntVal(Str->getByteLength()+1, false);
- }
-
- case MemRegion::VarRegionKind: {
- const VarRegion* VR = cast<VarRegion>(R);
- // Get the type of the variable.
- QualType T = VR->getDesugaredValueType(getContext());
-
- // FIXME: Handle variable-length arrays.
- if (isa<VariableArrayType>(T))
- return UnknownVal();
-
- if (const ConstantArrayType* CAT = dyn_cast<ConstantArrayType>(T)) {
- // return the size as signed integer.
- return ValMgr.makeIntVal(CAT->getSize(), false);
- }
-
- // Clients can reinterpret ordinary variables as arrays, possibly of
- // another type. The width is rounded down to ensure that an access is
- // entirely within bounds.
- CharUnits VarSize = getContext().getTypeSizeInChars(T);
- CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- return ValMgr.makeIntVal(VarSize / EleSize, false);
- }
- }
+ CharUnits RegionSize = CharUnits::fromQuantity(SizeInt->getSExtValue());
+ CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
- assert(0 && "Unreachable");
- return UnknownVal();
+  // If the region size is not an even multiple of the element size, the
+  // element count is rounded down; a partial trailing element is ignored.
+ // This is a signed value, since it's used in arithmetic with signed indices.
+ return ValMgr.makeIntVal(RegionSize / EleSize, false);
}
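The new implementation reduces the old per-region-kind logic to a single division. A tiny standalone illustration of that division (sizeInElements is a made-up name, and plain byte counts stand in for CharUnits):

#include <cassert>
#include <cstdint>

// Element count of a region viewed through an element type: the division
// rounds toward zero, so a partial trailing element never counts. The result
// stays signed because it participates in arithmetic with signed indices.
static int64_t sizeInElements(int64_t regionBytes, int64_t elemBytes) {
  return regionBytes / elemBytes;
}

int main() {
  assert(sizeInElements(7, 4) == 1);   // a 7-byte buffer holds one whole int32
  assert(sizeInElements(8, 4) == 2);
  assert(sizeInElements(4, 1) == 4);   // an int reinterpreted as char[4]
  return 0;
}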
//===----------------------------------------------------------------------===//
@@ -849,6 +800,19 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
if (!isa<loc::MemRegionVal>(L))
return UnknownVal();
+ // Special case for zero RHS.
+ if (R.isZeroConstant()) {
+ switch (Op) {
+ default:
+ // Handle it normally.
+ break;
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ // FIXME: does this need to be casted to match resultTy?
+ return L;
+ }
+ }
+
const MemRegion* MR = cast<loc::MemRegionVal>(L).getRegion();
const ElementRegion *ER = 0;
@@ -870,8 +834,7 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
}
case MemRegion::AllocaRegionKind: {
const AllocaRegion *AR = cast<AllocaRegion>(MR);
- QualType T = getContext().CharTy; // Create an ElementRegion of bytes.
- QualType EleTy = T->getAs<PointerType>()->getPointeeType();
+ QualType EleTy = getContext().CharTy; // Create an ElementRegion of bytes.
SVal ZeroIdx = ValMgr.makeZeroArrayIndex();
ER = MRMgr.getElementRegion(EleTy, ZeroIdx, AR, getContext());
break;
@@ -907,7 +870,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
case MemRegion::StackLocalsSpaceRegionKind:
case MemRegion::StackArgumentsSpaceRegionKind:
case MemRegion::HeapSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind:
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind:
case MemRegion::UnknownSpaceRegionKind:
assert(0 && "Cannot perform pointer arithmetic on a MemSpace");
return UnknownVal();
@@ -946,7 +910,8 @@ SVal RegionStoreManager::EvalBinOp(BinaryOperator::Opcode Op, Loc L, NonLoc R,
//===----------------------------------------------------------------------===//
Optional<SVal> RegionStoreManager::getDirectBinding(RegionBindings B,
- const MemRegion *R) {
+ const MemRegion *R) {
+
if (const SVal *V = Lookup(B, R, BindingKey::Direct))
return *V;
@@ -1009,8 +974,13 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
const MemRegion *MR = cast<loc::MemRegionVal>(L).getRegion();
- if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR))
+ if (isa<AllocaRegion>(MR) || isa<SymbolicRegion>(MR)) {
+ if (T.isNull()) {
+ const SymbolicRegion *SR = cast<SymbolicRegion>(MR);
+ T = SR->getSymbol()->getType(getContext());
+ }
MR = GetElementZeroRegion(MR, T);
+ }
if (isa<CodeTextRegion>(MR)) {
assert(0 && "Why load from a code text region?");
@@ -1172,27 +1142,33 @@ SVal RegionStoreManager::RetrieveElement(Store store,
}
}
- // Check if the immediate super region has a direct binding.
- if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
- if (SymbolRef parentSym = V->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (V->isUnknownOrUndef())
- return *V;
-
- // Handle LazyCompoundVals for the immediate super region. Other cases
- // are handled in 'RetrieveFieldOrElementCommon'.
- if (const nonloc::LazyCompoundVal *LCV =
- dyn_cast<nonloc::LazyCompoundVal>(V)) {
-
- R = MRMgr.getElementRegionWithSuper(R, LCV->getRegion());
- return RetrieveElement(LCV->getStore(), R);
+ // Handle the case where we are indexing into a larger scalar object.
+ // For example, this handles:
+ // int x = ...
+ // char *y = &x;
+ // return *y;
+ // FIXME: This is a hack, and doesn't do anything really intelligent yet.
+ const RegionRawOffset &O = R->getAsRawOffset();
+ if (const TypedRegion *baseR = dyn_cast_or_null<TypedRegion>(O.getRegion())) {
+ QualType baseT = baseR->getValueType(Ctx);
+ if (baseT->isScalarType()) {
+ QualType elemT = R->getElementType();
+ if (elemT->isScalarType()) {
+ if (Ctx.getTypeSizeInChars(baseT) >= Ctx.getTypeSizeInChars(elemT)) {
+ if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
+ if (SymbolRef parentSym = V->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (V->isUnknownOrUndef())
+ return *V;
+ // Other cases: give up. We are indexing into a larger object
+ // that has some value, but we don't know how to handle that yet.
+ return UnknownVal();
+ }
+ }
+ }
}
-
- // Other cases: give up.
- return UnknownVal();
}
-
return RetrieveFieldOrElementCommon(store, R, R->getElementType(), superR);
}
@@ -1208,6 +1184,28 @@ SVal RegionStoreManager::RetrieveField(Store store,
return RetrieveFieldOrElementCommon(store, R, Ty, R->getSuperRegion());
}
+Optional<SVal>
+RegionStoreManager::RetrieveDerivedDefaultValue(RegionBindings B,
+ const MemRegion *superR,
+ const TypedRegion *R,
+ QualType Ty) {
+
+ if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
+ if (SymbolRef parentSym = D->getAsSymbol())
+ return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
+
+ if (D->isZeroConstant())
+ return ValMgr.makeZeroVal(Ty);
+
+ if (D->isUnknownOrUndef())
+ return *D;
+
+ assert(0 && "Unknown default value");
+ }
+
+ return Optional<SVal>();
+}
+
SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
const TypedRegion *R,
QualType Ty,
@@ -1219,18 +1217,8 @@ SVal RegionStoreManager::RetrieveFieldOrElementCommon(Store store,
RegionBindings B = GetRegionBindings(store);
while (superR) {
- if (const Optional<SVal> &D = getDefaultBinding(B, superR)) {
- if (SymbolRef parentSym = D->getAsSymbol())
- return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
-
- if (D->isZeroConstant())
- return ValMgr.makeZeroVal(Ty);
-
- if (D->isUnknown())
- return *D;
-
- assert(0 && "Unknown default value");
- }
+ if (const Optional<SVal> &D = RetrieveDerivedDefaultValue(B, superR, R, Ty))
+ return *D;
// If our super region is a field or element itself, walk up the region
// hierarchy to see if there is a default value installed in an ancestor.
@@ -1311,7 +1299,7 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
return ValMgr.getRegionValueSymbolVal(R);
if (isa<GlobalsSpaceRegion>(MS)) {
- if (VD->isFileVarDecl()) {
+ if (isa<NonStaticGlobalSpaceRegion>(MS)) {
// Is 'VD' declared constant? If so, retrieve the constant value.
QualType CT = Ctx.getCanonicalType(T);
if (CT.isConstQualified()) {
@@ -1326,6 +1314,9 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
}
}
+ if (const Optional<SVal> &V = RetrieveDerivedDefaultValue(B, MS, R, CT))
+ return V.getValue();
+
return ValMgr.getRegionValueSymbolVal(R);
}
@@ -1449,6 +1440,7 @@ Store RegionStoreManager::BindCompoundLiteral(Store store,
V);
}
+
Store RegionStoreManager::setImplicitDefaultValue(Store store,
const MemRegion *R,
QualType T) {
@@ -1691,15 +1683,14 @@ class RemoveDeadBindingsWorker :
public ClusterAnalysis<RemoveDeadBindingsWorker> {
llvm::SmallVector<const SymbolicRegion*, 12> Postponed;
SymbolReaper &SymReaper;
- Stmt *Loc;
const StackFrameContext *CurrentLCtx;
-
+
public:
RemoveDeadBindingsWorker(RegionStoreManager &rm, GRStateManager &stateMgr,
RegionBindings b, SymbolReaper &symReaper,
- Stmt *loc, const StackFrameContext *LCtx)
+ const StackFrameContext *LCtx)
: ClusterAnalysis<RemoveDeadBindingsWorker>(rm, stateMgr, b),
- SymReaper(symReaper), Loc(loc), CurrentLCtx(LCtx) {}
+ SymReaper(symReaper), CurrentLCtx(LCtx) {}
// Called by ClusterAnalysis.
void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
@@ -1715,7 +1706,7 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
RegionCluster &C) {
if (const VarRegion *VR = dyn_cast<VarRegion>(baseR)) {
- if (SymReaper.isLive(Loc, VR))
+ if (SymReaper.isLive(VR))
AddToWorkList(baseR, C);
return;
@@ -1730,9 +1721,14 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
return;
}
+ if (isa<NonStaticGlobalSpaceRegion>(baseR)) {
+ AddToWorkList(baseR, C);
+ return;
+ }
+
// CXXThisRegion in the current or parent location context is live.
if (const CXXThisRegion *TR = dyn_cast<CXXThisRegion>(baseR)) {
- const StackArgumentsSpaceRegion *StackReg =
+ const StackArgumentsSpaceRegion *StackReg =
cast<StackArgumentsSpaceRegion>(TR->getSuperRegion());
const StackFrameContext *RegCtx = StackReg->getStackFrame();
if (RegCtx == CurrentLCtx || RegCtx->isParentOf(CurrentLCtx))
@@ -1754,8 +1750,8 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
const MemRegion *LazyR = LCS->getRegion();
RegionBindings B = RegionStoreManager::GetRegionBindings(LCS->getStore());
for (RegionBindings::iterator RI = B.begin(), RE = B.end(); RI != RE; ++RI){
- const MemRegion *baseR = RI.getKey().getRegion();
- if (cast<SubRegion>(baseR)->isSubRegionOf(LazyR))
+ const SubRegion *baseR = dyn_cast<SubRegion>(RI.getKey().getRegion());
+ if (baseR && baseR->isSubRegionOf(LazyR))
VisitBinding(RI.getData());
}
return;
@@ -1822,13 +1818,13 @@ bool RemoveDeadBindingsWorker::UpdatePostponed() {
return changed;
}
-const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
+const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state,
const StackFrameContext *LCtx,
SymbolReaper& SymReaper,
llvm::SmallVectorImpl<const MemRegion*>& RegionRoots)
{
RegionBindings B = GetRegionBindings(state.getStore());
- RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, Loc, LCtx);
+ RemoveDeadBindingsWorker W(*this, StateMgr, B, SymReaper, LCtx);
W.GenerateClusters();
// Enqueue the region roots onto the worklist.
@@ -1862,13 +1858,6 @@ const GRState *RegionStoreManager::RemoveDeadBindings(GRState &state, Stmt* Loc,
}
state.setStore(B.getRoot());
const GRState *s = StateMgr.getPersistentState(state);
- // Remove the extents of dead symbolic regions.
- llvm::ImmutableMap<const MemRegion*,SVal> Extents = s->get<RegionExtents>();
- for (llvm::ImmutableMap<const MemRegion *, SVal>::iterator I=Extents.begin(),
- E = Extents.end(); I != E; ++I) {
- if (!W.isVisited(I->first))
- s = s->remove<RegionExtents>(I->first);
- }
return s;
}
@@ -1887,9 +1876,9 @@ GRState const *RegionStoreManager::EnterStackFrame(GRState const *state,
SVal ArgVal = state->getSVal(*AI);
store = Bind(store, ValMgr.makeLoc(MRMgr.getVarRegion(*PI,frame)),ArgVal);
}
- } else if (const CXXConstructExpr *CE =
+ } else if (const CXXConstructExpr *CE =
dyn_cast<CXXConstructExpr>(frame->getCallSite())) {
- CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
+ CXXConstructExpr::const_arg_iterator AI = CE->arg_begin(),
AE = CE->arg_end();
// Copy the arg expression value to the arg variables.
diff --git a/lib/Checker/ReturnStackAddressChecker.cpp b/lib/Checker/ReturnStackAddressChecker.cpp
deleted file mode 100644
index 35b1cdebf620..000000000000
--- a/lib/Checker/ReturnStackAddressChecker.cpp
+++ /dev/null
@@ -1,125 +0,0 @@
-//== ReturnStackAddressChecker.cpp ------------------------------*- C++ -*--==//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file defines ReturnStackAddressChecker, which is a path-sensitive
-// check which looks for the addresses of stack variables being returned to
-// callers.
-//
-//===----------------------------------------------------------------------===//
-
-#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/BugReporter/BugType.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/SmallString.h"
-
-using namespace clang;
-
-namespace {
-class ReturnStackAddressChecker :
- public CheckerVisitor<ReturnStackAddressChecker> {
- BuiltinBug *BT;
-public:
- ReturnStackAddressChecker() : BT(0) {}
- static void *getTag();
- void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
-private:
- void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
-};
-}
-
-void clang::RegisterReturnStackAddressChecker(GRExprEngine &Eng) {
- Eng.registerCheck(new ReturnStackAddressChecker());
-}
-
-void *ReturnStackAddressChecker::getTag() {
- static int x = 0; return &x;
-}
-
-void ReturnStackAddressChecker::EmitStackError(CheckerContext &C,
- const MemRegion *R,
- const Expr *RetE) {
- ExplodedNode *N = C.GenerateSink();
-
- if (!N)
- return;
-
- if (!BT)
- BT = new BuiltinBug("Return of address to stack-allocated memory");
-
- // Generate a report for this bug.
- llvm::SmallString<512> buf;
- llvm::raw_svector_ostream os(buf);
- SourceRange range;
-
- // Get the base region, stripping away fields and elements.
- R = R->getBaseRegion();
-
- // Check if the region is a compound literal.
- if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
- const CompoundLiteralExpr* CL = CR->getLiteralExpr();
- os << "Address of stack memory associated with a compound literal "
- "declared on line "
- << C.getSourceManager().getInstantiationLineNumber(CL->getLocStart())
- << " returned to caller";
- range = CL->getSourceRange();
- }
- else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
- const Expr* ARE = AR->getExpr();
- SourceLocation L = ARE->getLocStart();
- range = ARE->getSourceRange();
- os << "Address of stack memory allocated by call to alloca() on line "
- << C.getSourceManager().getInstantiationLineNumber(L)
- << " returned to caller";
- }
- else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
- const BlockDecl *BD = BR->getCodeRegion()->getDecl();
- SourceLocation L = BD->getLocStart();
- range = BD->getSourceRange();
- os << "Address of stack-allocated block declared on line "
- << C.getSourceManager().getInstantiationLineNumber(L)
- << " returned to caller";
- }
- else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
- os << "Address of stack memory associated with local variable '"
- << VR->getString() << "' returned";
- range = VR->getDecl()->getSourceRange();
- }
- else {
- assert(false && "Invalid region in ReturnStackAddressChecker.");
- return;
- }
-
- RangedBugReport *report = new RangedBugReport(*BT, os.str(), N);
- report->addRange(RetE->getSourceRange());
- if (range.isValid())
- report->addRange(range);
-
- C.EmitReport(report);
-}
-
-void ReturnStackAddressChecker::PreVisitReturnStmt(CheckerContext &C,
- const ReturnStmt *RS) {
-
- const Expr *RetE = RS->getRetValue();
- if (!RetE)
- return;
-
- SVal V = C.getState()->getSVal(RetE);
- const MemRegion *R = V.getAsRegion();
-
- if (!R || !R->hasStackStorage())
- return;
-
- if (R->hasStackStorage()) {
- EmitStackError(C, R, RetE);
- return;
- }
-}
diff --git a/lib/Checker/SVals.cpp b/lib/Checker/SVals.cpp
index d756be70dc25..7a99e8681df9 100644
--- a/lib/Checker/SVals.cpp
+++ b/lib/Checker/SVals.cpp
@@ -200,15 +200,19 @@ bool SVal::isConstant() const {
return isa<nonloc::ConcreteInt>(this) || isa<loc::ConcreteInt>(this);
}
-bool SVal::isZeroConstant() const {
+bool SVal::isConstant(int I) const {
if (isa<loc::ConcreteInt>(*this))
- return cast<loc::ConcreteInt>(*this).getValue() == 0;
+ return cast<loc::ConcreteInt>(*this).getValue() == I;
else if (isa<nonloc::ConcreteInt>(*this))
- return cast<nonloc::ConcreteInt>(*this).getValue() == 0;
+ return cast<nonloc::ConcreteInt>(*this).getValue() == I;
else
return false;
}
+bool SVal::isZeroConstant() const {
+ return isConstant(0);
+}
+
//===----------------------------------------------------------------------===//
// Transfer function dispatch for Non-Locs.
diff --git a/lib/Checker/SValuator.cpp b/lib/Checker/SValuator.cpp
index 542fc1b1078d..a7e15fc3fd73 100644
--- a/lib/Checker/SValuator.cpp
+++ b/lib/Checker/SValuator.cpp
@@ -29,15 +29,15 @@ SVal SValuator::EvalBinOp(const GRState *ST, BinaryOperator::Opcode Op,
if (isa<Loc>(L)) {
if (isa<Loc>(R))
- return EvalBinOpLL(Op, cast<Loc>(L), cast<Loc>(R), T);
+ return EvalBinOpLL(ST, Op, cast<Loc>(L), cast<Loc>(R), T);
return EvalBinOpLN(ST, Op, cast<Loc>(L), cast<NonLoc>(R), T);
}
if (isa<Loc>(R)) {
- // Support pointer arithmetic where the increment/decrement operand
- // is on the left and the pointer on the right.
- assert(Op == BinaryOperator::Add || Op == BinaryOperator::Sub);
+ // Support pointer arithmetic where the addend is on the left
+ // and the pointer on the right.
+ assert(Op == BinaryOperator::Add);
// Commute the operands.
return EvalBinOpLN(ST, Op, cast<Loc>(R), cast<NonLoc>(L), T);
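The commuted call relies on integer-plus-pointer addition being the mirror image of pointer-plus-integer, which the C and C++ standards guarantee; a minimal standalone check:

#include <cassert>

int main() {
  int buf[4] = {0, 1, 2, 3};
  int *p = buf;
  assert(3 + p == p + 3);        // addend on the left commutes to p + 3
  assert(*(1 + p) == p[1]);      // same element either way
  return 0;
}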
diff --git a/lib/Checker/SimpleConstraintManager.cpp b/lib/Checker/SimpleConstraintManager.cpp
index 8c423a99777d..321381b045ad 100644
--- a/lib/Checker/SimpleConstraintManager.cpp
+++ b/lib/Checker/SimpleConstraintManager.cpp
@@ -35,12 +35,11 @@ bool SimpleConstraintManager::canReasonAbout(SVal X) const {
case BinaryOperator::Or:
case BinaryOperator::Xor:
return false;
- // We don't reason yet about arithmetic constraints on symbolic values.
+ // We don't reason yet about these arithmetic constraints on
+ // symbolic values.
case BinaryOperator::Mul:
case BinaryOperator::Div:
case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
case BinaryOperator::Shl:
case BinaryOperator::Shr:
return false;
@@ -90,12 +89,11 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
while (SubR) {
// FIXME: now we only find the first symbolic region.
if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(SubR)) {
+ const llvm::APSInt &zero = BasicVals.getZeroWithPtrWidth();
if (Assumption)
- return AssumeSymNE(state, SymR->getSymbol(),
- BasicVals.getZeroWithPtrWidth());
+ return AssumeSymNE(state, SymR->getSymbol(), zero, zero);
else
- return AssumeSymEQ(state, SymR->getSymbol(),
- BasicVals.getZeroWithPtrWidth());
+ return AssumeSymEQ(state, SymR->getSymbol(), zero, zero);
}
SubR = dyn_cast<SubRegion>(SubR->getSuperRegion());
}
@@ -121,11 +119,27 @@ const GRState *SimpleConstraintManager::Assume(const GRState *state,
return SU.ProcessAssume(state, cond, assumption);
}
+static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
+ // FIXME: This should probably be part of BinaryOperator, since this isn't
+ // the only place it's used. (This code was copied from SimpleSValuator.cpp.)
+ switch (op) {
+ default:
+ assert(false && "Invalid opcode.");
+ case BinaryOperator::LT: return BinaryOperator::GE;
+ case BinaryOperator::GT: return BinaryOperator::LE;
+ case BinaryOperator::LE: return BinaryOperator::GT;
+ case BinaryOperator::GE: return BinaryOperator::LT;
+ case BinaryOperator::EQ: return BinaryOperator::NE;
+ case BinaryOperator::NE: return BinaryOperator::EQ;
+ }
+}
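NegateComparison encodes the identity !(a op b) == (a op' b), which holds for any total order such as the integers. A standalone brute-force check (negateCmp and evalCmp are invented names for the illustration):

#include <cassert>
#include <initializer_list>

enum Cmp { LT, GT, LE, GE, EQ, NE };

static Cmp negateCmp(Cmp op) {
  switch (op) {
  case LT: return GE;  case GT: return LE;
  case LE: return GT;  case GE: return LT;
  case EQ: return NE;  case NE: return EQ;
  }
  return EQ;  // not reached
}

static bool evalCmp(Cmp op, int a, int b) {
  switch (op) {
  case LT: return a < b;   case GT: return a > b;
  case LE: return a <= b;  case GE: return a >= b;
  case EQ: return a == b;  case NE: return a != b;
  }
  return false;  // not reached
}

int main() {
  for (int a = -2; a <= 2; ++a)
    for (int b = -2; b <= 2; ++b)
      for (Cmp op : {LT, GT, LE, GE, EQ, NE})
        assert(evalCmp(op, a, b) == !evalCmp(negateCmp(op), a, b));
  return 0;
}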
+
const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
NonLoc Cond,
bool Assumption) {
- // We cannot reason about SymIntExpr and SymSymExpr.
+ // We cannot reason about SymSymExprs,
+ // and can only reason about some SymIntExprs.
if (!canReasonAbout(Cond)) {
// Just return the current state indicating that the path is feasible.
// This may be an over-approximation of what is possible.
@@ -144,30 +158,35 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
SymbolRef sym = SV.getSymbol();
QualType T = SymMgr.getType(sym);
const llvm::APSInt &zero = BasicVals.getValue(0, T);
-
- return Assumption ? AssumeSymNE(state, sym, zero)
- : AssumeSymEQ(state, sym, zero);
+ if (Assumption)
+ return AssumeSymNE(state, sym, zero, zero);
+ else
+ return AssumeSymEQ(state, sym, zero, zero);
}
case nonloc::SymExprValKind: {
nonloc::SymExprVal V = cast<nonloc::SymExprVal>(Cond);
- if (const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression())){
- // FIXME: This is a hack. It silently converts the RHS integer to be
- // of the same type as on the left side. This should be removed once
- // we support truncation/extension of symbolic values.
- GRStateManager &StateMgr = state->getStateManager();
- ASTContext &Ctx = StateMgr.getContext();
- QualType LHSType = SE->getLHS()->getType(Ctx);
- BasicValueFactory &BasicVals = StateMgr.getBasicVals();
- const llvm::APSInt &RHS = BasicVals.Convert(LHSType, SE->getRHS());
- SymIntExpr SENew(SE->getLHS(), SE->getOpcode(), RHS, SE->getType(Ctx));
-
- return AssumeSymInt(state, Assumption, &SENew);
+
+ // For now, we only handle expressions whose RHS is an integer.
+ // All other expressions are assumed to be feasible.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(V.getSymbolicExpression());
+ if (!SE)
+ return state;
+
+ BinaryOperator::Opcode op = SE->getOpcode();
+ // Implicitly compare non-comparison expressions to 0.
+ if (!BinaryOperator::isComparisonOp(op)) {
+ QualType T = SymMgr.getType(SE);
+ const llvm::APSInt &zero = BasicVals.getValue(0, T);
+ op = (Assumption ? BinaryOperator::NE : BinaryOperator::EQ);
+ return AssumeSymRel(state, SE, op, zero);
}
- // For all other symbolic expressions, over-approximate and consider
- // the constraint feasible.
- return state;
+ // From here on out, op is the real comparison we'll be testing.
+ if (!Assumption)
+ op = NegateComparison(op);
+
+ return AssumeSymRel(state, SE->getLHS(), op, SE->getRHS());
}
case nonloc::ConcreteIntKind: {
@@ -182,43 +201,98 @@ const GRState *SimpleConstraintManager::AssumeAux(const GRState *state,
} // end switch
}
-const GRState *SimpleConstraintManager::AssumeSymInt(const GRState *state,
- bool Assumption,
- const SymIntExpr *SE) {
+const GRState *SimpleConstraintManager::AssumeSymRel(const GRState *state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int) {
+ assert(BinaryOperator::isComparisonOp(op) &&
+ "Non-comparison ops should be rewritten as comparisons to zero.");
+
+ // We only handle simple comparisons of the form "$sym == constant"
+ // or "($sym+constant1) == constant2".
+ // The adjustment is "constant1" in the above expression. It's used to
+ // "slide" the solution range around for modular arithmetic. For example,
+ // x < 4 has the solution [0, 3]. x+2 < 4 has the solution [0-2, 3-2], which
+ // in modular arithmetic is [0, 1] U [UINT_MAX-1, UINT_MAX]. It's up to
+ // the subclasses of SimpleConstraintManager to handle the adjustment.
+ llvm::APSInt Adjustment;
+
+ // First check if the LHS is a simple symbol reference.
+ SymbolRef Sym = dyn_cast<SymbolData>(LHS);
+ if (Sym) {
+ Adjustment = 0;
+ } else {
+ // Next, see if it's a "($sym+constant1)" expression.
+ const SymIntExpr *SE = dyn_cast<SymIntExpr>(LHS);
+
+ // We don't handle "($sym1+$sym2)".
+ // Give up and assume the constraint is feasible.
+ if (!SE)
+ return state;
+
+ // We don't handle "(<expr>+constant1)".
+ // Give up and assume the constraint is feasible.
+ Sym = dyn_cast<SymbolData>(SE->getLHS());
+ if (!Sym)
+ return state;
+
+ // Get the constant out of the expression "($sym+constant1)".
+ switch (SE->getOpcode()) {
+ case BinaryOperator::Add:
+ Adjustment = SE->getRHS();
+ break;
+ case BinaryOperator::Sub:
+ Adjustment = -SE->getRHS();
+ break;
+ default:
+ // We don't handle non-additive operators.
+ // Give up and assume the constraint is feasible.
+ return state;
+ }
+ }
+
+ // FIXME: This next section is a hack. It silently converts the integers to
+ // be of the same type as the symbol, which is not always correct. Really the
+ // comparisons should be performed using the Int's type, then mapped back to
+ // the symbol's range of values.
+ GRStateManager &StateMgr = state->getStateManager();
+ ASTContext &Ctx = StateMgr.getContext();
+
+ QualType T = Sym->getType(Ctx);
+ assert(T->isIntegerType() || Loc::IsLocType(T));
+ unsigned bitwidth = Ctx.getTypeSize(T);
+ bool isSymUnsigned = T->isUnsignedIntegerType() || Loc::IsLocType(T);
+ // Convert the adjustment.
+ Adjustment.setIsUnsigned(isSymUnsigned);
+ Adjustment.extOrTrunc(bitwidth);
- // Here we assume that LHS is a symbol. This is consistent with the
- // rest of the constraint manager logic.
- SymbolRef Sym = cast<SymbolData>(SE->getLHS());
- const llvm::APSInt &Int = SE->getRHS();
+ // Convert the right-hand side integer.
+ llvm::APSInt ConvertedInt(Int, isSymUnsigned);
+ ConvertedInt.extOrTrunc(bitwidth);
- switch (SE->getOpcode()) {
+ switch (op) {
default:
// No logic yet for other operators. Assume the constraint is feasible.
return state;
case BinaryOperator::EQ:
- return Assumption ? AssumeSymEQ(state, Sym, Int)
- : AssumeSymNE(state, Sym, Int);
+ return AssumeSymEQ(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::NE:
- return Assumption ? AssumeSymNE(state, Sym, Int)
- : AssumeSymEQ(state, Sym, Int);
+ return AssumeSymNE(state, Sym, ConvertedInt, Adjustment);
+
case BinaryOperator::GT:
- return Assumption ? AssumeSymGT(state, Sym, Int)
- : AssumeSymLE(state, Sym, Int);
+ return AssumeSymGT(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::GE:
- return Assumption ? AssumeSymGE(state, Sym, Int)
- : AssumeSymLT(state, Sym, Int);
+ return AssumeSymGE(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::LT:
- return Assumption ? AssumeSymLT(state, Sym, Int)
- : AssumeSymGE(state, Sym, Int);
+ return AssumeSymLT(state, Sym, ConvertedInt, Adjustment);
case BinaryOperator::LE:
- return Assumption ? AssumeSymLE(state, Sym, Int)
- : AssumeSymGT(state, Sym, Int);
+ return AssumeSymLE(state, Sym, ConvertedInt, Adjustment);
} // end switch
}
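The adjustment comment above can be confirmed by brute force. The standalone snippet below reproduces the "x+2 < 4" example over uint8_t: the direct solution [0, 1] slides down by 2 into the wrap-around interval [254, 1], the 8-bit analogue of [0, 1] U [UINT_MAX-1, UINT_MAX]. inWrapRange is an invented helper.

#include <cassert>
#include <cstdint>

// Membership in a closed interval that may wrap past the maximum value.
static bool inWrapRange(uint8_t x, uint8_t lo, uint8_t hi) {
  return lo <= hi ? (x >= lo && x <= hi) : (x >= lo || x <= hi);
}

int main() {
  for (unsigned v = 0; v <= 255; ++v) {
    uint8_t x = static_cast<uint8_t>(v);
    bool satisfies = static_cast<uint8_t>(x + 2) < 4;  // "x + 2 < 4" in 8 bits
    assert(satisfies == inWrapRange(x, 254, 1));       // [0-2, 3-2], wrapped
  }
  return 0;
}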
diff --git a/lib/Checker/SimpleConstraintManager.h b/lib/Checker/SimpleConstraintManager.h
index 5f20e0072b20..45057e64f31f 100644
--- a/lib/Checker/SimpleConstraintManager.h
+++ b/lib/Checker/SimpleConstraintManager.h
@@ -38,8 +38,10 @@ public:
const GRState *Assume(const GRState *state, NonLoc Cond, bool Assumption);
- const GRState *AssumeSymInt(const GRState *state, bool Assumption,
- const SymIntExpr *SE);
+ const GRState *AssumeSymRel(const GRState *state,
+ const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt& Int);
const GRState *AssumeInBound(const GRState *state, DefinedSVal Idx,
DefinedSVal UpperBound,
@@ -51,23 +53,31 @@ protected:
// Interface that subclasses must implement.
//===------------------------------------------------------------------===//
+ // Each of these is of the form "$sym+Adj <> V", where "<>" is the comparison
+ // operation for the method being invoked.
virtual const GRState *AssumeSymNE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymEQ(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymLT(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymGT(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymLE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
virtual const GRState *AssumeSymGE(const GRState *state, SymbolRef sym,
- const llvm::APSInt& V) = 0;
+ const llvm::APSInt& V,
+ const llvm::APSInt& Adjustment) = 0;
//===------------------------------------------------------------------===//
// Internal implementation.
diff --git a/lib/Checker/SimpleSValuator.cpp b/lib/Checker/SimpleSValuator.cpp
index dd38a435a1d7..3bc4ee7d0613 100644
--- a/lib/Checker/SimpleSValuator.cpp
+++ b/lib/Checker/SimpleSValuator.cpp
@@ -30,10 +30,17 @@ public:
virtual SVal EvalComplement(NonLoc val);
virtual SVal EvalBinOpNN(const GRState *state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy);
- virtual SVal EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
- QualType resultTy);
+ virtual SVal EvalBinOpLL(const GRState *state, BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs, QualType resultTy);
virtual SVal EvalBinOpLN(const GRState *state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy);
+
+ /// getKnownValue - Evaluates a given SVal. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
+ virtual const llvm::APSInt *getKnownValue(const GRState *state, SVal V);
+
+ SVal MakeSymIntVal(const SymExpr *LHS, BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS, QualType resultTy);
};
} // end anonymous namespace
@@ -170,45 +177,93 @@ static BinaryOperator::Opcode NegateComparison(BinaryOperator::Opcode op) {
}
}
-// Equality operators for Locs.
-// FIXME: All this logic will be revamped when we have MemRegion::getLocation()
-// implemented.
-
-static SVal EvalEquality(ValueManager &ValMgr, Loc lhs, Loc rhs, bool isEqual,
- QualType resultTy) {
+static BinaryOperator::Opcode ReverseComparison(BinaryOperator::Opcode op) {
+ switch (op) {
+ default:
+ assert(false && "Invalid opcode.");
+ case BinaryOperator::LT: return BinaryOperator::GT;
+ case BinaryOperator::GT: return BinaryOperator::LT;
+ case BinaryOperator::LE: return BinaryOperator::GE;
+ case BinaryOperator::GE: return BinaryOperator::LE;
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ return op;
+ }
+}
- switch (lhs.getSubKind()) {
- default:
- assert(false && "EQ/NE not implemented for this Loc.");
- return UnknownVal();
+SVal SimpleSValuator::MakeSymIntVal(const SymExpr *LHS,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &RHS,
+ QualType resultTy) {
+ bool isIdempotent = false;
- case loc::ConcreteIntKind: {
- if (SymbolRef rSym = rhs.getAsSymbol())
- return ValMgr.makeNonLoc(rSym,
- isEqual ? BinaryOperator::EQ
- : BinaryOperator::NE,
- cast<loc::ConcreteInt>(lhs).getValue(),
- resultTy);
- break;
- }
- case loc::MemRegionKind: {
- if (SymbolRef lSym = lhs.getAsLocSymbol()) {
- if (isa<loc::ConcreteInt>(rhs)) {
- return ValMgr.makeNonLoc(lSym,
- isEqual ? BinaryOperator::EQ
- : BinaryOperator::NE,
- cast<loc::ConcreteInt>(rhs).getValue(),
- resultTy);
- }
- }
- break;
+ // Check for a few special cases with known reductions first.
+ switch (op) {
+ default:
+ // We can't reduce this case; just treat it normally.
+ break;
+ case BinaryOperator::Mul:
+ // a*0 and a*1
+ if (RHS == 0)
+ return ValMgr.makeIntVal(0, resultTy);
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Div:
+ // a/0 and a/1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Rem:
+ // a%0 and a%1
+ if (RHS == 0)
+ // This is also handled elsewhere.
+ return UndefinedVal();
+ else if (RHS == 1)
+ return ValMgr.makeIntVal(0, resultTy);
+ break;
+ case BinaryOperator::Add:
+ case BinaryOperator::Sub:
+ case BinaryOperator::Shl:
+ case BinaryOperator::Shr:
+ case BinaryOperator::Xor:
+ // a+0, a-0, a<<0, a>>0, a^0
+ if (RHS == 0)
+ isIdempotent = true;
+ break;
+ case BinaryOperator::And:
+ // a&0 and a&(~0)
+ if (RHS == 0)
+ return ValMgr.makeIntVal(0, resultTy);
+ else if (RHS.isAllOnesValue())
+ isIdempotent = true;
+ break;
+ case BinaryOperator::Or:
+ // a|0 and a|(~0)
+ if (RHS == 0)
+ isIdempotent = true;
+ else if (RHS.isAllOnesValue()) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ const llvm::APSInt &Result = BVF.Convert(resultTy, RHS);
+ return nonloc::ConcreteInt(Result);
}
+ break;
+ }
- case loc::GotoLabelKind:
- break;
+ // Idempotent ops (like a*1) can still change the type of an expression.
+ // Wrap the LHS up in a NonLoc again and let EvalCastNL do the dirty work.
+ if (isIdempotent) {
+ if (SymbolRef LHSSym = dyn_cast<SymbolData>(LHS))
+ return EvalCastNL(nonloc::SymbolVal(LHSSym), resultTy);
+ return EvalCastNL(nonloc::SymExprVal(LHS), resultTy);
}
- return ValMgr.makeTruthVal(isEqual ? lhs == rhs : lhs != rhs, resultTy);
+ // If we reach this point, the expression cannot be simplified.
+ // Make a SymExprVal for the entire thing.
+ return ValMgr.makeNonLoc(LHS, op, RHS, resultTy);
}
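The rewrites applied above are ordinary integer identities. The standalone snippet below spot-checks them on concrete unsigned values: the cases that collapse to a constant (x*0, x&0, x%1, x|~0) and the cases the code flags as "idempotent", which leave the value unchanged.

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (uint32_t x : {0u, 1u, 7u, 0xFFFFFFFFu}) {
    // Collapse to a constant.
    assert(x * 0 == 0 && (x & 0u) == 0 && x % 1 == 0 && (x | ~0u) == ~0u);
    // Leave the value unchanged (the isIdempotent paths).
    assert(x * 1 == x && x / 1 == x && x + 0 == x && x - 0 == x);
    assert((x << 0) == x && (x >> 0) == x);
    assert((x ^ 0u) == x && (x & ~0u) == x && (x | 0u) == x);
  }
  return 0;
}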
SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
@@ -228,6 +283,12 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
case BinaryOperator::GT:
case BinaryOperator::NE:
return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::Xor:
+ case BinaryOperator::Sub:
+ return ValMgr.makeIntVal(0, resultTy);
+ case BinaryOperator::Or:
+ case BinaryOperator::And:
+ return EvalCastNL(lhs, resultTy);
}
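The added cases extend the equal-operands fast path from comparisons to a few arithmetic and bitwise operators. A plain-integer sanity check of those reductions (an invented test harness, not analyzer code):

#include <cassert>

int main() {
  for (int x = -3; x <= 3; ++x) {
    assert((x ^ x) == 0 && (x - x) == 0);       // equal operands collapse to 0
    assert((x | x) == x && (x & x) == x);       // ...or to the operand itself
    assert(x == x && x <= x && x >= x);         // comparisons handled above
    assert(!(x != x) && !(x < x) && !(x > x));
  }
  return 0;
}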
while (1) {
@@ -238,7 +299,8 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
Loc lhsL = cast<nonloc::LocAsInteger>(lhs).getLoc();
switch (rhs.getSubKind()) {
case nonloc::LocAsIntegerKind:
- return EvalBinOpLL(op, lhsL, cast<nonloc::LocAsInteger>(rhs).getLoc(),
+ return EvalBinOpLL(state, op, lhsL,
+ cast<nonloc::LocAsInteger>(rhs).getLoc(),
resultTy);
case nonloc::ConcreteIntKind: {
// Transform the integer into a location and compare.
@@ -246,7 +308,7 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
llvm::APSInt i = cast<nonloc::ConcreteInt>(rhs).getValue();
i.setIsUnsigned(true);
i.extOrTrunc(Ctx.getTypeSize(Ctx.VoidPtrTy));
- return EvalBinOpLL(op, lhsL, ValMgr.makeLoc(i), resultTy);
+ return EvalBinOpLL(state, op, lhsL, ValMgr.makeLoc(i), resultTy);
}
default:
switch (op) {
@@ -261,87 +323,136 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
}
case nonloc::SymExprValKind: {
- // Logical not?
- if (!(op == BinaryOperator::EQ && rhs.isZeroConstant()))
+ nonloc::SymExprVal *selhs = cast<nonloc::SymExprVal>(&lhs);
+
+ // Only handle LHS of the form "$sym op constant", at least for now.
+ const SymIntExpr *symIntExpr =
+ dyn_cast<SymIntExpr>(selhs->getSymbolicExpression());
+
+ if (!symIntExpr)
return UnknownVal();
- const SymExpr *symExpr =
- cast<nonloc::SymExprVal>(lhs).getSymbolicExpression();
+ // Is this a logical not? (!x is represented as x == 0.)
+ if (op == BinaryOperator::EQ && rhs.isZeroConstant()) {
+ // We know how to negate certain expressions. Simplify them here.
- // Only handle ($sym op constant) for now.
- if (const SymIntExpr *symIntExpr = dyn_cast<SymIntExpr>(symExpr)) {
BinaryOperator::Opcode opc = symIntExpr->getOpcode();
switch (opc) {
- case BinaryOperator::LAnd:
- case BinaryOperator::LOr:
- assert(false && "Logical operators handled by branching logic.");
- return UnknownVal();
- case BinaryOperator::Assign:
- case BinaryOperator::MulAssign:
- case BinaryOperator::DivAssign:
- case BinaryOperator::RemAssign:
- case BinaryOperator::AddAssign:
- case BinaryOperator::SubAssign:
- case BinaryOperator::ShlAssign:
- case BinaryOperator::ShrAssign:
- case BinaryOperator::AndAssign:
- case BinaryOperator::XorAssign:
- case BinaryOperator::OrAssign:
- case BinaryOperator::Comma:
- assert(false && "'=' and ',' operators handled by GRExprEngine.");
- return UnknownVal();
- case BinaryOperator::PtrMemD:
- case BinaryOperator::PtrMemI:
- assert(false && "Pointer arithmetic not handled here.");
- return UnknownVal();
- case BinaryOperator::Mul:
- case BinaryOperator::Div:
- case BinaryOperator::Rem:
- case BinaryOperator::Add:
- case BinaryOperator::Sub:
- case BinaryOperator::Shl:
- case BinaryOperator::Shr:
- case BinaryOperator::And:
- case BinaryOperator::Xor:
- case BinaryOperator::Or:
- // Not handled yet.
- return UnknownVal();
- case BinaryOperator::LT:
- case BinaryOperator::GT:
- case BinaryOperator::LE:
- case BinaryOperator::GE:
- case BinaryOperator::EQ:
- case BinaryOperator::NE:
- opc = NegateComparison(opc);
- assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
- return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
- symIntExpr->getRHS(), resultTy);
+ default:
+ // We don't know how to negate this operation.
+ // Just handle it as if it were a normal comparison to 0.
+ break;
+ case BinaryOperator::LAnd:
+ case BinaryOperator::LOr:
+ assert(false && "Logical operators handled by branching logic.");
+ return UnknownVal();
+ case BinaryOperator::Assign:
+ case BinaryOperator::MulAssign:
+ case BinaryOperator::DivAssign:
+ case BinaryOperator::RemAssign:
+ case BinaryOperator::AddAssign:
+ case BinaryOperator::SubAssign:
+ case BinaryOperator::ShlAssign:
+ case BinaryOperator::ShrAssign:
+ case BinaryOperator::AndAssign:
+ case BinaryOperator::XorAssign:
+ case BinaryOperator::OrAssign:
+ case BinaryOperator::Comma:
+ assert(false && "'=' and ',' operators handled by GRExprEngine.");
+ return UnknownVal();
+ case BinaryOperator::PtrMemD:
+ case BinaryOperator::PtrMemI:
+ assert(false && "Pointer arithmetic not handled here.");
+ return UnknownVal();
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ case BinaryOperator::EQ:
+ case BinaryOperator::NE:
+ // Negate the comparison and make a value.
+ opc = NegateComparison(opc);
+ assert(symIntExpr->getType(ValMgr.getContext()) == resultTy);
+ return ValMgr.makeNonLoc(symIntExpr->getLHS(), opc,
+ symIntExpr->getRHS(), resultTy);
}
}
+
+ // For now, only handle expressions whose RHS is a constant.
+ const nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs);
+ if (!rhsInt)
+ return UnknownVal();
+
+ // If both the LHS and the current expression are additive,
+ // fold their constants.
+ if (BinaryOperator::isAdditiveOp(op)) {
+ BinaryOperator::Opcode lop = symIntExpr->getOpcode();
+ if (BinaryOperator::isAdditiveOp(lop)) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+
+ // resultTy may not be the best type to convert to, but it's
+ // probably the best choice in expressions with mixed type
+ // (such as x+1U+2LL). The rules for implicit conversions should
+ // choose a reasonable type to preserve the expression, and will
+ // at least match how the value is going to be used.
+ const llvm::APSInt &first =
+ BVF.Convert(resultTy, symIntExpr->getRHS());
+ const llvm::APSInt &second =
+ BVF.Convert(resultTy, rhsInt->getValue());
+
+ const llvm::APSInt *newRHS;
+ if (lop == op)
+ newRHS = BVF.EvaluateAPSInt(BinaryOperator::Add, first, second);
+ else
+ newRHS = BVF.EvaluateAPSInt(BinaryOperator::Sub, first, second);
+ return MakeSymIntVal(symIntExpr->getLHS(), lop, *newRHS, resultTy);
+ }
+ }
+
+ // Otherwise, make a SymExprVal out of the expression.
+ return MakeSymIntVal(symIntExpr, op, rhsInt->getValue(), resultTy);
}
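The constant folding above is just the associativity of the additive operators. A standalone check on concrete ints of the four inner/outer combinations the code distinguishes:

#include <cassert>

int main() {
  for (int x = -3; x <= 3; ++x) {
    assert((x + 1) + 2 == x + (1 + 2));   // same ops: add the constants
    assert((x + 5) - 2 == x + (5 - 2));   // mixed ops: subtract them
    assert((x - 1) + 4 == x - (1 - 4));   // the inner op is kept; the folded
    assert((x - 6) - 1 == x - (6 + 1));   //   constant is handled modularly
  }
  return 0;
}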
case nonloc::ConcreteIntKind: {
+ const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
+
if (isa<nonloc::ConcreteInt>(rhs)) {
- const nonloc::ConcreteInt& lhsInt = cast<nonloc::ConcreteInt>(lhs);
return lhsInt.evalBinOp(ValMgr, op, cast<nonloc::ConcreteInt>(rhs));
- }
- else {
+ } else {
+ const llvm::APSInt& lhsValue = lhsInt.getValue();
+
// Swap the left and right sides and flip the operator if doing so
// allows us to better reason about the expression (this is a form
// of expression canonicalization).
+ // While we're at it, catch some special cases for non-commutative ops.
NonLoc tmp = rhs;
rhs = lhs;
lhs = tmp;
switch (op) {
- case BinaryOperator::LT: op = BinaryOperator::GT; continue;
- case BinaryOperator::GT: op = BinaryOperator::LT; continue;
- case BinaryOperator::LE: op = BinaryOperator::GE; continue;
- case BinaryOperator::GE: op = BinaryOperator::LE; continue;
+ case BinaryOperator::LT:
+ case BinaryOperator::GT:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ op = ReverseComparison(op);
+ continue;
case BinaryOperator::EQ:
case BinaryOperator::NE:
case BinaryOperator::Add:
case BinaryOperator::Mul:
+ case BinaryOperator::And:
+ case BinaryOperator::Xor:
+ case BinaryOperator::Or:
continue;
+ case BinaryOperator::Shr:
+ if (lhsValue.isAllOnesValue() && lhsValue.isSigned())
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ // FALL-THROUGH
+ case BinaryOperator::Shl:
+ if (lhsValue == 0)
+ // At this point lhs and rhs have been swapped.
+ return rhs;
+ return UnknownVal();
default:
return UnknownVal();
}
@@ -377,9 +488,9 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
if (isa<nonloc::ConcreteInt>(rhs)) {
- return ValMgr.makeNonLoc(slhs->getSymbol(), op,
- cast<nonloc::ConcreteInt>(rhs).getValue(),
- resultTy);
+ return MakeSymIntVal(slhs->getSymbol(), op,
+ cast<nonloc::ConcreteInt>(rhs).getValue(),
+ resultTy);
}
return UnknownVal();
@@ -388,21 +499,301 @@ SVal SimpleSValuator::EvalBinOpNN(const GRState *state,
}
}
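
To make the rewrites EvalBinOpNN applies above concrete, here is a small, self-contained C++ sketch of the same two ideas on a toy "symbol op constant" type: a comparison opcode is flipped under logical negation (so testing sym < N for falsehood yields sym >= N), and the constants of nested additive expressions are folded (so (sym + 1) + 2 becomes sym + 3). The SymExpr and Opcode names below are illustrative stand-ins, not the clang classes used in this patch.

    #include <cassert>
    #include <cstdint>
    #include <iostream>

    enum class Opcode { LT, GT, LE, GE, EQ, NE, Add, Sub };

    // Toy "symbol op constant" expression, standing in for clang's SymIntExpr.
    struct SymExpr { const char *Sym; Opcode Op; int64_t RHS; };

    // Same idea as NegateComparison: flip a comparison under logical negation.
    static Opcode NegateComparison(Opcode Op) {
      switch (Op) {
      case Opcode::LT: return Opcode::GE;
      case Opcode::GT: return Opcode::LE;
      case Opcode::LE: return Opcode::GT;
      case Opcode::GE: return Opcode::LT;
      case Opcode::EQ: return Opcode::NE;
      case Opcode::NE: return Opcode::EQ;
      default: assert(false && "not a comparison"); return Op;
      }
    }

    // Same idea as the additive folding: (sym lop a) op b ==> sym lop (a +/- b).
    static SymExpr FoldAdditive(SymExpr LHS, Opcode Op, int64_t RHS) {
      assert((Op == Opcode::Add || Op == Opcode::Sub) &&
             (LHS.Op == Opcode::Add || LHS.Op == Opcode::Sub));
      int64_t Folded = (LHS.Op == Op) ? LHS.RHS + RHS : LHS.RHS - RHS;
      return SymExpr{LHS.Sym, LHS.Op, Folded};
    }

    int main() {
      SymExpr E{"x", Opcode::Add, 1};
      SymExpr F = FoldAdditive(E, Opcode::Add, 2);  // (x + 1) + 2  ->  x + 3
      std::cout << F.Sym << " + " << F.RHS << '\n';
      std::cout << (NegateComparison(Opcode::LT) == Opcode::GE) << '\n';  // 1
      return 0;
    }
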
-SVal SimpleSValuator::EvalBinOpLL(BinaryOperator::Opcode op, Loc lhs, Loc rhs,
+// FIXME: all this logic will change if/when we have MemRegion::getLocation().
+SVal SimpleSValuator::EvalBinOpLL(const GRState *state,
+ BinaryOperator::Opcode op,
+ Loc lhs, Loc rhs,
QualType resultTy) {
- switch (op) {
+ // Only comparisons and subtractions are valid operations on two pointers.
+ // See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
+  // However, if a pointer is cast to an integer, EvalBinOpNN may end up
+ // calling this function with another operation (PR7527). We don't attempt to
+ // model this for now, but it could be useful, particularly when the
+ // "location" is actually an integer value that's been passed through a void*.
+ if (!(BinaryOperator::isComparisonOp(op) || op == BinaryOperator::Sub))
+ return UnknownVal();
+
+ // Special cases for when both sides are identical.
+ if (lhs == rhs) {
+ switch (op) {
default:
+ assert(false && "Unimplemented operation for two identical values");
return UnknownVal();
+ case BinaryOperator::Sub:
+ return ValMgr.makeZeroVal(resultTy);
case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
case BinaryOperator::NE:
- return EvalEquality(ValMgr, lhs, rhs, op == BinaryOperator::EQ, resultTy);
case BinaryOperator::LT:
case BinaryOperator::GT:
- // FIXME: Generalize. For now, just handle the trivial case where
- // the two locations are identical.
- if (lhs == rhs)
+ return ValMgr.makeTruthVal(false, resultTy);
+ }
+ }
+
+ switch (lhs.getSubKind()) {
+ default:
+ assert(false && "Ordering not implemented for this Loc.");
+ return UnknownVal();
+
+ case loc::GotoLabelKind:
+ // The only thing we know about labels is that they're non-null.
+ if (rhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::Sub:
+ return EvalCastL(lhs, resultTy);
+ case BinaryOperator::EQ:
+ case BinaryOperator::LE:
+ case BinaryOperator::LT:
return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+ // There may be two labels for the same location, and a function region may
+ // have the same address as a label at the start of the function (depending
+ // on the ABI).
+ // FIXME: we can probably do a comparison against other MemRegions, though.
+ // FIXME: is there a way to tell if two labels refer to the same location?
+ return UnknownVal();
+
+ case loc::ConcreteIntKind: {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef rSym = rhs.getAsLocSymbol()) {
+ // We can only build expressions with symbols on the left,
+ // so we need a reversible operator.
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ const llvm::APSInt &lVal = cast<loc::ConcreteInt>(lhs).getValue();
+ return ValMgr.makeNonLoc(rSym, ReverseComparison(op), lVal, resultTy);
+ }
+
+ // If both operands are constants, just perform the operation.
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ BasicValueFactory &BVF = ValMgr.getBasicValueFactory();
+ SVal ResultVal = cast<loc::ConcreteInt>(lhs).EvalBinOp(BVF, op, *rInt);
+ if (Loc *Result = dyn_cast<Loc>(&ResultVal))
+ return EvalCastL(*Result, resultTy);
+ else
+ return UnknownVal();
+ }
+
+ // Special case comparisons against NULL.
+    // This must come after the check for a symbolic RHS, which is what the
+    // constraints are built from. The address of any non-symbolic region is
+    // guaranteed to be non-NULL, as is any label.
+ assert(isa<loc::MemRegionVal>(rhs) || isa<loc::GotoLabel>(rhs));
+ if (lhs.isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::EQ:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::LT:
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing an arbitrary integer to a region or label address is
+ // completely unknowable.
+ return UnknownVal();
+ }
+ case loc::MemRegionKind: {
+ if (loc::ConcreteInt *rInt = dyn_cast<loc::ConcreteInt>(&rhs)) {
+ // If one of the operands is a symbol and the other is a constant,
+ // build an expression for use by the constraint manager.
+ if (SymbolRef lSym = lhs.getAsLocSymbol())
+ return MakeSymIntVal(lSym, op, rInt->getValue(), resultTy);
+
+ // Special case comparisons to NULL.
+      // This must come after the check for a symbolic LHS, which is what the
+      // constraints are built from. The address of any non-symbolic region is
+      // guaranteed to be non-NULL.
+ if (rInt->isZeroConstant()) {
+ switch (op) {
+ default:
+ break;
+ case BinaryOperator::Sub:
+ return EvalCastL(lhs, resultTy);
+ case BinaryOperator::EQ:
+ case BinaryOperator::LT:
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ case BinaryOperator::GT:
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // Comparing a region to an arbitrary integer is completely unknowable.
+ return UnknownVal();
+ }
+
+ // Get both values as regions, if possible.
+ const MemRegion *LeftMR = lhs.getAsRegion();
+ assert(LeftMR && "MemRegionKind SVal doesn't have a region!");
+
+ const MemRegion *RightMR = rhs.getAsRegion();
+ if (!RightMR)
+ // The RHS is probably a label, which in theory could address a region.
+ // FIXME: we can probably make a more useful statement about non-code
+ // regions, though.
+ return UnknownVal();
+
+ // If both values wrap regions, see if they're from different base regions.
+ const MemRegion *LeftBase = LeftMR->getBaseRegion();
+ const MemRegion *RightBase = RightMR->getBaseRegion();
+ if (LeftBase != RightBase &&
+ !isa<SymbolicRegion>(LeftBase) && !isa<SymbolicRegion>(RightBase)) {
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::EQ:
+ return ValMgr.makeTruthVal(false, resultTy);
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(true, resultTy);
+ }
+ }
+
+ // The two regions are from the same base region. See if they're both a
+ // type of region we know how to compare.
+
+ // FIXME: If/when there is a getAsRawOffset() for FieldRegions, this
+ // ElementRegion path and the FieldRegion path below should be unified.
+ if (const ElementRegion *LeftER = dyn_cast<ElementRegion>(LeftMR)) {
+ // First see if the right region is also an ElementRegion.
+ const ElementRegion *RightER = dyn_cast<ElementRegion>(RightMR);
+ if (!RightER)
+ return UnknownVal();
+
+ // Next, see if the two ERs have the same super-region and matching types.
+ // FIXME: This should do something useful even if the types don't match,
+ // though if both indexes are constant the RegionRawOffset path will
+ // give the correct answer.
+ if (LeftER->getSuperRegion() == RightER->getSuperRegion() &&
+ LeftER->getElementType() == RightER->getElementType()) {
+ // Get the left index and cast it to the correct type.
+ // If the index is unknown or undefined, bail out here.
+ SVal LeftIndexVal = LeftER->getIndex();
+ NonLoc *LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+ LeftIndexVal = EvalCastNL(*LeftIndex, resultTy);
+ LeftIndex = dyn_cast<NonLoc>(&LeftIndexVal);
+ if (!LeftIndex)
+ return UnknownVal();
+
+ // Do the same for the right index.
+ SVal RightIndexVal = RightER->getIndex();
+ NonLoc *RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+ RightIndexVal = EvalCastNL(*RightIndex, resultTy);
+ RightIndex = dyn_cast<NonLoc>(&RightIndexVal);
+ if (!RightIndex)
+ return UnknownVal();
+
+ // Actually perform the operation.
+ // EvalBinOpNN expects the two indexes to already be the right type.
+ return EvalBinOpNN(state, op, *LeftIndex, *RightIndex, resultTy);
+ }
+
+ // If the element indexes aren't comparable, see if the raw offsets are.
+ RegionRawOffset LeftOffset = LeftER->getAsRawOffset();
+ RegionRawOffset RightOffset = RightER->getAsRawOffset();
+
+ if (LeftOffset.getRegion() != NULL &&
+ LeftOffset.getRegion() == RightOffset.getRegion()) {
+ int64_t left = LeftOffset.getByteOffset();
+ int64_t right = RightOffset.getByteOffset();
+
+ switch (op) {
+ default:
+ return UnknownVal();
+ case BinaryOperator::LT:
+ return ValMgr.makeTruthVal(left < right, resultTy);
+ case BinaryOperator::GT:
+ return ValMgr.makeTruthVal(left > right, resultTy);
+ case BinaryOperator::LE:
+ return ValMgr.makeTruthVal(left <= right, resultTy);
+ case BinaryOperator::GE:
+ return ValMgr.makeTruthVal(left >= right, resultTy);
+ case BinaryOperator::EQ:
+ return ValMgr.makeTruthVal(left == right, resultTy);
+ case BinaryOperator::NE:
+ return ValMgr.makeTruthVal(left != right, resultTy);
+ }
+ }
+
+ // If we get here, we have no way of comparing the ElementRegions.
return UnknownVal();
+ }
+
+ // See if both regions are fields of the same structure.
+ // FIXME: This doesn't handle nesting, inheritance, or Objective-C ivars.
+ if (const FieldRegion *LeftFR = dyn_cast<FieldRegion>(LeftMR)) {
+ // Only comparisons are meaningful here!
+ if (!BinaryOperator::isComparisonOp(op))
+ return UnknownVal();
+
+ // First see if the right region is also a FieldRegion.
+ const FieldRegion *RightFR = dyn_cast<FieldRegion>(RightMR);
+ if (!RightFR)
+ return UnknownVal();
+
+ // Next, see if the two FRs have the same super-region.
+ // FIXME: This doesn't handle casts yet, and simply stripping the casts
+ // doesn't help.
+ if (LeftFR->getSuperRegion() != RightFR->getSuperRegion())
+ return UnknownVal();
+
+ const FieldDecl *LeftFD = LeftFR->getDecl();
+ const FieldDecl *RightFD = RightFR->getDecl();
+ const RecordDecl *RD = LeftFD->getParent();
+
+ // Make sure the two FRs are from the same kind of record. Just in case!
+ // FIXME: This is probably where inheritance would be a problem.
+ if (RD != RightFD->getParent())
+ return UnknownVal();
+
+ // We know for sure that the two fields are not the same, since that
+ // would have given us the same SVal.
+ if (op == BinaryOperator::EQ)
+ return ValMgr.makeTruthVal(false, resultTy);
+ if (op == BinaryOperator::NE)
+ return ValMgr.makeTruthVal(true, resultTy);
+
+ // Iterate through the fields and see which one comes first.
+ // [C99 6.7.2.1.13] "Within a structure object, the non-bit-field
+ // members and the units in which bit-fields reside have addresses that
+ // increase in the order in which they are declared."
+ bool leftFirst = (op == BinaryOperator::LT || op == BinaryOperator::LE);
+ for (RecordDecl::field_iterator I = RD->field_begin(),
+ E = RD->field_end(); I!=E; ++I) {
+ if (*I == LeftFD)
+ return ValMgr.makeTruthVal(leftFirst, resultTy);
+ if (*I == RightFD)
+ return ValMgr.makeTruthVal(!leftFirst, resultTy);
+ }
+
+ assert(false && "Fields not found in parent record's definition");
+ }
+
+ // If we get here, we have no way of comparing the regions.
+ return UnknownVal();
+ }
}
}
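
As a rough illustration of what the extended EvalBinOpLL above can now decide, consider the snippet below (plain C-style code, also valid C++); the comments describe the results the evaluator should be able to produce under the rules in this patch: identical locations, ElementRegions with a common base, non-symbolic regions compared to NULL, and FieldRegions ordered by declaration per C99 6.7.2.1p13. This is an informal sketch of expected behavior, not a test case from the patch.

    struct S { int a; int b; };

    void compare(struct S *s, int *p) {
      int arr[8];
      (void)(p == p);            // identical Locs: folds to true
      (void)(&arr[2] < &arr[5]); // same-base ElementRegions, indexes 2 < 5: true
      (void)(&s->a < &s->b);     // fields of one struct, 'a' declared first: true
      (void)(arr == (int *)0);   // address of a local array vs. NULL: false
    }
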
@@ -414,7 +805,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
 // triggered, but transfer functions like those for OSCompareAndSwapBarrier32
// can generate comparisons that trigger this code.
// FIXME: Are all locations guaranteed to have pointer width?
- if (BinaryOperator::isEqualityOp(op)) {
+ if (BinaryOperator::isComparisonOp(op)) {
if (nonloc::ConcreteInt *rhsInt = dyn_cast<nonloc::ConcreteInt>(&rhs)) {
const llvm::APSInt *x = &rhsInt->getValue();
ASTContext &ctx = ValMgr.getContext();
@@ -423,7 +814,7 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
if (x->isSigned())
x = &ValMgr.getBasicValueFactory().getValue(*x, true);
- return EvalBinOpLL(op, lhs, loc::ConcreteInt(*x), resultTy);
+ return EvalBinOpLL(state, op, lhs, loc::ConcreteInt(*x), resultTy);
}
}
}
@@ -432,3 +823,21 @@ SVal SimpleSValuator::EvalBinOpLN(const GRState *state,
return state->getStateManager().getStoreManager().EvalBinOp(op, lhs,
rhs, resultTy);
}
+
+const llvm::APSInt *SimpleSValuator::getKnownValue(const GRState *state,
+ SVal V) {
+ if (V.isUnknownOrUndef())
+ return NULL;
+
+ if (loc::ConcreteInt* X = dyn_cast<loc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (nonloc::ConcreteInt* X = dyn_cast<nonloc::ConcreteInt>(&V))
+ return &X->getValue();
+
+ if (SymbolRef Sym = V.getAsSymbol())
+ return state->getSymVal(Sym);
+
+ // FIXME: Add support for SymExprs.
+ return NULL;
+}
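
For reference, a rough sketch of when the new getKnownValue helper above can produce an answer for the value of an expression, assuming a constraint manager that records perfectly-constrained symbols (as the built-in ones do). The comments are informal expectations, not output from the patch.

    void known_values(int n) {
      int a = 42;  // bound to a nonloc::ConcreteInt: getKnownValue yields 42
      int *p = 0;  // bound to a loc::ConcreteInt:    getKnownValue yields 0
      if (n == 7) {
        // On this path the symbol for 'n' is pinned to a single value, so
        // state->getSymVal() lets getKnownValue yield 7.
        (void)a; (void)p;
      }
      // Elsewhere 'n' is unconstrained and getKnownValue returns NULL.
    }
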
diff --git a/lib/Checker/StackAddrLeakChecker.cpp b/lib/Checker/StackAddrLeakChecker.cpp
new file mode 100644
index 000000000000..f4a9db62df4b
--- /dev/null
+++ b/lib/Checker/StackAddrLeakChecker.cpp
@@ -0,0 +1,204 @@
+//=== StackAddrLeakChecker.cpp ------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the stack address leak checker, which checks if an invalid
+// stack address is stored into a global or heap location. See CERT DCL30-C.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/SmallString.h"
+using namespace clang;
+
+namespace {
+class StackAddrLeakChecker : public CheckerVisitor<StackAddrLeakChecker> {
+ BuiltinBug *BT_stackleak;
+ BuiltinBug *BT_returnstack;
+
+public:
+ StackAddrLeakChecker() : BT_stackleak(0), BT_returnstack(0) {}
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+ void PreVisitReturnStmt(CheckerContext &C, const ReturnStmt *RS);
+ void EvalEndPath(GREndPathNodeBuilder &B, void *tag, GRExprEngine &Eng);
+private:
+ void EmitStackError(CheckerContext &C, const MemRegion *R, const Expr *RetE);
+ SourceRange GenName(llvm::raw_ostream &os, const MemRegion *R,
+ SourceManager &SM);
+};
+}
+
+void clang::RegisterStackAddrLeakChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new StackAddrLeakChecker());
+}
+
+SourceRange StackAddrLeakChecker::GenName(llvm::raw_ostream &os,
+ const MemRegion *R,
+ SourceManager &SM) {
+ // Get the base region, stripping away fields and elements.
+ R = R->getBaseRegion();
+ SourceRange range;
+ os << "Address of ";
+
+ // Check if the region is a compound literal.
+ if (const CompoundLiteralRegion* CR = dyn_cast<CompoundLiteralRegion>(R)) {
+ const CompoundLiteralExpr* CL = CR->getLiteralExpr();
+ os << "stack memory associated with a compound literal "
+ "declared on line "
+ << SM.getInstantiationLineNumber(CL->getLocStart())
+ << " returned to caller";
+ range = CL->getSourceRange();
+ }
+ else if (const AllocaRegion* AR = dyn_cast<AllocaRegion>(R)) {
+ const Expr* ARE = AR->getExpr();
+ SourceLocation L = ARE->getLocStart();
+ range = ARE->getSourceRange();
+ os << "stack memory allocated by call to alloca() on line "
+ << SM.getInstantiationLineNumber(L);
+ }
+ else if (const BlockDataRegion *BR = dyn_cast<BlockDataRegion>(R)) {
+ const BlockDecl *BD = BR->getCodeRegion()->getDecl();
+ SourceLocation L = BD->getLocStart();
+ range = BD->getSourceRange();
+ os << "stack-allocated block declared on line "
+ << SM.getInstantiationLineNumber(L);
+ }
+ else if (const VarRegion *VR = dyn_cast<VarRegion>(R)) {
+ os << "stack memory associated with local variable '"
+ << VR->getString() << '\'';
+ range = VR->getDecl()->getSourceRange();
+ }
+ else {
+ assert(false && "Invalid region in ReturnStackAddressChecker.");
+ }
+
+ return range;
+}
+
+void StackAddrLeakChecker::EmitStackError(CheckerContext &C, const MemRegion *R,
+ const Expr *RetE) {
+ ExplodedNode *N = C.GenerateSink();
+
+ if (!N)
+ return;
+
+ if (!BT_returnstack)
+ BT_returnstack=new BuiltinBug("Return of address to stack-allocated memory");
+
+ // Generate a report for this bug.
+ llvm::SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, R, C.getSourceManager());
+ os << " returned to caller";
+ RangedBugReport *report = new RangedBugReport(*BT_returnstack, os.str(), N);
+ report->addRange(RetE->getSourceRange());
+ if (range.isValid())
+ report->addRange(range);
+
+ C.EmitReport(report);
+}
+
+void StackAddrLeakChecker::PreVisitReturnStmt(CheckerContext &C,
+ const ReturnStmt *RS) {
+
+ const Expr *RetE = RS->getRetValue();
+ if (!RetE)
+ return;
+
+ SVal V = C.getState()->getSVal(RetE);
+ const MemRegion *R = V.getAsRegion();
+
+  if (!R || !R->hasStackStorage())
+    return;
+
+  EmitStackError(C, R, RetE);
+}
+
+void StackAddrLeakChecker::EvalEndPath(GREndPathNodeBuilder &B, void *tag,
+ GRExprEngine &Eng) {
+ SaveAndRestore<bool> OldHasGen(B.HasGeneratedNode);
+ const GRState *state = B.getState();
+
+  // Iterate over all bindings to global variables and see if any of them
+  // are bound to a memory region in the stack space.
+ class CallBack : public StoreManager::BindingsHandler {
+ private:
+ const StackFrameContext *CurSFC;
+ public:
+ llvm::SmallVector<std::pair<const MemRegion*, const MemRegion*>, 10> V;
+
+ CallBack(const LocationContext *LCtx)
+ : CurSFC(LCtx->getCurrentStackFrame()) {}
+
+ bool HandleBinding(StoreManager &SMgr, Store store,
+ const MemRegion *region, SVal val) {
+
+ if (!isa<GlobalsSpaceRegion>(region->getMemorySpace()))
+ return true;
+
+ const MemRegion *vR = val.getAsRegion();
+ if (!vR)
+ return true;
+
+ if (const StackSpaceRegion *SSR =
+ dyn_cast<StackSpaceRegion>(vR->getMemorySpace())) {
+ // If the global variable holds a location in the current stack frame,
+ // record the binding to emit a warning.
+ if (SSR->getStackFrame() == CurSFC)
+ V.push_back(std::make_pair(region, vR));
+ }
+
+ return true;
+ }
+ };
+
+ CallBack cb(B.getPredecessor()->getLocationContext());
+ state->getStateManager().getStoreManager().iterBindings(state->getStore(),cb);
+
+ if (cb.V.empty())
+ return;
+
+ // Generate an error node.
+ ExplodedNode *N = B.generateNode(state, tag, B.getPredecessor());
+ if (!N)
+ return;
+
+ if (!BT_stackleak)
+ BT_stackleak =
+ new BuiltinBug("Stack address stored into global variable",
+ "Stack address was saved into a global variable. "
+ "This is dangerous because the address will become "
+ "invalid after returning from the function");
+
+ for (unsigned i = 0, e = cb.V.size(); i != e; ++i) {
+ // Generate a report for this bug.
+ llvm::SmallString<512> buf;
+ llvm::raw_svector_ostream os(buf);
+ SourceRange range = GenName(os, cb.V[i].second,
+ Eng.getContext().getSourceManager());
+ os << " is still referred to by the global variable '";
+ const VarRegion *VR = cast<VarRegion>(cb.V[i].first->getBaseRegion());
+ os << VR->getDecl()->getNameAsString()
+ << "' upon returning to the caller. This will be a dangling reference";
+ RangedBugReport *report = new RangedBugReport(*BT_stackleak, os.str(), N);
+ if (range.isValid())
+ report->addRange(range);
+
+ Eng.getBugReporter().EmitReport(report);
+ }
+}
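
For orientation, two small functions of the kind the new checker is meant to flag (cf. CERT DCL30-C): one returns the address of a local, the other leaves a stack address in a global when the function returns. Illustrative only, not taken from the checker's tests.

    int *g;  // global

    int *return_local(void) {
      int x = 0;
      return &x;  // stack memory of local 'x' returned to caller
    }

    void leak_to_global(void) {
      int y = 0;
      g = &y;  // on return, global 'g' still refers to the stack address of
               // 'y': a dangling reference
    }
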
diff --git a/lib/Checker/Store.cpp b/lib/Checker/Store.cpp
index c12065b89a04..b12833170506 100644
--- a/lib/Checker/Store.cpp
+++ b/lib/Checker/Store.cpp
@@ -91,7 +91,8 @@ const MemRegion *StoreManager::CastRegion(const MemRegion *R, QualType CastToTy)
case MemRegion::StackArgumentsSpaceRegionKind:
case MemRegion::HeapSpaceRegionKind:
case MemRegion::UnknownSpaceRegionKind:
- case MemRegion::GlobalsSpaceRegionKind: {
+ case MemRegion::NonStaticGlobalSpaceRegionKind:
+ case MemRegion::StaticGlobalSpaceRegionKind: {
assert(0 && "Invalid region cast");
break;
}
@@ -232,17 +233,6 @@ SVal StoreManager::CastRetrievedVal(SVal V, const TypedRegion *R,
return V;
}
-Store StoreManager::InvalidateRegions(Store store,
- const MemRegion * const *I,
- const MemRegion * const *End,
- const Expr *E, unsigned Count,
- InvalidatedSymbols *IS) {
- for ( ; I != End ; ++I)
- store = InvalidateRegion(store, *I, E, Count, IS);
-
- return store;
-}
-
SVal StoreManager::getLValueFieldOrIvar(const Decl* D, SVal Base) {
if (Base.isUnknownOrUndef())
return Base;
diff --git a/lib/Checker/StreamChecker.cpp b/lib/Checker/StreamChecker.cpp
new file mode 100644
index 000000000000..c527ca24496f
--- /dev/null
+++ b/lib/Checker/StreamChecker.cpp
@@ -0,0 +1,287 @@
+//===-- StreamChecker.cpp -----------------------------------------*- C++ -*--//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines checkers that model and check stream handling functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRState.h"
+#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "llvm/ADT/ImmutableMap.h"
+
+using namespace clang;
+
+namespace {
+
+class StreamChecker : public CheckerVisitor<StreamChecker> {
+ IdentifierInfo *II_fopen, *II_fread, *II_fwrite,
+ *II_fseek, *II_ftell, *II_rewind, *II_fgetpos, *II_fsetpos,
+ *II_clearerr, *II_feof, *II_ferror, *II_fileno;
+ BuiltinBug *BT_nullfp, *BT_illegalwhence;
+
+public:
+ StreamChecker()
+ : II_fopen(0), II_fread(0), II_fwrite(0),
+ II_fseek(0), II_ftell(0), II_rewind(0), II_fgetpos(0), II_fsetpos(0),
+ II_clearerr(0), II_feof(0), II_ferror(0), II_fileno(0),
+ BT_nullfp(0), BT_illegalwhence(0) {}
+
+ static void *getTag() {
+ static int x;
+ return &x;
+ }
+
+ virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+
+private:
+ void Fopen(CheckerContext &C, const CallExpr *CE);
+ void Fread(CheckerContext &C, const CallExpr *CE);
+ void Fwrite(CheckerContext &C, const CallExpr *CE);
+ void Fseek(CheckerContext &C, const CallExpr *CE);
+ void Ftell(CheckerContext &C, const CallExpr *CE);
+ void Rewind(CheckerContext &C, const CallExpr *CE);
+ void Fgetpos(CheckerContext &C, const CallExpr *CE);
+ void Fsetpos(CheckerContext &C, const CallExpr *CE);
+ void Clearerr(CheckerContext &C, const CallExpr *CE);
+ void Feof(CheckerContext &C, const CallExpr *CE);
+ void Ferror(CheckerContext &C, const CallExpr *CE);
+ void Fileno(CheckerContext &C, const CallExpr *CE);
+
+  // Returns NULL if the stream pointer is definitely NULL (and reports the
+  // bug); otherwise returns the state in which the stream is non-NULL.
+ const GRState *CheckNullStream(SVal SV, const GRState *state,
+ CheckerContext &C);
+};
+
+} // end anonymous namespace
+
+void clang::RegisterStreamChecker(GRExprEngine &Eng) {
+ Eng.registerCheck(new StreamChecker());
+}
+
+bool StreamChecker::EvalCallExpr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ const Expr *Callee = CE->getCallee();
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return false;
+
+ ASTContext &Ctx = C.getASTContext();
+ if (!II_fopen)
+ II_fopen = &Ctx.Idents.get("fopen");
+ if (!II_fread)
+ II_fread = &Ctx.Idents.get("fread");
+ if (!II_fwrite)
+ II_fwrite = &Ctx.Idents.get("fwrite");
+ if (!II_fseek)
+ II_fseek = &Ctx.Idents.get("fseek");
+ if (!II_ftell)
+ II_ftell = &Ctx.Idents.get("ftell");
+ if (!II_rewind)
+ II_rewind = &Ctx.Idents.get("rewind");
+ if (!II_fgetpos)
+ II_fgetpos = &Ctx.Idents.get("fgetpos");
+ if (!II_fsetpos)
+ II_fsetpos = &Ctx.Idents.get("fsetpos");
+ if (!II_clearerr)
+ II_clearerr = &Ctx.Idents.get("clearerr");
+ if (!II_feof)
+ II_feof = &Ctx.Idents.get("feof");
+ if (!II_ferror)
+ II_ferror = &Ctx.Idents.get("ferror");
+ if (!II_fileno)
+ II_fileno = &Ctx.Idents.get("fileno");
+
+ if (FD->getIdentifier() == II_fopen) {
+ Fopen(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fread) {
+ Fread(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fwrite) {
+ Fwrite(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fseek) {
+ Fseek(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ftell) {
+ Ftell(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_rewind) {
+ Rewind(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fgetpos) {
+ Fgetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fsetpos) {
+ Fsetpos(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_clearerr) {
+ Clearerr(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_feof) {
+ Feof(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_ferror) {
+ Ferror(C, CE);
+ return true;
+ }
+ if (FD->getIdentifier() == II_fileno) {
+ Fileno(C, CE);
+ return true;
+ }
+
+ return false;
+}
+
+void StreamChecker::Fopen(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ unsigned Count = C.getNodeBuilder().getCurrentBlockCount();
+ ValueManager &ValMgr = C.getValueManager();
+ DefinedSVal RetVal = cast<DefinedSVal>(ValMgr.getConjuredSymbolVal(0, CE,
+ Count));
+ state = state->BindExpr(CE, RetVal);
+
+ ConstraintManager &CM = C.getConstraintManager();
+ // Bifurcate the state into two: one with a valid FILE* pointer, the other
+ // with a NULL.
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, RetVal);
+
+ C.addTransition(stateNotNull);
+ C.addTransition(stateNull);
+}
+
+void StreamChecker::Fread(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ return;
+}
+
+void StreamChecker::Fwrite(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(3)), state, C))
+ return;
+}
+
+void StreamChecker::Fseek(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!(state = CheckNullStream(state->getSVal(CE->getArg(0)), state, C)))
+ return;
+ // Check the legality of the 'whence' argument of 'fseek'.
+ SVal Whence = state->getSVal(CE->getArg(2));
+ bool WhenceIsLegal = true;
+  const nonloc::ConcreteInt *CI = dyn_cast<nonloc::ConcreteInt>(&Whence);
+  if (!CI) {
+    WhenceIsLegal = false;
+  } else {
+    int64_t x = CI->getValue().getSExtValue();
+    if (!(x == 0 || x == 1 || x == 2))
+      WhenceIsLegal = false;
+  }
+
+ if (!WhenceIsLegal) {
+ if (ExplodedNode *N = C.GenerateSink(state)) {
+ if (!BT_illegalwhence)
+ BT_illegalwhence = new BuiltinBug("Illegal whence argument",
+ "The whence argument to fseek() should be "
+ "SEEK_SET, SEEK_END, or SEEK_CUR.");
+ BugReport *R = new BugReport(*BT_illegalwhence,
+ BT_illegalwhence->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return;
+ }
+
+ C.addTransition(state);
+}
+
+void StreamChecker::Ftell(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Rewind(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fgetpos(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fsetpos(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Clearerr(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Feof(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Ferror(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+void StreamChecker::Fileno(CheckerContext &C, const CallExpr *CE) {
+ const GRState *state = C.getState();
+ if (!CheckNullStream(state->getSVal(CE->getArg(0)), state, C))
+ return;
+}
+
+const GRState *StreamChecker::CheckNullStream(SVal SV, const GRState *state,
+ CheckerContext &C) {
+ const DefinedSVal *DV = dyn_cast<DefinedSVal>(&SV);
+ if (!DV)
+ return 0;
+
+ ConstraintManager &CM = C.getConstraintManager();
+ const GRState *stateNotNull, *stateNull;
+ llvm::tie(stateNotNull, stateNull) = CM.AssumeDual(state, *DV);
+
+ if (!stateNotNull && stateNull) {
+ if (ExplodedNode *N = C.GenerateSink(stateNull)) {
+ if (!BT_nullfp)
+ BT_nullfp = new BuiltinBug("NULL stream pointer",
+ "Stream pointer might be NULL.");
+ BugReport *R =new BugReport(*BT_nullfp, BT_nullfp->getDescription(), N);
+ C.EmitReport(R);
+ }
+ return 0;
+ }
+ return stateNotNull;
+}
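
A short illustration of the two kinds of defects the new StreamChecker models: on the path where fopen() returned NULL, a later stream operation is flagged as a possible NULL stream pointer, and a constant whence argument to fseek() that is not 0, 1, or 2 (SEEK_SET/SEEK_CUR/SEEK_END) is flagged as illegal. This is illustrative usage, not a test from the patch.

    #include <cstdio>

    void stream_misuse(const char *path) {
      FILE *f = std::fopen(path, "r");
      std::fseek(f, 0, SEEK_SET);  // 'f' unchecked: "Stream pointer might be NULL."
      if (f)
        std::fclose(f);

      FILE *g = std::fopen(path, "r");
      if (g) {
        std::fseek(g, 0, 7);       // illegal whence value
        std::fclose(g);
      }
    }
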
diff --git a/lib/Checker/SymbolManager.cpp b/lib/Checker/SymbolManager.cpp
index f3a803c57d32..c2b557ea57db 100644
--- a/lib/Checker/SymbolManager.cpp
+++ b/lib/Checker/SymbolManager.cpp
@@ -74,6 +74,10 @@ void SymbolDerived::dumpToStream(llvm::raw_ostream& os) const {
<< getParentSymbol() << ',' << getRegion() << '}';
}
+void SymbolExtent::dumpToStream(llvm::raw_ostream& os) const {
+ os << "extent_$" << getSymbolID() << '{' << getRegion() << '}';
+}
+
void SymbolRegionValue::dumpToStream(llvm::raw_ostream& os) const {
os << "reg_$" << getSymbolID() << "<" << R << ">";
}
@@ -130,6 +134,22 @@ SymbolManager::getDerivedSymbol(SymbolRef parentSymbol,
return cast<SymbolDerived>(SD);
}
+const SymbolExtent*
+SymbolManager::getExtentSymbol(const SubRegion *R) {
+ llvm::FoldingSetNodeID profile;
+ SymbolExtent::Profile(profile, R);
+ void* InsertPos;
+ SymExpr *SD = DataSet.FindNodeOrInsertPos(profile, InsertPos);
+ if (!SD) {
+ SD = (SymExpr*) BPAlloc.Allocate<SymbolExtent>();
+ new (SD) SymbolExtent(SymbolCounter, R);
+ DataSet.InsertNode(SD, InsertPos);
+ ++SymbolCounter;
+ }
+
+ return cast<SymbolExtent>(SD);
+}
+
const SymIntExpr *SymbolManager::getSymIntExpr(const SymExpr *lhs,
BinaryOperator::Opcode op,
const llvm::APSInt& v,
@@ -170,11 +190,14 @@ QualType SymbolConjured::getType(ASTContext&) const {
return T;
}
-
QualType SymbolDerived::getType(ASTContext& Ctx) const {
return R->getValueType(Ctx);
}
+QualType SymbolExtent::getType(ASTContext& Ctx) const {
+ return Ctx.getSizeType();
+}
+
QualType SymbolRegionValue::getType(ASTContext& C) const {
return R->getValueType(C);
}
@@ -210,16 +233,25 @@ bool SymbolReaper::isLive(SymbolRef sym) {
return false;
}
+ if (const SymbolExtent *extent = dyn_cast<SymbolExtent>(sym)) {
+ const MemRegion *Base = extent->getRegion()->getBaseRegion();
+ if (const VarRegion *VR = dyn_cast<VarRegion>(Base))
+ return isLive(VR);
+ if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(Base))
+ return isLive(SR->getSymbol());
+ return false;
+ }
+
 // Interrogate the symbol. It may derive from an input value to
// the analyzed function/method.
return isa<SymbolRegionValue>(sym);
}
-bool SymbolReaper::isLive(const Stmt* Loc, const Stmt* ExprVal) const {
+bool SymbolReaper::isLive(const Stmt* ExprVal) const {
return LCtx->getLiveVariables()->isLive(Loc, ExprVal);
}
-bool SymbolReaper::isLive(const Stmt *Loc, const VarRegion *VR) const {
+bool SymbolReaper::isLive(const VarRegion *VR) const {
const StackFrameContext *SFC = VR->getStackFrame();
if (SFC == LCtx->getCurrentStackFrame())
diff --git a/lib/Checker/VLASizeChecker.cpp b/lib/Checker/VLASizeChecker.cpp
index cea9d191aa77..936991d6133c 100644
--- a/lib/Checker/VLASizeChecker.cpp
+++ b/lib/Checker/VLASizeChecker.cpp
@@ -9,10 +9,13 @@
//
// This defines VLASizeChecker, a builtin check in GRExprEngine that
// performs checks for declaration of VLA of undefined or zero size.
+// In addition, VLASizeChecker is responsible for defining the extent
+// of the MemRegion that represents a VLA.
//
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/AST/CharUnits.h"
#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
@@ -42,9 +45,9 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
const VarDecl *VD = dyn_cast<VarDecl>(DS->getSingleDecl());
if (!VD)
return;
-
- const VariableArrayType *VLA
- = C.getASTContext().getAsVariableArrayType(VD->getType());
+
+ ASTContext &Ctx = C.getASTContext();
+ const VariableArrayType *VLA = Ctx.getAsVariableArrayType(VD->getType());
if (!VLA)
return;
@@ -70,9 +73,14 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
C.EmitReport(report);
return;
}
+
+ // See if the size value is known. It can't be undefined because we would have
+ // warned about that already.
+ if (sizeV.isUnknown())
+ return;
// Check if the size is zero.
- DefinedOrUnknownSVal sizeD = cast<DefinedOrUnknownSVal>(sizeV);
+ DefinedSVal sizeD = cast<DefinedSVal>(sizeV);
const GRState *stateNotZero, *stateZero;
llvm::tie(stateNotZero, stateZero) = state->Assume(sizeD);
@@ -92,5 +100,36 @@ void VLASizeChecker::PreVisitDeclStmt(CheckerContext &C, const DeclStmt *DS) {
}
// From this point on, assume that the size is not zero.
- C.addTransition(stateNotZero);
+ state = stateNotZero;
+
+ // VLASizeChecker is responsible for defining the extent of the array being
+ // declared. We do this by multiplying the array length by the element size,
+ // then matching that with the array region's extent symbol.
+
+ // Convert the array length to size_t.
+ ValueManager &ValMgr = C.getValueManager();
+ SValuator &SV = ValMgr.getSValuator();
+ QualType SizeTy = Ctx.getSizeType();
+ NonLoc ArrayLength = cast<NonLoc>(SV.EvalCast(sizeD, SizeTy, SE->getType()));
+
+ // Get the element size.
+ CharUnits EleSize = Ctx.getTypeSizeInChars(VLA->getElementType());
+ SVal EleSizeVal = ValMgr.makeIntVal(EleSize.getQuantity(), SizeTy);
+
+ // Multiply the array length by the element size.
+ SVal ArraySizeVal = SV.EvalBinOpNN(state, BinaryOperator::Mul, ArrayLength,
+ cast<NonLoc>(EleSizeVal), SizeTy);
+
+ // Finally, Assume that the array's extent matches the given size.
+ const LocationContext *LC = C.getPredecessor()->getLocationContext();
+ DefinedOrUnknownSVal Extent = state->getRegion(VD, LC)->getExtent(ValMgr);
+ DefinedOrUnknownSVal ArraySize = cast<DefinedOrUnknownSVal>(ArraySizeVal);
+ DefinedOrUnknownSVal SizeIsKnown = SV.EvalEQ(state, Extent, ArraySize);
+ state = state->Assume(SizeIsKnown, true);
+
+ // Assume should not fail at this point.
+ assert(state);
+
+ // Remember our assumptions!
+ C.addTransition(state);
}
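
To see what the extent bookkeeping above buys, consider a simple VLA: the checker now asserts that the extent of the array's region equals the length multiplied by the element size, computed in size_t, so later size-based checks can reason about the allocation. VLAs are a C99 feature that clang also accepts in C++ as an extension; the snippet is illustrative, not from the patch.

    void use_vla(int n) {
      if (n <= 0)
        return;      // zero-size (and undefined-size) VLAs are reported separately
      int vla[n];    // extent(vla) is now constrained to n * sizeof(int),
                     // with the multiplication evaluated in size_t
      vla[0] = 1;
    }
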
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index 1ab2f55295fa..85524acbe1f7 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -11,11 +11,9 @@
#define CLANG_CODEGEN_ABIINFO_H
#include "clang/AST/Type.h"
-
-#include <cassert>
+#include "llvm/Type.h"
namespace llvm {
- class Type;
class Value;
class LLVMContext;
}
@@ -70,7 +68,7 @@ namespace clang {
private:
Kind TheKind;
- const llvm::Type *TypeData;
+ llvm::PATypeHolder TypeData;
unsigned UIntData;
bool BoolData;
@@ -136,7 +134,11 @@ namespace clang {
virtual void computeInfo(CodeGen::CGFunctionInfo &FI,
ASTContext &Ctx,
- llvm::LLVMContext &VMContext) const = 0;
+ llvm::LLVMContext &VMContext,
+                           // These are the preferred types for argument
+                           // lowering, which can be used to generate better IR.
+ const llvm::Type *const *PrefTypes = 0,
+ unsigned NumPrefTypes = 0) const = 0;
/// EmitVAArg - Emit the target dependent code to load a value of
/// \arg Ty from the va_list pointed to by \arg VAListAddr.
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
new file mode 100644
index 000000000000..69efe438cc9a
--- /dev/null
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -0,0 +1,339 @@
+//===--- BackendUtil.cpp - LLVM Backend Utilities -------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/CodeGen/BackendUtil.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Frontend/CodeGenOptions.h"
+#include "clang/Frontend/FrontendDiagnostic.h"
+#include "llvm/Module.h"
+#include "llvm/PassManager.h"
+#include "llvm/Assembly/PrintModulePass.h"
+#include "llvm/Bitcode/ReaderWriter.h"
+#include "llvm/CodeGen/RegAllocRegistry.h"
+#include "llvm/CodeGen/SchedulerRegistry.h"
+#include "llvm/Support/CommandLine.h"
+#include "llvm/Support/FormattedStream.h"
+#include "llvm/Support/PrettyStackTrace.h"
+#include "llvm/Support/StandardPasses.h"
+#include "llvm/Support/Timer.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/SubtargetFeature.h"
+#include "llvm/Target/TargetData.h"
+#include "llvm/Target/TargetMachine.h"
+#include "llvm/Target/TargetOptions.h"
+#include "llvm/Target/TargetRegistry.h"
+using namespace clang;
+using namespace llvm;
+
+namespace {
+
+class EmitAssemblyHelper {
+ Diagnostic &Diags;
+ const CodeGenOptions &CodeGenOpts;
+ const TargetOptions &TargetOpts;
+ Module *TheModule;
+
+ Timer CodeGenerationTime;
+
+ mutable FunctionPassManager *CodeGenPasses;
+ mutable PassManager *PerModulePasses;
+ mutable FunctionPassManager *PerFunctionPasses;
+
+private:
+ FunctionPassManager *getCodeGenPasses() const {
+ if (!CodeGenPasses) {
+ CodeGenPasses = new FunctionPassManager(TheModule);
+ CodeGenPasses->add(new TargetData(TheModule));
+ }
+ return CodeGenPasses;
+ }
+
+ PassManager *getPerModulePasses() const {
+ if (!PerModulePasses) {
+ PerModulePasses = new PassManager();
+ PerModulePasses->add(new TargetData(TheModule));
+ }
+ return PerModulePasses;
+ }
+
+ FunctionPassManager *getPerFunctionPasses() const {
+ if (!PerFunctionPasses) {
+ PerFunctionPasses = new FunctionPassManager(TheModule);
+ PerFunctionPasses->add(new TargetData(TheModule));
+ }
+ return PerFunctionPasses;
+ }
+
+ void CreatePasses();
+
+ /// AddEmitPasses - Add passes necessary to emit assembly or LLVM IR.
+ ///
+ /// \return True on success.
+ bool AddEmitPasses(BackendAction Action, formatted_raw_ostream &OS);
+
+public:
+ EmitAssemblyHelper(Diagnostic &_Diags,
+ const CodeGenOptions &CGOpts, const TargetOptions &TOpts,
+ Module *M)
+ : Diags(_Diags), CodeGenOpts(CGOpts), TargetOpts(TOpts),
+ TheModule(M), CodeGenerationTime("Code Generation Time"),
+ CodeGenPasses(0), PerModulePasses(0), PerFunctionPasses(0) {}
+
+ ~EmitAssemblyHelper() {
+ delete CodeGenPasses;
+ delete PerModulePasses;
+ delete PerFunctionPasses;
+ }
+
+ void EmitAssembly(BackendAction Action, raw_ostream *OS);
+};
+
+}
+
+void EmitAssemblyHelper::CreatePasses() {
+ unsigned OptLevel = CodeGenOpts.OptimizationLevel;
+ CodeGenOptions::InliningMethod Inlining = CodeGenOpts.Inlining;
+
+ // Handle disabling of LLVM optimization, where we want to preserve the
+ // internal module before any optimization.
+ if (CodeGenOpts.DisableLLVMOpts) {
+ OptLevel = 0;
+ Inlining = CodeGenOpts.NoInlining;
+ }
+
+ // In -O0 if checking is disabled, we don't even have per-function passes.
+ if (CodeGenOpts.VerifyModule)
+ getPerFunctionPasses()->add(createVerifierPass());
+
+ // Assume that standard function passes aren't run for -O0.
+ if (OptLevel > 0)
+ llvm::createStandardFunctionPasses(getPerFunctionPasses(), OptLevel);
+
+ llvm::Pass *InliningPass = 0;
+ switch (Inlining) {
+ case CodeGenOptions::NoInlining: break;
+ case CodeGenOptions::NormalInlining: {
+ // Set the inline threshold following llvm-gcc.
+ //
+ // FIXME: Derive these constants in a principled fashion.
+ unsigned Threshold = 225;
+ if (CodeGenOpts.OptimizeSize)
+ Threshold = 75;
+ else if (OptLevel > 2)
+ Threshold = 275;
+ InliningPass = createFunctionInliningPass(Threshold);
+ break;
+ }
+ case CodeGenOptions::OnlyAlwaysInlining:
+ InliningPass = createAlwaysInlinerPass(); // Respect always_inline
+ break;
+ }
+
+ // For now we always create per module passes.
+ llvm::createStandardModulePasses(getPerModulePasses(), OptLevel,
+ CodeGenOpts.OptimizeSize,
+ CodeGenOpts.UnitAtATime,
+ CodeGenOpts.UnrollLoops,
+ CodeGenOpts.SimplifyLibCalls,
+ /*HaveExceptions=*/true,
+ InliningPass);
+}
+
+bool EmitAssemblyHelper::AddEmitPasses(BackendAction Action,
+ formatted_raw_ostream &OS) {
+ // Create the TargetMachine for generating code.
+ std::string Error;
+ std::string Triple = TheModule->getTargetTriple();
+ const llvm::Target *TheTarget = TargetRegistry::lookupTarget(Triple, Error);
+ if (!TheTarget) {
+ Diags.Report(diag::err_fe_unable_to_create_target) << Error;
+ return false;
+ }
+
+ // FIXME: Expose these capabilities via actual APIs!!!! Aside from just
+ // being gross, this is also totally broken if we ever care about
+ // concurrency.
+
+ // Set frame pointer elimination mode.
+ if (!CodeGenOpts.DisableFPElim) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = false;
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ llvm::NoFramePointerElim = false;
+ llvm::NoFramePointerElimNonLeaf = true;
+ } else {
+ llvm::NoFramePointerElim = true;
+ llvm::NoFramePointerElimNonLeaf = true;
+ }
+
+ // Set float ABI type.
+ if (CodeGenOpts.FloatABI == "soft")
+ llvm::FloatABIType = llvm::FloatABI::Soft;
+ else if (CodeGenOpts.FloatABI == "hard")
+ llvm::FloatABIType = llvm::FloatABI::Hard;
+ else {
+ assert(CodeGenOpts.FloatABI.empty() && "Invalid float abi!");
+ llvm::FloatABIType = llvm::FloatABI::Default;
+ }
+
+ NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ llvm::UseSoftFloat = CodeGenOpts.SoftFloat;
+ UnwindTablesMandatory = CodeGenOpts.UnwindTables;
+
+ TargetMachine::setAsmVerbosityDefault(CodeGenOpts.AsmVerbose);
+
+ TargetMachine::setFunctionSections(CodeGenOpts.FunctionSections);
+ TargetMachine::setDataSections (CodeGenOpts.DataSections);
+
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.RelocationModel == "static") {
+ TargetMachine::setRelocationModel(llvm::Reloc::Static);
+ } else if (CodeGenOpts.RelocationModel == "pic") {
+ TargetMachine::setRelocationModel(llvm::Reloc::PIC_);
+ } else {
+ assert(CodeGenOpts.RelocationModel == "dynamic-no-pic" &&
+ "Invalid PIC model!");
+ TargetMachine::setRelocationModel(llvm::Reloc::DynamicNoPIC);
+ }
+ // FIXME: Parse this earlier.
+ if (CodeGenOpts.CodeModel == "small") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Small);
+ } else if (CodeGenOpts.CodeModel == "kernel") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Kernel);
+ } else if (CodeGenOpts.CodeModel == "medium") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Medium);
+ } else if (CodeGenOpts.CodeModel == "large") {
+ TargetMachine::setCodeModel(llvm::CodeModel::Large);
+ } else {
+ assert(CodeGenOpts.CodeModel.empty() && "Invalid code model!");
+ TargetMachine::setCodeModel(llvm::CodeModel::Default);
+ }
+
+ std::vector<const char *> BackendArgs;
+ BackendArgs.push_back("clang"); // Fake program name.
+ if (!CodeGenOpts.DebugPass.empty()) {
+ BackendArgs.push_back("-debug-pass");
+ BackendArgs.push_back(CodeGenOpts.DebugPass.c_str());
+ }
+ if (!CodeGenOpts.LimitFloatPrecision.empty()) {
+ BackendArgs.push_back("-limit-float-precision");
+ BackendArgs.push_back(CodeGenOpts.LimitFloatPrecision.c_str());
+ }
+ if (llvm::TimePassesIsEnabled)
+ BackendArgs.push_back("-time-passes");
+ BackendArgs.push_back(0);
+ llvm::cl::ParseCommandLineOptions(BackendArgs.size() - 1,
+ const_cast<char **>(&BackendArgs[0]));
+
+ std::string FeaturesStr;
+ if (TargetOpts.CPU.size() || TargetOpts.Features.size()) {
+ SubtargetFeatures Features;
+ Features.setCPU(TargetOpts.CPU);
+ for (std::vector<std::string>::const_iterator
+ it = TargetOpts.Features.begin(),
+ ie = TargetOpts.Features.end(); it != ie; ++it)
+ Features.AddFeature(*it);
+ FeaturesStr = Features.getString();
+ }
+ TargetMachine *TM = TheTarget->createTargetMachine(Triple, FeaturesStr);
+
+ if (CodeGenOpts.RelaxAll)
+ TM->setMCRelaxAll(true);
+
+ // Create the code generator passes.
+ FunctionPassManager *PM = getCodeGenPasses();
+ CodeGenOpt::Level OptLevel = CodeGenOpt::Default;
+
+ switch (CodeGenOpts.OptimizationLevel) {
+ default: break;
+ case 0: OptLevel = CodeGenOpt::None; break;
+ case 3: OptLevel = CodeGenOpt::Aggressive; break;
+ }
+
+  // Normal mode: emit a .s or .o file by running the code generator. Note that
+  // this also adds code generator-level optimization passes.
+ TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
+ if (Action == Backend_EmitObj)
+ CGFT = TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ CGFT = TargetMachine::CGFT_Null;
+ else
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+ if (TM->addPassesToEmitFile(*PM, OS, CGFT, OptLevel,
+ /*DisableVerify=*/!CodeGenOpts.VerifyModule)) {
+ Diags.Report(diag::err_fe_unable_to_interface_with_target);
+ return false;
+ }
+
+ return true;
+}
+
+void EmitAssemblyHelper::EmitAssembly(BackendAction Action, raw_ostream *OS) {
+ TimeRegion Region(llvm::TimePassesIsEnabled ? &CodeGenerationTime : 0);
+ llvm::formatted_raw_ostream FormattedOS;
+
+ CreatePasses();
+ switch (Action) {
+ case Backend_EmitNothing:
+ break;
+
+ case Backend_EmitBC:
+ getPerModulePasses()->add(createBitcodeWriterPass(*OS));
+ break;
+
+ case Backend_EmitLL:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ getPerModulePasses()->add(createPrintModulePass(&FormattedOS));
+ break;
+
+ default:
+ FormattedOS.setStream(*OS, formatted_raw_ostream::PRESERVE_STREAM);
+ if (!AddEmitPasses(Action, FormattedOS))
+ return;
+ }
+
+ // Run passes. For now we do all passes at once, but eventually we
+ // would like to have the option of streaming code generation.
+
+ if (PerFunctionPasses) {
+ PrettyStackTraceString CrashInfo("Per-function optimization");
+
+ PerFunctionPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ PerFunctionPasses->run(*I);
+ PerFunctionPasses->doFinalization();
+ }
+
+ if (PerModulePasses) {
+ PrettyStackTraceString CrashInfo("Per-module optimization passes");
+ PerModulePasses->run(*TheModule);
+ }
+
+ if (CodeGenPasses) {
+ PrettyStackTraceString CrashInfo("Code generation");
+
+ CodeGenPasses->doInitialization();
+ for (Module::iterator I = TheModule->begin(),
+ E = TheModule->end(); I != E; ++I)
+ if (!I->isDeclaration())
+ CodeGenPasses->run(*I);
+ CodeGenPasses->doFinalization();
+ }
+}
+
+void clang::EmitBackendOutput(Diagnostic &Diags, const CodeGenOptions &CGOpts,
+ const TargetOptions &TOpts, Module *M,
+ BackendAction Action, raw_ostream *OS) {
+ EmitAssemblyHelper AsmHelper(Diags, CGOpts, TOpts, M);
+
+ AsmHelper.EmitAssembly(Action, OS);
+}
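
A minimal sketch of driving the new EmitBackendOutput entry point above, assuming the Diagnostic, option objects, and llvm::Module have already been set up elsewhere (as a frontend action would do); only the call itself is shown, and the choice of Backend_EmitBC is arbitrary.

    #include "clang/CodeGen/BackendUtil.h"
    #include "clang/Basic/Diagnostic.h"
    #include "clang/Basic/TargetOptions.h"
    #include "clang/Frontend/CodeGenOptions.h"
    #include "llvm/Module.h"
    #include "llvm/Support/raw_ostream.h"

    // Emit LLVM bitcode for M to OS using the helper defined in this file.
    static void emitBitcode(clang::Diagnostic &Diags,
                            const clang::CodeGenOptions &CGOpts,
                            const clang::TargetOptions &TOpts,
                            llvm::Module *M, llvm::raw_ostream &OS) {
      clang::EmitBackendOutput(Diags, CGOpts, TOpts, M,
                               clang::Backend_EmitBC, &OS);
    }
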
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index de58597e298d..0d05a62138a2 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -228,7 +228,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
// block literal.
// __invoke
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, CurFuncDecl,
+ = CodeGenFunction(CGM).GenerateBlockFunction(CurGD, BE, Info, CurFuncDecl,
LocalDeclMap);
BlockHasCopyDispose |= Info.BlockHasCopyDispose;
Elts[3] = Fn;
@@ -296,8 +296,11 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
const BlockDeclRefExpr *BDRE = dyn_cast<BlockDeclRefExpr>(E);
QualType Ty = E->getType();
if (BDRE && BDRE->isByRef()) {
- Types[i+BlockFields] = llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
- } else
+ Types[i+BlockFields] =
+ llvm::PointerType::get(BuildByRefType(BDRE->getDecl()), 0);
+ } else if (BDRE && BDRE->getDecl()->getType()->isReferenceType()) {
+ Types[i+BlockFields] = llvm::PointerType::get(ConvertType(Ty), 0);
+ } else
Types[i+BlockFields] = ConvertType(Ty);
}
@@ -358,11 +361,23 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
Builder.CreateStore(Loc, Addr);
continue;
} else {
- E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
- VD->getType(),
- SourceLocation());
+ if (BDRE->getCopyConstructorExpr()) {
+ E = BDRE->getCopyConstructorExpr();
+ PushDestructorCleanup(E->getType(), Addr);
+ }
+ else {
+ E = new (getContext()) DeclRefExpr(const_cast<ValueDecl*>(VD),
+ VD->getType().getNonReferenceType(),
+ SourceLocation());
+ if (VD->getType()->isReferenceType()) {
+ E = new (getContext())
+ UnaryOperator(const_cast<Expr*>(E), UnaryOperator::AddrOf,
+ getContext().getPointerType(E->getType()),
+ SourceLocation());
+ }
+ }
+ }
}
- }
if (BDRE->isByRef()) {
E = new (getContext())
@@ -386,8 +401,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
llvm::Value *BlockLiteral = LoadBlockStruct();
Loc = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
Ty = llvm::PointerType::get(Ty, 0);
Loc = Builder.CreateBitCast(Loc, Ty);
@@ -599,13 +613,13 @@ void CodeGenFunction::AllocateBlockDecl(const BlockDeclRefExpr *E) {
llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
bool IsByRef) {
+
CharUnits offset = BlockDecls[VD];
assert(!offset.isZero() && "getting address of unallocated decl");
llvm::Value *BlockLiteral = LoadBlockStruct();
llvm::Value *V = Builder.CreateGEP(BlockLiteral,
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext),
- offset.getQuantity()),
+ llvm::ConstantInt::get(Int64Ty, offset.getQuantity()),
"block.literal");
if (IsByRef) {
const llvm::Type *PtrStructTy
@@ -626,9 +640,10 @@ llvm::Value *CodeGenFunction::GetAddrOfBlockDecl(const ValueDecl *VD,
V = Builder.CreateLoad(V);
} else {
const llvm::Type *Ty = CGM.getTypes().ConvertType(VD->getType());
-
Ty = llvm::PointerType::get(Ty, 0);
V = Builder.CreateBitCast(V, Ty);
+ if (VD->getType()->isReferenceType())
+ V = Builder.CreateLoad(V, "ref.tmp");
}
return V;
}
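
The reference-type handling added above is what lets a C++ block capture a reference: the reference is stored in the block literal as a pointer and re-loaded (the new "ref.tmp" load) when the block body uses it. A tiny example, illustrative only; it needs the blocks extension (clang++ -fblocks) and the blocks runtime to actually run.

    int read_through_block(int &r) {
      int (^get)(void) = ^{ return r; };  // 'r' has reference type: captured via
                                          // a pointer in the block literal
      return get();
    }
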
@@ -680,7 +695,7 @@ BlockModule::GetAddrOfGlobalBlock(const BlockExpr *BE, const char * n) {
CGBlockInfo Info(n);
llvm::DenseMap<const Decl*, llvm::Value*> LocalDeclMap;
llvm::Function *Fn
- = CodeGenFunction(CGM).GenerateBlockFunction(BE, Info, 0, LocalDeclMap);
+ = CodeGenFunction(CGM).GenerateBlockFunction(GlobalDecl(), BE, Info, 0, LocalDeclMap);
assert(Info.BlockSize == BlockLiteralSize
&& "no imports allowed for global block");
@@ -719,7 +734,7 @@ llvm::Value *CodeGenFunction::LoadBlockStruct() {
}