author     Dimitry Andric <dim@FreeBSD.org>    2018-07-28 11:06:01 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2018-07-28 11:06:01 +0000
commit     486754660bb926339aefcf012a3f848592babb8b (patch)
tree       ecdbc446c9876f4f120f701c243373cd3cb43db3 /lib/Analysis
parent     55e6d896ad333f07bb3b1ba487df214fc268a4ab (diff)
download   src-486754660bb926339aefcf012a3f848592babb8b.tar.gz
           src-486754660bb926339aefcf012a3f848592babb8b.zip
Vendor import of clang trunk r338150 (tag: vendor/clang/clang-trunk-r338150)

Notes:
    svn path=/vendor/clang/dist/; revision=336815
    svn path=/vendor/clang/clang-trunk-r338150/; revision=336816; tag=vendor/clang/clang-trunk-r338150
Diffstat (limited to 'lib/Analysis')
-rw-r--r--  lib/Analysis/AnalysisDeclContext.cpp        165
-rw-r--r--  lib/Analysis/BodyFarm.cpp                    68
-rw-r--r--  lib/Analysis/CFG.cpp                        644
-rw-r--r--  lib/Analysis/CFGReachabilityAnalysis.cpp     11
-rw-r--r--  lib/Analysis/CMakeLists.txt                   1
-rw-r--r--  lib/Analysis/CloneDetection.cpp               6
-rw-r--r--  lib/Analysis/ConstructionContext.cpp        184
-rw-r--r--  lib/Analysis/Consumed.cpp                   171
-rw-r--r--  lib/Analysis/Dominators.cpp                   4
-rw-r--r--  lib/Analysis/LiveVariables.cpp               87
-rw-r--r--  lib/Analysis/PostOrderCFGView.cpp             7
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp         114
-rw-r--r--  lib/Analysis/ReachableCode.cpp               25
-rw-r--r--  lib/Analysis/ThreadSafety.cpp               547
-rw-r--r--  lib/Analysis/ThreadSafetyCommon.cpp         107
-rw-r--r--  lib/Analysis/ThreadSafetyTIL.cpp             38
-rw-r--r--  lib/Analysis/UninitializedValues.cpp        172
17 files changed, 1608 insertions, 743 deletions
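Context for the diff below: this import threads three new CFG build options through AnalysisDeclContextManager (AddScopes, AddRichCXXConstructors, MarkElidedCXXConstructors; see the constructor changes in AnalysisDeclContext.cpp and CFG.cpp). A minimal sketch of turning them on when building a CFG directly, assuming an ASTContext Ctx and a FunctionDecl D with a body are already in hand; this is an illustration, not part of the commit:

// Hedged sketch: option names are taken from the diff below; Ctx and D are assumed.
CFG::BuildOptions BO;
BO.AddImplicitDtors = true;           // implicit destructor elements, as before
BO.AddScopes = true;                  // new: CFGScopeBegin/CFGScopeEnd elements
BO.AddRichCXXConstructors = true;     // new: attach ConstructionContexts to constructors
BO.MarkElidedCXXConstructors = true;  // new: track pre-C++17 elidable copy constructions
std::unique_ptr<CFG> G = CFG::buildCFG(D, D->getBody(), &Ctx, BO);
if (G)
  G->dump(Ctx.getLangOpts(), /*ShowColors=*/false);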
diff --git a/lib/Analysis/AnalysisDeclContext.cpp b/lib/Analysis/AnalysisDeclContext.cpp
index 181edff0a03f..486fffbe1299 100644
--- a/lib/Analysis/AnalysisDeclContext.cpp
+++ b/lib/Analysis/AnalysisDeclContext.cpp
@@ -1,4 +1,4 @@
-//== AnalysisDeclContext.cpp - Analysis context for Path Sens analysis -*- C++ -*-//
+//===- AnalysisDeclContext.cpp - Analysis context for Path Sens analysis --===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,67 +7,72 @@
//
//===----------------------------------------------------------------------===//
//
-// This file defines AnalysisDeclContext, a class that manages the analysis context
-// data for path sensitive analysis.
+// This file defines AnalysisDeclContext, a class that manages the analysis
+// context data for path sensitive analysis.
//
//===----------------------------------------------------------------------===//
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/LambdaCapture.h"
#include "clang/AST/ParentMap.h"
+#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
-#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Analysis/Analyses/PseudoConstantAnalysis.h"
#include "clang/Analysis/BodyFarm.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/CFGStmtMap.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Basic/LLVM.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FoldingSet.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
+#include "llvm/ADT/iterator_range.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include "llvm/Support/raw_ostream.h"
+#include <cassert>
+#include <memory>
using namespace clang;
-typedef llvm::DenseMap<const void *, ManagedAnalysis *> ManagedAnalysisMap;
+using ManagedAnalysisMap = llvm::DenseMap<const void *, ManagedAnalysis *>;
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d,
const CFG::BuildOptions &buildOptions)
- : Manager(Mgr),
- D(d),
- cfgBuildOptions(buildOptions),
- forcedBlkExprs(nullptr),
- builtCFG(false),
- builtCompleteCFG(false),
- ReferencedBlockVars(nullptr),
- ManagedAnalyses(nullptr)
-{
+ : Manager(Mgr), D(d), cfgBuildOptions(buildOptions) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
AnalysisDeclContext::AnalysisDeclContext(AnalysisDeclContextManager *Mgr,
const Decl *d)
-: Manager(Mgr),
- D(d),
- forcedBlkExprs(nullptr),
- builtCFG(false),
- builtCompleteCFG(false),
- ReferencedBlockVars(nullptr),
- ManagedAnalyses(nullptr)
-{
+ : Manager(Mgr), D(d) {
cfgBuildOptions.forcedBlkExprs = &forcedBlkExprs;
}
AnalysisDeclContextManager::AnalysisDeclContextManager(
ASTContext &ASTCtx, bool useUnoptimizedCFG, bool addImplicitDtors,
bool addInitializers, bool addTemporaryDtors, bool addLifetime,
- bool addLoopExit, bool synthesizeBodies, bool addStaticInitBranch,
- bool addCXXNewAllocator, CodeInjector *injector)
+ bool addLoopExit, bool addScopes, bool synthesizeBodies,
+ bool addStaticInitBranch, bool addCXXNewAllocator,
+ bool addRichCXXConstructors, bool markElidedCXXConstructors,
+ CodeInjector *injector)
: Injector(injector), FunctionBodyFarm(ASTCtx, injector),
SynthesizeBodies(synthesizeBodies) {
cfgBuildOptions.PruneTriviallyFalseEdges = !useUnoptimizedCFG;
@@ -76,15 +81,18 @@ AnalysisDeclContextManager::AnalysisDeclContextManager(
cfgBuildOptions.AddTemporaryDtors = addTemporaryDtors;
cfgBuildOptions.AddLifetime = addLifetime;
cfgBuildOptions.AddLoopExit = addLoopExit;
+ cfgBuildOptions.AddScopes = addScopes;
cfgBuildOptions.AddStaticInitBranches = addStaticInitBranch;
cfgBuildOptions.AddCXXNewAllocator = addCXXNewAllocator;
+ cfgBuildOptions.AddRichCXXConstructors = addRichCXXConstructors;
+ cfgBuildOptions.MarkElidedCXXConstructors = markElidedCXXConstructors;
}
void AnalysisDeclContextManager::clear() { Contexts.clear(); }
Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
IsAutosynthesized = false;
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
Stmt *Body = FD->getBody();
if (auto *CoroBody = dyn_cast_or_null<CoroutineBodyStmt>(Body))
Body = CoroBody->getBody();
@@ -97,7 +105,7 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
return Body;
}
- else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
+ else if (const auto *MD = dyn_cast<ObjCMethodDecl>(D)) {
Stmt *Body = MD->getBody();
if (Manager && Manager->synthesizeBodies()) {
Stmt *SynthesizedBody = Manager->getBodyFarm().getBody(MD);
@@ -107,10 +115,9 @@ Stmt *AnalysisDeclContext::getBody(bool &IsAutosynthesized) const {
}
}
return Body;
- } else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D))
+ } else if (const auto *BD = dyn_cast<BlockDecl>(D))
return BD->getBody();
- else if (const FunctionTemplateDecl *FunTmpl
- = dyn_cast_or_null<FunctionTemplateDecl>(D))
+ else if (const auto *FunTmpl = dyn_cast_or_null<FunctionTemplateDecl>(D))
return FunTmpl->getTemplatedDecl()->getBody();
llvm_unreachable("unknown code decl");
@@ -139,9 +146,9 @@ static bool isSelfDecl(const VarDecl *VD) {
}
const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
- if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D))
+ if (const auto *MD = dyn_cast<ObjCMethodDecl>(D))
return MD->getSelfDecl();
- if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
+ if (const auto *BD = dyn_cast<BlockDecl>(D)) {
// See if 'self' was captured by the block.
for (const auto &I : BD->captures()) {
const VarDecl *VD = I.getVariable();
@@ -158,7 +165,7 @@ const ImplicitParamDecl *AnalysisDeclContext::getSelfDecl() const {
if (!parent->isLambda())
return nullptr;
- for (const LambdaCapture &LC : parent->captures()) {
+ for (const auto &LC : parent->captures()) {
if (!LC.capturesVariable())
continue;
@@ -174,7 +181,7 @@ void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
if (!forcedBlkExprs)
forcedBlkExprs = new CFG::BuildOptions::ForcedBlkExprs();
// Default construct an entry for 'stmt'.
- if (const Expr *e = dyn_cast<Expr>(stmt))
+ if (const auto *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
(void) (*forcedBlkExprs)[stmt];
}
@@ -182,7 +189,7 @@ void AnalysisDeclContext::registerForcedBlockExpression(const Stmt *stmt) {
const CFGBlock *
AnalysisDeclContext::getBlockForRegisteredExpression(const Stmt *stmt) {
assert(forcedBlkExprs);
- if (const Expr *e = dyn_cast<Expr>(stmt))
+ if (const auto *e = dyn_cast<Expr>(stmt))
stmt = e->IgnoreParens();
CFG::BuildOptions::ForcedBlkExprs::const_iterator itr =
forcedBlkExprs->find(stmt);
@@ -266,13 +273,13 @@ CFGReverseBlockReachabilityAnalysis *AnalysisDeclContext::getCFGReachablityAnaly
}
void AnalysisDeclContext::dumpCFG(bool ShowColors) {
- getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
+ getCFG()->dump(getASTContext().getLangOpts(), ShowColors);
}
ParentMap &AnalysisDeclContext::getParentMap() {
if (!PM) {
PM.reset(new ParentMap(getBody()));
- if (const CXXConstructorDecl *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
+ if (const auto *C = dyn_cast<CXXConstructorDecl>(getDecl())) {
for (const auto *I : C->inits()) {
PM->addStmt(I->getInit());
}
@@ -292,7 +299,7 @@ PseudoConstantAnalysis *AnalysisDeclContext::getPseudoConstantAnalysis() {
}
AnalysisDeclContext *AnalysisDeclContextManager::getContext(const Decl *D) {
- if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
// Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
// that has the body.
FD->hasBody(FD);
@@ -315,7 +322,7 @@ AnalysisDeclContext::getStackFrame(LocationContext const *Parent, const Stmt *S,
const BlockInvocationContext *
AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
- const clang::BlockDecl *BD,
+ const BlockDecl *BD,
const void *ContextData) {
return getLocationContextManager().getBlockInvocationContext(this, parent,
BD, ContextData);
@@ -323,7 +330,7 @@ AnalysisDeclContext::getBlockInvocationContext(const LocationContext *parent,
bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
const DeclContext *DC = D->getDeclContext()->getEnclosingNamespaceContext();
- const NamespaceDecl *ND = dyn_cast<NamespaceDecl>(DC);
+ const auto *ND = dyn_cast<NamespaceDecl>(DC);
if (!ND)
return false;
@@ -336,7 +343,7 @@ bool AnalysisDeclContext::isInStdNamespace(const Decl *D) {
return ND->isStdNamespace();
}
-LocationContextManager & AnalysisDeclContext::getLocationContextManager() {
+LocationContextManager &AnalysisDeclContext::getLocationContextManager() {
assert(Manager &&
"Cannot create LocationContexts without an AnalysisDeclContextManager!");
return Manager->getLocationContextManager();
@@ -399,7 +406,7 @@ LocationContextManager::getStackFrame(AnalysisDeclContext *ctx,
llvm::FoldingSetNodeID ID;
StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
void *InsertPos;
- StackFrameContext *L =
+ auto *L =
cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!L) {
L = new StackFrameContext(ctx, parent, s, blk, idx);
@@ -423,7 +430,7 @@ LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
llvm::FoldingSetNodeID ID;
BlockInvocationContext::Profile(ID, ctx, parent, BD, ContextData);
void *InsertPos;
- BlockInvocationContext *L =
+ auto *L =
cast_or_null<BlockInvocationContext>(Contexts.FindNodeOrInsertPos(ID,
InsertPos));
if (!L) {
@@ -437,10 +444,10 @@ LocationContextManager::getBlockInvocationContext(AnalysisDeclContext *ctx,
// LocationContext methods.
//===----------------------------------------------------------------------===//
-const StackFrameContext *LocationContext::getCurrentStackFrame() const {
+const StackFrameContext *LocationContext::getStackFrame() const {
const LocationContext *LC = this;
while (LC) {
- if (const StackFrameContext *SFC = dyn_cast<StackFrameContext>(LC))
+ if (const auto *SFC = dyn_cast<StackFrameContext>(LC))
return SFC;
LC = LC->getParent();
}
@@ -448,7 +455,7 @@ const StackFrameContext *LocationContext::getCurrentStackFrame() const {
}
bool LocationContext::inTopFrame() const {
- return getCurrentStackFrame()->inTopFrame();
+ return getStackFrame()->inTopFrame();
}
bool LocationContext::isParentOf(const LocationContext *LC) const {
@@ -463,28 +470,53 @@ bool LocationContext::isParentOf(const LocationContext *LC) const {
return false;
}
-void LocationContext::dumpStack(raw_ostream &OS, StringRef Indent) const {
+static void printLocation(raw_ostream &OS, const SourceManager &SM,
+ SourceLocation SLoc) {
+ if (SLoc.isFileID() && SM.isInMainFile(SLoc))
+ OS << "line " << SM.getExpansionLineNumber(SLoc);
+ else
+ SLoc.print(OS, SM);
+}
+
+void LocationContext::dumpStack(
+ raw_ostream &OS, StringRef Indent, const char *NL, const char *Sep,
+ std::function<void(const LocationContext *)> printMoreInfoPerContext) const {
ASTContext &Ctx = getAnalysisDeclContext()->getASTContext();
PrintingPolicy PP(Ctx.getLangOpts());
PP.TerseOutput = 1;
+ const SourceManager &SM =
+ getAnalysisDeclContext()->getASTContext().getSourceManager();
+
unsigned Frame = 0;
for (const LocationContext *LCtx = this; LCtx; LCtx = LCtx->getParent()) {
switch (LCtx->getKind()) {
case StackFrame:
- OS << Indent << '#' << Frame++ << ' ';
- cast<StackFrameContext>(LCtx)->getDecl()->print(OS, PP);
- OS << '\n';
+ OS << Indent << '#' << Frame << ' ';
+ ++Frame;
+ if (const auto *D = dyn_cast<NamedDecl>(LCtx->getDecl()))
+ OS << "Calling " << D->getQualifiedNameAsString();
+ else
+ OS << "Calling anonymous code";
+ if (const Stmt *S = cast<StackFrameContext>(LCtx)->getCallSite()) {
+ OS << " at ";
+ printLocation(OS, SM, S->getLocStart());
+ }
break;
case Scope:
- OS << Indent << " (scope)\n";
+ OS << "Entering scope";
break;
case Block:
- OS << Indent << " (block context: "
- << cast<BlockInvocationContext>(LCtx)->getContextData()
- << ")\n";
+ OS << "Invoking block";
+ if (const Decl *D = cast<BlockInvocationContext>(LCtx)->getDecl()) {
+ OS << " defined at ";
+ printLocation(OS, SM, D->getLocStart());
+ }
break;
}
+ OS << NL;
+
+ printMoreInfoPerContext(LCtx);
}
}
@@ -497,25 +529,27 @@ LLVM_DUMP_METHOD void LocationContext::dumpStack() const {
//===----------------------------------------------------------------------===//
namespace {
+
class FindBlockDeclRefExprsVals : public StmtVisitor<FindBlockDeclRefExprsVals>{
- BumpVector<const VarDecl*> &BEVals;
+ BumpVector<const VarDecl *> &BEVals;
BumpVectorContext &BC;
- llvm::SmallPtrSet<const VarDecl*, 4> Visited;
- llvm::SmallPtrSet<const DeclContext*, 4> IgnoredContexts;
+ llvm::SmallPtrSet<const VarDecl *, 4> Visited;
+ llvm::SmallPtrSet<const DeclContext *, 4> IgnoredContexts;
+
public:
FindBlockDeclRefExprsVals(BumpVector<const VarDecl*> &bevals,
BumpVectorContext &bc)
- : BEVals(bevals), BC(bc) {}
+ : BEVals(bevals), BC(bc) {}
void VisitStmt(Stmt *S) {
- for (Stmt *Child : S->children())
+ for (auto *Child : S->children())
if (Child)
Visit(Child);
}
void VisitDeclRefExpr(DeclRefExpr *DR) {
// Non-local variables are also directly modified.
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ if (const auto *VD = dyn_cast<VarDecl>(DR->getDecl())) {
if (!VD->hasLocalStorage()) {
if (Visited.insert(VD).second)
BEVals.push_back(VD, BC);
@@ -533,15 +567,16 @@ public:
for (PseudoObjectExpr::semantics_iterator it = PE->semantics_begin(),
et = PE->semantics_end(); it != et; ++it) {
Expr *Semantic = *it;
- if (OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
+ if (auto *OVE = dyn_cast<OpaqueValueExpr>(Semantic))
Semantic = OVE->getSourceExpr();
Visit(Semantic);
}
}
};
-} // end anonymous namespace
-typedef BumpVector<const VarDecl*> DeclVec;
+} // namespace
+
+using DeclVec = BumpVector<const VarDecl *>;
static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
void *&Vec,
@@ -587,7 +622,7 @@ ManagedAnalysis *&AnalysisDeclContext::getAnalysisImpl(const void *tag) {
// Cleanup.
//===----------------------------------------------------------------------===//
-ManagedAnalysis::~ManagedAnalysis() {}
+ManagedAnalysis::~ManagedAnalysis() = default;
AnalysisDeclContext::~AnalysisDeclContext() {
delete forcedBlkExprs;
@@ -600,7 +635,7 @@ AnalysisDeclContext::~AnalysisDeclContext() {
}
}
-LocationContext::~LocationContext() {}
+LocationContext::~LocationContext() = default;
LocationContextManager::~LocationContextManager() {
clear();
@@ -613,7 +648,5 @@ void LocationContextManager::clear() {
++I;
delete LC;
}
-
Contexts.clear();
}
-
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 89ca8484819d..b9fb15b2db25 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -149,7 +149,8 @@ DeclRefExpr *ASTMaker::makeDeclRefExpr(
UnaryOperator *ASTMaker::makeDereference(const Expr *Arg, QualType Ty) {
return new (C) UnaryOperator(const_cast<Expr*>(Arg), UO_Deref, Ty,
- VK_LValue, OK_Ordinary, SourceLocation());
+ VK_LValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/ false);
}
ImplicitCastExpr *ASTMaker::makeLvalueToRvalue(const Expr *Arg, QualType Ty) {
@@ -253,21 +254,24 @@ static CallExpr *create_call_once_funcptr_call(ASTContext &C, ASTMaker M,
QualType Ty = Callback->getType();
DeclRefExpr *Call = M.makeDeclRefExpr(Callback);
- CastKind CK;
+ Expr *SubExpr;
if (Ty->isRValueReferenceType()) {
- CK = CK_LValueToRValue;
- } else {
- assert(Ty->isLValueReferenceType());
- CK = CK_FunctionToPointerDecay;
+ SubExpr = M.makeImplicitCast(
+ Call, Ty.getNonReferenceType(), CK_LValueToRValue);
+ } else if (Ty->isLValueReferenceType() &&
+ Call->getType()->isFunctionType()) {
Ty = C.getPointerType(Ty.getNonReferenceType());
+ SubExpr = M.makeImplicitCast(Call, Ty, CK_FunctionToPointerDecay);
+ } else if (Ty->isLValueReferenceType()
+ && Call->getType()->isPointerType()
+ && Call->getType()->getPointeeType()->isFunctionType()){
+ SubExpr = Call;
+ } else {
+ llvm_unreachable("Unexpected state");
}
return new (C)
- CallExpr(C, M.makeImplicitCast(Call, Ty.getNonReferenceType(), CK),
- /*args=*/CallArgs,
- /*QualType=*/C.VoidTy,
- /*ExprValueType=*/VK_RValue,
- /*SourceLocation=*/SourceLocation());
+ CallExpr(C, SubExpr, CallArgs, C.VoidTy, VK_RValue, SourceLocation());
}
static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
@@ -313,7 +317,7 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/// }
/// \endcode
static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
- DEBUG(llvm::dbgs() << "Generating body for call_once\n");
+ LLVM_DEBUG(llvm::dbgs() << "Generating body for call_once\n");
// We need at least two parameters.
if (D->param_size() < 2)
@@ -341,9 +345,9 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
auto *FlagRecordDecl = dyn_cast_or_null<RecordDecl>(FlagType->getAsTagDecl());
if (!FlagRecordDecl) {
- DEBUG(llvm::dbgs() << "Flag field is not a record: "
- << "unknown std::call_once implementation, "
- << "ignoring the call.\n");
+ LLVM_DEBUG(llvm::dbgs() << "Flag field is not a record: "
+ << "unknown std::call_once implementation, "
+ << "ignoring the call.\n");
return nullptr;
}
@@ -358,16 +362,17 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
}
if (!FlagFieldDecl) {
- DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on "
- << "std::once_flag struct: unknown std::call_once "
- << "implementation, ignoring the call.");
+ LLVM_DEBUG(llvm::dbgs() << "No field _M_once or __state_ found on "
+ << "std::once_flag struct: unknown std::call_once "
+ << "implementation, ignoring the call.");
return nullptr;
}
bool isLambdaCall = CallbackRecordDecl && CallbackRecordDecl->isLambda();
if (CallbackRecordDecl && !isLambdaCall) {
- DEBUG(llvm::dbgs() << "Not supported: synthesizing body for functors when "
- << "body farming std::call_once, ignoring the call.");
+ LLVM_DEBUG(llvm::dbgs()
+ << "Not supported: synthesizing body for functors when "
+ << "body farming std::call_once, ignoring the call.");
return nullptr;
}
@@ -394,9 +399,9 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
// First two arguments are used for the flag and for the callback.
if (D->getNumParams() != CallbackFunctionType->getNumParams() + 2) {
- DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
- << "params passed to std::call_once, "
- << "ignoring the call\n");
+ LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
+ << "params passed to std::call_once, "
+ << "ignoring the call\n");
return nullptr;
}
@@ -405,6 +410,16 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
// reference.
for (unsigned int ParamIdx = 2; ParamIdx < D->getNumParams(); ParamIdx++) {
const ParmVarDecl *PDecl = D->getParamDecl(ParamIdx);
+ if (PDecl &&
+ CallbackFunctionType->getParamType(ParamIdx - 2)
+ .getNonReferenceType()
+ .getCanonicalType() !=
+ PDecl->getType().getNonReferenceType().getCanonicalType()) {
+ LLVM_DEBUG(llvm::dbgs() << "Types of params of the callback do not match "
+ << "params passed to std::call_once, "
+ << "ignoring the call\n");
+ return nullptr;
+ }
Expr *ParamExpr = M.makeDeclRefExpr(PDecl);
if (!CallbackFunctionType->getParamType(ParamIdx - 2)->isReferenceType()) {
QualType PTy = PDecl->getType().getNonReferenceType();
@@ -441,7 +456,8 @@ static Stmt *create_call_once(ASTContext &C, const FunctionDecl *D) {
/* opc=*/ UO_LNot,
/* QualType=*/ C.IntTy,
/* ExprValueKind=*/ VK_RValue,
- /* ExprObjectKind=*/ OK_Ordinary, SourceLocation());
+ /* ExprObjectKind=*/ OK_Ordinary, SourceLocation(),
+ /* CanOverflow*/ false);
// Create assignment.
BinaryOperator *FlagAssignment = M.makeAssignment(
@@ -505,7 +521,8 @@ static Stmt *create_dispatch_once(ASTContext &C, const FunctionDecl *D) {
// (2) Create the assignment to the predicate.
Expr *DoneValue =
new (C) UnaryOperator(M.makeIntegerLiteral(0, C.LongTy), UO_Not, C.LongTy,
- VK_RValue, OK_Ordinary, SourceLocation());
+ VK_RValue, OK_Ordinary, SourceLocation(),
+ /*CanOverflow*/false);
BinaryOperator *B =
M.makeAssignment(
@@ -813,4 +830,3 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
return Val.getValue();
}
-
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index 714b85d3a9ff..8a3ab15458dd 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -29,6 +29,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Support/BumpVector.h"
+#include "clang/Analysis/ConstructionContext.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/LLVM.h"
@@ -233,6 +234,13 @@ public:
assert(VarIter != 0 && "Iterator has invalid value of VarIter member");
return &Scope->Vars[VarIter - 1];
}
+
+ const VarDecl *getFirstVarInScope() const {
+ assert(Scope && "Dereferencing invalid iterator is not allowed");
+ assert(VarIter != 0 && "Iterator has invalid value of VarIter member");
+ return Scope->Vars[0];
+ }
+
VarDecl *operator*() const {
return *this->operator->();
}
@@ -266,6 +274,7 @@ public:
int distance(const_iterator L);
const_iterator shared_parent(const_iterator L);
+ bool pointsToFirstDeclaredVar() { return VarIter == 1; }
};
private:
@@ -472,6 +481,15 @@ class CFGBuilder {
using LabelSetTy = llvm::SmallSetVector<LabelDecl *, 8>;
LabelSetTy AddressTakenLabels;
+ // Information about the currently visited C++ object construction site.
+ // This is set in the construction trigger and read when the constructor
+ // or a function that returns an object by value is being visited.
+ llvm::DenseMap<Expr *, const ConstructionContextLayer *>
+ ConstructionContextMap;
+
+ using DeclsWithEndedScopeSetTy = llvm::SmallSetVector<VarDecl *, 16>;
+ DeclsWithEndedScopeSetTy DeclsWithEndedScope;
+
bool badCFG = false;
const CFG::BuildOptions &BuildOpts;
@@ -491,7 +509,8 @@ public:
explicit CFGBuilder(ASTContext *astContext,
const CFG::BuildOptions &buildOpts)
: Context(astContext), cfg(new CFG()), // crew a new CFG
- BuildOpts(buildOpts) {}
+ ConstructionContextMap(), BuildOpts(buildOpts) {}
+
// buildCFG - Used by external clients to construct the CFG.
std::unique_ptr<CFG> buildCFG(const Decl *D, Stmt *Statement);
@@ -541,6 +560,8 @@ private:
Stmt *Term,
CFGBlock *TrueBlock,
CFGBlock *FalseBlock);
+ CFGBlock *VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE,
+ AddStmtChoice asc);
CFGBlock *VisitMemberExpr(MemberExpr *M, AddStmtChoice asc);
CFGBlock *VisitObjCAtCatchStmt(ObjCAtCatchStmt *S);
CFGBlock *VisitObjCAtSynchronizedStmt(ObjCAtSynchronizedStmt *S);
@@ -566,6 +587,12 @@ private:
CFGBlock *VisitChildren(Stmt *S);
CFGBlock *VisitNoRecurse(Expr *E, AddStmtChoice asc);
+ void maybeAddScopeBeginForVarDecl(CFGBlock *B, const VarDecl *VD,
+ const Stmt *S) {
+ if (ScopePos && (VD == ScopePos.getFirstVarInScope()))
+ appendScopeBegin(B, VD, S);
+ }
+
/// When creating the CFG for temporary destructors, we want to mirror the
/// branch structure of the corresponding constructor calls.
/// Thus, while visiting a statement for temporary destructors, we keep a
@@ -643,6 +670,24 @@ private:
return Block;
}
+ // Remember to apply the construction context based on the current \p Layer
+ // when constructing the CFG element for \p CE.
+ void consumeConstructionContext(const ConstructionContextLayer *Layer,
+ Expr *E);
+
+ // Scan \p Child statement to find constructors in it, while keeping in mind
+ // that its parent statement is providing a partial construction context
+ // described by \p Layer. If a constructor is found, it would be assigned
+ // the context based on the layer. If an additional construction context layer
+ // is found, the function recurses into that.
+ void findConstructionContexts(const ConstructionContextLayer *Layer,
+ Stmt *Child);
+
+ // Unset the construction context after consuming it. This is done immediately
+ // after adding the CFGConstructor or CFGCXXRecordTypedCall element, so
+ // there's no need to do this manually in every Visit... function.
+ void cleanupConstructionContext(Expr *E);
+
void autoCreateBlock() { if (!Block) Block = createBlock(); }
CFGBlock *createBlock(bool add_successor = true);
CFGBlock *createNoReturnBlock();
@@ -660,6 +705,11 @@ private:
void addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E, Stmt *S);
void addImplicitDtorsForDestructor(const CXXDestructorDecl *DD);
+ void addScopesEnd(LocalScope::const_iterator B, LocalScope::const_iterator E,
+ Stmt *S);
+
+ void getDeclsWithEndedScope(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S);
// Local scopes creation.
LocalScope* createOrReuseLocalScope(LocalScope* Scope);
@@ -682,6 +732,45 @@ private:
B->appendStmt(const_cast<Stmt*>(S), cfg->getBumpVectorContext());
}
+ void appendConstructor(CFGBlock *B, CXXConstructExpr *CE) {
+ if (BuildOpts.AddRichCXXConstructors) {
+ if (const ConstructionContextLayer *Layer =
+ ConstructionContextMap.lookup(CE)) {
+ cleanupConstructionContext(CE);
+ if (const auto *CC = ConstructionContext::createFromLayers(
+ cfg->getBumpVectorContext(), Layer)) {
+ B->appendConstructor(CE, CC, cfg->getBumpVectorContext());
+ return;
+ }
+ }
+ }
+
+ // No valid construction context found. Fall back to statement.
+ B->appendStmt(CE, cfg->getBumpVectorContext());
+ }
+
+ void appendCall(CFGBlock *B, CallExpr *CE) {
+ if (alwaysAdd(CE) && cachedEntry)
+ cachedEntry->second = B;
+
+ if (BuildOpts.AddRichCXXConstructors) {
+ if (CFGCXXRecordTypedCall::isCXXRecordTypedCall(CE, *Context)) {
+ if (const ConstructionContextLayer *Layer =
+ ConstructionContextMap.lookup(CE)) {
+ cleanupConstructionContext(CE);
+ if (const auto *CC = ConstructionContext::createFromLayers(
+ cfg->getBumpVectorContext(), Layer)) {
+ B->appendCXXRecordTypedCall(CE, CC, cfg->getBumpVectorContext());
+ return;
+ }
+ }
+ }
+ }
+
+ // No valid construction context found. Fall back to statement.
+ B->appendStmt(CE, cfg->getBumpVectorContext());
+ }
+
void appendInitializer(CFGBlock *B, CXXCtorInitializer *I) {
B->appendInitializer(I, cfg->getBumpVectorContext());
}
@@ -725,6 +814,11 @@ private:
LocalScope::const_iterator B,
LocalScope::const_iterator E);
+ const VarDecl *
+ prependAutomaticObjScopeEndWithTerminator(CFGBlock *Blk,
+ LocalScope::const_iterator B,
+ LocalScope::const_iterator E);
+
void addSuccessor(CFGBlock *B, CFGBlock *S, bool IsReachable = true) {
B->addSuccessor(CFGBlock::AdjacentBlock(S, IsReachable),
cfg->getBumpVectorContext());
@@ -737,7 +831,27 @@ private:
cfg->getBumpVectorContext());
}
- /// \brief Find a relational comparison with an expression evaluating to a
+ void appendScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->appendScopeBegin(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void prependScopeBegin(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->prependScopeBegin(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void appendScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->appendScopeEnd(VD, S, cfg->getBumpVectorContext());
+ }
+
+ void prependScopeEnd(CFGBlock *B, const VarDecl *VD, const Stmt *S) {
+ if (BuildOpts.AddScopes)
+ B->prependScopeEnd(VD, S, cfg->getBumpVectorContext());
+ }
+
+ /// Find a relational comparison with an expression evaluating to a
/// boolean and a constant other than 0 and 1.
/// e.g. if ((x < y) == 10)
TryResult checkIncorrectRelationalOperator(const BinaryOperator *B) {
@@ -850,7 +964,7 @@ private:
}
}
- /// \brief Find a pair of comparison expressions with or without parentheses
+ /// Find a pair of comparison expressions with or without parentheses
/// with a shared variable and constants and a logical operator between them
/// that always evaluates to either true or false.
/// e.g. if (x != 3 || x != 4)
@@ -1006,7 +1120,7 @@ private:
return evaluateAsBooleanConditionNoCache(S);
}
- /// \brief Evaluate as boolean \param E without using the cache.
+ /// Evaluate as boolean \param E without using the cache.
TryResult evaluateAsBooleanConditionNoCache(Expr *E) {
if (BinaryOperator *Bop = dyn_cast<BinaryOperator>(E)) {
if (Bop->isLogicalOp()) {
@@ -1116,6 +1230,127 @@ static const VariableArrayType *FindVA(const Type *t) {
return nullptr;
}
+void CFGBuilder::consumeConstructionContext(
+ const ConstructionContextLayer *Layer, Expr *E) {
+ if (const ConstructionContextLayer *PreviouslyStoredLayer =
+ ConstructionContextMap.lookup(E)) {
+ (void)PreviouslyStoredLayer;
+ // We might have visited this child when we were finding construction
+ // contexts within its parents.
+ assert(PreviouslyStoredLayer->isStrictlyMoreSpecificThan(Layer) &&
+ "Already within a different construction context!");
+ } else {
+ ConstructionContextMap[E] = Layer;
+ }
+}
+
+void CFGBuilder::findConstructionContexts(
+ const ConstructionContextLayer *Layer, Stmt *Child) {
+ if (!BuildOpts.AddRichCXXConstructors)
+ return;
+
+ if (!Child)
+ return;
+
+ auto withExtraLayer = [this, Layer](Stmt *S) {
+ return ConstructionContextLayer::create(cfg->getBumpVectorContext(), S,
+ Layer);
+ };
+
+ switch(Child->getStmtClass()) {
+ case Stmt::CXXConstructExprClass:
+ case Stmt::CXXTemporaryObjectExprClass: {
+ // Support pre-C++17 copy elision AST.
+ auto *CE = cast<CXXConstructExpr>(Child);
+ if (BuildOpts.MarkElidedCXXConstructors && CE->isElidable()) {
+ findConstructionContexts(withExtraLayer(CE), CE->getArg(0));
+ }
+
+ consumeConstructionContext(Layer, CE);
+ break;
+ }
+ // FIXME: This, like the main visit, doesn't support CUDAKernelCallExpr.
+ // FIXME: An isa<> would look much better but this whole switch is a
+ // workaround for an internal compiler error in MSVC 2015 (see r326021).
+ case Stmt::CallExprClass:
+ case Stmt::CXXMemberCallExprClass:
+ case Stmt::CXXOperatorCallExprClass:
+ case Stmt::UserDefinedLiteralClass: {
+ auto *CE = cast<CallExpr>(Child);
+ if (CFGCXXRecordTypedCall::isCXXRecordTypedCall(CE, *Context))
+ consumeConstructionContext(Layer, CE);
+ break;
+ }
+ case Stmt::ExprWithCleanupsClass: {
+ auto *Cleanups = cast<ExprWithCleanups>(Child);
+ findConstructionContexts(Layer, Cleanups->getSubExpr());
+ break;
+ }
+ case Stmt::CXXFunctionalCastExprClass: {
+ auto *Cast = cast<CXXFunctionalCastExpr>(Child);
+ findConstructionContexts(Layer, Cast->getSubExpr());
+ break;
+ }
+ case Stmt::ImplicitCastExprClass: {
+ auto *Cast = cast<ImplicitCastExpr>(Child);
+ // Should we support other implicit cast kinds?
+ switch (Cast->getCastKind()) {
+ case CK_NoOp:
+ case CK_ConstructorConversion:
+ findConstructionContexts(Layer, Cast->getSubExpr());
+ default:
+ break;
+ }
+ break;
+ }
+ case Stmt::CXXBindTemporaryExprClass: {
+ auto *BTE = cast<CXXBindTemporaryExpr>(Child);
+ findConstructionContexts(withExtraLayer(BTE), BTE->getSubExpr());
+ break;
+ }
+ case Stmt::MaterializeTemporaryExprClass: {
+ // Normally we don't want to search in MaterializeTemporaryExpr because
+ // it indicates the beginning of a temporary object construction context,
+ // so it shouldn't be found in the middle. However, if it is the beginning
+ // of an elidable copy or move construction context, we need to include it.
+ if (const auto *CE =
+ dyn_cast_or_null<CXXConstructExpr>(Layer->getTriggerStmt())) {
+ if (CE->isElidable()) {
+ auto *MTE = cast<MaterializeTemporaryExpr>(Child);
+ findConstructionContexts(withExtraLayer(MTE), MTE->GetTemporaryExpr());
+ }
+ }
+ break;
+ }
+ case Stmt::ConditionalOperatorClass: {
+ auto *CO = cast<ConditionalOperator>(Child);
+ if (!dyn_cast_or_null<MaterializeTemporaryExpr>(Layer->getTriggerStmt())) {
+ // If the object returned by the conditional operator is not going to be a
+ // temporary object that needs to be immediately materialized, then
+ // it must be C++17 with its mandatory copy elision. Do not yet promise
+ // to support this case.
+ assert(!CO->getType()->getAsCXXRecordDecl() || CO->isGLValue() ||
+ Context->getLangOpts().CPlusPlus17);
+ break;
+ }
+ findConstructionContexts(Layer, CO->getLHS());
+ findConstructionContexts(Layer, CO->getRHS());
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+void CFGBuilder::cleanupConstructionContext(Expr *E) {
+ assert(BuildOpts.AddRichCXXConstructors &&
+ "We should not be managing construction contexts!");
+ assert(ConstructionContextMap.count(E) &&
+ "Cannot exit construction context without the context!");
+ ConstructionContextMap.erase(E);
+}
+
+
/// BuildCFG - Constructs a CFG from an AST (a Stmt*). The AST can represent an
/// arbitrary statement. Examples include a single expression or a function
/// body (compound statement). The ownership of the returned CFG is
@@ -1176,6 +1411,9 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
JT.scopePosition);
prependAutomaticObjDtorsWithTerminator(B, I->scopePosition,
JT.scopePosition);
+ const VarDecl *VD = prependAutomaticObjScopeEndWithTerminator(
+ B, I->scopePosition, JT.scopePosition);
+ appendScopeBegin(JT.block, VD, G);
addSuccessor(B, JT.block);
}
@@ -1196,6 +1434,10 @@ std::unique_ptr<CFG> CFGBuilder::buildCFG(const Decl *D, Stmt *Statement) {
// Create an empty entry block that has no predecessors.
cfg->setEntry(createBlock());
+ if (BuildOpts.AddRichCXXConstructors)
+ assert(ConstructionContextMap.empty() &&
+ "Not all construction contexts were cleaned up!");
+
return std::move(cfg);
}
@@ -1243,6 +1485,10 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
appendInitializer(Block, I);
if (Init) {
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), I),
+ Init);
+
if (HasTemporaries) {
// For expression with temporaries go directly to subexpression to omit
// generating destructors for the second time.
@@ -1267,21 +1513,20 @@ CFGBlock *CFGBuilder::addInitializer(CXXCtorInitializer *I) {
return Block;
}
-/// \brief Retrieve the type of the temporary object whose lifetime was
+/// Retrieve the type of the temporary object whose lifetime was
/// extended by a local reference with the given initializer.
-static QualType getReferenceInitTemporaryType(ASTContext &Context,
- const Expr *Init,
+static QualType getReferenceInitTemporaryType(const Expr *Init,
bool *FoundMTE = nullptr) {
while (true) {
// Skip parentheses.
Init = Init->IgnoreParens();
-
+
// Skip through cleanups.
if (const ExprWithCleanups *EWC = dyn_cast<ExprWithCleanups>(Init)) {
Init = EWC->getSubExpr();
continue;
}
-
+
// Skip through the temporary-materialization expression.
if (const MaterializeTemporaryExpr *MTE
= dyn_cast<MaterializeTemporaryExpr>(Init)) {
@@ -1290,26 +1535,17 @@ static QualType getReferenceInitTemporaryType(ASTContext &Context,
*FoundMTE = true;
continue;
}
-
- // Skip derived-to-base and no-op casts.
- if (const CastExpr *CE = dyn_cast<CastExpr>(Init)) {
- if ((CE->getCastKind() == CK_DerivedToBase ||
- CE->getCastKind() == CK_UncheckedDerivedToBase ||
- CE->getCastKind() == CK_NoOp) &&
- Init->getType()->isRecordType()) {
- Init = CE->getSubExpr();
- continue;
- }
- }
-
- // Skip member accesses into rvalues.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Init)) {
- if (!ME->isArrow() && ME->getBase()->isRValue()) {
- Init = ME->getBase();
- continue;
- }
+
+ // Skip sub-object accesses into rvalues.
+ SmallVector<const Expr *, 2> CommaLHSs;
+ SmallVector<SubobjectAdjustment, 2> Adjustments;
+ const Expr *SkippedInit =
+ Init->skipRValueSubobjectAdjustments(CommaLHSs, Adjustments);
+ if (SkippedInit != Init) {
+ Init = SkippedInit;
+ continue;
}
-
+
break;
}
@@ -1325,9 +1561,34 @@ void CFGBuilder::addLoopExit(const Stmt *LoopStmt){
appendLoopExit(Block, LoopStmt);
}
+void CFGBuilder::getDeclsWithEndedScope(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ if (!BuildOpts.AddScopes)
+ return;
+
+ if (B == E)
+ return;
+
+ // To go from B to E, one first goes up the scopes from B to P
+ // then sideways in one scope from P to P' and then down
+ // the scopes from P' to E.
+ // The lifetime of all objects between B and P end.
+ LocalScope::const_iterator P = B.shared_parent(E);
+ int Dist = B.distance(P);
+ if (Dist <= 0)
+ return;
+
+ for (LocalScope::const_iterator I = B; I != P; ++I)
+ if (I.pointsToFirstDeclaredVar())
+ DeclsWithEndedScope.insert(*I);
+}
+
void CFGBuilder::addAutomaticObjHandling(LocalScope::const_iterator B,
LocalScope::const_iterator E,
Stmt *S) {
+ getDeclsWithEndedScope(B, E, S);
+ if (BuildOpts.AddScopes)
+ addScopesEnd(B, E, S);
if (BuildOpts.AddImplicitDtors)
addAutomaticObjDtors(B, E, S);
if (BuildOpts.AddLifetime)
@@ -1379,6 +1640,23 @@ void CFGBuilder::addLifetimeEnds(LocalScope::const_iterator B,
appendLifetimeEnds(Block, *I, S);
}
+/// Add to current block markers for ending scopes.
+void CFGBuilder::addScopesEnd(LocalScope::const_iterator B,
+ LocalScope::const_iterator E, Stmt *S) {
+ // If implicit destructors are enabled, we'll add scope ends in
+ // addAutomaticObjDtors.
+ if (BuildOpts.AddImplicitDtors)
+ return;
+
+ autoCreateBlock();
+
+ for (auto I = DeclsWithEndedScope.rbegin(), E = DeclsWithEndedScope.rend();
+ I != E; ++I)
+ appendScopeEnd(Block, *I, S);
+
+ return;
+}
+
/// addAutomaticObjDtors - Add to current block automatic objects destructors
/// for objects in range of local scope positions. Use S as trigger statement
/// for destructors.
@@ -1402,12 +1680,21 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
for (SmallVectorImpl<VarDecl*>::reverse_iterator I = Decls.rbegin(),
E = Decls.rend();
I != E; ++I) {
+ if (hasTrivialDestructor(*I)) {
+ // If AddScopes is enabled and *I is a first variable in a scope, add a
+ // ScopeEnd marker in a Block.
+ if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I)) {
+ autoCreateBlock();
+ appendScopeEnd(Block, *I, S);
+ }
+ continue;
+ }
// If this destructor is marked as a no-return destructor, we need to
// create a new block for the destructor which does not have as a successor
// anything built thus far: control won't flow out of this block.
QualType Ty = (*I)->getType();
if (Ty->isReferenceType()) {
- Ty = getReferenceInitTemporaryType(*Context, (*I)->getInit());
+ Ty = getReferenceInitTemporaryType((*I)->getInit());
}
Ty = Context->getBaseElementType(Ty);
@@ -1416,6 +1703,9 @@ void CFGBuilder::addAutomaticObjDtors(LocalScope::const_iterator B,
else
autoCreateBlock();
+ // Add ScopeEnd just after automatic obj destructor.
+ if (BuildOpts.AddScopes && DeclsWithEndedScope.count(*I))
+ appendScopeEnd(Block, *I, S);
appendAutomaticObjDtor(Block, *I, S);
}
}
@@ -1478,7 +1768,8 @@ LocalScope* CFGBuilder::createOrReuseLocalScope(LocalScope* Scope) {
/// addLocalScopeForStmt - Add LocalScope to local scopes tree for statement
/// that should create implicit scope (e.g. if/else substatements).
void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return;
LocalScope *Scope = nullptr;
@@ -1503,7 +1794,8 @@ void CFGBuilder::addLocalScopeForStmt(Stmt *S) {
/// reuse Scope if not NULL.
LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
LocalScope* Scope) {
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return Scope;
for (auto *DI : DS->decls())
@@ -1515,7 +1807,7 @@ LocalScope* CFGBuilder::addLocalScopeForDeclStmt(DeclStmt *DS,
bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
// Check for const references bound to temporary. Set type to pointee.
QualType QT = VD->getType();
- if (QT.getTypePtr()->isReferenceType()) {
+ if (QT->isReferenceType()) {
// Attempt to determine whether this declaration lifetime-extends a
// temporary.
//
@@ -1525,12 +1817,16 @@ bool CFGBuilder::hasTrivialDestructor(VarDecl *VD) {
// MaterializeTemporaryExpr instead.
const Expr *Init = VD->getInit();
- if (!Init)
+ if (!Init) {
+ // Probably an exception catch-by-reference variable.
+ // FIXME: It doesn't really mean that the object has a trivial destructor.
+ // Also are there other cases?
return true;
+ }
- // Lifetime-extending a temporary.
+ // Lifetime-extending a temporary?
bool FoundMTE = false;
- QT = getReferenceInitTemporaryType(*Context, Init, &FoundMTE);
+ QT = getReferenceInitTemporaryType(Init, &FoundMTE);
if (!FoundMTE)
return true;
}
@@ -1555,7 +1851,8 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
LocalScope* Scope) {
assert(!(BuildOpts.AddImplicitDtors && BuildOpts.AddLifetime) &&
"AddImplicitDtors and AddLifetime cannot be used at the same time");
- if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime)
+ if (!BuildOpts.AddImplicitDtors && !BuildOpts.AddLifetime &&
+ !BuildOpts.AddScopes)
return Scope;
// Check if variable is local.
@@ -1568,7 +1865,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
}
if (BuildOpts.AddImplicitDtors) {
- if (!hasTrivialDestructor(VD)) {
+ if (!hasTrivialDestructor(VD) || BuildOpts.AddScopes) {
// Add the variable to scope
Scope = createOrReuseLocalScope(Scope);
Scope->addVar(VD);
@@ -1628,6 +1925,26 @@ void CFGBuilder::prependAutomaticObjLifetimeWithTerminator(
InsertPos = Blk->insertLifetimeEnds(InsertPos, *I, Blk->getTerminator());
}
+/// prependAutomaticObjScopeEndWithTerminator - Prepend scope end CFGElements for
+/// variables with automatic storage duration to CFGBlock's elements vector.
+/// Elements will be prepended to physical beginning of the vector which
+/// happens to be logical end. Use blocks terminator as statement that specifies
+/// where scope ends.
+const VarDecl *
+CFGBuilder::prependAutomaticObjScopeEndWithTerminator(
+ CFGBlock *Blk, LocalScope::const_iterator B, LocalScope::const_iterator E) {
+ if (!BuildOpts.AddScopes)
+ return nullptr;
+ BumpVectorContext &C = cfg->getBumpVectorContext();
+ CFGBlock::iterator InsertPos =
+ Blk->beginScopeEndInsert(Blk->end(), 1, C);
+ LocalScope::const_iterator PlaceToInsert = B;
+ for (LocalScope::const_iterator I = B; I != E; ++I)
+ PlaceToInsert = I;
+ Blk->insertScopeEnd(InsertPos, *PlaceToInsert, Blk->getTerminator());
+ return *PlaceToInsert;
+}
+
/// Visit - Walk the subtree of a statement and add extra
/// blocks for ternary operators, &&, and ||. We also process "," and
/// DeclStmts (which may contain nested control-flow).
@@ -1756,6 +2073,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc) {
case Stmt::LambdaExprClass:
return VisitLambdaExpr(cast<LambdaExpr>(S), asc);
+ case Stmt::MaterializeTemporaryExprClass:
+ return VisitMaterializeTemporaryExpr(cast<MaterializeTemporaryExpr>(S),
+ asc);
+
case Stmt::MemberExprClass:
return VisitMemberExpr(cast<MemberExpr>(S), asc);
@@ -2045,7 +2366,7 @@ static bool CanThrow(Expr *E, ASTContext &Ctx) {
if (FT) {
if (const FunctionProtoType *Proto = dyn_cast<FunctionProtoType>(FT))
if (!isUnresolvedExceptionSpec(Proto->getExceptionSpecType()) &&
- Proto->isNothrow(Ctx))
+ Proto->isNothrow())
return false;
}
return true;
@@ -2062,6 +2383,13 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
if (!boundType.isNull()) calleeType = boundType;
}
+ // FIXME: Once actually implemented, this construction context layer should
+ // include the number of the argument as well.
+ for (auto Arg: C->arguments()) {
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), C), Arg);
+ }
+
// If this is a call to a no-return function, this stops the block here.
bool NoReturn = getFunctionExtInfo(*calleeType).getNoReturn();
@@ -2078,7 +2406,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
bool OmitArguments = false;
if (FunctionDecl *FD = C->getDirectCallee()) {
- if (FD->isNoReturn())
+ if (FD->isNoReturn() || C->isBuiltinAssumeFalse(*Context))
NoReturn = true;
if (FD->hasAttr<NoThrowAttr>())
AddEHEdge = false;
@@ -2098,7 +2426,10 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
}
if (!NoReturn && !AddEHEdge) {
- return VisitStmt(C, asc.withAlwaysAdd(true));
+ autoCreateBlock();
+ appendCall(Block, C);
+
+ return VisitChildren(C);
}
if (Block) {
@@ -2112,7 +2443,7 @@ CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
else
Block = createBlock();
- appendStmt(Block, C);
+ appendCall(Block, C);
if (AddEHEdge) {
// Add exceptional edges.
@@ -2326,7 +2657,11 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
autoCreateBlock();
appendStmt(Block, DS);
-
+
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), DS),
+ Init);
+
// Keep track of the last non-null block, as 'Block' can be nulled out
// if the initializer expression is something like a 'while' in a
// statement-expression.
@@ -2353,6 +2688,8 @@ CFGBlock *CFGBuilder::VisitDeclSubExpr(DeclStmt *DS) {
LastBlock = newBlock;
}
+ maybeAddScopeBeginForVarDecl(Block, VD, DS);
+
// Remove variable from local scope.
if (ScopePos && VD == *ScopePos)
++ScopePos;
@@ -2517,6 +2854,10 @@ CFGBlock *CFGBuilder::VisitReturnStmt(ReturnStmt *R) {
addAutomaticObjHandling(ScopePos, LocalScope::const_iterator(), R);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), R),
+ R->getRetValue());
+
// If the one of the destructors does not return, we already have the Exit
// block as a successor.
if (!Block->hasNoReturnElement())
@@ -2813,6 +3154,7 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
do {
Expr *C = F->getCond();
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
// Specially handle logical operators, which have a slightly
// more optimal CFG representation.
@@ -2843,9 +3185,16 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
if (VarDecl *VD = F->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- appendStmt(Block, F->getConditionVariableDeclStmt());
+ const DeclStmt *DS = F->getConditionVariableDeclStmt();
+ assert(DS->isSingleDecl());
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(),
+ const_cast<DeclStmt *>(DS)),
+ Init);
+ appendStmt(Block, DS);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
+ maybeAddScopeBeginForVarDecl(EntryConditionBlock, VD, C);
}
}
@@ -2872,6 +3221,8 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
// If the loop contains initialization, create a new block for those
// statements. This block can also contain statements that precede the loop.
if (Stmt *I = F->getInit()) {
+ SaveAndRestore<LocalScope::const_iterator> save_scope_pos(ScopePos);
+ ScopePos = LoopBeginScopePos;
Block = createBlock();
return addStmt(I);
}
@@ -2883,6 +3234,16 @@ CFGBlock *CFGBuilder::VisitForStmt(ForStmt *F) {
return EntryConditionBlock;
}
+CFGBlock *
+CFGBuilder::VisitMaterializeTemporaryExpr(MaterializeTemporaryExpr *MTE,
+ AddStmtChoice asc) {
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), MTE),
+ MTE->getTemporary());
+
+ return VisitStmt(MTE, asc);
+}
+
CFGBlock *CFGBuilder::VisitMemberExpr(MemberExpr *M, AddStmtChoice asc) {
if (asc.alwaysAdd(*this, M)) {
autoCreateBlock();
@@ -3155,9 +3516,16 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
if (VarDecl *VD = W->getConditionVariable()) {
if (Expr *Init = VD->getInit()) {
autoCreateBlock();
- appendStmt(Block, W->getConditionVariableDeclStmt());
+ const DeclStmt *DS = W->getConditionVariableDeclStmt();
+ assert(DS->isSingleDecl());
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(),
+ const_cast<DeclStmt *>(DS)),
+ Init);
+ appendStmt(Block, DS);
EntryConditionBlock = addStmt(Init);
assert(Block == EntryConditionBlock);
+ maybeAddScopeBeginForVarDecl(EntryConditionBlock, VD, C);
}
}
@@ -3483,6 +3851,7 @@ CFGBlock *CFGBuilder::VisitSwitchStmt(SwitchStmt *Terminator) {
autoCreateBlock();
appendStmt(Block, Terminator->getConditionVariableDeclStmt());
LastBlock = addStmt(Init);
+ maybeAddScopeBeginForVarDecl(LastBlock, VD, Init);
}
}
@@ -3863,6 +4232,10 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
autoCreateBlock();
appendStmt(Block, E);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), E),
+ E->getSubExpr());
+
// We do not want to propagate the AlwaysAdd property.
asc = asc.withAlwaysAdd(false);
}
@@ -3872,7 +4245,7 @@ CFGBlock *CFGBuilder::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *E,
CFGBlock *CFGBuilder::VisitCXXConstructExpr(CXXConstructExpr *C,
AddStmtChoice asc) {
autoCreateBlock();
- appendStmt(Block, C);
+ appendConstructor(Block, C);
return VisitChildren(C);
}
@@ -3882,15 +4255,23 @@ CFGBlock *CFGBuilder::VisitCXXNewExpr(CXXNewExpr *NE,
autoCreateBlock();
appendStmt(Block, NE);
+ findConstructionContexts(
+ ConstructionContextLayer::create(cfg->getBumpVectorContext(), NE),
+ const_cast<CXXConstructExpr *>(NE->getConstructExpr()));
+
if (NE->getInitializer())
Block = Visit(NE->getInitializer());
+
if (BuildOpts.AddCXXNewAllocator)
appendNewAllocator(Block, NE);
+
if (NE->isArray())
Block = Visit(NE->getArraySize());
+
for (CXXNewExpr::arg_iterator I = NE->placement_arg_begin(),
E = NE->placement_arg_end(); I != E; ++I)
Block = Visit(*I);
+
return Block;
}
@@ -3925,7 +4306,7 @@ CFGBlock *CFGBuilder::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *E,
CFGBlock *CFGBuilder::VisitCXXTemporaryObjectExpr(CXXTemporaryObjectExpr *C,
AddStmtChoice asc) {
autoCreateBlock();
- appendStmt(Block, C);
+ appendConstructor(Block, C);
return VisitChildren(C);
}
@@ -4027,9 +4408,11 @@ tryAgain:
auto *LE = cast<LambdaExpr>(E);
CFGBlock *B = Block;
for (Expr *Init : LE->capture_inits()) {
- if (CFGBlock *R = VisitForTemporaryDtors(
- Init, /*BindToTemporary=*/false, Context))
- B = R;
+ if (Init) {
+ if (CFGBlock *R = VisitForTemporaryDtors(
+ Init, /*BindToTemporary=*/false, Context))
+ B = R;
+ }
}
return B;
}
@@ -4210,11 +4593,15 @@ std::unique_ptr<CFG> CFG::buildCFG(const Decl *D, Stmt *Statement,
const CXXDestructorDecl *
CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
switch (getKind()) {
- case CFGElement::Statement:
case CFGElement::Initializer:
case CFGElement::NewAllocator:
case CFGElement::LoopExit:
case CFGElement::LifetimeEnds:
+ case CFGElement::Statement:
+ case CFGElement::Constructor:
+ case CFGElement::CXXRecordTypedCall:
+ case CFGElement::ScopeBegin:
+ case CFGElement::ScopeEnd:
llvm_unreachable("getDestructorDecl should only be used with "
"ImplicitDtors");
case CFGElement::AutomaticObjectDtor: {
@@ -4227,7 +4614,7 @@ CFGImplicitDtor::getDestructorDecl(ASTContext &astContext) const {
// temporary in an initializer expression.
if (ty->isReferenceType()) {
if (const Expr *Init = var->getInit()) {
- ty = getReferenceInitTemporaryType(astContext, Init);
+ ty = getReferenceInitTemporaryType(Init);
}
}
@@ -4343,8 +4730,8 @@ public:
switch (stmt->getStmtClass()) {
case Stmt::DeclStmtClass:
- DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
- break;
+ DeclMap[cast<DeclStmt>(stmt)->getSingleDecl()] = P;
+ break;
case Stmt::IfStmtClass: {
const VarDecl *var = cast<IfStmt>(stmt)->getConditionVariable();
if (var)
@@ -4544,6 +4931,95 @@ public:
} // namespace
+static void print_initializer(raw_ostream &OS, StmtPrinterHelper &Helper,
+ const CXXCtorInitializer *I) {
+ if (I->isBaseInitializer())
+ OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
+ else if (I->isDelegatingInitializer())
+ OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
+ else
+ OS << I->getAnyMember()->getName();
+ OS << "(";
+ if (Expr *IE = I->getInit())
+ IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
+ OS << ")";
+
+ if (I->isBaseInitializer())
+ OS << " (Base initializer)";
+ else if (I->isDelegatingInitializer())
+ OS << " (Delegating initializer)";
+ else
+ OS << " (Member initializer)";
+}
+
+static void print_construction_context(raw_ostream &OS,
+ StmtPrinterHelper &Helper,
+ const ConstructionContext *CC) {
+ SmallVector<const Stmt *, 3> Stmts;
+ switch (CC->getKind()) {
+ case ConstructionContext::SimpleConstructorInitializerKind: {
+ OS << ", ";
+ const auto *SICC = cast<SimpleConstructorInitializerConstructionContext>(CC);
+ print_initializer(OS, Helper, SICC->getCXXCtorInitializer());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyConstructorInitializerKind: {
+ OS << ", ";
+ const auto *CICC =
+ cast<CXX17ElidedCopyConstructorInitializerConstructionContext>(CC);
+ print_initializer(OS, Helper, CICC->getCXXCtorInitializer());
+ Stmts.push_back(CICC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::SimpleVariableKind: {
+ const auto *SDSCC = cast<SimpleVariableConstructionContext>(CC);
+ Stmts.push_back(SDSCC->getDeclStmt());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyVariableKind: {
+ const auto *CDSCC = cast<CXX17ElidedCopyVariableConstructionContext>(CC);
+ Stmts.push_back(CDSCC->getDeclStmt());
+ Stmts.push_back(CDSCC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::NewAllocatedObjectKind: {
+ const auto *NECC = cast<NewAllocatedObjectConstructionContext>(CC);
+ Stmts.push_back(NECC->getCXXNewExpr());
+ break;
+ }
+ case ConstructionContext::SimpleReturnedValueKind: {
+ const auto *RSCC = cast<SimpleReturnedValueConstructionContext>(CC);
+ Stmts.push_back(RSCC->getReturnStmt());
+ break;
+ }
+ case ConstructionContext::CXX17ElidedCopyReturnedValueKind: {
+ const auto *RSCC =
+ cast<CXX17ElidedCopyReturnedValueConstructionContext>(CC);
+ Stmts.push_back(RSCC->getReturnStmt());
+ Stmts.push_back(RSCC->getCXXBindTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::SimpleTemporaryObjectKind: {
+ const auto *TOCC = cast<SimpleTemporaryObjectConstructionContext>(CC);
+ Stmts.push_back(TOCC->getCXXBindTemporaryExpr());
+ Stmts.push_back(TOCC->getMaterializedTemporaryExpr());
+ break;
+ }
+ case ConstructionContext::ElidedTemporaryObjectKind: {
+ const auto *TOCC = cast<ElidedTemporaryObjectConstructionContext>(CC);
+ Stmts.push_back(TOCC->getCXXBindTemporaryExpr());
+ Stmts.push_back(TOCC->getMaterializedTemporaryExpr());
+ Stmts.push_back(TOCC->getConstructorAfterElision());
+ break;
+ }
+ }
+ for (auto I: Stmts)
+ if (I) {
+ OS << ", ";
+ Helper.handledStmt(const_cast<Stmt *>(I), OS);
+ }
+}
+
static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
const CFGElement &E) {
if (Optional<CFGStmt> CS = E.getAs<CFGStmt>()) {
@@ -4573,16 +5049,23 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
}
S->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
- if (isa<CXXOperatorCallExpr>(S)) {
+ if (auto VTC = E.getAs<CFGCXXRecordTypedCall>()) {
+ if (isa<CXXOperatorCallExpr>(S))
+ OS << " (OperatorCall)";
+ OS << " (CXXRecordTypedCall";
+ print_construction_context(OS, Helper, VTC->getConstructionContext());
+ OS << ")";
+ } else if (isa<CXXOperatorCallExpr>(S)) {
OS << " (OperatorCall)";
- }
- else if (isa<CXXBindTemporaryExpr>(S)) {
+ } else if (isa<CXXBindTemporaryExpr>(S)) {
OS << " (BindTemporary)";
- }
- else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
- OS << " (CXXConstructExpr, " << CCE->getType().getAsString() << ")";
- }
- else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
+ } else if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(S)) {
+ OS << " (CXXConstructExpr";
+ if (Optional<CFGConstructor> CE = E.getAs<CFGConstructor>()) {
+ print_construction_context(OS, Helper, CE->getConstructionContext());
+ }
+ OS << ", " << CCE->getType().getAsString() << ")";
+ } else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
OS << " (" << CE->getStmtClassName() << ", "
<< CE->getCastKindName()
<< ", " << CE->getType().getAsString()
@@ -4593,32 +5076,19 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (isa<Expr>(S))
OS << '\n';
} else if (Optional<CFGInitializer> IE = E.getAs<CFGInitializer>()) {
- const CXXCtorInitializer *I = IE->getInitializer();
- if (I->isBaseInitializer())
- OS << I->getBaseClass()->getAsCXXRecordDecl()->getName();
- else if (I->isDelegatingInitializer())
- OS << I->getTypeSourceInfo()->getType()->getAsCXXRecordDecl()->getName();
- else OS << I->getAnyMember()->getName();
-
- OS << "(";
- if (Expr *IE = I->getInit())
- IE->printPretty(OS, &Helper, PrintingPolicy(Helper.getLangOpts()));
- OS << ")";
-
- if (I->isBaseInitializer())
- OS << " (Base initializer)\n";
- else if (I->isDelegatingInitializer())
- OS << " (Delegating initializer)\n";
- else OS << " (Member initializer)\n";
+ print_initializer(OS, Helper, IE->getInitializer());
+ OS << '\n';
} else if (Optional<CFGAutomaticObjDtor> DE =
E.getAs<CFGAutomaticObjDtor>()) {
const VarDecl *VD = DE->getVarDecl();
Helper.handleDecl(VD, OS);
- const Type* T = VD->getType().getTypePtr();
- if (const ReferenceType* RT = T->getAs<ReferenceType>())
- T = RT->getPointeeType().getTypePtr();
- T = T->getBaseElementTypeUnsafe();
+ ASTContext &ACtx = VD->getASTContext();
+ QualType T = VD->getType();
+ if (T->isReferenceType())
+ T = getReferenceInitTemporaryType(VD->getInit(), nullptr);
+ if (const ArrayType *AT = ACtx.getAsArrayType(T))
+ T = ACtx.getBaseElementType(AT);
OS << ".~" << T->getAsCXXRecordDecl()->getName().str() << "()";
OS << " (Implicit destructor)\n";
@@ -4630,6 +5100,16 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
} else if (Optional<CFGLoopExit> LE = E.getAs<CFGLoopExit>()) {
const Stmt *LoopStmt = LE->getLoopStmt();
OS << LoopStmt->getStmtClassName() << " (LoopExit)\n";
+ } else if (Optional<CFGScopeBegin> SB = E.getAs<CFGScopeBegin>()) {
+ OS << "CFGScopeBegin(";
+ if (const VarDecl *VD = SB->getVarDecl())
+ OS << VD->getQualifiedNameAsString();
+ OS << ")\n";
+ } else if (Optional<CFGScopeEnd> SE = E.getAs<CFGScopeEnd>()) {
+ OS << "CFGScopeEnd(";
+ if (const VarDecl *VD = SE->getVarDecl())
+ OS << VD->getQualifiedNameAsString();
+ OS << ")\n";
} else if (Optional<CFGNewAllocator> NE = E.getAs<CFGNewAllocator>()) {
OS << "CFGNewAllocator(";
if (const CXXNewExpr *AllocExpr = NE->getAllocatorExpr())
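The CFGAutomaticObjDtor printing above now resolves the destructed type through getReferenceInitTemporaryType for references and reduces arrays to their base element type, and the new CFGScopeBegin/CFGScopeEnd elements mark variable scopes when scope tracking is enabled. A hedged sketch of the source shapes this concerns (names invented):

    struct S { ~S(); };

    void f() {
      const S &Ref = S();   // lifetime-extended temporary: its cleanup is an
                            // automatic destructor for S at the end of Ref's
                            // scope, not a temporary destructor
      S Arr[2];             // array: the implicit destructor is reported for
                            // the base element type S
    }                       // scope ends; implicit destructors run here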
diff --git a/lib/Analysis/CFGReachabilityAnalysis.cpp b/lib/Analysis/CFGReachabilityAnalysis.cpp
index 4ae135f1ea77..6f557e092fd7 100644
--- a/lib/Analysis/CFGReachabilityAnalysis.cpp
+++ b/lib/Analysis/CFGReachabilityAnalysis.cpp
@@ -1,4 +1,4 @@
-//==- CFGReachabilityAnalysis.cpp - Basic reachability analysis --*- C++ -*-==//
+//===- CFGReachabilityAnalysis.cpp - Basic reachability analysis ----------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,18 +13,19 @@
//
//===----------------------------------------------------------------------===//
-#include "llvm/ADT/SmallVector.h"
#include "clang/Analysis/Analyses/CFGReachabilityAnalysis.h"
#include "clang/Analysis/CFG.h"
+#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/SmallVector.h"
using namespace clang;
-CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(const CFG &cfg)
- : analyzed(cfg.getNumBlockIDs(), false) {}
+CFGReverseBlockReachabilityAnalysis::CFGReverseBlockReachabilityAnalysis(
+ const CFG &cfg)
+ : analyzed(cfg.getNumBlockIDs(), false) {}
bool CFGReverseBlockReachabilityAnalysis::isReachable(const CFGBlock *Src,
const CFGBlock *Dst) {
-
const unsigned DstBlockID = Dst->getBlockID();
// If we haven't analyzed the destination node, run the analysis now
diff --git a/lib/Analysis/CMakeLists.txt b/lib/Analysis/CMakeLists.txt
index fdc9e6cee8e1..432067d98157 100644
--- a/lib/Analysis/CMakeLists.txt
+++ b/lib/Analysis/CMakeLists.txt
@@ -11,6 +11,7 @@ add_clang_library(clangAnalysis
CallGraph.cpp
CloneDetection.cpp
CocoaConventions.cpp
+ ConstructionContext.cpp
Consumed.cpp
CodeInjector.cpp
Dominators.cpp
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index 098803f9a417..8912b3b76751 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -381,7 +381,7 @@ void RecursiveCloneTypeIIHashConstraint::constrain(
for (unsigned i = 0; i < StmtsByHash.size() - 1; ++i) {
const auto Current = StmtsByHash[i];
- // It's likely that we just found an sequence of StmtSequences that
+ // It's likely that we just found a sequence of StmtSequences that
// represent a CloneGroup, so we create a new group and start checking and
// adding the StmtSequences in this sequence.
CloneDetector::CloneGroup NewGroup;
@@ -534,14 +534,14 @@ void VariablePattern::addVariableOccurence(const VarDecl *VarDecl,
// First check if we already reference this variable
for (size_t KindIndex = 0; KindIndex < Variables.size(); ++KindIndex) {
if (Variables[KindIndex] == VarDecl) {
- // If yes, add a new occurence that points to the existing entry in
+ // If yes, add a new occurrence that points to the existing entry in
// the Variables vector.
Occurences.emplace_back(KindIndex, Mention);
return;
}
}
// If this variable wasn't already referenced, add it to the list of
- // referenced variables and add a occurence that points to this new entry.
+  // referenced variables and add an occurrence that points to this new entry.
Occurences.emplace_back(Variables.size(), Mention);
Variables.push_back(VarDecl);
}
diff --git a/lib/Analysis/ConstructionContext.cpp b/lib/Analysis/ConstructionContext.cpp
new file mode 100644
index 000000000000..ed1e63243217
--- /dev/null
+++ b/lib/Analysis/ConstructionContext.cpp
@@ -0,0 +1,184 @@
+//===- ConstructionContext.cpp - CFG constructor information --------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the ConstructionContext class and its sub-classes,
+// which represent the various ways of constructing C++ objects,
+// together with additional information that users may want to know
+// about the constructor.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/ConstructionContext.h"
+
+using namespace clang;
+
+const ConstructionContextLayer *
+ConstructionContextLayer::create(BumpVectorContext &C, TriggerTy Trigger,
+ const ConstructionContextLayer *Parent) {
+ ConstructionContextLayer *CC =
+ C.getAllocator().Allocate<ConstructionContextLayer>();
+ return new (CC) ConstructionContextLayer(Trigger, Parent);
+}
+
+bool ConstructionContextLayer::isStrictlyMoreSpecificThan(
+ const ConstructionContextLayer *Other) const {
+ const ConstructionContextLayer *Self = this;
+ while (true) {
+ if (!Other)
+ return Self;
+ if (!Self || !Self->isSameLayer(Other))
+ return false;
+ Self = Self->getParent();
+ Other = Other->getParent();
+ }
+ llvm_unreachable("The above loop can only be terminated via return!");
+}
+
+const ConstructionContext *ConstructionContext::createFromLayers(
+ BumpVectorContext &C, const ConstructionContextLayer *TopLayer) {
+  // Before this point, all we had was a stockpile of arbitrary layers.
+  // Now validate that it is shaped as one of the finite number of expected
+ // patterns.
+ if (const Stmt *S = TopLayer->getTriggerStmt()) {
+ if (const auto *DS = dyn_cast<DeclStmt>(S)) {
+ assert(TopLayer->isLast());
+ return create<SimpleVariableConstructionContext>(C, DS);
+ }
+ if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
+ assert(TopLayer->isLast());
+ return create<NewAllocatedObjectConstructionContext>(C, NE);
+ }
+ if (const auto *BTE = dyn_cast<CXXBindTemporaryExpr>(S)) {
+ const MaterializeTemporaryExpr *MTE = nullptr;
+ assert(BTE->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasNonTrivialDestructor());
+ // For temporaries with destructors, there may or may not be
+ // lifetime extension on the parent layer.
+ if (const ConstructionContextLayer *ParentLayer = TopLayer->getParent()) {
+ // C++17 *requires* elision of the constructor at the return site
+      // and at the variable/member initialization site, while previous
+      // standards merely allowed an optional elidable constructor.
+ // This is the C++17 copy-elided construction into a ctor initializer.
+ if (const CXXCtorInitializer *I = ParentLayer->getTriggerInit()) {
+ return create<
+ CXX17ElidedCopyConstructorInitializerConstructionContext>(C,
+ I, BTE);
+ }
+ assert(ParentLayer->getTriggerStmt() &&
+ "Non-statement-based layers have been handled above!");
+ // This is the normal, non-C++17 case: a temporary object which has
+ // both destruction and materialization info attached to it in the AST.
+ if ((MTE = dyn_cast<MaterializeTemporaryExpr>(
+ ParentLayer->getTriggerStmt()))) {
+ if (MTE->getStorageDuration() != SD_FullExpression) {
+ // If the temporary is lifetime-extended, don't save the BTE,
+            // because we need an automatic destructor rather than a
+            // temporary destructor.
+ BTE = nullptr;
+ }
+
+ // Handle pre-C++17 copy and move elision.
+ const CXXConstructExpr *ElidedCE = nullptr;
+ const ConstructionContext *ElidedCC = nullptr;
+ if (const ConstructionContextLayer *ElidedLayer =
+ ParentLayer->getParent()) {
+ ElidedCE = cast<CXXConstructExpr>(ElidedLayer->getTriggerStmt());
+ assert(ElidedCE->isElidable());
+ // We're creating a construction context that might have already
+ // been created elsewhere. Maybe we should unique our construction
+ // contexts. That's what we often do, but in this case it's unlikely
+ // to bring any benefits.
+ ElidedCC = createFromLayers(C, ElidedLayer->getParent());
+ if (!ElidedCC) {
+ // We may fail to create the elided construction context.
+ // In this case, skip copy elision entirely.
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE,
+ MTE);
+ } else {
+ return create<ElidedTemporaryObjectConstructionContext>(
+ C, BTE, MTE, ElidedCE, ElidedCC);
+ }
+ }
+ assert(ParentLayer->isLast());
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE, MTE);
+ }
+ assert(ParentLayer->isLast());
+
+ // This is a constructor into a function argument. Not implemented yet.
+ if (isa<CallExpr>(ParentLayer->getTriggerStmt()))
+ return nullptr;
+ // This is C++17 copy-elided construction into return statement.
+ if (auto *RS = dyn_cast<ReturnStmt>(ParentLayer->getTriggerStmt())) {
+ assert(!RS->getRetValue()->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasTrivialDestructor());
+ return create<CXX17ElidedCopyReturnedValueConstructionContext>(C,
+ RS, BTE);
+ }
+ // This is C++17 copy-elided construction into a simple variable.
+ if (auto *DS = dyn_cast<DeclStmt>(ParentLayer->getTriggerStmt())) {
+ assert(!cast<VarDecl>(DS->getSingleDecl())->getType()
+ .getCanonicalType()->getAsCXXRecordDecl()
+ ->hasTrivialDestructor());
+ return create<CXX17ElidedCopyVariableConstructionContext>(C, DS, BTE);
+ }
+ llvm_unreachable("Unexpected construction context with destructor!");
+ }
+ // A temporary object that doesn't require materialization.
+ // In particular, it shouldn't require copy elision, because
+ // copy/move constructors take a reference, which requires
+ // materialization to obtain the glvalue.
+ return create<SimpleTemporaryObjectConstructionContext>(C, BTE,
+ /*MTE=*/nullptr);
+ }
+ if (const auto *MTE = dyn_cast<MaterializeTemporaryExpr>(S)) {
+ // If the object requires destruction and is not lifetime-extended,
+ // then it must have a BTE within its MTE.
+ // FIXME: This should be an assertion.
+ if (!(MTE->getType().getCanonicalType()
+ ->getAsCXXRecordDecl()->hasTrivialDestructor() ||
+ MTE->getStorageDuration() != SD_FullExpression))
+ return nullptr;
+
+ // Handle pre-C++17 copy and move elision.
+ const CXXConstructExpr *ElidedCE = nullptr;
+ const ConstructionContext *ElidedCC = nullptr;
+ if (const ConstructionContextLayer *ElidedLayer = TopLayer->getParent()) {
+ ElidedCE = cast<CXXConstructExpr>(ElidedLayer->getTriggerStmt());
+ assert(ElidedCE->isElidable());
+ // We're creating a construction context that might have already
+ // been created elsewhere. Maybe we should unique our construction
+ // contexts. That's what we often do, but in this case it's unlikely
+ // to bring any benefits.
+ ElidedCC = createFromLayers(C, ElidedLayer->getParent());
+ if (!ElidedCC) {
+ // We may fail to create the elided construction context.
+ // In this case, skip copy elision entirely.
+ return create<SimpleTemporaryObjectConstructionContext>(C, nullptr,
+ MTE);
+ }
+ return create<ElidedTemporaryObjectConstructionContext>(
+ C, nullptr, MTE, ElidedCE, ElidedCC);
+ }
+ assert(TopLayer->isLast());
+ return create<SimpleTemporaryObjectConstructionContext>(C, nullptr, MTE);
+ }
+ if (const auto *RS = dyn_cast<ReturnStmt>(S)) {
+ assert(TopLayer->isLast());
+ return create<SimpleReturnedValueConstructionContext>(C, RS);
+ }
+ // This is a constructor into a function argument. Not implemented yet.
+ if (isa<CallExpr>(TopLayer->getTriggerStmt()))
+ return nullptr;
+ llvm_unreachable("Unexpected construction context with statement!");
+ } else if (const CXXCtorInitializer *I = TopLayer->getTriggerInit()) {
+ assert(TopLayer->isLast());
+ return create<SimpleConstructorInitializerConstructionContext>(C, I);
+ }
+ llvm_unreachable("Unexpected construction context!");
+}
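For orientation, the context kinds recovered by createFromLayers correspond roughly to the following source patterns. This is a hedged sketch with invented names; the exact context also depends on the language mode and on whether the class has a non-trivial destructor:

    struct T { T(); T(const T &); ~T(); };

    T *Heap = new T();         // NewAllocatedObject
    T make() { return T(); }   // SimpleReturnedValue, or
                               // CXX17ElidedCopyReturnedValue in C++17

    void g() {
      T Local = T();           // SimpleVariable / CXX17ElidedCopyVariable;
                               // pre-C++17 the inner T() is an
                               // ElidedTemporaryObject feeding the elidable copy
      T();                     // discarded temporary: SimpleTemporaryObject
      const T &Ref = T();      // lifetime-extended temporary: a temporary-object
                               // context without a temporary destructor
    }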
diff --git a/lib/Analysis/Consumed.cpp b/lib/Analysis/Consumed.cpp
index 96edad0c3019..a46386e2d13d 100644
--- a/lib/Analysis/Consumed.cpp
+++ b/lib/Analysis/Consumed.cpp
@@ -1,4 +1,4 @@
-//===- Consumed.cpp --------------------------------------------*- C++ --*-===//
+//===- Consumed.cpp -------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,21 +13,29 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/Consumed.h"
-#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
#include <memory>
+#include <utility>
// TODO: Adjust states of args to constructors in the same way that arguments to
// function calls are handled.
@@ -49,7 +57,7 @@ using namespace clang;
using namespace consumed;
// Key method definition
-ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() {}
+ConsumedWarningsHandlerBase::~ConsumedWarningsHandlerBase() = default;
static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
// Find the source location of the first statement in the block, if the block
@@ -63,7 +71,7 @@ static SourceLocation getFirstStmtLoc(const CFGBlock *Block) {
if (Block->succ_size() == 1 && *Block->succ_begin())
return getFirstStmtLoc(*Block->succ_begin());
- return SourceLocation();
+ return {};
}
static SourceLocation getLastStmtLoc(const CFGBlock *Block) {
@@ -109,7 +117,6 @@ static ConsumedState invertConsumedUnconsumed(ConsumedState State) {
static bool isCallableInState(const CallableWhenAttr *CWAttr,
ConsumedState State) {
-
for (const auto &S : CWAttr->callableStates()) {
ConsumedState MappedAttrState = CS_None;
@@ -134,7 +141,6 @@ static bool isCallableInState(const CallableWhenAttr *CWAttr,
return false;
}
-
static bool isConsumableType(const QualType &QT) {
if (QT->isPointerType() || QT->isReferenceType())
return false;
@@ -161,7 +167,6 @@ static bool isSetOnReadPtrType(const QualType &QT) {
return false;
}
-
static bool isKnownState(ConsumedState State) {
switch (State) {
case CS_Unconsumed:
@@ -270,11 +275,13 @@ static ConsumedState testsFor(const FunctionDecl *FunDecl) {
}
namespace {
+
struct VarTestResult {
const VarDecl *Var;
ConsumedState TestsFor;
};
-} // end anonymous::VarTestResult
+
+} // namespace
namespace clang {
namespace consumed {
@@ -292,7 +299,7 @@ class PropagationInfo {
IT_BinTest,
IT_Var,
IT_Tmp
- } InfoType;
+ } InfoType = IT_None;
struct BinTestTy {
const BinaryOperator *Source;
@@ -310,22 +317,19 @@ class PropagationInfo {
};
public:
- PropagationInfo() : InfoType(IT_None) {}
-
+ PropagationInfo() = default;
PropagationInfo(const VarTestResult &VarTest)
- : InfoType(IT_VarTest), VarTest(VarTest) {}
-
+ : InfoType(IT_VarTest), VarTest(VarTest) {}
+
PropagationInfo(const VarDecl *Var, ConsumedState TestsFor)
- : InfoType(IT_VarTest) {
-
+ : InfoType(IT_VarTest) {
VarTest.Var = Var;
VarTest.TestsFor = TestsFor;
}
PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
const VarTestResult &LTest, const VarTestResult &RTest)
- : InfoType(IT_BinTest) {
-
+ : InfoType(IT_BinTest) {
BinTest.Source = Source;
BinTest.EOp = EOp;
BinTest.LTest = LTest;
@@ -335,8 +339,7 @@ public:
PropagationInfo(const BinaryOperator *Source, EffectiveOp EOp,
const VarDecl *LVar, ConsumedState LTestsFor,
const VarDecl *RVar, ConsumedState RTestsFor)
- : InfoType(IT_BinTest) {
-
+ : InfoType(IT_BinTest) {
BinTest.Source = Source;
BinTest.EOp = EOp;
BinTest.LTest.Var = LVar;
@@ -346,38 +349,37 @@ public:
}
PropagationInfo(ConsumedState State)
- : InfoType(IT_State), State(State) {}
-
+ : InfoType(IT_State), State(State) {}
PropagationInfo(const VarDecl *Var) : InfoType(IT_Var), Var(Var) {}
PropagationInfo(const CXXBindTemporaryExpr *Tmp)
- : InfoType(IT_Tmp), Tmp(Tmp) {}
+ : InfoType(IT_Tmp), Tmp(Tmp) {}
- const ConsumedState & getState() const {
+ const ConsumedState &getState() const {
assert(InfoType == IT_State);
return State;
}
- const VarTestResult & getVarTest() const {
+ const VarTestResult &getVarTest() const {
assert(InfoType == IT_VarTest);
return VarTest;
}
- const VarTestResult & getLTest() const {
+ const VarTestResult &getLTest() const {
assert(InfoType == IT_BinTest);
return BinTest.LTest;
}
- const VarTestResult & getRTest() const {
+ const VarTestResult &getRTest() const {
assert(InfoType == IT_BinTest);
return BinTest.RTest;
}
- const VarDecl * getVar() const {
+ const VarDecl *getVar() const {
assert(InfoType == IT_Var);
return Var;
}
- const CXXBindTemporaryExpr * getTmp() const {
+ const CXXBindTemporaryExpr *getTmp() const {
assert(InfoType == IT_Tmp);
return Tmp;
}
@@ -405,12 +407,12 @@ public:
return BinTest.Source;
}
- inline bool isValid() const { return InfoType != IT_None; }
- inline bool isState() const { return InfoType == IT_State; }
- inline bool isVarTest() const { return InfoType == IT_VarTest; }
- inline bool isBinTest() const { return InfoType == IT_BinTest; }
- inline bool isVar() const { return InfoType == IT_Var; }
- inline bool isTmp() const { return InfoType == IT_Tmp; }
+ bool isValid() const { return InfoType != IT_None; }
+ bool isState() const { return InfoType == IT_State; }
+ bool isVarTest() const { return InfoType == IT_VarTest; }
+ bool isBinTest() const { return InfoType == IT_BinTest; }
+ bool isVar() const { return InfoType == IT_Var; }
+ bool isTmp() const { return InfoType == IT_Tmp; }
bool isTest() const {
return InfoType == IT_VarTest || InfoType == IT_BinTest;
@@ -433,15 +435,17 @@ public:
BinTest.LTest.Var, invertConsumedUnconsumed(BinTest.LTest.TestsFor),
BinTest.RTest.Var, invertConsumedUnconsumed(BinTest.RTest.TestsFor));
} else {
- return PropagationInfo();
+ return {};
}
}
};
-static inline void
+} // namespace consumed
+} // namespace clang
+
+static void
setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
ConsumedState State) {
-
assert(PInfo.isVar() || PInfo.isTmp());
if (PInfo.isVar())
@@ -450,12 +454,14 @@ setStateForVarOrTmp(ConsumedStateMap *StateMap, const PropagationInfo &PInfo,
StateMap->setState(PInfo.getTmp(), State);
}
+namespace clang {
+namespace consumed {
+
class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
-
- typedef llvm::DenseMap<const Stmt *, PropagationInfo> MapType;
- typedef std::pair<const Stmt *, PropagationInfo> PairType;
- typedef MapType::iterator InfoEntry;
- typedef MapType::const_iterator ConstInfoEntry;
+ using MapType = llvm::DenseMap<const Stmt *, PropagationInfo>;
+  using PairType = std::pair<const Stmt *, PropagationInfo>;
+ using InfoEntry = MapType::iterator;
+ using ConstInfoEntry = MapType::const_iterator;
AnalysisDeclContext &AC;
ConsumedAnalyzer &Analyzer;
@@ -463,17 +469,19 @@ class ConsumedStmtVisitor : public ConstStmtVisitor<ConsumedStmtVisitor> {
MapType PropagationMap;
InfoEntry findInfo(const Expr *E) {
- if (auto Cleanups = dyn_cast<ExprWithCleanups>(E))
+ if (const auto Cleanups = dyn_cast<ExprWithCleanups>(E))
if (!Cleanups->cleanupsHaveSideEffects())
E = Cleanups->getSubExpr();
return PropagationMap.find(E->IgnoreParens());
}
+
ConstInfoEntry findInfo(const Expr *E) const {
- if (auto Cleanups = dyn_cast<ExprWithCleanups>(E))
+ if (const auto Cleanups = dyn_cast<ExprWithCleanups>(E))
if (!Cleanups->cleanupsHaveSideEffects())
E = Cleanups->getSubExpr();
return PropagationMap.find(E->IgnoreParens());
}
+
void insertInfo(const Expr *E, const PropagationInfo &PI) {
PropagationMap.insert(PairType(E->IgnoreParens(), PI));
}
@@ -517,7 +525,7 @@ public:
if (Entry != PropagationMap.end())
return Entry->second;
else
- return PropagationInfo();
+ return {};
}
void reset(ConsumedStateMap *NewStateMap) {
@@ -525,6 +533,8 @@ public:
}
};
+} // namespace consumed
+} // namespace clang
void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
InfoEntry Entry = findInfo(From);
@@ -532,7 +542,6 @@ void ConsumedStmtVisitor::forwardInfo(const Expr *From, const Expr *To) {
insertInfo(To, Entry->second);
}
-
// Create a new state for To, which is initialized to the state of From.
// If NS is not CS_None, sets the state of From to NS.
void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
@@ -548,7 +557,6 @@ void ConsumedStmtVisitor::copyInfo(const Expr *From, const Expr *To,
}
}
-
// Get the ConsumedState for From
ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
InfoEntry Entry = findInfo(From);
@@ -559,7 +567,6 @@ ConsumedState ConsumedStmtVisitor::getInfo(const Expr *From) {
return CS_None;
}
-
// If we already have info for To then update it, otherwise create a new entry.
void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
InfoEntry Entry = findInfo(To);
@@ -572,8 +579,6 @@ void ConsumedStmtVisitor::setInfo(const Expr *To, ConsumedState NS) {
}
}
-
-
void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
const FunctionDecl *FunDecl,
SourceLocation BlameLoc) {
@@ -592,7 +597,6 @@ void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
Analyzer.WarningsHandler.warnUseInInvalidState(
FunDecl->getNameAsString(), PInfo.getVar()->getNameAsString(),
stateToString(VarState), BlameLoc);
-
} else {
ConsumedState TmpState = PInfo.getAsState(StateMap);
@@ -604,7 +608,6 @@ void ConsumedStmtVisitor::checkCallability(const PropagationInfo &PInfo,
}
}
-
// Factors out common behavior for function, method, and operator calls.
// Check parameters and set parameter state if necessary.
// Returns true if the state of ObjArg is set, or false otherwise.
@@ -681,7 +684,6 @@ bool ConsumedStmtVisitor::handleCall(const CallExpr *Call, const Expr *ObjArg,
return false;
}
-
void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
const FunctionDecl *Fun) {
QualType RetType = Fun->getCallResultType();
@@ -699,7 +701,6 @@ void ConsumedStmtVisitor::propagateReturnType(const Expr *Call,
}
}
-
void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
switch (BinOp->getOpcode()) {
case BO_LAnd:
@@ -711,7 +712,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (LEntry != PropagationMap.end() && LEntry->second.isVarTest()) {
LTest = LEntry->second.getVarTest();
-
} else {
LTest.Var = nullptr;
LTest.TestsFor = CS_None;
@@ -719,7 +719,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (REntry != PropagationMap.end() && REntry->second.isVarTest()) {
RTest = REntry->second.getVarTest();
-
} else {
RTest.Var = nullptr;
RTest.TestsFor = CS_None;
@@ -728,7 +727,6 @@ void ConsumedStmtVisitor::VisitBinaryOperator(const BinaryOperator *BinOp) {
if (!(LTest.Var == nullptr && RTest.Var == nullptr))
PropagationMap.insert(PairType(BinOp, PropagationInfo(BinOp,
static_cast<EffectiveOp>(BinOp->getOpcode() == BO_LOr), LTest, RTest)));
-
break;
}
@@ -805,7 +803,6 @@ void ConsumedStmtVisitor::VisitCXXConstructExpr(const CXXConstructExpr *Call) {
}
}
-
void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
const CXXMemberCallExpr *Call) {
CXXMethodDecl* MD = Call->getMethodDecl();
@@ -816,12 +813,9 @@ void ConsumedStmtVisitor::VisitCXXMemberCallExpr(
propagateReturnType(Call, MD);
}
-
void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
const CXXOperatorCallExpr *Call) {
-
- const FunctionDecl *FunDecl =
- dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
+ const auto *FunDecl = dyn_cast_or_null<FunctionDecl>(Call->getDirectCallee());
if (!FunDecl) return;
if (Call->getOperator() == OO_Equal) {
@@ -831,7 +825,7 @@ void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
return;
}
- if (const CXXMemberCallExpr *MCall = dyn_cast<CXXMemberCallExpr>(Call))
+ if (const auto *MCall = dyn_cast<CXXMemberCallExpr>(Call))
handleCall(MCall, MCall->getImplicitObjectArgument(), FunDecl);
else
handleCall(Call, Call->getArg(0), FunDecl);
@@ -840,7 +834,7 @@ void ConsumedStmtVisitor::VisitCXXOperatorCallExpr(
}
void ConsumedStmtVisitor::VisitDeclRefExpr(const DeclRefExpr *DeclRef) {
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
+ if (const auto *Var = dyn_cast_or_null<VarDecl>(DeclRef->getDecl()))
if (StateMap->getState(Var) != consumed::CS_None)
PropagationMap.insert(PairType(DeclRef, PropagationInfo(Var)));
}
@@ -851,13 +845,12 @@ void ConsumedStmtVisitor::VisitDeclStmt(const DeclStmt *DeclS) {
VisitVarDecl(cast<VarDecl>(DI));
if (DeclS->isSingleDecl())
- if (const VarDecl *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
+ if (const auto *Var = dyn_cast_or_null<VarDecl>(DeclS->getSingleDecl()))
PropagationMap.insert(PairType(DeclS, PropagationInfo(Var)));
}
void ConsumedStmtVisitor::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *Temp) {
-
forwardInfo(Temp->GetTemporaryExpr(), Temp);
}
@@ -865,7 +858,6 @@ void ConsumedStmtVisitor::VisitMemberExpr(const MemberExpr *MExpr) {
forwardInfo(MExpr->getBase(), MExpr);
}
-
void ConsumedStmtVisitor::VisitParmVarDecl(const ParmVarDecl *Param) {
QualType ParamType = Param->getType();
ConsumedState ParamState = consumed::CS_None;
@@ -943,10 +935,6 @@ void ConsumedStmtVisitor::VisitVarDecl(const VarDecl *Var) {
StateMap->setState(Var, consumed::CS_Unknown);
}
}
-}} // end clang::consumed::ConsumedStmtVisitor
-
-namespace clang {
-namespace consumed {
static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
ConsumedStateMap *ThenStates,
@@ -956,10 +944,8 @@ static void splitVarStateForIf(const IfStmt *IfNode, const VarTestResult &Test,
if (VarState == CS_Unknown) {
ThenStates->setState(Test.Var, Test.TestsFor);
ElseStates->setState(Test.Var, invertConsumedUnconsumed(Test.TestsFor));
-
} else if (VarState == invertConsumedUnconsumed(Test.TestsFor)) {
ThenStates->markUnreachable();
-
} else if (VarState == Test.TestsFor) {
ElseStates->markUnreachable();
}
@@ -978,28 +964,22 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
if (PInfo.testEffectiveOp() == EO_And) {
if (LState == CS_Unknown) {
ThenStates->setState(LTest.Var, LTest.TestsFor);
-
} else if (LState == invertConsumedUnconsumed(LTest.TestsFor)) {
ThenStates->markUnreachable();
-
} else if (LState == LTest.TestsFor && isKnownState(RState)) {
if (RState == RTest.TestsFor)
ElseStates->markUnreachable();
else
ThenStates->markUnreachable();
}
-
} else {
if (LState == CS_Unknown) {
ElseStates->setState(LTest.Var,
invertConsumedUnconsumed(LTest.TestsFor));
-
} else if (LState == LTest.TestsFor) {
ElseStates->markUnreachable();
-
} else if (LState == invertConsumedUnconsumed(LTest.TestsFor) &&
isKnownState(RState)) {
-
if (RState == RTest.TestsFor)
ElseStates->markUnreachable();
else
@@ -1014,7 +994,6 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
ThenStates->setState(RTest.Var, RTest.TestsFor);
else if (RState == invertConsumedUnconsumed(RTest.TestsFor))
ThenStates->markUnreachable();
-
} else {
if (RState == CS_Unknown)
ElseStates->setState(RTest.Var,
@@ -1027,7 +1006,6 @@ static void splitVarStateForIfBinOp(const PropagationInfo &PInfo,
bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
const CFGBlock *TargetBlock) {
-
assert(CurrBlock && "Block pointer must not be NULL");
assert(TargetBlock && "TargetBlock pointer must not be NULL");
@@ -1043,7 +1021,6 @@ bool ConsumedBlockInfo::allBackEdgesVisited(const CFGBlock *CurrBlock,
void ConsumedBlockInfo::addInfo(
const CFGBlock *Block, ConsumedStateMap *StateMap,
std::unique_ptr<ConsumedStateMap> &OwnedStateMap) {
-
assert(Block && "Block pointer must not be NULL");
auto &Entry = StateMapsArray[Block->getBlockID()];
@@ -1058,7 +1035,6 @@ void ConsumedBlockInfo::addInfo(
void ConsumedBlockInfo::addInfo(const CFGBlock *Block,
std::unique_ptr<ConsumedStateMap> StateMap) {
-
assert(Block && "Block pointer must not be NULL");
auto &Entry = StateMapsArray[Block->getBlockID()];
@@ -1119,7 +1095,7 @@ void ConsumedStateMap::checkParamsForReturnTypestate(SourceLocation BlameLoc,
for (const auto &DM : VarMap) {
if (isa<ParmVarDecl>(DM.first)) {
- const ParmVarDecl *Param = cast<ParmVarDecl>(DM.first);
+ const auto *Param = cast<ParmVarDecl>(DM.first);
const ReturnTypestateAttr *RTA = Param->getAttr<ReturnTypestateAttr>();
if (!RTA)
@@ -1226,7 +1202,7 @@ bool ConsumedStateMap::operator!=(const ConsumedStateMap *Other) const {
void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
const FunctionDecl *D) {
QualType ReturnType;
- if (const CXXConstructorDecl *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
+ if (const auto *Constructor = dyn_cast<CXXConstructorDecl>(D)) {
ASTContext &CurrContext = AC.getASTContext();
ReturnType = Constructor->getThisType(CurrContext)->getPointeeType();
} else
@@ -1256,14 +1232,12 @@ void ConsumedAnalyzer::determineExpectedReturnState(AnalysisDeclContext &AC,
bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
const ConsumedStmtVisitor &Visitor) {
-
std::unique_ptr<ConsumedStateMap> FalseStates(
new ConsumedStateMap(*CurrStates));
PropagationInfo PInfo;
- if (const IfStmt *IfNode =
- dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
-
+ if (const auto *IfNode =
+ dyn_cast_or_null<IfStmt>(CurrBlock->getTerminator().getStmt())) {
const Expr *Cond = IfNode->getCond();
PInfo = Visitor.getInfo(Cond);
@@ -1275,19 +1249,15 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
FalseStates->setSource(Cond);
splitVarStateForIf(IfNode, PInfo.getVarTest(), CurrStates.get(),
FalseStates.get());
-
} else if (PInfo.isBinTest()) {
CurrStates->setSource(PInfo.testSourceNode());
FalseStates->setSource(PInfo.testSourceNode());
splitVarStateForIfBinOp(PInfo, CurrStates.get(), FalseStates.get());
-
} else {
return false;
}
-
- } else if (const BinaryOperator *BinOp =
- dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
-
+ } else if (const auto *BinOp =
+ dyn_cast_or_null<BinaryOperator>(CurrBlock->getTerminator().getStmt())) {
PInfo = Visitor.getInfo(BinOp->getLHS());
if (!PInfo.isVarTest()) {
if ((BinOp = dyn_cast_or_null<BinaryOperator>(BinOp->getLHS()))) {
@@ -1295,7 +1265,6 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
if (!PInfo.isVarTest())
return false;
-
} else {
return false;
}
@@ -1320,7 +1289,6 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
else if (VarState == Test.TestsFor)
FalseStates->markUnreachable();
}
-
} else {
return false;
}
@@ -1339,7 +1307,7 @@ bool ConsumedAnalyzer::splitState(const CFGBlock *CurrBlock,
}
void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
- const FunctionDecl *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
+ const auto *D = dyn_cast_or_null<FunctionDecl>(AC.getDecl());
if (!D)
return;
@@ -1368,7 +1336,6 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
if (!CurrStates) {
continue;
-
} else if (!CurrStates->isReachable()) {
CurrStates = nullptr;
continue;
@@ -1423,7 +1390,6 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
-
if (*SI == nullptr) continue;
if (BlockInfo.isBackEdge(CurrBlock, *SI)) {
@@ -1452,4 +1418,3 @@ void ConsumedAnalyzer::run(AnalysisDeclContext &AC) {
WarningsHandler.emitDiagnostics();
}
-}} // end namespace clang::consumed
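The Consumed.cpp changes above are largely mechanical, but for context: the analysis they modernize tracks typestate through the consumed-annotation attributes, and the ReturnTypestateAttr, testsFor, and splitVarStateForIf hunks all operate on code of the following shape. Attribute spellings are taken from the consumed-annotation documentation and quoted from memory, so treat this purely as an illustrative sketch:

    class __attribute__((consumable(unconsumed))) Handle {
    public:
      Handle() __attribute__((return_typestate(unconsumed)));
      void release() __attribute__((callable_when("unconsumed")))
                     __attribute__((set_typestate(consumed)));
      bool valid() const __attribute__((test_typestate(unconsumed)));
    };

    void use() {
      Handle H;
      if (H.valid())
        H.release();   // fine: H was just tested to be unconsumed
      H.release();     // -Wconsumed would flag this: H may already be consumed
    }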
diff --git a/lib/Analysis/Dominators.cpp b/lib/Analysis/Dominators.cpp
index 0e02c6d7174a..1b7dd8c804e1 100644
--- a/lib/Analysis/Dominators.cpp
+++ b/lib/Analysis/Dominators.cpp
@@ -1,4 +1,4 @@
-//=- Dominators.cpp - Implementation of dominators tree for Clang CFG C++ -*-=//
+//===- Dominators.cpp - Implementation of dominators tree for Clang CFG ---===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,4 +11,4 @@
using namespace clang;
-void DominatorTree::anchor() { }
+void DominatorTree::anchor() {}
diff --git a/lib/Analysis/LiveVariables.cpp b/lib/Analysis/LiveVariables.cpp
index 4752c2b020ae..b8ea1e960095 100644
--- a/lib/Analysis/LiveVariables.cpp
+++ b/lib/Analysis/LiveVariables.cpp
@@ -77,6 +77,7 @@ public:
AnalysisDeclContext &analysisContext;
llvm::ImmutableSet<const Stmt *>::Factory SSetFact;
llvm::ImmutableSet<const VarDecl *>::Factory DSetFact;
+ llvm::ImmutableSet<const BindingDecl *>::Factory BSetFact;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksEndToLiveness;
llvm::DenseMap<const CFGBlock *, LiveVariables::LivenessValues> blocksBeginToLiveness;
llvm::DenseMap<const Stmt *, LiveVariables::LivenessValues> stmtsToLiveness;
@@ -97,6 +98,7 @@ public:
: analysisContext(ac),
SSetFact(false), // Do not canonicalize ImmutableSets by default.
DSetFact(false), // This is a *major* performance win.
+ BSetFact(false),
killAtAssign(KillAtAssign) {}
};
}
@@ -114,6 +116,12 @@ bool LiveVariables::LivenessValues::isLive(const Stmt *S) const {
}
bool LiveVariables::LivenessValues::isLive(const VarDecl *D) const {
+ if (const auto *DD = dyn_cast<DecompositionDecl>(D)) {
+ bool alive = false;
+ for (const BindingDecl *BD : DD->bindings())
+ alive |= liveBindings.contains(BD);
+ return alive;
+ }
return liveDecls.contains(D);
}
@@ -145,14 +153,19 @@ LiveVariablesImpl::merge(LiveVariables::LivenessValues valsA,
DSetRefA(valsA.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory()),
DSetRefB(valsB.liveDecls.getRootWithoutRetain(), DSetFact.getTreeFactory());
+ llvm::ImmutableSetRef<const BindingDecl *>
+ BSetRefA(valsA.liveBindings.getRootWithoutRetain(), BSetFact.getTreeFactory()),
+ BSetRefB(valsB.liveBindings.getRootWithoutRetain(), BSetFact.getTreeFactory());
SSetRefA = mergeSets(SSetRefA, SSetRefB);
DSetRefA = mergeSets(DSetRefA, DSetRefB);
+ BSetRefA = mergeSets(BSetRefA, BSetRefB);
// asImmutableSet() canonicalizes the tree, allowing us to do an easy
// comparison afterwards.
return LiveVariables::LivenessValues(SSetRefA.asImmutableSet(),
- DSetRefA.asImmutableSet());
+ DSetRefA.asImmutableSet(),
+ BSetRefA.asImmutableSet());
}
bool LiveVariables::LivenessValues::equals(const LivenessValues &V) const {
@@ -322,6 +335,11 @@ void TransferFunctions::Visit(Stmt *S) {
}
}
+static bool writeShouldKill(const VarDecl *VD) {
+ return VD && !VD->getType()->isReferenceType() &&
+ !isAlwaysAlive(VD);
+}
+
void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
if (B->isAssignmentOp()) {
if (!LV.killAtAssign)
@@ -329,21 +347,25 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *B) {
// Assigning to a variable?
Expr *LHS = B->getLHS()->IgnoreParens();
-
- if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
- // Assignments to references don't kill the ref's address
- if (VD->getType()->isReferenceType())
- return;
-
- if (!isAlwaysAlive(VD)) {
- // The variable is now dead.
+
+ if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(LHS)) {
+ const Decl* D = DR->getDecl();
+ bool Killed = false;
+
+ if (const BindingDecl* BD = dyn_cast<BindingDecl>(D)) {
+ Killed = !BD->getType()->isReferenceType();
+ if (Killed)
+ val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ Killed = writeShouldKill(VD);
+ if (Killed)
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
- }
- if (observer)
- observer->observerKill(DR);
}
+
+ if (Killed && observer)
+ observer->observerKill(DR);
+ }
}
}
@@ -357,17 +379,27 @@ void TransferFunctions::VisitBlockExpr(BlockExpr *BE) {
}
void TransferFunctions::VisitDeclRefExpr(DeclRefExpr *DR) {
- if (const VarDecl *D = dyn_cast<VarDecl>(DR->getDecl()))
- if (!isAlwaysAlive(D) && LV.inAssignment.find(DR) == LV.inAssignment.end())
- val.liveDecls = LV.DSetFact.add(val.liveDecls, D);
+ const Decl* D = DR->getDecl();
+ bool InAssignment = LV.inAssignment[DR];
+ if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ if (!InAssignment)
+ val.liveBindings = LV.BSetFact.add(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!InAssignment && !isAlwaysAlive(VD))
+ val.liveDecls = LV.DSetFact.add(val.liveDecls, VD);
+ }
}
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
- for (const auto *DI : DS->decls())
- if (const auto *VD = dyn_cast<VarDecl>(DI)) {
+ for (const auto *DI : DS->decls()) {
+ if (const auto *DD = dyn_cast<DecompositionDecl>(DI)) {
+ for (const auto *BD : DD->bindings())
+ val.liveBindings = LV.BSetFact.remove(val.liveBindings, BD);
+ } else if (const auto *VD = dyn_cast<VarDecl>(DI)) {
if (!isAlwaysAlive(VD))
val.liveDecls = LV.DSetFact.remove(val.liveDecls, VD);
}
+ }
}
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *OS) {
@@ -422,12 +454,14 @@ void TransferFunctions::VisitUnaryOperator(UnaryOperator *UO) {
case UO_PreDec:
break;
}
-
- if (DeclRefExpr *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens()))
- if (isa<VarDecl>(DR->getDecl())) {
+
+ if (auto *DR = dyn_cast<DeclRefExpr>(UO->getSubExpr()->IgnoreParens())) {
+ const Decl *D = DR->getDecl();
+ if (isa<VarDecl>(D) || isa<BindingDecl>(D)) {
// Treat ++/-- as a kill.
observer->observerKill(DR);
}
+ }
}
LiveVariables::LivenessValues
@@ -508,10 +542,10 @@ LiveVariables::computeLiveness(AnalysisDeclContext &AC,
for (CFGBlock::const_iterator bi = block->begin(), be = block->end();
bi != be; ++bi) {
if (Optional<CFGStmt> cs = bi->getAs<CFGStmt>()) {
- if (const BinaryOperator *BO =
- dyn_cast<BinaryOperator>(cs->getStmt())) {
+ const Stmt* stmt = cs->getStmt();
+ if (const auto *BO = dyn_cast<BinaryOperator>(stmt)) {
if (BO->getOpcode() == BO_Assign) {
- if (const DeclRefExpr *DR =
+ if (const auto *DR =
dyn_cast<DeclRefExpr>(BO->getLHS()->IgnoreParens())) {
LV->inAssignment[DR] = 1;
}
@@ -563,7 +597,7 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
it != ei; ++it) {
vec.push_back(it->first);
}
- std::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
+ llvm::sort(vec.begin(), vec.end(), [](const CFGBlock *A, const CFGBlock *B) {
return A->getBlockID() < B->getBlockID();
});
@@ -583,7 +617,8 @@ void LiveVariablesImpl::dumpBlockLiveness(const SourceManager &M) {
declVec.push_back(*si);
}
- std::sort(declVec.begin(), declVec.end(), [](const Decl *A, const Decl *B) {
+ llvm::sort(declVec.begin(), declVec.end(),
+ [](const Decl *A, const Decl *B) {
return A->getLocStart() < B->getLocStart();
});
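The LiveVariables changes above teach the analysis about C++17 structured bindings: a DecompositionDecl counts as live if any of its BindingDecls is live, reads of a binding add it to the live set, and plain assignments through a non-reference binding kill it. A minimal sketch of the code shape this covers:

    #include <utility>

    int use(std::pair<int, int> P) {
      auto [A, B] = P;   // DecompositionDecl with BindingDecls A and B
      A = 0;             // write through a binding: kills A's liveness
      return B;          // read of B keeps B, and thus the decomposition, live
    }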
diff --git a/lib/Analysis/PostOrderCFGView.cpp b/lib/Analysis/PostOrderCFGView.cpp
index 5a3c8182a140..124424bf2567 100644
--- a/lib/Analysis/PostOrderCFGView.cpp
+++ b/lib/Analysis/PostOrderCFGView.cpp
@@ -1,4 +1,4 @@
-//===- PostOrderCFGView.cpp - Post order view of CFG blocks -------*- C++ --*-//
+//===- PostOrderCFGView.cpp - Post order view of CFG blocks ---------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,10 +12,12 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
+#include "clang/Analysis/AnalysisDeclContext.h"
+#include "clang/Analysis/CFG.h"
using namespace clang;
-void PostOrderCFGView::anchor() { }
+void PostOrderCFGView::anchor() {}
PostOrderCFGView::PostOrderCFGView(const CFG *cfg) {
Blocks.reserve(cfg->getNumBlockIDs());
@@ -46,4 +48,3 @@ bool PostOrderCFGView::BlockOrderCompare::operator()(const CFGBlock *b1,
unsigned b2V = (b2It == POV.BlockOrder.end()) ? 0 : b2It->second;
return b1V > b2V;
}
-
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index dfaed26564e6..00591ab2b048 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/FormatString.h"
+#include "clang/Analysis/Analyses/OSLog.h"
#include "FormatStringParsing.h"
#include "clang/Basic/TargetInfo.h"
@@ -119,36 +120,56 @@ static PrintfSpecifierResult ParsePrintfSpecifier(FormatStringHandler &H,
return true;
}
- const char *OSLogVisibilityFlagsStart = nullptr,
- *OSLogVisibilityFlagsEnd = nullptr;
if (*I == '{') {
- OSLogVisibilityFlagsStart = I++;
- // Find the end of the modifier.
- while (I != E && *I != '}') {
- I++;
- }
- if (I == E) {
- if (Warn)
- H.HandleIncompleteSpecifier(Start, E - Start);
- return true;
- }
- assert(*I == '}');
- OSLogVisibilityFlagsEnd = I++;
-
- // Just see if 'private' or 'public' is the first word. os_log itself will
- // do any further parsing.
- const char *P = OSLogVisibilityFlagsStart + 1;
- while (P < OSLogVisibilityFlagsEnd && isspace(*P))
- P++;
- const char *WordStart = P;
- while (P < OSLogVisibilityFlagsEnd && (isalnum(*P) || *P == '_'))
- P++;
- const char *WordEnd = P;
- StringRef Word(WordStart, WordEnd - WordStart);
- if (Word == "private") {
- FS.setIsPrivate(WordStart);
- } else if (Word == "public") {
- FS.setIsPublic(WordStart);
+ ++I;
+ unsigned char PrivacyFlags = 0;
+ StringRef MatchedStr;
+
+ do {
+ StringRef Str(I, E - I);
+ std::string Match = "^[\t\n\v\f\r ]*(private|public)[\t\n\v\f\r ]*(,|})";
+ llvm::Regex R(Match);
+ SmallVector<StringRef, 2> Matches;
+
+ if (R.match(Str, &Matches)) {
+ MatchedStr = Matches[1];
+ I += Matches[0].size();
+
+ // Set the privacy flag if the privacy annotation in the
+ // comma-delimited segment is at least as strict as the privacy
+ // annotations in previous comma-delimited segments.
+ if (MatchedStr.equals("private"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPrivate;
+ else if (PrivacyFlags == 0 && MatchedStr.equals("public"))
+ PrivacyFlags = clang::analyze_os_log::OSLogBufferItem::IsPublic;
+ } else {
+ size_t CommaOrBracePos =
+ Str.find_if([](char c) { return c == ',' || c == '}'; });
+
+ if (CommaOrBracePos == StringRef::npos) {
+ // Neither a comma nor the closing brace was found.
+ if (Warn)
+ H.HandleIncompleteSpecifier(Start, E - Start);
+ return true;
+ }
+
+ I += CommaOrBracePos + 1;
+ }
+ // Continue until the closing brace is found.
+ } while (*(I - 1) == ',');
+
+ // Set the privacy flag.
+ switch (PrivacyFlags) {
+ case 0:
+ break;
+ case clang::analyze_os_log::OSLogBufferItem::IsPrivate:
+ FS.setIsPrivate(MatchedStr.data());
+ break;
+ case clang::analyze_os_log::OSLogBufferItem::IsPublic:
+ FS.setIsPublic(MatchedStr.data());
+ break;
+ default:
+ llvm_unreachable("Unexpected privacy flag value");
}
}
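The rewritten scanner accepts a comma-separated list of annotations between the braces and keeps the strictest privacy level it sees, so a 'private' segment always wins over 'public'. Hedged examples of format strings it is meant to handle (os_log usage sketched from the Apple <os/log.h> API, shown for illustration only):

    #include <os/log.h>   // Apple platforms only

    void report(int UID, const char *Token) {
      os_log(OS_LOG_DEFAULT, "user id: %{private}d", UID);         // private
      os_log(OS_LOG_DEFAULT, "status: %{public}s", "ok");          // public
      os_log(OS_LOG_DEFAULT, "token: %{public, private}s", Token);
      // the stricter segment wins, so the last argument is treated as private
    }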
@@ -466,13 +487,14 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsIntMax:
return ArgType(Ctx.getIntMaxType(), "intmax_t");
case LengthModifier::AsSizeT:
- return ArgType(Ctx.getSignedSizeType(), "ssize_t");
+ return ArgType::makeSizeT(ArgType(Ctx.getSignedSizeType(), "ssize_t"));
case LengthModifier::AsInt3264:
return Ctx.getTargetInfo().getTriple().isArch64Bit()
? ArgType(Ctx.LongLongTy, "__int64")
: ArgType(Ctx.IntTy, "__int32");
case LengthModifier::AsPtrDiff:
- return ArgType(Ctx.getPointerDiffType(), "ptrdiff_t");
+ return ArgType::makePtrdiffT(
+ ArgType(Ctx.getPointerDiffType(), "ptrdiff_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
case LengthModifier::AsWide:
@@ -499,13 +521,14 @@ ArgType PrintfSpecifier::getArgType(ASTContext &Ctx,
case LengthModifier::AsIntMax:
return ArgType(Ctx.getUIntMaxType(), "uintmax_t");
case LengthModifier::AsSizeT:
- return ArgType(Ctx.getSizeType(), "size_t");
+ return ArgType::makeSizeT(ArgType(Ctx.getSizeType(), "size_t"));
case LengthModifier::AsInt3264:
return Ctx.getTargetInfo().getTriple().isArch64Bit()
? ArgType(Ctx.UnsignedLongLongTy, "unsigned __int64")
: ArgType(Ctx.UnsignedIntTy, "unsigned __int32");
case LengthModifier::AsPtrDiff:
- return ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t");
+ return ArgType::makePtrdiffT(
+ ArgType(Ctx.getUnsignedPointerDiffType(), "unsigned ptrdiff_t"));
case LengthModifier::AsAllocate:
case LengthModifier::AsMAllocate:
case LengthModifier::AsWide:
@@ -647,6 +670,7 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::Bool:
case BuiltinType::WChar_U:
case BuiltinType::WChar_S:
+ case BuiltinType::Char8: // FIXME: Treat like 'char'?
case BuiltinType::Char16:
case BuiltinType::Char32:
case BuiltinType::UInt128:
@@ -654,6 +678,30 @@ bool PrintfSpecifier::fixType(QualType QT, const LangOptions &LangOpt,
case BuiltinType::Half:
case BuiltinType::Float16:
case BuiltinType::Float128:
+ case BuiltinType::ShortAccum:
+ case BuiltinType::Accum:
+ case BuiltinType::LongAccum:
+ case BuiltinType::UShortAccum:
+ case BuiltinType::UAccum:
+ case BuiltinType::ULongAccum:
+ case BuiltinType::ShortFract:
+ case BuiltinType::Fract:
+ case BuiltinType::LongFract:
+ case BuiltinType::UShortFract:
+ case BuiltinType::UFract:
+ case BuiltinType::ULongFract:
+ case BuiltinType::SatShortAccum:
+ case BuiltinType::SatAccum:
+ case BuiltinType::SatLongAccum:
+ case BuiltinType::SatUShortAccum:
+ case BuiltinType::SatUAccum:
+ case BuiltinType::SatULongAccum:
+ case BuiltinType::SatShortFract:
+ case BuiltinType::SatFract:
+ case BuiltinType::SatLongFract:
+ case BuiltinType::SatUShortFract:
+ case BuiltinType::SatUFract:
+ case BuiltinType::SatULongFract:
// Various types which are non-trivial to correct.
return false;
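The getArgType hunks earlier in this file route the %z and %t length modifiers through ArgType::makeSizeT and ArgType::makePtrdiffT instead of plain ArgType. As a reminder, the printf usage those modifiers classify is the standard one (plain C/C++ semantics, independent of the clang internals):

    #include <cstddef>
    #include <cstdio>

    void h(const char *Begin, const char *End) {
      size_t    N = static_cast<size_t>(End - Begin);
      ptrdiff_t D = End - Begin;
      std::printf("%zu bytes\n", N);   // %zu expects size_t
      std::printf("%td apart\n", D);   // %td expects ptrdiff_t
    }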
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index 7e72795a47f6..f644d503dc49 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -66,6 +66,21 @@ static bool isBuiltinUnreachable(const Stmt *S) {
return false;
}
+static bool isBuiltinAssumeFalse(const CFGBlock *B, const Stmt *S,
+ ASTContext &C) {
+ if (B->empty()) {
+ // Happens if S is B's terminator and B contains nothing else
+ // (e.g. a CFGBlock containing only a goto).
+ return false;
+ }
+ if (Optional<CFGStmt> CS = B->back().getAs<CFGStmt>()) {
+ if (const auto *CE = dyn_cast<CallExpr>(CS->getStmt())) {
+ return CE->getCallee()->IgnoreCasts() == S && CE->isBuiltinAssumeFalse(C);
+ }
+ }
+ return false;
+}
+
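isBuiltinAssumeFalse appears to mirror the existing __builtin_unreachable() special case: when the statement that would be reported as unreachable is itself a call such as __builtin_assume(false) ending its block, the warning is suppressed, since the call is a deliberate unreachability marker. A hedged sketch of the pattern:

    int classify(bool Flag) {
      if (Flag)
        return 1;
      else
        return 0;
      __builtin_assume(false);   // statically unreachable, but intentional:
                                 // with this change it is skipped instead of
                                 // triggering -Wunreachable-code, matching the
                                 // __builtin_unreachable() behaviour
    }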
static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
// Look to see if the current control flow ends with a 'return', and see if
// 'S' is a substatement. The 'return' may not be the last element in the
@@ -372,6 +387,7 @@ namespace {
llvm::BitVector &Reachable;
SmallVector<const CFGBlock *, 10> WorkList;
Preprocessor &PP;
+ ASTContext &C;
typedef SmallVector<std::pair<const CFGBlock *, const Stmt *>, 12>
DeferredLocsTy;
@@ -379,10 +395,10 @@ namespace {
DeferredLocsTy DeferredLocs;
public:
- DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP)
+ DeadCodeScan(llvm::BitVector &reachable, Preprocessor &PP, ASTContext &C)
: Visited(reachable.size()),
Reachable(reachable),
- PP(PP) {}
+ PP(PP), C(C) {}
void enqueue(const CFGBlock *block);
unsigned scanBackwards(const CFGBlock *Start,
@@ -600,7 +616,8 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
if (isa<BreakStmt>(S)) {
UK = reachable_code::UK_Break;
- } else if (isTrivialDoWhile(B, S) || isBuiltinUnreachable(S)) {
+ } else if (isTrivialDoWhile(B, S) || isBuiltinUnreachable(S) ||
+ isBuiltinAssumeFalse(B, S, C)) {
return;
}
else if (isDeadReturn(B, S)) {
@@ -693,7 +710,7 @@ void FindUnreachableCode(AnalysisDeclContext &AC, Preprocessor &PP,
if (reachable[block->getBlockID()])
continue;
- DeadCodeScan DS(reachable, PP);
+ DeadCodeScan DS(reachable, PP, AC.getASTContext());
numReachable += DS.scanBackwards(block, CB);
if (numReachable == cfg->getNumBlockIDs())
diff --git a/lib/Analysis/ThreadSafety.cpp b/lib/Analysis/ThreadSafety.cpp
index 6a9c9a04c55d..03cc234dce5c 100644
--- a/lib/Analysis/ThreadSafety.cpp
+++ b/lib/Analysis/ThreadSafety.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafety.cpp ----------------------------------------*- C++ --*-===//
+//===- ThreadSafety.cpp ---------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -17,41 +17,59 @@
#include "clang/Analysis/Analyses/ThreadSafety.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
-#include "clang/Analysis/Analyses/ThreadSafetyLogical.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Analysis/Analyses/ThreadSafetyUtil.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
-#include "clang/Analysis/CFGStmtMap.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/SourceLocation.h"
-#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
-#include "llvm/ADT/PostOrderIterator.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/Casting.h"
+#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
-#include <ostream>
-#include <sstream>
+#include <cassert>
+#include <functional>
+#include <iterator>
+#include <memory>
+#include <string>
+#include <type_traits>
#include <utility>
#include <vector>
+
using namespace clang;
using namespace threadSafety;
// Key method definition
-ThreadSafetyHandler::~ThreadSafetyHandler() {}
+ThreadSafetyHandler::~ThreadSafetyHandler() = default;
namespace {
+
class TILPrinter :
- public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+ public til::PrettyPrinter<TILPrinter, llvm::raw_ostream> {};
+} // namespace
/// Issue a warning about an invalid lock expression
static void warnInvalidLock(ThreadSafetyHandler &Handler,
@@ -66,11 +84,13 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler,
Handler.handleInvalidLockExp(Kind, Loc);
}
-/// \brief A set of CapabilityInfo objects, which are compiled from the
+namespace {
+
+/// A set of CapabilityInfo objects, which are compiled from the
/// requires attributes on a function.
class CapExprSet : public SmallVector<CapabilityExpr, 4> {
public:
- /// \brief Push M onto list, but discard duplicates.
+ /// Push M onto list, but discard duplicates.
void push_back_nodup(const CapabilityExpr &CapE) {
iterator It = std::find_if(begin(), end(),
[=](const CapabilityExpr &CapE2) {
@@ -84,33 +104,37 @@ public:
class FactManager;
class FactSet;
-/// \brief This is a helper class that stores a fact that is known at a
+/// This is a helper class that stores a fact that is known at a
/// particular point in program execution. Currently, a fact is a capability,
/// along with additional information, such as where it was acquired, whether
/// it is exclusive or shared, etc.
///
-/// FIXME: this analysis does not currently support either re-entrant
-/// locking or lock "upgrading" and "downgrading" between exclusive and
-/// shared.
+/// FIXME: this analysis does not currently support re-entrant locking.
class FactEntry : public CapabilityExpr {
private:
- LockKind LKind; ///< exclusive or shared
- SourceLocation AcquireLoc; ///< where it was acquired.
- bool Asserted; ///< true if the lock was asserted
- bool Declared; ///< true if the lock was declared
+ /// Exclusive or shared.
+ LockKind LKind;
+
+ /// Where it was acquired.
+ SourceLocation AcquireLoc;
+
+ /// True if the lock was asserted.
+ bool Asserted;
+
+ /// True if the lock was declared.
+ bool Declared;
public:
FactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
bool Asrt, bool Declrd = false)
: CapabilityExpr(CE), LKind(LK), AcquireLoc(Loc), Asserted(Asrt),
Declared(Declrd) {}
+ virtual ~FactEntry() = default;
- virtual ~FactEntry() {}
-
- LockKind kind() const { return LKind; }
- SourceLocation loc() const { return AcquireLoc; }
- bool asserted() const { return Asserted; }
- bool declared() const { return Declared; }
+ LockKind kind() const { return LKind; }
+ SourceLocation loc() const { return AcquireLoc; }
+ bool asserted() const { return Asserted; }
+ bool declared() const { return Declared; }
void setDeclared(bool D) { Declared = D; }
@@ -129,10 +153,9 @@ public:
}
};
+using FactID = unsigned short;
-typedef unsigned short FactID;
-
-/// \brief FactManager manages the memory for all facts that are created during
+/// FactManager manages the memory for all facts that are created during
/// the analysis of a single routine.
class FactManager {
private:
@@ -148,8 +171,7 @@ public:
FactEntry &operator[](FactID F) { return *Facts[F]; }
};
-
-/// \brief A FactSet is the set of facts that are known to be true at a
+/// A FactSet is the set of facts that are known to be true at a
/// particular program point. FactSets must be small, because they are
/// frequently copied, and are thus implemented as a set of indices into a
/// table maintained by a FactManager. A typical FactSet only holds 1 or 2
@@ -158,25 +180,25 @@ public:
/// may involve partial pattern matches, rather than exact matches.
class FactSet {
private:
- typedef SmallVector<FactID, 4> FactVec;
+ using FactVec = SmallVector<FactID, 4>;
FactVec FactIDs;
public:
- typedef FactVec::iterator iterator;
- typedef FactVec::const_iterator const_iterator;
+ using iterator = FactVec::iterator;
+ using const_iterator = FactVec::const_iterator;
- iterator begin() { return FactIDs.begin(); }
+ iterator begin() { return FactIDs.begin(); }
const_iterator begin() const { return FactIDs.begin(); }
- iterator end() { return FactIDs.end(); }
+ iterator end() { return FactIDs.end(); }
const_iterator end() const { return FactIDs.end(); }
bool isEmpty() const { return FactIDs.size() == 0; }
// Return true if the set contains only negative facts
bool isEmpty(FactManager &FactMan) const {
- for (FactID FID : *this) {
+ for (const auto FID : *this) {
if (!FactMan[FID].negative())
return false;
}
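For readers new to this analysis, the facts held in a FactSet come from Clang's thread-safety attributes; a rough sketch of annotated code whose held-capability facts the set models (GNU attribute spellings as documented for -Wthread-safety, used here only for illustration):

    struct __attribute__((capability("mutex"))) Mutex {
      void Lock()   __attribute__((acquire_capability()));
      void Unlock() __attribute__((release_capability()));
    };

    Mutex Mu;
    int Counter __attribute__((guarded_by(Mu)));

    void bump() {
      Mu.Lock();     // the fact "Mu held (exclusive)" enters the FactSet
      ++Counter;     // fine: the guarded_by(Mu) requirement is satisfied
      Mu.Unlock();   // the fact is removed again
    }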
@@ -247,28 +269,30 @@ public:
};
class ThreadSafetyAnalyzer;
+
} // namespace
namespace clang {
namespace threadSafety {
+
class BeforeSet {
private:
- typedef SmallVector<const ValueDecl*, 4> BeforeVect;
+ using BeforeVect = SmallVector<const ValueDecl *, 4>;
struct BeforeInfo {
- BeforeInfo() : Visited(0) {}
- BeforeInfo(BeforeInfo &&) = default;
-
BeforeVect Vect;
- int Visited;
+ int Visited = 0;
+
+ BeforeInfo() = default;
+ BeforeInfo(BeforeInfo &&) = default;
};
- typedef llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>
- BeforeMap;
- typedef llvm::DenseMap<const ValueDecl*, bool> CycleMap;
+ using BeforeMap =
+ llvm::DenseMap<const ValueDecl *, std::unique_ptr<BeforeInfo>>;
+ using CycleMap = llvm::DenseMap<const ValueDecl *, bool>;
public:
- BeforeSet() { }
+ BeforeSet() = default;
BeforeInfo* insertAttrExprs(const ValueDecl* Vd,
ThreadSafetyAnalyzer& Analyzer);
@@ -283,15 +307,18 @@ public:
private:
BeforeMap BMap;
- CycleMap CycMap;
+ CycleMap CycMap;
};
-} // end namespace threadSafety
-} // end namespace clang
+
+} // namespace threadSafety
+} // namespace clang
namespace {
-typedef llvm::ImmutableMap<const NamedDecl*, unsigned> LocalVarContext;
+
class LocalVariableMap;
+using LocalVarContext = llvm::ImmutableMap<const NamedDecl *, unsigned>;
+
/// A side (entry or exit) of a CFG node.
enum CFGBlockSide { CBS_Entry, CBS_Exit };
@@ -299,33 +326,46 @@ enum CFGBlockSide { CBS_Entry, CBS_Exit };
/// maintained for each block in the CFG. See LocalVariableMap for more
/// information about the contexts.
struct CFGBlockInfo {
- FactSet EntrySet; // Lockset held at entry to block
- FactSet ExitSet; // Lockset held at exit from block
- LocalVarContext EntryContext; // Context held at entry to block
- LocalVarContext ExitContext; // Context held at exit from block
- SourceLocation EntryLoc; // Location of first statement in block
- SourceLocation ExitLoc; // Location of last statement in block.
- unsigned EntryIndex; // Used to replay contexts later
- bool Reachable; // Is this block reachable?
+ // Lockset held at entry to block
+ FactSet EntrySet;
+
+ // Lockset held at exit from block
+ FactSet ExitSet;
+
+ // Context held at entry to block
+ LocalVarContext EntryContext;
+
+ // Context held at exit from block
+ LocalVarContext ExitContext;
+
+ // Location of first statement in block
+ SourceLocation EntryLoc;
+
+ // Location of last statement in block.
+ SourceLocation ExitLoc;
+
+ // Used to replay contexts later
+ unsigned EntryIndex;
+
+ // Is this block reachable?
+ bool Reachable = false;
const FactSet &getSet(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntrySet : ExitSet;
}
+
SourceLocation getLocation(CFGBlockSide Side) const {
return Side == CBS_Entry ? EntryLoc : ExitLoc;
}
private:
CFGBlockInfo(LocalVarContext EmptyCtx)
- : EntryContext(EmptyCtx), ExitContext(EmptyCtx), Reachable(false)
- { }
+ : EntryContext(EmptyCtx), ExitContext(EmptyCtx) {}
public:
static CFGBlockInfo getEmptyBlockInfo(LocalVariableMap &M);
};
-
-
// A LocalVariableMap maintains a map from local variables to their currently
// valid definitions. It provides SSA-like functionality when traversing the
// CFG. Like SSA, each definition or assignment to a variable is assigned a
@@ -341,7 +381,7 @@ public:
// that Context to look up the definitions of variables.
class LocalVariableMap {
public:
- typedef LocalVarContext Context;
+ using Context = LocalVarContext;
/// A VarDefinition consists of an expression, representing the value of the
/// variable, along with the context in which that expression should be
@@ -351,30 +391,35 @@ public:
public:
friend class LocalVariableMap;
- const NamedDecl *Dec; // The original declaration for this variable.
- const Expr *Exp; // The expression for this variable, OR
- unsigned Ref; // Reference to another VarDefinition
- Context Ctx; // The map with which Exp should be interpreted.
+ // The original declaration for this variable.
+ const NamedDecl *Dec;
+
+ // The expression for this variable, OR
+ const Expr *Exp = nullptr;
+
+ // Reference to another VarDefinition
+ unsigned Ref = 0;
+
+ // The map with which Exp should be interpreted.
+ Context Ctx;
bool isReference() { return !Exp; }
private:
// Create ordinary variable definition
VarDefinition(const NamedDecl *D, const Expr *E, Context C)
- : Dec(D), Exp(E), Ref(0), Ctx(C)
- { }
+ : Dec(D), Exp(E), Ctx(C) {}
// Create reference to previous definition
VarDefinition(const NamedDecl *D, unsigned R, Context C)
- : Dec(D), Exp(nullptr), Ref(R), Ctx(C)
- { }
+ : Dec(D), Ref(R), Ctx(C) {}
};
private:
Context::Factory ContextFactory;
std::vector<VarDefinition> VarDefinitions;
std::vector<unsigned> CtxIndices;
- std::vector<std::pair<Stmt*, Context> > SavedContexts;
+ std::vector<std::pair<Stmt *, Context>> SavedContexts;
public:
LocalVariableMap() {
@@ -471,12 +516,14 @@ public:
std::vector<CFGBlockInfo> &BlockInfo);
protected:
+ friend class VarMapBuilder;
+
// Get the current context index
unsigned getContextIndex() { return SavedContexts.size()-1; }
// Save the current context for later replay
void saveContext(Stmt *S, Context C) {
- SavedContexts.push_back(std::make_pair(S,C));
+ SavedContexts.push_back(std::make_pair(S, C));
}
// Adds a new definition to the given context, and returns a new context.
@@ -533,16 +580,16 @@ protected:
Context intersectContexts(Context C1, Context C2);
Context createReferenceContext(Context C);
void intersectBackEdge(Context C1, Context C2);
-
- friend class VarMapBuilder;
};
+} // namespace
// This has to be defined after LocalVariableMap.
CFGBlockInfo CFGBlockInfo::getEmptyBlockInfo(LocalVariableMap &M) {
return CFGBlockInfo(M.getEmptyContext());
}
+namespace {
/// Visitor which builds a LocalVariableMap
class VarMapBuilder : public StmtVisitor<VarMapBuilder> {
@@ -551,12 +598,13 @@ public:
LocalVariableMap::Context Ctx;
VarMapBuilder(LocalVariableMap *VM, LocalVariableMap::Context C)
- : VMap(VM), Ctx(C) {}
+ : VMap(VM), Ctx(C) {}
void VisitDeclStmt(DeclStmt *S);
void VisitBinaryOperator(BinaryOperator *BO);
};
+} // namespace
// Add new local variables to the variable map
void VarMapBuilder::VisitDeclStmt(DeclStmt *S) {
@@ -586,8 +634,8 @@ void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
Expr *LHSExp = BO->getLHS()->IgnoreParenCasts();
// Update the variable map and current context.
- if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
- ValueDecl *VDec = DRE->getDecl();
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(LHSExp)) {
+ const ValueDecl *VDec = DRE->getDecl();
if (Ctx.lookup(VDec)) {
if (BO->getOpcode() == BO_Assign)
Ctx = VMap->updateDefinition(VDec, BO->getRHS(), Ctx);
@@ -599,7 +647,6 @@ void VarMapBuilder::VisitBinaryOperator(BinaryOperator *BO) {
}
}
-
// Computes the intersection of two contexts. The intersection is the
// set of variables which have the same definition in both contexts;
// variables with different definitions are discarded.
@@ -642,7 +689,6 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
}
}
-
// Traverse the CFG in topological order, so all predecessors of a block
// (excluding back-edges) are visited before the block itself. At
// each point in the code, we calculate a Context, which holds the set of
@@ -680,7 +726,6 @@ void LocalVariableMap::intersectBackEdge(Context C1, Context C2) {
// while (b) { x -> x2, y -> y1 | [1st:] x2=x1; [2nd:] x2=NULL; }
// x = x+1; { x -> x3, y -> y1 | x3 = x2 + 1, ... }
// ... { y -> y1 | x3 = 2, x2 = 1, ... }
-//
void LocalVariableMap::traverseCFG(CFG *CFGraph,
const PostOrderCFGView *SortedGraph,
std::vector<CFGBlockInfo> &BlockInfo) {
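The per-block contexts let the analysis resolve capabilities reached through local variables, because each local's current definition can be substituted when attribute expressions are evaluated. Roughly, assuming a Mutex class annotated as in the sketch further up (not from this patch):

struct Data {
  Mutex Mu;
  int X __attribute__((guarded_by(Mu)));
};

void update(Data *D) {
  Mutex *M = &D->Mu;  // VarMapBuilder records the definition M -> &D->Mu
  M->Lock();          // resolved through the context, so the lockset gains D->Mu
  D->X = 1;           // the guarded_by(Mu) requirement is met via that definition
  M->Unlock();
}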
@@ -731,12 +776,11 @@ void LocalVariableMap::traverseCFG(CFG *CFGraph,
// Visit all the statements in the basic block.
VarMapBuilder VMapBuilder(this, CurrBlockInfo->EntryContext);
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
- switch (BI->getKind()) {
+ for (const auto &BI : *CurrBlock) {
+ switch (BI.getKind()) {
case CFGElement::Statement: {
- CFGStmt CS = BI->castAs<CFGStmt>();
- VMapBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ CFGStmt CS = BI.castAs<CFGStmt>();
+ VMapBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
break;
}
default:
@@ -790,10 +834,9 @@ static void findBlockLocations(CFG *CFGraph,
if (CurrBlockInfo->ExitLoc.isValid()) {
// This block contains at least one statement. Find the source location
// of the first statement in the block.
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
+ for (const auto &BI : *CurrBlock) {
// FIXME: Handle other CFGElement kinds.
- if (Optional<CFGStmt> CS = BI->getAs<CFGStmt>()) {
+ if (Optional<CFGStmt> CS = BI.getAs<CFGStmt>()) {
CurrBlockInfo->EntryLoc = CS->getStmt()->getLocStart();
break;
}
@@ -808,9 +851,12 @@ static void findBlockLocations(CFG *CFGraph,
}
}
+namespace {
+
class LockableFactEntry : public FactEntry {
private:
- bool Managed; ///< managed by ScopedLockable object
+ /// managed by ScopedLockable object
+ bool Managed;
public:
LockableFactEntry(const CapabilityExpr &CE, LockKind LK, SourceLocation Loc,
@@ -857,7 +903,7 @@ public:
handleRemovalFromIntersection(const FactSet &FSet, FactManager &FactMan,
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
- for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto *UnderlyingMutex : UnderlyingMutexes) {
if (FSet.findLock(FactMan, CapabilityExpr(UnderlyingMutex, false))) {
// If this scoped lock manages another mutex, and if the underlying
// mutex is still held, then warn about the underlying mutex.
@@ -872,7 +918,7 @@ public:
bool FullyRemove, ThreadSafetyHandler &Handler,
StringRef DiagKind) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
- for (const til::SExpr *UnderlyingMutex : UnderlyingMutexes) {
+ for (const auto *UnderlyingMutex : UnderlyingMutexes) {
CapabilityExpr UnderCp(UnderlyingMutex, false);
auto UnderEntry = llvm::make_unique<LockableFactEntry>(
!UnderCp, LK_Exclusive, UnlockLoc);
@@ -900,7 +946,7 @@ public:
}
};
-/// \brief Class which implements the core thread safety analysis routines.
+/// Class which implements the core thread safety analysis routines.
class ThreadSafetyAnalyzer {
friend class BuildLockset;
friend class threadSafety::BeforeSet;
@@ -909,17 +955,17 @@ class ThreadSafetyAnalyzer {
threadSafety::til::MemRegionRef Arena;
threadSafety::SExprBuilder SxBuilder;
- ThreadSafetyHandler &Handler;
- const CXXMethodDecl *CurrentMethod;
- LocalVariableMap LocalVarMap;
- FactManager FactMan;
+ ThreadSafetyHandler &Handler;
+ const CXXMethodDecl *CurrentMethod;
+ LocalVariableMap LocalVarMap;
+ FactManager FactMan;
std::vector<CFGBlockInfo> BlockInfo;
- BeforeSet* GlobalBeforeSet;
+ BeforeSet *GlobalBeforeSet;
public:
ThreadSafetyAnalyzer(ThreadSafetyHandler &H, BeforeSet* Bset)
- : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
+ : Arena(&Bpa), SxBuilder(Arena), Handler(H), GlobalBeforeSet(Bset) {}
bool inCurrentScope(const CapabilityExpr &CapE);
@@ -959,6 +1005,7 @@ public:
void runAnalysis(AnalysisDeclContext &AC);
};
+
} // namespace
/// Process acquired_before and acquired_after attributes on Vd.
@@ -975,10 +1022,10 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
Info = InfoPtr.get();
}
- for (Attr* At : Vd->attrs()) {
+ for (const auto *At : Vd->attrs()) {
switch (At->getKind()) {
case attr::AcquiredBefore: {
- auto *A = cast<AcquiredBeforeAttr>(At);
+ const auto *A = cast<AcquiredBeforeAttr>(At);
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
@@ -986,7 +1033,7 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
Analyzer.SxBuilder.translateAttrExpr(Arg, nullptr);
if (const ValueDecl *Cpvd = Cp.valueDecl()) {
Info->Vect.push_back(Cpvd);
- auto It = BMap.find(Cpvd);
+ const auto It = BMap.find(Cpvd);
if (It == BMap.end())
insertAttrExprs(Cpvd, Analyzer);
}
@@ -994,7 +1041,7 @@ BeforeSet::BeforeInfo* BeforeSet::insertAttrExprs(const ValueDecl* Vd,
break;
}
case attr::AcquiredAfter: {
- auto *A = cast<AcquiredAfterAttr>(At);
+ const auto *A = cast<AcquiredAfterAttr>(At);
// Read exprs from the attribute, and add them to BeforeVect.
for (const auto *Arg : A->args()) {
@@ -1055,7 +1102,7 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
InfoVect.push_back(Info);
Info->Visited = 1;
- for (auto *Vdb : Info->Vect) {
+ for (const auto *Vdb : Info->Vect) {
// Exclude mutexes in our immediate before set.
if (FSet.containsMutexDecl(Analyzer.FactMan, Vdb)) {
StringRef L1 = StartVd->getName();
@@ -1077,13 +1124,11 @@ void BeforeSet::checkBeforeAfter(const ValueDecl* StartVd,
traverse(StartVd);
- for (auto* Info : InfoVect)
+ for (auto *Info : InfoVect)
Info->Visited = 0;
}
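insertAttrExprs and checkBeforeAfter build and walk the acquired_before/acquired_after graph, so acquiring a mutex while already holding one that is declared to come later in the order is reported. Illustrative use of the attributes, again assuming the annotated Mutex sketched earlier:

Mutex Mu1;
Mutex Mu2 __attribute__((acquired_after(Mu1)));

void wrongOrder() {
  Mu2.Lock();
  Mu1.Lock();    // warning: Mu1 must be acquired before Mu2
  Mu1.Unlock();
  Mu2.Unlock();
}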
-
-
-/// \brief Gets the value decl pointer from DeclRefExprs or MemberExprs.
+/// Gets the value decl pointer from DeclRefExprs or MemberExprs.
static const ValueDecl *getValueDecl(const Expr *Exp) {
if (const auto *CE = dyn_cast<ImplicitCastExpr>(Exp))
return getValueDecl(CE->getSubExpr());
@@ -1098,10 +1143,11 @@ static const ValueDecl *getValueDecl(const Expr *Exp) {
}
namespace {
+
template <typename Ty>
class has_arg_iterator_range {
- typedef char yes[1];
- typedef char no[2];
+ using yes = char[1];
+ using no = char[2];
template <typename Inner>
static yes& test(Inner *I, decltype(I->args()) * = nullptr);
@@ -1112,6 +1158,7 @@ class has_arg_iterator_range {
public:
static const bool value = sizeof(test<Ty>(nullptr)) == sizeof(yes);
};
+
} // namespace
static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
@@ -1163,20 +1210,18 @@ ClassifyDiagnostic(const AttrTy *A) {
return "mutex";
}
-
-inline bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
+bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
if (!CurrentMethod)
return false;
- if (auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
- auto *VD = P->clangDecl();
+ if (const auto *P = dyn_cast_or_null<til::Project>(CapE.sexpr())) {
+ const auto *VD = P->clangDecl();
if (VD)
return VD->getDeclContext() == CurrentMethod->getDeclContext();
}
return false;
}
-
-/// \brief Add a new lock to the lockset, warning if the lock is already there.
+/// Add a new lock to the lockset, warning if the lock is already there.
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
std::unique_ptr<FactEntry> Entry,
@@ -1214,8 +1259,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
}
}
-
-/// \brief Remove a lock from the lockset, warning if the lock is not there.
+/// Remove a lock from the lockset, warning if the lock is not there.
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
@@ -1241,8 +1285,7 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
DiagKind);
}
-
-/// \brief Extract the list of mutexIDs from the attribute on an expression,
+/// Extract the list of mutexIDs from the attribute on an expression,
/// and push them onto Mtxs, discarding any duplicates.
template <typename AttrType>
void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
@@ -1273,8 +1316,7 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
}
}
-
-/// \brief Extract the list of mutexIDs from a trylock attribute. If the
+/// Extract the list of mutexIDs from a trylock attribute. If the
/// trylock applies to the given edge, then push them onto Mtxs, discarding
/// any duplicates.
template <class AttrType>
@@ -1285,9 +1327,9 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
Expr *BrE, bool Neg) {
// Find out which branch has the lock
bool branch = false;
- if (CXXBoolLiteralExpr *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
+ if (const auto *BLE = dyn_cast_or_null<CXXBoolLiteralExpr>(BrE))
branch = BLE->getValue();
- else if (IntegerLiteral *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
+ else if (const auto *ILE = dyn_cast_or_null<IntegerLiteral>(BrE))
branch = ILE->getValue().getBoolValue();
int branchnum = branch ? 0 : 1;
@@ -1307,19 +1349,17 @@ static bool getStaticBooleanValue(Expr *E, bool &TCond) {
if (isa<CXXNullPtrLiteralExpr>(E) || isa<GNUNullExpr>(E)) {
TCond = false;
return true;
- } else if (CXXBoolLiteralExpr *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
+ } else if (const auto *BLE = dyn_cast<CXXBoolLiteralExpr>(E)) {
TCond = BLE->getValue();
return true;
- } else if (IntegerLiteral *ILE = dyn_cast<IntegerLiteral>(E)) {
+ } else if (const auto *ILE = dyn_cast<IntegerLiteral>(E)) {
TCond = ILE->getValue().getBoolValue();
return true;
- } else if (ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(E)) {
+ } else if (auto *CE = dyn_cast<ImplicitCastExpr>(E))
return getStaticBooleanValue(CE->getSubExpr(), TCond);
- }
return false;
}
-
// If Cond can be traced back to a function call, return the call expression.
// The negate variable should be called with false, and will be set to true
// if the function call is negated, e.g. if (!mu.tryLock(...))
@@ -1329,30 +1369,26 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
if (!Cond)
return nullptr;
- if (const CallExpr *CallExp = dyn_cast<CallExpr>(Cond)) {
+ if (const auto *CallExp = dyn_cast<CallExpr>(Cond))
return CallExp;
- }
- else if (const ParenExpr *PE = dyn_cast<ParenExpr>(Cond)) {
+ else if (const auto *PE = dyn_cast<ParenExpr>(Cond))
return getTrylockCallExpr(PE->getSubExpr(), C, Negate);
- }
- else if (const ImplicitCastExpr *CE = dyn_cast<ImplicitCastExpr>(Cond)) {
+ else if (const auto *CE = dyn_cast<ImplicitCastExpr>(Cond))
return getTrylockCallExpr(CE->getSubExpr(), C, Negate);
- }
- else if (const ExprWithCleanups* EWC = dyn_cast<ExprWithCleanups>(Cond)) {
+ else if (const auto *EWC = dyn_cast<ExprWithCleanups>(Cond))
return getTrylockCallExpr(EWC->getSubExpr(), C, Negate);
- }
- else if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Cond)) {
+ else if (const auto *DRE = dyn_cast<DeclRefExpr>(Cond)) {
const Expr *E = LocalVarMap.lookupExpr(DRE->getDecl(), C);
return getTrylockCallExpr(E, C, Negate);
}
- else if (const UnaryOperator *UOP = dyn_cast<UnaryOperator>(Cond)) {
+ else if (const auto *UOP = dyn_cast<UnaryOperator>(Cond)) {
if (UOP->getOpcode() == UO_LNot) {
Negate = !Negate;
return getTrylockCallExpr(UOP->getSubExpr(), C, Negate);
}
return nullptr;
}
- else if (const BinaryOperator *BOP = dyn_cast<BinaryOperator>(Cond)) {
+ else if (const auto *BOP = dyn_cast<BinaryOperator>(Cond)) {
if (BOP->getOpcode() == BO_EQ || BOP->getOpcode() == BO_NE) {
if (BOP->getOpcode() == BO_NE)
Negate = !Negate;
@@ -1373,16 +1409,14 @@ const CallExpr* ThreadSafetyAnalyzer::getTrylockCallExpr(const Stmt *Cond,
// LHS must have been evaluated in a different block.
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
}
- if (BOP->getOpcode() == BO_LOr) {
+ if (BOP->getOpcode() == BO_LOr)
return getTrylockCallExpr(BOP->getRHS(), C, Negate);
- }
return nullptr;
}
return nullptr;
}
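getTrylockCallExpr unwraps parentheses, implicit casts, cleanups, logical negation, ==/!= against a literal, &&/|| operands, and locals recorded in the LocalVariableMap until it reaches the try-lock call. A sketch of condition spellings this makes equivalent, assuming TryLock is annotated with try_acquire_capability(true):

void tryPatterns(Mutex &Mu) {
  if (Mu.TryLock()) {        // direct call
    Mu.Unlock();
  }

  if (!Mu.TryLock())         // negation flips which branch holds Mu
    return;
  Mu.Unlock();

  bool Ok = Mu.TryLock();    // result stored in a local, recovered through the
  if (Ok == true) {          // variable map and the == comparison
    Mu.Unlock();
  }
}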
-
-/// \brief Find the lockset that holds on the edge between PredBlock
+/// Find the lockset that holds on the edge between PredBlock
/// and CurrBlock. The edge set is the exit set of PredBlock (passed
/// as the ExitSet parameter) plus any trylocks, which are conditionally held.
void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
@@ -1400,12 +1434,11 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
StringRef CapDiagKind = "mutex";
- CallExpr *Exp =
- const_cast<CallExpr*>(getTrylockCallExpr(Cond, LVarCtx, Negate));
+ auto *Exp = const_cast<CallExpr *>(getTrylockCallExpr(Cond, LVarCtx, Negate));
if (!Exp)
return;
- NamedDecl *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ auto *FunDecl = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!FunDecl || !FunDecl->hasAttrs())
return;
@@ -1413,19 +1446,25 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
CapExprSet SharedLocksToAdd;
// If the condition is a call to a Trylock function, then grab the attributes
- for (auto *Attr : FunDecl->attrs()) {
+ for (const auto *Attr : FunDecl->attrs()) {
switch (Attr->getKind()) {
+ case attr::TryAcquireCapability: {
+ auto *A = cast<TryAcquireCapabilityAttr>(Attr);
+ getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
+ Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
+ Negate);
+ CapDiagKind = ClassifyDiagnostic(A);
+ break;
+ };
case attr::ExclusiveTrylockFunction: {
- ExclusiveTrylockFunctionAttr *A =
- cast<ExclusiveTrylockFunctionAttr>(Attr);
+ const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
break;
}
case attr::SharedTrylockFunction: {
- SharedTrylockFunctionAttr *A =
- cast<SharedTrylockFunctionAttr>(Attr);
+ const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
PredBlock, CurrBlock, A->getSuccessValue(), Negate);
CapDiagKind = ClassifyDiagnostic(A);
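The new TryAcquireCapability case routes the generic attribute through the same machinery as the older exclusive/shared trylock attributes, so declarations like the following now take part in edge lockset computation (illustrative):

class __attribute__((capability("mutex"))) Mutex {
public:
  bool TryLock() __attribute__((try_acquire_capability(true)));
  bool ReaderTryLock() __attribute__((try_acquire_shared_capability(true)));
  void Unlock() __attribute__((release_capability()));
};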
@@ -1449,7 +1488,8 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
}
namespace {
-/// \brief We use this class to visit different types of expressions in
+
+/// We use this class to visit different types of expressions in
/// CFGBlocks, and build up the lockset.
/// An expression may cause us to add or remove locks from the lockset, or else
/// output error messages related to missing locks.
@@ -1478,12 +1518,8 @@ class BuildLockset : public StmtVisitor<BuildLockset> {
public:
BuildLockset(ThreadSafetyAnalyzer *Anlzr, CFGBlockInfo &Info)
- : StmtVisitor<BuildLockset>(),
- Analyzer(Anlzr),
- FSet(Info.EntrySet),
- LVarCtx(Info.EntryContext),
- CtxIndex(Info.EntryIndex)
- {}
+ : StmtVisitor<BuildLockset>(), Analyzer(Anlzr), FSet(Info.EntrySet),
+ LVarCtx(Info.EntryContext), CtxIndex(Info.EntryIndex) {}
void VisitUnaryOperator(UnaryOperator *UO);
void VisitBinaryOperator(BinaryOperator *BO);
@@ -1492,9 +1528,10 @@ public:
void VisitCXXConstructExpr(CXXConstructExpr *Exp);
void VisitDeclStmt(DeclStmt *S);
};
+
} // namespace
-/// \brief Warn if the LSet does not contain a lock sufficient to protect access
+/// Warn if the LSet does not contain a lock sufficient to protect access
/// of at least the passed in AccessKind.
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
AccessKind AK, Expr *MutexExp,
@@ -1558,7 +1595,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
}
}
-/// \brief Warn if the LSet contains the given lock.
+/// Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
Expr *MutexExp, StringRef DiagKind) {
CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
@@ -1576,7 +1613,7 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
}
}
-/// \brief Checks guarded_by and pt_guarded_by attributes.
+/// Checks guarded_by and pt_guarded_by attributes.
/// Whenever we identify an access (read or write) to a DeclRefExpr that is
/// marked with guarded_by, we must ensure the appropriate mutexes are held.
/// Similarly, we check if the access is to an expression that dereferences
@@ -1600,19 +1637,19 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
break;
}
- if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Exp)) {
+ if (const auto *UO = dyn_cast<UnaryOperator>(Exp)) {
// For dereferences
- if (UO->getOpcode() == clang::UO_Deref)
+ if (UO->getOpcode() == UO_Deref)
checkPtAccess(UO->getSubExpr(), AK, POK);
return;
}
- if (const ArraySubscriptExpr *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
+ if (const auto *AE = dyn_cast<ArraySubscriptExpr>(Exp)) {
checkPtAccess(AE->getLHS(), AK, POK);
return;
}
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(Exp)) {
+ if (const auto *ME = dyn_cast<MemberExpr>(Exp)) {
if (ME->isArrow())
checkPtAccess(ME->getBase(), AK, POK);
else
@@ -1632,17 +1669,16 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
ClassifyDiagnostic(I), Loc);
}
-
-/// \brief Checks pt_guarded_by and pt_guarded_var attributes.
+/// Checks pt_guarded_by and pt_guarded_var attributes.
/// POK is the same operationKind that was passed to checkAccess.
void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK) {
while (true) {
- if (const ParenExpr *PE = dyn_cast<ParenExpr>(Exp)) {
+ if (const auto *PE = dyn_cast<ParenExpr>(Exp)) {
Exp = PE->getSubExpr();
continue;
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(Exp)) {
+ if (const auto *CE = dyn_cast<CastExpr>(Exp)) {
if (CE->getCastKind() == CK_ArrayToPointerDecay) {
// If it's an actual array, and not a pointer, then it's elements
// are protected by GUARDED_BY, not PT_GUARDED_BY;
@@ -1672,7 +1708,7 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
ClassifyDiagnostic(I), Exp->getExprLoc());
}
-/// \brief Process a function call, method call, constructor call,
+/// Process a function call, method call, constructor call,
/// or destructor call. This involves looking at the attributes on the
/// corresponding function/method/constructor/destructor, issuing warnings,
/// and updating the locksets accordingly.
@@ -1689,23 +1725,22 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
CapExprSet ScopedExclusiveReqs, ScopedSharedReqs;
StringRef CapDiagKind = "mutex";
- // Figure out if we're calling the constructor of scoped lockable class
+ // Figure out if we're constructing an object of scoped lockable class
bool isScopedVar = false;
if (VD) {
- if (const CXXConstructorDecl *CD = dyn_cast<const CXXConstructorDecl>(D)) {
+ if (const auto *CD = dyn_cast<const CXXConstructorDecl>(D)) {
const CXXRecordDecl* PD = CD->getParent();
if (PD && PD->hasAttr<ScopedLockableAttr>())
isScopedVar = true;
}
}
- for(Attr *Atconst : D->attrs()) {
- Attr* At = const_cast<Attr*>(Atconst);
+ for(const Attr *At : D->attrs()) {
switch (At->getKind()) {
// When we encounter a lock function, we need to add the lock to our
// lockset.
case attr::AcquireCapability: {
- auto *A = cast<AcquireCapabilityAttr>(At);
+ const auto *A = cast<AcquireCapabilityAttr>(At);
Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
: ExclusiveLocksToAdd,
A, Exp, D, VD);
@@ -1718,7 +1753,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// a warning if it is already there, and will not generate a warning
// if it is not removed.
case attr::AssertExclusiveLock: {
- AssertExclusiveLockAttr *A = cast<AssertExclusiveLockAttr>(At);
+ const auto *A = cast<AssertExclusiveLockAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
@@ -1730,7 +1765,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
break;
}
case attr::AssertSharedLock: {
- AssertSharedLockAttr *A = cast<AssertSharedLockAttr>(At);
+ const auto *A = cast<AssertSharedLockAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
@@ -1743,7 +1778,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::AssertCapability: {
- AssertCapabilityAttr *A = cast<AssertCapabilityAttr>(At);
+ const auto *A = cast<AssertCapabilityAttr>(At);
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
@@ -1759,7 +1794,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
// When we encounter an unlock function, we need to remove unlocked
// mutexes from the lockset, and flag a warning if they are not there.
case attr::ReleaseCapability: {
- auto *A = cast<ReleaseCapabilityAttr>(At);
+ const auto *A = cast<ReleaseCapabilityAttr>(At);
if (A->isGeneric())
Analyzer->getMutexIDs(GenericLocksToRemove, A, Exp, D, VD);
else if (A->isShared())
@@ -1772,7 +1807,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::RequiresCapability: {
- RequiresCapabilityAttr *A = cast<RequiresCapabilityAttr>(At);
+ const auto *A = cast<RequiresCapabilityAttr>(At);
for (auto *Arg : A->args()) {
warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
POK_FunctionCall, ClassifyDiagnostic(A),
@@ -1788,7 +1823,7 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
case attr::LocksExcluded: {
- LocksExcludedAttr *A = cast<LocksExcludedAttr>(At);
+ const auto *A = cast<LocksExcludedAttr>(At);
for (auto *Arg : A->args())
warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
break;
@@ -1800,6 +1835,16 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
}
}
+ // Remove locks first to allow lock upgrading/downgrading.
+ // FIXME -- should only fully remove if the attribute refers to 'this'.
+ bool Dtor = isa<CXXDestructorDecl>(D);
+ for (const auto &M : ExclusiveLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+ for (const auto &M : SharedLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+ for (const auto &M : GenericLocksToRemove)
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
+
// Add locks.
for (const auto &M : ExclusiveLocksToAdd)
Analyzer->addLock(FSet, llvm::make_unique<LockableFactEntry>(
@@ -1826,31 +1871,19 @@ void BuildLockset::handleCall(Expr *Exp, const NamedDecl *D, VarDecl *VD) {
Scp, MLoc, ExclusiveLocksToAdd, SharedLocksToAdd),
CapDiagKind);
}
-
- // Remove locks.
- // FIXME -- should only fully remove if the attribute refers to 'this'.
- bool Dtor = isa<CXXDestructorDecl>(D);
- for (const auto &M : ExclusiveLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
- for (const auto &M : SharedLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
- for (const auto &M : GenericLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
}
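Moving the removals ahead of the additions is what lets a single call annotated with both a release and an acquire behave as an upgrade or downgrade rather than a double acquire. A sketch of the annotations this enables (assumed, not from this patch):

class __attribute__((capability("mutex"))) Mutex {
public:
  void ReaderLock() __attribute__((acquire_shared_capability()));
  // Shared -> exclusive upgrade: the shared fact is dropped first,
  // then the exclusive fact is added.
  void UpgradeToExclusive() __attribute__((release_shared_capability(),
                                           acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};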
-
-/// \brief For unary operations which read and write a variable, we need to
+/// For unary operations which read and write a variable, we need to
/// check whether we hold any required mutexes. Reads are checked in
/// VisitCastExpr.
void BuildLockset::VisitUnaryOperator(UnaryOperator *UO) {
switch (UO->getOpcode()) {
- case clang::UO_PostDec:
- case clang::UO_PostInc:
- case clang::UO_PreDec:
- case clang::UO_PreInc: {
+ case UO_PostDec:
+ case UO_PostInc:
+ case UO_PreDec:
+ case UO_PreInc:
checkAccess(UO->getSubExpr(), AK_Written);
break;
- }
default:
break;
}
@@ -1869,7 +1902,6 @@ void BuildLockset::VisitBinaryOperator(BinaryOperator *BO) {
checkAccess(BO->getLHS(), AK_Written);
}
-
/// Whenever we do an LValue to Rvalue cast, we are reading a variable and
/// need to ensure we hold any required mutexes.
/// FIXME: Deal with non-primitive types.
@@ -1879,23 +1911,21 @@ void BuildLockset::VisitCastExpr(CastExpr *CE) {
checkAccess(CE->getSubExpr(), AK_Read);
}
-
void BuildLockset::VisitCallExpr(CallExpr *Exp) {
bool ExamineArgs = true;
bool OperatorFun = false;
- if (CXXMemberCallExpr *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
- MemberExpr *ME = dyn_cast<MemberExpr>(CE->getCallee());
+ if (const auto *CE = dyn_cast<CXXMemberCallExpr>(Exp)) {
+ const auto *ME = dyn_cast<MemberExpr>(CE->getCallee());
// ME can be null when calling a method pointer
- CXXMethodDecl *MD = CE->getMethodDecl();
+ const CXXMethodDecl *MD = CE->getMethodDecl();
if (ME && MD) {
if (ME->isArrow()) {
- if (MD->isConst()) {
+ if (MD->isConst())
checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
- } else { // FIXME -- should be AK_Written
+ else // FIXME -- should be AK_Written
checkPtAccess(CE->getImplicitObjectArgument(), AK_Read);
- }
} else {
if (MD->isConst())
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
@@ -1903,7 +1933,7 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
checkAccess(CE->getImplicitObjectArgument(), AK_Read);
}
}
- } else if (CXXOperatorCallExpr *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
+ } else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
OperatorFun = true;
auto OEop = OE->getOperator();
@@ -1938,13 +1968,11 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
if (ExamineArgs) {
if (FunctionDecl *FD = Exp->getDirectCallee()) {
-
// NO_THREAD_SAFETY_ANALYSIS does double duty here. Normally it
// only turns off checking within the body of a function, but we also
// use it to turn off checking in arguments to the function. This
// could result in some false negatives, but the alternative is to
// create yet another attribute.
- //
if (!FD->hasAttr<NoThreadSafetyAnalysisAttr>()) {
unsigned Fn = FD->getNumParams();
unsigned Cn = Exp->getNumArgs();
@@ -1976,7 +2004,7 @@ void BuildLockset::VisitCallExpr(CallExpr *Exp) {
}
}
- NamedDecl *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
+ auto *D = dyn_cast_or_null<NamedDecl>(Exp->getCalleeDecl());
if(!D || !D->hasAttrs())
return;
handleCall(Exp, D);
@@ -1991,30 +2019,74 @@ void BuildLockset::VisitCXXConstructExpr(CXXConstructExpr *Exp) {
// FIXME -- only handles constructors in DeclStmt below.
}
+static CXXConstructorDecl *
+findConstructorForByValueReturn(const CXXRecordDecl *RD) {
+ // Prefer a move constructor over a copy constructor. If there's more than
+ // one copy constructor or more than one move constructor, we arbitrarily
+ // pick the first declared such constructor rather than trying to guess which
+ // one is more appropriate.
+ CXXConstructorDecl *CopyCtor = nullptr;
+ for (auto *Ctor : RD->ctors()) {
+ if (Ctor->isDeleted())
+ continue;
+ if (Ctor->isMoveConstructor())
+ return Ctor;
+ if (!CopyCtor && Ctor->isCopyConstructor())
+ CopyCtor = Ctor;
+ }
+ return CopyCtor;
+}
+
+static Expr *buildFakeCtorCall(CXXConstructorDecl *CD, ArrayRef<Expr *> Args,
+ SourceLocation Loc) {
+ ASTContext &Ctx = CD->getASTContext();
+ return CXXConstructExpr::Create(Ctx, Ctx.getRecordType(CD->getParent()), Loc,
+ CD, true, Args, false, false, false, false,
+ CXXConstructExpr::CK_Complete,
+ SourceRange(Loc, Loc));
+}
+
void BuildLockset::VisitDeclStmt(DeclStmt *S) {
// adjust the context
LVarCtx = Analyzer->LocalVarMap.getNextContext(CtxIndex, S, LVarCtx);
for (auto *D : S->getDeclGroup()) {
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(D)) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(D)) {
Expr *E = VD->getInit();
+ if (!E)
+ continue;
+ E = E->IgnoreParens();
+
// handle constructors that involve temporaries
- if (ExprWithCleanups *EWC = dyn_cast_or_null<ExprWithCleanups>(E))
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(E))
E = EWC->getSubExpr();
+ if (auto *BTE = dyn_cast<CXXBindTemporaryExpr>(E))
+ E = BTE->getSubExpr();
- if (CXXConstructExpr *CE = dyn_cast_or_null<CXXConstructExpr>(E)) {
- NamedDecl *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
+ if (const auto *CE = dyn_cast<CXXConstructExpr>(E)) {
+ const auto *CtorD = dyn_cast_or_null<NamedDecl>(CE->getConstructor());
if (!CtorD || !CtorD->hasAttrs())
- return;
- handleCall(CE, CtorD, VD);
+ continue;
+ handleCall(E, CtorD, VD);
+ } else if (isa<CallExpr>(E) && E->isRValue()) {
+ // If the object is initialized by a function call that returns a
+ // scoped lockable by value, use the attributes on the copy or move
+ // constructor to figure out what effect that should have on the
+ // lockset.
+ // FIXME: Is this really the best way to handle this situation?
+ auto *RD = E->getType()->getAsCXXRecordDecl();
+ if (!RD || !RD->hasAttr<ScopedLockableAttr>())
+ continue;
+ CXXConstructorDecl *CtorD = findConstructorForByValueReturn(RD);
+ if (!CtorD || !CtorD->hasAttrs())
+ continue;
+ handleCall(buildFakeCtorCall(CtorD, {E}, E->getLocStart()), CtorD, VD);
}
}
}
}
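VisitDeclStmt now strips ExprWithCleanups and CXXBindTemporaryExpr wrappers and, when a scoped lockable is initialized from a call returning it by value, synthesizes a constructor call so the copy or move constructor's attributes can be applied. Roughly the initializer shapes involved; MutexLock and makeLock are assumed names, and the exact effect depends on the attributes on the copy/move constructor:

class __attribute__((scoped_lockable)) MutexLock {
public:
  explicit MutexLock(Mutex *Mu) __attribute__((acquire_capability(Mu)));
  ~MutexLock() __attribute__((release_capability()));
};

MutexLock makeLock(Mutex *Mu);   // assumed factory returning the guard by value

void shapes(Mutex *Mu) {
  MutexLock A(Mu);               // plain CXXConstructExpr, handled as before
  MutexLock B = MutexLock(Mu);   // temporary wrappers are now peeled off
  MutexLock C = makeLock(Mu);    // CallExpr initializer; modeled through a
                                 // synthesized copy/move constructor call
}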
-
-
-/// \brief Compute the intersection of two locksets and issue warnings for any
+/// Compute the intersection of two locksets and issue warnings for any
/// locks in the symmetric difference.
///
/// This function is used at a merge point in the CFG when comparing the lockset
@@ -2076,7 +2148,6 @@ void ThreadSafetyAnalyzer::intersectAndWarn(FactSet &FSet1,
}
}
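Warning on the symmetric difference is what rejects conditionally held locks at CFG join points, as in the classic conditional-locking case (sketch, using the annotated Mutex from earlier):

void doWork();

void conditionalLock(Mutex &Mu, bool Flag) {
  if (Flag)
    Mu.Lock();
  doWork();   // warning: mutex 'Mu' is not held on every path through here
  if (Flag)
    Mu.Unlock();
}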
-
// Return true if block B never continues to its successors.
static bool neverReturns(const CFGBlock *B) {
if (B->hasNoReturnElement())
@@ -2092,8 +2163,7 @@ static bool neverReturns(const CFGBlock *B) {
return false;
}
-
-/// \brief Check a function's CFG for thread-safety violations.
+/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
@@ -2110,7 +2180,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CFG *CFGraph = walker.getGraph();
const NamedDecl *D = walker.getDecl();
- const FunctionDecl *CurrentFunction = dyn_cast<FunctionDecl>(D);
+ const auto *CurrentFunction = dyn_cast<FunctionDecl>(D);
CurrentMethod = dyn_cast<CXXMethodDecl>(D);
if (D->hasAttr<NoThreadSafetyAnalysisAttr>())
@@ -2184,10 +2254,13 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
A, nullptr, D);
CapDiagKind = ClassifyDiagnostic(A);
} else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
- // Don't try to check trylock functions for now
+ // Don't try to check trylock functions for now.
return;
} else if (isa<SharedTrylockFunctionAttr>(Attr)) {
- // Don't try to check trylock functions for now
+ // Don't try to check trylock functions for now.
+ return;
+ } else if (isa<TryAcquireCapabilityAttr>(Attr)) {
+ // Don't try to check trylock functions for now.
return;
}
}
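This attribute scan seeds the lockset at function entry from the declaration, so declared requirements are simply assumed to hold inside the body, while trylock-style functions are skipped entirely for now. For example (illustrative, assuming the annotated Mutex from earlier):

class Account {
public:
  Mutex Mu;
  int Balance __attribute__((guarded_by(Mu)));
};

void deposit(Account &A, int Amount) __attribute__((requires_capability(A.Mu))) {
  A.Balance += Amount;   // no warning: A.Mu is already in the entry lockset
}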
@@ -2229,7 +2302,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
SmallVector<CFGBlock *, 8> SpecialBlocks;
for (CFGBlock::const_pred_iterator PI = CurrBlock->pred_begin(),
PE = CurrBlock->pred_end(); PI != PE; ++PI) {
-
// if *PI -> CurrBlock is a back edge
if (*PI == nullptr || !VisitedBlocks.alreadySet(*PI))
continue;
@@ -2306,24 +2378,23 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
BuildLockset LocksetBuilder(this, *CurrBlockInfo);
// Visit all the statements in the basic block.
- for (CFGBlock::const_iterator BI = CurrBlock->begin(),
- BE = CurrBlock->end(); BI != BE; ++BI) {
- switch (BI->getKind()) {
+ for (const auto &BI : *CurrBlock) {
+ switch (BI.getKind()) {
case CFGElement::Statement: {
- CFGStmt CS = BI->castAs<CFGStmt>();
- LocksetBuilder.Visit(const_cast<Stmt*>(CS.getStmt()));
+ CFGStmt CS = BI.castAs<CFGStmt>();
+ LocksetBuilder.Visit(const_cast<Stmt *>(CS.getStmt()));
break;
}
// Ignore BaseDtor, MemberDtor, and TemporaryDtor for now.
case CFGElement::AutomaticObjectDtor: {
- CFGAutomaticObjDtor AD = BI->castAs<CFGAutomaticObjDtor>();
- CXXDestructorDecl *DD = const_cast<CXXDestructorDecl *>(
+ CFGAutomaticObjDtor AD = BI.castAs<CFGAutomaticObjDtor>();
+ auto *DD = const_cast<CXXDestructorDecl *>(
AD.getDestructorDecl(AC.getASTContext()));
if (!DD->hasAttrs())
break;
// Create a dummy expression,
- VarDecl *VD = const_cast<VarDecl*>(AD.getVarDecl());
+ auto *VD = const_cast<VarDecl *>(AD.getVarDecl());
DeclRefExpr DRE(VD, false, VD->getType().getNonReferenceType(),
VK_LValue, AD.getTriggerStmt()->getLocEnd());
LocksetBuilder.handleCall(&DRE, DD);
@@ -2341,7 +2412,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
// Lockset held at the beginning of FirstLoopBlock in the EntryLockSets map.
for (CFGBlock::const_succ_iterator SI = CurrBlock->succ_begin(),
SE = CurrBlock->succ_end(); SI != SE; ++SI) {
-
// if CurrBlock -> *SI is *not* a back edge
if (*SI == nullptr || !VisitedBlocks.alreadySet(*SI))
continue;
@@ -2389,8 +2459,7 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
Handler.leaveFunction(CurrentFunction);
}
-
-/// \brief Check a function's CFG for thread-safety violations.
+/// Check a function's CFG for thread-safety violations.
///
/// We traverse the blocks in the CFG, compute the set of mutexes that are held
/// at the end of each block, and issue warnings for thread safety violations.
@@ -2406,7 +2475,7 @@ void threadSafety::runThreadSafetyAnalysis(AnalysisDeclContext &AC,
void threadSafety::threadSafetyCleanup(BeforeSet *Cache) { delete Cache; }
-/// \brief Helper function that returns a LockKind required for the given level
+/// Helper function that returns a LockKind required for the given level
/// of access.
LockKind threadSafety::getLockKindFromAccessKind(AccessKind AK) {
switch (AK) {
diff --git a/lib/Analysis/ThreadSafetyCommon.cpp b/lib/Analysis/ThreadSafetyCommon.cpp
index 99284f07b45b..fced17ff9197 100644
--- a/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/lib/Analysis/ThreadSafetyCommon.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafetyCommon.cpp -----------------------------------*- C++ -*-===//
+//===- ThreadSafetyCommon.cpp ---------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -13,24 +13,32 @@
#include "clang/Analysis/Analyses/ThreadSafetyCommon.h"
#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclGroup.h"
#include "clang/AST/DeclObjC.h"
+#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
-#include "clang/AST/StmtCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
-#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
-#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
+#include "clang/Basic/LLVM.h"
#include "clang/Basic/OperatorKinds.h"
-#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/Specifiers.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
#include <algorithm>
+#include <cassert>
+#include <string>
+#include <utility>
using namespace clang;
using namespace threadSafety;
// From ThreadSafetyUtil.h
-std::string threadSafety::getSourceLiteralString(const clang::Expr *CE) {
+std::string threadSafety::getSourceLiteralString(const Expr *CE) {
switch (CE->getStmtClass()) {
case Stmt::IntegerLiteralClass:
return cast<IntegerLiteral>(CE)->getValue().toString(10, true);
@@ -59,7 +67,7 @@ static bool isIncompletePhi(const til::SExpr *E) {
return false;
}
-typedef SExprBuilder::CallingContext CallingContext;
+using CallingContext = SExprBuilder::CallingContext;
til::SExpr *SExprBuilder::lookupStmt(const Stmt *S) {
auto It = SMap.find(S);
@@ -74,11 +82,11 @@ til::SCFG *SExprBuilder::buildCFG(CFGWalker &Walker) {
}
static bool isCalleeArrow(const Expr *E) {
- const MemberExpr *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
+ const auto *ME = dyn_cast<MemberExpr>(E->IgnoreParenCasts());
return ME ? ME->isArrow() : false;
}
-/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
/// \param AttrExp The expression to translate.
@@ -97,20 +105,18 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// Examine DeclExp to find SelfArg and FunArgs, which are used to substitute
// for formal parameters when we call buildMutexID later.
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(DeclExp)) {
+ if (const auto *ME = dyn_cast<MemberExpr>(DeclExp)) {
Ctx.SelfArg = ME->getBase();
Ctx.SelfArrow = ME->isArrow();
- } else if (const CXXMemberCallExpr *CE =
- dyn_cast<CXXMemberCallExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CXXMemberCallExpr>(DeclExp)) {
Ctx.SelfArg = CE->getImplicitObjectArgument();
Ctx.SelfArrow = isCalleeArrow(CE->getCallee());
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
- } else if (const CallExpr *CE = dyn_cast<CallExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CallExpr>(DeclExp)) {
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
- } else if (const CXXConstructExpr *CE =
- dyn_cast<CXXConstructExpr>(DeclExp)) {
+ } else if (const auto *CE = dyn_cast<CXXConstructExpr>(DeclExp)) {
Ctx.SelfArg = nullptr; // Will be set below
Ctx.NumArgs = CE->getNumArgs();
Ctx.FunArgs = CE->getArgs();
@@ -140,14 +146,14 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return translateAttrExpr(AttrExp, &Ctx);
}
-/// \brief Translate a clang expression in an attribute to a til::SExpr.
+/// Translate a clang expression in an attribute to a til::SExpr.
// This assumes a CallingContext has already been created.
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CallingContext *Ctx) {
if (!AttrExp)
return CapabilityExpr(nullptr, false);
- if (auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
+ if (const auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
if (SLit->getString() == StringRef("*"))
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
@@ -158,13 +164,13 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
}
bool Neg = false;
- if (auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
+ if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(AttrExp)) {
if (OE->getOperator() == OO_Exclaim) {
Neg = true;
AttrExp = OE->getArg(0);
}
}
- else if (auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
+ else if (const auto *UO = dyn_cast<UnaryOperator>(AttrExp)) {
if (UO->getOpcode() == UO_LNot) {
Neg = true;
AttrExp = UO->getSubExpr();
@@ -179,7 +185,7 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
return CapabilityExpr(nullptr, false);
// Hack to deal with smart pointers -- strip off top-level pointer casts.
- if (auto *CE = dyn_cast_or_null<til::Cast>(E)) {
+ if (const auto *CE = dyn_cast_or_null<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
return CapabilityExpr(CE->expr(), Neg);
}
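The '!' handling a few lines up (OO_Exclaim and UO_LNot) is what supports negative capability expressions in attributes, i.e. requirements that a lock is not held. Illustrative syntax, not from this patch, again assuming the annotated Mutex sketched earlier:

Mutex Mu;
int Data __attribute__((guarded_by(Mu)));

void lockAndWrite() __attribute__((requires_capability(!Mu))) {
  Mu.Lock();   // fine: the negative capability states Mu was not already held
  Data = 1;
  Mu.Unlock();
}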
@@ -254,7 +260,7 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
default:
break;
}
- if (const CastExpr *CE = dyn_cast<CastExpr>(S))
+ if (const auto *CE = dyn_cast<CastExpr>(S))
return translateCastExpr(CE, Ctx);
return new (Arena) til::Undefined(S);
@@ -262,11 +268,11 @@ til::SExpr *SExprBuilder::translate(const Stmt *S, CallingContext *Ctx) {
til::SExpr *SExprBuilder::translateDeclRefExpr(const DeclRefExpr *DRE,
CallingContext *Ctx) {
- const ValueDecl *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
+ const auto *VD = cast<ValueDecl>(DRE->getDecl()->getCanonicalDecl());
// Function parameters require substitution and/or renaming.
- if (const ParmVarDecl *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
- const FunctionDecl *FD =
+ if (const auto *PV = dyn_cast_or_null<ParmVarDecl>(VD)) {
+ const auto *FD =
cast<FunctionDecl>(PV->getDeclContext())->getCanonicalDecl();
unsigned I = PV->getFunctionScopeIndex();
@@ -294,13 +300,13 @@ til::SExpr *SExprBuilder::translateCXXThisExpr(const CXXThisExpr *TE,
}
static const ValueDecl *getValueDeclFromSExpr(const til::SExpr *E) {
- if (auto *V = dyn_cast<til::Variable>(E))
+ if (const auto *V = dyn_cast<til::Variable>(E))
return V->clangDecl();
- if (auto *Ph = dyn_cast<til::Phi>(E))
+ if (const auto *Ph = dyn_cast<til::Phi>(E))
return Ph->clangDecl();
- if (auto *P = dyn_cast<til::Project>(E))
+ if (const auto *P = dyn_cast<til::Project>(E))
return P->clangDecl();
- if (auto *L = dyn_cast<til::LiteralPtr>(E))
+ if (const auto *L = dyn_cast<til::LiteralPtr>(E))
return L->clangDecl();
return nullptr;
}
@@ -309,7 +315,7 @@ static bool hasCppPointerType(const til::SExpr *E) {
auto *VD = getValueDeclFromSExpr(E);
if (VD && VD->getType()->isPointerType())
return true;
- if (auto *C = dyn_cast<til::Cast>(E))
+ if (const auto *C = dyn_cast<til::Cast>(E))
return C->castOpcode() == til::CAST_objToPtr;
return false;
@@ -333,9 +339,8 @@ til::SExpr *SExprBuilder::translateMemberExpr(const MemberExpr *ME,
til::SExpr *BE = translate(ME->getBase(), Ctx);
til::SExpr *E = new (Arena) til::SApply(BE);
- const ValueDecl *D =
- cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
- if (auto *VD = dyn_cast<CXXMethodDecl>(D))
+ const auto *D = cast<ValueDecl>(ME->getMemberDecl()->getCanonicalDecl());
+ if (const auto *VD = dyn_cast<CXXMethodDecl>(D))
D = getFirstVirtualDecl(VD);
til::Project *P = new (Arena) til::Project(E, D);
@@ -356,7 +361,7 @@ til::SExpr *SExprBuilder::translateCallExpr(const CallExpr *CE,
LRCallCtx.SelfArg = SelfE;
LRCallCtx.NumArgs = CE->getNumArgs();
LRCallCtx.FunArgs = CE->getArgs();
- return const_cast<til::SExpr*>(
+ return const_cast<til::SExpr *>(
translateAttrExpr(At->getArg(), &LRCallCtx).sexpr());
}
}
@@ -407,10 +412,10 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
case UO_PreDec:
return new (Arena) til::Undefined(UO);
- case UO_AddrOf: {
+ case UO_AddrOf:
if (CapabilityExprMode) {
// interpret &Graph::mu_ as an existential.
- if (DeclRefExpr* DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(UO->getSubExpr())) {
if (DRE->getDecl()->isCXXInstanceMember()) {
// This is a pointer-to-member expression, e.g. &MyClass::mu_.
// We interpret this syntax specially, as a wildcard.
@@ -421,7 +426,6 @@ til::SExpr *SExprBuilder::translateUnaryOperator(const UnaryOperator *UO,
}
// otherwise, & is a no-op
return translate(UO->getSubExpr(), Ctx);
- }
// We treat these as no-ops
case UO_Deref:
@@ -470,7 +474,7 @@ til::SExpr *SExprBuilder::translateBinAssign(til::TIL_BinaryOpcode Op,
const ValueDecl *VD = nullptr;
til::SExpr *CV = nullptr;
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(LHS)) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(LHS)) {
VD = DRE->getDecl();
CV = lookupVarDecl(VD);
}
@@ -533,10 +537,10 @@ til::SExpr *SExprBuilder::translateBinaryOperator(const BinaryOperator *BO,
til::SExpr *SExprBuilder::translateCastExpr(const CastExpr *CE,
CallingContext *Ctx) {
- clang::CastKind K = CE->getCastKind();
+ CastKind K = CE->getCastKind();
switch (K) {
case CK_LValueToRValue: {
- if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(CE->getSubExpr())) {
til::SExpr *E0 = lookupVarDecl(DRE->getDecl());
if (E0)
return E0;
@@ -584,16 +588,15 @@ SExprBuilder::translateAbstractConditionalOperator(
til::SExpr *
SExprBuilder::translateDeclStmt(const DeclStmt *S, CallingContext *Ctx) {
DeclGroupRef DGrp = S->getDeclGroup();
- for (DeclGroupRef::iterator I = DGrp.begin(), E = DGrp.end(); I != E; ++I) {
- if (VarDecl *VD = dyn_cast_or_null<VarDecl>(*I)) {
+ for (auto I : DGrp) {
+ if (auto *VD = dyn_cast_or_null<VarDecl>(I)) {
Expr *E = VD->getInit();
til::SExpr* SE = translate(E, Ctx);
// Add local variables with trivial type to the variable map
QualType T = VD->getType();
- if (T.isTrivialType(VD->getASTContext())) {
+ if (T.isTrivialType(VD->getASTContext()))
return addVarDecl(VD, SE);
- }
else {
// TODO: add alloca
}
@@ -632,7 +635,7 @@ til::SExpr *SExprBuilder::lookupVarDecl(const ValueDecl *VD) {
static void maybeUpdateVD(til::SExpr *E, const ValueDecl *VD) {
if (!E)
return;
- if (til::Variable *V = dyn_cast<til::Variable>(E)) {
+ if (auto *V = dyn_cast<til::Variable>(E)) {
if (!V->clangDecl())
V->setClangDecl(VD);
}
@@ -672,7 +675,7 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
if (CurrE->block() == CurrentBB) {
// We already have a Phi node in the current block,
// so just add the new variable to the Phi node.
- til::Phi *Ph = dyn_cast<til::Phi>(CurrE);
+ auto *Ph = dyn_cast<til::Phi>(CurrE);
assert(Ph && "Expecting Phi node.");
if (E)
Ph->values()[ArgIndex] = E;
@@ -690,9 +693,8 @@ void SExprBuilder::makePhiNodeVar(unsigned i, unsigned NPreds, til::SExpr *E) {
Ph->setClangDecl(CurrentLVarMap[i].first);
// If E is from a back-edge, or either E or CurrE are incomplete, then
// mark this node as incomplete; we may need to remove it later.
- if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE)) {
+ if (!E || isIncompletePhi(E) || isIncompletePhi(CurrE))
Ph->setStatus(til::Phi::PH_Incomplete);
- }
// Add Phi node to current block, and update CurrentLVarMap[i]
CurrentArguments.push_back(Ph);
@@ -721,7 +723,7 @@ void SExprBuilder::mergeEntryMap(LVarDefinitionMap Map) {
unsigned MSz = Map.size();
unsigned Sz = std::min(ESz, MSz);
- for (unsigned i=0; i<Sz; ++i) {
+ for (unsigned i = 0; i < Sz; ++i) {
if (CurrentLVarMap[i].first != Map[i].first) {
// We've reached the end of variables in common.
CurrentLVarMap.makeWritable();
@@ -758,9 +760,8 @@ void SExprBuilder::mergeEntryMapBackEdge() {
unsigned Sz = CurrentLVarMap.size();
unsigned NPreds = CurrentBB->numPredecessors();
- for (unsigned i=0; i < Sz; ++i) {
+ for (unsigned i = 0; i < Sz; ++i)
makePhiNodeVar(i, NPreds, nullptr);
- }
}
// Update the phi nodes that were initially created for a back edge
@@ -772,7 +773,7 @@ void SExprBuilder::mergePhiNodesBackEdge(const CFGBlock *Blk) {
assert(ArgIndex > 0 && ArgIndex < BB->numPredecessors());
for (til::SExpr *PE : BB->arguments()) {
- til::Phi *Ph = dyn_cast_or_null<til::Phi>(PE);
+ auto *Ph = dyn_cast_or_null<til::Phi>(PE);
assert(Ph && "Expecting Phi Node.");
assert(Ph->values()[ArgIndex] == nullptr && "Wrong index for back edge.");
@@ -816,7 +817,7 @@ void SExprBuilder::enterCFG(CFG *Cfg, const NamedDecl *D,
}
void SExprBuilder::enterCFGBlock(const CFGBlock *B) {
- // Intialize TIL basic block and add it to the CFG.
+ // Initialize TIL basic block and add it to the CFG.
CurrentBB = lookupBlock(B);
CurrentBB->reservePredecessors(B->pred_size());
Scfg->add(CurrentBB);
@@ -891,7 +892,7 @@ void SExprBuilder::exitCFGBlockBody(const CFGBlock *B) {
til::BasicBlock *BB1 = *It ? lookupBlock(*It) : nullptr;
++It;
til::BasicBlock *BB2 = *It ? lookupBlock(*It) : nullptr;
- // FIXME: make sure these arent' critical edges.
+ // FIXME: make sure these aren't critical edges.
auto *Tm = new (Arena) til::Branch(C, BB1, BB2);
CurrentBB->setTerminator(Tm);
}
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index cd7cdc69ab73..798bbfb29d7b 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -1,4 +1,4 @@
-//===- ThreadSafetyTIL.cpp -------------------------------------*- C++ --*-===//
+//===- ThreadSafetyTIL.cpp ------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -8,7 +8,11 @@
//===----------------------------------------------------------------------===//
#include "clang/Analysis/Analyses/ThreadSafetyTIL.h"
-#include "clang/Analysis/Analyses/ThreadSafetyTraverse.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <cstddef>
+
using namespace clang;
using namespace threadSafety;
using namespace til;
@@ -19,7 +23,7 @@ StringRef til::getUnaryOpcodeString(TIL_UnaryOpcode Op) {
case UOP_BitNot: return "~";
case UOP_LogicNot: return "!";
}
- return "";
+ return {};
}
StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
@@ -42,10 +46,9 @@ StringRef til::getBinaryOpcodeString(TIL_BinaryOpcode Op) {
case BOP_LogicAnd: return "&&";
case BOP_LogicOr: return "||";
}
- return "";
+ return {};
}
-
SExpr* Future::force() {
Status = FS_evaluating;
Result = compute();
@@ -53,13 +56,12 @@ SExpr* Future::force() {
return Result;
}
-
unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
unsigned Idx = Predecessors.size();
Predecessors.reserveCheck(1, Arena);
Predecessors.push_back(Pred);
- for (SExpr *E : Args) {
- if (Phi* Ph = dyn_cast<Phi>(E)) {
+ for (auto *E : Args) {
+ if (auto *Ph = dyn_cast<Phi>(E)) {
Ph->values().reserveCheck(1, Arena);
Ph->values().push_back(nullptr);
}
@@ -67,28 +69,26 @@ unsigned BasicBlock::addPredecessor(BasicBlock *Pred) {
return Idx;
}
-
void BasicBlock::reservePredecessors(unsigned NumPreds) {
Predecessors.reserve(NumPreds, Arena);
- for (SExpr *E : Args) {
- if (Phi* Ph = dyn_cast<Phi>(E)) {
+ for (auto *E : Args) {
+ if (auto *Ph = dyn_cast<Phi>(E)) {
Ph->values().reserve(NumPreds, Arena);
}
}
}
-
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
const SExpr *til::getCanonicalVal(const SExpr *E) {
while (true) {
- if (auto *V = dyn_cast<Variable>(E)) {
+ if (const auto *V = dyn_cast<Variable>(E)) {
if (V->kind() == Variable::VK_Let) {
E = V->definition();
continue;
}
}
- if (const Phi *Ph = dyn_cast<Phi>(E)) {
+ if (const auto *Ph = dyn_cast<Phi>(E)) {
if (Ph->status() == Phi::PH_SingleVal) {
E = Ph->values()[0];
continue;
@@ -99,7 +99,6 @@ const SExpr *til::getCanonicalVal(const SExpr *E) {
return E;
}
-
// If E is a variable, then trace back through any aliases or redundant
// Phi nodes to find the canonical definition.
// The non-const version will simplify incomplete Phi nodes.
@@ -129,7 +128,6 @@ SExpr *til::simplifyToCanonicalVal(SExpr *E) {
}
}
-
// Trace the arguments of an incomplete Phi node to see if they have the same
// canonical definition. If so, mark the Phi node as redundant.
// getCanonicalVal() will recursively call simplifyIncompletePhi().
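
As a rough illustration of the redundancy test described in the comment above (with a hypothetical value node, not the TIL Phi class): a Phi whose incoming values all canonicalize to one definition forwards a single value and can be collapsed to it.

#include <vector>

// Hypothetical SSA value; a Phi node merges one incoming value per predecessor.
struct Value {
  std::vector<Value *> Incoming;  // non-empty only for Phi nodes
};

// Returns the single value the Phi forwards, or nullptr if it genuinely
// merges distinct values. Self-references are skipped, mirroring the
// "recursive reference to itself" case handled by simplifyIncompleteArg.
static Value *singleForwardedValue(Value *Phi) {
  Value *Only = nullptr;
  for (Value *V : Phi->Incoming) {
    if (V == Phi)
      continue;          // reference back to the Phi itself, don't count
    if (!Only)
      Only = V;
    else if (V != Only)
      return nullptr;    // two distinct incoming values: a real merge
  }
  return Only;
}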
@@ -140,7 +138,7 @@ void til::simplifyIncompleteArg(til::Phi *Ph) {
Ph->setStatus(Phi::PH_MultiVal);
SExpr *E0 = simplifyToCanonicalVal(Ph->values()[0]);
- for (unsigned i=1, n=Ph->values().size(); i<n; ++i) {
+ for (unsigned i = 1, n = Ph->values().size(); i < n; ++i) {
SExpr *Ei = simplifyToCanonicalVal(Ph->values()[i]);
if (Ei == Ph)
continue; // Recursive reference to itself. Don't count.
@@ -151,7 +149,6 @@ void til::simplifyIncompleteArg(til::Phi *Ph) {
Ph->setStatus(Phi::PH_SingleVal);
}
-
// Renumbers the arguments and instructions to have unique, sequential IDs.
int BasicBlock::renumberInstrs(int ID) {
for (auto *Arg : Args)
@@ -166,7 +163,7 @@ int BasicBlock::renumberInstrs(int ID) {
// Each block will be written into the Blocks array in order, and its BlockID
// will be set to the index in the array. Sorting should start from the entry
// block, and ID should be the total number of blocks.
-int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
+int BasicBlock::topologicalSort(SimpleArray<BasicBlock *> &Blocks, int ID) {
if (Visited) return ID;
Visited = true;
for (auto *Block : successors())
@@ -258,7 +255,6 @@ void BasicBlock::computePostDominator() {
PostDominatorNode.SizeOfSubTree = 1;
}
-
// Renumber instructions in all blocks
void SCFG::renumberInstrs() {
int InstrID = 0;
@@ -266,7 +262,6 @@ void SCFG::renumberInstrs() {
InstrID = Block->renumberInstrs(InstrID);
}
-
static inline void computeNodeSize(BasicBlock *B,
BasicBlock::TopologyNode BasicBlock::*TN) {
BasicBlock::TopologyNode *N = &(B->*TN);
@@ -287,7 +282,6 @@ static inline void computeNodeID(BasicBlock *B,
}
}
-
// Normalizes a CFG. Normalization has a few major components:
// 1) Removing unreachable blocks.
// 2) Computing dominators and post-dominators
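
The topologicalSort routine touched earlier in this file numbers blocks so that each reachable block precedes its successors, filling the block array from the back so the entry block lands at index 0. A generic sketch of that scheme, using a hypothetical Node type rather than the TIL BasicBlock API:

#include <vector>

struct Node {
  std::vector<Node *> Succs;
  bool Visited = false;
  int ID = -1;
};

// Depth-first: number successors first, then claim the next free slot from
// the back of the array, so every block is placed before its successors.
static int topoSort(Node *N, std::vector<Node *> &Order, int NextID) {
  if (N->Visited)
    return NextID;
  N->Visited = true;
  for (Node *S : N->Succs)
    NextID = topoSort(S, Order, NextID);
  N->ID = --NextID;
  Order[N->ID] = N;
  return NextID;
}

Starting the sort from the entry block with NextID equal to the total block count leaves any block unreachable from the entry with ID == -1 and no slot in the array, which is one way a normalization pass could identify and drop such blocks.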
diff --git a/lib/Analysis/UninitializedValues.cpp b/lib/Analysis/UninitializedValues.cpp
index 5f11d8a2a36b..63353292349b 100644
--- a/lib/Analysis/UninitializedValues.cpp
+++ b/lib/Analysis/UninitializedValues.cpp
@@ -1,4 +1,4 @@
-//==- UninitializedValues.cpp - Find Uninitialized Values -------*- C++ --*-==//
+//===- UninitializedValues.cpp - Find Uninitialized Values ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,23 +11,31 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/AST/ASTContext.h"
+#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
-#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclBase.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/Stmt.h"
+#include "clang/AST/StmtObjC.h"
#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
-#include "clang/Analysis/Analyses/UninitializedValues.h"
#include "clang/Analysis/AnalysisDeclContext.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/DomainSpecific/ObjCNoReturn.h"
+#include "clang/Basic/LLVM.h"
+#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/PackedVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/Support/SaveAndRestore.h"
-#include <utility>
+#include "llvm/Support/Casting.h"
+#include <algorithm>
+#include <cassert>
using namespace clang;
@@ -48,10 +56,12 @@ static bool isTrackedVar(const VarDecl *vd, const DeclContext *dc) {
//====------------------------------------------------------------------------//
namespace {
+
class DeclToIndex {
llvm::DenseMap<const VarDecl *, unsigned> map;
+
public:
- DeclToIndex() {}
+ DeclToIndex() = default;
/// Compute the actual mapping from declarations to bits.
void computeMap(const DeclContext &dc);
@@ -62,7 +72,8 @@ public:
/// Returns the bit vector index for a given declaration.
Optional<unsigned> getValueIndex(const VarDecl *d) const;
};
-}
+
+} // namespace
void DeclToIndex::computeMap(const DeclContext &dc) {
unsigned count = 0;
@@ -96,25 +107,28 @@ enum Value { Unknown = 0x0, /* 00 */
static bool isUninitialized(const Value v) {
return v >= Uninitialized;
}
+
static bool isAlwaysUninit(const Value v) {
return v == Uninitialized;
}
namespace {
-typedef llvm::PackedVector<Value, 2, llvm::SmallBitVector> ValueVector;
+using ValueVector = llvm::PackedVector<Value, 2, llvm::SmallBitVector>;
class CFGBlockValues {
const CFG &cfg;
SmallVector<ValueVector, 8> vals;
ValueVector scratch;
DeclToIndex declToIndex;
+
public:
CFGBlockValues(const CFG &cfg);
unsigned getNumEntries() const { return declToIndex.size(); }
void computeSetOfDeclarations(const DeclContext &dc);
+
ValueVector &getValueVector(const CFGBlock *block) {
return vals[block->getBlockID()];
}
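
The checks above build on the two-bit encoding visible in the hunk header (Unknown = 0x0, with Initialized, Uninitialized, and MayUninitialized filling the remaining codes), which is what makes a PackedVector of width 2 a natural per-block store. A small sketch of how such an encoding lets the merge of two predecessor states be a plain bitwise OR; treating OR as the exact join used by this pass is an assumption made for illustration:

enum Value {
  Unknown = 0x0,          // 00: no information yet
  Initialized = 0x1,      // 01: definitely initialized on all paths
  Uninitialized = 0x2,    // 10: definitely uninitialized
  MayUninitialized = 0x3  // 11: uninitialized on at least one path
};

// Joining facts from two predecessors by bitwise OR:
// Initialized (01) | Uninitialized (10) == MayUninitialized (11).
static Value merge(Value a, Value b) {
  return static_cast<Value>(a | b);
}

static bool isUninitialized(Value v) { return v >= Uninitialized; }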
@@ -138,7 +152,8 @@ public:
return getValueVector(block)[idx.getValue()];
}
};
-} // end anonymous namespace
+
+} // namespace
CFGBlockValues::CFGBlockValues(const CFG &c) : cfg(c), vals(0) {}
@@ -150,17 +165,16 @@ void CFGBlockValues::computeSetOfDeclarations(const DeclContext &dc) {
if (!n)
return;
vals.resize(n);
- for (unsigned i = 0; i < n; ++i)
- vals[i].resize(decls);
+ for (auto &val : vals)
+ val.resize(decls);
}
#if DEBUG_LOGGING
static void printVector(const CFGBlock *block, ValueVector &bv,
unsigned num) {
llvm::errs() << block->getBlockID() << " :";
- for (unsigned i = 0; i < bv.size(); ++i) {
- llvm::errs() << ' ' << bv[i];
- }
+ for (const auto &i : bv)
+ llvm::errs() << ' ' << i;
llvm::errs() << " : " << num << '\n';
}
#endif
@@ -204,28 +218,31 @@ ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
//====------------------------------------------------------------------------//
namespace {
+
class DataflowWorklist {
PostOrderCFGView::iterator PO_I, PO_E;
SmallVector<const CFGBlock *, 20> worklist;
llvm::BitVector enqueuedBlocks;
+
public:
DataflowWorklist(const CFG &cfg, PostOrderCFGView &view)
- : PO_I(view.begin()), PO_E(view.end()),
- enqueuedBlocks(cfg.getNumBlockIDs(), true) {
- // Treat the first block as already analyzed.
- if (PO_I != PO_E) {
- assert(*PO_I == &cfg.getEntry());
- enqueuedBlocks[(*PO_I)->getBlockID()] = false;
- ++PO_I;
- }
- }
+ : PO_I(view.begin()), PO_E(view.end()),
+ enqueuedBlocks(cfg.getNumBlockIDs(), true) {
+ // Treat the first block as already analyzed.
+ if (PO_I != PO_E) {
+ assert(*PO_I == &cfg.getEntry());
+ enqueuedBlocks[(*PO_I)->getBlockID()] = false;
+ ++PO_I;
+ }
+ }
void enqueueSuccessors(const CFGBlock *block);
const CFGBlock *dequeue();
};
-}
-void DataflowWorklist::enqueueSuccessors(const clang::CFGBlock *block) {
+} // namespace
+
+void DataflowWorklist::enqueueSuccessors(const CFGBlock *block) {
for (CFGBlock::const_succ_iterator I = block->succ_begin(),
E = block->succ_end(); I != E; ++I) {
const CFGBlock *Successor = *I;
@@ -250,9 +267,8 @@ const CFGBlock *DataflowWorklist::dequeue() {
B = *PO_I;
++PO_I;
}
- else {
+ else
return nullptr;
- }
assert(enqueuedBlocks[B->getBlockID()] == true);
enqueuedBlocks[B->getBlockID()] = false;
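
The worklist above drains blocks in the post-order view's order and re-enqueues successors of any block whose values changed, until nothing changes. A generic fixed-point loop of that shape, with a hypothetical block/transfer-function interface rather than the CFGBlock API:

#include <deque>
#include <vector>

// Hypothetical block holding the indices of its successors.
struct Blk {
  std::vector<int> Succs;
};

// Transfer(B) runs the per-block transfer function and returns true if the
// block's output vector changed since the last time it was evaluated.
template <typename TransferFn>
void runToFixedPoint(const std::vector<Blk> &Blocks, TransferFn Transfer) {
  std::deque<int> Worklist;
  std::vector<bool> Enqueued(Blocks.size(), true);
  for (int i = 0, n = static_cast<int>(Blocks.size()); i < n; ++i)
    Worklist.push_back(i);             // seed with every block once

  while (!Worklist.empty()) {
    int B = Worklist.front();
    Worklist.pop_front();
    Enqueued[B] = false;
    if (!Transfer(B))                  // no change: successors need no rerun
      continue;
    for (int S : Blocks[B].Succs)      // changed: revisit each successor
      if (!Enqueued[S]) {
        Enqueued[S] = true;
        Worklist.push_back(S);
      }
  }
}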
@@ -264,9 +280,11 @@ const CFGBlock *DataflowWorklist::dequeue() {
//====------------------------------------------------------------------------//
namespace {
+
class FindVarResult {
const VarDecl *vd;
const DeclRefExpr *dr;
+
public:
FindVarResult(const VarDecl *vd, const DeclRefExpr *dr) : vd(vd), dr(dr) {}
@@ -274,10 +292,12 @@ public:
const VarDecl *getDecl() const { return vd; }
};
+} // namespace
+
static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
while (Ex) {
Ex = Ex->IgnoreParenNoopCasts(C);
- if (const CastExpr *CE = dyn_cast<CastExpr>(Ex)) {
+ if (const auto *CE = dyn_cast<CastExpr>(Ex)) {
if (CE->getCastKind() == CK_LValueBitCast) {
Ex = CE->getSubExpr();
continue;
@@ -291,15 +311,17 @@ static const Expr *stripCasts(ASTContext &C, const Expr *Ex) {
/// If E is an expression comprising a reference to a single variable, find that
/// variable.
static FindVarResult findVar(const Expr *E, const DeclContext *DC) {
- if (const DeclRefExpr *DRE =
- dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
- if (const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl()))
+ if (const auto *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(DC->getParentASTContext(), E)))
+ if (const auto *VD = dyn_cast<VarDecl>(DRE->getDecl()))
if (isTrackedVar(VD, DC))
return FindVarResult(VD, DRE);
return FindVarResult(nullptr, nullptr);
}
-/// \brief Classify each DeclRefExpr as an initialization or a use. Any
+namespace {
+
+/// Classify each DeclRefExpr as an initialization or a use. Any
/// DeclRefExpr which isn't explicitly classified will be assumed to have
/// escaped the analysis and will be treated as an initialization.
class ClassifyRefs : public StmtVisitor<ClassifyRefs> {
@@ -313,7 +335,7 @@ public:
private:
const DeclContext *DC;
- llvm::DenseMap<const DeclRefExpr*, Class> Classification;
+ llvm::DenseMap<const DeclRefExpr *, Class> Classification;
bool isTrackedVar(const VarDecl *VD) const {
return ::isTrackedVar(VD, DC);
@@ -338,21 +360,22 @@ public:
if (I != Classification.end())
return I->second;
- const VarDecl *VD = dyn_cast<VarDecl>(DRE->getDecl());
+ const auto *VD = dyn_cast<VarDecl>(DRE->getDecl());
if (!VD || !isTrackedVar(VD))
return Ignore;
return Init;
}
};
-}
+
+} // namespace
static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
if (VD->getType()->isRecordType())
return nullptr;
if (Expr *Init = VD->getInit()) {
- const DeclRefExpr *DRE
- = dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
+ const auto *DRE =
+ dyn_cast<DeclRefExpr>(stripCasts(VD->getASTContext(), Init));
if (DRE && DRE->getDecl() == VD)
return DRE;
}
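
getSelfInitExpr above recognizes the classic self-initialization bug, where a declaration's initializer is just a reference (possibly through no-op casts) to the variable being declared, so the variable never receives a real value. For example:

int f() {
  int x = x;   // initializer is a reference to 'x' itself
  return x;    // 'x' is effectively read uninitialized here
}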
@@ -362,32 +385,31 @@ static const DeclRefExpr *getSelfInitExpr(VarDecl *VD) {
void ClassifyRefs::classify(const Expr *E, Class C) {
// The result of a ?: could also be an lvalue.
E = E->IgnoreParens();
- if (const ConditionalOperator *CO = dyn_cast<ConditionalOperator>(E)) {
+ if (const auto *CO = dyn_cast<ConditionalOperator>(E)) {
classify(CO->getTrueExpr(), C);
classify(CO->getFalseExpr(), C);
return;
}
- if (const BinaryConditionalOperator *BCO =
- dyn_cast<BinaryConditionalOperator>(E)) {
+ if (const auto *BCO = dyn_cast<BinaryConditionalOperator>(E)) {
classify(BCO->getFalseExpr(), C);
return;
}
- if (const OpaqueValueExpr *OVE = dyn_cast<OpaqueValueExpr>(E)) {
+ if (const auto *OVE = dyn_cast<OpaqueValueExpr>(E)) {
classify(OVE->getSourceExpr(), C);
return;
}
- if (const MemberExpr *ME = dyn_cast<MemberExpr>(E)) {
- if (VarDecl *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ if (const auto *VD = dyn_cast<VarDecl>(ME->getMemberDecl())) {
if (!VD->isStaticDataMember())
classify(ME->getBase(), C);
}
return;
}
- if (const BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
+ if (const auto *BO = dyn_cast<BinaryOperator>(E)) {
switch (BO->getOpcode()) {
case BO_PtrMemD:
case BO_PtrMemI:
@@ -408,7 +430,7 @@ void ClassifyRefs::classify(const Expr *E, Class C) {
void ClassifyRefs::VisitDeclStmt(DeclStmt *DS) {
for (auto *DI : DS->decls()) {
- VarDecl *VD = dyn_cast<VarDecl>(DI);
+ auto *VD = dyn_cast<VarDecl>(DI);
if (VD && isTrackedVar(VD))
if (const DeclRefExpr *DRE = getSelfInitExpr(VD))
Classification[DRE] = SelfInit;
@@ -457,7 +479,7 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
classify((*I), Ignore);
} else if (isPointerToConst((*I)->getType())) {
const Expr *Ex = stripCasts(DC->getParentASTContext(), *I);
- const UnaryOperator *UO = dyn_cast<UnaryOperator>(Ex);
+ const auto *UO = dyn_cast<UnaryOperator>(Ex);
if (UO && UO->getOpcode() == UO_AddrOf)
Ex = UO->getSubExpr();
classify(Ex, Ignore);
@@ -468,7 +490,7 @@ void ClassifyRefs::VisitCallExpr(CallExpr *CE) {
void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
if (CE->getCastKind() == CK_LValueToRValue)
classify(CE->getSubExpr(), Use);
- else if (CStyleCastExpr *CSE = dyn_cast<CStyleCastExpr>(CE)) {
+ else if (const auto *CSE = dyn_cast<CStyleCastExpr>(CE)) {
if (CSE->getType()->isVoidType()) {
// Squelch any detected load of an uninitialized value if
// we cast it to void.
@@ -483,6 +505,7 @@ void ClassifyRefs::VisitCastExpr(CastExpr *CE) {
//====------------------------------------------------------------------------//
namespace {
+
class TransferFunctions : public StmtVisitor<TransferFunctions> {
CFGBlockValues &vals;
const CFG &cfg;
@@ -497,9 +520,9 @@ public:
const CFGBlock *block, AnalysisDeclContext &ac,
const ClassifyRefs &classification,
UninitVariablesHandler &handler)
- : vals(vals), cfg(cfg), block(block), ac(ac),
- classification(classification), objCNoRet(ac.getASTContext()),
- handler(handler) {}
+ : vals(vals), cfg(cfg), block(block), ac(ac),
+ classification(classification), objCNoRet(ac.getASTContext()),
+ handler(handler) {}
void reportUse(const Expr *ex, const VarDecl *vd);
@@ -627,8 +650,7 @@ public:
// Scan the frontier, looking for blocks where the variable was
// uninitialized.
- for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
- const CFGBlock *Block = *BI;
+ for (const auto *Block : cfg) {
unsigned BlockID = Block->getBlockID();
const Stmt *Term = Block->getTerminator();
if (SuccsVisited[BlockID] && SuccsVisited[BlockID] < Block->succ_size() &&
@@ -668,7 +690,8 @@ public:
return Use;
}
};
-}
+
+} // namespace
void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
Value v = vals[vd];
@@ -678,8 +701,8 @@ void TransferFunctions::reportUse(const Expr *ex, const VarDecl *vd) {
void TransferFunctions::VisitObjCForCollectionStmt(ObjCForCollectionStmt *FS) {
// This represents an initialization of the 'element' value.
- if (DeclStmt *DS = dyn_cast<DeclStmt>(FS->getElement())) {
- const VarDecl *VD = cast<VarDecl>(DS->getSingleDecl());
+ if (const auto *DS = dyn_cast<DeclStmt>(FS->getElement())) {
+ const auto *VD = cast<VarDecl>(DS->getSingleDecl());
if (isTrackedVar(VD))
vals[VD] = Initialized;
}
@@ -748,7 +771,7 @@ void TransferFunctions::VisitBinaryOperator(BinaryOperator *BO) {
void TransferFunctions::VisitDeclStmt(DeclStmt *DS) {
for (auto *DI : DS->decls()) {
- VarDecl *VD = dyn_cast<VarDecl>(DI);
+ auto *VD = dyn_cast<VarDecl>(DI);
if (VD && isTrackedVar(VD)) {
if (getSelfInitExpr(VD)) {
// If the initializer consists solely of a reference to itself, we
@@ -815,34 +838,32 @@ static bool runOnBlock(const CFGBlock *block, const CFG &cfg,
}
// Apply the transfer function.
TransferFunctions tf(vals, cfg, block, ac, classification, handler);
- for (CFGBlock::const_iterator I = block->begin(), E = block->end();
- I != E; ++I) {
- if (Optional<CFGStmt> cs = I->getAs<CFGStmt>())
- tf.Visit(const_cast<Stmt*>(cs->getStmt()));
+ for (const auto &I : *block) {
+ if (Optional<CFGStmt> cs = I.getAs<CFGStmt>())
+ tf.Visit(const_cast<Stmt *>(cs->getStmt()));
}
return vals.updateValueVectorWithScratch(block);
}
+namespace {
+
/// PruneBlocksHandler is a special UninitVariablesHandler that is used
/// to detect when a CFGBlock has any *potential* use of an uninitialized
/// variable. It is mainly used to prune out work during the final
/// reporting pass.
-namespace {
struct PruneBlocksHandler : public UninitVariablesHandler {
- PruneBlocksHandler(unsigned numBlocks)
- : hadUse(numBlocks, false), hadAnyUse(false),
- currentBlock(0) {}
-
- ~PruneBlocksHandler() override {}
-
/// Records if a CFGBlock had a potential use of an uninitialized variable.
llvm::BitVector hadUse;
/// Records if any CFGBlock had a potential use of an uninitialized variable.
- bool hadAnyUse;
+ bool hadAnyUse = false;
/// The current block to scribble use information.
- unsigned currentBlock;
+ unsigned currentBlock = 0;
+
+ PruneBlocksHandler(unsigned numBlocks) : hadUse(numBlocks, false) {}
+
+ ~PruneBlocksHandler() override = default;
void handleUseOfUninitVariable(const VarDecl *vd,
const UninitUse &use) override {
@@ -858,7 +879,8 @@ struct PruneBlocksHandler : public UninitVariablesHandler {
hadAnyUse = true;
}
};
-}
+
+} // namespace
void clang::runUninitializedVariablesAnalysis(
const DeclContext &dc,
@@ -881,7 +903,7 @@ void clang::runUninitializedVariablesAnalysis(
const CFGBlock &entry = cfg.getEntry();
ValueVector &vec = vals.getValueVector(&entry);
const unsigned n = vals.getNumEntries();
- for (unsigned j = 0; j < n ; ++j) {
+ for (unsigned j = 0; j < n; ++j) {
vec[j] = Uninitialized;
}
@@ -909,13 +931,11 @@ void clang::runUninitializedVariablesAnalysis(
return;
// Run through the blocks one more time, and report uninitialized variables.
- for (CFG::const_iterator BI = cfg.begin(), BE = cfg.end(); BI != BE; ++BI) {
- const CFGBlock *block = *BI;
+ for (const auto *block : cfg)
if (PBH.hadUse[block->getBlockID()]) {
runOnBlock(block, cfg, ac, vals, classification, wasAnalyzed, handler);
++stats.NumBlockVisits;
}
- }
}
-UninitVariablesHandler::~UninitVariablesHandler() {}
+UninitVariablesHandler::~UninitVariablesHandler() = default;
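
The PruneBlocksHandler flags drive the reporting pass shown above: the fixed-point pass records, per block, whether any potential uninitialized use was seen, and only those blocks are re-run with the real diagnostic handler. A rough sketch of that prune-then-report pattern, with a hypothetical RunBlock callback standing in for runOnBlock and with the fixed-point iteration of the first pass elided:

#include <functional>
#include <vector>

// First pass: evaluate every block with reporting disabled, recording which
// blocks produced a potential use. Second pass: rerun only flagged blocks,
// this time emitting diagnostics.
void pruneAndReport(unsigned NumBlocks,
                    const std::function<bool(unsigned, bool Report)> &RunBlock) {
  std::vector<bool> HadUse(NumBlocks, false);
  bool AnyUse = false;
  for (unsigned B = 0; B < NumBlocks; ++B)
    if (RunBlock(B, /*Report=*/false)) {
      HadUse[B] = true;
      AnyUse = true;
    }
  if (!AnyUse)
    return;                            // nothing to report, skip second pass
  for (unsigned B = 0; B < NumBlocks; ++B)
    if (HadUse[B])
      RunBlock(B, /*Report=*/true);
}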