author    Roman Divacky <rdivacky@FreeBSD.org>  2010-04-02 08:55:10 +0000
committer Roman Divacky <rdivacky@FreeBSD.org>  2010-04-02 08:55:10 +0000
commit    11d2b2d2bb706fca0656f2760839721bb7f6cb6f (patch)
tree      d374cdca417e76f1bf101f139dba2db1d10ee8f7 /lib
parent    c0c7bca4e5b8d12699dc93a0da49e9e4bb79671b (diff)
Update clang to r100181.
Notes: svn path=/vendor/clang/dist/; revision=206084
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 194
-rw-r--r--  lib/AST/ASTImporter.cpp | 22
-rw-r--r--  lib/AST/CMakeLists.txt | 4
-rw-r--r--  lib/AST/CXXInheritance.cpp | 237
-rw-r--r--  lib/AST/Decl.cpp | 6
-rw-r--r--  lib/AST/DeclBase.cpp | 96
-rw-r--r--  lib/AST/DeclCXX.cpp | 133
-rw-r--r--  lib/AST/Expr.cpp | 62
-rw-r--r--  lib/AST/Type.cpp | 36
-rw-r--r--  lib/AST/TypePrinter.cpp | 22
-rw-r--r--  lib/Analysis/AnalysisContext.cpp | 28
-rw-r--r--  lib/Analysis/CFG.cpp | 2
-rw-r--r--  lib/Analysis/PrintfFormatString.cpp | 2
-rw-r--r--  lib/Basic/Diagnostic.cpp | 85
-rw-r--r--  lib/Basic/SourceManager.cpp | 20
-rw-r--r--  lib/Basic/Targets.cpp | 1
-rw-r--r--  lib/Checker/AdjustedReturnValueChecker.cpp | 4
-rw-r--r--  lib/Checker/AggExprVisitor.cpp | 55
-rw-r--r--  lib/Checker/ArrayBoundChecker.cpp | 4
-rw-r--r--  lib/Checker/AttrNonNullChecker.cpp | 4
-rw-r--r--  lib/Checker/BasicObjCFoundationChecks.cpp | 3
-rw-r--r--  lib/Checker/BugReporter.cpp | 6
-rw-r--r--  lib/Checker/BugReporterVisitors.cpp | 1
-rw-r--r--  lib/Checker/CFRefCount.cpp | 3
-rw-r--r--  lib/Checker/CMakeLists.txt | 17
-rw-r--r--  lib/Checker/CallAndMessageChecker.cpp | 6
-rw-r--r--  lib/Checker/CastToStructChecker.cpp | 1
-rw-r--r--  lib/Checker/CheckSecuritySyntaxOnly.cpp | 40
-rw-r--r--  lib/Checker/DereferenceChecker.cpp | 59
-rw-r--r--  lib/Checker/DivZeroChecker.cpp | 3
-rw-r--r--  lib/Checker/Environment.cpp | 5
-rw-r--r--  lib/Checker/FixedAddressChecker.cpp | 3
-rw-r--r--  lib/Checker/GRBlockCounter.cpp | 43
-rw-r--r--  lib/Checker/GRCoreEngine.cpp | 10
-rw-r--r--  lib/Checker/GRExprEngine.cpp | 139
-rw-r--r--  lib/Checker/GRState.cpp | 14
-rw-r--r--  lib/Checker/MacOSXAPIChecker.cpp | 2
-rw-r--r--  lib/Checker/MallocChecker.cpp | 1
-rw-r--r--  lib/Checker/MemRegion.cpp | 4
-rw-r--r--  lib/Checker/NSErrorChecker.cpp | 2
-rw-r--r--  lib/Checker/NoReturnFunctionChecker.cpp | 82
-rw-r--r--  lib/Checker/PathDiagnostic.cpp | 4
-rw-r--r--  lib/Checker/PointerArithChecker.cpp | 3
-rw-r--r--  lib/Checker/PointerSubChecker.cpp | 3
-rw-r--r--  lib/Checker/RegionStore.cpp | 108
-rw-r--r--  lib/Checker/ReturnPointerRangeChecker.cpp | 4
-rw-r--r--  lib/Checker/ReturnStackAddressChecker.cpp | 2
-rw-r--r--  lib/Checker/ReturnUndefChecker.cpp | 5
-rw-r--r--  lib/Checker/SymbolManager.cpp | 2
-rw-r--r--  lib/Checker/UndefBranchChecker.cpp | 1
-rw-r--r--  lib/Checker/UndefCapturedBlockVarChecker.cpp | 2
-rw-r--r--  lib/Checker/UndefResultChecker.cpp | 2
-rw-r--r--  lib/Checker/UndefinedArraySubscriptChecker.cpp | 4
-rw-r--r--  lib/Checker/UndefinedAssignmentChecker.cpp | 40
-rw-r--r--  lib/Checker/UnixAPIChecker.cpp | 5
-rw-r--r--  lib/Checker/VLASizeChecker.cpp | 2
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 20
-rw-r--r--  lib/CodeGen/CGBlocks.h | 1
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 17
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 315
-rw-r--r--  lib/CodeGen/CGCall.cpp | 48
-rw-r--r--  lib/CodeGen/CGCall.h | 15
-rw-r--r--  lib/CodeGen/CGClass.cpp | 327
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 72
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 5
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 15
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 7
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 48
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 30
-rw-r--r--  lib/CodeGen/CGExprConstant.cpp | 11
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 9
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 27
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 6
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 23
-rw-r--r--  lib/CodeGen/CGObjCRuntime.h | 1
-rw-r--r--  lib/CodeGen/CGRTTI.cpp | 156
-rw-r--r--  lib/CodeGen/CGRecordLayout.h | 95
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.cpp | 176
-rw-r--r--  lib/CodeGen/CGRecordLayoutBuilder.h | 142
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 3
-rw-r--r--  lib/CodeGen/CGTemporaries.cpp | 15
-rw-r--r--  lib/CodeGen/CGVTT.cpp | 608
-rw-r--r--  lib/CodeGen/CGVtable.cpp | 2089
-rw-r--r--  lib/CodeGen/CGVtable.h | 257
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 8
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 70
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 32
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 37
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 48
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 82
-rw-r--r--  lib/CodeGen/Mangle.cpp | 98
-rw-r--r--  lib/CodeGen/Mangle.h | 16
-rw-r--r--  lib/Driver/CMakeLists.txt | 2
-rw-r--r--  lib/Driver/Driver.cpp | 26
-rw-r--r--  lib/Driver/HostInfo.cpp | 10
-rw-r--r--  lib/Driver/Tools.cpp | 65
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 2
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 4
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 3
-rw-r--r--  lib/Frontend/DependencyFile.cpp | 9
-rw-r--r--  lib/Frontend/FixItRewriter.cpp | 14
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 4
-rw-r--r--  lib/Frontend/HTMLDiagnostics.cpp | 3
-rw-r--r--  lib/Frontend/InitHeaderSearch.cpp | 11
-rw-r--r--  lib/Frontend/PCHReader.cpp | 34
-rw-r--r--  lib/Frontend/PCHWriter.cpp | 15
-rw-r--r--  lib/Frontend/RewriteObjC.cpp | 51
-rw-r--r--  lib/Frontend/TextDiagnosticPrinter.cpp | 14
-rw-r--r--  lib/Headers/emmintrin.h | 426
-rw-r--r--  lib/Headers/mm_malloc.h | 9
-rw-r--r--  lib/Headers/mmintrin.h | 134
-rw-r--r--  lib/Headers/pmmintrin.h | 26
-rw-r--r--  lib/Headers/smmintrin.h | 128
-rw-r--r--  lib/Headers/tmmintrin.h | 60
-rw-r--r--  lib/Headers/wmmintrin.h | 67
-rw-r--r--  lib/Headers/xmmintrin.h | 240
-rw-r--r--  lib/Lex/Lexer.cpp | 3
-rw-r--r--  lib/Lex/PPDirectives.cpp | 6
-rw-r--r--  lib/Lex/PPLexerChange.cpp | 1
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 11
-rw-r--r--  lib/Lex/Preprocessor.cpp | 5
-rw-r--r--  lib/Lex/TokenConcatenation.cpp | 2
-rw-r--r--  lib/Parse/AttributeList.cpp | 1
-rw-r--r--  lib/Parse/DeclSpec.cpp | 4
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 25
-rw-r--r--  lib/Parse/ParseDecl.cpp | 10
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 11
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 4
-rw-r--r--  lib/Parse/ParseInit.cpp | 7
-rw-r--r--  lib/Parse/ParseObjc.cpp | 13
-rw-r--r--  lib/Parse/ParseStmt.cpp | 8
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 2
-rw-r--r--  lib/Parse/Parser.cpp | 10
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp | 110
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.h | 42
-rw-r--r--  lib/Sema/Lookup.h | 6
-rw-r--r--  lib/Sema/Sema.cpp | 27
-rw-r--r--  lib/Sema/Sema.h | 162
-rw-r--r--  lib/Sema/SemaAccess.cpp | 760
-rw-r--r--  lib/Sema/SemaCXXCast.cpp | 58
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 8
-rw-r--r--  lib/Sema/SemaChecking.cpp | 64
-rw-r--r--  lib/Sema/SemaDecl.cpp | 391
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 31
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 704
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 48
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 154
-rw-r--r--  lib/Sema/SemaExpr.cpp | 243
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 249
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 5
-rw-r--r--  lib/Sema/SemaInit.cpp | 86
-rw-r--r--  lib/Sema/SemaInit.h | 10
-rw-r--r--  lib/Sema/SemaLookup.cpp | 21
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 29
-rw-r--r--  lib/Sema/SemaStmt.cpp | 57
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 672
-rw-r--r--  lib/Sema/SemaTemplate.h | 34
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 273
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 131
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 316
-rw-r--r--  lib/Sema/SemaType.cpp | 84
-rw-r--r--  lib/Sema/TreeTransform.h | 126
163 files changed, 7485 insertions(+), 5603 deletions(-)
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index 7f5c9b1ec1e6..c77acce1bd06 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -45,7 +45,8 @@ ASTContext::ASTContext(const LangOptions& LOpts, SourceManager &SM,
sigjmp_bufDecl(0), BlockDescriptorType(0), BlockDescriptorExtendedType(0),
SourceMgr(SM), LangOpts(LOpts), FreeMemory(FreeMem), Target(t),
Idents(idents), Selectors(sels),
- BuiltinInfo(builtins), ExternalSource(0), PrintingPolicy(LOpts) {
+ BuiltinInfo(builtins), ExternalSource(0), PrintingPolicy(LOpts),
+ LastSDM(0, 0) {
ObjCIdRedefinitionType = QualType();
ObjCClassRedefinitionType = QualType();
ObjCSelRedefinitionType = QualType();
@@ -858,34 +859,22 @@ void ASTContext::CollectInheritedProtocols(const Decl *CDecl,
}
}
-unsigned ASTContext::CountProtocolSynthesizedIvars(const ObjCProtocolDecl *PD) {
- unsigned count = 0;
- for (ObjCContainerDecl::prop_iterator I = PD->prop_begin(),
- E = PD->prop_end(); I != E; ++I)
- if ((*I)->getPropertyIvarDecl())
- ++count;
-
- // Also look into nested protocols.
- for (ObjCProtocolDecl::protocol_iterator P = PD->protocol_begin(),
- E = PD->protocol_end(); P != E; ++P)
- count += CountProtocolSynthesizedIvars(*P);
- return count;
-}
-
-unsigned ASTContext::CountSynthesizedIvars(const ObjCInterfaceDecl *OI) {
- unsigned count = 0;
- for (ObjCInterfaceDecl::prop_iterator I = OI->prop_begin(),
- E = OI->prop_end(); I != E; ++I) {
- if ((*I)->getPropertyIvarDecl())
+unsigned ASTContext::CountNonClassIvars(const ObjCInterfaceDecl *OI) {
+ unsigned count = 0;
+ // Count ivars declared in class extension.
+ if (const ObjCCategoryDecl *CDecl = OI->getClassExtension()) {
+ for (ObjCCategoryDecl::ivar_iterator I = CDecl->ivar_begin(),
+ E = CDecl->ivar_end(); I != E; ++I) {
++count;
+ }
}
- // Also look into interface's protocol list for properties declared
- // in the protocol and whose ivars are synthesized.
- for (ObjCInterfaceDecl::protocol_iterator P = OI->protocol_begin(),
- PE = OI->protocol_end(); P != PE; ++P) {
- ObjCProtocolDecl *PD = (*P);
- count += CountProtocolSynthesizedIvars(PD);
- }
+
+ // Count ivar defined in this class's implementation. This
+ // includes synthesized ivars.
+ if (ObjCImplementationDecl *ImplDecl = OI->getImplementation())
+ for (ObjCImplementationDecl::ivar_iterator I = ImplDecl->ivar_begin(),
+ E = ImplDecl->ivar_end(); I != E; ++I)
+ ++count;
return count;
}
@@ -966,7 +955,7 @@ ASTContext::getObjCLayout(const ObjCInterfaceDecl *D,
// Add in synthesized ivar count if laying out an implementation.
if (Impl) {
- unsigned SynthCount = CountSynthesizedIvars(D);
+ unsigned SynthCount = CountNonClassIvars(D);
// If there aren't any synthesized ivars then reuse the interface
// entry. Note we can't cache this because we simply free all
// entries later; however we shouldn't look up implementations
@@ -1108,14 +1097,12 @@ QualType ASTContext::getObjCGCQualType(QualType T,
return getExtQualType(TypeNode, Quals);
}
-static QualType getNoReturnCallConvType(ASTContext& Context, QualType T,
- bool AddNoReturn,
- CallingConv CallConv) {
+static QualType getExtFunctionType(ASTContext& Context, QualType T,
+ const FunctionType::ExtInfo &Info) {
QualType ResultType;
if (const PointerType *Pointer = T->getAs<PointerType>()) {
QualType Pointee = Pointer->getPointeeType();
- ResultType = getNoReturnCallConvType(Context, Pointee, AddNoReturn,
- CallConv);
+ ResultType = getExtFunctionType(Context, Pointee, Info);
if (ResultType == Pointee)
return T;
@@ -1123,19 +1110,18 @@ static QualType getNoReturnCallConvType(ASTContext& Context, QualType T,
} else if (const BlockPointerType *BlockPointer
= T->getAs<BlockPointerType>()) {
QualType Pointee = BlockPointer->getPointeeType();
- ResultType = getNoReturnCallConvType(Context, Pointee, AddNoReturn,
- CallConv);
+ ResultType = getExtFunctionType(Context, Pointee, Info);
if (ResultType == Pointee)
return T;
ResultType = Context.getBlockPointerType(ResultType);
} else if (const FunctionType *F = T->getAs<FunctionType>()) {
- if (F->getNoReturnAttr() == AddNoReturn && F->getCallConv() == CallConv)
+ if (F->getExtInfo() == Info)
return T;
if (const FunctionNoProtoType *FNPT = dyn_cast<FunctionNoProtoType>(F)) {
ResultType = Context.getFunctionNoProtoType(FNPT->getResultType(),
- AddNoReturn, CallConv);
+ Info);
} else {
const FunctionProtoType *FPT = cast<FunctionProtoType>(F);
ResultType
@@ -1146,7 +1132,7 @@ static QualType getNoReturnCallConvType(ASTContext& Context, QualType T,
FPT->hasAnyExceptionSpec(),
FPT->getNumExceptions(),
FPT->exception_begin(),
- AddNoReturn, CallConv);
+ Info);
}
} else
return T;
@@ -1155,11 +1141,21 @@ static QualType getNoReturnCallConvType(ASTContext& Context, QualType T,
}
QualType ASTContext::getNoReturnType(QualType T, bool AddNoReturn) {
- return getNoReturnCallConvType(*this, T, AddNoReturn, T.getCallConv());
+ FunctionType::ExtInfo Info = getFunctionExtInfo(T);
+ return getExtFunctionType(*this, T,
+ Info.withNoReturn(AddNoReturn));
}
QualType ASTContext::getCallConvType(QualType T, CallingConv CallConv) {
- return getNoReturnCallConvType(*this, T, T.getNoReturnAttr(), CallConv);
+ FunctionType::ExtInfo Info = getFunctionExtInfo(T);
+ return getExtFunctionType(*this, T,
+ Info.withCallingConv(CallConv));
+}
+
+QualType ASTContext::getRegParmType(QualType T, unsigned RegParm) {
+ FunctionType::ExtInfo Info = getFunctionExtInfo(T);
+ return getExtFunctionType(*this, T,
+ Info.withRegParm(RegParm));
}
/// getComplexType - Return the uniqued reference to the type for a complex
@@ -1617,12 +1613,13 @@ QualType ASTContext::getDependentSizedExtVectorType(QualType vecType,
/// getFunctionNoProtoType - Return a K&R style C function type like 'int()'.
///
-QualType ASTContext::getFunctionNoProtoType(QualType ResultTy, bool NoReturn,
- CallingConv CallConv) {
+QualType ASTContext::getFunctionNoProtoType(QualType ResultTy,
+ const FunctionType::ExtInfo &Info) {
+ const CallingConv CallConv = Info.getCC();
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
- FunctionNoProtoType::Profile(ID, ResultTy, NoReturn, CallConv);
+ FunctionNoProtoType::Profile(ID, ResultTy, Info);
void *InsertPos = 0;
if (FunctionNoProtoType *FT =
@@ -1632,8 +1629,9 @@ QualType ASTContext::getFunctionNoProtoType(QualType ResultTy, bool NoReturn,
QualType Canonical;
if (!ResultTy.isCanonical() ||
getCanonicalCallConv(CallConv) != CallConv) {
- Canonical = getFunctionNoProtoType(getCanonicalType(ResultTy), NoReturn,
- getCanonicalCallConv(CallConv));
+ Canonical =
+ getFunctionNoProtoType(getCanonicalType(ResultTy),
+ Info.withCallingConv(getCanonicalCallConv(CallConv)));
// Get the new insert position for the node we care about.
FunctionNoProtoType *NewIP =
@@ -1642,7 +1640,7 @@ QualType ASTContext::getFunctionNoProtoType(QualType ResultTy, bool NoReturn,
}
FunctionNoProtoType *New = new (*this, TypeAlignment)
- FunctionNoProtoType(ResultTy, Canonical, NoReturn, CallConv);
+ FunctionNoProtoType(ResultTy, Canonical, Info);
Types.push_back(New);
FunctionNoProtoTypes.InsertNode(New, InsertPos);
return QualType(New, 0);
@@ -1654,14 +1652,15 @@ QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
unsigned NumArgs, bool isVariadic,
unsigned TypeQuals, bool hasExceptionSpec,
bool hasAnyExceptionSpec, unsigned NumExs,
- const QualType *ExArray, bool NoReturn,
- CallingConv CallConv) {
+ const QualType *ExArray,
+ const FunctionType::ExtInfo &Info) {
+ const CallingConv CallConv = Info.getCC();
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
FunctionProtoType::Profile(ID, ResultTy, ArgArray, NumArgs, isVariadic,
TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
- NumExs, ExArray, NoReturn, CallConv);
+ NumExs, ExArray, Info);
void *InsertPos = 0;
if (FunctionProtoType *FTP =
@@ -1686,8 +1685,8 @@ QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
Canonical = getFunctionType(getCanonicalType(ResultTy),
CanonicalArgs.data(), NumArgs,
isVariadic, TypeQuals, false,
- false, 0, 0, NoReturn,
- getCanonicalCallConv(CallConv));
+ false, 0, 0,
+ Info.withCallingConv(getCanonicalCallConv(CallConv)));
// Get the new insert position for the node we care about.
FunctionProtoType *NewIP =
@@ -1704,7 +1703,7 @@ QualType ASTContext::getFunctionType(QualType ResultTy,const QualType *ArgArray,
NumExs*sizeof(QualType), TypeAlignment);
new (FTP) FunctionProtoType(ResultTy, ArgArray, NumArgs, isVariadic,
TypeQuals, hasExceptionSpec, hasAnyExceptionSpec,
- ExArray, NumExs, Canonical, NoReturn, CallConv);
+ ExArray, NumExs, Canonical, Info);
Types.push_back(FTP);
FunctionProtoTypes.InsertNode(FTP, InsertPos);
return QualType(FTP, 0);
@@ -1963,66 +1962,76 @@ ASTContext::getQualifiedNameType(NestedNameSpecifier *NNS,
return QualType(T, 0);
}
-QualType ASTContext::getTypenameType(NestedNameSpecifier *NNS,
- const IdentifierInfo *Name,
- QualType Canon) {
+QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const IdentifierInfo *Name,
+ QualType Canon) {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
if (Canon.isNull()) {
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
- if (CanonNNS != NNS)
- Canon = getTypenameType(CanonNNS, Name);
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None)
+ CanonKeyword = ETK_Typename;
+
+ if (CanonNNS != NNS || CanonKeyword != Keyword)
+ Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
}
llvm::FoldingSetNodeID ID;
- TypenameType::Profile(ID, NNS, Name);
+ DependentNameType::Profile(ID, Keyword, NNS, Name);
void *InsertPos = 0;
- TypenameType *T
- = TypenameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentNameType *T
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
- T = new (*this) TypenameType(NNS, Name, Canon);
+ T = new (*this) DependentNameType(Keyword, NNS, Name, Canon);
Types.push_back(T);
- TypenameTypes.InsertNode(T, InsertPos);
+ DependentNameTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
QualType
-ASTContext::getTypenameType(NestedNameSpecifier *NNS,
- const TemplateSpecializationType *TemplateId,
- QualType Canon) {
+ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
+ NestedNameSpecifier *NNS,
+ const TemplateSpecializationType *TemplateId,
+ QualType Canon) {
assert(NNS->isDependent() && "nested-name-specifier must be dependent");
llvm::FoldingSetNodeID ID;
- TypenameType::Profile(ID, NNS, TemplateId);
+ DependentNameType::Profile(ID, Keyword, NNS, TemplateId);
void *InsertPos = 0;
- TypenameType *T
- = TypenameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentNameType *T
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
if (T)
return QualType(T, 0);
if (Canon.isNull()) {
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
QualType CanonType = getCanonicalType(QualType(TemplateId, 0));
- if (CanonNNS != NNS || CanonType != QualType(TemplateId, 0)) {
+ ElaboratedTypeKeyword CanonKeyword = Keyword;
+ if (Keyword == ETK_None)
+ CanonKeyword = ETK_Typename;
+ if (CanonNNS != NNS || CanonKeyword != Keyword ||
+ CanonType != QualType(TemplateId, 0)) {
const TemplateSpecializationType *CanonTemplateId
= CanonType->getAs<TemplateSpecializationType>();
assert(CanonTemplateId &&
"Canonical type must also be a template specialization type");
- Canon = getTypenameType(CanonNNS, CanonTemplateId);
+ Canon = getDependentNameType(CanonKeyword, CanonNNS, CanonTemplateId);
}
- TypenameType *CheckT
- = TypenameTypes.FindNodeOrInsertPos(ID, InsertPos);
+ DependentNameType *CheckT
+ = DependentNameTypes.FindNodeOrInsertPos(ID, InsertPos);
assert(!CheckT && "Typename canonical type is broken"); (void)CheckT;
}
- T = new (*this) TypenameType(NNS, TemplateId, Canon);
+ T = new (*this) DependentNameType(Keyword, NNS, TemplateId, Canon);
Types.push_back(T);
- TypenameTypes.InsertNode(T, InsertPos);
+ DependentNameTypes.InsertNode(T, InsertPos);
return QualType(T, 0);
}
@@ -4127,14 +4136,15 @@ bool ASTContext::canAssignObjCInterfaces(const ObjCObjectPointerType *LHSOPT,
bool ASTContext::canAssignObjCInterfacesInBlockPointer(
const ObjCObjectPointerType *LHSOPT,
const ObjCObjectPointerType *RHSOPT) {
- if (RHSOPT->isObjCBuiltinType())
+ if (RHSOPT->isObjCBuiltinType() ||
+ LHSOPT->isObjCIdType() || LHSOPT->isObjCQualifiedIdType())
return true;
if (LHSOPT->isObjCBuiltinType()) {
return RHSOPT->isObjCBuiltinType() || RHSOPT->isObjCQualifiedIdType();
}
- if (LHSOPT->isObjCQualifiedIdType() || RHSOPT->isObjCQualifiedIdType())
+ if (RHSOPT->isObjCQualifiedIdType())
return ObjCQualifiedIdTypesAreCompatible(QualType(LHSOPT,0),
QualType(RHSOPT,0),
false);
@@ -4315,13 +4325,22 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (getCanonicalType(retType) != getCanonicalType(rbase->getResultType()))
allRTypes = false;
// FIXME: double check this
- bool NoReturn = lbase->getNoReturnAttr() || rbase->getNoReturnAttr();
- if (NoReturn != lbase->getNoReturnAttr())
+ // FIXME: should we error if lbase->getRegParmAttr() != 0 &&
+ // rbase->getRegParmAttr() != 0 &&
+ // lbase->getRegParmAttr() != rbase->getRegParmAttr()?
+ FunctionType::ExtInfo lbaseInfo = lbase->getExtInfo();
+ FunctionType::ExtInfo rbaseInfo = rbase->getExtInfo();
+ unsigned RegParm = lbaseInfo.getRegParm() == 0 ? rbaseInfo.getRegParm() :
+ lbaseInfo.getRegParm();
+ bool NoReturn = lbaseInfo.getNoReturn() || rbaseInfo.getNoReturn();
+ if (NoReturn != lbaseInfo.getNoReturn() ||
+ RegParm != lbaseInfo.getRegParm())
allLTypes = false;
- if (NoReturn != rbase->getNoReturnAttr())
+ if (NoReturn != rbaseInfo.getNoReturn() ||
+ RegParm != rbaseInfo.getRegParm())
allRTypes = false;
- CallingConv lcc = lbase->getCallConv();
- CallingConv rcc = rbase->getCallConv();
+ CallingConv lcc = lbaseInfo.getCC();
+ CallingConv rcc = rbaseInfo.getCC();
// Compatible functions must have compatible calling conventions
if (!isSameCallConv(lcc, rcc))
return QualType();
@@ -4360,7 +4379,8 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (allRTypes) return rhs;
return getFunctionType(retType, types.begin(), types.size(),
lproto->isVariadic(), lproto->getTypeQuals(),
- false, false, 0, 0, NoReturn, lcc);
+ false, false, 0, 0,
+ FunctionType::ExtInfo(NoReturn, RegParm, lcc));
}
if (lproto) allRTypes = false;
@@ -4393,13 +4413,15 @@ QualType ASTContext::mergeFunctionTypes(QualType lhs, QualType rhs,
if (allRTypes) return rhs;
return getFunctionType(retType, proto->arg_type_begin(),
proto->getNumArgs(), proto->isVariadic(),
- proto->getTypeQuals(),
- false, false, 0, 0, NoReturn, lcc);
+ proto->getTypeQuals(),
+ false, false, 0, 0,
+ FunctionType::ExtInfo(NoReturn, RegParm, lcc));
}
if (allLTypes) return lhs;
if (allRTypes) return rhs;
- return getFunctionNoProtoType(retType, NoReturn, lcc);
+ FunctionType::ExtInfo Info(NoReturn, RegParm, lcc);
+ return getFunctionNoProtoType(retType, Info);
}
QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
@@ -4903,7 +4925,7 @@ QualType ASTContext::GetBuiltinType(unsigned id,
// FIXME: Should we create noreturn types?
return getFunctionType(ResType, ArgTypes.data(), ArgTypes.size(),
TypeStr[0] == '.', 0, false, false, 0, 0,
- false, CC_Default);
+ FunctionType::ExtInfo());
}
QualType
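
The recurring change in this file is the replacement of the loose (NoReturn, CallingConv, RegParm) parameters with a single FunctionType::ExtInfo bundle and its withNoReturn/withCallingConv/withRegParm builders. As a reference point, here is a minimal standalone sketch of that immutable-bundle pattern; the names mirror the diff but this is a simplification, not clang's actual class (which packs the fields into bits):

    // Sketch only: an immutable bundle of function-type attributes.
    #include <cassert>

    enum CallingConv { CC_Default, CC_C, CC_X86StdCall, CC_X86FastCall };

    class ExtInfo {
      bool NoReturn;
      unsigned RegParm;
      CallingConv CC;
    public:
      ExtInfo() : NoReturn(false), RegParm(0), CC(CC_Default) {}
      ExtInfo(bool NR, unsigned RP, CallingConv C)
        : NoReturn(NR), RegParm(RP), CC(C) {}

      bool getNoReturn() const { return NoReturn; }
      unsigned getRegParm() const { return RegParm; }
      CallingConv getCC() const { return CC; }

      // Each withX() copies the bundle with one facet changed, so helpers
      // like getExtFunctionType() thread one value instead of N parameters.
      ExtInfo withNoReturn(bool NR) const { return ExtInfo(NR, RegParm, CC); }
      ExtInfo withRegParm(unsigned RP) const { return ExtInfo(NoReturn, RP, CC); }
      ExtInfo withCallingConv(CallingConv C) const {
        return ExtInfo(NoReturn, RegParm, C);
      }

      bool operator==(const ExtInfo &O) const {
        return NoReturn == O.NoReturn && RegParm == O.RegParm && CC == O.CC;
      }
    };

    int main() {
      ExtInfo Info;                             // all defaults
      ExtInfo NR = Info.withNoReturn(true);     // one facet changed
      assert(NR.getNoReturn() && NR.getCC() == CC_Default);
      assert(!(Info == NR));
    }

Grouping the attributes this way also makes the single operator== comparison possible in places like ASTImporter and mergeFunctionTypes below, where the old code compared each attribute separately.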
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index dd2528a6b3b3..75cf1380a112 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -73,7 +73,7 @@ namespace {
// FIXME: SubstTemplateTypeParmType
// FIXME: TemplateSpecializationType
QualType VisitQualifiedNameType(QualifiedNameType *T);
- // FIXME: TypenameType
+ // FIXME: DependentNameType
QualType VisitObjCInterfaceType(ObjCInterfaceType *T);
QualType VisitObjCObjectPointerType(ObjCObjectPointerType *T);
@@ -484,10 +484,8 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Function1->getResultType(),
Function2->getResultType()))
return false;
- if (Function1->getNoReturnAttr() != Function2->getNoReturnAttr())
- return false;
- if (Function1->getCallConv() != Function2->getCallConv())
- return false;
+ if (Function1->getExtInfo() != Function2->getExtInfo())
+ return false;
break;
}
@@ -620,9 +618,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
break;
}
- case Type::Typename: {
- const TypenameType *Typename1 = cast<TypenameType>(T1);
- const TypenameType *Typename2 = cast<TypenameType>(T2);
+ case Type::DependentName: {
+ const DependentNameType *Typename1 = cast<DependentNameType>(T1);
+ const DependentNameType *Typename2 = cast<DependentNameType>(T2);
if (!IsStructurallyEquivalent(Context,
Typename1->getQualifier(),
Typename2->getQualifier()))
@@ -1200,10 +1198,9 @@ QualType ASTNodeImporter::VisitFunctionNoProtoType(FunctionNoProtoType *T) {
QualType ToResultType = Importer.Import(T->getResultType());
if (ToResultType.isNull())
return QualType();
-
+
return Importer.getToContext().getFunctionNoProtoType(ToResultType,
- T->getNoReturnAttr(),
- T->getCallConv());
+ T->getExtInfo());
}
QualType ASTNodeImporter::VisitFunctionProtoType(FunctionProtoType *T) {
@@ -1241,8 +1238,7 @@ QualType ASTNodeImporter::VisitFunctionProtoType(FunctionProtoType *T) {
T->hasAnyExceptionSpec(),
ExceptionTypes.size(),
ExceptionTypes.data(),
- T->getNoReturnAttr(),
- T->getCallConv());
+ T->getExtInfo());
}
QualType ASTNodeImporter::VisitTypedefType(TypedefType *T) {
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index 3408a1e3cc74..91aaddc9a481 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -9,6 +9,7 @@ add_clang_library(clangAST
AttrImpl.cpp
CXXInheritance.cpp
Decl.cpp
+ DeclarationName.cpp
DeclBase.cpp
DeclCXX.cpp
DeclFriend.cpp
@@ -16,10 +17,9 @@ add_clang_library(clangAST
DeclObjC.cpp
DeclPrinter.cpp
DeclTemplate.cpp
- DeclarationName.cpp
Expr.cpp
- ExprCXX.cpp
ExprConstant.cpp
+ ExprCXX.cpp
FullExpr.cpp
InheritViz.cpp
NestedNameSpecifier.cpp
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index 70f8ee4bca5e..a9f223045864 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -416,3 +416,240 @@ FindNestedNameSpecifierMember(const CXXBaseSpecifier *Specifier,
return false;
}
+
+void OverridingMethods::add(unsigned OverriddenSubobject,
+ UniqueVirtualMethod Overriding) {
+ llvm::SmallVector<UniqueVirtualMethod, 4> &SubobjectOverrides
+ = Overrides[OverriddenSubobject];
+ if (std::find(SubobjectOverrides.begin(), SubobjectOverrides.end(),
+ Overriding) == SubobjectOverrides.end())
+ SubobjectOverrides.push_back(Overriding);
+}
+
+void OverridingMethods::add(const OverridingMethods &Other) {
+ for (const_iterator I = Other.begin(), IE = Other.end(); I != IE; ++I) {
+ for (overriding_const_iterator M = I->second.begin(),
+ MEnd = I->second.end();
+ M != MEnd;
+ ++M)
+ add(I->first, *M);
+ }
+}
+
+void OverridingMethods::replaceAll(UniqueVirtualMethod Overriding) {
+ for (iterator I = begin(), IEnd = end(); I != IEnd; ++I) {
+ I->second.clear();
+ I->second.push_back(Overriding);
+ }
+}
+
+
+namespace {
+ class FinalOverriderCollector {
+ /// \brief The number of subobjects of a given class type that
+ /// occur within the class hierarchy.
+ llvm::DenseMap<const CXXRecordDecl *, unsigned> SubobjectCount;
+
+ /// \brief Overriders for each virtual base subobject.
+ llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *> VirtualOverriders;
+
+ CXXFinalOverriderMap FinalOverriders;
+
+ public:
+ ~FinalOverriderCollector();
+
+ void Collect(const CXXRecordDecl *RD, bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders);
+ };
+}
+
+void FinalOverriderCollector::Collect(const CXXRecordDecl *RD,
+ bool VirtualBase,
+ const CXXRecordDecl *InVirtualSubobject,
+ CXXFinalOverriderMap &Overriders) {
+ unsigned SubobjectNumber = 0;
+ if (!VirtualBase)
+ SubobjectNumber
+ = ++SubobjectCount[cast<CXXRecordDecl>(RD->getCanonicalDecl())];
+
+ for (CXXRecordDecl::base_class_const_iterator Base = RD->bases_begin(),
+ BaseEnd = RD->bases_end(); Base != BaseEnd; ++Base) {
+ if (const RecordType *RT = Base->getType()->getAs<RecordType>()) {
+ const CXXRecordDecl *BaseDecl = cast<CXXRecordDecl>(RT->getDecl());
+ if (!BaseDecl->isPolymorphic())
+ continue;
+
+ if (Overriders.empty() && !Base->isVirtual()) {
+ // There are no other overriders of virtual member functions,
+ // so let the base class fill in our overriders for us.
+ Collect(BaseDecl, false, InVirtualSubobject, Overriders);
+ continue;
+ }
+
+ // Collect all of the overriders from the base class subobject
+ // and merge them into the set of overriders for this class.
+ // For virtual base classes, populate or use the cached virtual
+ // overrides so that we do not walk the virtual base class (and
+ // its base classes) more than once.
+ CXXFinalOverriderMap ComputedBaseOverriders;
+ CXXFinalOverriderMap *BaseOverriders = &ComputedBaseOverriders;
+ if (Base->isVirtual()) {
+ CXXFinalOverriderMap *&MyVirtualOverriders = VirtualOverriders[BaseDecl];
+ if (!MyVirtualOverriders) {
+ MyVirtualOverriders = new CXXFinalOverriderMap;
+ Collect(BaseDecl, true, BaseDecl, *MyVirtualOverriders);
+ }
+
+ BaseOverriders = MyVirtualOverriders;
+ } else
+ Collect(BaseDecl, false, InVirtualSubobject, ComputedBaseOverriders);
+
+ // Merge the overriders from this base class into our own set of
+ // overriders.
+ for (CXXFinalOverriderMap::iterator OM = BaseOverriders->begin(),
+ OMEnd = BaseOverriders->end();
+ OM != OMEnd;
+ ++OM) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>(OM->first->getCanonicalDecl());
+ Overriders[CanonOM].add(OM->second);
+ }
+ }
+ }
+
+ for (CXXRecordDecl::method_iterator M = RD->method_begin(),
+ MEnd = RD->method_end();
+ M != MEnd;
+ ++M) {
+ // We only care about virtual methods.
+ if (!M->isVirtual())
+ continue;
+
+ CXXMethodDecl *CanonM = cast<CXXMethodDecl>(M->getCanonicalDecl());
+
+ if (CanonM->begin_overridden_methods()
+ == CanonM->end_overridden_methods()) {
+ // This is a new virtual function that does not override any
+ // other virtual function. Add it to the map of virtual
+ // functions for which we are tracking overriders.
+
+ // C++ [class.virtual]p2:
+ // For convenience we say that any virtual function overrides itself.
+ Overriders[CanonM].add(SubobjectNumber,
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ continue;
+ }
+
+ // This virtual method overrides other virtual methods, so it does
+ // not add any new slots into the set of overriders. Instead, we
+ // replace entries in the set of overriders with the new
+ // overrider. To do so, we dig down to the original virtual
+ // functions using data recursion and update all of the methods it
+ // overrides.
+ typedef std::pair<CXXMethodDecl::method_iterator,
+ CXXMethodDecl::method_iterator> OverriddenMethods;
+ llvm::SmallVector<OverriddenMethods, 4> Stack;
+ Stack.push_back(std::make_pair(CanonM->begin_overridden_methods(),
+ CanonM->end_overridden_methods()));
+ while (!Stack.empty()) {
+ OverriddenMethods OverMethods = Stack.back();
+ Stack.pop_back();
+
+ for (; OverMethods.first != OverMethods.second; ++OverMethods.first) {
+ const CXXMethodDecl *CanonOM
+ = cast<CXXMethodDecl>((*OverMethods.first)->getCanonicalDecl());
+ if (CanonOM->begin_overridden_methods()
+ == CanonOM->end_overridden_methods()) {
+ // C++ [class.virtual]p2:
+ // A virtual member function C::vf of a class object S is
+ // a final overrider unless the most derived class (1.8)
+ // of which S is a base class subobject (if any) declares
+ // or inherits another member function that overrides vf.
+ //
+ // Treating this object like the most derived class, we
+ // replace any overrides from base classes with this
+ // overriding virtual function.
+ Overriders[CanonOM].replaceAll(
+ UniqueVirtualMethod(CanonM, SubobjectNumber,
+ InVirtualSubobject));
+ continue;
+ }
+
+ // Continue recursion to the methods that this virtual method
+ // overrides.
+ Stack.push_back(std::make_pair(CanonOM->begin_overridden_methods(),
+ CanonOM->end_overridden_methods()));
+ }
+ }
+ }
+}
+
+FinalOverriderCollector::~FinalOverriderCollector() {
+ for (llvm::DenseMap<const CXXRecordDecl *, CXXFinalOverriderMap *>::iterator
+ VO = VirtualOverriders.begin(), VOEnd = VirtualOverriders.end();
+ VO != VOEnd;
+ ++VO)
+ delete VO->second;
+}
+
+void
+CXXRecordDecl::getFinalOverriders(CXXFinalOverriderMap &FinalOverriders) const {
+ FinalOverriderCollector Collector;
+ Collector.Collect(this, false, 0, FinalOverriders);
+
+ // Weed out any final overriders that come from virtual base class
+ // subobjects that were hidden by other subobjects along any path.
+ // This is the final-overrider variant of C++ [class.member.lookup]p10.
+ for (CXXFinalOverriderMap::iterator OM = FinalOverriders.begin(),
+ OMEnd = FinalOverriders.end();
+ OM != OMEnd;
+ ++OM) {
+ for (OverridingMethods::iterator SO = OM->second.begin(),
+ SOEnd = OM->second.end();
+ SO != SOEnd;
+ ++SO) {
+ llvm::SmallVector<UniqueVirtualMethod, 4> &Overriding = SO->second;
+ if (Overriding.size() < 2)
+ continue;
+
+ for (llvm::SmallVector<UniqueVirtualMethod, 4>::iterator
+ Pos = Overriding.begin(), PosEnd = Overriding.end();
+ Pos != PosEnd;
+ /* increment in loop */) {
+ if (!Pos->InVirtualSubobject) {
+ ++Pos;
+ continue;
+ }
+
+ // We have an overriding method in a virtual base class
+ // subobject (or non-virtual base class subobject thereof);
+ // determine whether there exists another overriding method
+ // in a base class subobject that hides the virtual base class
+ // subobject.
+ bool Hidden = false;
+ for (llvm::SmallVector<UniqueVirtualMethod, 4>::iterator
+ OP = Overriding.begin(), OPEnd = Overriding.end();
+ OP != OPEnd && !Hidden;
+ ++OP) {
+ if (Pos == OP)
+ continue;
+
+ if (OP->Method->getParent()->isVirtuallyDerivedFrom(
+ const_cast<CXXRecordDecl *>(Pos->InVirtualSubobject)))
+ Hidden = true;
+ }
+
+ if (Hidden) {
+ // The current overriding function is hidden by another
+ // overriding function; remove this one.
+ Pos = Overriding.erase(Pos);
+ PosEnd = Overriding.end();
+ } else {
+ ++Pos;
+ }
+ }
+ }
+ }
+}
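
For reference, a small hypothetical hierarchy showing what the new FinalOverriderCollector computes, including the virtual-base hiding case handled by the pruning loop above:

    // C++ [class.virtual]p2: every virtual function overrides itself, and
    // the final overrider is the most derived one for a given subobject.
    struct V {
      virtual void f();
    };
    struct A : virtual V {
      void f();               // overrides V::f
    };
    struct B : virtual V { }; // contributes only V::f itself
    struct D : A, B { };      // one shared V subobject (virtual base)

    // getFinalOverriders() on D maps V::f to A::f for the single V
    // subobject: the V::f candidate reached through B lives in the virtual
    // base, and A (which supplies A::f) is virtually derived from that same
    // V, so the entry is hidden and erased by the loop at the end of
    // CXXRecordDecl::getFinalOverriders(). If B also declared f(), V::f
    // would have two final overriders in D and using D would be ill-formed.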
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 6c9a45ef6c44..dc9fb59e3090 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -1413,10 +1413,8 @@ void TagDecl::startDefinition() {
CXXRecordDecl *D = cast<CXXRecordDecl>(this);
struct CXXRecordDecl::DefinitionData *Data =
new (getASTContext()) struct CXXRecordDecl::DefinitionData(D);
- do {
- D->DefinitionData = Data;
- D = cast_or_null<CXXRecordDecl>(D->getPreviousDeclaration());
- } while (D);
+ for (redecl_iterator I = redecls_begin(), E = redecls_end(); I != E; ++I)
+ cast<CXXRecordDecl>(*I)->DefinitionData = Data;
}
}
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 1aac7cfd598a..c693e153dda5 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclTemplate.h"
+#include "clang/AST/DependentDiagnostic.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Type.h"
@@ -481,7 +482,7 @@ DeclContext::~DeclContext() {
// FIXME: Currently ~ASTContext will delete the StoredDeclsMaps because
// ~DeclContext() is not guaranteed to be called when ASTContext uses
// a BumpPtrAllocator.
- // delete static_cast<StoredDeclsMap*>(LookupPtr);
+ // delete LookupPtr;
}
void DeclContext::DestroyDecls(ASTContext &C) {
@@ -516,10 +517,16 @@ bool DeclContext::isDependentContext() const {
if (Record->getDescribedClassTemplate())
return true;
- if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this))
+ if (const FunctionDecl *Function = dyn_cast<FunctionDecl>(this)) {
if (Function->getDescribedFunctionTemplate())
return true;
+ // Friend function declarations are dependent if their *lexical*
+ // context is dependent.
+ if (cast<Decl>(this)->getFriendObjectKind())
+ return getLexicalParent()->isDependentContext();
+ }
+
return getParent() && getParent()->isDependentContext();
}
@@ -666,9 +673,7 @@ DeclContext::LoadVisibleDeclsFromExternalStorage() const {
// Load the declaration IDs for all of the names visible in this
// context.
assert(!LookupPtr && "Have a lookup map before de-serialization?");
- StoredDeclsMap *Map =
- (StoredDeclsMap*) getParentASTContext().CreateStoredDeclsMap();
- LookupPtr = Map;
+ StoredDeclsMap *Map = CreateStoredDeclsMap(getParentASTContext());
for (unsigned I = 0, N = Decls.size(); I != N; ++I) {
(*Map)[Decls[I].Name].setFromDeclIDs(Decls[I].Declarations);
}
@@ -727,10 +732,9 @@ void DeclContext::removeDecl(Decl *D) {
if (isa<NamedDecl>(D)) {
NamedDecl *ND = cast<NamedDecl>(D);
- void *OpaqueMap = getPrimaryContext()->LookupPtr;
- if (!OpaqueMap) return;
+ StoredDeclsMap *Map = getPrimaryContext()->LookupPtr;
+ if (!Map) return;
- StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(OpaqueMap);
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
Pos->second.remove(ND);
@@ -808,9 +812,8 @@ DeclContext::lookup(DeclarationName Name) {
return lookup_result(0, 0);
}
- StoredDeclsMap *Map = static_cast<StoredDeclsMap*>(LookupPtr);
- StoredDeclsMap::iterator Pos = Map->find(Name);
- if (Pos == Map->end())
+ StoredDeclsMap::iterator Pos = LookupPtr->find(Name);
+ if (Pos == LookupPtr->end())
return lookup_result(0, 0);
return Pos->second.getLookupResult(getParentASTContext());
}
@@ -878,12 +881,11 @@ void DeclContext::makeDeclVisibleInContextImpl(NamedDecl *D) {
ASTContext *C = 0;
if (!LookupPtr) {
C = &getParentASTContext();
- LookupPtr = (StoredDeclsMap*) C->CreateStoredDeclsMap();
+ CreateStoredDeclsMap(*C);
}
// Insert this declaration into the map.
- StoredDeclsMap &Map = *static_cast<StoredDeclsMap*>(LookupPtr);
- StoredDeclsList &DeclNameEntries = Map[D->getDeclName()];
+ StoredDeclsList &DeclNameEntries = (*LookupPtr)[D->getDeclName()];
if (DeclNameEntries.isNull()) {
DeclNameEntries.setOnlyValue(D);
return;
@@ -952,13 +954,69 @@ void StoredDeclsList::materializeDecls(ASTContext &Context) {
// Creation and Destruction of StoredDeclsMaps. //
//===----------------------------------------------------------------------===//
-void *ASTContext::CreateStoredDeclsMap() {
- StoredDeclsMap *M = new StoredDeclsMap();
- SDMs.push_back(M);
+StoredDeclsMap *DeclContext::CreateStoredDeclsMap(ASTContext &C) const {
+ assert(!LookupPtr && "context already has a decls map");
+ assert(getPrimaryContext() == this &&
+ "creating decls map on non-primary context");
+
+ StoredDeclsMap *M;
+ bool Dependent = isDependentContext();
+ if (Dependent)
+ M = new DependentStoredDeclsMap();
+ else
+ M = new StoredDeclsMap();
+ M->Previous = C.LastSDM;
+ C.LastSDM = llvm::PointerIntPair<StoredDeclsMap*,1>(M, Dependent);
+ LookupPtr = M;
return M;
}
void ASTContext::ReleaseDeclContextMaps() {
- for (std::vector<void*>::iterator I = SDMs.begin(), E = SDMs.end(); I!=E; ++I)
- delete (StoredDeclsMap*) *I;
+ // It's okay to delete DependentStoredDeclsMaps via a StoredDeclsMap
+ // pointer because the subclass doesn't add anything that needs to
+ // be deleted.
+
+ StoredDeclsMap::DestroyAll(LastSDM.getPointer(), LastSDM.getInt());
+}
+
+void StoredDeclsMap::DestroyAll(StoredDeclsMap *Map, bool Dependent) {
+ while (Map) {
+ // Advance the iteration before we invalidate memory.
+ llvm::PointerIntPair<StoredDeclsMap*,1> Next = Map->Previous;
+
+ if (Dependent)
+ delete static_cast<DependentStoredDeclsMap*>(Map);
+ else
+ delete Map;
+
+ Map = Next.getPointer();
+ Dependent = Next.getInt();
+ }
+}
+
+DependentDiagnostic *DependentDiagnostic::Create(ASTContext &C,
+ DeclContext *Parent,
+ const PartialDiagnostic &PDiag) {
+ assert(Parent->isDependentContext()
+ && "cannot iterate dependent diagnostics of non-dependent context");
+ Parent = Parent->getPrimaryContext();
+ if (!Parent->LookupPtr)
+ Parent->CreateStoredDeclsMap(C);
+
+ DependentStoredDeclsMap *Map
+ = static_cast<DependentStoredDeclsMap*>(Parent->LookupPtr);
+
+ // Allocate the copy of the PartialDiagnostic via the ASTContext's
+ // BumpPtrAllocator, rather than the ASTContext itself.
+ PartialDiagnostic::Storage *DiagStorage = 0;
+ if (PDiag.hasStorage())
+ DiagStorage = new (C) PartialDiagnostic::Storage;
+
+ DependentDiagnostic *DD = new (C) DependentDiagnostic(PDiag, DiagStorage);
+
+ // TODO: Maybe we shouldn't reverse the order during insertion.
+ DD->NextDiagnostic = Map->FirstDiagnostic;
+ Map->FirstDiagnostic = DD;
+
+ return DD;
}
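
The new CreateStoredDeclsMap/DestroyAll pair threads every map through an intrusive list tagged with its dynamic type, which is what lets ReleaseDeclContextMaps free the maps without giving them virtual destructors. A reduced sketch of the pattern, with std::pair standing in for llvm::PointerIntPair (which packs the bool into the pointer's low bit):

    #include <utility>

    struct StoredMap {
      // (older map in the chain, "is a DependentMap" tag)
      std::pair<StoredMap *, bool> Previous{nullptr, false};
    };
    struct DependentMap : StoredMap {
      // extra per-context state (e.g. a diagnostic list) would live here
    };

    static void DestroyAll(StoredMap *M, bool Dependent) {
      while (M) {
        std::pair<StoredMap *, bool> Next = M->Previous; // read before delete
        if (Dependent)
          delete static_cast<DependentMap *>(M); // correct non-virtual delete
        else
          delete M;
        M = Next.first;
        Dependent = Next.second;
      }
    }

    int main() {
      StoredMap *Last = nullptr;
      bool LastDependent = false;
      for (int I = 0; I != 3; ++I) {  // build a mixed chain, newest first
        bool Dep = (I % 2 == 0);
        StoredMap *M = Dep ? new DependentMap : new StoredMap;
        M->Previous = {Last, LastDependent};
        Last = M;
        LastDependent = Dep;
      }
      DestroyAll(Last, LastDependent);
    }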
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index 37f7479b36cd..94ed85c7cd2d 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -83,9 +83,11 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (data().Bases)
C.Deallocate(data().Bases);
- int vbaseCount = 0;
- llvm::SmallVector<const CXXBaseSpecifier*, 8> UniqueVbases;
- bool hasDirectVirtualBase = false;
+ // The set of seen virtual base types.
+ llvm::SmallPtrSet<CanQualType, 8> SeenVBaseTypes;
+
+ // The virtual bases of this class.
+ llvm::SmallVector<const CXXBaseSpecifier *, 8> VBases;
data().Bases = new(C) CXXBaseSpecifier [NumBases];
data().NumBases = NumBases;
@@ -99,58 +101,44 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
continue;
CXXRecordDecl *BaseClassDecl
= cast<CXXRecordDecl>(BaseType->getAs<RecordType>()->getDecl());
- if (Base->isVirtual())
- hasDirectVirtualBase = true;
+
+ // Now go through all virtual bases of this base and add them.
for (CXXRecordDecl::base_class_iterator VBase =
BaseClassDecl->vbases_begin(),
E = BaseClassDecl->vbases_end(); VBase != E; ++VBase) {
- // Add this vbase to the array of vbases for current class if it is
- // not already in the list.
- // FIXME. Note that we do a linear search as number of such classes are
- // very few.
- int i;
- for (i = 0; i < vbaseCount; ++i)
- if (UniqueVbases[i]->getType() == VBase->getType())
- break;
- if (i == vbaseCount) {
- UniqueVbases.push_back(VBase);
- ++vbaseCount;
- }
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(VBase->getType())))
+ VBases.push_back(VBase);
}
- }
- if (hasDirectVirtualBase) {
- // Iterate one more time through the direct bases and add the virtual
- // base to the list of vritual bases for current class.
- for (unsigned i = 0; i < NumBases; ++i) {
- const CXXBaseSpecifier *VBase = Bases[i];
- if (!VBase->isVirtual())
- continue;
- int j;
- for (j = 0; j < vbaseCount; ++j)
- if (UniqueVbases[j]->getType() == VBase->getType())
- break;
- if (j == vbaseCount) {
- UniqueVbases.push_back(VBase);
- ++vbaseCount;
- }
+
+ if (Base->isVirtual()) {
+ // Add this base if it's not already in the list.
+ if (SeenVBaseTypes.insert(C.getCanonicalType(BaseType)))
+ VBases.push_back(Base);
}
+
}
- if (vbaseCount > 0) {
- // build AST for inhireted, direct or indirect, virtual bases.
- data().VBases = new (C) CXXBaseSpecifier [vbaseCount];
- data().NumVBases = vbaseCount;
- for (int i = 0; i < vbaseCount; i++) {
- QualType QT = UniqueVbases[i]->getType();
- // Skip dependent types; we can't do any checking on them now.
- if (QT->isDependentType())
- continue;
- CXXRecordDecl *VBaseClassDecl
- = cast<CXXRecordDecl>(QT->getAs<RecordType>()->getDecl());
- data().VBases[i] =
- CXXBaseSpecifier(VBaseClassDecl->getSourceRange(), true,
- VBaseClassDecl->getTagKind() == RecordDecl::TK_class,
- UniqueVbases[i]->getAccessSpecifier(), QT);
- }
+
+ if (VBases.empty())
+ return;
+
+ // Create base specifier for any direct or indirect virtual bases.
+ data().VBases = new (C) CXXBaseSpecifier[VBases.size()];
+ data().NumVBases = VBases.size();
+ for (int I = 0, E = VBases.size(); I != E; ++I) {
+ QualType VBaseType = VBases[I]->getType();
+
+ // Skip dependent types; we can't do any checking on them now.
+ if (VBaseType->isDependentType())
+ continue;
+
+ CXXRecordDecl *VBaseClassDecl
+ = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
+
+ data().VBases[I] =
+ CXXBaseSpecifier(VBaseClassDecl->getSourceRange(), true,
+ VBaseClassDecl->getTagKind() == RecordDecl::TK_class,
+ VBases[I]->getAccessSpecifier(), VBaseType);
}
}
@@ -320,6 +308,8 @@ void CXXRecordDecl::addedAssignmentOperator(ASTContext &Context,
static CanQualType GetConversionType(ASTContext &Context, NamedDecl *Conv) {
QualType T;
+ if (isa<UsingShadowDecl>(Conv))
+ Conv = cast<UsingShadowDecl>(Conv)->getTargetDecl();
if (FunctionTemplateDecl *ConvTemp = dyn_cast<FunctionTemplateDecl>(Conv))
T = ConvTemp->getTemplatedDecl()->getResultType();
else
@@ -457,26 +447,45 @@ const UnresolvedSetImpl *CXXRecordDecl::getVisibleConversionFunctions() {
return &data().VisibleConversions;
}
-void CXXRecordDecl::addConversionFunction(CXXConversionDecl *ConvDecl) {
- assert(!ConvDecl->getDescribedFunctionTemplate() &&
- "Conversion function templates should cast to FunctionTemplateDecl.");
+#ifndef NDEBUG
+void CXXRecordDecl::CheckConversionFunction(NamedDecl *ConvDecl) {
assert(ConvDecl->getDeclContext() == this &&
"conversion function does not belong to this record");
- // We intentionally don't use the decl's access here because it
- // hasn't been set yet. That's really just a misdesign in Sema.
- data().Conversions.addDecl(ConvDecl);
+ ConvDecl = ConvDecl->getUnderlyingDecl();
+ if (FunctionTemplateDecl *Temp = dyn_cast<FunctionTemplateDecl>(ConvDecl)) {
+ assert(isa<CXXConversionDecl>(Temp->getTemplatedDecl()));
+ } else {
+ assert(isa<CXXConversionDecl>(ConvDecl));
+ }
}
+#endif
+
+void CXXRecordDecl::removeConversion(const NamedDecl *ConvDecl) {
+ // This operation is O(N) but extremely rare. Sema only uses it to
+ // remove UsingShadowDecls in a class that were followed by a direct
+ // declaration, e.g.:
+ // class A : B {
+ // using B::operator int;
+ // operator int();
+ // };
+ // This is uncommon by itself and even more uncommon in conjunction
+ // with sufficiently large numbers of directly-declared conversions
+ // that asymptotic behavior matters.
+
+ UnresolvedSetImpl &Convs = *getConversionFunctions();
+ for (unsigned I = 0, E = Convs.size(); I != E; ++I) {
+ if (Convs[I].getDecl() == ConvDecl) {
+ Convs.erase(I);
+ assert(std::find(Convs.begin(), Convs.end(), ConvDecl) == Convs.end()
+ && "conversion was found multiple times in unresolved set");
+ return;
+ }
+ }
-void CXXRecordDecl::addConversionFunction(FunctionTemplateDecl *ConvDecl) {
- assert(isa<CXXConversionDecl>(ConvDecl->getTemplatedDecl()) &&
- "Function template is not a conversion function template");
- assert(ConvDecl->getDeclContext() == this &&
- "conversion function does not belong to this record");
- data().Conversions.addDecl(ConvDecl);
+ llvm_unreachable("conversion not found in set!");
}
-
void CXXRecordDecl::setMethodAsVirtual(FunctionDecl *Method) {
Method->setVirtualAsWritten(true);
setAggregate(false);
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 6a71e925d9b3..6764612c80b6 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -492,44 +492,45 @@ QualType CallExpr::getCallReturnType() const {
return FnType->getResultType();
}
-MemberExpr::MemberExpr(Expr *base, bool isarrow, NestedNameSpecifier *qual,
- SourceRange qualrange, ValueDecl *memberdecl,
- SourceLocation l, const TemplateArgumentListInfo *targs,
- QualType ty)
- : Expr(MemberExprClass, ty,
- base->isTypeDependent() || (qual && qual->isDependent()),
- base->isValueDependent() || (qual && qual->isDependent())),
- Base(base), MemberDecl(memberdecl), MemberLoc(l), IsArrow(isarrow),
- HasQualifier(qual != 0), HasExplicitTemplateArgumentList(targs) {
- // Initialize the qualifier, if any.
- if (HasQualifier) {
- NameQualifier *NQ = getMemberQualifier();
- NQ->NNS = qual;
- NQ->Range = qualrange;
- }
-
- // Initialize the explicit template argument list, if any.
- if (targs)
- getExplicitTemplateArgumentList()->initializeFrom(*targs);
-}
-
MemberExpr *MemberExpr::Create(ASTContext &C, Expr *base, bool isarrow,
NestedNameSpecifier *qual,
SourceRange qualrange,
ValueDecl *memberdecl,
+ NamedDecl *founddecl,
SourceLocation l,
const TemplateArgumentListInfo *targs,
QualType ty) {
std::size_t Size = sizeof(MemberExpr);
- if (qual != 0)
- Size += sizeof(NameQualifier);
+
+ bool hasQualOrFound = (qual != 0 || founddecl != memberdecl);
+ if (hasQualOrFound)
+ Size += sizeof(MemberNameQualifier);
if (targs)
Size += ExplicitTemplateArgumentList::sizeFor(*targs);
void *Mem = C.Allocate(Size, llvm::alignof<MemberExpr>());
- return new (Mem) MemberExpr(base, isarrow, qual, qualrange, memberdecl, l,
- targs, ty);
+ MemberExpr *E = new (Mem) MemberExpr(base, isarrow, memberdecl, l, ty);
+
+ if (hasQualOrFound) {
+ if (qual && qual->isDependent()) {
+ E->setValueDependent(true);
+ E->setTypeDependent(true);
+ }
+ E->HasQualifierOrFoundDecl = true;
+
+ MemberNameQualifier *NQ = E->getMemberQualifier();
+ NQ->NNS = qual;
+ NQ->Range = qualrange;
+ NQ->FoundDecl = founddecl;
+ }
+
+ if (targs) {
+ E->HasExplicitTemplateArgumentList = true;
+ E->getExplicitTemplateArgumentList()->initializeFrom(*targs);
+ }
+
+ return E;
}
const char *CastExpr::getCastKindName() const {
@@ -544,6 +545,8 @@ const char *CastExpr::getCastKindName() const {
return "BaseToDerived";
case CastExpr::CK_DerivedToBase:
return "DerivedToBase";
+ case CastExpr::CK_UncheckedDerivedToBase:
+ return "UncheckedDerivedToBase";
case CastExpr::CK_Dynamic:
return "Dynamic";
case CastExpr::CK_ToUnion:
@@ -914,8 +917,15 @@ bool Expr::isUnusedResultAWarning(SourceLocation &Loc, SourceRange &R1,
case CXXConstructExprClass:
return false;
- case ObjCMessageExprClass:
+ case ObjCMessageExprClass: {
+ const ObjCMessageExpr *ME = cast<ObjCMessageExpr>(this);
+ const ObjCMethodDecl *MD = ME->getMethodDecl();
+ if (MD && MD->getAttr<WarnUnusedResultAttr>()) {
+ Loc = getExprLoc();
+ return true;
+ }
return false;
+ }
case ObjCImplicitSetterGetterRefExprClass: { // Dot syntax for message send.
#if 0
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 8a64f8ea97ec..27a277ddbcb0 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -750,7 +750,7 @@ bool Type::isSpecifierType() const {
case SubstTemplateTypeParm:
case TemplateSpecialization:
case QualifiedName:
- case Typename:
+ case DependentName:
case ObjCInterface:
case ObjCObjectPointer:
case Elaborated:
@@ -760,6 +760,27 @@ bool Type::isSpecifierType() const {
}
}
+bool Type::isElaboratedTypeSpecifier() const {
+ if (getTypeClass() == Elaborated)
+ return true;
+
+ if (const DependentNameType *Dependent = dyn_cast<DependentNameType>(this)) {
+ switch (Dependent->getKeyword()) {
+ case ETK_None:
+ case ETK_Typename:
+ return false;
+
+ case ETK_Class:
+ case ETK_Struct:
+ case ETK_Union:
+ case ETK_Enum:
+ return true;
+ }
+ }
+
+ return false;
+}
+
const char *Type::getTypeClassName() const {
switch (TC) {
default: assert(0 && "Type class not in TypeNodes.def!");
@@ -820,8 +841,8 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
unsigned NumArgs, bool isVariadic,
unsigned TypeQuals, bool hasExceptionSpec,
bool anyExceptionSpec, unsigned NumExceptions,
- exception_iterator Exs, bool NoReturn,
- CallingConv CallConv) {
+ exception_iterator Exs,
+ const FunctionType::ExtInfo &Info) {
ID.AddPointer(Result.getAsOpaquePtr());
for (unsigned i = 0; i != NumArgs; ++i)
ID.AddPointer(ArgTys[i].getAsOpaquePtr());
@@ -833,15 +854,16 @@ void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID, QualType Result,
for (unsigned i = 0; i != NumExceptions; ++i)
ID.AddPointer(Exs[i].getAsOpaquePtr());
}
- ID.AddInteger(NoReturn);
- ID.AddInteger(CallConv);
+ ID.AddInteger(Info.getNoReturn());
+ ID.AddInteger(Info.getRegParm());
+ ID.AddInteger(Info.getCC());
}
void FunctionProtoType::Profile(llvm::FoldingSetNodeID &ID) {
Profile(ID, getResultType(), arg_type_begin(), NumArgs, isVariadic(),
getTypeQuals(), hasExceptionSpec(), hasAnyExceptionSpec(),
- getNumExceptions(), exception_begin(), getNoReturnAttr(),
- getCallConv());
+ getNumExceptions(), exception_begin(),
+ getExtInfo());
}
void ObjCObjectPointerType::Profile(llvm::FoldingSetNodeID &ID,
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index 0c4896decf85..4cf0922ee316 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -282,7 +282,8 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
S += ")";
- switch(T->getCallConv()) {
+ FunctionType::ExtInfo Info = T->getExtInfo();
+ switch(Info.getCC()) {
case CC_Default:
default: break;
case CC_C:
@@ -295,9 +296,11 @@ void TypePrinter::PrintFunctionProto(const FunctionProtoType *T,
S += " __attribute__((fastcall))";
break;
}
- if (T->getNoReturnAttr())
+ if (Info.getNoReturn())
S += " __attribute__((noreturn))";
-
+ if (Info.getRegParm())
+ S += " __attribute__((regparm (" +
+ llvm::utostr_32(Info.getRegParm()) + ")))";
if (T->hasExceptionSpec()) {
S += " throw(";
@@ -564,12 +567,20 @@ void TypePrinter::PrintQualifiedName(const QualifiedNameType *T,
S = MyString + ' ' + S;
}
-void TypePrinter::PrintTypename(const TypenameType *T, std::string &S) {
+void TypePrinter::PrintDependentName(const DependentNameType *T, std::string &S) {
std::string MyString;
{
llvm::raw_string_ostream OS(MyString);
- OS << "typename ";
+ switch (T->getKeyword()) {
+ case ETK_None: break;
+ case ETK_Typename: OS << "typename "; break;
+ case ETK_Class: OS << "class "; break;
+ case ETK_Struct: OS << "struct "; break;
+ case ETK_Union: OS << "union "; break;
+ case ETK_Enum: OS << "enum "; break;
+ }
+
T->getQualifier()->print(OS, Policy);
if (const IdentifierInfo *Ident = T->getIdentifier())
@@ -819,4 +830,3 @@ void QualType::getAsStringInternal(std::string &S,
TypePrinter Printer(Policy);
Printer.Print(*this, S);
}
-
diff --git a/lib/Analysis/AnalysisContext.cpp b/lib/Analysis/AnalysisContext.cpp
index 5640c4a461e0..06d8aec3910e 100644
--- a/lib/Analysis/AnalysisContext.cpp
+++ b/lib/Analysis/AnalysisContext.cpp
@@ -54,8 +54,12 @@ const ImplicitParamDecl *AnalysisContext::getSelfDecl() const {
}
CFG *AnalysisContext::getCFG() {
- if (!cfg)
+ if (!builtCFG) {
cfg = CFG::buildCFG(D, getBody(), &D->getASTContext(), AddEHEdges);
+ // Even when the cfg is not successfully built, we don't
+ // want to try building it again.
+ builtCFG = true;
+ }
return cfg;
}
@@ -126,9 +130,9 @@ LocationContextManager::getLocationContext(AnalysisContext *ctx,
llvm::FoldingSetNodeID ID;
LOC::Profile(ID, ctx, parent, d);
void *InsertPos;
-
+
LOC *L = cast_or_null<LOC>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
-
+
if (!L) {
L = new LOC(ctx, parent, d);
Contexts.InsertNode(L, InsertPos);
@@ -144,7 +148,7 @@ LocationContextManager::getStackFrame(AnalysisContext *ctx,
llvm::FoldingSetNodeID ID;
StackFrameContext::Profile(ID, ctx, parent, s, blk, idx);
void *InsertPos;
- StackFrameContext *L =
+ StackFrameContext *L =
cast_or_null<StackFrameContext>(Contexts.FindNodeOrInsertPos(ID, InsertPos));
if (!L) {
L = new StackFrameContext(ctx, parent, s, blk, idx);
@@ -253,7 +257,7 @@ public:
IgnoredContexts.insert(BR->getBlockDecl());
Visit(BR->getBlockDecl()->getBody());
}
-};
+};
} // end anonymous namespace
typedef BumpVector<const VarDecl*> DeclVec;
@@ -263,16 +267,16 @@ static DeclVec* LazyInitializeReferencedDecls(const BlockDecl *BD,
llvm::BumpPtrAllocator &A) {
if (Vec)
return (DeclVec*) Vec;
-
+
BumpVectorContext BC(A);
DeclVec *BV = (DeclVec*) A.Allocate<DeclVec>();
new (BV) DeclVec(BC, 10);
-
+
// Find the referenced variables.
FindBlockDeclRefExprsVals F(*BV, BC);
F.Visit(BD->getBody());
-
- Vec = BV;
+
+ Vec = BV;
return BV;
}
@@ -281,7 +285,7 @@ std::pair<AnalysisContext::referenced_decls_iterator,
AnalysisContext::getReferencedBlockVars(const BlockDecl *BD) {
if (!ReferencedBlockVars)
ReferencedBlockVars = new llvm::DenseMap<const BlockDecl*,void*>();
-
+
DeclVec *V = LazyInitializeReferencedDecls(BD, (*ReferencedBlockVars)[BD], A);
return std::make_pair(V->begin(), V->end());
}
@@ -310,12 +314,12 @@ LocationContextManager::~LocationContextManager() {
void LocationContextManager::clear() {
for (llvm::FoldingSet<LocationContext>::iterator I = Contexts.begin(),
- E = Contexts.end(); I != E; ) {
+ E = Contexts.end(); I != E; ) {
LocationContext *LC = &*I;
++I;
delete LC;
}
-
+
Contexts.clear();
}
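
The getCFG() change above caches on a separate builtCFG flag rather than on the pointer itself, so a build that legitimately yields a null CFG is not retried on every query. The shape of that idiom as a self-contained sketch, with CFG and the builder stubbed out:

    #include <memory>

    struct CFG { /* stub */ };

    // Stub builder: may return null for bodies that cannot be handled.
    static std::unique_ptr<CFG> buildCFG() { return nullptr; }

    class AnalysisContext {
      std::unique_ptr<CFG> Cfg;
      bool BuiltCFG = false; // distinguishes "never built" from "built, failed"
    public:
      CFG *getCFG() {
        if (!BuiltCFG) {
          Cfg = buildCFG();  // a null result is a valid, cacheable outcome
          BuiltCFG = true;   // either way, do not rebuild on the next call
        }
        return Cfg.get();
      }
    };

    int main() {
      AnalysisContext AC;
      CFG *C1 = AC.getCFG(); // builds once
      CFG *C2 = AC.getCFG(); // returns the cached (possibly null) result
      return C1 == C2 ? 0 : 1;
    }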
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index a4a021f20b21..f94f6b3f127f 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -571,7 +571,7 @@ static bool CanThrow(Expr *E) {
CFGBlock *CFGBuilder::VisitCallExpr(CallExpr *C, AddStmtChoice asc) {
// If this is a call to a no-return function, this stops the block here.
bool NoReturn = false;
- if (C->getCallee()->getType().getNoReturnAttr()) {
+ if (getFunctionExtInfo(*C->getCallee()->getType()).getNoReturn()) {
NoReturn = true;
}
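
The CFG.cpp change reads the noreturn bit through the new getFunctionExtInfo() accessor instead of QualType::getNoReturnAttr(). A hypothetical input showing the behavior this check preserves; a call to a noreturn function terminates the current CFG block:

    #include <cstdio>
    #include <cstdlib>

    __attribute__((noreturn)) void fail(const char *Msg) {
      std::fprintf(stderr, "%s\n", Msg);
      std::abort();
    }

    int checked_div(int A, int B) {
      if (B == 0)
        fail("division by zero"); // noreturn call: block ends, no fallthrough
      return A / B;               // only reachable when B != 0
    }

    int main() { return checked_div(10, 2) == 5 ? 0 : 1; }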
diff --git a/lib/Analysis/PrintfFormatString.cpp b/lib/Analysis/PrintfFormatString.cpp
index 46acc8a377bf..c38aae34764c 100644
--- a/lib/Analysis/PrintfFormatString.cpp
+++ b/lib/Analysis/PrintfFormatString.cpp
@@ -75,7 +75,7 @@ static OptionalAmount ParseAmount(const char *&Beg, const char *E) {
char c = *I;
if (c >= '0' && c <= '9') {
hasDigits = true;
- accumulator += (accumulator * 10) + (c - '0');
+ accumulator = (accumulator * 10) + (c - '0');
continue;
}
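
The one-line PrintfFormatString.cpp fix is easy to miss: with +=, the previous accumulator value was folded in a second time whenever the amount had more than one digit. A quick check of both behaviors:

    #include <cassert>

    static unsigned ParseFixed(const char *S) { // the corrected idiom
      unsigned Acc = 0;
      for (; *S >= '0' && *S <= '9'; ++S)
        Acc = Acc * 10 + unsigned(*S - '0');
      return Acc;
    }

    static unsigned ParseBuggy(const char *S) { // the pre-fix behavior
      unsigned Acc = 0;
      for (; *S >= '0' && *S <= '9'; ++S)
        Acc += Acc * 10 + unsigned(*S - '0');   // Acc counted once too often
      return Acc;
    }

    int main() {
      assert(ParseFixed("25") == 25);
      assert(ParseBuggy("25") == 27);             // 2, then 2 + 20 + 5
      assert(ParseFixed("7") == ParseBuggy("7")); // single digits hid the bug
    }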
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index f7ec873e4c15..2b7fcd07f9d0 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Lex/LexDiagnostic.h"
#include "clang/Parse/ParseDiagnostic.h"
@@ -124,10 +125,20 @@ const char *Diagnostic::getWarningOptionForDiag(unsigned DiagID) {
return 0;
}
-bool Diagnostic::isBuiltinSFINAEDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return Info->SFINAE && Info->Class == CLASS_ERROR;
- return false;
+Diagnostic::SFINAEResponse
+Diagnostic::getDiagnosticSFINAEResponse(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID)) {
+ if (!Info->SFINAE)
+ return SFINAE_Report;
+
+ if (Info->Class == CLASS_ERROR)
+ return SFINAE_SubstitutionFailure;
+
+    // Suppress notes, warnings, and extensions.
+ return SFINAE_Suppress;
+ }
+
+ return SFINAE_Report;
}
/// getDiagClass - Return the class field of the diagnostic.
@@ -222,6 +233,8 @@ Diagnostic::Diagnostic(DiagnosticClient *client) : Client(client) {
ArgToStringFn = DummyArgToStringFn;
ArgToStringCookie = 0;
+ DelayedDiagID = 0;
+
// Set all mappings to 'unset'.
DiagMappings BlankDiags(diag::DIAG_UPPER_LIMIT/2, 0);
DiagMappingsStack.push_back(BlankDiags);
@@ -289,6 +302,23 @@ const char *Diagnostic::getDescription(unsigned DiagID) const {
return CustomDiagInfo->getDescription(DiagID);
}
+void Diagnostic::SetDelayedDiagnostic(unsigned DiagID, llvm::StringRef Arg1,
+ llvm::StringRef Arg2) {
+ if (DelayedDiagID)
+ return;
+
+ DelayedDiagID = DiagID;
+ DelayedDiagArg1 = Arg1.str();
+ DelayedDiagArg2 = Arg2.str();
+}
+
+void Diagnostic::ReportDelayed() {
+ Report(DelayedDiagID) << DelayedDiagArg1 << DelayedDiagArg2;
+ DelayedDiagID = 0;
+ DelayedDiagArg1.clear();
+ DelayedDiagArg2.clear();
+}
+
/// getDiagnosticLevel - Based on the way the client configured the Diagnostic
/// object, classify the specified diagnostic ID into a Level, consumable by
/// the DiagnosticClient.
@@ -532,6 +562,35 @@ bool Diagnostic::ProcessDiag() {
return true;
}
+bool DiagnosticBuilder::Emit() {
+ // If DiagObj is null, then its soul was stolen by the copy ctor
+ // or the user called Emit().
+ if (DiagObj == 0) return false;
+
+ // When emitting diagnostics, we set the final argument count into
+ // the Diagnostic object.
+ DiagObj->NumDiagArgs = NumArgs;
+ DiagObj->NumDiagRanges = NumRanges;
+ DiagObj->NumFixItHints = NumFixItHints;
+
+ // Process the diagnostic, sending the accumulated information to the
+ // DiagnosticClient.
+ bool Emitted = DiagObj->ProcessDiag();
+
+ // Clear out the current diagnostic object.
+ unsigned DiagID = DiagObj->CurDiagID;
+ DiagObj->Clear();
+
+ // If there was a delayed diagnostic, emit it now.
+ if (DiagObj->DelayedDiagID && DiagObj->DelayedDiagID != DiagID)
+ DiagObj->ReportDelayed();
+
+ // This diagnostic is dead.
+ DiagObj = 0;
+
+ return Emitted;
+}
+
DiagnosticClient::~DiagnosticClient() {}
@@ -937,9 +996,9 @@ StoredDiagnostic::StoredDiagnostic(Diagnostic::Level Level,
for (unsigned I = 0, N = Info.getNumRanges(); I != N; ++I)
Ranges.push_back(Info.getRange(I));
- FixIts.reserve(Info.getNumCodeModificationHints());
- for (unsigned I = 0, N = Info.getNumCodeModificationHints(); I != N; ++I)
- FixIts.push_back(Info.getCodeModificationHint(I));
+ FixIts.reserve(Info.getNumFixItHints());
+ for (unsigned I = 0, N = Info.getNumFixItHints(); I != N; ++I)
+ FixIts.push_back(Info.getFixItHint(I));
}
StoredDiagnostic::~StoredDiagnostic() { }
@@ -1172,7 +1231,7 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
return Diag;
}
- CodeModificationHint Hint;
+ FixItHint Hint;
Hint.RemoveRange = SourceRange(RemoveBegin, RemoveEnd);
Hint.InsertionLoc = InsertionLoc;
Hint.CodeToInsert.assign(Memory, Memory + InsertLen);
@@ -1188,3 +1247,13 @@ StoredDiagnostic::Deserialize(FileManager &FM, SourceManager &SM,
/// DiagnosticClient should be included in the number of diagnostics
/// reported by Diagnostic.
bool DiagnosticClient::IncludeInDiagnosticCounts() const { return true; }
+
+PartialDiagnostic::StorageAllocator::StorageAllocator() {
+ for (unsigned I = 0; I != NumCached; ++I)
+ FreeList[I] = Cached + I;
+ NumFreeListEntries = NumCached;
+}
+
+PartialDiagnostic::StorageAllocator::~StorageAllocator() {
+  assert(NumFreeListEntries == NumCached && "A partial is on the lam");
+}
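
Taken together, the additions above let a client report an error while another diagnostic is still in flight: the second report is parked by SetDelayedDiagnostic and flushed from DiagnosticBuilder::Emit() once the first completes. A minimal standalone sketch of that pattern, with illustrative names only:

    #include <iostream>
    #include <string>

    class Diag {
      std::string Delayed;  // parked message; empty means none
      bool InFlight;        // a report is currently being emitted
    public:
      Diag() : InFlight(false) {}
      void report(const std::string &msg) {
        if (InFlight) {            // park it; the first delayed report wins
          if (Delayed.empty())
            Delayed = msg;
          return;
        }
        InFlight = true;
        std::cout << "error: " << msg << '\n';
        InFlight = false;
        if (!Delayed.empty()) {    // emit whatever was parked meanwhile
          std::string d;
          d.swap(Delayed);
          report(d);
        }
      }
    };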
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 6def967c4cfa..27cb9bebde42 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -89,16 +89,26 @@ const llvm::MemoryBuffer *ContentCache::getBuffer(Diagnostic &Diag,
char *Ptr = const_cast<char*>(Buffer.getPointer()->getBufferStart());
for (unsigned i = 0, e = Entry->getSize(); i != e; ++i)
Ptr[i] = FillStr[i % FillStr.size()];
- Diag.Report(diag::err_cannot_open_file)
- << Entry->getName() << ErrorStr;
+
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_cannot_open_file,
+ Entry->getName(), ErrorStr);
+ else
+ Diag.Report(diag::err_cannot_open_file)
+ << Entry->getName() << ErrorStr;
+
Buffer.setInt(true);
} else if (FileInfo.st_size != Entry->getSize() ||
- FileInfo.st_mtime != Entry->getModificationTime() ||
- FileInfo.st_ino != Entry->getInode()) {
+ FileInfo.st_mtime != Entry->getModificationTime()) {
      // Check that the file's size and modification time are the same
      // as in the file entry (which may have come from a stat cache).
- Diag.Report(diag::err_file_modified) << Entry->getName();
+ if (Diag.isDiagnosticInFlight())
+ Diag.SetDelayedDiagnostic(diag::err_file_modified,
+ Entry->getName());
+ else
+ Diag.Report(diag::err_file_modified) << Entry->getName();
+
Buffer.setInt(true);
}
}
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 8f472b3d3d45..e3d9ed3335a7 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -292,6 +292,7 @@ protected:
virtual void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const {
// PS3 PPU defines.
+ Builder.defineMacro("__PPC__");
Builder.defineMacro("__PPU__");
Builder.defineMacro("__CELLOS_LV2__");
Builder.defineMacro("__ELF__");
diff --git a/lib/Checker/AdjustedReturnValueChecker.cpp b/lib/Checker/AdjustedReturnValueChecker.cpp
index e95a86b838b6..b92f2e705625 100644
--- a/lib/Checker/AdjustedReturnValueChecker.cpp
+++ b/lib/Checker/AdjustedReturnValueChecker.cpp
@@ -14,11 +14,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Basic/SourceManager.h"
-#include "llvm/ADT/SmallString.h"
using namespace clang;
diff --git a/lib/Checker/AggExprVisitor.cpp b/lib/Checker/AggExprVisitor.cpp
new file mode 100644
index 000000000000..343afec18d21
--- /dev/null
+++ b/lib/Checker/AggExprVisitor.cpp
@@ -0,0 +1,55 @@
+//=-- AggExprVisitor.cpp - evaluating expressions of C++ class type -*- C++ -*-=
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the AggExprVisitor class, which contains lots of
+// boilerplate code for evaluating expressions of C++ class type.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
+#include "clang/AST/StmtVisitor.h"
+
+using namespace clang;
+
+namespace {
+class AggExprVisitor : public StmtVisitor<AggExprVisitor> {
+ SVal DestPtr;
+ ExplodedNode *Pred;
+ ExplodedNodeSet &DstSet;
+ GRExprEngine &Eng;
+
+public:
+ AggExprVisitor(SVal dest, ExplodedNode *N, ExplodedNodeSet &dst,
+ GRExprEngine &eng)
+ : DestPtr(dest), Pred(N), DstSet(dst), Eng(eng) {}
+
+ void VisitCastExpr(CastExpr *E);
+ void VisitCXXConstructExpr(CXXConstructExpr *E);
+};
+}
+
+void AggExprVisitor::VisitCastExpr(CastExpr *E) {
+ switch (E->getCastKind()) {
+ default:
+ assert(0 && "Unhandled cast kind");
+ case CastExpr::CK_NoOp:
+ case CastExpr::CK_ConstructorConversion:
+ Visit(E->getSubExpr());
+ break;
+ }
+}
+
+void AggExprVisitor::VisitCXXConstructExpr(CXXConstructExpr *E) {
+ Eng.VisitCXXConstructExpr(E, DestPtr, Pred, DstSet);
+}
+
+void GRExprEngine::VisitAggExpr(const Expr *E, SVal Dest, ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ AggExprVisitor(Dest, Pred, Dst, *this).Visit(const_cast<Expr *>(E));
+}
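
AggExprVisitor relies on StmtVisitor's CRTP dispatch: Visit() switches on the statement class and forwards to the most-derived handler, falling back to empty base-class defaults. A stripped-down standalone sketch of the mechanism (toy Expr hierarchy, not the clang one):

    #include <iostream>

    struct Expr { enum Kind { Cast, Construct } K; };

    template <typename Derived>
    struct ExprVisitor {
      void Visit(Expr *E) {
        Derived *Self = static_cast<Derived *>(this);
        switch (E->K) {
        case Expr::Cast:      Self->VisitCast(E);      break;
        case Expr::Construct: Self->VisitConstruct(E); break;
        }
      }
      void VisitCast(Expr *) {}       // default no-ops; a derived class
      void VisitConstruct(Expr *) {}  // overrides only what it handles
    };

    struct AggVisitor : ExprVisitor<AggVisitor> {
      void VisitConstruct(Expr *) { std::cout << "evaluate ctor\n"; }
    };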
diff --git a/lib/Checker/ArrayBoundChecker.cpp b/lib/Checker/ArrayBoundChecker.cpp
index 74fb06f45564..746b3f95d41e 100644
--- a/lib/Checker/ArrayBoundChecker.cpp
+++ b/lib/Checker/ArrayBoundChecker.cpp
@@ -13,9 +13,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
using namespace clang;
diff --git a/lib/Checker/AttrNonNullChecker.cpp b/lib/Checker/AttrNonNullChecker.cpp
index 83dc13e92b63..309a74ce544b 100644
--- a/lib/Checker/AttrNonNullChecker.cpp
+++ b/lib/Checker/AttrNonNullChecker.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/BasicObjCFoundationChecks.cpp b/lib/Checker/BasicObjCFoundationChecks.cpp
index d6c09a2e04a6..810d0fbb997a 100644
--- a/lib/Checker/BasicObjCFoundationChecks.cpp
+++ b/lib/Checker/BasicObjCFoundationChecks.cpp
@@ -19,9 +19,8 @@
#include "clang/Checker/PathSensitive/GRSimpleAPICheck.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/Checkers/LocalCheckers.h"
#include "clang/AST/DeclObjC.h"
diff --git a/lib/Checker/BugReporter.cpp b/lib/Checker/BugReporter.cpp
index 7272b348581b..12e61afa1010 100644
--- a/lib/Checker/BugReporter.cpp
+++ b/lib/Checker/BugReporter.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/AST/ASTContext.h"
#include "clang/Analysis/CFG.h"
@@ -1139,12 +1140,9 @@ void EdgeBuilder::addContext(const Stmt *S) {
static void GenerateExtensivePathDiagnostic(PathDiagnostic& PD,
PathDiagnosticBuilder &PDB,
const ExplodedNode *N) {
-
-
EdgeBuilder EB(PD, PDB);
- const ExplodedNode* NextNode = N->pred_empty()
- ? NULL : *(N->pred_begin());
+ const ExplodedNode* NextNode = N->pred_empty() ? NULL : *(N->pred_begin());
while (NextNode) {
N = NextNode;
NextNode = GetPredecessorNode(N);
diff --git a/lib/Checker/BugReporterVisitors.cpp b/lib/Checker/BugReporterVisitors.cpp
index 1d6994b94b4b..06cee5bcd1bc 100644
--- a/lib/Checker/BugReporterVisitors.cpp
+++ b/lib/Checker/BugReporterVisitors.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/Checker/BugReporter/BugReporter.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/PathSensitive/ExplodedGraph.h"
#include "clang/Checker/PathSensitive/GRState.h"
using namespace clang;
diff --git a/lib/Checker/CFRefCount.cpp b/lib/Checker/CFRefCount.cpp
index 9a76f6a2a3ad..3c4a27cc07f4 100644
--- a/lib/Checker/CFRefCount.cpp
+++ b/lib/Checker/CFRefCount.cpp
@@ -16,8 +16,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceManager.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/BugReporter/PathDiagnostic.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/BugReporter/PathDiagnostic.h"
#include "clang/Checker/Checkers/LocalCheckers.h"
#include "clang/Checker/DomainSpecific/CocoaConventions.h"
diff --git a/lib/Checker/CMakeLists.txt b/lib/Checker/CMakeLists.txt
index c5bd2eb7cc2c..dec375e65da7 100644
--- a/lib/Checker/CMakeLists.txt
+++ b/lib/Checker/CMakeLists.txt
@@ -2,6 +2,7 @@ set(LLVM_NO_RTTI 1)
add_clang_library(clangChecker
AdjustedReturnValueChecker.cpp
+ AggExprVisitor.cpp
ArrayBoundChecker.cpp
AttrNonNullChecker.cpp
BasicConstraintManager.cpp
@@ -11,16 +12,16 @@ add_clang_library(clangChecker
BugReporter.cpp
BugReporterVisitors.cpp
BuiltinFunctionChecker.cpp
- CFRefCount.cpp
CallAndMessageChecker.cpp
CallInliner.cpp
CastToStructChecker.cpp
+ CFRefCount.cpp
CheckDeadStores.cpp
+ Checker.cpp
CheckObjCDealloc.cpp
CheckObjCInstMethSignature.cpp
CheckSecuritySyntaxOnly.cpp
CheckSizeofPointer.cpp
- Checker.cpp
CocoaConventions.cpp
DereferenceChecker.cpp
DivZeroChecker.cpp
@@ -38,11 +39,11 @@ add_clang_library(clangChecker
MallocChecker.cpp
ManagerRegistry.cpp
MemRegion.cpp
+ NoReturnFunctionChecker.cpp
NSAutoreleasePoolChecker.cpp
NSErrorChecker.cpp
- NoReturnFunctionChecker.cpp
- OSAtomicChecker.cpp
ObjCUnusedIVarsChecker.cpp
+ OSAtomicChecker.cpp
PathDiagnostic.cpp
PointerArithChecker.cpp
PointerSubChecker.cpp
@@ -52,18 +53,18 @@ add_clang_library(clangChecker
ReturnPointerRangeChecker.cpp
ReturnStackAddressChecker.cpp
ReturnUndefChecker.cpp
- SVals.cpp
- SValuator.cpp
SimpleConstraintManager.cpp
SimpleSValuator.cpp
Store.cpp
+ SVals.cpp
+ SValuator.cpp
SymbolManager.cpp
UndefBranchChecker.cpp
UndefCapturedBlockVarChecker.cpp
- UndefResultChecker.cpp
UndefinedArraySubscriptChecker.cpp
UndefinedAssignmentChecker.cpp
+ UndefResultChecker.cpp
UnixAPIChecker.cpp
- VLASizeChecker.cpp
ValueManager.cpp
+ VLASizeChecker.cpp
)
diff --git a/lib/Checker/CallAndMessageChecker.cpp b/lib/Checker/CallAndMessageChecker.cpp
index 32cf7534c8f6..dd1856c9d2d6 100644
--- a/lib/Checker/CallAndMessageChecker.cpp
+++ b/lib/Checker/CallAndMessageChecker.cpp
@@ -12,11 +12,11 @@
//
//===----------------------------------------------------------------------===//
+#include "GRExprEngineInternalChecks.h"
+#include "clang/AST/ParentMap.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/AST/ParentMap.h"
-#include "GRExprEngineInternalChecks.h"
using namespace clang;
diff --git a/lib/Checker/CastToStructChecker.cpp b/lib/Checker/CastToStructChecker.cpp
index bef5bc285ee2..2c16f8905811 100644
--- a/lib/Checker/CastToStructChecker.cpp
+++ b/lib/Checker/CastToStructChecker.cpp
@@ -13,6 +13,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "GRExprEngineInternalChecks.h"
diff --git a/lib/Checker/CheckSecuritySyntaxOnly.cpp b/lib/Checker/CheckSecuritySyntaxOnly.cpp
index 923baf50f3f6..efbce6126146 100644
--- a/lib/Checker/CheckSecuritySyntaxOnly.cpp
+++ b/lib/Checker/CheckSecuritySyntaxOnly.cpp
@@ -36,7 +36,7 @@ class WalkAST : public StmtVisitor<WalkAST> {
IdentifierInfo *II_random;
enum { num_setids = 6 };
IdentifierInfo *II_setid[num_setids];
-
+
const bool CheckRand;
public:
@@ -214,8 +214,8 @@ void WalkAST::CheckLoopConditionForFloat(const ForStmt *FS) {
const DeclRefExpr *drCond = vdLHS == drInc->getDecl() ? drLHS : drRHS;
llvm::SmallVector<SourceRange, 2> ranges;
- std::string sbuf;
- llvm::raw_string_ostream os(sbuf);
+ llvm::SmallString<256> sbuf;
+ llvm::raw_svector_ostream os(sbuf);
os << "Variable '" << drCond->getDecl()->getNameAsCString()
<< "' with floating point type '" << drCond->getType().getAsString()
@@ -315,7 +315,7 @@ void WalkAST::CheckCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
const FunctionProtoType *FPT = dyn_cast<FunctionProtoType>(FD->getType());
if(!FPT)
return;
-
+
   // Verify that the function takes a single argument.
if (FPT->getNumArgs() != 1)
return;
@@ -328,17 +328,16 @@ void WalkAST::CheckCall_mktemp(const CallExpr *CE, const FunctionDecl *FD) {
// Verify that the argument is a 'char*'.
if (PT->getPointeeType().getUnqualifiedType() != BR.getContext().CharTy)
return;
-
+
   // Issue a warning.
SourceRange R = CE->getCallee()->getSourceRange();
BR.EmitBasicReport("Potential insecure temporary file in call 'mktemp'",
"Security",
"Call to function 'mktemp' is insecure as it always "
- "creates or uses insecure temporary file",
+                     "creates or uses an insecure temporary file. Use 'mkstemp' instead",
CE->getLocStart(), &R, 1);
}
-
//===----------------------------------------------------------------------===//
// Check: Linear congruent random number generators should not be used
// Originally: <rdar://problem/63371000>
@@ -386,20 +385,18 @@ void WalkAST::CheckCall_rand(const CallExpr *CE, const FunctionDecl *FD) {
return;
// Issue a warning.
- std::string buf1;
- llvm::raw_string_ostream os1(buf1);
+ llvm::SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
os1 << "'" << FD->getNameAsString() << "' is a poor random number generator";
- std::string buf2;
- llvm::raw_string_ostream os2(buf2);
+ llvm::SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
os2 << "Function '" << FD->getNameAsString()
<< "' is obsolete because it implements a poor random number generator."
<< " Use 'arc4random' instead";
SourceRange R = CE->getCallee()->getSourceRange();
-
- BR.EmitBasicReport(os1.str(), "Security", os2.str(),
- CE->getLocStart(), &R, 1);
+  BR.EmitBasicReport(os1.str(), "Security", os2.str(), CE->getLocStart(), &R, 1);
}
//===----------------------------------------------------------------------===//
@@ -425,8 +422,7 @@ void WalkAST::CheckCall_random(const CallExpr *CE, const FunctionDecl *FD) {
"Security",
"The 'random' function produces a sequence of values that "
"an adversary may be able to predict. Use 'arc4random' "
- "instead",
- CE->getLocStart(), &R, 1);
+ "instead", CE->getLocStart(), &R, 1);
}
//===----------------------------------------------------------------------===//
@@ -474,22 +470,20 @@ void WalkAST::CheckUncheckedReturnValue(CallExpr *CE) {
return;
// Issue a warning.
- std::string buf1;
- llvm::raw_string_ostream os1(buf1);
+ llvm::SmallString<256> buf1;
+ llvm::raw_svector_ostream os1(buf1);
os1 << "Return value is not checked in call to '" << FD->getNameAsString()
<< "'";
- std::string buf2;
- llvm::raw_string_ostream os2(buf2);
+ llvm::SmallString<256> buf2;
+ llvm::raw_svector_ostream os2(buf2);
os2 << "The return value from the call to '" << FD->getNameAsString()
<< "' is not checked. If an error occurs in '"
<< FD->getNameAsString()
<< "', the following code may execute with unexpected privileges";
SourceRange R = CE->getCallee()->getSourceRange();
-
- BR.EmitBasicReport(os1.str(), "Security", os2.str(),
- CE->getLocStart(), &R, 1);
+  BR.EmitBasicReport(os1.str(), "Security", os2.str(), CE->getLocStart(), &R, 1);
}
//===----------------------------------------------------------------------===//
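
The recurring substitution in this file replaces std::string plus raw_string_ostream with SmallString plus raw_svector_ostream, keeping the message in a stack buffer until it outgrows the inline capacity. A short usage sketch against the real LLVM API:

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/raw_ostream.h"

    llvm::StringRef buildMsg(llvm::StringRef Fn, llvm::SmallString<256> &Buf) {
      llvm::raw_svector_ostream OS(Buf);
      OS << "Return value is not checked in call to '" << Fn << "'";
      return OS.str();  // flushes; the result points into Buf's storage
    }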
diff --git a/lib/Checker/DereferenceChecker.cpp b/lib/Checker/DereferenceChecker.cpp
index 0cbc4086701a..af74c79558d5 100644
--- a/lib/Checker/DereferenceChecker.cpp
+++ b/lib/Checker/DereferenceChecker.cpp
@@ -12,11 +12,11 @@
//
//===----------------------------------------------------------------------===//
+#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/Checkers/DereferenceChecker.h"
#include "clang/Checker/PathSensitive/Checker.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "GRExprEngineInternalChecks.h"
using namespace clang;
@@ -29,9 +29,9 @@ public:
DereferenceChecker() : BT_null(0), BT_undef(0) {}
static void *getTag() { static int tag = 0; return &tag; }
void VisitLocation(CheckerContext &C, const Stmt *S, SVal location);
-
+
std::pair<ExplodedNode * const*, ExplodedNode * const*>
- getImplicitNodes() const {
+ getImplicitNodes() const {
return std::make_pair(ImplicitNullDerefNodes.data(),
ImplicitNullDerefNodes.data() +
ImplicitNullDerefNodes.size());
@@ -59,7 +59,7 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
if (ExplodedNode *N = C.GenerateSink()) {
if (!BT_undef)
BT_undef = new BuiltinBug("Dereference of undefined pointer value");
-
+
EnhancedBugReport *report =
new EnhancedBugReport(*BT_undef, BT_undef->getDescription(), N);
report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
@@ -68,31 +68,32 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
}
return;
}
-
+
DefinedOrUnknownSVal location = cast<DefinedOrUnknownSVal>(l);
-
- // Check for null dereferences.
+
+ // Check for null dereferences.
if (!isa<Loc>(location))
return;
-
+
const GRState *state = C.getState();
const GRState *notNullState, *nullState;
llvm::tie(notNullState, nullState) = state->Assume(location);
-
+
// The explicit NULL case.
if (nullState) {
- if (!notNullState) {
+ if (!notNullState) {
// Generate an error node.
ExplodedNode *N = C.GenerateSink(nullState);
if (!N)
return;
-
+
// We know that 'location' cannot be non-null. This is what
- // we call an "explicit" null dereference.
+ // we call an "explicit" null dereference.
if (!BT_null)
BT_null = new BuiltinBug("Dereference of null pointer");
-
+
llvm::SmallString<100> buf;
+ llvm::SmallVector<SourceRange, 2> Ranges;
switch (S->getStmtClass()) {
case Stmt::UnaryOperatorClass: {
@@ -101,10 +102,26 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
if (const DeclRefExpr *DR = dyn_cast<DeclRefExpr>(SU)) {
if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
llvm::raw_svector_ostream os(buf);
- os << "Dereference of null pointer loaded from variable '"
- << VD->getName() << '\'';
+ os << "Dereference of null pointer (loaded from variable '"
+ << VD->getName() << "')";
+ Ranges.push_back(DR->getSourceRange());
}
}
+ break;
+ }
+ case Stmt::MemberExprClass: {
+ const MemberExpr *M = cast<MemberExpr>(S);
+ if (M->isArrow())
+ if (DeclRefExpr *DR =
+ dyn_cast<DeclRefExpr>(M->getBase()->IgnoreParenCasts())) {
+ if (const VarDecl *VD = dyn_cast<VarDecl>(DR->getDecl())) {
+ llvm::raw_svector_ostream os(buf);
+ os << "Field access results in a dereference of a null pointer "
+ "(loaded from variable '" << VD->getName() << "')";
+ Ranges.push_back(M->getBase()->getSourceRange());
+ }
+ }
+ break;
}
default:
break;
@@ -117,19 +134,23 @@ void DereferenceChecker::VisitLocation(CheckerContext &C, const Stmt *S,
report->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue,
bugreporter::GetDerefExpr(N));
-
+
+ for (llvm::SmallVectorImpl<SourceRange>::iterator
+ I = Ranges.begin(), E = Ranges.end(); I!=E; ++I)
+ report->addRange(*I);
+
C.EmitReport(report);
return;
}
else {
// Otherwise, we have the case where the location could either be
// null or not-null. Record the error node as an "implicit" null
- // dereference.
+ // dereference.
if (ExplodedNode *N = C.GenerateSink(nullState))
ImplicitNullDerefNodes.push_back(N);
}
}
-
+
// From this point forward, we know that the location is not null.
C.addTransition(notNullState);
}
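
The new MemberExpr case tailors both the message and the highlighted source range for field accesses through a null pointer. An illustrative input it now reports on:

    struct S { int x; };

    int f() {
      S *p = 0;
      return p->x;  // Field access results in a dereference of a null
                    // pointer (loaded from variable 'p')
    }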
diff --git a/lib/Checker/DivZeroChecker.cpp b/lib/Checker/DivZeroChecker.cpp
index e1346e11b6fd..e09a87149f5c 100644
--- a/lib/Checker/DivZeroChecker.cpp
+++ b/lib/Checker/DivZeroChecker.cpp
@@ -12,8 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/Environment.cpp b/lib/Checker/Environment.cpp
index 671cf89119f3..cc71f8569c62 100644
--- a/lib/Checker/Environment.cpp
+++ b/lib/Checker/Environment.cpp
@@ -10,9 +10,10 @@
 // This file defines the Environment and EnvironmentManager classes.
//
//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/CFG.h"
#include "clang/Checker/PathSensitive/GRState.h"
-#include "clang/Analysis/Analyses/LiveVariables.h"
-#include "llvm/ADT/ImmutableMap.h"
using namespace clang;
diff --git a/lib/Checker/FixedAddressChecker.cpp b/lib/Checker/FixedAddressChecker.cpp
index 04c17d6d7abb..4fce45bd35e8 100644
--- a/lib/Checker/FixedAddressChecker.cpp
+++ b/lib/Checker/FixedAddressChecker.cpp
@@ -13,8 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/GRBlockCounter.cpp b/lib/Checker/GRBlockCounter.cpp
index 3fa3e1ebb9c6..cd26060ef0aa 100644
--- a/lib/Checker/GRBlockCounter.cpp
+++ b/lib/Checker/GRBlockCounter.cpp
@@ -18,7 +18,34 @@
using namespace clang;
-typedef llvm::ImmutableMap<unsigned,unsigned> CountMap;
+namespace {
+
+class CountKey {
+ const StackFrameContext *CallSite;
+ unsigned BlockID;
+
+public:
+ CountKey(const StackFrameContext *CS, unsigned ID)
+ : CallSite(CS), BlockID(ID) {}
+
+ bool operator==(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) && (BlockID == RHS.BlockID);
+ }
+
+ bool operator<(const CountKey &RHS) const {
+ return (CallSite == RHS.CallSite) ? (BlockID < RHS.BlockID)
+ : (CallSite < RHS.CallSite);
+ }
+
+ void Profile(llvm::FoldingSetNodeID &ID) const {
+ ID.AddPointer(CallSite);
+ ID.AddInteger(BlockID);
+ }
+};
+
+}
+
+typedef llvm::ImmutableMap<CountKey, unsigned> CountMap;
static inline CountMap GetMap(void* D) {
return CountMap(static_cast<CountMap::TreeTy*>(D));
@@ -28,9 +55,10 @@ static inline CountMap::Factory& GetFactory(void* F) {
return *static_cast<CountMap::Factory*>(F);
}
-unsigned GRBlockCounter::getNumVisited(unsigned BlockID) const {
+unsigned GRBlockCounter::getNumVisited(const StackFrameContext *CallSite,
+ unsigned BlockID) const {
CountMap M = GetMap(Data);
- CountMap::data_type* T = M.lookup(BlockID);
+ CountMap::data_type* T = M.lookup(CountKey(CallSite, BlockID));
return T ? *T : 0;
}
@@ -43,9 +71,12 @@ GRBlockCounter::Factory::~Factory() {
}
GRBlockCounter
-GRBlockCounter::Factory::IncrementCount(GRBlockCounter BC, unsigned BlockID) {
- return GRBlockCounter(GetFactory(F).Add(GetMap(BC.Data), BlockID,
- BC.getNumVisited(BlockID)+1).getRoot());
+GRBlockCounter::Factory::IncrementCount(GRBlockCounter BC,
+ const StackFrameContext *CallSite,
+ unsigned BlockID) {
+ return GRBlockCounter(GetFactory(F).Add(GetMap(BC.Data),
+ CountKey(CallSite, BlockID),
+ BC.getNumVisited(CallSite, BlockID)+1).getRoot());
}
GRBlockCounter
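
GRBlockCounter now keys visit counts by (call site, block) instead of block ID alone, so a loop re-entered from a different inlined call site gets a fresh visitation budget. A standalone sketch of the scheme, with a plain mutable map standing in for the ImmutableMap:

    #include <map>
    #include <utility>

    typedef std::pair<const void *, unsigned> CountKey;  // (call site, block)

    struct BlockCounter {
      std::map<CountKey, unsigned> Counts;

      unsigned getNumVisited(const void *CallSite, unsigned BlockID) const {
        std::map<CountKey, unsigned>::const_iterator I =
            Counts.find(std::make_pair(CallSite, BlockID));
        return I == Counts.end() ? 0 : I->second;
      }
      void incrementCount(const void *CallSite, unsigned BlockID) {
        ++Counts[std::make_pair(CallSite, BlockID)];
      }
    };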
diff --git a/lib/Checker/GRCoreEngine.cpp b/lib/Checker/GRCoreEngine.cpp
index a9347d01641c..e4ef6b0e106f 100644
--- a/lib/Checker/GRCoreEngine.cpp
+++ b/lib/Checker/GRCoreEngine.cpp
@@ -126,9 +126,9 @@ void GRCoreEngine::ProcessStmt(CFGElement E, GRStmtNodeBuilder& Builder) {
SubEngine.ProcessStmt(E, Builder);
}
-bool GRCoreEngine::ProcessBlockEntrance(CFGBlock* Blk, const GRState* State,
+bool GRCoreEngine::ProcessBlockEntrance(CFGBlock* Blk, const ExplodedNode *Pred,
GRBlockCounter BC) {
- return SubEngine.ProcessBlockEntrance(Blk, State, BC);
+ return SubEngine.ProcessBlockEntrance(Blk, Pred, BC);
}
void GRCoreEngine::ProcessBranch(Stmt* Condition, Stmt* Terminator,
@@ -256,7 +256,7 @@ void GRCoreEngine::HandleBlockEdge(const BlockEdge& L, ExplodedNode* Pred) {
// FIXME: Should we allow ProcessBlockEntrance to also manipulate state?
- if (ProcessBlockEntrance(Blk, Pred->State, WList->getBlockCounter()))
+ if (ProcessBlockEntrance(Blk, Pred, WList->getBlockCounter()))
GenerateNode(BlockEntrance(Blk, Pred->getLocationContext()), Pred->State, Pred);
}
@@ -265,7 +265,9 @@ void GRCoreEngine::HandleBlockEntrance(const BlockEntrance& L,
// Increment the block counter.
GRBlockCounter Counter = WList->getBlockCounter();
- Counter = BCounterFactory.IncrementCount(Counter, L.getBlock()->getBlockID());
+ Counter = BCounterFactory.IncrementCount(Counter,
+ Pred->getLocationContext()->getCurrentStackFrame(),
+ L.getBlock()->getBlockID());
WList->setBlockCounter(Counter);
// Process the entrance of the block.
diff --git a/lib/Checker/GRExprEngine.cpp b/lib/Checker/GRExprEngine.cpp
index 3ace552adcb2..bab8922a8c2b 100644
--- a/lib/Checker/GRExprEngine.cpp
+++ b/lib/Checker/GRExprEngine.cpp
@@ -13,6 +13,8 @@
//
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/AnalysisManager.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/PathSensitive/GRExprEngineBuilders.h"
#include "clang/Checker/PathSensitive/Checker.h"
@@ -582,7 +584,6 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
switch (S->getStmtClass()) {
// C++ stuff we don't support yet.
- case Stmt::CXXMemberCallExprClass:
case Stmt::CXXNamedCastExprClass:
case Stmt::CXXStaticCastExprClass:
case Stmt::CXXDynamicCastExprClass:
@@ -671,6 +672,12 @@ void GRExprEngine::Visit(Stmt* S, ExplodedNode* Pred, ExplodedNodeSet& Dst) {
break;
}
+ case Stmt::CXXMemberCallExprClass: {
+ CXXMemberCallExpr *MCE = cast<CXXMemberCallExpr>(S);
+ VisitCXXMemberCallExpr(MCE, Pred, Dst);
+ break;
+ }
+
     // FIXME: ChooseExpr is really a constant. We need to fix the CFG so
     // that it does not model these as explicit control flow.
@@ -895,6 +902,11 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
return;
}
+ case Stmt::ObjCIsaExprClass:
+ // FIXME: Do something more intelligent with 'x->isa = ...'.
+ // For now, just ignore the assignment.
+ return;
+
case Stmt::ObjCPropertyRefExprClass:
case Stmt::ObjCImplicitSetterGetterRefExprClass:
// FIXME: Property assignments are lvalues, but not really "locations".
@@ -944,10 +956,11 @@ void GRExprEngine::VisitLValue(Expr* Ex, ExplodedNode* Pred,
// Block entrance. (Update counters).
//===----------------------------------------------------------------------===//
-bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const GRState*,
+bool GRExprEngine::ProcessBlockEntrance(CFGBlock* B, const ExplodedNode *Pred,
GRBlockCounter BC) {
- return BC.getNumVisited(B->getBlockID()) < 3;
+ return BC.getNumVisited(Pred->getLocationContext()->getCurrentStackFrame(),
+ B->getBlockID()) < 3;
}
//===----------------------------------------------------------------------===//
@@ -1328,6 +1341,22 @@ void GRExprEngine::ProcessCallExit(GRCallExitNodeBuilder &B) {
if (ReturnedExpr) {
SVal RetVal = state->getSVal(ReturnedExpr);
state = state->BindExpr(CE, RetVal);
+ // Clear the return expr GDM.
+ state = state->remove<ReturnExpr>();
+ }
+
+ // Bind the constructed object value to CXXConstructExpr.
+ if (const CXXConstructExpr *CCE = dyn_cast<CXXConstructExpr>(CE)) {
+ const CXXThisRegion *ThisR = getCXXThisRegion(CCE->getConstructor(),LocCtx);
+    // We might not have a 'this' region in the binding if we didn't
+    // inline the ctor call.
+ SVal ThisV = state->getSVal(ThisR);
+ loc::MemRegionVal *V = dyn_cast<loc::MemRegionVal>(&ThisV);
+ if (V) {
+ SVal ObjVal = state->getSVal(V->getRegion());
+ assert(isa<nonloc::LazyCompoundVal>(ObjVal));
+ state = state->BindExpr(CCE, ObjVal);
+ }
}
B.GenerateNode(state);
@@ -2282,6 +2311,7 @@ void GRExprEngine::VisitCast(CastExpr *CastE, Expr *Ex, ExplodedNode *Pred,
case CastExpr::CK_AnyPointerToObjCPointerCast:
case CastExpr::CK_AnyPointerToBlockPointerCast:
case CastExpr::CK_DerivedToBase:
+ case CastExpr::CK_UncheckedDerivedToBase:
// Delegate to SValuator to process.
for (ExplodedNodeSet::iterator I = S2.begin(), E = S2.end(); I != E; ++I) {
ExplodedNode* N = *I;
@@ -2338,8 +2368,10 @@ void GRExprEngine::VisitDeclStmt(DeclStmt *DS, ExplodedNode *Pred,
ExplodedNodeSet Tmp;
if (InitEx) {
- if (const CXXConstructExpr *E = dyn_cast<CXXConstructExpr>(InitEx)) {
- VisitCXXConstructExpr(E, GetState(Pred)->getLValue(VD,
+ QualType InitTy = InitEx->getType();
+ if (getContext().getLangOptions().CPlusPlus && InitTy->isRecordType()) {
+      // Delegate evaluation of expressions of C++ record type to
+      // AggExprVisitor.
+ VisitAggExpr(InitEx, GetState(Pred)->getLValue(VD,
Pred->getLocationContext()), Pred, Dst);
return;
} else if (VD->getType()->isReferenceType())
@@ -2908,7 +2940,8 @@ void GRExprEngine::VisitReturnStmt(ReturnStmt *RS, ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
ExplodedNodeSet Src;
if (Expr *RetE = RS->getRetValue()) {
- // Record the returned expression in the state.
+ // Record the returned expression in the state. It will be used in
+ // ProcessCallExit to bind the return value to the call expr.
{
static int Tag = 0;
SaveAndRestore<const void *> OldTag(Builder->Tag, &Tag);
@@ -3137,6 +3170,10 @@ void GRExprEngine::CreateCXXTemporaryObject(Expr *Ex, ExplodedNode *Pred,
void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
ExplodedNode *Pred,
ExplodedNodeSet &Dst) {
+ if (E->isElidable()) {
+ VisitAggExpr(E->getArg(0), Dest, Pred, Dst);
+ return;
+ }
const CXXConstructorDecl *CD = E->getConstructor();
assert(CD);
@@ -3190,10 +3227,7 @@ void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
Pred->getLocationContext(),
E, Builder->getBlock(), Builder->getIndex());
- Type *T = CD->getParent()->getTypeForDecl();
- QualType PT = getContext().getPointerType(QualType(T,0));
- const CXXThisRegion *ThisR = ValMgr.getRegionManager().getCXXThisRegion(PT,
- SFC);
+ const CXXThisRegion *ThisR = getCXXThisRegion(E->getConstructor(), SFC);
CallEnter Loc(E, CD, Pred->getLocationContext());
for (ExplodedNodeSet::iterator NI = ArgsEvaluated.begin(),
@@ -3206,6 +3240,91 @@ void GRExprEngine::VisitCXXConstructExpr(const CXXConstructExpr *E, SVal Dest,
Dst.Add(N);
}
}
+
+void GRExprEngine::VisitCXXMemberCallExpr(const CXXMemberCallExpr *MCE,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ // Get the method type.
+ const FunctionProtoType *FnType =
+ MCE->getCallee()->getType()->getAs<FunctionProtoType>();
+ assert(FnType && "Method type not available");
+
+ // Evaluate explicit arguments with a worklist.
+ CallExpr::arg_iterator AB = const_cast<CXXMemberCallExpr*>(MCE)->arg_begin(),
+ AE = const_cast<CXXMemberCallExpr*>(MCE)->arg_end();
+ llvm::SmallVector<CallExprWLItem, 20> WorkList;
+ WorkList.reserve(AE - AB);
+ WorkList.push_back(CallExprWLItem(AB, Pred));
+ ExplodedNodeSet ArgsEvaluated;
+
+ while (!WorkList.empty()) {
+ CallExprWLItem Item = WorkList.back();
+ WorkList.pop_back();
+
+ if (Item.I == AE) {
+ ArgsEvaluated.insert(Item.N);
+ continue;
+ }
+
+ ExplodedNodeSet Tmp;
+ const unsigned ParamIdx = Item.I - AB;
+ bool VisitAsLvalue = FnType->getArgType(ParamIdx)->isReferenceType();
+
+ if (VisitAsLvalue)
+ VisitLValue(*Item.I, Item.N, Tmp);
+ else
+ Visit(*Item.I, Item.N, Tmp);
+
+ ++(Item.I);
+ for (ExplodedNodeSet::iterator NI=Tmp.begin(), NE=Tmp.end(); NI != NE; ++NI)
+ WorkList.push_back(CallExprWLItem(Item.I, *NI));
+ }
+ // Evaluate the implicit object argument.
+ ExplodedNodeSet AllArgsEvaluated;
+ const MemberExpr *ME = dyn_cast<MemberExpr>(MCE->getCallee()->IgnoreParens());
+ if (!ME)
+ return;
+ Expr *ObjArgExpr = ME->getBase();
+ for (ExplodedNodeSet::iterator I = ArgsEvaluated.begin(),
+ E = ArgsEvaluated.end(); I != E; ++I) {
+ if (ME->isArrow())
+ Visit(ObjArgExpr, *I, AllArgsEvaluated);
+ else
+ VisitLValue(ObjArgExpr, *I, AllArgsEvaluated);
+ }
+
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(ME->getMemberDecl());
+ assert(MD && "not a CXXMethodDecl?");
+
+ if (!MD->isThisDeclarationADefinition())
+ // FIXME: conservative method call evaluation.
+ return;
+
+ const StackFrameContext *SFC = AMgr.getStackFrame(MD,
+ Pred->getLocationContext(),
+ MCE,
+ Builder->getBlock(),
+ Builder->getIndex());
+ const CXXThisRegion *ThisR = getCXXThisRegion(MD, SFC);
+ CallEnter Loc(MCE, MD, Pred->getLocationContext());
+ for (ExplodedNodeSet::iterator I = AllArgsEvaluated.begin(),
+ E = AllArgsEvaluated.end(); I != E; ++I) {
+ // Set up 'this' region.
+ const GRState *state = GetState(*I);
+ state = state->bindLoc(loc::MemRegionVal(ThisR),state->getSVal(ObjArgExpr));
+ ExplodedNode *N = Builder->generateNode(Loc, state, *I);
+ if (N)
+ Dst.Add(N);
+ }
+}
+
+const CXXThisRegion *GRExprEngine::getCXXThisRegion(const CXXMethodDecl *D,
+ const StackFrameContext *SFC) {
+ Type *T = D->getParent()->getTypeForDecl();
+ QualType PT = getContext().getPointerType(QualType(T,0));
+ return ValMgr.getRegionManager().getCXXThisRegion(PT, SFC);
+}
+
//===----------------------------------------------------------------------===//
// Checker registration/lookup.
//===----------------------------------------------------------------------===//
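
VisitCXXMemberCallExpr evaluates arguments with the analyzer's usual worklist idiom: each item pairs the next argument iterator with the node reached so far, and every successor produced by visiting one argument re-enters the list with the iterator advanced, so branching evaluations fan out into multiple fully evaluated end nodes. A simplified standalone sketch of just that control flow (no analyzer state):

    #include <vector>

    struct Node {};  // stands in for ExplodedNode

    struct WLItem {
      unsigned ArgIdx;  // next argument to evaluate
      Node *N;          // node reached so far
      WLItem(unsigned I, Node *P) : ArgIdx(I), N(P) {}
    };

    // visitArg stands in for Visit/VisitLValue: evaluating one argument
    // from one node may fan out into several successor nodes.
    void evalArgs(unsigned NumArgs, Node *Pred,
                  std::vector<Node *> (*visitArg)(unsigned, Node *),
                  std::vector<Node *> &ArgsEvaluated) {
      std::vector<WLItem> WL;
      WL.push_back(WLItem(0, Pred));
      while (!WL.empty()) {
        WLItem Item = WL.back();
        WL.pop_back();
        if (Item.ArgIdx == NumArgs) {  // every argument done on this path
          ArgsEvaluated.push_back(Item.N);
          continue;
        }
        std::vector<Node *> Succs = visitArg(Item.ArgIdx, Item.N);
        for (unsigned i = 0, e = Succs.size(); i != e; ++i)
          WL.push_back(WLItem(Item.ArgIdx + 1, Succs[i]));
      }
    }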
diff --git a/lib/Checker/GRState.cpp b/lib/Checker/GRState.cpp
index 2defbcd93c01..f68e10b0cbc9 100644
--- a/lib/Checker/GRState.cpp
+++ b/lib/Checker/GRState.cpp
@@ -11,10 +11,10 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Analysis/CFG.h"
#include "clang/Checker/PathSensitive/GRStateTrait.h"
#include "clang/Checker/PathSensitive/GRState.h"
#include "clang/Checker/PathSensitive/GRTransferFuncs.h"
-#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
@@ -227,6 +227,18 @@ const GRState* GRStateManager::addGDM(const GRState* St, void* Key, void* Data){
return getPersistentState(NewSt);
}
+const GRState *GRStateManager::removeGDM(const GRState *state, void *Key) {
+ GRState::GenericDataMap OldM = state->getGDM();
+ GRState::GenericDataMap NewM = GDMFactory.Remove(OldM, Key);
+
+ if (NewM == OldM)
+ return state;
+
+ GRState NewState = *state;
+ NewState.GDM = NewM;
+ return getPersistentState(NewState);
+}
+
//===----------------------------------------------------------------------===//
// Utility.
//===----------------------------------------------------------------------===//
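
The new removeGDM follows the same persistence discipline as addGDM: when removal is a no-op, the original state pointer comes back unchanged, so callers can rely on cheap pointer identity. A standalone sketch of the contract, with an ordinary map standing in for the immutable one:

    #include <map>

    typedef std::map<const void *, const void *> GDM;

    // Returns M untouched when Key is absent, mirroring the
    // "NewM == OldM => return state" shortcut in removeGDM.
    GDM removeKey(const GDM &M, const void *Key, bool &Changed) {
      GDM::const_iterator I = M.find(Key);
      if (I == M.end()) {
        Changed = false;
        return M;
      }
      GDM N(M);
      N.erase(Key);
      Changed = true;
      return N;
    }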
diff --git a/lib/Checker/MacOSXAPIChecker.cpp b/lib/Checker/MacOSXAPIChecker.cpp
index 9621e853bc48..bcd96e73305e 100644
--- a/lib/Checker/MacOSXAPIChecker.cpp
+++ b/lib/Checker/MacOSXAPIChecker.cpp
@@ -17,7 +17,7 @@
#include "GRExprEngineInternalChecks.h"
#include "clang/Basic/TargetInfo.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRStateTrait.h"
#include "llvm/ADT/SmallString.h"
diff --git a/lib/Checker/MallocChecker.cpp b/lib/Checker/MallocChecker.cpp
index a08afc4b7959..a22df3046920 100644
--- a/lib/Checker/MallocChecker.cpp
+++ b/lib/Checker/MallocChecker.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineExperimentalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRState.h"
#include "clang/Checker/PathSensitive/GRStateTrait.h"
diff --git a/lib/Checker/MemRegion.cpp b/lib/Checker/MemRegion.cpp
index 9a26988fcf1d..9f12ab622fbf 100644
--- a/lib/Checker/MemRegion.cpp
+++ b/lib/Checker/MemRegion.cpp
@@ -13,10 +13,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Analysis/AnalysisContext.h"
#include "clang/Checker/PathSensitive/MemRegion.h"
+#include "clang/Analysis/AnalysisContext.h"
+#include "clang/Analysis/Support/BumpVector.h"
#include "clang/AST/CharUnits.h"
-#include "clang/AST/StmtVisitor.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/lib/Checker/NSErrorChecker.cpp b/lib/Checker/NSErrorChecker.cpp
index e428e2e83f2a..9130bfad8407 100644
--- a/lib/Checker/NSErrorChecker.cpp
+++ b/lib/Checker/NSErrorChecker.cpp
@@ -16,7 +16,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Checker/Checkers/LocalCheckers.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
#include "clang/Checker/Checkers/DereferenceChecker.h"
#include "BasicObjCFoundationChecks.h"
diff --git a/lib/Checker/NoReturnFunctionChecker.cpp b/lib/Checker/NoReturnFunctionChecker.cpp
index 1455d87665db..12527e076221 100644
--- a/lib/Checker/NoReturnFunctionChecker.cpp
+++ b/lib/Checker/NoReturnFunctionChecker.cpp
@@ -13,17 +13,17 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/Checker.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
namespace {
-class NoReturnFunctionChecker : public Checker {
+class NoReturnFunctionChecker : public CheckerVisitor<NoReturnFunctionChecker> {
public:
static void *getTag() { static int tag = 0; return &tag; }
- virtual bool EvalCallExpr(CheckerContext &C, const CallExpr *CE);
+ void PostVisitCallExpr(CheckerContext &C, const CallExpr *CE);
};
}
@@ -32,48 +32,48 @@ void clang::RegisterNoReturnFunctionChecker(GRExprEngine &Eng) {
Eng.registerCheck(new NoReturnFunctionChecker());
}
-bool NoReturnFunctionChecker::EvalCallExpr(CheckerContext &C,
- const CallExpr *CE) {
+void NoReturnFunctionChecker::PostVisitCallExpr(CheckerContext &C,
+ const CallExpr *CE) {
const GRState *state = C.getState();
const Expr *Callee = CE->getCallee();
- SVal L = state->getSVal(Callee);
- const FunctionDecl *FD = L.getAsFunctionDecl();
- if (!FD)
- return false;
- bool BuildSinks = false;
+ bool BuildSinks = getFunctionExtInfo(Callee->getType()).getNoReturn();
- if (FD->getAttr<NoReturnAttr>() || FD->getAttr<AnalyzerNoReturnAttr>())
- BuildSinks = true;
- else if (const IdentifierInfo *II = FD->getIdentifier()) {
- // HACK: Some functions are not marked noreturn, and don't return.
- // Here are a few hardwired ones. If this takes too long, we can
- // potentially cache these results.
- BuildSinks
- = llvm::StringSwitch<bool>(llvm::StringRef(II->getName()))
- .Case("exit", true)
- .Case("panic", true)
- .Case("error", true)
- .Case("Assert", true)
- // FIXME: This is just a wrapper around throwing an exception.
- // Eventually inter-procedural analysis should handle this easily.
- .Case("ziperr", true)
- .Case("assfail", true)
- .Case("db_error", true)
- .Case("__assert", true)
- .Case("__assert_rtn", true)
- .Case("__assert_fail", true)
- .Case("dtrace_assfail", true)
- .Case("yy_fatal_error", true)
- .Case("_XCAssertionFailureHandler", true)
- .Case("_DTAssertionFailureHandler", true)
- .Case("_TSAssertionFailureHandler", true)
- .Default(false);
+ if (!BuildSinks) {
+ SVal L = state->getSVal(Callee);
+ const FunctionDecl *FD = L.getAsFunctionDecl();
+ if (!FD)
+ return;
+
+ if (FD->getAttr<AnalyzerNoReturnAttr>())
+ BuildSinks = true;
+ else if (const IdentifierInfo *II = FD->getIdentifier()) {
+ // HACK: Some functions are not marked noreturn, and don't return.
+ // Here are a few hardwired ones. If this takes too long, we can
+ // potentially cache these results.
+ BuildSinks
+ = llvm::StringSwitch<bool>(llvm::StringRef(II->getName()))
+ .Case("exit", true)
+ .Case("panic", true)
+ .Case("error", true)
+ .Case("Assert", true)
+ // FIXME: This is just a wrapper around throwing an exception.
+ // Eventually inter-procedural analysis should handle this easily.
+ .Case("ziperr", true)
+ .Case("assfail", true)
+ .Case("db_error", true)
+ .Case("__assert", true)
+ .Case("__assert_rtn", true)
+ .Case("__assert_fail", true)
+ .Case("dtrace_assfail", true)
+ .Case("yy_fatal_error", true)
+ .Case("_XCAssertionFailureHandler", true)
+ .Case("_DTAssertionFailureHandler", true)
+ .Case("_TSAssertionFailureHandler", true)
+ .Default(false);
+ }
}
-
- if (!BuildSinks)
- return false;
- C.GenerateSink(CE);
- return true;
+ if (BuildSinks)
+ C.GenerateSink(CE);
}
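
The hardwired fallback table survives the rewrite, still built with llvm::StringSwitch. A minimal usage sketch of that utility:

    #include "llvm/ADT/StringSwitch.h"

    static bool isKnownNoReturn(llvm::StringRef Name) {
      return llvm::StringSwitch<bool>(Name)
          .Case("exit", true)
          .Case("panic", true)
          .Case("__assert_fail", true)
          .Default(false);
    }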
diff --git a/lib/Checker/PathDiagnostic.cpp b/lib/Checker/PathDiagnostic.cpp
index 97500d95785c..963923c9ad10 100644
--- a/lib/Checker/PathDiagnostic.cpp
+++ b/lib/Checker/PathDiagnostic.cpp
@@ -108,8 +108,8 @@ void PathDiagnosticClient::HandleDiagnostic(Diagnostic::Level DiagLevel,
for (unsigned i = 0, e = Info.getNumRanges(); i != e; ++i)
P->addRange(Info.getRange(i));
- for (unsigned i = 0, e = Info.getNumCodeModificationHints(); i != e; ++i)
- P->addCodeModificationHint(Info.getCodeModificationHint(i));
+ for (unsigned i = 0, e = Info.getNumFixItHints(); i != e; ++i)
+ P->addFixItHint(Info.getFixItHint(i));
D->push_front(P);
HandlePathDiagnostic(D);
diff --git a/lib/Checker/PointerArithChecker.cpp b/lib/Checker/PointerArithChecker.cpp
index 3d62d0c7b9d7..ed60c42613fe 100644
--- a/lib/Checker/PointerArithChecker.cpp
+++ b/lib/Checker/PointerArithChecker.cpp
@@ -12,8 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/PointerSubChecker.cpp b/lib/Checker/PointerSubChecker.cpp
index acc848ac8edb..bc0fd24d19b4 100644
--- a/lib/Checker/PointerSubChecker.cpp
+++ b/lib/Checker/PointerSubChecker.cpp
@@ -13,8 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/RegionStore.cpp b/lib/Checker/RegionStore.cpp
index c2b702acad9a..c97da33aaac6 100644
--- a/lib/Checker/RegionStore.cpp
+++ b/lib/Checker/RegionStore.cpp
@@ -536,15 +536,15 @@ public:
// First visit the cluster.
static_cast<DERIVED*>(this)->VisitCluster(baseR, C->begin(), C->end());
- // Next, visit the region.
- static_cast<DERIVED*>(this)->VisitRegion(baseR);
+ // Next, visit the base region.
+ static_cast<DERIVED*>(this)->VisitBaseRegion(baseR);
}
}
public:
void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C) {}
void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E) {}
- void VisitRegion(const MemRegion *baseR) {}
+ void VisitBaseRegion(const MemRegion *baseR) {}
};
}
@@ -580,7 +580,7 @@ public:
Ex(ex), Count(count), IS(is) {}
void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
- void VisitRegion(const MemRegion *baseR);
+ void VisitBaseRegion(const MemRegion *baseR);
private:
void VisitBinding(SVal V);
@@ -627,7 +627,7 @@ void InvalidateRegionsWorker::VisitCluster(const MemRegion *baseR,
}
}
-void InvalidateRegionsWorker::VisitRegion(const MemRegion *baseR) {
+void InvalidateRegionsWorker::VisitBaseRegion(const MemRegion *baseR) {
if (IS) {
// Symbolic region? Mark that symbol touched by the invalidation.
if (const SymbolicRegion *SR = dyn_cast<SymbolicRegion>(baseR))
@@ -787,9 +787,12 @@ DefinedOrUnknownSVal RegionStoreManager::getSizeInElements(const GRState *state,
return ValMgr.makeIntVal(CAT->getSize(), false);
}
- // Clients can use ordinary variables as if they were arrays. These
- // essentially are arrays of size 1.
- return ValMgr.makeIntVal(1, false);
+  // Clients can reinterpret ordinary variables as arrays, possibly of
+  // another type. The element count is rounded down so that an access
+  // is entirely within bounds.
+ CharUnits VarSize = getContext().getTypeSizeInChars(T);
+ CharUnits EleSize = getContext().getTypeSizeInChars(EleTy);
+ return ValMgr.makeIntVal(VarSize / EleSize, false);
}
}
@@ -963,7 +966,7 @@ Optional<SVal> RegionStoreManager::getDefaultBinding(RegionBindings B,
Optional<SVal> RegionStoreManager::getBinding(RegionBindings B,
const MemRegion *R) {
- if (Optional<SVal> V = getDirectBinding(B, R))
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
return V;
return getDefaultBinding(B, R);
@@ -1044,7 +1047,7 @@ SVal RegionStoreManager::Retrieve(Store store, Loc L, QualType T) {
}
#endif
- if (RTy->isStructureType())
+ if (RTy->isStructureType() || RTy->isClassType())
return RetrieveStruct(store, R);
// FIXME: Handle unions.
@@ -1144,7 +1147,7 @@ SVal RegionStoreManager::RetrieveElement(Store store,
const ElementRegion* R) {
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
- if (Optional<SVal> V = getDirectBinding(B, R))
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
return *V;
const MemRegion* superR = R->getSuperRegion();
@@ -1175,7 +1178,7 @@ SVal RegionStoreManager::RetrieveElement(Store store,
}
// Check if the immediate super region has a direct binding.
- if (Optional<SVal> V = getDirectBinding(B, superR)) {
+ if (const Optional<SVal> &V = getDirectBinding(B, superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
@@ -1203,7 +1206,7 @@ SVal RegionStoreManager::RetrieveField(Store store,
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
- if (Optional<SVal> V = getDirectBinding(B, R))
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
return *V;
QualType Ty = R->getValueType(getContext());
@@ -1278,13 +1281,13 @@ SVal RegionStoreManager::RetrieveObjCIvar(Store store, const ObjCIvarRegion* R){
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
- if (Optional<SVal> V = getDirectBinding(B, R))
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
return *V;
const MemRegion *superR = R->getSuperRegion();
// Check if the super region has a default binding.
- if (Optional<SVal> V = getDefaultBinding(B, superR)) {
+ if (const Optional<SVal> &V = getDefaultBinding(B, superR)) {
if (SymbolRef parentSym = V->getAsSymbol())
return ValMgr.getDerivedRegionValueSymbolVal(parentSym, R);
@@ -1300,7 +1303,7 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
// Check if the region has a binding.
RegionBindings B = GetRegionBindings(store);
- if (Optional<SVal> V = getDirectBinding(B, R))
+ if (const Optional<SVal> &V = getDirectBinding(B, R))
return *V;
// Lazily derive a value for the VarRegion.
@@ -1313,8 +1316,23 @@ SVal RegionStoreManager::RetrieveVar(Store store, const VarRegion *R) {
return ValMgr.getRegionValueSymbolVal(R);
if (isa<GlobalsSpaceRegion>(MS)) {
- if (VD->isFileVarDecl())
+ if (VD->isFileVarDecl()) {
+ // Is 'VD' declared constant? If so, retrieve the constant value.
+ QualType CT = Ctx.getCanonicalType(T);
+ if (CT.isConstQualified()) {
+ const Expr *Init = VD->getInit();
+ // Do the null check first, as we want to call 'IgnoreParenCasts'.
+ if (Init)
+ if (const IntegerLiteral *IL =
+ dyn_cast<IntegerLiteral>(Init->IgnoreParenCasts())) {
+ const nonloc::ConcreteInt &V = ValMgr.makeIntVal(IL);
+ return ValMgr.getSValuator().EvalCast(V, Init->getType(),
+ IL->getType());
+ }
+ }
+
return ValMgr.getRegionValueSymbolVal(R);
+ }
if (T->isIntegerType())
return ValMgr.makeIntVal(0, T);
@@ -1337,8 +1355,7 @@ SVal RegionStoreManager::RetrieveLazySymbol(const TypedRegion *R) {
SVal RegionStoreManager::RetrieveStruct(Store store, const TypedRegion* R) {
QualType T = R->getValueType(getContext());
- assert(T->isStructureType());
- assert(T->getAsStructureType()->getDecl()->isDefinition());
+ assert(T->isStructureType() || T->isClassType());
return ValMgr.makeLazyCompoundVal(store, R);
}
@@ -1692,8 +1709,8 @@ public:
// Called by ClusterAnalysis.
void VisitAddedToCluster(const MemRegion *baseR, RegionCluster &C);
void VisitCluster(const MemRegion *baseR, BindingKey *I, BindingKey *E);
- void VisitRegion(const MemRegion *baseR);
+ void VisitBindingKey(BindingKey K);
bool UpdatePostponed();
void VisitBinding(SVal V);
};
@@ -1730,11 +1747,8 @@ void RemoveDeadBindingsWorker::VisitAddedToCluster(const MemRegion *baseR,
void RemoveDeadBindingsWorker::VisitCluster(const MemRegion *baseR,
BindingKey *I, BindingKey *E) {
- for ( ; I != E; ++I) {
- const MemRegion *R = I->getRegion();
- if (R != baseR)
- VisitRegion(R);
- }
+ for ( ; I != E; ++I)
+ VisitBindingKey(*I);
}
void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
@@ -1762,34 +1776,36 @@ void RemoveDeadBindingsWorker::VisitBinding(SVal V) {
SymReaper.markLive(*SI);
}
-void RemoveDeadBindingsWorker::VisitRegion(const MemRegion *R) {
+void RemoveDeadBindingsWorker::VisitBindingKey(BindingKey K) {
+ const MemRegion *R = K.getRegion();
+
// Mark this region "live" by adding it to the worklist. This will cause
   // us to visit all regions in the cluster (if we haven't visited them
// already).
- AddToWorkList(R);
-
- // Mark the symbol for any live SymbolicRegion as "live". This means we
- // should continue to track that symbol.
- if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
- SymReaper.markLive(SymR->getSymbol());
+ if (AddToWorkList(R)) {
+ // Mark the symbol for any live SymbolicRegion as "live". This means we
+ // should continue to track that symbol.
+ if (const SymbolicRegion *SymR = dyn_cast<SymbolicRegion>(R))
+ SymReaper.markLive(SymR->getSymbol());
+
+      // For BlockDataRegions, enqueue the VarRegions for variables marked
+      // with __block (passed by reference) via BlockDeclRefExprs.
+ if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
+ for (BlockDataRegion::referenced_vars_iterator
+ RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
+ RI != RE; ++RI) {
+ if ((*RI)->getDecl()->getAttr<BlocksAttr>())
+ AddToWorkList(*RI);
+ }
- // For BlockDataRegions, enqueue the VarRegions for variables marked
- // with __block (passed-by-reference).
- // via BlockDeclRefExprs.
- if (const BlockDataRegion *BD = dyn_cast<BlockDataRegion>(R)) {
- for (BlockDataRegion::referenced_vars_iterator
- RI = BD->referenced_vars_begin(), RE = BD->referenced_vars_end();
- RI != RE; ++RI) {
- if ((*RI)->getDecl()->getAttr<BlocksAttr>())
- AddToWorkList(*RI);
+ // No possible data bindings on a BlockDataRegion.
+ return;
}
-
- // No possible data bindings on a BlockDataRegion.
- return;
}
- // Get the data binding for R (if any).
- if (Optional<SVal> V = RM.getBinding(B, R))
+ // Visit the data binding for K.
+ if (const SVal *V = RM.Lookup(B, K))
VisitBinding(*V);
}
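
The getSizeInElements change earlier in this file replaces the fixed answer of one element with VarSize / EleSize, rounded down so that every reported element fits entirely within the variable. A worked example of the arithmetic, assuming typical 4-byte int, 2-byte short, and 8-byte double:

    #include <cassert>

    int main() {
      // An 'int' reinterpreted as an array of 'short': two whole elements.
      assert(sizeof(int) / sizeof(short) == 2);
      // Storage smaller than one element rounds down to zero elements,
      // so any access through the larger type is out of bounds.
      assert(sizeof(char) / sizeof(double) == 0);
      return 0;
    }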
diff --git a/lib/Checker/ReturnPointerRangeChecker.cpp b/lib/Checker/ReturnPointerRangeChecker.cpp
index 949ded507c5b..14edf5668983 100644
--- a/lib/Checker/ReturnPointerRangeChecker.cpp
+++ b/lib/Checker/ReturnPointerRangeChecker.cpp
@@ -13,9 +13,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
using namespace clang;
diff --git a/lib/Checker/ReturnStackAddressChecker.cpp b/lib/Checker/ReturnStackAddressChecker.cpp
index 9cbabba4a5f5..35b1cdebf620 100644
--- a/lib/Checker/ReturnStackAddressChecker.cpp
+++ b/lib/Checker/ReturnStackAddressChecker.cpp
@@ -14,8 +14,8 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
diff --git a/lib/Checker/ReturnUndefChecker.cpp b/lib/Checker/ReturnUndefChecker.cpp
index ee259883e48c..52a0b3076b6a 100644
--- a/lib/Checker/ReturnUndefChecker.cpp
+++ b/lib/Checker/ReturnUndefChecker.cpp
@@ -14,10 +14,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
-#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "llvm/ADT/SmallString.h"
+#include "clang/Checker/PathSensitive/GRExprEngine.h"
using namespace clang;
diff --git a/lib/Checker/SymbolManager.cpp b/lib/Checker/SymbolManager.cpp
index 65a46e31fefa..f3a803c57d32 100644
--- a/lib/Checker/SymbolManager.cpp
+++ b/lib/Checker/SymbolManager.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Checker/PathSensitive/SymbolManager.h"
+#include "clang/Analysis/Analyses/LiveVariables.h"
#include "clang/Checker/PathSensitive/MemRegion.h"
-#include "clang/Analysis/AnalysisContext.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/lib/Checker/UndefBranchChecker.cpp b/lib/Checker/UndefBranchChecker.cpp
index e047b187b108..90883456b17c 100644
--- a/lib/Checker/UndefBranchChecker.cpp
+++ b/lib/Checker/UndefBranchChecker.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/Checker.h"
using namespace clang;
diff --git a/lib/Checker/UndefCapturedBlockVarChecker.cpp b/lib/Checker/UndefCapturedBlockVarChecker.cpp
index a8d7284b40ac..b1010c9c4892 100644
--- a/lib/Checker/UndefCapturedBlockVarChecker.cpp
+++ b/lib/Checker/UndefCapturedBlockVarChecker.cpp
@@ -14,7 +14,7 @@
#include "GRExprEngineInternalChecks.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "llvm/Support/raw_ostream.h"
using namespace clang;
diff --git a/lib/Checker/UndefResultChecker.cpp b/lib/Checker/UndefResultChecker.cpp
index fb2283a62044..8b07aed10e30 100644
--- a/lib/Checker/UndefResultChecker.cpp
+++ b/lib/Checker/UndefResultChecker.cpp
@@ -13,9 +13,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
using namespace clang;
diff --git a/lib/Checker/UndefinedArraySubscriptChecker.cpp b/lib/Checker/UndefinedArraySubscriptChecker.cpp
index a2792ad17ba1..148629e0093f 100644
--- a/lib/Checker/UndefinedArraySubscriptChecker.cpp
+++ b/lib/Checker/UndefinedArraySubscriptChecker.cpp
@@ -12,9 +12,9 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
+#include "clang/Checker/PathSensitive/CheckerVisitor.h"
using namespace clang;
diff --git a/lib/Checker/UndefinedAssignmentChecker.cpp b/lib/Checker/UndefinedAssignmentChecker.cpp
index 7c33c1d39235..6cef60eaee29 100644
--- a/lib/Checker/UndefinedAssignmentChecker.cpp
+++ b/lib/Checker/UndefinedAssignmentChecker.cpp
@@ -13,8 +13,8 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
using namespace clang;
@@ -53,27 +53,43 @@ void UndefinedAssignmentChecker::PreVisitBind(CheckerContext &C,
if (!N)
return;
+ const char *str = "Assigned value is garbage or undefined";
+
if (!BT)
- BT = new BuiltinBug("Assigned value is garbage or undefined");
+ BT = new BuiltinBug(str);
// Generate a report for this bug.
- EnhancedBugReport *R = new EnhancedBugReport(*BT, BT->getName(), N);
+ const Expr *ex = 0;
- if (AssignE) {
- const Expr *ex = 0;
+ while (AssignE) {
+ if (const BinaryOperator *B = dyn_cast<BinaryOperator>(AssignE)) {
+ if (B->isCompoundAssignmentOp()) {
+ const GRState *state = C.getState();
+ if (state->getSVal(B->getLHS()).isUndef()) {
+ str = "The left expression of the compound assignment is an "
+ "uninitialized value. The computed value will also be garbage";
+ ex = B->getLHS();
+ break;
+ }
+ }
- if (const BinaryOperator *B = dyn_cast<BinaryOperator>(AssignE))
ex = B->getRHS();
- else if (const DeclStmt *DS = dyn_cast<DeclStmt>(AssignE)) {
+ break;
+ }
+
+ if (const DeclStmt *DS = dyn_cast<DeclStmt>(AssignE)) {
const VarDecl* VD = dyn_cast<VarDecl>(DS->getSingleDecl());
ex = VD->getInit();
}
- if (ex) {
- R->addRange(ex->getSourceRange());
- R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, ex);
- }
+
+ break;
}
+ EnhancedBugReport *R = new EnhancedBugReport(*BT, str, N);
+ if (ex) {
+ R->addRange(ex->getSourceRange());
+ R->addVisitorCreator(bugreporter::registerTrackNullOrUndefValue, ex);
+ }
C.EmitReport(R);
-}
+}
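// [Editor's note: illustrative sketch, not part of the upstream patch.]
// Analyzer inputs that exercise the two report strings introduced above --
// the default message and the new compound-assignment message:

void test_undef_assign() {
  int u; // uninitialized
  int v;
  v = u;  // "Assigned value is garbage or undefined"
  int w;
  w += 1; // compound assignment with undefined LHS -> the new message
}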
diff --git a/lib/Checker/UnixAPIChecker.cpp b/lib/Checker/UnixAPIChecker.cpp
index 7ff817ae7677..d75e5d25c49d 100644
--- a/lib/Checker/UnixAPIChecker.cpp
+++ b/lib/Checker/UnixAPIChecker.cpp
@@ -12,11 +12,10 @@
//
//===----------------------------------------------------------------------===//
+#include "GRExprEngineInternalChecks.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
-#include "clang/Checker/PathSensitive/GRStateTrait.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "llvm/ADT/StringSwitch.h"
-#include "GRExprEngineInternalChecks.h"
#include <fcntl.h>
using namespace clang;
diff --git a/lib/Checker/VLASizeChecker.cpp b/lib/Checker/VLASizeChecker.cpp
index 51ad1e2daf5e..cea9d191aa77 100644
--- a/lib/Checker/VLASizeChecker.cpp
+++ b/lib/Checker/VLASizeChecker.cpp
@@ -13,9 +13,9 @@
//===----------------------------------------------------------------------===//
#include "GRExprEngineInternalChecks.h"
+#include "clang/Checker/BugReporter/BugType.h"
#include "clang/Checker/PathSensitive/CheckerVisitor.h"
#include "clang/Checker/PathSensitive/GRExprEngine.h"
-#include "clang/Checker/BugReporter/BugReporter.h"
using namespace clang;
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index c10a401d8abf..509734123b0c 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -17,6 +17,7 @@
#include "CodeGenModule.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/Module.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/Target/TargetData.h"
#include <algorithm>
@@ -192,7 +193,7 @@ llvm::Value *CodeGenFunction::BuildBlockLiteralTmp(const BlockExpr *BE) {
CallArgList Args;
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, Args,
- CC_Default, false);
+ FunctionType::ExtInfo());
if (CGM.ReturnTypeUsesSret(FnInfo))
flags |= BLOCK_USE_STRET;
}
@@ -472,8 +473,8 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr* E,
QualType ResultType = FuncTy->getResultType();
const CGFunctionInfo &FnInfo =
- CGM.getTypes().getFunctionInfo(ResultType, Args, FuncTy->getCallConv(),
- FuncTy->getNoReturnAttr());
+ CGM.getTypes().getFunctionInfo(ResultType, Args,
+ FuncTy->getExtInfo());
// Cast the function pointer to the right type.
const llvm::Type *BlockFTy =
@@ -678,8 +679,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
const FunctionType *BlockFunctionType = BExpr->getFunctionType();
QualType ResultType;
- CallingConv CC = BlockFunctionType->getCallConv();
- bool NoReturn = BlockFunctionType->getNoReturnAttr();
+ FunctionType::ExtInfo EInfo = getFunctionExtInfo(*BlockFunctionType);
bool IsVariadic;
if (const FunctionProtoType *FTy =
dyn_cast<FunctionProtoType>(BlockFunctionType)) {
@@ -718,7 +718,7 @@ CodeGenFunction::GenerateBlockFunction(const BlockExpr *BExpr,
Args.push_back(std::make_pair(*i, (*i)->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(ResultType, Args, CC, NoReturn);
+ CGM.getTypes().getFunctionInfo(ResultType, Args, EInfo);
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, IsVariadic);
@@ -843,7 +843,7 @@ GenerateCopyHelperFunction(bool BlockHasCopyDispose, const llvm::StructType *T,
Args.push_back(std::make_pair(Src, Src->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
// FIXME: We'd like to put these into a section mergeable by content, with
// internal linkage.
@@ -924,7 +924,7 @@ GenerateDestroyHelperFunction(bool BlockHasCopyDispose,
Args.push_back(std::make_pair(Src, Src->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
// FIXME: We'd like to put these into a section mergeable by content, with
// internal linkage.
@@ -1008,7 +1008,7 @@ GeneratebyrefCopyHelperFunction(const llvm::Type *T, int flag) {
Args.push_back(std::make_pair(Src, Src->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
@@ -1071,7 +1071,7 @@ BlockFunction::GeneratebyrefDestroyHelperFunction(const llvm::Type *T,
Args.push_back(std::make_pair(Src, Src->getType()));
const CGFunctionInfo &FI =
- CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+ CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
CodeGenTypes &Types = CGM.getTypes();
const llvm::FunctionType *LTy = Types.GetFunctionType(FI, false);
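// [Editor's note: illustrative sketch, not part of the upstream patch.]
// Every hunk above swaps the (CC_Default, false) argument pair for a default
// FunctionType::ExtInfo(), a value object bundling the noreturn flag, regparm
// count and calling convention. Reconstructed shape of that bundle, based on
// the accessors this commit uses (getNoReturn/getRegParm/getCC); the real
// class lives in clang's Type.h and may differ in detail:

enum SketchCallingConv { SCC_Default, SCC_C, SCC_X86StdCall, SCC_X86FastCall };

class SketchExtInfo {
  bool NoReturn;
  unsigned RegParm;
  SketchCallingConv CC;
public:
  // A default-constructed ExtInfo stands in for the old (CC_Default, false).
  SketchExtInfo() : NoReturn(false), RegParm(0), CC(SCC_Default) {}
  SketchExtInfo(bool noReturn, unsigned regParm, SketchCallingConv cc)
      : NoReturn(noReturn), RegParm(regParm), CC(cc) {}
  bool getNoReturn() const { return NoReturn; }
  unsigned getRegParm() const { return RegParm; }
  SketchCallingConv getCC() const { return CC; }
};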
diff --git a/lib/CodeGen/CGBlocks.h b/lib/CodeGen/CGBlocks.h
index e91319f6ddab..efee0e36b853 100644
--- a/lib/CodeGen/CGBlocks.h
+++ b/lib/CodeGen/CGBlocks.h
@@ -17,7 +17,6 @@
#include "CodeGenTypes.h"
#include "clang/AST/Type.h"
#include "llvm/Module.h"
-#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/AST/CharUnits.h"
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index 419ed734e83e..a9b0b645a4a6 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -81,10 +81,6 @@ static RValue EmitBinaryAtomicPost(CodeGenFunction& CGF,
Value *Args[2] = { CGF.EmitScalarExpr(E->getArg(0)),
CGF.EmitScalarExpr(E->getArg(1)) };
Value *Result = EmitCallWithBarrier(CGF, AtomF, Args, Args + 2);
-
- if (Id == Intrinsic::atomic_load_nand)
- Result = CGF.Builder.CreateNot(Result);
-
return RValue::get(CGF.Builder.CreateBinOp(Op, Result, Args[1]));
}
@@ -550,12 +546,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_fetch_and_xor_8:
case Builtin::BI__sync_fetch_and_xor_16:
return EmitBinaryAtomic(*this, Intrinsic::atomic_load_xor, E);
- case Builtin::BI__sync_fetch_and_nand_1:
- case Builtin::BI__sync_fetch_and_nand_2:
- case Builtin::BI__sync_fetch_and_nand_4:
- case Builtin::BI__sync_fetch_and_nand_8:
- case Builtin::BI__sync_fetch_and_nand_16:
- return EmitBinaryAtomic(*this, Intrinsic::atomic_load_nand, E);
// Clang extensions: not overloaded yet.
case Builtin::BI__sync_fetch_and_min:
@@ -602,13 +592,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI__sync_xor_and_fetch_16:
return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_xor, E,
llvm::Instruction::Xor);
- case Builtin::BI__sync_nand_and_fetch_1:
- case Builtin::BI__sync_nand_and_fetch_2:
- case Builtin::BI__sync_nand_and_fetch_4:
- case Builtin::BI__sync_nand_and_fetch_8:
- case Builtin::BI__sync_nand_and_fetch_16:
- return EmitBinaryAtomicPost(*this, Intrinsic::atomic_load_nand, E,
- llvm::Instruction::And);
case Builtin::BI__sync_val_compare_and_swap_1:
case Builtin::BI__sync_val_compare_and_swap_2:
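// [Editor's note: illustrative sketch, not part of the upstream patch.]
// The hunks above drop the lowering for __sync_fetch_and_nand and
// __sync_nand_and_fetch. Code that still needs the operation can emulate it
// with a compare-and-swap loop; __sync_val_compare_and_swap is untouched by
// this patch:

static int FetchAndNand(volatile int *Ptr, int Val) {
  int Old, New;
  do {
    Old = *Ptr;
    New = ~(Old & Val);
  } while (__sync_val_compare_and_swap(Ptr, Old, New) != Old);
  return Old; // fetch-and-nand yields the previous value
}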
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index b88001c81e9f..93a182f5cc47 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -297,311 +297,6 @@ void CodeGenModule::getMangledCXXDtorName(MangleBuffer &Name,
getMangleContext().mangleCXXDtor(D, Type, Name.getBuffer());
}
-llvm::Constant *
-CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
- bool Extern,
- const ThunkAdjustment &ThisAdjustment) {
- return GenerateCovariantThunk(Fn, GD, Extern,
- CovariantThunkAdjustment(ThisAdjustment,
- ThunkAdjustment()));
-}
-
-llvm::Value *
-CodeGenFunction::DynamicTypeAdjust(llvm::Value *V,
- const ThunkAdjustment &Adjustment) {
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
-
- const llvm::Type *OrigTy = V->getType();
- if (Adjustment.NonVirtual) {
- // Do the non-virtual adjustment
- V = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateConstInBoundsGEP1_64(V, Adjustment.NonVirtual);
- V = Builder.CreateBitCast(V, OrigTy);
- }
-
- if (!Adjustment.Virtual)
- return V;
-
- assert(Adjustment.Virtual % (LLVMPointerWidth / 8) == 0 &&
- "vtable entry unaligned");
-
- // Do the virtual this adjustment
- const llvm::Type *PtrDiffTy = ConvertType(getContext().getPointerDiffType());
- const llvm::Type *PtrDiffPtrTy = PtrDiffTy->getPointerTo();
-
- llvm::Value *ThisVal = Builder.CreateBitCast(V, Int8PtrTy);
- V = Builder.CreateBitCast(V, PtrDiffPtrTy->getPointerTo());
- V = Builder.CreateLoad(V, "vtable");
-
- llvm::Value *VTablePtr = V;
- uint64_t VirtualAdjustment = Adjustment.Virtual / (LLVMPointerWidth / 8);
- V = Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
- V = Builder.CreateLoad(V);
- V = Builder.CreateGEP(ThisVal, V);
-
- return Builder.CreateBitCast(V, OrigTy);
-}
-
-llvm::Constant *
-CodeGenFunction::GenerateCovariantThunk(llvm::Function *Fn,
- GlobalDecl GD, bool Extern,
- const CovariantThunkAdjustment &Adjustment) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- QualType ResultType = FPT->getResultType();
-
- FunctionArgList Args;
- ImplicitParamDecl *ThisDecl =
- ImplicitParamDecl::Create(getContext(), 0, SourceLocation(), 0,
- MD->getThisType(getContext()));
- Args.push_back(std::make_pair(ThisDecl, ThisDecl->getType()));
- for (FunctionDecl::param_const_iterator i = MD->param_begin(),
- e = MD->param_end();
- i != e; ++i) {
- ParmVarDecl *D = *i;
- Args.push_back(std::make_pair(D, D->getType()));
- }
- IdentifierInfo *II
- = &CGM.getContext().Idents.get("__thunk_named_foo_");
- FunctionDecl *FD = FunctionDecl::Create(getContext(),
- getContext().getTranslationUnitDecl(),
- SourceLocation(), II, ResultType, 0,
- Extern
- ? FunctionDecl::Extern
- : FunctionDecl::Static,
- false, true);
- StartFunction(FD, ResultType, Fn, Args, SourceLocation());
-
- // generate body
- const llvm::Type *Ty =
- CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
- llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
-
- CallArgList CallArgs;
-
- bool ShouldAdjustReturnPointer = true;
- QualType ArgType = MD->getThisType(getContext());
- llvm::Value *Arg = Builder.CreateLoad(LocalDeclMap[ThisDecl], "this");
- if (!Adjustment.ThisAdjustment.isEmpty()) {
- // Do the this adjustment.
- const llvm::Type *OrigTy = Callee->getType();
- Arg = DynamicTypeAdjust(Arg, Adjustment.ThisAdjustment);
-
- if (!Adjustment.ReturnAdjustment.isEmpty()) {
- const CovariantThunkAdjustment &ReturnAdjustment =
- CovariantThunkAdjustment(ThunkAdjustment(),
- Adjustment.ReturnAdjustment);
-
- Callee = CGM.BuildCovariantThunk(GD, Extern, ReturnAdjustment);
-
- Callee = Builder.CreateBitCast(Callee, OrigTy);
- ShouldAdjustReturnPointer = false;
- }
- }
-
- CallArgs.push_back(std::make_pair(RValue::get(Arg), ArgType));
-
- for (FunctionDecl::param_const_iterator i = MD->param_begin(),
- e = MD->param_end();
- i != e; ++i) {
- ParmVarDecl *D = *i;
- QualType ArgType = D->getType();
-
- // llvm::Value *Arg = CGF.GetAddrOfLocalVar(Dst);
- Expr *Arg = new (getContext()) DeclRefExpr(D, ArgType.getNonReferenceType(),
- SourceLocation());
- CallArgs.push_back(std::make_pair(EmitCallArg(Arg, ArgType), ArgType));
- }
-
- RValue RV = EmitCall(CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
- FPT->getCallConv(),
- FPT->getNoReturnAttr()),
- Callee, ReturnValueSlot(), CallArgs, MD);
- if (ShouldAdjustReturnPointer && !Adjustment.ReturnAdjustment.isEmpty()) {
- bool CanBeZero = !(ResultType->isReferenceType()
- // FIXME: attr nonnull can't be zero either
- /* || ResultType->hasAttr<NonNullAttr>() */ );
- // Do the return result adjustment.
- if (CanBeZero) {
- llvm::BasicBlock *NonZeroBlock = createBasicBlock();
- llvm::BasicBlock *ZeroBlock = createBasicBlock();
- llvm::BasicBlock *ContBlock = createBasicBlock();
-
- const llvm::Type *Ty = RV.getScalarVal()->getType();
- llvm::Value *Zero = llvm::Constant::getNullValue(Ty);
- Builder.CreateCondBr(Builder.CreateICmpNE(RV.getScalarVal(), Zero),
- NonZeroBlock, ZeroBlock);
- EmitBlock(NonZeroBlock);
- llvm::Value *NZ =
- DynamicTypeAdjust(RV.getScalarVal(), Adjustment.ReturnAdjustment);
- EmitBranch(ContBlock);
- EmitBlock(ZeroBlock);
- llvm::Value *Z = RV.getScalarVal();
- EmitBlock(ContBlock);
- llvm::PHINode *RVOrZero = Builder.CreatePHI(Ty);
- RVOrZero->reserveOperandSpace(2);
- RVOrZero->addIncoming(NZ, NonZeroBlock);
- RVOrZero->addIncoming(Z, ZeroBlock);
- RV = RValue::get(RVOrZero);
- } else
- RV = RValue::get(DynamicTypeAdjust(RV.getScalarVal(),
- Adjustment.ReturnAdjustment));
- }
-
- if (!ResultType->isVoidType())
- EmitReturnOfRValue(RV, ResultType);
-
- FinishFunction();
- return Fn;
-}
-
-llvm::Constant *
-CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
- const ThunkAdjustment &ThisAdjustment) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // Compute mangled name
- llvm::SmallString<256> OutName;
- if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
- getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), ThisAdjustment,
- OutName);
- else
- getMangleContext().mangleThunk(MD, ThisAdjustment, OutName);
-
- // Get function for mangled name
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD);
- return GetOrCreateLLVMFunction(OutName, Ty, GlobalDecl());
-}
-
-llvm::Constant *
-CodeGenModule::GetAddrOfCovariantThunk(GlobalDecl GD,
- const CovariantThunkAdjustment &Adjustment) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // Compute mangled name
- llvm::SmallString<256> Name;
- getMangleContext().mangleCovariantThunk(MD, Adjustment, Name);
-
- // Get function for mangled name
- const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD);
- return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
-}
-
-void CodeGenModule::BuildThunksForVirtual(GlobalDecl GD) {
- CGVtableInfo::AdjustmentVectorTy *AdjPtr = getVtableInfo().getAdjustments(GD);
- if (!AdjPtr)
- return;
- CGVtableInfo::AdjustmentVectorTy &Adj = *AdjPtr;
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- for (unsigned i = 0; i < Adj.size(); i++) {
- GlobalDecl OGD = Adj[i].first;
- const CXXMethodDecl *OMD = cast<CXXMethodDecl>(OGD.getDecl());
- QualType nc_oret = OMD->getType()->getAs<FunctionType>()->getResultType();
- CanQualType oret = getContext().getCanonicalType(nc_oret);
- QualType nc_ret = MD->getType()->getAs<FunctionType>()->getResultType();
- CanQualType ret = getContext().getCanonicalType(nc_ret);
- ThunkAdjustment ReturnAdjustment;
- if (oret != ret) {
- QualType qD = nc_ret->getPointeeType();
- QualType qB = nc_oret->getPointeeType();
- CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
- CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
- ReturnAdjustment = ComputeThunkAdjustment(D, B);
- }
- ThunkAdjustment ThisAdjustment = Adj[i].second;
- bool Extern = !cast<CXXRecordDecl>(OMD->getDeclContext())->isInAnonymousNamespace();
- if (!ReturnAdjustment.isEmpty() || !ThisAdjustment.isEmpty()) {
- CovariantThunkAdjustment CoAdj(ThisAdjustment, ReturnAdjustment);
- llvm::Constant *FnConst;
- if (!ReturnAdjustment.isEmpty())
- FnConst = GetAddrOfCovariantThunk(GD, CoAdj);
- else
- FnConst = GetAddrOfThunk(GD, ThisAdjustment);
- if (!isa<llvm::Function>(FnConst)) {
- llvm::Constant *SubExpr =
- cast<llvm::ConstantExpr>(FnConst)->getOperand(0);
- llvm::Function *OldFn = cast<llvm::Function>(SubExpr);
- llvm::Constant *NewFnConst;
- if (!ReturnAdjustment.isEmpty())
- NewFnConst = GetAddrOfCovariantThunk(GD, CoAdj);
- else
- NewFnConst = GetAddrOfThunk(GD, ThisAdjustment);
- llvm::Function *NewFn = cast<llvm::Function>(NewFnConst);
- NewFn->takeName(OldFn);
- llvm::Constant *NewPtrForOldDecl =
- llvm::ConstantExpr::getBitCast(NewFn, OldFn->getType());
- OldFn->replaceAllUsesWith(NewPtrForOldDecl);
- OldFn->eraseFromParent();
- FnConst = NewFn;
- }
- llvm::Function *Fn = cast<llvm::Function>(FnConst);
- if (Fn->isDeclaration()) {
- llvm::GlobalVariable::LinkageTypes linktype;
- linktype = llvm::GlobalValue::WeakAnyLinkage;
- if (!Extern)
- linktype = llvm::GlobalValue::InternalLinkage;
- Fn->setLinkage(linktype);
- if (!Features.Exceptions && !Features.ObjCNonFragileABI)
- Fn->addFnAttr(llvm::Attribute::NoUnwind);
- Fn->setAlignment(2);
- CodeGenFunction(*this).GenerateCovariantThunk(Fn, GD, Extern, CoAdj);
- }
- }
- }
-}
-
-llvm::Constant *
-CodeGenModule::BuildThunk(GlobalDecl GD, bool Extern,
- const ThunkAdjustment &ThisAdjustment) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- llvm::SmallString<256> OutName;
- if (const CXXDestructorDecl *D = dyn_cast<CXXDestructorDecl>(MD)) {
- getMangleContext().mangleCXXDtorThunk(D, GD.getDtorType(), ThisAdjustment,
- OutName);
- } else
- getMangleContext().mangleThunk(MD, ThisAdjustment, OutName);
-
- llvm::GlobalVariable::LinkageTypes linktype;
- linktype = llvm::GlobalValue::WeakAnyLinkage;
- if (!Extern)
- linktype = llvm::GlobalValue::InternalLinkage;
- llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::FunctionType *FTy =
- getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(),
- &getModule());
- CodeGenFunction(*this).GenerateThunk(Fn, GD, Extern, ThisAdjustment);
- llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
- return m;
-}
-
-llvm::Constant *
-CodeGenModule::BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
- const CovariantThunkAdjustment &Adjustment) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
- llvm::SmallString<256> OutName;
- getMangleContext().mangleCovariantThunk(MD, Adjustment, OutName);
- llvm::GlobalVariable::LinkageTypes linktype;
- linktype = llvm::GlobalValue::WeakAnyLinkage;
- if (!Extern)
- linktype = llvm::GlobalValue::InternalLinkage;
- llvm::Type *Ptr8Ty=llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext),0);
- const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
- const llvm::FunctionType *FTy =
- getTypes().GetFunctionType(getTypes().getFunctionInfo(MD),
- FPT->isVariadic());
-
- llvm::Function *Fn = llvm::Function::Create(FTy, linktype, OutName.str(),
- &getModule());
- CodeGenFunction(*this).GenerateCovariantThunk(Fn, MD, Extern, Adjustment);
- llvm::Constant *m = llvm::ConstantExpr::getBitCast(Fn, Ptr8Ty);
- return m;
-}
-
static llvm::Value *BuildVirtualCall(CodeGenFunction &CGF, uint64_t VtableIndex,
llvm::Value *This, const llvm::Type *Ty) {
Ty = Ty->getPointerTo()->getPointerTo()->getPointerTo();
@@ -618,17 +313,17 @@ llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXMethodDecl *MD, llvm::Value *This,
const llvm::Type *Ty) {
MD = MD->getCanonicalDecl();
- uint64_t VtableIndex = CGM.getVtableInfo().getMethodVtableIndex(MD);
+ uint64_t VTableIndex = CGM.getVTables().getMethodVtableIndex(MD);
- return ::BuildVirtualCall(*this, VtableIndex, This, Ty);
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
llvm::Value *
CodeGenFunction::BuildVirtualCall(const CXXDestructorDecl *DD, CXXDtorType Type,
llvm::Value *&This, const llvm::Type *Ty) {
DD = cast<CXXDestructorDecl>(DD->getCanonicalDecl());
- uint64_t VtableIndex =
- CGM.getVtableInfo().getMethodVtableIndex(GlobalDecl(DD, Type));
+ uint64_t VTableIndex =
+ CGM.getVTables().getMethodVtableIndex(GlobalDecl(DD, Type));
- return ::BuildVirtualCall(*this, VtableIndex, This, Ty);
+ return ::BuildVirtualCall(*this, VTableIndex, This, Ty);
}
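// [Editor's note: illustrative sketch, not part of the upstream patch.]
// ::BuildVirtualCall above builds one load plus one indexed load: under the
// Itanium ABI the vtable pointer is the first word of a dynamic object.
// The same lookup written out in plain C++:

#include <stdint.h>

typedef void (*VTableSlot)();

static VTableSlot LookupVirtual(void *This, uint64_t VTableIndex) {
  VTableSlot *VTable = *static_cast<VTableSlot **>(This); // load the vptr
  return VTable[VTableIndex];                             // index the slot
}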
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index 072b1f6585fd..cb1ecc1aa616 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -67,8 +67,7 @@ const CGFunctionInfo &
CodeGenTypes::getFunctionInfo(CanQual<FunctionNoProtoType> FTNP) {
return getFunctionInfo(FTNP->getResultType().getUnqualifiedType(),
llvm::SmallVector<CanQualType, 16>(),
- FTNP->getCallConv(),
- FTNP->getNoReturnAttr());
+ FTNP->getExtInfo());
}
/// \param Args - contains any initial parameters besides those
@@ -81,8 +80,7 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenTypes &CGT,
ArgTys.push_back(FTP->getArgType(i));
CanQualType ResTy = FTP->getResultType().getUnqualifiedType();
return CGT.getFunctionInfo(ResTy, ArgTys,
- FTP->getCallConv(),
- FTP->getNoReturnAttr());
+ FTP->getExtInfo());
}
const CGFunctionInfo &
@@ -175,8 +173,10 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(const ObjCMethodDecl *MD) {
}
return getFunctionInfo(GetReturnType(MD->getResultType()),
ArgTys,
- getCallingConventionForDecl(MD),
- /*NoReturn*/ false);
+ FunctionType::ExtInfo(
+ /*NoReturn*/ false,
+ /*RegParm*/ 0,
+ getCallingConventionForDecl(MD)));
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
@@ -194,43 +194,40 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(GlobalDecl GD) {
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const CallArgList &Args,
- CallingConv CC,
- bool NoReturn) {
+ const FunctionType::ExtInfo &Info) {
// FIXME: Kill copy.
llvm::SmallVector<CanQualType, 16> ArgTys;
for (CallArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(Context.getCanonicalParamType(i->second));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn);
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(QualType ResTy,
const FunctionArgList &Args,
- CallingConv CC,
- bool NoReturn) {
+ const FunctionType::ExtInfo &Info) {
// FIXME: Kill copy.
llvm::SmallVector<CanQualType, 16> ArgTys;
for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
i != e; ++i)
ArgTys.push_back(Context.getCanonicalParamType(i->second));
- return getFunctionInfo(GetReturnType(ResTy), ArgTys, CC, NoReturn);
+ return getFunctionInfo(GetReturnType(ResTy), ArgTys, Info);
}
const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CallingConv CallConv,
- bool NoReturn) {
+ const FunctionType::ExtInfo &Info) {
#ifndef NDEBUG
for (llvm::SmallVectorImpl<CanQualType>::const_iterator
I = ArgTys.begin(), E = ArgTys.end(); I != E; ++I)
assert(I->isCanonicalAsParam());
#endif
- unsigned CC = ClangCallConvToLLVMCallConv(CallConv);
+ unsigned CC = ClangCallConvToLLVMCallConv(Info.getCC());
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
- CGFunctionInfo::Profile(ID, CC, NoReturn, ResTy,
+ CGFunctionInfo::Profile(ID, Info, ResTy,
ArgTys.begin(), ArgTys.end());
void *InsertPos = 0;
@@ -239,7 +236,7 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
return *FI;
// Construct the function info.
- FI = new CGFunctionInfo(CC, NoReturn, ResTy, ArgTys);
+ FI = new CGFunctionInfo(CC, Info.getNoReturn(), Info.getRegParm(), ResTy, ArgTys);
FunctionInfos.InsertNode(FI, InsertPos);
// Compute ABI information.
@@ -250,11 +247,12 @@ const CGFunctionInfo &CodeGenTypes::getFunctionInfo(CanQualType ResTy,
CGFunctionInfo::CGFunctionInfo(unsigned _CallingConvention,
bool _NoReturn,
+ unsigned _RegParm,
CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys)
: CallingConvention(_CallingConvention),
EffectiveCallingConvention(_CallingConvention),
- NoReturn(_NoReturn)
+ NoReturn(_NoReturn), RegParm(_RegParm)
{
NumArgs = ArgTys.size();
Args = new ArgInfo[1 + NumArgs];
@@ -610,11 +608,7 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
// FIXME: we need to honour command line settings also...
// FIXME: RegParm should be reduced in case of nested functions and/or global
// register variable.
- signed RegParm = 0;
- if (TargetDecl)
- if (const RegparmAttr *RegParmAttr
- = TargetDecl->getAttr<RegparmAttr>())
- RegParm = RegParmAttr->getNumParams();
+ signed RegParm = FI.getRegParm();
unsigned PointerWidth = getContext().Target.getPointerWidth(0);
for (CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
@@ -623,8 +617,9 @@ void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
const ABIArgInfo &AI = it->info;
unsigned Attributes = 0;
- if (ParamType.isRestrictQualified())
- Attributes |= llvm::Attribute::NoAlias;
+ // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
+ // have the corresponding parameter variable. It doesn't make
+      // sense to do it here because parameter handling is such a mess.
switch (AI.getKind()) {
case ABIArgInfo::Coerce:
@@ -749,6 +744,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
V = CreateMemTemp(Ty);
Builder.CreateStore(AI, V);
} else {
+ if (Arg->getType().isRestrictQualified())
+ AI->addAttr(llvm::Attribute::NoAlias);
+
if (!getContext().typesAreCompatible(Ty, Arg->getType())) {
// This must be a promotion, for something like
// "void a(x) short x; {..."
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 3d81165b1bf1..31c8aac3f241 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -76,12 +76,16 @@ namespace CodeGen {
unsigned NumArgs;
ArgInfo *Args;
+ /// How many arguments to pass inreg.
+ unsigned RegParm;
+
public:
typedef const ArgInfo *const_arg_iterator;
typedef ArgInfo *arg_iterator;
CGFunctionInfo(unsigned CallingConvention,
bool NoReturn,
+ unsigned RegParm,
CanQualType ResTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys);
~CGFunctionInfo() { delete[] Args; }
@@ -108,6 +112,8 @@ namespace CodeGen {
EffectiveCallingConvention = Value;
}
+ unsigned getRegParm() const { return RegParm; }
+
CanQualType getReturnType() const { return Args[0].type; }
ABIArgInfo &getReturnInfo() { return Args[0].info; }
@@ -116,19 +122,20 @@ namespace CodeGen {
void Profile(llvm::FoldingSetNodeID &ID) {
ID.AddInteger(getCallingConvention());
ID.AddBoolean(NoReturn);
+ ID.AddInteger(RegParm);
getReturnType().Profile(ID);
for (arg_iterator it = arg_begin(), ie = arg_end(); it != ie; ++it)
it->type.Profile(ID);
}
template<class Iterator>
static void Profile(llvm::FoldingSetNodeID &ID,
- unsigned CallingConvention,
- bool NoReturn,
+ const FunctionType::ExtInfo &Info,
CanQualType ResTy,
Iterator begin,
Iterator end) {
- ID.AddInteger(CallingConvention);
- ID.AddBoolean(NoReturn);
+ ID.AddInteger(Info.getCC());
+ ID.AddBoolean(Info.getNoReturn());
+ ID.AddInteger(Info.getRegParm());
ResTy.Profile(ID);
for (; begin != end; ++begin) {
CanQualType T = *begin; // force iterator to be over canonical types
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 525e85841646..177e86230477 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -69,42 +69,6 @@ CodeGenModule::GetNonVirtualBaseClassOffset(const CXXRecordDecl *Class,
return llvm::ConstantInt::get(PtrDiffTy, Offset);
}
-// FIXME: This probably belongs in CGVtable, but it relies on
-// the static function ComputeNonVirtualBaseClassOffset, so we should make that
-// a CodeGenModule member function as well.
-ThunkAdjustment
-CodeGenModule::ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl) {
- CXXBasePaths Paths(/*FindAmbiguities=*/false,
- /*RecordPaths=*/true, /*DetectVirtual=*/false);
- if (!const_cast<CXXRecordDecl *>(ClassDecl)->
- isDerivedFrom(const_cast<CXXRecordDecl *>(BaseClassDecl), Paths)) {
- assert(false && "Class must be derived from the passed in base class!");
- return ThunkAdjustment();
- }
-
- unsigned Start = 0;
- uint64_t VirtualOffset = 0;
-
- const CXXBasePath &Path = Paths.front();
- const CXXRecordDecl *VBase = 0;
- for (unsigned i = 0, e = Path.size(); i != e; ++i) {
- const CXXBasePathElement& Element = Path[i];
- if (Element.Base->isVirtual()) {
- Start = i+1;
- QualType VBaseType = Element.Base->getType();
- VBase = cast<CXXRecordDecl>(VBaseType->getAs<RecordType>()->getDecl());
- }
- }
- if (VBase)
- VirtualOffset =
- getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
-
- uint64_t Offset =
- ComputeNonVirtualBaseClassOffset(getContext(), Paths.front(), Start);
- return ThunkAdjustment(Offset, VirtualOffset);
-}
-
/// Gets the address of a virtual base class within a complete object.
/// This should only be used for (1) non-virtual bases or (2) virtual bases
/// when the type is known to be complete (e.g. in complete destructors).
@@ -139,7 +103,7 @@ CodeGenFunction::GetAddressOfBaseOfCompleteClass(llvm::Value *This,
V = Builder.CreateBitCast(V, ConvertType(Base)->getPointerTo());
return V;
-}
+}
llvm::Value *
CodeGenFunction::GetAddressOfBaseClass(llvm::Value *Value,
@@ -308,6 +272,53 @@ CodeGenFunction::GetAddressOfDerivedClass(llvm::Value *Value,
return Value;
}
+/// EmitCopyCtorCall - Emit a call to a copy constructor.
+static void
+EmitCopyCtorCall(CodeGenFunction &CGF,
+ const CXXConstructorDecl *CopyCtor, CXXCtorType CopyCtorType,
+ llvm::Value *ThisPtr, llvm::Value *VTT, llvm::Value *Src) {
+ llvm::Value *Callee = CGF.CGM.GetAddrOfCXXConstructor(CopyCtor, CopyCtorType);
+
+ CallArgList CallArgs;
+
+ // Push the this ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(ThisPtr),
+ CopyCtor->getThisType(CGF.getContext())));
+
+ // Push the VTT parameter if necessary.
+ if (VTT) {
+ QualType T = CGF.getContext().getPointerType(CGF.getContext().VoidPtrTy);
+ CallArgs.push_back(std::make_pair(RValue::get(VTT), T));
+ }
+
+ // Push the Src ptr.
+ CallArgs.push_back(std::make_pair(RValue::get(Src),
+ CopyCtor->getParamDecl(0)->getType()));
+
+ {
+ CodeGenFunction::CXXTemporariesCleanupScope Scope(CGF);
+
+ // If the copy constructor has default arguments, emit them.
+ for (unsigned I = 1, E = CopyCtor->getNumParams(); I < E; ++I) {
+ const ParmVarDecl *Param = CopyCtor->getParamDecl(I);
+ const Expr *DefaultArgExpr = Param->getDefaultArg();
+
+ assert(DefaultArgExpr && "Ctor parameter must have default arg!");
+
+ QualType ArgType = Param->getType();
+ CallArgs.push_back(std::make_pair(CGF.EmitCallArg(DefaultArgExpr,
+ ArgType),
+ ArgType));
+ }
+
+ const FunctionProtoType *FPT =
+ CopyCtor->getType()->getAs<FunctionProtoType>();
+ CGF.EmitCall(CGF.CGM.getTypes().getFunctionInfo(CallArgs, FPT),
+ Callee, ReturnValueSlot(), CallArgs, CopyCtor);
+ }
+}
+
/// EmitClassAggrMemberwiseCopy - This routine generates code to copy an array
/// of class objects from SrcValue to DestValue. Copying is done either as a
/// bitwise copy or via a copy constructor call.
@@ -354,22 +365,9 @@ void CodeGenFunction::EmitClassAggrMemberwiseCopy(llvm::Value *Dest,
if (BitwiseCopy)
EmitAggregateCopy(Dest, Src, Ty);
else if (CXXConstructorDecl *BaseCopyCtor =
- BaseClassDecl->getCopyConstructor(getContext(), 0)) {
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor,
- Ctor_Complete);
- CallArgList CallArgs;
- // Push the this (Dest) ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Dest),
- BaseCopyCtor->getThisType(getContext())));
+ BaseClassDecl->getCopyConstructor(getContext(), 0))
+ EmitCopyCtorCall(*this, BaseCopyCtor, Ctor_Complete, Dest, 0, Src);
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- BaseCopyCtor->getParamDecl(0)->getType()));
- const FunctionProtoType *FPT
- = BaseCopyCtor->getType()->getAs<FunctionProtoType>();
- EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor);
- }
EmitBlock(ContinueBlock);
// Emit the increment of the loop counter.
@@ -471,7 +469,7 @@ void CodeGenFunction::EmitClassAggrCopyAssignment(llvm::Value *Dest,
/// GetVTTParameter - Return the VTT parameter that should be passed to a
/// base constructor/destructor with virtual bases.
static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
- if (!CGVtableInfo::needsVTTParameter(GD)) {
+ if (!CodeGenVTables::needsVTTParameter(GD)) {
// This constructor/destructor does not need a VTT parameter.
return 0;
}
@@ -486,21 +484,21 @@ static llvm::Value *GetVTTParameter(CodeGenFunction &CGF, GlobalDecl GD) {
// If the record matches the base, this is the complete ctor/dtor
// variant calling the base variant in a class with virtual bases.
if (RD == Base) {
- assert(!CGVtableInfo::needsVTTParameter(CGF.CurGD) &&
+ assert(!CodeGenVTables::needsVTTParameter(CGF.CurGD) &&
"doing no-op VTT offset in base dtor/ctor?");
SubVTTIndex = 0;
} else {
- SubVTTIndex = CGF.CGM.getVtableInfo().getSubVTTIndex(RD, Base);
+ SubVTTIndex = CGF.CGM.getVTables().getSubVTTIndex(RD, Base);
assert(SubVTTIndex != 0 && "Sub-VTT index must be greater than zero!");
}
- if (CGVtableInfo::needsVTTParameter(CGF.CurGD)) {
+ if (CodeGenVTables::needsVTTParameter(CGF.CurGD)) {
// A VTT parameter was passed to the constructor, use it.
VTT = CGF.LoadCXXVTT();
VTT = CGF.Builder.CreateConstInBoundsGEP1_64(VTT, SubVTTIndex);
} else {
// We're the complete constructor, so get the VTT by name.
- VTT = CGF.CGM.getVtableInfo().getVTT(RD);
+ VTT = CGF.CGM.getVTables().getVTT(RD);
VTT = CGF.Builder.CreateConstInBoundsGEP2_64(VTT, 0, SubVTTIndex);
}
@@ -531,29 +529,13 @@ void CodeGenFunction::EmitClassMemberwiseCopy(
return;
}
- if (CXXConstructorDecl *BaseCopyCtor =
- BaseClassDecl->getCopyConstructor(getContext(), 0)) {
- llvm::Value *Callee = CGM.GetAddrOfCXXConstructor(BaseCopyCtor, CtorType);
- CallArgList CallArgs;
- // Push the this (Dest) ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Dest),
- BaseCopyCtor->getThisType(getContext())));
-
- // Push the VTT parameter, if necessary.
- if (llvm::Value *VTT =
- GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType))) {
- QualType T = getContext().getPointerType(getContext().VoidPtrTy);
- CallArgs.push_back(std::make_pair(RValue::get(VTT), T));
- }
+ CXXConstructorDecl *BaseCopyCtor =
+ BaseClassDecl->getCopyConstructor(getContext(), 0);
+ if (!BaseCopyCtor)
+ return;
- // Push the Src ptr.
- CallArgs.push_back(std::make_pair(RValue::get(Src),
- BaseCopyCtor->getParamDecl(0)->getType()));
- const FunctionProtoType *FPT =
- BaseCopyCtor->getType()->getAs<FunctionProtoType>();
- EmitCall(CGM.getTypes().getFunctionInfo(CallArgs, FPT),
- Callee, ReturnValueSlot(), CallArgs, BaseCopyCtor);
- }
+ llvm::Value *VTT = GetVTTParameter(*this, GlobalDecl(BaseCopyCtor, CtorType));
+ EmitCopyCtorCall(*this, BaseCopyCtor, CtorType, Dest, VTT, Src);
}
/// EmitClassCopyAssignment - This routine generates code to copy assign a class
@@ -690,7 +672,7 @@ CodeGenFunction::SynthesizeCXXCopyConstructor(const FunctionArgList &Args) {
}
}
- InitializeVtablePtrs(ClassDecl);
+ InitializeVTablePointers(ClassDecl);
}
/// SynthesizeCXXCopyAssignment - Implicitly define copy assignment operator.
@@ -1010,7 +992,7 @@ void CodeGenFunction::EmitCtorPrologue(const CXXConstructorDecl *CD,
MemberInitializers.push_back(Member);
}
- InitializeVtablePtrs(ClassDecl);
+ InitializeVTablePointers(ClassDecl);
for (unsigned I = 0, E = MemberInitializers.size(); I != E; ++I) {
assert(LiveTemporaries.empty() &&
@@ -1060,7 +1042,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// Otherwise, we're in the base variant, so we need to ensure the
// vtable ptrs are right before emitting the body.
} else {
- InitializeVtablePtrs(Dtor->getParent());
+ InitializeVTablePointers(Dtor->getParent());
}
// Emit the body of the statement.
@@ -1286,14 +1268,12 @@ CodeGenFunction::EmitCXXAggrConstructorCall(const CXXConstructorDecl *D,
// before the construction of the next array element, if any.
// Keep track of the current number of live temporaries.
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
+ {
+ CXXTemporariesCleanupScope Scope(*this);
- EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);
+ EmitCXXConstructorCall(D, Ctor_Complete, Address, ArgBeg, ArgEnd);
+ }
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
-
EmitBlock(ContinueBlock);
// Emit the increment of the loop counter.
@@ -1399,7 +1379,7 @@ CodeGenFunction::GenerateCXXAggrDestructorHelper(const CXXDestructorDecl *D,
llvm::raw_svector_ostream(Name) << "__tcf_" << (++UniqueAggrDestructorCount);
QualType R = getContext().VoidTy;
const CGFunctionInfo &FI
- = CGM.getTypes().getFunctionInfo(R, Args, CC_Default, false);
+ = CGM.getTypes().getFunctionInfo(R, Args, FunctionType::ExtInfo());
const llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI, false);
llvm::Function *Fn =
llvm::Function::Create(FTy, llvm::GlobalValue::InternalLinkage,
@@ -1474,7 +1454,7 @@ CodeGenFunction::EmitDelegateCXXConstructorCall(const CXXConstructorDecl *Ctor,
QualType VoidPP = getContext().getPointerType(getContext().VoidPtrTy);
DelegateArgs.push_back(std::make_pair(RValue::get(VTT), VoidPP));
- if (CGVtableInfo::needsVTTParameter(CurGD)) {
+ if (CodeGenVTables::needsVTTParameter(CurGD)) {
assert(I != E && "cannot skip vtt parameter, already done with args");
assert(I->second == VoidPP && "skipping parameter not of vtt type");
++I;
@@ -1541,7 +1521,7 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
VTablePtr = Builder.CreateLoad(VTablePtr, "vtable");
int64_t VBaseOffsetOffset =
- CGM.getVtableInfo().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
+ CGM.getVTables().getVirtualBaseOffsetOffset(ClassDecl, BaseClassDecl);
llvm::Value *VBaseOffsetPtr =
Builder.CreateConstGEP1_64(VTablePtr, VBaseOffsetOffset, "vbase.offset.ptr");
@@ -1556,69 +1536,126 @@ CodeGenFunction::GetVirtualBaseClassOffset(llvm::Value *This,
return VBaseOffset;
}
-void CodeGenFunction::InitializeVtablePtrs(const CXXRecordDecl *ClassDecl) {
- if (!ClassDecl->isDynamicClass())
- return;
+void
+CodeGenFunction::InitializeVTablePointer(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+ // Compute the address point.
+ llvm::Value *VTableAddressPoint;
+
+ // Check if we need to use a vtable from the VTT.
+ if (CodeGenVTables::needsVTTParameter(CurGD) &&
+ (RD->getNumVBases() || BaseIsMorallyVirtual)) {
+ // Get the secondary vpointer index.
+ uint64_t VirtualPointerIndex =
+ CGM.getVTables().getSecondaryVirtualPointerIndex(VTableClass, Base);
+
+    // Load the VTT.
+ llvm::Value *VTT = LoadCXXVTT();
+ if (VirtualPointerIndex)
+ VTT = Builder.CreateConstInBoundsGEP1_64(VTT, VirtualPointerIndex);
- llvm::Constant *Vtable = CGM.getVtableInfo().getVtable(ClassDecl);
- CGVtableInfo::AddrSubMap_t& AddressPoints =
- *(*CGM.getVtableInfo().AddressPoints[ClassDecl])[ClassDecl];
- llvm::Value *ThisPtr = LoadCXXThis();
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
+ // And load the address point from the VTT.
+ VTableAddressPoint = Builder.CreateLoad(VTT);
+ } else {
+ uint64_t AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ VTableAddressPoint =
+ Builder.CreateConstInBoundsGEP2_64(VTable, 0, AddressPoint);
+ }
- // Store address points for virtual bases
- for (CXXRecordDecl::base_class_const_iterator I =
- ClassDecl->vbases_begin(), E = ClassDecl->vbases_end(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
- uint64_t Offset = Layout.getVBaseClassOffset(BaseClassDecl);
- InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
- ThisPtr, Offset);
+ // Compute where to store the address point.
+ llvm::Value *VTableField;
+
+ if (CodeGenVTables::needsVTTParameter(CurGD) && BaseIsMorallyVirtual) {
+ // We need to use the virtual base offset offset because the virtual base
+ // might have a different offset in the most derived class.
+ VTableField = GetAddressOfBaseClass(LoadCXXThis(), VTableClass, RD,
+ /*NullCheckValue=*/false);
+ } else {
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ VTableField = Builder.CreateBitCast(LoadCXXThis(), Int8PtrTy);
+ VTableField =
+ Builder.CreateConstInBoundsGEP1_64(VTableField, Base.getBaseOffset() / 8);
}
- // Store address points for non-virtual bases and current class
- InitializeVtablePtrsRecursive(ClassDecl, Vtable, AddressPoints, ThisPtr, 0);
+ // Finally, store the address point.
+ const llvm::Type *AddressPointPtrTy =
+ VTableAddressPoint->getType()->getPointerTo();
+ VTableField = Builder.CreateBitCast(VTableField, AddressPointPtrTy);
+ Builder.CreateStore(VTableAddressPoint, VTableField);
}
-void CodeGenFunction::InitializeVtablePtrsRecursive(
- const CXXRecordDecl *ClassDecl,
- llvm::Constant *Vtable,
- CGVtableInfo::AddrSubMap_t& AddressPoints,
- llvm::Value *ThisPtr,
- uint64_t Offset) {
- if (!ClassDecl->isDynamicClass())
- return;
+void
+CodeGenFunction::InitializeVTablePointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases) {
+ // If this base is a non-virtual primary base the address point has already
+ // been set.
+ if (!BaseIsNonVirtualPrimaryBase) {
+ // Initialize the vtable pointer for this base.
+ InitializeVTablePointer(Base, BaseIsMorallyVirtual, VTable, VTableClass);
+ }
+
+ const CXXRecordDecl *RD = Base.getBase();
- // Store address points for non-virtual bases
- const ASTRecordLayout &Layout = getContext().getASTRecordLayout(ClassDecl);
- for (CXXRecordDecl::base_class_const_iterator I =
- ClassDecl->bases_begin(), E = ClassDecl->bases_end(); I != E; ++I) {
- const CXXBaseSpecifier &Base = *I;
- if (Base.isVirtual())
+ // Traverse bases.
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ CXXRecordDecl *BaseDecl
+ = cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Ignore classes without a vtable.
+ if (!BaseDecl->isDynamicClass())
continue;
- CXXRecordDecl *BaseClassDecl
- = cast<CXXRecordDecl>(Base.getType()->getAs<RecordType>()->getDecl());
- uint64_t NewOffset = Offset + Layout.getBaseClassOffset(BaseClassDecl);
- InitializeVtablePtrsRecursive(BaseClassDecl, Vtable, AddressPoints,
- ThisPtr, NewOffset);
+
+ uint64_t BaseOffset;
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase;
+
+ if (I->isVirtual()) {
+ // Check if we've visited this virtual base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ const ASTRecordLayout &Layout =
+ getContext().getASTRecordLayout(VTableClass);
+
+ BaseOffset = Layout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ BaseDeclIsNonVirtualPrimaryBase = false;
+ } else {
+ const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
+
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+ BaseDeclIsNonVirtualPrimaryBase = Layout.getPrimaryBase() == BaseDecl;
+ }
+
+ InitializeVTablePointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual,
+ BaseDeclIsNonVirtualPrimaryBase,
+ VTable, VTableClass, VBases);
}
+}
- // Compute the address point
- assert(AddressPoints.count(std::make_pair(ClassDecl, Offset)) &&
- "Missing address point for class");
- uint64_t AddressPoint = AddressPoints[std::make_pair(ClassDecl, Offset)];
- llvm::Value *VtableAddressPoint =
- Builder.CreateConstInBoundsGEP2_64(Vtable, 0, AddressPoint);
-
- // Compute the address to store the address point
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- llvm::Value *VtableField = Builder.CreateBitCast(ThisPtr, Int8PtrTy);
- VtableField = Builder.CreateConstInBoundsGEP1_64(VtableField, Offset/8);
- const llvm::Type *AddressPointPtrTy =
- VtableAddressPoint->getType()->getPointerTo();
- VtableField = Builder.CreateBitCast(VtableField, AddressPointPtrTy);
+void CodeGenFunction::InitializeVTablePointers(const CXXRecordDecl *RD) {
+ // Ignore classes without a vtable.
+ if (!RD->isDynamicClass())
+ return;
+
+ // Get the VTable.
+ llvm::Constant *VTable = CGM.getVTables().GetAddrOfVTable(RD);
- // Store address point
- Builder.CreateStore(VtableAddressPoint, VtableField);
+ // Initialize the vtable pointers for this class and all of its bases.
+ VisitedVirtualBasesSetTy VBases;
+ InitializeVTablePointers(BaseSubobject(RD, 0),
+ /*BaseIsMorallyVirtual=*/false,
+ /*BaseIsNonVirtualPrimaryBase=*/false,
+ VTable, RD, VBases);
}
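// [Editor's note: illustrative sketch, not part of the upstream patch.]
// The new InitializeVTablePointers above is a depth-first walk over the base
// class graph. Because virtual bases are shared between paths, a visited set
// ensures each one gets its vtable pointer set exactly once. The traversal
// skeleton on a toy class graph:

#include <set>
#include <utility>
#include <vector>

struct Node {
  std::vector<std::pair<Node *, bool /*IsVirtual*/> > Bases;
};

static void Walk(Node *N, std::set<Node *> &VBases) {
  // InitializeVTablePointer(N) would run here.
  for (unsigned i = 0, e = N->Bases.size(); i != e; ++i) {
    Node *B = N->Bases[i].first;
    if (N->Bases[i].second && !VBases.insert(B).second)
      continue; // virtual base already handled along another path
    Walk(B, VBases);
  }
}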
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index ad97d08a2bf7..58acd3c2e172 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -88,17 +88,35 @@ llvm::StringRef CGDebugInfo::getFunctionName(const FunctionDecl *FD) {
/// getOrCreateFile - Get the file debug info descriptor for the input location.
llvm::DIFile CGDebugInfo::getOrCreateFile(SourceLocation Loc) {
- if (!Loc.isValid())
+ if (!Loc.isValid())
// If Location is not valid then use main input file.
return DebugFactory.CreateFile(TheCU.getFilename(), TheCU.getDirectory(),
TheCU);
SourceManager &SM = CGM.getContext().getSourceManager();
PresumedLoc PLoc = SM.getPresumedLoc(Loc);
+
+ // Cache the results.
+ const char *fname = PLoc.getFilename();
+ llvm::DenseMap<const char *, llvm::WeakVH>::iterator it =
+ DIFileCache.find(fname);
+
+ if (it != DIFileCache.end()) {
+ // Verify that the information still exists.
+ if (&*it->second)
+ return llvm::DIFile(cast<llvm::MDNode>(it->second));
+ }
+
+ // FIXME: We shouldn't even need to call 'makeAbsolute()' in the cases
+ // where we can consult the FileEntry.
llvm::sys::Path AbsFileName(PLoc.getFilename());
AbsFileName.makeAbsolute();
- return DebugFactory.CreateFile(AbsFileName.getLast(),
- AbsFileName.getDirname(), TheCU);
+ llvm::DIFile F = DebugFactory.CreateFile(AbsFileName.getLast(),
+ AbsFileName.getDirname(), TheCU);
+
+ DIFileCache[fname] = F.getNode();
+ return F;
+
}
/// CreateCompileUnit - Create new compile unit.
void CGDebugInfo::CreateCompileUnit() {
@@ -112,6 +130,10 @@ void CGDebugInfo::CreateCompileUnit() {
llvm::sys::Path AbsFileName(MainFileName);
AbsFileName.makeAbsolute();
+ // The main file name provided via the "-main-file-name" option contains just
+ // the file name itself with no path information. This file name may have had
+ // a relative path, so we look into the actual file entry for the main
+ // file to determine the real absolute path for the file.
std::string MainFileDir;
if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID()))
MainFileDir = MainFile->getDir()->getName();
@@ -604,7 +626,7 @@ CGDebugInfo::CreateCXXMemberFunction(const CXXMethodDecl *Method,
// It doesn't make sense to give a virtual destructor a vtable index,
// since a single destructor has two entries in the vtable.
if (!isa<CXXDestructorDecl>(Method))
- VIndex = CGM.getVtableInfo().getMethodVtableIndex(Method);
+ VIndex = CGM.getVTables().getMethodVtableIndex(Method);
ContainingType = RecordTy;
}
@@ -662,7 +684,7 @@ CollectCXXBases(const CXXRecordDecl *RD, llvm::DIFile Unit,
if (BI->isVirtual()) {
// virtual base offset offset is -ve. The code generator emits dwarf
// expression where it expects +ve number.
- BaseOffset = 0 - CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, Base);
+ BaseOffset = 0 - CGM.getVTables().getVirtualBaseOffsetOffset(RD, Base);
BFlags = llvm::DIType::FlagVirtual;
} else
BaseOffset = RL.getBaseClassOffset(Base);
@@ -692,10 +714,8 @@ llvm::DIType CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile Unit) {
ASTContext &Context = CGM.getContext();
/* Function type */
- llvm::SmallVector<llvm::DIDescriptor, 16> STys;
- STys.push_back(getOrCreateType(Context.IntTy, Unit));
- llvm::DIArray SElements =
- DebugFactory.GetOrCreateArray(STys.data(), STys.size());
+ llvm::DIDescriptor STy = getOrCreateType(Context.IntTy, Unit);
+ llvm::DIArray SElements = DebugFactory.GetOrCreateArray(&STy, 1);
llvm::DIType SubTy =
DebugFactory.CreateCompositeType(llvm::dwarf::DW_TAG_subroutine_type,
Unit, "", Unit,
@@ -1048,11 +1068,9 @@ llvm::DIType CGDebugInfo::CreateType(const VectorType *Ty,
uint64_t NumElems = Ty->getNumElements();
if (NumElems > 0)
--NumElems;
- llvm::SmallVector<llvm::DIDescriptor, 8> Subscripts;
- Subscripts.push_back(DebugFactory.GetOrCreateSubrange(0, NumElems));
- llvm::DIArray SubscriptArray =
- DebugFactory.GetOrCreateArray(Subscripts.data(), Subscripts.size());
+ llvm::DIDescriptor Subscript = DebugFactory.GetOrCreateSubrange(0, NumElems);
+ llvm::DIArray SubscriptArray = DebugFactory.GetOrCreateArray(&Subscript, 1);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
uint64_t Align = CGM.getContext().getTypeAlign(Ty);
@@ -1208,7 +1226,7 @@ llvm::DIType CGDebugInfo::getOrCreateType(QualType Ty,
Ty = UnwrapTypeForDebugInfo(Ty);
// Check for existing entry.
- std::map<void *, llvm::WeakVH>::iterator it =
+ llvm::DenseMap<void *, llvm::WeakVH>::iterator it =
TypeCache.find(Ty.getAsOpaquePtr());
if (it != TypeCache.end()) {
// Verify that the debug info still exists.
@@ -1371,13 +1389,10 @@ void CGDebugInfo::EmitStopPoint(llvm::Function *Fn, CGBuilderTy &Builder) {
llvm::DIFile Unit = getOrCreateFile(CurLoc);
PresumedLoc PLoc = SM.getPresumedLoc(CurLoc);
- llvm::DIDescriptor DR(RegionStack.back());
- llvm::DIScope DS = llvm::DIScope(DR.getNode());
- llvm::DILocation DO(NULL);
- llvm::DILocation DL =
- DebugFactory.CreateLocation(PLoc.getLine(), PLoc.getColumn(),
- DS, DO);
- Builder.SetCurrentDebugLocation(DL.getNode());
+ llvm::MDNode *Scope = RegionStack.back();
+ Builder.SetCurrentDebugLocation(llvm::NewDebugLoc::get(PLoc.getLine(),
+ PLoc.getColumn(),
+ Scope));
}
/// EmitRegionStart - Constructs the debug code for entering a declarative
@@ -1580,11 +1595,8 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, unsigned Tag,
llvm::Instruction *Call =
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
- llvm::DIScope DS(RegionStack.back());
- llvm::DILocation DO(NULL);
- llvm::DILocation DL = DebugFactory.CreateLocation(Line, Column, DS, DO);
-
- Call->setMetadata("dbg", DL.getNode());
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::NewDebugLoc::get(Line, Column, Scope));
}
/// EmitDeclare - Emit local variable declaration debug info.
@@ -1646,13 +1658,9 @@ void CGDebugInfo::EmitDeclare(const BlockDeclRefExpr *BDRE, unsigned Tag,
// Insert an llvm.dbg.declare into the current block.
llvm::Instruction *Call =
DebugFactory.InsertDeclare(Storage, D, Builder.GetInsertBlock());
-
- llvm::DIScope DS(RegionStack.back());
- llvm::DILocation DO(NULL);
- llvm::DILocation DL =
- DebugFactory.CreateLocation(Line, PLoc.getColumn(), DS, DO);
- Call->setMetadata("dbg", DL.getNode());
+ llvm::MDNode *Scope = RegionStack.back();
+ Call->setDebugLoc(llvm::NewDebugLoc::get(Line, PLoc.getColumn(), Scope));
}
void CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD,
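// [Editor's note: illustrative sketch, not part of the upstream patch.]
// Both the new DIFileCache and the reworked TypeCache above hold
// llvm::WeakVH values: handles that null themselves if the underlying node
// is deleted, so every cache hit must be re-verified before reuse. The idiom
// in isolation:

#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/ValueHandle.h"

static llvm::Value *CachedLookup(
    llvm::DenseMap<const char *, llvm::WeakVH> &Cache, const char *Key) {
  llvm::DenseMap<const char *, llvm::WeakVH>::iterator It = Cache.find(Key);
  if (It != Cache.end() && It->second)
    return It->second; // node still alive; reuse it
  return 0;            // absent or dropped; caller recreates and re-inserts
}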
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index 47a462048342..8397245e3184 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -21,7 +21,6 @@
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/Allocator.h"
-#include <map>
#include "CGBuilder.h"
@@ -52,8 +51,7 @@ class CGDebugInfo {
unsigned FwdDeclCount;
/// TypeCache - Cache of previously constructed Types.
- // FIXME: Eliminate this map. Be careful of iterator invalidation.
- std::map<void *, llvm::WeakVH> TypeCache;
+ llvm::DenseMap<void *, llvm::WeakVH> TypeCache;
bool BlockLiteralGenericSet;
llvm::DIType BlockLiteralGeneric;
@@ -65,6 +63,7 @@ class CGDebugInfo {
/// constructed on demand. For example, C++ destructors, C++ operators etc..
llvm::BumpPtrAllocator DebugInfoNames;
+ llvm::DenseMap<const char *, llvm::WeakVH> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::WeakVH> SPCache;
llvm::DenseMap<const NamespaceDecl *, llvm::WeakVH> NameSpaceCache;
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index dc9ecd64f4e4..87ec159a6010 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGCall.h"
+#include "CGRecordLayout.h"
#include "CGObjCRuntime.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
@@ -1468,7 +1469,9 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
LValue CodeGenFunction::EmitLValueForBitfield(llvm::Value* BaseValue,
const FieldDecl* Field,
unsigned CVRQualifiers) {
- CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ const CGRecordLayout::BitFieldInfo &Info = RL.getBitFieldInfo(Field);
// FIXME: CodeGenTypes should expose a method to get the appropriate type for
// FieldTy (the appropriate type is ABI-dependent).
@@ -1496,7 +1499,9 @@ LValue CodeGenFunction::EmitLValueForField(llvm::Value* BaseValue,
if (Field->isBitField())
return EmitLValueForBitfield(BaseValue, Field, CVRQualifiers);
- unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
// Match union field type.
@@ -1531,7 +1536,9 @@ CodeGenFunction::EmitLValueForFieldInitialization(llvm::Value* BaseValue,
if (!FieldType->isReferenceType())
return EmitLValueForField(BaseValue, Field, CVRQualifiers);
- unsigned idx = CGM.getTypes().getLLVMFieldNo(Field);
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
+ unsigned idx = RL.getLLVMFieldNo(Field);
llvm::Value *V = Builder.CreateStructGEP(BaseValue, idx, "tmp");
assert(!FieldType.getObjCGCAttr() && "fields cannot have GC attrs");
@@ -1637,6 +1644,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CastExpr::CK_AnyPointerToObjCPointerCast:
return EmitLValue(E->getSubExpr());
+ case CastExpr::CK_UncheckedDerivedToBase:
case CastExpr::CK_DerivedToBase: {
const RecordType *DerivedClassTy =
E->getSubExpr()->getType()->getAs<RecordType>();
@@ -1872,7 +1880,6 @@ LValue CodeGenFunction::EmitObjCSuperExprLValue(const ObjCSuperExpr *E) {
LValue CodeGenFunction::EmitStmtExprLValue(const StmtExpr *E) {
// Can only get l-value for message expression returning aggregate type
RValue RV = EmitAnyExprToTemp(E);
- // FIXME: can this be volatile?
return LValue::MakeAddr(RV.getAggregateAddr(), MakeQualifiers(E->getType()));
}
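
The three field-access hunks above are the consumer side of a refactor completed later in this patch: per-field layout queries move off CodeGenTypes and onto a per-record CGRecordLayout object. Such a map is needed because AST field numbers and LLVM struct element numbers do not line up; a standalone illustration (exact packing is target- and implementation-dependent):

    // AST field index vs. LLVM struct element index (illustration only).
    struct S {
      char  a;     // AST field 0 -> some LLVM element i
      int   b : 3; // b and c share a storage unit: they get BitFieldInfo
      int   c : 5; //   (element no. / start bit / size), not an element no.
      void *d;     // AST field 3 -> an LLVM element index != 3
    };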
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 4847ca3f8248..e2e2cd050419 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -333,8 +333,7 @@ void AggExprEmitter::VisitUnaryAddrOf(const UnaryOperator *E) {
llvm::Value *FuncPtr;
if (MD->isVirtual()) {
- int64_t Index =
- CGF.CGM.getVtableInfo().getMethodVtableIndex(MD);
+ int64_t Index = CGF.CGM.getVTables().getMethodVtableIndex(MD);
// Itanium C++ ABI 2.3:
// For a non-virtual function, this field is a simple function pointer.
@@ -500,10 +499,6 @@ AggExprEmitter::VisitCXXConstructExpr(const CXXConstructExpr *E) {
void AggExprEmitter::VisitCXXExprWithTemporaries(CXXExprWithTemporaries *E) {
llvm::Value *Val = DestPtr;
- if (!Val) {
- // Create a temporary variable.
- Val = CGF.CreateMemTemp(E->getType(), "tmp");
- }
CGF.EmitCXXExprWithTemporaries(E, Val, VolatileDest, IsInitializer);
}
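
getMethodVtableIndex feeds the Itanium member-function-pointer encoding quoted in the comment above. That encoding is observable from ordinary C++; a standalone demonstration (assumes the Itanium C++ ABI on an LP64 target, and inspects object representation purely for illustration):

    #include <cstdio>
    #include <cstring>

    struct B { virtual void f(); void g(); };
    void B::f() {}
    void B::g() {}

    int main() {
      void (B::*pf)() = &B::f; // virtual: ptr = 1 + vtable byte offset
      void (B::*pg)() = &B::g; // non-virtual: ptr = function address
      unsigned long raw[2];
      std::memcpy(raw, &pf, sizeof pf);
      std::printf("virtual:     ptr=%#lx adj=%#lx\n", raw[0], raw[1]);
      std::memcpy(raw, &pg, sizeof pg);
      std::printf("non-virtual: ptr=%#lx adj=%#lx\n", raw[0], raw[1]);
      return 0;
    }

On a typical x86-64 Linux build the first line prints ptr=0x1: vtable slot 0, so byte offset 0, plus 1.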
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 032862160464..d9585c9c6d20 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -44,9 +44,8 @@ RValue CodeGenFunction::EmitCXXMemberCall(const CXXMethodDecl *MD,
QualType ResultType = FPT->getResultType();
return EmitCall(CGM.getTypes().getFunctionInfo(ResultType, Args,
- FPT->getCallConv(),
- FPT->getNoReturnAttr()), Callee,
- ReturnValue, Args, MD);
+ FPT->getExtInfo()),
+ Callee, ReturnValue, Args, MD);
}
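
This is the first of many call sites in this patch changed the same way: getFunctionInfo's separate calling-convention and noreturn arguments are folded into a single FunctionType::ExtInfo value. Here the function type's own ExtInfo is forwarded; at the other sites below a default-constructed ExtInfo stands in for the old (CC_Default, false) pair, so those rewrites are behavior-preserving. The shape of the refactor, as a generic sketch (ExtInfoSketch is illustrative, not the real class):

    // Parameter object replacing a pair of positional arguments.
    struct ExtInfoSketch {
      int  CC;        // calling convention; 0 plays the role of CC_Default
      bool NoReturn;
      ExtInfoSketch() : CC(0), NoReturn(false) {} // == old (CC_Default, false)
    };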
/// canDevirtualizeMemberFunctionCalls - Checks whether virtual calls on given
@@ -411,7 +410,8 @@ static CharUnits CalculateCookiePadding(ASTContext &Ctx, const CXXNewExpr *E) {
return CalculateCookiePadding(Ctx, E->getAllocatedType());
}
-static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
+static llvm::Value *EmitCXXNewAllocSize(ASTContext &Context,
+ CodeGenFunction &CGF,
const CXXNewExpr *E,
llvm::Value *& NumElements) {
QualType Type = E->getAllocatedType();
@@ -432,6 +432,15 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
NumElements =
llvm::ConstantInt::get(SizeTy, Result.Val.getInt().getZExtValue());
+ while (const ArrayType *AType = Context.getAsArrayType(Type)) {
+ const llvm::ArrayType *llvmAType =
+ cast<llvm::ArrayType>(CGF.ConvertType(Type));
+ NumElements =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(
+ SizeTy, llvmAType->getNumElements()));
+ Type = AType->getElementType();
+ }
return llvm::ConstantInt::get(SizeTy, AllocSize.getQuantity());
}
@@ -444,6 +453,16 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
CGF.Builder.CreateMul(NumElements,
llvm::ConstantInt::get(SizeTy,
TypeSize.getQuantity()));
+
+ while (const ArrayType *AType = Context.getAsArrayType(Type)) {
+ const llvm::ArrayType *llvmAType =
+ cast<llvm::ArrayType>(CGF.ConvertType(Type));
+ NumElements =
+ CGF.Builder.CreateMul(NumElements,
+ llvm::ConstantInt::get(
+ SizeTy, llvmAType->getNumElements()));
+ Type = AType->getElementType();
+ }
// And add the cookie padding if necessary.
if (!CookiePadding.isZero())
@@ -504,7 +523,8 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
QualType SizeTy = getContext().getSizeType();
llvm::Value *NumElements = 0;
- llvm::Value *AllocSize = EmitCXXNewAllocSize(*this, E, NumElements);
+ llvm::Value *AllocSize = EmitCXXNewAllocSize(getContext(),
+ *this, E, NumElements);
NewArgs.push_back(std::make_pair(RValue::get(AllocSize), SizeTy));
@@ -590,10 +610,20 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
CookiePadding.getQuantity());
}
- NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
-
- EmitNewInitializer(*this, E, NewPtr, NumElements);
-
+ if (AllocType->isArrayType()) {
+ while (const ArrayType *AType = getContext().getAsArrayType(AllocType))
+ AllocType = AType->getElementType();
+ NewPtr =
+ Builder.CreateBitCast(NewPtr,
+ ConvertType(getContext().getPointerType(AllocType)));
+ EmitNewInitializer(*this, E, NewPtr, NumElements);
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+ }
+ else {
+ NewPtr = Builder.CreateBitCast(NewPtr, ConvertType(E->getType()));
+ EmitNewInitializer(*this, E, NewPtr, NumElements);
+ }
+
if (NullCheckResult) {
Builder.CreateBr(NewEnd);
NewNotNull = Builder.GetInsertBlock();
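
The two inserted loops and the bitcast sequencing above handle multi-dimensional array-new. In source terms, the arithmetic they implement (standalone; the cookie is added afterwards only when the ABI requires one):

    #include <cstddef>

    // For 'new int[n][4][5]' the allocated type is int[4][5]; the loops
    // fold the constant dimensions into the element count:
    std::size_t newAllocSize(std::size_t n) {
      const std::size_t NumElements = n * 4 * 5;  // runtime * constant dims
      return NumElements * sizeof(int);           // AllocSize
    }

The initializer then runs with the pointer cast to the innermost element type (int* here), and only afterwards is the pointer cast to the expression's declared type (int (*)[4][5]).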
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 591534042f51..0a0c9149b41b 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -522,13 +522,21 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
// scalar.
OpInfo.Ty = E->getComputationResultType();
OpInfo.RHS = EmitCast(E->getRHS(), OpInfo.Ty);
-
+
LValue LHSLV = CGF.EmitLValue(E->getLHS());
-
-
// We know the LHS is a complex lvalue.
- OpInfo.LHS=EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
- OpInfo.LHS=EmitComplexToComplexCast(OpInfo.LHS, LHSTy, OpInfo.Ty);
+ ComplexPairTy LHSComplexPair;
+ if (LHSLV.isPropertyRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
+ else if (LHSLV.isKVCRef())
+ LHSComplexPair =
+ CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
+ else
+ LHSComplexPair = EmitLoadOfComplex(LHSLV.getAddress(),
+ LHSLV.isVolatileQualified());
+
+ OpInfo.LHS=EmitComplexToComplexCast(LHSComplexPair, LHSTy, OpInfo.Ty);
// Expand the binary operator.
ComplexPairTy Result = (this->*Func)(OpInfo);
@@ -537,12 +545,22 @@ EmitCompoundAssign(const CompoundAssignOperator *E,
Result = EmitComplexToComplexCast(Result, OpInfo.Ty, LHSTy);
// Store the result value into the LHS lvalue.
- EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
+ if (LHSLV.isPropertyRef())
+ CGF.EmitObjCPropertySet(LHSLV.getPropertyRefExpr(),
+ RValue::getComplex(Result));
+ else if (LHSLV.isKVCRef())
+ CGF.EmitObjCPropertySet(LHSLV.getKVCRefExpr(), RValue::getComplex(Result));
+ else
+ EmitStoreOfComplex(Result, LHSLV.getAddress(), LHSLV.isVolatileQualified());
// And now return the LHS
IgnoreReal = ignreal;
IgnoreImag = ignimag;
IgnoreRealAssign = ignreal;
IgnoreImagAssign = ignimag;
+ if (LHSLV.isPropertyRef())
+ return CGF.EmitObjCPropertyGet(LHSLV.getPropertyRefExpr()).getComplexVal();
+ else if (LHSLV.isKVCRef())
+ return CGF.EmitObjCPropertyGet(LHSLV.getKVCRefExpr()).getComplexVal();
return EmitLoadOfComplex(LHSLV.getAddress(), LHSLV.isVolatileQualified());
}
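
The property-reference branches above exist because an Objective-C property l-value has no address to load from or store to; the compound assignment must be opened up into getter, operation, setter, and a final getter for the expression's own value. A hedged C++ analog of the control flow:

    #include <complex>

    struct Obj {                                       // stands in for an ObjC object
      std::complex<double> get() const { return v; }   // property getter
      void set(std::complex<double> nv) { v = nv; }    // property setter
      std::complex<double> v;
    };

    // 'obj.prop += rhs' for a complex property, expanded:
    std::complex<double> addAssign(Obj &o, std::complex<double> rhs) {
      std::complex<double> lhs = o.get();  // EmitObjCPropertyGet
      std::complex<double> res = lhs + rhs;
      o.set(res);                          // EmitObjCPropertySet
      return o.get();                      // result is re-read, as above
    }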
diff --git a/lib/CodeGen/CGExprConstant.cpp b/lib/CodeGen/CGExprConstant.cpp
index f0d82a8f0dc6..172a77d78e5f 100644
--- a/lib/CodeGen/CGExprConstant.cpp
+++ b/lib/CodeGen/CGExprConstant.cpp
@@ -14,6 +14,7 @@
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
#include "clang/AST/APValue.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/RecordLayout.h"
@@ -417,7 +418,7 @@ public:
// Get the function pointer (or index if this is a virtual function).
if (MD->isVirtual()) {
- uint64_t Index = CGM.getVtableInfo().getMethodVtableIndex(MD);
+ uint64_t Index = CGM.getVTables().getMethodVtableIndex(MD);
// Itanium C++ ABI 2.3:
// For a non-virtual function, this field is a simple function pointer.
@@ -1011,7 +1012,9 @@ llvm::Constant *CodeGenModule::EmitNullConstant(QualType T) {
E = RD->field_end(); I != E; ++I) {
const FieldDecl *FD = *I;
- unsigned FieldNo = getTypes().getLLVMFieldNo(FD);
+ const CGRecordLayout &RL =
+ getTypes().getCGRecordLayout(FD->getParent());
+ unsigned FieldNo = RL.getLLVMFieldNo(FD);
Elements[FieldNo] = EmitNullConstant(FD->getType());
}
@@ -1047,7 +1050,9 @@ CodeGenModule::EmitPointerToDataMember(const FieldDecl *FD) {
const llvm::StructType *ClassLTy =
cast<llvm::StructType>(getTypes().ConvertType(ClassType));
- unsigned FieldNo = getTypes().getLLVMFieldNo(FD);
+ const CGRecordLayout &RL =
+ getTypes().getCGRecordLayout(FD->getParent());
+ unsigned FieldNo = RL.getLLVMFieldNo(FD);
uint64_t Offset =
getTargetData().getStructLayout(ClassLTy)->getElementOffset(FieldNo);
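
EmitPointerToDataMember relies on the Itanium rule that a pointer to data member is represented as the member's byte offset within the class (with -1 as the null value). The offset it computes agrees with what offsetof reports for a standard-layout type; a standalone check:

    #include <cstdio>
    #include <cstddef>

    struct P { int a; double b; };

    int main() {
      // Prints 8 on a typical LP64 target: b sits after a plus padding.
      std::printf("offsetof(P, b) = %zu\n", offsetof(P, b));
      return 0;
    }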
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 7e26971414b4..42bf68ed52ce 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -769,6 +769,9 @@ Value *ScalarExprEmitter::VisitInitListExpr(InitListExpr *E) {
static bool ShouldNullCheckClassCastValue(const CastExpr *CE) {
const Expr *E = CE->getSubExpr();
+
+ if (CE->getCastKind() == CastExpr::CK_UncheckedDerivedToBase)
+ return false;
if (isa<CXXThisExpr>(E)) {
// We always assume that 'this' is never null.
@@ -826,6 +829,7 @@ Value *ScalarExprEmitter::EmitCastExpr(CastExpr *CE) {
return CGF.GetAddressOfDerivedClass(Src, BaseClassDecl, DerivedClassDecl,
NullCheckValue);
}
+ case CastExpr::CK_UncheckedDerivedToBase:
case CastExpr::CK_DerivedToBase: {
const RecordType *DerivedClassTy =
E->getType()->getAs<PointerType>()->getPointeeType()->getAs<RecordType>();
@@ -1337,6 +1341,11 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &Ops) {
if (Ops.LHS->getType()->isFPOrFPVectorTy())
return Builder.CreateFSub(Ops.LHS, Ops.RHS, "sub");
+
+ // Signed integer overflow is undefined behavior.
+ if (Ops.Ty->isSignedIntegerType())
+ return Builder.CreateNSWSub(Ops.LHS, Ops.RHS, "sub");
+
return Builder.CreateSub(Ops.LHS, Ops.RHS, "sub");
}
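
The new branch mirrors what is already done for addition: since signed overflow is undefined behavior in C and C++, signed subtraction can carry the nsw (no signed wrap) flag. A standalone reminder of what the flag encodes:

    #include <climits>

    int sub(int a, int b) { return a - b; }  // now emitted as 'sub nsw'

    // sub(INT_MIN, 1) is undefined behavior in C++ regardless of the
    // flag; nsw merely records that fact in the IR so the optimizer
    // may assume the wrap never happens.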
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 3ff77f0170c1..9eaf57c445ed 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -191,7 +191,7 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
// FIXME: We shouldn't need to get the function info here, the
// runtime already should have computed it to build the function.
RValue RV = EmitCall(Types.getFunctionInfo(PD->getType(), Args,
- CC_Default, false),
+ FunctionType::ExtInfo()),
GetPropertyFn, ReturnValueSlot(), Args);
// We need to fix the type here. Ivars with copy & retain are
// always objects so we don't need to worry about complex or
@@ -201,7 +201,12 @@ void CodeGenFunction::GenerateObjCGetter(ObjCImplementationDecl *IMP,
EmitReturnOfRValue(RV, PD->getType());
} else {
LValue LV = EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), Ivar, 0);
- if (hasAggregateLLVMType(Ivar->getType())) {
+ if (Ivar->getType()->isAnyComplexType()) {
+ ComplexPairTy Pair = LoadComplexFromAddr(LV.getAddress(),
+ LV.isVolatileQualified());
+ StoreComplexToAddr(Pair, ReturnValue, LV.isVolatileQualified());
+ }
+ else if (hasAggregateLLVMType(Ivar->getType())) {
EmitAggregateCopy(ReturnValue, LV.getAddress(), Ivar->getType());
} else {
CodeGenTypes &Types = CGM.getTypes();
@@ -280,7 +285,8 @@ void CodeGenFunction::GenerateObjCSetter(ObjCImplementationDecl *IMP,
// FIXME: We shouldn't need to get the function info here, the runtime
// already should have computed it to build the function.
EmitCall(Types.getFunctionInfo(getContext().VoidTy, Args,
- CC_Default, false), SetPropertyFn,
+ FunctionType::ExtInfo()),
+ SetPropertyFn,
ReturnValueSlot(), Args);
} else {
// FIXME: Find a clean way to avoid AST node creation.
@@ -459,12 +465,13 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
static const unsigned NumItems = 16;
// Get selector
- llvm::SmallVector<IdentifierInfo*, 3> II;
- II.push_back(&CGM.getContext().Idents.get("countByEnumeratingWithState"));
- II.push_back(&CGM.getContext().Idents.get("objects"));
- II.push_back(&CGM.getContext().Idents.get("count"));
- Selector FastEnumSel = CGM.getContext().Selectors.getSelector(II.size(),
- &II[0]);
+ IdentifierInfo *II[] = {
+ &CGM.getContext().Idents.get("countByEnumeratingWithState"),
+ &CGM.getContext().Idents.get("objects"),
+ &CGM.getContext().Idents.get("count")
+ };
+ Selector FastEnumSel =
+ CGM.getContext().Selectors.getSelector(llvm::array_lengthof(II), &II[0]);
QualType ItemsTy =
getContext().getConstantArrayType(getContext().getObjCIdType(),
@@ -555,7 +562,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// FIXME: We shouldn't need to get the function info here, the runtime already
// should have computed it to build the function.
EmitCall(CGM.getTypes().getFunctionInfo(getContext().VoidTy, Args2,
- CC_Default, false),
+ FunctionType::ExtInfo()),
EnumerationMutationFn, ReturnValueSlot(), Args2);
EmitBlock(WasNotMutated);
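
The selector construction above swaps a heap-backed SmallVector for a fixed array sized with llvm::array_lengthof. That helper is just a compile-time element count; a minimal standalone equivalent:

    #include <cstddef>

    // Deduces N from the array reference, like llvm::array_lengthof.
    template <typename T, std::size_t N>
    std::size_t array_lengthof_sketch(T (&)[N]) { return N; }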
diff --git a/lib/CodeGen/CGObjCGNU.cpp b/lib/CodeGen/CGObjCGNU.cpp
index 119819b810be..d4452000dc04 100644
--- a/lib/CodeGen/CGObjCGNU.cpp
+++ b/lib/CodeGen/CGObjCGNU.cpp
@@ -465,7 +465,7 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGen::CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- CC_Default, false);
+ FunctionType::ExtInfo());
const llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
@@ -573,7 +573,7 @@ CGObjCGNU::GenerateMessageSend(CodeGen::CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- CC_Default, false);
+ FunctionType::ExtInfo());
const llvm::FunctionType *impType =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
@@ -1694,7 +1694,7 @@ llvm::Constant *CGObjCGNU::EnumerationMutationFunction() {
Params.push_back(ASTIdTy);
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- CC_Default, false), false);
+ FunctionType::ExtInfo()), false);
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
diff --git a/lib/CodeGen/CGObjCMac.cpp b/lib/CodeGen/CGObjCMac.cpp
index 475280b6a01e..883ed98511e7 100644
--- a/lib/CodeGen/CGObjCMac.cpp
+++ b/lib/CodeGen/CGObjCMac.cpp
@@ -13,6 +13,7 @@
#include "CGObjCRuntime.h"
+#include "CGRecordLayout.h"
#include "CodeGenModule.h"
#include "CodeGenFunction.h"
#include "clang/AST/ASTContext.h"
@@ -306,7 +307,8 @@ public:
Params.push_back(Ctx.BoolTy);
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(IdType, Params,
- CC_Default, false), false);
+ FunctionType::ExtInfo()),
+ false);
return CGM.CreateRuntimeFunction(FTy, "objc_getProperty");
}
@@ -325,7 +327,8 @@ public:
Params.push_back(Ctx.BoolTy);
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- CC_Default, false), false);
+ FunctionType::ExtInfo()),
+ false);
return CGM.CreateRuntimeFunction(FTy, "objc_setProperty");
}
@@ -337,7 +340,8 @@ public:
Params.push_back(Ctx.getCanonicalParamType(Ctx.getObjCIdType()));
const llvm::FunctionType *FTy =
Types.GetFunctionType(Types.getFunctionInfo(Ctx.VoidTy, Params,
- CC_Default, false), false);
+ FunctionType::ExtInfo()),
+ false);
return CGM.CreateRuntimeFunction(FTy, "objc_enumerationMutation");
}
@@ -1559,7 +1563,7 @@ CGObjCCommonMac::EmitLegacyMessageSend(CodeGen::CodeGenFunction &CGF,
CodeGenTypes &Types = CGM.getTypes();
const CGFunctionInfo &FnInfo = Types.getFunctionInfo(ResultType, ActualArgs,
- CC_Default, false);
+ FunctionType::ExtInfo());
const llvm::FunctionType *FTy =
Types.GetFunctionType(FnInfo, Method ? Method->isVariadic() : false);
@@ -3131,8 +3135,10 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
FieldDecl *Field = RecFields[i];
uint64_t FieldOffset;
if (RD) {
+ const CGRecordLayout &RL =
+ CGM.getTypes().getCGRecordLayout(Field->getParent());
if (Field->isBitField()) {
- CodeGenTypes::BitFieldInfo Info = CGM.getTypes().getBitFieldInfo(Field);
+ const CGRecordLayout::BitFieldInfo &Info = RL.getBitFieldInfo(Field);
const llvm::Type *Ty =
CGM.getTypes().ConvertTypeForMemRecursive(Field->getType());
@@ -3141,7 +3147,7 @@ void CGObjCCommonMac::BuildAggrIvarLayout(const ObjCImplementationDecl *OI,
FieldOffset = Info.FieldNo * TypeSize;
} else
FieldOffset =
- Layout->getElementOffset(CGM.getTypes().getLLVMFieldNo(Field));
+ Layout->getElementOffset(RL.getLLVMFieldNo(Field));
} else
FieldOffset = ComputeIvarBaseOffset(CGM, OI, cast<ObjCIvarDecl>(Field));
@@ -5094,7 +5100,8 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
// FIXME. This is too much work to get the ABI-specific result type needed to
// find the message name.
const CGFunctionInfo &FnInfo
- = Types.getFunctionInfo(ResultType, CallArgList(), CC_Default, false);
+ = Types.getFunctionInfo(ResultType, CallArgList(),
+ FunctionType::ExtInfo());
llvm::Constant *Fn = 0;
std::string Name("\01l_");
if (CGM.ReturnTypeUsesSret(FnInfo)) {
@@ -5169,7 +5176,7 @@ CodeGen::RValue CGObjCNonFragileABIMac::EmitMessageSend(
ObjCTypes.MessageRefCPtrTy));
ActualArgs.insert(ActualArgs.end(), CallArgs.begin(), CallArgs.end());
const CGFunctionInfo &FnInfo1 = Types.getFunctionInfo(ResultType, ActualArgs,
- CC_Default, false);
+ FunctionType::ExtInfo());
llvm::Value *Callee = CGF.Builder.CreateStructGEP(Arg1, 0);
Callee = CGF.Builder.CreateLoad(Callee);
const llvm::FunctionType *FTy = Types.GetFunctionType(FnInfo1, true);
diff --git a/lib/CodeGen/CGObjCRuntime.h b/lib/CodeGen/CGObjCRuntime.h
index ff5d40bfbc87..b781940ffad5 100644
--- a/lib/CodeGen/CGObjCRuntime.h
+++ b/lib/CodeGen/CGObjCRuntime.h
@@ -16,7 +16,6 @@
#ifndef CLANG_CODEGEN_OBCJRUNTIME_H
#define CLANG_CODEGEN_OBCJRUNTIME_H
#include "clang/Basic/IdentifierTable.h" // Selector
-#include "llvm/ADT/SmallVector.h"
#include "clang/AST/DeclObjC.h"
#include <string>
diff --git a/lib/CodeGen/CGRTTI.cpp b/lib/CodeGen/CGRTTI.cpp
index 4907223fe346..1caec97fc367 100644
--- a/lib/CodeGen/CGRTTI.cpp
+++ b/lib/CodeGen/CGRTTI.cpp
@@ -148,7 +148,7 @@ public:
};
/// BuildTypeInfo - Build the RTTI type info struct for the given type.
- llvm::Constant *BuildTypeInfo(QualType Ty);
+ llvm::Constant *BuildTypeInfo(QualType Ty, bool Force = false);
};
}
@@ -327,83 +327,20 @@ static llvm::GlobalVariable::LinkageTypes getTypeInfoLinkage(QualType Ty) {
if (ContainsIncompleteClassType(Ty))
return llvm::GlobalValue::InternalLinkage;
- switch (Ty->getTypeClass()) {
- default:
- // FIXME: We need to add code to handle all types.
- assert(false && "Unhandled type!");
- break;
-
- case Type::Pointer: {
- const PointerType *PointerTy = cast<PointerType>(Ty);
-
- // If the pointee type has internal linkage, then the pointer type needs to
- // have it as well.
- if (getTypeInfoLinkage(PointerTy->getPointeeType()) ==
- llvm::GlobalVariable::InternalLinkage)
- return llvm::GlobalVariable::InternalLinkage;
-
- return llvm::GlobalVariable::WeakODRLinkage;
- }
-
- case Type::Enum: {
- const EnumType *EnumTy = cast<EnumType>(Ty);
- const EnumDecl *ED = EnumTy->getDecl();
-
- // If we're in an anonymous namespace, then we always want internal linkage.
- if (ED->isInAnonymousNamespace() || !ED->hasLinkage())
- return llvm::GlobalVariable::InternalLinkage;
-
- return llvm::GlobalValue::WeakODRLinkage;
- }
-
- case Type::Record: {
- const RecordType *RecordTy = cast<RecordType>(Ty);
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(RecordTy->getDecl());
-
- // If we're in an anonymous namespace, then we always want internal linkage.
- if (RD->isInAnonymousNamespace() || !RD->hasLinkage())
- return llvm::GlobalVariable::InternalLinkage;
-
- // If this class does not have a vtable, we want weak linkage.
- if (!RD->isDynamicClass())
- return llvm::GlobalValue::WeakODRLinkage;
-
- return CodeGenModule::getVtableLinkage(RD);
- }
-
- case Type::Vector:
- case Type::ExtVector:
- case Type::Builtin:
- return llvm::GlobalValue::WeakODRLinkage;
-
- case Type::FunctionProto: {
- const FunctionProtoType *FPT = cast<FunctionProtoType>(Ty);
+ switch (Ty->getLinkage()) {
+ case NoLinkage:
+ case InternalLinkage:
+ case UniqueExternalLinkage:
+ return llvm::GlobalValue::InternalLinkage;
- // Check the return type.
- if (getTypeInfoLinkage(FPT->getResultType()) ==
- llvm::GlobalValue::InternalLinkage)
- return llvm::GlobalValue::InternalLinkage;
-
- // Check the parameter types.
- for (unsigned i = 0; i != FPT->getNumArgs(); ++i) {
- if (getTypeInfoLinkage(FPT->getArgType(i)) ==
- llvm::GlobalValue::InternalLinkage)
- return llvm::GlobalValue::InternalLinkage;
+ case ExternalLinkage:
+ if (const RecordType *Record = dyn_cast<RecordType>(Ty)) {
+ const CXXRecordDecl *RD = cast<CXXRecordDecl>(Record->getDecl());
+ if (RD->isDynamicClass())
+ return CodeGenModule::getVtableLinkage(RD);
}
-
- return llvm::GlobalValue::WeakODRLinkage;
- }
-
- case Type::ConstantArray:
- case Type::IncompleteArray: {
- const ArrayType *AT = cast<ArrayType>(Ty);
-
- // Check the element type.
- if (getTypeInfoLinkage(AT->getElementType()) ==
- llvm::GlobalValue::InternalLinkage)
- return llvm::GlobalValue::InternalLinkage;
- }
+ return llvm::GlobalValue::WeakODRLinkage;
}
return llvm::GlobalValue::WeakODRLinkage;
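
The rewritten switch keys the decision off the type's language linkage instead of enumerating type classes. In source terms (illustration; the mapping follows the switch above):

    namespace { struct Local {}; }  // internal linkage -> internal typeinfo,
                                    //   never shared across TUs
    struct Pub { virtual ~Pub(); }; // external dynamic class -> RTTI linkage
                                    //   follows the vtable's linkage
    typedef int *IntPtr;            // other external types -> weak_odr, so
                                    //   duplicate emissions merge at link time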
@@ -444,6 +381,7 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
switch (Ty->getTypeClass()) {
default: assert(0 && "Unhandled type!");
+ case Type::Builtin:
// GCC treats vector types as fundamental types.
case Type::Vector:
case Type::ExtVector:
@@ -511,7 +449,7 @@ void RTTIBuilder::BuildVtablePointer(const Type *Ty) {
Fields.push_back(Vtable);
}
-llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) {
+llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty, bool Force) {
// We want to operate on the canonical type.
Ty = CGM.getContext().getCanonicalType(Ty);
@@ -525,7 +463,7 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) {
return llvm::ConstantExpr::getBitCast(OldGV, Int8PtrTy);
// Check if there is already an external RTTI descriptor for this type.
- if (ShouldUseExternalRTTIDescriptor(Ty))
+ if (!Force && ShouldUseExternalRTTIDescriptor(Ty))
return GetAddrOfExternalRTTIDescriptor(Ty);
llvm::GlobalVariable::LinkageTypes Linkage = getTypeInfoLinkage(Ty);
@@ -538,11 +476,9 @@ llvm::Constant *RTTIBuilder::BuildTypeInfo(QualType Ty) {
switch (Ty->getTypeClass()) {
default: assert(false && "Unhandled type class!");
- case Type::Builtin:
- assert(false && "Builtin type info must be in the standard library!");
- break;
// GCC treats vector types as fundamental types.
+ case Type::Builtin:
case Type::Vector:
case Type::ExtVector:
// Itanium C++ ABI 2.9.5p4:
@@ -760,7 +696,7 @@ void RTTIBuilder::BuildVMIClassTypeInfo(const CXXRecordDecl *RD) {
// subobject. For a virtual base, this is the offset in the virtual table of
// the virtual base offset for the virtual base referenced (negative).
if (Base->isVirtual())
- OffsetFlags = CGM.getVtableInfo().getVirtualBaseOffsetOffset(RD, BaseDecl);
+ OffsetFlags = CGM.getVTables().getVirtualBaseOffsetOffset(RD, BaseDecl);
else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
OffsetFlags = Layout.getBaseClassOffset(BaseDecl) / 8;
@@ -854,3 +790,61 @@ llvm::Constant *CodeGenModule::GetAddrOfRTTIDescriptor(QualType Ty) {
return RTTIBuilder(*this).BuildTypeInfo(Ty);
}
+
+// Try to find the magic class __cxxabiv1::__fundamental_type_info. If it
+// exists and has a destructor, we will emit the typeinfo for the fundamental
+// types. This is the same behaviour as GCC.
+static CXXRecordDecl *FindMagicClass(ASTContext &AC) {
+ const IdentifierInfo &NamespaceII = AC.Idents.get("__cxxabiv1");
+ DeclarationName NamespaceDN = AC.DeclarationNames.getIdentifier(&NamespaceII);
+ TranslationUnitDecl *TUD = AC.getTranslationUnitDecl();
+ DeclContext::lookup_result NamespaceLookup = TUD->lookup(NamespaceDN);
+ if (NamespaceLookup.first == NamespaceLookup.second)
+ return NULL;
+ const NamespaceDecl *Namespace =
+ dyn_cast<NamespaceDecl>(*NamespaceLookup.first);
+ if (!Namespace)
+ return NULL;
+
+ const IdentifierInfo &ClassII = AC.Idents.get("__fundamental_type_info");
+ DeclarationName ClassDN = AC.DeclarationNames.getIdentifier(&ClassII);
+ DeclContext::lookup_const_result ClassLookup = Namespace->lookup(ClassDN);
+ if (ClassLookup.first == ClassLookup.second)
+ return NULL;
+ CXXRecordDecl *Class = dyn_cast<CXXRecordDecl>(*ClassLookup.first);
+
+ if (Class && Class->hasDefinition() && Class->isDynamicClass() &&
+ Class->getDestructor(AC))
+ return Class;
+
+ return NULL;
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptor(QualType Type) {
+ QualType PointerType = Context.getPointerType(Type);
+ QualType PointerTypeConst = Context.getPointerType(Type.withConst());
+ RTTIBuilder(*this).BuildTypeInfo(Type, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerType, true);
+ RTTIBuilder(*this).BuildTypeInfo(PointerTypeConst, true);
+}
+
+void CodeGenModule::EmitFundamentalRTTIDescriptors() {
+ CXXRecordDecl *RD = FindMagicClass(getContext());
+ if (!RD)
+ return;
+
+ getVTables().GenerateClassData(getVtableLinkage(RD), RD);
+
+ QualType FundamentalTypes[] = { Context.VoidTy, Context.Char32Ty,
+ Context.Char16Ty, Context.UnsignedLongLongTy,
+ Context.LongLongTy, Context.WCharTy,
+ Context.UnsignedShortTy, Context.ShortTy,
+ Context.UnsignedLongTy, Context.LongTy,
+ Context.UnsignedIntTy, Context.IntTy,
+ Context.UnsignedCharTy, Context.FloatTy,
+ Context.LongDoubleTy, Context.DoubleTy,
+ Context.CharTy, Context.BoolTy,
+ Context.SignedCharTy };
+ for (unsigned i = 0; i < sizeof(FundamentalTypes)/sizeof(QualType); ++i)
+ EmitFundamentalRTTIDescriptor(FundamentalTypes[i]);
+}
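
The lookup in FindMagicClass detects the GCC convention for C++ runtime-support libraries: defining the destructor of __cxxabiv1::__fundamental_type_info in a translation unit is the cue to emit type_info objects for all fundamental types there. What such a library's source looks like (a sketch of the convention, not of any particular runtime):

    namespace __cxxabiv1 {
      class __fundamental_type_info {
      public:
        virtual ~__fundamental_type_info();
      };
      // Defining the key function here makes this TU the home of the
      // fundamental-type RTTI, per EmitFundamentalRTTIDescriptors above.
      __fundamental_type_info::~__fundamental_type_info() {}
    }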
diff --git a/lib/CodeGen/CGRecordLayout.h b/lib/CodeGen/CGRecordLayout.h
new file mode 100644
index 000000000000..d0d8f984a9ef
--- /dev/null
+++ b/lib/CodeGen/CGRecordLayout.h
@@ -0,0 +1,95 @@
+//===--- CGRecordLayout.h - LLVM Record Layout Information ------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef CLANG_CODEGEN_CGRECORDLAYOUT_H
+#define CLANG_CODEGEN_CGRECORDLAYOUT_H
+
+#include "llvm/ADT/DenseMap.h"
+#include "clang/AST/Decl.h"
+namespace llvm {
+ class Type;
+}
+
+namespace clang {
+namespace CodeGen {
+
+/// CGRecordLayout - This class handles struct and union layout info while
+/// lowering AST types to LLVM types.
+///
+/// These layout objects are only created on demand as IR generation requires.
+class CGRecordLayout {
+ friend class CodeGenTypes;
+
+ CGRecordLayout(const CGRecordLayout&); // DO NOT IMPLEMENT
+ void operator=(const CGRecordLayout&); // DO NOT IMPLEMENT
+
+public:
+ struct BitFieldInfo {
+ BitFieldInfo(unsigned FieldNo,
+ unsigned Start,
+ unsigned Size)
+ : FieldNo(FieldNo), Start(Start), Size(Size) {}
+
+ unsigned FieldNo;
+ unsigned Start;
+ unsigned Size;
+ };
+
+private:
+ /// The LLVMType corresponding to this record layout.
+ const llvm::Type *LLVMType;
+
+ /// Map from (non-bit-field) struct field to the corresponding llvm struct
+ /// type field no. This info is populated by the record builder.
+ llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
+
+ /// Map from (bit-field) struct field to its bit field info (LLVM field
+ /// number, start bit and size). This info is populated by the record
+ /// builder.
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
+
+ /// Whether one of the fields in this record layout is a pointer to data
+ /// member, or a struct that contains a pointer to data member.
+ bool ContainsPointerToDataMember : 1;
+
+public:
+ CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember)
+ : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) {}
+
+ /// \brief Return the LLVM type associated with this record.
+ const llvm::Type *getLLVMType() const {
+ return LLVMType;
+ }
+
+ /// \brief Check whether this struct contains pointers to data members.
+ bool containsPointerToDataMember() const {
+ return ContainsPointerToDataMember;
+ }
+
+ /// \brief Return the llvm::StructType element number that corresponds to
+ /// the field FD.
+ unsigned getLLVMFieldNo(const FieldDecl *FD) const {
+ assert(!FD->isBitField() && "Invalid call for bit-field decl!");
+ assert(FieldInfo.count(FD) && "Invalid field for record!");
+ return FieldInfo.lookup(FD);
+ }
+
+ /// \brief Return the BitFieldInfo that corresponds to the field FD.
+ const BitFieldInfo &getBitFieldInfo(const FieldDecl *FD) const {
+ assert(FD->isBitField() && "Invalid call for non bit-field decl!");
+ llvm::DenseMap<const FieldDecl *, BitFieldInfo>::const_iterator
+ it = BitFields.find(FD);
+ assert(it != BitFields.end() && "Unable to find bitfield info");
+ return it->second;
+ }
+};
+
+} // end namespace CodeGen
+} // end namespace clang
+
+#endif
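
Typical use of the new class, matching the call sites updated earlier in this patch (sketch; CGM and Field are assumed to be in scope as they are in the CGExpr.cpp callers):

    const CGRecordLayout &RL =
        CGM.getTypes().getCGRecordLayout(Field->getParent());
    if (Field->isBitField()) {
      const CGRecordLayout::BitFieldInfo &BFI = RL.getBitFieldInfo(Field);
      // BFI.FieldNo / BFI.Start / BFI.Size locate the bits inside the
      // LLVM struct element.
    } else {
      unsigned Idx = RL.getLLVMFieldNo(Field);  // plain struct GEP index
    }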
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.cpp b/lib/CodeGen/CGRecordLayoutBuilder.cpp
index baafd6836c63..daebabddc61f 100644
--- a/lib/CodeGen/CGRecordLayoutBuilder.cpp
+++ b/lib/CodeGen/CGRecordLayoutBuilder.cpp
@@ -1,4 +1,4 @@
-//===--- CGRecordLayoutBuilder.cpp - Record builder helper ------*- C++ -*-===//
+//===--- CGRecordLayoutBuilder.cpp - CGRecordLayout builder ----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,25 +7,131 @@
//
//===----------------------------------------------------------------------===//
//
-// This is a helper class used to build CGRecordLayout objects and LLVM types.
+// Builder implementation for CGRecordLayout objects.
//
//===----------------------------------------------------------------------===//
-#include "CGRecordLayoutBuilder.h"
-
+#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/Expr.h"
#include "clang/AST/RecordLayout.h"
#include "CodeGenTypes.h"
+#include "llvm/Type.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Target/TargetData.h"
-
-
using namespace clang;
using namespace CodeGen;
+namespace clang {
+namespace CodeGen {
+
+class CGRecordLayoutBuilder {
+public:
+ /// FieldTypes - Holds the LLVM types that the struct is created from.
+ std::vector<const llvm::Type *> FieldTypes;
+
+ /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
+ typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
+ llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
+
+ /// LLVMBitFieldInfo - Holds location and size information about a bit field.
+ struct LLVMBitFieldInfo {
+ LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start,
+ unsigned Size)
+ : FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { }
+
+ const FieldDecl *FD;
+
+ unsigned FieldNo;
+ unsigned Start;
+ unsigned Size;
+ };
+ llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
+
+ /// ContainsPointerToDataMember - Whether one of the fields in this record
+ /// layout is a pointer to data member, or a struct that contains a pointer
+ /// to data member.
+ bool ContainsPointerToDataMember;
+
+ /// Packed - Whether the resulting LLVM struct will be packed or not.
+ bool Packed;
+
+private:
+ CodeGenTypes &Types;
+
+ /// Alignment - Contains the alignment of the RecordDecl.
+ //
+ // FIXME: This is not needed and should be removed.
+ unsigned Alignment;
+
+ /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
+ /// LLVM types.
+ unsigned AlignmentAsLLVMStruct;
+
+ /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
+ /// this will have the number of bits still available in the field.
+ char BitsAvailableInLastField;
+
+ /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
+ uint64_t NextFieldOffsetInBytes;
+
+ /// LayoutUnion - Will lay out a union RecordDecl.
+ void LayoutUnion(const RecordDecl *D);
+
+ /// LayoutFields - Try to lay out all fields in the record decl.
+ /// Returns false if the operation failed because the struct is not packed.
+ bool LayoutFields(const RecordDecl *D);
+
+ /// LayoutBases - Lay out the bases and vtable pointer of a record decl.
+ void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout);
+
+ /// LayoutField - Lay out a single field. Returns false if the operation
+ /// failed because the current struct is not packed.
+ bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// LayoutBitField - Lay out a single bit field.
+ void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
+
+ /// AppendField - Appends a field with the given offset and type.
+ void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total struct
+ /// size matches the alignment of the passed in type.
+ void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
+
+ /// AppendPadding - Appends enough padding bytes so that the total
+ /// struct size is a multiple of the field alignment.
+ void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
+
+ /// AppendBytes - Append a given number of bytes to the record.
+ void AppendBytes(uint64_t NumBytes);
+
+ /// AppendTailPadding - Append enough tail padding so that the type will have
+ /// the passed size.
+ void AppendTailPadding(uint64_t RecordSize);
+
+ unsigned getTypeAlignment(const llvm::Type *Ty) const;
+ uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const;
+
+ /// CheckForPointerToDataMember - Check if the given type contains a pointer
+ /// to data member.
+ void CheckForPointerToDataMember(QualType T);
+
+public:
+ CGRecordLayoutBuilder(CodeGenTypes &Types)
+ : ContainsPointerToDataMember(false), Packed(false), Types(Types),
+ Alignment(0), AlignmentAsLLVMStruct(1),
+ BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
+
+ /// Layout - Will lay out a RecordDecl.
+ void Layout(const RecordDecl *D);
+};
+
+}
+}
+
void CGRecordLayoutBuilder::Layout(const RecordDecl *D) {
Alignment = Types.getContext().getASTRecordLayout(D).getAlignment() / 8;
Packed = D->hasAttr<PackedAttr>();
@@ -110,7 +216,7 @@ bool CGRecordLayoutBuilder::LayoutField(const FieldDecl *D,
// Check if we have a pointer to data member in this field.
CheckForPointerToDataMember(D->getType());
-
+
assert(FieldOffset % 8 == 0 && "FieldOffset is not on a byte boundary!");
uint64_t FieldOffsetInBytes = FieldOffset / 8;
@@ -166,7 +272,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
unsigned Align = 0;
bool HasOnlyZeroSizedBitFields = true;
-
+
unsigned FieldNo = 0;
for (RecordDecl::field_iterator Field = D->field_begin(),
FieldEnd = D->field_end(); Field != FieldEnd; ++Field, ++FieldNo) {
@@ -182,12 +288,13 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
continue;
// Add the bit field info.
- Types.addBitFieldInfo(*Field, 0, 0, FieldSize);
- } else
- Types.addFieldInfo(*Field, 0);
+ LLVMBitFields.push_back(LLVMBitFieldInfo(*Field, 0, 0, FieldSize));
+ } else {
+ LLVMFields.push_back(LLVMFieldInfo(*Field, 0));
+ }
HasOnlyZeroSizedBitFields = false;
-
+
const llvm::Type *FieldTy =
Types.ConvertTypeForMemRecursive(Field->getType());
unsigned FieldAlign = Types.getTargetData().getABITypeAlignment(FieldTy);
@@ -218,7 +325,7 @@ void CGRecordLayoutBuilder::LayoutUnion(const RecordDecl *D) {
"0-align record did not have all zero-sized bit-fields!");
Align = 1;
}
-
+
// Append tail padding.
if (Layout.getSize() / 8 > Size)
AppendPadding(Layout.getSize() / 8, Align);
@@ -228,9 +335,9 @@ void CGRecordLayoutBuilder::LayoutBases(const CXXRecordDecl *RD,
const ASTRecordLayout &Layout) {
// Check if we need to add a vtable pointer.
if (RD->isDynamicClass() && !Layout.getPrimaryBase()) {
- const llvm::Type *Int8PtrTy =
+ const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(Types.getLLVMContext());
-
+
assert(NextFieldOffsetInBytes == 0 &&
"Vtable pointer must come first!");
AppendField(NextFieldOffsetInBytes, Int8PtrTy->getPointerTo());
@@ -245,7 +352,7 @@ bool CGRecordLayoutBuilder::LayoutFields(const RecordDecl *D) {
if (const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(D))
LayoutBases(RD, Layout);
-
+
unsigned FieldNo = 0;
for (RecordDecl::field_iterator Field = D->field_begin(),
@@ -269,14 +376,14 @@ void CGRecordLayoutBuilder::AppendTailPadding(uint64_t RecordSize) {
uint64_t RecordSizeInBytes = RecordSize / 8;
assert(NextFieldOffsetInBytes <= RecordSizeInBytes && "Size mismatch!");
- uint64_t AlignedNextFieldOffset =
+ uint64_t AlignedNextFieldOffset =
llvm::RoundUpToAlignment(NextFieldOffsetInBytes, AlignmentAsLLVMStruct);
if (AlignedNextFieldOffset == RecordSizeInBytes) {
// We don't need any padding.
return;
}
-
+
unsigned NumPadBytes = RecordSizeInBytes - NextFieldOffsetInBytes;
AppendBytes(NumPadBytes);
}
@@ -359,46 +466,49 @@ void CGRecordLayoutBuilder::CheckForPointerToDataMember(QualType T) {
}
} else if (const RecordType *RT = T->getAs<RecordType>()) {
const CXXRecordDecl *RD = cast<CXXRecordDecl>(RT->getDecl());
-
+
// FIXME: It would be better if there was a way to explicitly compute the
// record layout instead of converting to a type.
Types.ConvertTagDeclType(RD);
-
+
const CGRecordLayout &Layout = Types.getCGRecordLayout(RD);
-
+
if (Layout.containsPointerToDataMember())
ContainsPointerToDataMember = true;
- }
+ }
}
-CGRecordLayout *
-CGRecordLayoutBuilder::ComputeLayout(CodeGenTypes &Types,
- const RecordDecl *D) {
- CGRecordLayoutBuilder Builder(Types);
+CGRecordLayout *CodeGenTypes::ComputeRecordLayout(const RecordDecl *D) {
+ CGRecordLayoutBuilder Builder(*this);
Builder.Layout(D);
- const llvm::Type *Ty = llvm::StructType::get(Types.getLLVMContext(),
+ const llvm::Type *Ty = llvm::StructType::get(getLLVMContext(),
Builder.FieldTypes,
Builder.Packed);
- assert(Types.getContext().getASTRecordLayout(D).getSize() / 8 ==
- Types.getTargetData().getTypeAllocSize(Ty) &&
+ assert(getContext().getASTRecordLayout(D).getSize() / 8 ==
+ getTargetData().getTypeAllocSize(Ty) &&
"Type size mismatch!");
+ CGRecordLayout *RL =
+ new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+
// Add all the field numbers.
for (unsigned i = 0, e = Builder.LLVMFields.size(); i != e; ++i) {
const FieldDecl *FD = Builder.LLVMFields[i].first;
unsigned FieldNo = Builder.LLVMFields[i].second;
- Types.addFieldInfo(FD, FieldNo);
+ RL->FieldInfo.insert(std::make_pair(FD, FieldNo));
}
// Add bitfield info.
for (unsigned i = 0, e = Builder.LLVMBitFields.size(); i != e; ++i) {
- const LLVMBitFieldInfo &Info = Builder.LLVMBitFields[i];
+ const CGRecordLayoutBuilder::LLVMBitFieldInfo &Info =
+ Builder.LLVMBitFields[i];
- Types.addBitFieldInfo(Info.FD, Info.FieldNo, Info.Start, Info.Size);
+ CGRecordLayout::BitFieldInfo BFI(Info.FieldNo, Info.Start, Info.Size);
+ RL->BitFields.insert(std::make_pair(Info.FD, BFI));
}
- return new CGRecordLayout(Ty, Builder.ContainsPointerToDataMember);
+ return RL;
}
diff --git a/lib/CodeGen/CGRecordLayoutBuilder.h b/lib/CodeGen/CGRecordLayoutBuilder.h
deleted file mode 100644
index eb60ed7b5b1d..000000000000
--- a/lib/CodeGen/CGRecordLayoutBuilder.h
+++ /dev/null
@@ -1,142 +0,0 @@
-//===--- CGRecordLayoutBuilder.h - Record builder helper --------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This is a helper class used to build CGRecordLayout objects and LLVM types.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H
-#define CLANG_CODEGEN_CGRECORDLAYOUTBUILDER_H
-
-#include "llvm/ADT/SmallVector.h"
-#include "llvm/System/DataTypes.h"
-#include <vector>
-
-namespace llvm {
- class Type;
-}
-
-namespace clang {
- class ASTRecordLayout;
- class CXXRecordDecl;
- class FieldDecl;
- class RecordDecl;
- class QualType;
-
-namespace CodeGen {
- class CGRecordLayout;
- class CodeGenTypes;
-
-class CGRecordLayoutBuilder {
- CodeGenTypes &Types;
-
- /// Packed - Whether the resulting LLVM struct will be packed or not.
- bool Packed;
-
- /// ContainsPointerToDataMember - Whether one of the fields in this record
- /// layout is a pointer to data member, or a struct that contains pointer to
- /// data member.
- bool ContainsPointerToDataMember;
-
- /// Alignment - Contains the alignment of the RecordDecl.
- unsigned Alignment;
-
- /// AlignmentAsLLVMStruct - Will contain the maximum alignment of all the
- /// LLVM types.
- unsigned AlignmentAsLLVMStruct;
-
- /// BitsAvailableInLastField - If a bit field spans only part of a LLVM field,
- /// this will have the number of bits still available in the field.
- char BitsAvailableInLastField;
-
- /// NextFieldOffsetInBytes - Holds the next field offset in bytes.
- uint64_t NextFieldOffsetInBytes;
-
- /// FieldTypes - Holds the LLVM types that the struct is created from.
- std::vector<const llvm::Type *> FieldTypes;
-
- /// LLVMFieldInfo - Holds a field and its corresponding LLVM field number.
- typedef std::pair<const FieldDecl *, unsigned> LLVMFieldInfo;
- llvm::SmallVector<LLVMFieldInfo, 16> LLVMFields;
-
- /// LLVMBitFieldInfo - Holds location and size information about a bit field.
- struct LLVMBitFieldInfo {
- LLVMBitFieldInfo(const FieldDecl *FD, unsigned FieldNo, unsigned Start,
- unsigned Size)
- : FD(FD), FieldNo(FieldNo), Start(Start), Size(Size) { }
-
- const FieldDecl *FD;
-
- unsigned FieldNo;
- unsigned Start;
- unsigned Size;
- };
- llvm::SmallVector<LLVMBitFieldInfo, 16> LLVMBitFields;
-
- CGRecordLayoutBuilder(CodeGenTypes &Types)
- : Types(Types), Packed(false), ContainsPointerToDataMember(false)
- , Alignment(0), AlignmentAsLLVMStruct(1)
- , BitsAvailableInLastField(0), NextFieldOffsetInBytes(0) { }
-
- /// Layout - Will layout a RecordDecl.
- void Layout(const RecordDecl *D);
-
- /// LayoutUnion - Will layout a union RecordDecl.
- void LayoutUnion(const RecordDecl *D);
-
- /// LayoutField - try to layout all fields in the record decl.
- /// Returns false if the operation failed because the struct is not packed.
- bool LayoutFields(const RecordDecl *D);
-
- /// LayoutBases - layout the bases and vtable pointer of a record decl.
- void LayoutBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout);
-
- /// LayoutField - layout a single field. Returns false if the operation failed
- /// because the current struct is not packed.
- bool LayoutField(const FieldDecl *D, uint64_t FieldOffset);
-
- /// LayoutBitField - layout a single bit field.
- void LayoutBitField(const FieldDecl *D, uint64_t FieldOffset);
-
- /// AppendField - Appends a field with the given offset and type.
- void AppendField(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
-
- /// AppendPadding - Appends enough padding bytes so that the total struct
- /// size matches the alignment of the passed in type.
- void AppendPadding(uint64_t FieldOffsetInBytes, const llvm::Type *FieldTy);
-
- /// AppendPadding - Appends enough padding bytes so that the total
- /// struct size is a multiple of the field alignment.
- void AppendPadding(uint64_t FieldOffsetInBytes, unsigned FieldAlignment);
-
- /// AppendBytes - Append a given number of bytes to the record.
- void AppendBytes(uint64_t NumBytes);
-
- /// AppendTailPadding - Append enough tail padding so that the type will have
- /// the passed size.
- void AppendTailPadding(uint64_t RecordSize);
-
- unsigned getTypeAlignment(const llvm::Type *Ty) const;
- uint64_t getTypeSizeInBytes(const llvm::Type *Ty) const;
-
- /// CheckForPointerToDataMember - Check if the given type contains a pointer
- /// to data member.
- void CheckForPointerToDataMember(QualType T);
-
-public:
- /// ComputeLayout - Return the right record layout for a given record decl.
- static CGRecordLayout *ComputeLayout(CodeGenTypes &Types,
- const RecordDecl *D);
-};
-
-} // end namespace CodeGen
-} // end namespace clang
-
-
-#endif
diff --git a/lib/CodeGen/CGStmt.cpp b/lib/CodeGen/CGStmt.cpp
index a889e55a9e80..ae2f791719d3 100644
--- a/lib/CodeGen/CGStmt.cpp
+++ b/lib/CodeGen/CGStmt.cpp
@@ -607,7 +607,8 @@ void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
} else if (FnRetTy->isReferenceType()) {
// If this function returns a reference, take the address of the expression
// rather than the value.
- Builder.CreateStore(EmitLValue(RV).getAddress(), ReturnValue);
+ RValue Result = EmitReferenceBindingToExpr(RV, false);
+ Builder.CreateStore(Result.getScalarVal(), ReturnValue);
} else if (!hasAggregateLLVMType(RV->getType())) {
Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
} else if (RV->getType()->isAnyComplexType()) {
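
Taking the address of an EmitLValue result only works when the returned expression is already a glvalue of the right type; EmitReferenceBindingToExpr performs a genuine reference binding, which presumably also covers cases like the following (standalone; the second function returns a dangling reference and is shown only because the frontend must still be able to materialize the temporary):

    struct Base { virtual ~Base() {} };
    struct Derived : Base { };

    Derived d;
    Base &asBase() { return d; }     // binding may adjust Derived& to Base&

    const int &bad() { return 42; }  // binds to a materialized temporary
                                     // (dangles at runtime, but must compile)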
diff --git a/lib/CodeGen/CGTemporaries.cpp b/lib/CodeGen/CGTemporaries.cpp
index bed843966e62..6d38ab98dee6 100644
--- a/lib/CodeGen/CGTemporaries.cpp
+++ b/lib/CodeGen/CGTemporaries.cpp
@@ -127,15 +127,14 @@ CodeGenFunction::EmitCXXExprWithTemporaries(const CXXExprWithTemporaries *E,
size_t CleanupStackDepth = CleanupEntries.size();
(void) CleanupStackDepth;
- unsigned OldNumLiveTemporaries = LiveTemporaries.size();
-
- RValue RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
- /*IgnoreResult=*/false, IsInitializer);
-
- // Pop temporaries.
- while (LiveTemporaries.size() > OldNumLiveTemporaries)
- PopCXXTemporary();
+ RValue RV;
+
+ {
+ CXXTemporariesCleanupScope Scope(*this);
+ RV = EmitAnyExpr(E->getSubExpr(), AggLoc, IsAggLocVolatile,
+ /*IgnoreResult=*/false, IsInitializer);
+ }
assert(CleanupEntries.size() == CleanupStackDepth &&
"Cleanup size mismatch!");
diff --git a/lib/CodeGen/CGVTT.cpp b/lib/CodeGen/CGVTT.cpp
index 96c104b22d15..91d9f763bfe7 100644
--- a/lib/CodeGen/CGVTT.cpp
+++ b/lib/CodeGen/CGVTT.cpp
@@ -19,283 +19,355 @@ using namespace CodeGen;
#define D1(x)
namespace {
+
+/// VTT builder - Class for building VTT layout information.
class VTTBuilder {
- /// Inits - The list of values built for the VTT.
- std::vector<llvm::Constant *> &Inits;
- /// Class - The most derived class that this vtable is being built for.
- const CXXRecordDecl *Class;
- CodeGenModule &CGM; // Per-module state.
- llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
- /// BLayout - Layout for the most derived class that this vtable is being
- /// built for.
- const ASTRecordLayout &BLayout;
- CGVtableInfo::AddrMap_t &AddressPoints;
- // vtbl - A pointer to the vtable for Class.
- llvm::Constant *ClassVtbl;
- llvm::LLVMContext &VMContext;
-
- /// SeenVBasesInSecondary - The seen virtual bases when building the
- /// secondary virtual pointers.
- llvm::SmallPtrSet<const CXXRecordDecl *, 32> SeenVBasesInSecondary;
+
+ CodeGenModule &CGM;
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
+ /// MostDerivedClass - The most derived class for which we're building this
+ /// VTT.
+ const CXXRecordDecl *MostDerivedClass;
+
+ typedef llvm::SmallVector<llvm::Constant *, 64> VTTComponentsVectorTy;
- bool GenerateDefinition;
+ /// VTTComponents - The VTT components.
+ VTTComponentsVectorTy VTTComponents;
+
+ /// MostDerivedClassLayout - the AST record layout of the most derived class.
+ const ASTRecordLayout &MostDerivedClassLayout;
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
- llvm::DenseMap<BaseSubobject, llvm::Constant *> CtorVtables;
- llvm::DenseMap<std::pair<const CXXRecordDecl *, BaseSubobject>, uint64_t>
- CtorVtableAddressPoints;
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
+
+ /// SubVTTIndicies - The sub-VTT indices for the bases of the most derived
+ /// class.
+ llvm::DenseMap<const CXXRecordDecl *, uint64_t> SubVTTIndicies;
+
+ /// SecondaryVirtualPointerIndices - The secondary virtual pointer indices of
+ /// all subobjects of the most derived class.
+ llvm::DenseMap<BaseSubobject, uint64_t> SecondaryVirtualPointerIndices;
+
+ /// GenerateDefinition - Whether the VTT builder should generate LLVM IR for
+ /// the VTT.
+ bool GenerateDefinition;
- llvm::Constant *getCtorVtable(const BaseSubobject &Base,
- bool BaseIsVirtual) {
- if (!GenerateDefinition)
- return 0;
-
- llvm::Constant *&CtorVtable = CtorVtables[Base];
- if (!CtorVtable) {
- // Build the vtable.
- CGVtableInfo::CtorVtableInfo Info
- = CGM.getVtableInfo().getCtorVtable(Class, Base, BaseIsVirtual);
-
- CtorVtable = Info.Vtable;
-
- // Add the address points for this base.
- for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
- Info.AddressPoints.begin(), E = Info.AddressPoints.end();
- I != E; ++I) {
- uint64_t &AddressPoint =
- CtorVtableAddressPoints[std::make_pair(Base.getBase(), I->first)];
-
- // Check if we already have the address points for this base.
- if (AddressPoint)
- break;
-
- // Otherwise, insert it.
- AddressPoint = I->second;
- }
- }
-
- return CtorVtable;
+ /// GetAddrOfVTable - Returns the address of the vtable for the base class in
+ /// the given vtable class.
+ ///
+ /// \param AddressPoints - If the returned vtable is a construction vtable,
+ /// this will hold the address points for it.
+ llvm::Constant *GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints);
+
+ /// AddVTablePointer - Add a vtable pointer to the VTT currently being built.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutSecondaryVTTs - Lay out the secondary VTTs of the given base
+ /// subobject.
+ void LayoutSecondaryVTTs(BaseSubobject Base);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param BaseIsMorallyVirtual whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutSecondaryVirtualPointers - Lay out the secondary virtual pointers
+ /// for the given base subobject.
+ ///
+ /// \param AddressPoints - If the vtable is a construction vtable, this has
+ /// the address points for it.
+ void LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints);
+
+ /// LayoutVirtualVTTs - Lay out the VTTs for the virtual base classes of the
+ /// given record decl.
+ void LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases);
+
+ /// LayoutVTT - Will lay out the VTT for the given subobject, including any
+ /// secondary VTTs, secondary virtual pointers and virtual VTTs.
+ void LayoutVTT(BaseSubobject Base, bool BaseIsVirtual);
+
+public:
+ VTTBuilder(CodeGenModule &CGM, const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition);
+
+ /// getVTTComponents - Returns a reference to the VTT components.
+ const VTTComponentsVectorTy &getVTTComponents() const {
+ return VTTComponents;
}
+ /// getSubVTTIndicies - Returns a reference to the sub-VTT indices.
+ const llvm::DenseMap<const CXXRecordDecl *, uint64_t> &
+ getSubVTTIndicies() const {
+ return SubVTTIndicies;
+ }
- /// BuildVtablePtr - Build up a referene to the given secondary vtable
- llvm::Constant *BuildVtablePtr(llvm::Constant *Vtable,
- const CXXRecordDecl *VtableClass,
- const CXXRecordDecl *RD,
- uint64_t Offset) {
- if (!GenerateDefinition)
- return 0;
-
- uint64_t AddressPoint;
-
- if (VtableClass != Class) {
- // We have a ctor vtable, look for the address point in the ctor vtable
- // address points.
- AddressPoint =
- CtorVtableAddressPoints[std::make_pair(VtableClass,
- BaseSubobject(RD, Offset))];
- } else {
- AddressPoint =
- (*AddressPoints[VtableClass])[std::make_pair(RD, Offset)];
- }
+ /// getSecondaryVirtualPointerIndices - Returns a reference to the secondary
+ /// virtual pointer indices.
+ const llvm::DenseMap<BaseSubobject, uint64_t> &
+ getSecondaryVirtualPointerIndices() const {
+ return SecondaryVirtualPointerIndices;
+ }
- // FIXME: We can never have 0 address point. Do this for now so gepping
- // retains the same structure. Later we'll just assert.
- if (AddressPoint == 0)
- AddressPoint = 1;
- D1(printf("XXX address point for %s in %s layout %s at offset %d was %d\n",
- RD->getNameAsCString(), VtblClass->getNameAsCString(),
- Class->getNameAsCString(), (int)Offset, (int)AddressPoint));
-
- llvm::Value *Idxs[] = {
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), 0),
- llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), AddressPoint)
- };
+};
+
+VTTBuilder::VTTBuilder(CodeGenModule &CGM,
+ const CXXRecordDecl *MostDerivedClass,
+ bool GenerateDefinition)
+ : CGM(CGM), MostDerivedClass(MostDerivedClass),
+ MostDerivedClassLayout(CGM.getContext().getASTRecordLayout(MostDerivedClass)),
+ GenerateDefinition(GenerateDefinition) {
- llvm::Constant *Init =
- llvm::ConstantExpr::getInBoundsGetElementPtr(Vtable, Idxs, 2);
+ // Lay out this VTT.
+ LayoutVTT(BaseSubobject(MostDerivedClass, 0), /*BaseIsVirtual=*/false);
+}
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(VMContext);
- return llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+llvm::Constant *
+VTTBuilder::GetAddrOfVTable(BaseSubobject Base, bool BaseIsVirtual,
+ AddressPointsMapTy& AddressPoints) {
+ if (!GenerateDefinition)
+ return 0;
+
+ if (Base.getBase() == MostDerivedClass) {
+ assert(Base.getBaseOffset() == 0 &&
+ "Most derived class vtable must have a zero offset!");
+ // This is a regular vtable.
+ return CGM.getVTables().GetAddrOfVTable(MostDerivedClass);
}
+
+ return CGM.getVTables().GenerateConstructionVTable(MostDerivedClass,
+ Base, BaseIsVirtual,
+ AddressPoints);
+}
- /// Secondary - Add the secondary vtable pointers to Inits. Offset is the
- /// current offset in bits to the object we're working on.
- void Secondary(const CXXRecordDecl *RD, llvm::Constant *vtbl,
- const CXXRecordDecl *VtblClass, uint64_t Offset=0,
- bool MorallyVirtual=false) {
- if (RD->getNumVBases() == 0 && ! MorallyVirtual)
- return;
-
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
-
- // We only want to visit each virtual base once.
- if (i->isVirtual() && SeenVBasesInSecondary.count(Base))
- continue;
-
- // Itanium C++ ABI 2.6.2:
- // Secondary virtual pointers are present for all bases with either
- // virtual bases or virtual function declarations overridden along a
- // virtual path.
- //
- // If the base class is not dynamic, we don't want to add it, nor any
- // of its base classes.
- if (!Base->isDynamicClass())
- continue;
+void VTTBuilder::AddVTablePointer(BaseSubobject Base, llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints) {
+ // Store the vtable pointer index if we're generating the primary VTT.
+ if (VTableClass == MostDerivedClass) {
+ assert(!SecondaryVirtualPointerIndices.count(Base) &&
+ "A virtual pointer index already exists for this base subobject!");
+ SecondaryVirtualPointerIndices[Base] = VTTComponents.size();
+ }
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
- bool NonVirtualPrimaryBase;
- NonVirtualPrimaryBase = !PrimaryBaseWasVirtual && Base == PrimaryBase;
- bool BaseMorallyVirtual = MorallyVirtual | i->isVirtual();
- uint64_t BaseOffset;
- if (!i->isVirtual()) {
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- } else
- BaseOffset = BLayout.getVBaseClassOffset(Base);
- llvm::Constant *subvtbl = vtbl;
- const CXXRecordDecl *subVtblClass = VtblClass;
- if ((Base->getNumVBases() || BaseMorallyVirtual)
- && !NonVirtualPrimaryBase) {
- llvm::Constant *init;
- if (BaseMorallyVirtual || VtblClass == Class)
- init = BuildVtablePtr(vtbl, VtblClass, Base, BaseOffset);
- else {
- init = getCtorVtable(BaseSubobject(Base, BaseOffset), i->isVirtual());
-
- subvtbl = init;
- subVtblClass = Base;
-
- init = BuildVtablePtr(init, Class, Base, BaseOffset);
- }
-
- Inits.push_back(init);
- }
-
- if (i->isVirtual())
- SeenVBasesInSecondary.insert(Base);
-
- Secondary(Base, subvtbl, subVtblClass, BaseOffset, BaseMorallyVirtual);
- }
+ if (!GenerateDefinition) {
+ VTTComponents.push_back(0);
+ return;
}
- /// BuiltVTT - Add the VTT to Inits. Offset is the offset in bits to the
- /// currnet object we're working on.
- void BuildVTT(const CXXRecordDecl *RD, uint64_t Offset, bool BaseIsVirtual,
- bool MorallyVirtual) {
- // Itanium C++ ABI 2.6.2:
- // An array of virtual table addresses, called the VTT, is declared for
- // each class type that has indirect or direct virtual base classes.
- if (RD->getNumVBases() == 0)
- return;
+ uint64_t AddressPoint;
+ if (VTableClass != MostDerivedClass) {
+ // The vtable is a construction vtable, look in the construction vtable
+ // address points.
+ AddressPoint = AddressPoints.lookup(Base);
+ } else {
+ // Just get the address point for the regular vtable.
+ AddressPoint = CGM.getVTables().getAddressPoint(Base, VTableClass);
+ }
- // Remember the sub-VTT index.
- SubVTTIndicies[RD] = Inits.size();
+  assert(AddressPoint != 0 && "Did not find an address point!");
+
+ llvm::Value *Idxs[] = {
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()), 0),
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(CGM.getLLVMContext()),
+ AddressPoint)
+ };
+
+ llvm::Constant *Init =
+ llvm::ConstantExpr::getInBoundsGetElementPtr(VTable, Idxs, 2);
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+
+ VTTComponents.push_back(Init);
+}
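Each slot built here is an i8* aimed at an address point inside a vtable rather than at the vtable's start, because that is the shape constructors index. How a base-object constructor consumes the slots, as pseudo-C++ (an assumption for illustration, not code from this patch):

    // Pseudo-code sketch of the consumer side.
    void Base_ctor(void *This, void **VTT, unsigned VTTIndex) {
      // Install the primary vptr from the sub-VTT handed down by the
      // complete-object constructor.
      *reinterpret_cast<void **>(This) = VTT[VTTIndex];
      // Secondary virtual pointers are read from the slots recorded in
      // SecondaryVirtualPointerIndices.
    }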
- llvm::Constant *Vtable;
- const CXXRecordDecl *VtableClass;
+void VTTBuilder::LayoutSecondaryVTTs(BaseSubobject Base) {
+ const CXXRecordDecl *RD = Base.getBase();
- // First comes the primary virtual table pointer...
- if (MorallyVirtual) {
- Vtable = ClassVtbl;
- VtableClass = Class;
- } else {
- Vtable = getCtorVtable(BaseSubobject(RD, Offset),
- /*IsVirtual=*/BaseIsVirtual);
- VtableClass = RD;
- }
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
- llvm::Constant *Init = BuildVtablePtr(Vtable, VtableClass, RD, Offset);
- Inits.push_back(Init);
-
- // then the secondary VTTs....
- SecondaryVTTs(RD, Offset, MorallyVirtual);
+    // Don't lay out virtual bases.
+ if (I->isVirtual())
+ continue;
- // Make sure to clear the set of seen virtual bases.
- SeenVBasesInSecondary.clear();
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- // and last the secondary vtable pointers.
- Secondary(RD, Vtable, VtableClass, Offset, MorallyVirtual);
+ const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
+ uint64_t BaseOffset = Base.getBaseOffset() +
+ Layout.getBaseClassOffset(BaseDecl);
+
+    // Lay out the VTT for this base.
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/false);
}
+}
+
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ const AddressPointsMapTy& AddressPoints,
+ VisitedVirtualBasesSetTy &VBases) {
+ const CXXRecordDecl *RD = Base.getBase();
+
+  // We're only interested in classes that either have virtual bases or are
+  // morally virtual themselves; skip everything else.
+ if (!RD->getNumVBases() && !BaseIsMorallyVirtual)
+ return;
- /// SecondaryVTTs - Add the secondary VTTs to Inits. The secondary VTTs are
- /// built from each direct non-virtual proper base that requires a VTT in
- /// declaration order.
- void SecondaryVTTs(const CXXRecordDecl *RD, uint64_t Offset=0,
- bool MorallyVirtual=false) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (i->isVirtual())
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
+
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers are present for all bases with either
+ // virtual bases or virtual function declarations overridden along a
+ // virtual path.
+ //
+ // If the base class is not dynamic, we don't want to add it, nor any
+ // of its base classes.
+ if (!BaseDecl->isDynamicClass())
+ continue;
+
+ bool BaseDeclIsMorallyVirtual = BaseIsMorallyVirtual;
+ bool BaseDeclIsNonVirtualPrimaryBase = false;
+ uint64_t BaseOffset;
+ if (I->isVirtual()) {
+ // Ignore virtual bases that we've already visited.
+ if (!VBases.insert(BaseDecl))
continue;
+
+ BaseOffset = MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+ BaseDeclIsMorallyVirtual = true;
+ } else {
const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- uint64_t BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/false, MorallyVirtual);
+ BaseOffset = Base.getBaseOffset() + Layout.getBaseClassOffset(BaseDecl);
+
+ if (!Layout.getPrimaryBaseWasVirtual() &&
+ Layout.getPrimaryBase() == BaseDecl)
+ BaseDeclIsNonVirtualPrimaryBase = true;
}
- }
- /// VirtualVTTs - Add the VTT for each proper virtual base in inheritance
- /// graph preorder.
- void VirtualVTTs(const CXXRecordDecl *RD) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (i->isVirtual() && !SeenVBase.count(Base)) {
- SeenVBase.insert(Base);
- uint64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
- BuildVTT(Base, BaseOffset, /*BaseIsVirtual=*/true, false);
- }
- VirtualVTTs(Base);
+ // Itanium C++ ABI 2.6.2:
+ // Secondary virtual pointers: for each base class X which (a) has virtual
+ // bases or is reachable along a virtual path from D, and (b) is not a
+ // non-virtual primary base, the address of the virtual table for X-in-D
+ // or an appropriate construction virtual table.
+ if (!BaseDeclIsNonVirtualPrimaryBase &&
+ (BaseDecl->getNumVBases() || BaseDeclIsMorallyVirtual)) {
+ // Add the vtable pointer.
+ AddVTablePointer(BaseSubobject(BaseDecl, BaseOffset), VTable, VTableClass,
+ AddressPoints);
}
+
+ // And lay out the secondary virtual pointers for the base class.
+ LayoutSecondaryVirtualPointers(BaseSubobject(BaseDecl, BaseOffset),
+ BaseDeclIsMorallyVirtual, VTable,
+ VTableClass, AddressPoints, VBases);
}
+}
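The recursion above is the ABI 2.6.2 rule in code form. A small hypothetical hierarchy showing which subobjects end up with a secondary virtual pointer:

    // Sketch, hypothetical hierarchy:
    struct V { virtual void f() { } };
    struct A : virtual V { };  // dynamic; D's non-virtual primary base
    struct B : virtual V { };  // dynamic; not a primary base
    struct D : A, B { };
    // Walking D: A shares D's address point, so it gets no secondary
    // virtual pointer, but the walk still descends into it and emits one
    // for V (exactly once, guarded by the VBases set). B has a virtual
    // base and is not a primary base, so it gets its own pointer.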
-public:
- VTTBuilder(std::vector<llvm::Constant *> &inits, const CXXRecordDecl *c,
- CodeGenModule &cgm, bool GenerateDefinition)
- : Inits(inits), Class(c), CGM(cgm),
- BLayout(cgm.getContext().getASTRecordLayout(c)),
- AddressPoints(*cgm.getVtableInfo().AddressPoints[c]),
- VMContext(cgm.getModule().getContext()),
- GenerateDefinition(GenerateDefinition) {
-
- // First comes the primary virtual table pointer for the complete class...
- ClassVtbl = GenerateDefinition ? CGM.getVtableInfo().getVtable(Class) : 0;
+void
+VTTBuilder::LayoutSecondaryVirtualPointers(BaseSubobject Base,
+ llvm::Constant *VTable,
+ const AddressPointsMapTy& AddressPoints) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutSecondaryVirtualPointers(Base, /*BaseIsMorallyVirtual=*/false,
+ VTable, Base.getBase(), AddressPoints, VBases);
+}
- llvm::Constant *Init = BuildVtablePtr(ClassVtbl, Class, Class, 0);
- Inits.push_back(Init);
+void VTTBuilder::LayoutVirtualVTTs(const CXXRecordDecl *RD,
+ VisitedVirtualBasesSetTy &VBases) {
+ for (CXXRecordDecl::base_class_const_iterator I = RD->bases_begin(),
+ E = RD->bases_end(); I != E; ++I) {
+ const CXXRecordDecl *BaseDecl =
+ cast<CXXRecordDecl>(I->getType()->getAs<RecordType>()->getDecl());
- // then the secondary VTTs...
- SecondaryVTTs(Class);
+ // Check if this is a virtual base.
+ if (I->isVirtual()) {
+ // Check if we've seen this base before.
+ if (!VBases.insert(BaseDecl))
+ continue;
+
+ uint64_t BaseOffset =
+ MostDerivedClassLayout.getVBaseClassOffset(BaseDecl);
+
+ LayoutVTT(BaseSubobject(BaseDecl, BaseOffset), /*BaseIsVirtual=*/true);
+ }
+
+    // We only need to lay out virtual VTTs for this base if it actually has
+    // virtual bases.
+ if (BaseDecl->getNumVBases())
+ LayoutVirtualVTTs(BaseDecl, VBases);
+ }
+}
- // Make sure to clear the set of seen virtual bases.
- SeenVBasesInSecondary.clear();
+void VTTBuilder::LayoutVTT(BaseSubobject Base, bool BaseIsVirtual) {
+ const CXXRecordDecl *RD = Base.getBase();
- // then the secondary vtable pointers...
- Secondary(Class, ClassVtbl, Class);
+ // Itanium C++ ABI 2.6.2:
+ // An array of virtual table addresses, called the VTT, is declared for
+ // each class type that has indirect or direct virtual base classes.
+ if (RD->getNumVBases() == 0)
+ return;
+
+ bool IsPrimaryVTT = Base.getBase() == MostDerivedClass;
- // and last, the virtual VTTs.
- VirtualVTTs(Class);
+ if (!IsPrimaryVTT) {
+ // Remember the sub-VTT index.
+ SubVTTIndicies[RD] = VTTComponents.size();
}
+
+ AddressPointsMapTy AddressPoints;
+ llvm::Constant *VTable = GetAddrOfVTable(Base, BaseIsVirtual, AddressPoints);
+
+ // Add the primary vtable pointer.
+ AddVTablePointer(Base, VTable, RD, AddressPoints);
+
+ // Add the secondary VTTs.
+ LayoutSecondaryVTTs(Base);
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getSubVTTIndicies() {
- return SubVTTIndicies;
+ // Add the secondary virtual pointers.
+ LayoutSecondaryVirtualPointers(Base, VTable, AddressPoints);
+
+ // If this is the primary VTT, we want to lay out virtual VTTs as well.
+ if (IsPrimaryVTT) {
+ VisitedVirtualBasesSetTy VBases;
+ LayoutVirtualVTTs(Base.getBase(), VBases);
}
-};
+}
+
}
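LayoutVTT stitches the four parts together in the ABI's prescribed order. For a simple chain, the resulting VTT looks roughly like this (a sketch under the usual Itanium layout, not output of this code):

    struct V { virtual void f() { } };
    struct C : virtual V { };
    struct D : C { };
    // VTT for D, in the order produced above:
    //   [0] primary virtual pointer for D          (AddVTablePointer)
    //   [1] sub-VTT for C-in-D: C-in-D's own vptr  (LayoutSecondaryVTTs)
    //   [2]   ...and C-in-D's secondary vptr for V
    //   [3] secondary virtual pointer for V in D   (LayoutSecondaryVirtualPointers)
    //   No virtual VTTs follow, since V has no virtual bases of its own.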
llvm::GlobalVariable *
-CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *RD) {
+CodeGenVTables::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
+ bool GenerateDefinition,
+ const CXXRecordDecl *RD) {
// Only classes that have virtual bases need a VTT.
if (RD->getNumVBases() == 0)
return 0;
@@ -311,13 +383,15 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
const llvm::Type *Int8PtrTy =
llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- std::vector<llvm::Constant *> inits;
- VTTBuilder b(inits, RD, CGM, GenerateDefinition);
+ VTTBuilder Builder(CGM, RD, GenerateDefinition);
+
+ const llvm::ArrayType *Type =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getVTTComponents().size());
- const llvm::ArrayType *Type = llvm::ArrayType::get(Int8PtrTy, inits.size());
llvm::Constant *Init = 0;
if (GenerateDefinition)
- Init = llvm::ConstantArray::get(Type, inits);
+ Init = llvm::ConstantArray::get(Type, Builder.getVTTComponents().data(),
+ Builder.getVTTComponents().size());
llvm::GlobalVariable *OldGV = GV;
GV = new llvm::GlobalVariable(CGM.getModule(), Type, /*isConstant=*/true,
@@ -336,26 +410,12 @@ CGVtableInfo::GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
return GV;
}
-CGVtableInfo::CtorVtableInfo
-CGVtableInfo::getCtorVtable(const CXXRecordDecl *RD,
- const BaseSubobject &Base, bool BaseIsVirtual) {
- CtorVtableInfo Info;
-
- Info.Vtable = GenerateVtable(llvm::GlobalValue::InternalLinkage,
- /*GenerateDefinition=*/true,
- RD, Base.getBase(), Base.getBaseOffset(),
- BaseIsVirtual, Info.AddressPoints);
- return Info;
-}
-
-llvm::GlobalVariable *CGVtableInfo::getVTT(const CXXRecordDecl *RD) {
+llvm::GlobalVariable *CodeGenVTables::getVTT(const CXXRecordDecl *RD) {
return GenerateVTT(llvm::GlobalValue::ExternalLinkage,
/*GenerateDefinition=*/false, RD);
-
}
-
-bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
+bool CodeGenVTables::needsVTTParameter(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
// We don't have any virtual bases, just return early.
@@ -373,19 +433,17 @@ bool CGVtableInfo::needsVTTParameter(GlobalDecl GD) {
return false;
}
-uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD,
- const CXXRecordDecl *Base) {
+uint64_t CodeGenVTables::getSubVTTIndex(const CXXRecordDecl *RD,
+ const CXXRecordDecl *Base) {
ClassPairTy ClassPair(RD, Base);
- SubVTTIndiciesTy::iterator I =
- SubVTTIndicies.find(ClassPair);
+ SubVTTIndiciesMapTy::iterator I = SubVTTIndicies.find(ClassPair);
if (I != SubVTTIndicies.end())
return I->second;
- std::vector<llvm::Constant *> inits;
- VTTBuilder Builder(inits, RD, CGM, /*GenerateDefinition=*/false);
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
- for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
+ for (llvm::DenseMap<const CXXRecordDecl *, uint64_t>::const_iterator I =
Builder.getSubVTTIndicies().begin(),
E = Builder.getSubVTTIndicies().end(); I != E; ++I) {
// Insert all indices.
@@ -399,3 +457,31 @@ uint64_t CGVtableInfo::getSubVTTIndex(const CXXRecordDecl *RD,
return I->second;
}
+
+uint64_t
+CodeGenVTables::getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base) {
+ SecondaryVirtualPointerIndicesMapTy::iterator I =
+ SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+
+ if (I != SecondaryVirtualPointerIndices.end())
+ return I->second;
+
+ VTTBuilder Builder(CGM, RD, /*GenerateDefinition=*/false);
+
+ // Insert all secondary vpointer indices.
+ for (llvm::DenseMap<BaseSubobject, uint64_t>::const_iterator I =
+ Builder.getSecondaryVirtualPointerIndices().begin(),
+ E = Builder.getSecondaryVirtualPointerIndices().end(); I != E; ++I) {
+ std::pair<const CXXRecordDecl *, BaseSubobject> Pair =
+ std::make_pair(RD, I->first);
+
+ SecondaryVirtualPointerIndices.insert(std::make_pair(Pair, I->second));
+ }
+
+ I = SecondaryVirtualPointerIndices.find(std::make_pair(RD, Base));
+ assert(I != SecondaryVirtualPointerIndices.end() && "Did not find index!");
+
+ return I->second;
+}
+
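Both index queries above share one memoization pattern: on a cache miss, run a definition-free VTTBuilder once and bulk-insert everything it computed. The call-site shape, using only API from this patch:

    // Illustrative use:
    uint64_t Idx =
        CGM.getVTables().getSecondaryVirtualPointerIndex(
            MostDerivedClass, BaseSubobject(BaseDecl, BaseOffset));
    // The first query for MostDerivedClass pays for one
    // GenerateDefinition=false builder run; later queries are plain
    // DenseMap lookups.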
diff --git a/lib/CodeGen/CGVtable.cpp b/lib/CodeGen/CGVtable.cpp
index df30f479acc8..2d1c73440bbe 100644
--- a/lib/CodeGen/CGVtable.cpp
+++ b/lib/CodeGen/CGVtable.cpp
@@ -138,7 +138,7 @@ private:
/// AddOverriders - Add the final overriders for this base subobject to the
/// map of final overriders.
- void AddOverriders(BaseSubobject Base,uint64_t OffsetInLayoutClass,
+ void AddOverriders(BaseSubobject Base, uint64_t OffsetInLayoutClass,
SubobjectOffsetsMapTy &Offsets);
/// PropagateOverrider - Propagate the NewMD overrider to all the functions
@@ -636,6 +636,10 @@ public:
reinterpret_cast<uintptr_t>(MD));
}
+ static VtableComponent getFromOpaqueInteger(uint64_t I) {
+ return VtableComponent(I);
+ }
+
/// getKind - Get the kind of this vtable component.
Kind getKind() const {
return (Kind)(Value & 0x7);
@@ -725,6 +729,9 @@ private:
return static_cast<uintptr_t>(Value & ~7ULL);
}
+ explicit VtableComponent(uint64_t Value)
+ : Value(Value) { }
+
/// The kind is stored in the lower 3 bits of the value. For offsets, we
 /// make use of the fact that classes can't be larger than 2^55 bytes,
 /// so we store the offset in the lower part of the 61 bits that remain.
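The packing the comment describes fits a kind tag plus a pointer or shifted offset into a single uint64_t. The same trick in isolation, mirroring getKind() and the pointer accessor above:

    #include <stdint.h>
    #include <cassert>

    // Low 3 bits carry the kind; the remaining 61 bits carry the payload.
    static uint64_t Pack(unsigned Kind, uintptr_t Ptr) {
      assert((Ptr & 7) == 0 && "payload must be 8-byte aligned");
      return uint64_t(Ptr) | uint64_t(Kind);
    }
    static unsigned UnpackKind(uint64_t Value) { return Value & 0x7; }
    static uintptr_t UnpackPtr(uint64_t Value) {
      return static_cast<uintptr_t>(Value & ~7ULL);
    }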
@@ -1091,9 +1098,15 @@ public:
typedef llvm::SmallSetVector<const CXXRecordDecl *, 8>
PrimaryBasesSetVectorTy;
+ typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
+ VBaseOffsetOffsetsMapTy;
+
+ typedef llvm::DenseMap<BaseSubobject, uint64_t>
+ AddressPointsMapTy;
+
private:
- /// VtableInfo - Global vtable information.
- CGVtableInfo &VtableInfo;
+ /// VTables - Global vtable information.
+ CodeGenVTables &VTables;
/// MostDerivedClass - The most derived class for which we're building this
/// vtable.
@@ -1122,9 +1135,6 @@ private:
/// bases in this vtable.
llvm::DenseMap<const CXXRecordDecl *, VCallOffsetMap> VCallOffsetsForVBases;
- typedef llvm::DenseMap<const CXXRecordDecl *, int64_t>
- VBaseOffsetOffsetsMapTy;
-
/// VBaseOffsetOffsets - Contains the offsets of the virtual base offsets for
/// the most derived class.
VBaseOffsetOffsetsMapTy VBaseOffsetOffsets;
@@ -1133,29 +1143,8 @@ private:
llvm::SmallVector<VtableComponent, 64> Components;
/// AddressPoints - Address points for the vtable being built.
- CGVtableInfo::AddressPointsMapTy AddressPoints;
-
- /// ReturnAdjustment - A return adjustment.
- struct ReturnAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VBaseOffsetOffset - The offset (in bytes), relative to the address point
- /// of the virtual base class offset.
- int64_t VBaseOffsetOffset;
-
- ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
+ AddressPointsMapTy AddressPoints;
- friend bool operator==(const ReturnAdjustment &LHS,
- const ReturnAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
- }
- };
-
/// MethodInfo - Contains information about a method in a vtable.
 /// (Used for computing 'this' pointer adjustment thunks.)
struct MethodInfo {
@@ -1185,62 +1174,21 @@ private:
/// currently building.
MethodInfoMapTy MethodInfoMap;
- /// ThisAdjustment - A 'this' pointer adjustment thunk.
- struct ThisAdjustment {
- /// NonVirtual - The non-virtual adjustment from the derived object to its
- /// nearest virtual base.
- int64_t NonVirtual;
-
- /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
- /// of the virtual call offset.
- int64_t VCallOffsetOffset;
-
- ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
-
- bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
-
- friend bool operator==(const ThisAdjustment &LHS,
- const ThisAdjustment &RHS) {
- return LHS.NonVirtual == RHS.NonVirtual &&
- LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
- }
- };
-
- /// ThunkInfo - The 'this' pointer adjustment as well as an optional return
- /// adjustment for a thunk.
- struct ThunkInfo {
- /// This - The 'this' pointer adjustment.
- ThisAdjustment This;
-
- /// Return - The return adjustment.
- ReturnAdjustment Return;
-
- ThunkInfo() { }
-
- ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
- : This(This), Return(Return) { }
+ typedef llvm::DenseMap<uint64_t, ThunkInfo> VtableThunksMapTy;
- friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
- return LHS.This == RHS.This && LHS.Return == RHS.Return;
- }
+ /// VTableThunks - The thunks by vtable index in the vtable currently being
+ /// built.
+ VtableThunksMapTy VTableThunks;
- bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
- };
-
- typedef llvm::DenseMap<uint64_t, ThunkInfo> ThunksInfoMapTy;
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
- /// Thunks - The thunks by vtable index in the vtable currently being built.
- ThunksInfoMapTy Thunks;
-
- typedef llvm::DenseMap<const CXXMethodDecl *,
- llvm::SmallVector<ThunkInfo, 1> > MethodThunksMapTy;
-
- /// MethodThunks - A map that contains all the thunks needed for all methods
- /// in the vtable currently being built.
- MethodThunksMapTy MethodThunks;
+ /// Thunks - A map that contains all the thunks needed for all methods in the
+ /// most derived class for which the vtable is currently being built.
+ ThunksMapTy Thunks;
/// AddThunk - Add a thunk for the given method.
- void AddThunk(const CXXMethodDecl *MD, ThunkInfo &Thunk);
+ void AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk);
/// ComputeThisAdjustments - Compute the 'this' pointer adjustments for the
/// part of the vtable we're currently building.
@@ -1341,10 +1289,10 @@ private:
}
public:
- VtableBuilder(CGVtableInfo &VtableInfo, const CXXRecordDecl *MostDerivedClass,
+ VtableBuilder(CodeGenVTables &VTables, const CXXRecordDecl *MostDerivedClass,
uint64_t MostDerivedClassOffset, bool MostDerivedClassIsVirtual,
const CXXRecordDecl *LayoutClass)
- : VtableInfo(VtableInfo), MostDerivedClass(MostDerivedClass),
+ : VTables(VTables), MostDerivedClass(MostDerivedClass),
MostDerivedClassOffset(MostDerivedClassOffset),
MostDerivedClassIsVirtual(MostDerivedClassIsVirtual),
LayoutClass(LayoutClass), Context(MostDerivedClass->getASTContext()),
@@ -1353,15 +1301,57 @@ public:
LayoutVtable();
}
+ ThunksMapTy::const_iterator thunks_begin() const {
+ return Thunks.begin();
+ }
+
+ ThunksMapTy::const_iterator thunks_end() const {
+ return Thunks.end();
+ }
+
+ const VBaseOffsetOffsetsMapTy &getVBaseOffsetOffsets() const {
+ return VBaseOffsetOffsets;
+ }
+
+ /// getNumVTableComponents - Return the number of components in the vtable
+ /// currently built.
+ uint64_t getNumVTableComponents() const {
+ return Components.size();
+ }
+
+ const uint64_t *vtable_components_data_begin() const {
+ return reinterpret_cast<const uint64_t *>(Components.begin());
+ }
+
+ const uint64_t *vtable_components_data_end() const {
+ return reinterpret_cast<const uint64_t *>(Components.end());
+ }
+
+ AddressPointsMapTy::const_iterator address_points_begin() const {
+ return AddressPoints.begin();
+ }
+
+ AddressPointsMapTy::const_iterator address_points_end() const {
+ return AddressPoints.end();
+ }
+
+ VtableThunksMapTy::const_iterator vtable_thunks_begin() const {
+ return VTableThunks.begin();
+ }
+
+ VtableThunksMapTy::const_iterator vtable_thunks_end() const {
+ return VTableThunks.end();
+ }
+
/// dumpLayout - Dump the vtable layout.
void dumpLayout(llvm::raw_ostream&);
};
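These accessors let CodeGenVTables persist a finished layout as raw integers and rebuild components later through getFromOpaqueInteger. The round trip, as a sketch (it leans on the reinterpret_cast above, i.e. on VtableComponent being layout-compatible with uint64_t):

    for (const uint64_t *I = Builder.vtable_components_data_begin(),
                        *E = Builder.vtable_components_data_end();
         I != E; ++I) {
      VtableComponent Component = VtableComponent::getFromOpaqueInteger(*I);
      switch (Component.getKind()) {
        // ...emit the matching llvm::Constant for each component kind...
      }
    }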
-void VtableBuilder::AddThunk(const CXXMethodDecl *MD, ThunkInfo &Thunk) {
- if (isBuildingConstructorVtable())
- return;
+void VtableBuilder::AddThunk(const CXXMethodDecl *MD, const ThunkInfo &Thunk) {
+ assert(!isBuildingConstructorVtable() &&
+ "Can't add thunks for construction vtable");
- llvm::SmallVector<ThunkInfo, 1> &ThunksVector = MethodThunks[MD];
+ llvm::SmallVector<ThunkInfo, 1> &ThunksVector = Thunks[MD];
// Check if we have this thunk already.
if (std::find(ThunksVector.begin(), ThunksVector.end(), Thunk) !=
@@ -1410,6 +1400,17 @@ void VtableBuilder::ComputeThisAdjustments() {
Overriders.getOverrider(BaseSubobject(MD->getParent(),
MethodInfo.BaseOffset), MD);
+ // Check if we need an adjustment at all.
+ if (MethodInfo.BaseOffsetInLayoutClass == Overrider.Offset) {
+ // When a return thunk is needed by a derived class that overrides a
+ // virtual base, gcc uses a virtual 'this' adjustment as well.
+ // While the thunk itself might be needed by vtables in subclasses or
+ // in construction vtables, there doesn't seem to be a reason for using
+ // the thunk in this vtable. Still, we do so to match gcc.
+ if (VTableThunks.lookup(VtableIndex).Return.isEmpty())
+ continue;
+ }
+
ThisAdjustment ThisAdjustment =
ComputeThisAdjustment(MD, MethodInfo.BaseOffsetInLayoutClass, Overrider);
@@ -1417,22 +1418,48 @@ void VtableBuilder::ComputeThisAdjustments() {
continue;
// Add it.
- Thunks[VtableIndex].This = ThisAdjustment;
+ VTableThunks[VtableIndex].This = ThisAdjustment;
if (isa<CXXDestructorDecl>(MD)) {
// Add an adjustment for the deleting destructor as well.
- Thunks[VtableIndex + 1].This = ThisAdjustment;
+ VTableThunks[VtableIndex + 1].This = ThisAdjustment;
}
-
- AddThunk(Overrider.Method, Thunks[VtableIndex]);
}
/// Clear the method info map.
MethodInfoMap.clear();
+
+ if (isBuildingConstructorVtable()) {
+ // We don't need to store thunk information for construction vtables.
+ return;
+ }
+
+ for (VtableThunksMapTy::const_iterator I = VTableThunks.begin(),
+ E = VTableThunks.end(); I != E; ++I) {
+ const VtableComponent &Component = Components[I->first];
+ const ThunkInfo &Thunk = I->second;
+ const CXXMethodDecl *MD;
+
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind!");
+ case VtableComponent::CK_FunctionPointer:
+ MD = Component.getFunctionDecl();
+ break;
+ case VtableComponent::CK_CompleteDtorPointer:
+ MD = Component.getDestructorDecl();
+ break;
+ case VtableComponent::CK_DeletingDtorPointer:
+ // We've already added the thunk when we saw the complete dtor pointer.
+ continue;
+ }
+
+ if (MD->getParent() == MostDerivedClass)
+ AddThunk(MD, Thunk);
+ }
}
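After this pass the builder holds thunk data in two shapes: VTableThunks, keyed by vtable slot and used while laying out entries, and Thunks, keyed by method and exposed via thunks_begin()/thunks_end() for thunk emission. A sketch of a consumer walk (assumes the map typedefs are visible at the call site):

    for (VtableBuilder::ThunksMapTy::const_iterator
           I = Builder.thunks_begin(), E = Builder.thunks_end();
         I != E; ++I) {
      const CXXMethodDecl *MD = I->first;
      // I->second is a small vector of ThunkInfo; each entry becomes one
      // thunk symbol that adjusts 'this' and/or the return value around a
      // call to MD's real definition.
    }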
-VtableBuilder::ReturnAdjustment
-VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
+ReturnAdjustment VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
ReturnAdjustment Adjustment;
if (!Offset.isEmpty()) {
@@ -1444,8 +1471,8 @@ VtableBuilder::ComputeReturnAdjustment(BaseOffset Offset) {
VBaseOffsetOffsets.lookup(Offset.VirtualBase);
} else {
Adjustment.VBaseOffsetOffset =
- VtableInfo.getVirtualBaseOffsetOffset(Offset.DerivedClass,
- Offset.VirtualBase);
+ VTables.getVirtualBaseOffsetOffset(Offset.DerivedClass,
+ Offset.VirtualBase);
}
// FIXME: Once the assert in getVirtualBaseOffsetOffset is back again,
@@ -1512,14 +1539,10 @@ VtableBuilder::ComputeThisAdjustmentBaseOffset(BaseSubobject Base,
return BaseOffset();
}
-VtableBuilder::ThisAdjustment
+ThisAdjustment
VtableBuilder::ComputeThisAdjustment(const CXXMethodDecl *MD,
uint64_t BaseOffsetInLayoutClass,
FinalOverriders::OverriderInfo Overrider) {
- // Check if we need an adjustment at all.
- if (BaseOffsetInLayoutClass == Overrider.Offset)
- return ThisAdjustment();
-
// Ignore adjustments for pure virtual member functions.
if (Overrider.Method->isPure())
return ThisAdjustment();
@@ -1576,7 +1599,7 @@ VtableBuilder::AddMethod(const CXXMethodDecl *MD,
} else {
// Add the return adjustment if necessary.
if (!ReturnAdjustment.isEmpty())
- Thunks[Components.size()].Return = ReturnAdjustment;
+ VTableThunks[Components.size()].Return = ReturnAdjustment;
// Add the function.
Components.push_back(VtableComponent::MakeFunction(MD));
@@ -1776,6 +1799,25 @@ VtableBuilder::AddMethods(BaseSubobject Base, uint64_t BaseOffsetInLayoutClass,
MethodInfoMap.insert(std::make_pair(MD, MethodInfo));
MethodInfoMap.erase(OverriddenMD);
+
+ // If the overridden method exists in a virtual base class or a direct
+ // or indirect base class of a virtual base class, we need to emit a
+ // thunk if we ever have a class hierarchy where the base class is not
+ // a primary base in the complete object.
+ if (!isBuildingConstructorVtable() && OverriddenMD != MD) {
+ // Compute the this adjustment.
+ ThisAdjustment ThisAdjustment =
+ ComputeThisAdjustment(OverriddenMD, BaseOffsetInLayoutClass,
+ Overrider);
+
+ if (ThisAdjustment.VCallOffsetOffset &&
+ Overrider.Method->getParent() == MostDerivedClass) {
+ // This is a virtual thunk for the most derived class, add it.
+ AddThunk(Overrider.Method,
+ ThunkInfo(ThisAdjustment, ReturnAdjustment()));
+ }
+ }
+
continue;
}
}
@@ -1866,20 +1908,32 @@ VtableBuilder::LayoutPrimaryAndSecondaryVtables(BaseSubobject Base,
// Compute 'this' pointer adjustments.
ComputeThisAdjustments();
- // Record the address point.
- AddressPoints.insert(std::make_pair(BaseSubobject(Base.getBase(),
- OffsetInLayoutClass),
- AddressPoint));
-
- // Record the address points for all primary bases.
- for (PrimaryBasesSetVectorTy::const_iterator I = PrimaryBases.begin(),
- E = PrimaryBases.end(); I != E; ++I) {
- const CXXRecordDecl *BaseDecl = *I;
+ // Add all address points.
+ const CXXRecordDecl *RD = Base.getBase();
+ while (true) {
+ AddressPoints.insert(std::make_pair(BaseSubobject(RD, OffsetInLayoutClass),
+ AddressPoint));
+
+ const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
+ const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- // We know that all the primary bases have the same offset as the base
- // subobject.
- BaseSubobject PrimaryBase(BaseDecl, OffsetInLayoutClass);
- AddressPoints.insert(std::make_pair(PrimaryBase, AddressPoint));
+ if (!PrimaryBase)
+ break;
+
+ if (Layout.getPrimaryBaseWasVirtual()) {
+ // Check if this virtual primary base is a primary base in the layout
+ // class. If it's not, we don't want to add it.
+ const ASTRecordLayout &LayoutClassLayout =
+ Context.getASTRecordLayout(LayoutClass);
+
+ if (LayoutClassLayout.getVBaseClassOffset(PrimaryBase) !=
+ OffsetInLayoutClass) {
+ // We don't want to add this class (or any of its primary bases).
+ break;
+ }
+ }
+
+ RD = PrimaryBase;
}
bool BaseIsMorallyVirtual = BaseIsVirtual;
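The while (true) loop replaces the old PrimaryBases walk: every class on the primary-base chain shares a single address point, except a virtual primary base that is not actually placed at this offset in the layout class. Concretely, as a hypothetical example:

    struct A { virtual void f() { } };
    struct B : A { };
    struct C : B { };
    // Laying out C's vtable, the loop records one and the same
    // AddressPoint for C, B-in-C and A-in-C: B and A are non-virtual
    // primary bases sitting at the same offset.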
@@ -2062,8 +2116,8 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
// Since an address point can be shared by multiple subobjects, we use an
// STL multimap.
std::multimap<uint64_t, BaseSubobject> AddressPointsByIndex;
- for (CGVtableInfo::AddressPointsMapTy::const_iterator I =
- AddressPoints.begin(), E = AddressPoints.end(); I != E; ++I) {
+ for (AddressPointsMapTy::const_iterator I = AddressPoints.begin(),
+ E = AddressPoints.end(); I != E; ++I) {
const BaseSubobject& Base = I->first;
uint64_t Index = I->second;
@@ -2106,7 +2160,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (MD->isPure())
Out << " [pure]";
- ThunkInfo Thunk = Thunks.lookup(I);
+ ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty()) {
// If this function pointer has a return adjustment, dump it.
if (!Thunk.Return.isEmpty()) {
@@ -2154,7 +2208,7 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
if (DD->isPure())
Out << " [pure]";
- ThunkInfo Thunk = Thunks.lookup(I);
+ ThunkInfo Thunk = VTableThunks.lookup(I);
if (!Thunk.isEmpty()) {
// If this destructor has a 'this' pointer adjustment, dump it.
if (!Thunk.This.isEmpty()) {
@@ -2253,13 +2307,12 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << "\n";
}
- if (!MethodThunks.empty()) {
-
+ if (!Thunks.empty()) {
// We store the method names in a map to get a stable order.
std::map<std::string, const CXXMethodDecl *> MethodNamesAndDecls;
- for (MethodThunksMapTy::const_iterator I = MethodThunks.begin(),
- E = MethodThunks.end(); I != E; ++I) {
+ for (ThunksMapTy::const_iterator I = Thunks.begin(), E = Thunks.end();
+ I != E; ++I) {
const CXXMethodDecl *MD = I->first;
std::string MethodName =
PredefinedExpr::ComputeName(PredefinedExpr::PrettyFunctionNoVirtual,
@@ -2273,7 +2326,9 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
I != E; ++I) {
const std::string &MethodName = I->first;
const CXXMethodDecl *MD = I->second;
- const llvm::SmallVector<ThunkInfo, 1> &ThunksVector = MethodThunks[MD];
+
+ ThunkInfoVectorTy ThunksVector = Thunks[MD];
+ std::sort(ThunksVector.begin(), ThunksVector.end());
Out << "Thunks for '" << MethodName << "' (" << ThunksVector.size();
Out << (ThunksVector.size() == 1 ? " entry" : " entries") << ").\n";
@@ -2283,1099 +2338,42 @@ void VtableBuilder::dumpLayout(llvm::raw_ostream& Out) {
Out << llvm::format("%4d | ", I);
+ // If this function pointer has a return pointer adjustment, dump it.
+ if (!Thunk.Return.isEmpty()) {
+        Out << "return adjustment: " << Thunk.Return.NonVirtual;
+ Out << " non-virtual";
+ if (Thunk.Return.VBaseOffsetOffset) {
+ Out << ", " << Thunk.Return.VBaseOffsetOffset;
+ Out << " vbase offset offset";
+ }
+
+ if (!Thunk.This.isEmpty())
+ Out << "\n ";
+ }
+
// If this function pointer has a 'this' pointer adjustment, dump it.
if (!Thunk.This.isEmpty()) {
- Out << "this: ";
- Out << Thunk.This.NonVirtual << " nv";
+ Out << "this adjustment: ";
+ Out << Thunk.This.NonVirtual << " non-virtual";
if (Thunk.This.VCallOffsetOffset) {
Out << ", " << Thunk.This.VCallOffsetOffset;
- Out << " v";
+ Out << " vcall offset offset";
}
}
Out << '\n';
}
- }
- }
-}
-
-}
-
-namespace {
-class OldVtableBuilder {
-public:
- /// Index_t - Vtable index type.
- typedef uint64_t Index_t;
- typedef std::vector<std::pair<GlobalDecl,
- std::pair<GlobalDecl, ThunkAdjustment> > >
- SavedAdjustmentsVectorTy;
-private:
-
- // VtableComponents - The components of the vtable being built.
- typedef llvm::SmallVector<llvm::Constant *, 64> VtableComponentsVectorTy;
- VtableComponentsVectorTy VtableComponents;
-
- const bool BuildVtable;
-
- llvm::Type *Ptr8Ty;
-
- /// MostDerivedClass - The most derived class that this vtable is being
- /// built for.
- const CXXRecordDecl *MostDerivedClass;
-
- /// LayoutClass - The most derived class used for virtual base layout
- /// information.
- const CXXRecordDecl *LayoutClass;
- /// LayoutOffset - The offset for Class in LayoutClass.
- uint64_t LayoutOffset;
- /// BLayout - Layout for the most derived class that this vtable is being
- /// built for.
- const ASTRecordLayout &BLayout;
- llvm::SmallSet<const CXXRecordDecl *, 32> IndirectPrimary;
- llvm::SmallSet<const CXXRecordDecl *, 32> SeenVBase;
- llvm::Constant *rtti;
- llvm::LLVMContext &VMContext;
- CodeGenModule &CGM; // Per-module state.
-
- llvm::DenseMap<const CXXMethodDecl *, Index_t> VCall;
- llvm::DenseMap<GlobalDecl, Index_t> VCallOffset;
- llvm::DenseMap<GlobalDecl, Index_t> VCallOffsetForVCall;
- // This is the offset to the nearest virtual base
- llvm::DenseMap<const CXXMethodDecl *, Index_t> NonVirtualOffset;
- llvm::DenseMap<const CXXRecordDecl *, Index_t> VBIndex;
-
- /// PureVirtualFunction - Points to __cxa_pure_virtual.
- llvm::Constant *PureVirtualFn;
-
- /// VtableMethods - A data structure for keeping track of methods in a vtable.
- /// Can add methods, override methods and iterate in vtable order.
- class VtableMethods {
- // MethodToIndexMap - Maps from a global decl to the index it has in the
- // Methods vector.
- llvm::DenseMap<GlobalDecl, uint64_t> MethodToIndexMap;
-
- /// Methods - The methods, in vtable order.
- typedef llvm::SmallVector<GlobalDecl, 16> MethodsVectorTy;
- MethodsVectorTy Methods;
- MethodsVectorTy OrigMethods;
-
- public:
- /// AddMethod - Add a method to the vtable methods.
- void AddMethod(GlobalDecl GD) {
- assert(!MethodToIndexMap.count(GD) &&
- "Method has already been added!");
-
- MethodToIndexMap[GD] = Methods.size();
- Methods.push_back(GD);
- OrigMethods.push_back(GD);
- }
-
- /// OverrideMethod - Replace a method with another.
- void OverrideMethod(GlobalDecl OverriddenGD, GlobalDecl GD) {
- llvm::DenseMap<GlobalDecl, uint64_t>::iterator i
- = MethodToIndexMap.find(OverriddenGD);
- assert(i != MethodToIndexMap.end() && "Did not find entry!");
-
- // Get the index of the old decl.
- uint64_t Index = i->second;
-
- // Replace the old decl with the new decl.
- Methods[Index] = GD;
-
- // And add the new.
- MethodToIndexMap[GD] = Index;
- }
-
- /// getIndex - Gives the index of a passed in GlobalDecl. Returns false if
- /// the index couldn't be found.
- bool getIndex(GlobalDecl GD, uint64_t &Index) const {
- llvm::DenseMap<GlobalDecl, uint64_t>::const_iterator i
- = MethodToIndexMap.find(GD);
-
- if (i == MethodToIndexMap.end())
- return false;
-
- Index = i->second;
- return true;
- }
-
- GlobalDecl getOrigMethod(uint64_t Index) const {
- return OrigMethods[Index];
- }
-
- MethodsVectorTy::size_type size() const {
- return Methods.size();
- }
-
- void clear() {
- MethodToIndexMap.clear();
- Methods.clear();
- OrigMethods.clear();
- }
-
- GlobalDecl operator[](uint64_t Index) const {
- return Methods[Index];
- }
- };
-
- /// Methods - The vtable methods we're currently building.
- VtableMethods Methods;
-
- /// ThisAdjustments - For a given index in the vtable, contains the 'this'
- /// pointer adjustment needed for a method.
- typedef llvm::DenseMap<uint64_t, ThunkAdjustment> ThisAdjustmentsMapTy;
- ThisAdjustmentsMapTy ThisAdjustments;
-
- SavedAdjustmentsVectorTy SavedAdjustments;
-
- /// BaseReturnTypes - Contains the base return types of methods who have been
- /// overridden with methods whose return types require adjustment. Used for
- /// generating covariant thunk information.
- typedef llvm::DenseMap<uint64_t, CanQualType> BaseReturnTypesMapTy;
- BaseReturnTypesMapTy BaseReturnTypes;
-
- std::vector<Index_t> VCalls;
-
- typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
- // subAddressPoints - Used to hold the AddressPoints (offsets) into the built
- // vtable for use in computing the initializers for the VTT.
- llvm::DenseMap<CtorVtable_t, int64_t> &subAddressPoints;
-
- /// AddressPoints - Address points for this vtable.
- CGVtableInfo::AddressPointsMapTy& AddressPoints;
-
- typedef CXXRecordDecl::method_iterator method_iter;
- const uint32_t LLVMPointerWidth;
- Index_t extra;
- typedef std::vector<std::pair<const CXXRecordDecl *, int64_t> > Path_t;
- static llvm::DenseMap<CtorVtable_t, int64_t>&
- AllocAddressPoint(CodeGenModule &cgm, const CXXRecordDecl *l,
- const CXXRecordDecl *c) {
- CGVtableInfo::AddrMap_t *&oref = cgm.getVtableInfo().AddressPoints[l];
- if (oref == 0)
- oref = new CGVtableInfo::AddrMap_t;
-
- llvm::DenseMap<CtorVtable_t, int64_t> *&ref = (*oref)[c];
- if (ref == 0)
- ref = new llvm::DenseMap<CtorVtable_t, int64_t>;
- return *ref;
- }
-
- bool DclIsSame(const FunctionDecl *New, const FunctionDecl *Old) {
- FunctionTemplateDecl *OldTemplate = Old->getDescribedFunctionTemplate();
- FunctionTemplateDecl *NewTemplate = New->getDescribedFunctionTemplate();
-
- // C++ [temp.fct]p2:
- // A function template can be overloaded with other function templates
- // and with normal (non-template) functions.
- if ((OldTemplate == 0) != (NewTemplate == 0))
- return false;
-
- // Is the function New an overload of the function Old?
- QualType OldQType = CGM.getContext().getCanonicalType(Old->getType());
- QualType NewQType = CGM.getContext().getCanonicalType(New->getType());
-
- // Compare the signatures (C++ 1.3.10) of the two functions to
- // determine whether they are overloads. If we find any mismatch
- // in the signature, they are overloads.
-
- // If either of these functions is a K&R-style function (no
- // prototype), then we consider them to have matching signatures.
- if (isa<FunctionNoProtoType>(OldQType.getTypePtr()) ||
- isa<FunctionNoProtoType>(NewQType.getTypePtr()))
- return true;
-
- FunctionProtoType* OldType = cast<FunctionProtoType>(OldQType);
- FunctionProtoType* NewType = cast<FunctionProtoType>(NewQType);
-
- // The signature of a function includes the types of its
- // parameters (C++ 1.3.10), which includes the presence or absence
- // of the ellipsis; see C++ DR 357).
- if (OldQType != NewQType &&
- (OldType->getNumArgs() != NewType->getNumArgs() ||
- OldType->isVariadic() != NewType->isVariadic() ||
- !std::equal(OldType->arg_type_begin(), OldType->arg_type_end(),
- NewType->arg_type_begin())))
- return false;
-
-#if 0
- // C++ [temp.over.link]p4:
- // The signature of a function template consists of its function
- // signature, its return type and its template parameter list. The names
- // of the template parameters are significant only for establishing the
- // relationship between the template parameters and the rest of the
- // signature.
- //
- // We check the return type and template parameter lists for function
- // templates first; the remaining checks follow.
- if (NewTemplate &&
- (!TemplateParameterListsAreEqual(NewTemplate->getTemplateParameters(),
- OldTemplate->getTemplateParameters(),
- TPL_TemplateMatch) ||
- OldType->getResultType() != NewType->getResultType()))
- return false;
-#endif
-
- // If the function is a class member, its signature includes the
- // cv-qualifiers (if any) on the function itself.
- //
- // As part of this, also check whether one of the member functions
- // is static, in which case they are not overloads (C++
- // 13.1p2). While not part of the definition of the signature,
- // this check is important to determine whether these functions
- // can be overloaded.
- const CXXMethodDecl* OldMethod = dyn_cast<CXXMethodDecl>(Old);
- const CXXMethodDecl* NewMethod = dyn_cast<CXXMethodDecl>(New);
- if (OldMethod && NewMethod &&
- !OldMethod->isStatic() && !NewMethod->isStatic() &&
- OldMethod->getTypeQualifiers() != NewMethod->getTypeQualifiers())
- return false;
-
- // The signatures match; this is not an overload.
- return true;
- }
-
- typedef llvm::DenseMap<const CXXMethodDecl *, const CXXMethodDecl*>
- ForwardUnique_t;
- ForwardUnique_t ForwardUnique;
- llvm::DenseMap<const CXXMethodDecl*, const CXXMethodDecl*> UniqueOverrider;
-
- void BuildUniqueOverrider(const CXXMethodDecl *U, const CXXMethodDecl *MD) {
- const CXXMethodDecl *PrevU = UniqueOverrider[MD];
- assert(U && "no unique overrider");
- if (PrevU == U)
- return;
- if (PrevU != U && PrevU != 0) {
- // If already set, note the two sets as the same
- if (0)
- printf("%s::%s same as %s::%s\n",
- PrevU->getParent()->getNameAsString().c_str(),
- PrevU->getNameAsString().c_str(),
- U->getParent()->getNameAsString().c_str(),
- U->getNameAsString().c_str());
- ForwardUnique[PrevU] = U;
- return;
- }
-
- // Not set, set it now
- if (0)
- printf("marking %s::%s %p override as %s::%s\n",
- MD->getParent()->getNameAsString().c_str(),
- MD->getNameAsString().c_str(),
- (void*)MD,
- U->getParent()->getNameAsString().c_str(),
- U->getNameAsString().c_str());
- UniqueOverrider[MD] = U;
-
- for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(),
- me = MD->end_overridden_methods(); mi != me; ++mi) {
- BuildUniqueOverrider(U, *mi);
- }
- }
-
- void BuildUniqueOverriders(const CXXRecordDecl *RD) {
- if (0) printf("walking %s\n", RD->getNameAsCString());
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end(); i != e; ++i) {
- const CXXMethodDecl *MD = *i;
- if (!MD->isVirtual())
- continue;
-
- if (UniqueOverrider[MD] == 0) {
- // Only set this, if it hasn't been set yet.
- BuildUniqueOverrider(MD, MD);
- if (0)
- printf("top set is %s::%s %p\n",
- MD->getParent()->getNameAsString().c_str(),
- MD->getNameAsString().c_str(),
- (void*)MD);
- ForwardUnique[MD] = MD;
- }
- }
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- BuildUniqueOverriders(Base);
- }
- }
-
- static int DclCmp(const void *p1, const void *p2) {
- const CXXMethodDecl *MD1 = *(const CXXMethodDecl *const *)p1;
- const CXXMethodDecl *MD2 = *(const CXXMethodDecl *const *)p2;
-
- return (DeclarationName::compare(MD1->getDeclName(), MD2->getDeclName()));
- }
-
- void MergeForwarding() {
- typedef llvm::SmallVector<const CXXMethodDecl *, 100> A_t;
- A_t A;
- for (ForwardUnique_t::iterator I = ForwardUnique.begin(),
- E = ForwardUnique.end(); I != E; ++I) {
- if (I->first == I->second)
- // Only add the roots of all trees
- A.push_back(I->first);
- }
- llvm::array_pod_sort(A.begin(), A.end(), DclCmp);
- for (A_t::iterator I = A.begin(),
- E = A.end(); I != E; ++I) {
- A_t::iterator J = I;
- while (++J != E && DclCmp(I, J) == 0)
- if (DclIsSame(*I, *J)) {
- if (0) printf("connecting %s\n", (*I)->getNameAsString().c_str());
- ForwardUnique[*J] = *I;
- }
- }
- }
-
- const CXXMethodDecl *getUnique(const CXXMethodDecl *MD) {
- const CXXMethodDecl *U = UniqueOverrider[MD];
- assert(U && "unique overrider not found");
- while (ForwardUnique.count(U)) {
- const CXXMethodDecl *NU = ForwardUnique[U];
- if (NU == U) break;
- U = NU;
- }
- return U;
- }
-
- GlobalDecl getUnique(GlobalDecl GD) {
- const CXXMethodDecl *Unique = getUnique(cast<CXXMethodDecl>(GD.getDecl()));
-
- if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(Unique))
- return GlobalDecl(CD, GD.getCtorType());
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(Unique))
- return GlobalDecl(DD, GD.getDtorType());
-
- return Unique;
- }
-
- /// getPureVirtualFn - Return the __cxa_pure_virtual function.
- llvm::Constant* getPureVirtualFn() {
- if (!PureVirtualFn) {
- const llvm::FunctionType *Ty =
- llvm::FunctionType::get(llvm::Type::getVoidTy(VMContext),
- /*isVarArg=*/false);
- PureVirtualFn = wrap(CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual"));
- }
-
- return PureVirtualFn;
- }
-
-public:
- OldVtableBuilder(const CXXRecordDecl *MostDerivedClass,
- const CXXRecordDecl *l, uint64_t lo, CodeGenModule &cgm,
- bool build, CGVtableInfo::AddressPointsMapTy& AddressPoints)
- : BuildVtable(build), MostDerivedClass(MostDerivedClass), LayoutClass(l),
- LayoutOffset(lo), BLayout(cgm.getContext().getASTRecordLayout(l)),
- rtti(0), VMContext(cgm.getModule().getContext()),CGM(cgm),
- PureVirtualFn(0),
- subAddressPoints(AllocAddressPoint(cgm, l, MostDerivedClass)),
- AddressPoints(AddressPoints),
- LLVMPointerWidth(cgm.getContext().Target.getPointerWidth(0))
- {
- Ptr8Ty = llvm::PointerType::get(llvm::Type::getInt8Ty(VMContext), 0);
- if (BuildVtable) {
- QualType ClassType = CGM.getContext().getTagDeclType(MostDerivedClass);
- rtti = CGM.GetAddrOfRTTIDescriptor(ClassType);
- }
- BuildUniqueOverriders(MostDerivedClass);
- MergeForwarding();
- }
-
- // getVtableComponents - Returns a reference to the vtable components.
- const VtableComponentsVectorTy &getVtableComponents() const {
- return VtableComponents;
- }
-
- llvm::DenseMap<const CXXRecordDecl *, uint64_t> &getVBIndex()
- { return VBIndex; }
-
- SavedAdjustmentsVectorTy &getSavedAdjustments()
- { return SavedAdjustments; }
-
- llvm::Constant *wrap(Index_t i) {
- llvm::Constant *m;
- m = llvm::ConstantInt::get(llvm::Type::getInt64Ty(VMContext), i);
- return llvm::ConstantExpr::getIntToPtr(m, Ptr8Ty);
- }
-
- llvm::Constant *wrap(llvm::Constant *m) {
- return llvm::ConstantExpr::getBitCast(m, Ptr8Ty);
- }
-
-//#define D1(x)
-#define D1(X) do { if (getenv("CLANG_VTABLE_DEBUG")) { X; } } while (0)
-
- void GenerateVBaseOffsets(const CXXRecordDecl *RD, uint64_t Offset,
- bool updateVBIndex, Index_t current_vbindex) {
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- Index_t next_vbindex = current_vbindex;
- if (i->isVirtual() && !SeenVBase.count(Base)) {
- SeenVBase.insert(Base);
- if (updateVBIndex) {
- next_vbindex = (ssize_t)(-(VCalls.size()*LLVMPointerWidth/8)
- - 3*LLVMPointerWidth/8);
- VBIndex[Base] = next_vbindex;
- }
- int64_t BaseOffset = -(Offset/8) + BLayout.getVBaseClassOffset(Base)/8;
- VCalls.push_back((0?700:0) + BaseOffset);
- D1(printf(" vbase for %s at %d delta %d most derived %s\n",
- Base->getNameAsCString(),
- (int)-VCalls.size()-3, (int)BaseOffset,
- MostDerivedClass->getNameAsCString()));
- }
- // We also record offsets for non-virtual bases to closest enclosing
- // virtual base. We do this so that we don't have to search
- // for the nearst virtual base class when generating thunks.
- if (updateVBIndex && VBIndex.count(Base) == 0)
- VBIndex[Base] = next_vbindex;
- GenerateVBaseOffsets(Base, Offset, updateVBIndex, next_vbindex);
- }
- }
-
- void StartNewTable() {
- SeenVBase.clear();
- }
-
- Index_t getNVOffset_1(const CXXRecordDecl *D, const CXXRecordDecl *B,
- Index_t Offset = 0) {
-
- if (B == D)
- return Offset;
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(D);
- for (CXXRecordDecl::base_class_const_iterator i = D->bases_begin(),
- e = D->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- int64_t BaseOffset = 0;
- if (!i->isVirtual())
- BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- int64_t o = getNVOffset_1(Base, B, BaseOffset);
- if (o >= 0)
- return o;
- }
-
- return -1;
- }
-
- /// getNVOffset - Returns the non-virtual offset for the given (B) base of the
- /// derived class D.
- Index_t getNVOffset(QualType qB, QualType qD) {
- qD = qD->getPointeeType();
- qB = qB->getPointeeType();
- CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
- CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
- int64_t o = getNVOffset_1(D, B);
- if (o >= 0)
- return o;
-
- assert(false && "FIXME: non-virtual base not found");
- return 0;
- }
-
- /// getVbaseOffset - Returns the index into the vtable for the virtual base
- /// offset for the given (B) virtual base of the derived class D.
- Index_t getVbaseOffset(QualType qB, QualType qD) {
- qD = qD->getPointeeType();
- qB = qB->getPointeeType();
- CXXRecordDecl *D = cast<CXXRecordDecl>(qD->getAs<RecordType>()->getDecl());
- CXXRecordDecl *B = cast<CXXRecordDecl>(qB->getAs<RecordType>()->getDecl());
- if (D != MostDerivedClass)
- return CGM.getVtableInfo().getVirtualBaseOffsetOffset(D, B);
- llvm::DenseMap<const CXXRecordDecl *, Index_t>::iterator i;
- i = VBIndex.find(B);
- if (i != VBIndex.end())
- return i->second;
-
- assert(false && "FIXME: Base not found");
- return 0;
- }
-
- bool OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
- Index_t OverrideOffset, Index_t Offset,
- int64_t CurrentVBaseOffset);
-
- /// AppendMethods - Append the current methods to the vtable.
- void AppendMethodsToVtable();
-
- llvm::Constant *WrapAddrOf(GlobalDecl GD) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD);
-
- return wrap(CGM.GetAddrOfFunction(GD, Ty));
- }
-
- void OverrideMethods(Path_t *Path, bool MorallyVirtual, int64_t Offset,
- int64_t CurrentVBaseOffset) {
- for (Path_t::reverse_iterator i = Path->rbegin(),
- e = Path->rend(); i != e; ++i) {
- const CXXRecordDecl *RD = i->first;
- int64_t OverrideOffset = i->second;
- for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
- ++mi) {
- const CXXMethodDecl *MD = *mi;
-
- if (!MD->isVirtual())
- continue;
-
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- // Override both the complete and the deleting destructor.
- GlobalDecl CompDtor(DD, Dtor_Complete);
- OverrideMethod(CompDtor, MorallyVirtual, OverrideOffset, Offset,
- CurrentVBaseOffset);
-
- GlobalDecl DeletingDtor(DD, Dtor_Deleting);
- OverrideMethod(DeletingDtor, MorallyVirtual, OverrideOffset, Offset,
- CurrentVBaseOffset);
- } else {
- OverrideMethod(MD, MorallyVirtual, OverrideOffset, Offset,
- CurrentVBaseOffset);
- }
- }
- }
- }
-
- void AddMethod(const GlobalDecl GD, bool MorallyVirtual, Index_t Offset,
- int64_t CurrentVBaseOffset) {
- // If we can find a previously allocated slot for this, reuse it.
- if (OverrideMethod(GD, MorallyVirtual, Offset, Offset,
- CurrentVBaseOffset))
- return;
-
- D1(printf(" vfn for %s at %d\n",
- dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(),
- (int)Methods.size()));
-
- // We didn't find an entry in the vtable that we could use, add a new
- // entry.
- Methods.AddMethod(GD);
-
- VCallOffset[GD] = Offset/8 - CurrentVBaseOffset/8;
-
- if (MorallyVirtual) {
- GlobalDecl UGD = getUnique(GD);
- const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl());
-
- assert(UMD && "final overrider not found");
-
- Index_t &idx = VCall[UMD];
- // Allocate the first one, after that, we reuse the previous one.
- if (idx == 0) {
- VCallOffsetForVCall[UGD] = Offset/8;
- NonVirtualOffset[UMD] = Offset/8 - CurrentVBaseOffset/8;
- idx = VCalls.size()+1;
- VCalls.push_back(Offset/8 - CurrentVBaseOffset/8);
- D1(printf(" vcall for %s at %d with delta %d\n",
- dyn_cast<CXXMethodDecl>(GD.getDecl())->getNameAsString().c_str(),
- (int)-VCalls.size()-3, (int)VCalls[idx-1]));
- }
- }
- }
-
- void AddMethods(const CXXRecordDecl *RD, bool MorallyVirtual,
- Index_t Offset, int64_t CurrentVBaseOffset) {
- for (method_iter mi = RD->method_begin(), me = RD->method_end(); mi != me;
- ++mi) {
- const CXXMethodDecl *MD = *mi;
- if (!MD->isVirtual())
- continue;
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(MD)) {
- // For destructors, add both the complete and the deleting destructor
- // to the vtable.
- AddMethod(GlobalDecl(DD, Dtor_Complete), MorallyVirtual, Offset,
- CurrentVBaseOffset);
- AddMethod(GlobalDecl(DD, Dtor_Deleting), MorallyVirtual, Offset,
- CurrentVBaseOffset);
- } else
- AddMethod(MD, MorallyVirtual, Offset, CurrentVBaseOffset);
- }
- }
+ Out << '\n';
- void NonVirtualBases(const CXXRecordDecl *RD, const ASTRecordLayout &Layout,
- const CXXRecordDecl *PrimaryBase,
- bool PrimaryBaseWasVirtual, bool MorallyVirtual,
- int64_t Offset, int64_t CurrentVBaseOffset,
- Path_t *Path) {
- Path->push_back(std::make_pair(RD, Offset));
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- if (i->isVirtual())
- continue;
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- uint64_t o = Offset + Layout.getBaseClassOffset(Base);
- StartNewTable();
- GenerateVtableForBase(Base, o, MorallyVirtual, false,
- true, Base == PrimaryBase && !PrimaryBaseWasVirtual,
- CurrentVBaseOffset, Path);
}
- Path->pop_back();
}
-
-// #define D(X) do { X; } while (0)
-#define D(X)
-
- void insertVCalls(int InsertionPoint) {
- D1(printf("============= combining vbase/vcall\n"));
- D(VCalls.insert(VCalls.begin(), 673));
- D(VCalls.push_back(672));
-
- VtableComponents.insert(VtableComponents.begin() + InsertionPoint,
- VCalls.size(), 0);
- if (BuildVtable) {
- // The vcalls come first...
- for (std::vector<Index_t>::reverse_iterator i = VCalls.rbegin(),
- e = VCalls.rend();
- i != e; ++i)
- VtableComponents[InsertionPoint++] = wrap((0?600:0) + *i);
- }
- VCalls.clear();
- VCall.clear();
- VCallOffsetForVCall.clear();
- VCallOffset.clear();
- NonVirtualOffset.clear();
- }
-
- void AddAddressPoints(const CXXRecordDecl *RD, uint64_t Offset,
- Index_t AddressPoint) {
- D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
- LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
- subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
- AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
-
- // Now also add the address point for all our primary bases.
- while (1) {
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- RD = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
- // FIXME: Double check this.
- if (RD == 0)
- break;
- if (PrimaryBaseWasVirtual &&
- BLayout.getVBaseClassOffset(RD) != Offset)
- break;
- D1(printf("XXX address point for %s in %s layout %s at offset %d is %d\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString(),
- LayoutClass->getNameAsCString(), (int)Offset, (int)AddressPoint));
- subAddressPoints[std::make_pair(RD, Offset)] = AddressPoint;
- AddressPoints[BaseSubobject(RD, Offset)] = AddressPoint;
- }
- }
-
-
- void FinishGenerateVtable(const CXXRecordDecl *RD,
- const ASTRecordLayout &Layout,
- const CXXRecordDecl *PrimaryBase,
- bool ForNPNVBases, bool WasPrimaryBase,
- bool PrimaryBaseWasVirtual,
- bool MorallyVirtual, int64_t Offset,
- bool ForVirtualBase, int64_t CurrentVBaseOffset,
- Path_t *Path) {
- bool alloc = false;
- if (Path == 0) {
- alloc = true;
- Path = new Path_t;
- }
-
- StartNewTable();
- extra = 0;
- Index_t AddressPoint = 0;
- int VCallInsertionPoint = 0;
- if (!ForNPNVBases || !WasPrimaryBase) {
- bool DeferVCalls = MorallyVirtual || ForVirtualBase;
- VCallInsertionPoint = VtableComponents.size();
- if (!DeferVCalls) {
- insertVCalls(VCallInsertionPoint);
- } else
- // FIXME: just for extra, or for all uses of VCalls.size post this?
- extra = -VCalls.size();
-
- // Add the offset to top.
- VtableComponents.push_back(BuildVtable ? wrap(-((Offset-LayoutOffset)/8)) : 0);
-
- // Add the RTTI information.
- VtableComponents.push_back(rtti);
-
- AddressPoint = VtableComponents.size();
-
- AppendMethodsToVtable();
- }
-
- // and then the non-virtual bases.
- NonVirtualBases(RD, Layout, PrimaryBase, PrimaryBaseWasVirtual,
- MorallyVirtual, Offset, CurrentVBaseOffset, Path);
-
- if (ForVirtualBase) {
- // FIXME: We're adding to VCalls in callers, we need to do the overrides
- // in the inner part, so that we know the complete set of vcalls during
- // the build and don't have to insert into methods. Saving out the
- // AddressPoint here, would need to be fixed, if we didn't do that. Also
- // retroactively adding vcalls for overrides later wind up in the wrong
- // place, the vcall slot has to be alloted during the walk of the base
- // when the function is first introduces.
- AddressPoint += VCalls.size();
- insertVCalls(VCallInsertionPoint);
- }
-
- if (!ForNPNVBases || !WasPrimaryBase)
- AddAddressPoints(RD, Offset, AddressPoint);
-
- if (alloc) {
- delete Path;
- }
- }
-
- void Primaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
- bool updateVBIndex, Index_t current_vbindex,
- int64_t CurrentVBaseOffset) {
- if (!RD->isDynamicClass())
- return;
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
-
- // vtables are composed from the chain of primaries.
- if (PrimaryBase && !PrimaryBaseWasVirtual) {
- D1(printf(" doing primaries for %s most derived %s\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
- Primaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
- updateVBIndex, current_vbindex, CurrentVBaseOffset);
- }
-
- D1(printf(" doing vcall entries for %s most derived %s\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
-
- // And add the virtuals for the class to the primary vtable.
- AddMethods(RD, MorallyVirtual, Offset, CurrentVBaseOffset);
- }
-
- void VBPrimaries(const CXXRecordDecl *RD, bool MorallyVirtual, int64_t Offset,
- bool updateVBIndex, Index_t current_vbindex,
- bool RDisVirtualBase, int64_t CurrentVBaseOffset,
- bool bottom) {
- if (!RD->isDynamicClass())
- return;
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
-
- // vtables are composed from the chain of primaries.
- if (PrimaryBase) {
- int BaseCurrentVBaseOffset = CurrentVBaseOffset;
- if (PrimaryBaseWasVirtual) {
- IndirectPrimary.insert(PrimaryBase);
- BaseCurrentVBaseOffset = BLayout.getVBaseClassOffset(PrimaryBase);
- }
-
- D1(printf(" doing primaries for %s most derived %s\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
-
- VBPrimaries(PrimaryBase, PrimaryBaseWasVirtual|MorallyVirtual, Offset,
- updateVBIndex, current_vbindex, PrimaryBaseWasVirtual,
- BaseCurrentVBaseOffset, false);
- }
-
- D1(printf(" doing vbase entries for %s most derived %s\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
- GenerateVBaseOffsets(RD, Offset, updateVBIndex, current_vbindex);
-
- if (RDisVirtualBase || bottom) {
- Primaries(RD, MorallyVirtual, Offset, updateVBIndex, current_vbindex,
- CurrentVBaseOffset);
- }
- }
-
- void GenerateVtableForBase(const CXXRecordDecl *RD, int64_t Offset = 0,
- bool MorallyVirtual = false,
- bool ForVirtualBase = false,
- bool ForNPNVBases = false,
- bool WasPrimaryBase = true,
- int CurrentVBaseOffset = 0,
- Path_t *Path = 0) {
- if (!RD->isDynamicClass())
- return;
-
-    // Construction vtables don't need parts that have no virtual bases and
-    // aren't morally virtual.
- if ((LayoutClass != MostDerivedClass) &&
- RD->getNumVBases() == 0 && !MorallyVirtual)
- return;
-
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- const CXXRecordDecl *PrimaryBase = Layout.getPrimaryBase();
- const bool PrimaryBaseWasVirtual = Layout.getPrimaryBaseWasVirtual();
-
- extra = 0;
- D1(printf("building entries for base %s most derived %s\n",
- RD->getNameAsCString(), MostDerivedClass->getNameAsCString()));
-
- if (ForVirtualBase)
- extra = VCalls.size();
-
- if (!ForNPNVBases || !WasPrimaryBase) {
- VBPrimaries(RD, MorallyVirtual, Offset, !ForVirtualBase, 0,
- ForVirtualBase, CurrentVBaseOffset, true);
-
- if (Path)
- OverrideMethods(Path, MorallyVirtual, Offset, CurrentVBaseOffset);
- }
-
- FinishGenerateVtable(RD, Layout, PrimaryBase, ForNPNVBases, WasPrimaryBase,
- PrimaryBaseWasVirtual, MorallyVirtual, Offset,
- ForVirtualBase, CurrentVBaseOffset, Path);
- }
-
- void GenerateVtableForVBases(const CXXRecordDecl *RD,
- int64_t Offset = 0,
- Path_t *Path = 0) {
- bool alloc = false;
- if (Path == 0) {
- alloc = true;
- Path = new Path_t;
- }
-    // FIXME: We also need to override using all paths to a virtual base;
-    // right now we just process the first path.
- Path->push_back(std::make_pair(RD, Offset));
- for (CXXRecordDecl::base_class_const_iterator i = RD->bases_begin(),
- e = RD->bases_end(); i != e; ++i) {
- const CXXRecordDecl *Base =
- cast<CXXRecordDecl>(i->getType()->getAs<RecordType>()->getDecl());
- if (i->isVirtual() && !IndirectPrimary.count(Base)) {
- // Mark it so we don't output it twice.
- IndirectPrimary.insert(Base);
- StartNewTable();
- VCall.clear();
- int64_t BaseOffset = BLayout.getVBaseClassOffset(Base);
- int64_t CurrentVBaseOffset = BaseOffset;
- D1(printf("vtable %s virtual base %s\n",
- MostDerivedClass->getNameAsCString(), Base->getNameAsCString()));
- GenerateVtableForBase(Base, BaseOffset, true, true, false,
- true, CurrentVBaseOffset, Path);
- }
- int64_t BaseOffset;
- if (i->isVirtual())
- BaseOffset = BLayout.getVBaseClassOffset(Base);
- else {
- const ASTRecordLayout &Layout = CGM.getContext().getASTRecordLayout(RD);
- BaseOffset = Offset + Layout.getBaseClassOffset(Base);
- }
-
- if (Base->getNumVBases()) {
- GenerateVtableForVBases(Base, BaseOffset, Path);
- }
- }
- Path->pop_back();
- if (alloc)
- delete Path;
- }
-};
-} // end anonymous namespace
-
-bool OldVtableBuilder::OverrideMethod(GlobalDecl GD, bool MorallyVirtual,
- Index_t OverrideOffset, Index_t Offset,
- int64_t CurrentVBaseOffset) {
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- const bool isPure = MD->isPure();
-
- // FIXME: Should OverrideOffset's be Offset?
-
- for (CXXMethodDecl::method_iterator mi = MD->begin_overridden_methods(),
- e = MD->end_overridden_methods(); mi != e; ++mi) {
- GlobalDecl OGD;
- GlobalDecl OGD2;
-
- const CXXMethodDecl *OMD = *mi;
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(OMD))
- OGD = GlobalDecl(DD, GD.getDtorType());
- else
- OGD = OMD;
-
- // Check whether this is the method being overridden in this section of
- // the vtable.
- uint64_t Index;
- if (!Methods.getIndex(OGD, Index))
- continue;
-
- OGD2 = OGD;
-
- // Get the original method, which we should be computing thunks, etc,
- // against.
- OGD = Methods.getOrigMethod(Index);
- OMD = cast<CXXMethodDecl>(OGD.getDecl());
-
- QualType ReturnType =
- MD->getType()->getAs<FunctionType>()->getResultType();
- QualType OverriddenReturnType =
- OMD->getType()->getAs<FunctionType>()->getResultType();
-
- // Check if we need a return type adjustment.
- if (!ComputeReturnAdjustmentBaseOffset(CGM.getContext(), MD,
- OMD).isEmpty()) {
- CanQualType &BaseReturnType = BaseReturnTypes[Index];
-
- // Insert the base return type.
- if (BaseReturnType.isNull())
- BaseReturnType =
- CGM.getContext().getCanonicalType(OverriddenReturnType);
- }
-
- Methods.OverrideMethod(OGD, GD);
-
- GlobalDecl UGD = getUnique(GD);
- const CXXMethodDecl *UMD = cast<CXXMethodDecl>(UGD.getDecl());
- assert(UGD.getDecl() && "unique overrider not found");
- assert(UGD == getUnique(OGD) && "unique overrider not unique");
-
- ThisAdjustments.erase(Index);
- if (MorallyVirtual || VCall.count(UMD)) {
-
- Index_t &idx = VCall[UMD];
- if (idx == 0) {
- VCallOffset[GD] = VCallOffset[OGD];
- // NonVirtualOffset[UMD] = CurrentVBaseOffset/8 - OverrideOffset/8;
- NonVirtualOffset[UMD] = VCallOffset[OGD];
- VCallOffsetForVCall[UMD] = OverrideOffset/8;
- idx = VCalls.size()+1;
- VCalls.push_back(OverrideOffset/8 - CurrentVBaseOffset/8);
- D1(printf(" vcall for %s at %d with delta %d most derived %s\n",
- MD->getNameAsString().c_str(), (int)-idx-3,
- (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
- } else {
- VCallOffset[GD] = NonVirtualOffset[UMD];
- VCalls[idx-1] = -VCallOffsetForVCall[UGD] + OverrideOffset/8;
- D1(printf(" vcall patch for %s at %d with delta %d most derived %s\n",
- MD->getNameAsString().c_str(), (int)-idx-3,
- (int)VCalls[idx-1], MostDerivedClass->getNameAsCString()));
- }
- int64_t NonVirtualAdjustment = -VCallOffset[OGD];
- QualType DerivedType = MD->getThisType(CGM.getContext());
- QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext());
- int64_t NonVirtualAdjustment2 = -(getNVOffset(BaseType, DerivedType)/8);
- if (NonVirtualAdjustment2 != NonVirtualAdjustment) {
- NonVirtualAdjustment = NonVirtualAdjustment2;
- }
- int64_t VirtualAdjustment =
- -((idx + extra + 2) * LLVMPointerWidth / 8);
-
- // Optimize out virtual adjustments of 0.
- if (VCalls[idx-1] == 0)
- VirtualAdjustment = 0;
-
- ThunkAdjustment ThisAdjustment(NonVirtualAdjustment,
- VirtualAdjustment);
-
- if (!isPure && !ThisAdjustment.isEmpty()) {
- ThisAdjustments[Index] = ThisAdjustment;
- SavedAdjustments.push_back(
- std::make_pair(GD, std::make_pair(OGD, ThisAdjustment)));
- }
- return true;
- }
-
- VCallOffset[GD] = VCallOffset[OGD2] - OverrideOffset/8;
-
- int64_t NonVirtualAdjustment = -VCallOffset[GD];
- QualType DerivedType = MD->getThisType(CGM.getContext());
- QualType BaseType = cast<const CXXMethodDecl>(OGD.getDecl())->getThisType(CGM.getContext());
- int64_t NonVirtualAdjustment2 = -(getNVOffset(BaseType, DerivedType)/8);
- if (NonVirtualAdjustment2 != NonVirtualAdjustment) {
- NonVirtualAdjustment = NonVirtualAdjustment2;
- }
-
- if (NonVirtualAdjustment) {
- ThunkAdjustment ThisAdjustment(NonVirtualAdjustment, 0);
-
- if (!isPure) {
- ThisAdjustments[Index] = ThisAdjustment;
- SavedAdjustments.push_back(
- std::make_pair(GD, std::make_pair(OGD, ThisAdjustment)));
- }
- }
- return true;
- }
-
- return false;
}
-
-void OldVtableBuilder::AppendMethodsToVtable() {
- if (!BuildVtable) {
- VtableComponents.insert(VtableComponents.end(), Methods.size(),
- (llvm::Constant *)0);
- ThisAdjustments.clear();
- BaseReturnTypes.clear();
- Methods.clear();
- return;
- }
-
- // Reserve room in the vtable for our new methods.
- VtableComponents.reserve(VtableComponents.size() + Methods.size());
-
- for (unsigned i = 0, e = Methods.size(); i != e; ++i) {
- GlobalDecl GD = Methods[i];
- const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
-
- // Get the 'this' pointer adjustment.
- ThunkAdjustment ThisAdjustment = ThisAdjustments.lookup(i);
-
- // Construct the return type adjustment.
- ThunkAdjustment ReturnAdjustment;
-
- QualType BaseReturnType = BaseReturnTypes.lookup(i);
- if (!BaseReturnType.isNull() && !MD->isPure()) {
- QualType DerivedType =
- MD->getType()->getAs<FunctionType>()->getResultType();
-
- int64_t NonVirtualAdjustment =
- getNVOffset(BaseReturnType, DerivedType) / 8;
-
- int64_t VirtualAdjustment =
- getVbaseOffset(BaseReturnType, DerivedType);
-
- ReturnAdjustment = ThunkAdjustment(NonVirtualAdjustment,
- VirtualAdjustment);
- }
-
- llvm::Constant *Method = 0;
- if (!ReturnAdjustment.isEmpty()) {
- // Build a covariant thunk.
- CovariantThunkAdjustment Adjustment(ThisAdjustment, ReturnAdjustment);
- Method = wrap(CGM.GetAddrOfCovariantThunk(GD, Adjustment));
- } else if (!ThisAdjustment.isEmpty()) {
- // Build a "regular" thunk.
- Method = wrap(CGM.GetAddrOfThunk(GD, ThisAdjustment));
- } else if (MD->isPure()) {
- // We have a pure virtual method.
- Method = getPureVirtualFn();
- } else {
- // We have a good old regular method.
- Method = WrapAddrOf(GD);
- }
-
- // Add the method to the vtable.
- VtableComponents.push_back(Method);
- }
-
- ThisAdjustments.clear();
- BaseReturnTypes.clear();
-
- Methods.clear();
}
-void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
+void CodeGenVTables::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
// Itanium C++ ABI 2.5.2:
// The order of the virtual function pointers in a virtual table is the
@@ -3481,7 +2479,7 @@ void CGVtableInfo::ComputeMethodVtableIndices(const CXXRecordDecl *RD) {
NumVirtualFunctionPointers[RD] = CurrentIndex;
}
-uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
+uint64_t CodeGenVTables::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
llvm::DenseMap<const CXXRecordDecl *, uint64_t>::iterator I =
NumVirtualFunctionPointers.find(RD);
if (I != NumVirtualFunctionPointers.end())
@@ -3494,7 +2492,7 @@ uint64_t CGVtableInfo::getNumVirtualFunctionPointers(const CXXRecordDecl *RD) {
return I->second;
}
-uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) {
+uint64_t CodeGenVTables::getMethodVtableIndex(GlobalDecl GD) {
MethodVtableIndicesTy::iterator I = MethodVtableIndices.find(GD);
if (I != MethodVtableIndices.end())
return I->second;
@@ -3508,36 +2506,8 @@ uint64_t CGVtableInfo::getMethodVtableIndex(GlobalDecl GD) {
return I->second;
}
-CGVtableInfo::AdjustmentVectorTy*
-CGVtableInfo::getAdjustments(GlobalDecl GD) {
- SavedAdjustmentsTy::iterator I = SavedAdjustments.find(GD);
- if (I != SavedAdjustments.end())
- return &I->second;
-
- const CXXRecordDecl *RD = cast<CXXRecordDecl>(GD.getDecl()->getDeclContext());
- if (!SavedAdjustmentRecords.insert(RD).second)
- return 0;
-
- AddressPointsMapTy AddressPoints;
- OldVtableBuilder b(RD, RD, 0, CGM, false, AddressPoints);
- D1(printf("vtable %s\n", RD->getNameAsCString()));
- b.GenerateVtableForBase(RD);
- b.GenerateVtableForVBases(RD);
-
- for (OldVtableBuilder::SavedAdjustmentsVectorTy::iterator
- i = b.getSavedAdjustments().begin(),
- e = b.getSavedAdjustments().end(); i != e; i++)
- SavedAdjustments[i->first].push_back(i->second);
-
- I = SavedAdjustments.find(GD);
- if (I != SavedAdjustments.end())
- return &I->second;
-
- return 0;
-}
-
-int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
- const CXXRecordDecl *VBase) {
+int64_t CodeGenVTables::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
+ const CXXRecordDecl *VBase) {
ClassPairTy ClassPair(RD, VBase);
VirtualBaseClassOffsetOffsetsMapTy::iterator I =
@@ -3549,7 +2519,6 @@ int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
BaseSubobject(RD, 0),
/*BaseIsVirtual=*/false,
/*OffsetInLayoutClass=*/0);
-
for (VCallAndVBaseOffsetBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
Builder.getVBaseOffsetOffsets().begin(),
@@ -3573,128 +2542,588 @@ int64_t CGVtableInfo::getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
return I->second;
}
-uint64_t CGVtableInfo::getVtableAddressPoint(const CXXRecordDecl *RD) {
- uint64_t AddressPoint =
- (*(*(CGM.getVtableInfo().AddressPoints[RD]))[RD])[std::make_pair(RD, 0)];
-
+uint64_t
+CodeGenVTables::getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD) {
+ uint64_t AddressPoint = AddressPoints.lookup(std::make_pair(RD, Base));
+ assert(AddressPoint && "Address point must not be zero!");
+
return AddressPoint;
}
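// Illustration (invented example, not from this change): the address point
// returned above is counted in vtable slots from the start of the
// subobject's vtable. For a minimal dynamic class such as
//
//   struct A { virtual void f(); };
//
// the vtable is [ offset-to-top, RTTI, &A::f ], so the address point is 2:
// object vptrs point there, and the ABI-defined fields (vcall/vbase offsets,
// offset-to-top, RTTI) sit at negative indices relative to it.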
-llvm::GlobalVariable *
-CGVtableInfo::GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition,
- const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset,
- bool IsVirtual,
- AddressPointsMapTy& AddressPoints) {
- if (GenerateDefinition) {
- if (LayoutClass == RD) {
- assert(!IsVirtual &&
- "Can only have a virtual base in construction vtables!");
- assert(!Offset &&
- "Can only have a base offset in construction vtables!");
- }
+llvm::Constant *CodeGenModule::GetAddrOfThunk(GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Compute the mangled name.
+ llvm::SmallString<256> Name;
+ if (const CXXDestructorDecl* DD = dyn_cast<CXXDestructorDecl>(MD))
+ getMangleContext().mangleCXXDtorThunk(DD, GD.getDtorType(), Thunk.This,
+ Name);
+ else
+ getMangleContext().mangleThunk(MD, Thunk, Name);
+
+ const llvm::Type *Ty = getTypes().GetFunctionTypeForVtable(MD);
+ return GetOrCreateLLVMFunction(Name, Ty, GlobalDecl());
+}
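// Illustration (names and digits invented; the exact encoding depends on the
// adjustment offsets): Itanium thunk manglings bake the adjustment into the
// symbol name, e.g.
//
//   _ZThn8_N7Derived1fEv      // 'this' thunk, non-virtual adjustment of -8
//   _ZTv0_n24_N7Derived1fEv   // 'this' thunk through a vcall offset at -24
//
// so each distinct ThunkInfo produces a distinct symbol that can be shared
// across translation units.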
+
+static llvm::Value *PerformTypeAdjustment(CodeGenFunction &CGF,
+ llvm::Value *Ptr,
+ int64_t NonVirtualAdjustment,
+ int64_t VirtualAdjustment) {
+ if (!NonVirtualAdjustment && !VirtualAdjustment)
+ return Ptr;
+
+ const llvm::Type *Int8PtrTy =
+ llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
+
+ llvm::Value *V = CGF.Builder.CreateBitCast(Ptr, Int8PtrTy);
+
+ if (NonVirtualAdjustment) {
+ // Do the non-virtual adjustment.
+ V = CGF.Builder.CreateConstInBoundsGEP1_64(V, NonVirtualAdjustment);
+ }
+
+ if (VirtualAdjustment) {
+ const llvm::Type *PtrDiffTy =
+ CGF.ConvertType(CGF.getContext().getPointerDiffType());
+
+ // Do the virtual adjustment.
+ llvm::Value *VTablePtrPtr =
+ CGF.Builder.CreateBitCast(V, Int8PtrTy->getPointerTo());
+
+ llvm::Value *VTablePtr = CGF.Builder.CreateLoad(VTablePtrPtr);
+
+ llvm::Value *OffsetPtr =
+ CGF.Builder.CreateConstInBoundsGEP1_64(VTablePtr, VirtualAdjustment);
+
+ OffsetPtr = CGF.Builder.CreateBitCast(OffsetPtr, PtrDiffTy->getPointerTo());
+
+ // Load the adjustment offset from the vtable.
+ llvm::Value *Offset = CGF.Builder.CreateLoad(OffsetPtr);
- VtableBuilder Builder(*this, RD, Offset,
- /*MostDerivedClassIsVirtual=*/IsVirtual,
- LayoutClass);
+ // Adjust our pointer.
+ V = CGF.Builder.CreateInBoundsGEP(V, Offset);
+ }
+
+ // Cast back to the original type.
+ return CGF.Builder.CreateBitCast(V, Ptr->getType());
+}
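// Rough C++ analogue of the IR built by PerformTypeAdjustment, as a sketch
// only (AdjustThis and its parameter types are invented for illustration;
// null checks, where needed, are done by the caller as in the return
// adjustment below):
static char *AdjustThis(char *Ptr, long NonVirtual, long VirtualOffset) {
  Ptr += NonVirtual;                  // compile-time-known byte offset
  if (VirtualOffset) {
    // Load the dynamic offset stored in the vtable, VirtualOffset bytes
    // from the address point, and apply it as well.
    char *VTable = *reinterpret_cast<char **>(Ptr);
    Ptr += *reinterpret_cast<long *>(VTable + VirtualOffset);
  }
  return Ptr;
}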
+
+void CodeGenFunction::GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
+ const ThunkInfo &Thunk) {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
+ QualType ResultType = FPT->getResultType();
+ QualType ThisType = MD->getThisType(getContext());
- if (CGM.getLangOptions().DumpVtableLayouts)
- Builder.dumpLayout(llvm::errs());
+ FunctionArgList FunctionArgs;
+
+ // FIXME: It would be nice if more of this code could be shared with
+ // CodeGenFunction::GenerateCode.
+
+ // Create the implicit 'this' parameter declaration.
+ CXXThisDecl = ImplicitParamDecl::Create(getContext(), 0,
+ MD->getLocation(),
+ &getContext().Idents.get("this"),
+ ThisType);
+
+ // Add the 'this' parameter.
+ FunctionArgs.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
+
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+
+ FunctionArgs.push_back(std::make_pair(Param, Param->getType()));
}
+
+ StartFunction(GlobalDecl(), ResultType, Fn, FunctionArgs, SourceLocation());
- llvm::SmallString<256> OutName;
- if (LayoutClass != RD)
- CGM.getMangleContext().mangleCXXCtorVtable(LayoutClass, Offset / 8,
- RD, OutName);
- else
- CGM.getMangleContext().mangleCXXVtable(RD, OutName);
- llvm::StringRef Name = OutName.str();
+ // Adjust the 'this' pointer if necessary.
+ llvm::Value *AdjustedThisPtr =
+ PerformTypeAdjustment(*this, LoadCXXThis(),
+ Thunk.This.NonVirtual,
+ Thunk.This.VCallOffsetOffset);
+
+ CallArgList CallArgs;
+
+ // Add our adjusted 'this' pointer.
+ CallArgs.push_back(std::make_pair(RValue::get(AdjustedThisPtr), ThisType));
- llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (GV == 0 || CGM.getVtableInfo().AddressPoints[LayoutClass] == 0 ||
- GV->isDeclaration()) {
- OldVtableBuilder b(RD, LayoutClass, Offset, CGM, GenerateDefinition,
- AddressPoints);
+ // Add the rest of the parameters.
+ for (FunctionDecl::param_const_iterator I = MD->param_begin(),
+ E = MD->param_end(); I != E; ++I) {
+ ParmVarDecl *Param = *I;
+ QualType ArgType = Param->getType();
+
+ // FIXME: Declaring a DeclRefExpr on the stack is kinda icky.
+ DeclRefExpr ArgExpr(Param, ArgType.getNonReferenceType(), SourceLocation());
+ CallArgs.push_back(std::make_pair(EmitCallArg(&ArgExpr, ArgType), ArgType));
+ }
- D1(printf("vtable %s\n", RD->getNameAsCString()));
- // First comes the vtables for all the non-virtual bases...
- b.GenerateVtableForBase(RD, Offset);
+ // Get our callee.
+ const llvm::Type *Ty =
+ CGM.getTypes().GetFunctionType(CGM.getTypes().getFunctionInfo(MD),
+ FPT->isVariadic());
+ llvm::Value *Callee = CGM.GetAddrOfFunction(GD, Ty);
- // then the vtables for all the virtual bases.
- b.GenerateVtableForVBases(RD, Offset);
+ const CGFunctionInfo &FnInfo =
+ CGM.getTypes().getFunctionInfo(ResultType, CallArgs,
+ FPT->getExtInfo());
+
+ // Now emit our call.
+ RValue RV = EmitCall(FnInfo, Callee, ReturnValueSlot(), CallArgs, MD);
+
+ if (!Thunk.Return.isEmpty()) {
+ // Emit the return adjustment.
+ bool NullCheckValue = !ResultType->isReferenceType();
+
+ llvm::BasicBlock *AdjustNull = 0;
+ llvm::BasicBlock *AdjustNotNull = 0;
+ llvm::BasicBlock *AdjustEnd = 0;
+
+ llvm::Value *ReturnValue = RV.getScalarVal();
- llvm::Constant *Init = 0;
- const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
- llvm::ArrayType *ArrayType =
- llvm::ArrayType::get(Int8PtrTy, b.getVtableComponents().size());
-
- if (GenerateDefinition)
- Init = llvm::ConstantArray::get(ArrayType, &b.getVtableComponents()[0],
- b.getVtableComponents().size());
-
- llvm::GlobalVariable *OGV = GV;
-
- GV = new llvm::GlobalVariable(CGM.getModule(), ArrayType,
- /*isConstant=*/true, Linkage, Init, Name);
- CGM.setGlobalVisibility(GV, RD);
-
- if (OGV) {
- GV->takeName(OGV);
- llvm::Constant *NewPtr =
- llvm::ConstantExpr::getBitCast(GV, OGV->getType());
- OGV->replaceAllUsesWith(NewPtr);
- OGV->eraseFromParent();
+ if (NullCheckValue) {
+ AdjustNull = createBasicBlock("adjust.null");
+ AdjustNotNull = createBasicBlock("adjust.notnull");
+ AdjustEnd = createBasicBlock("adjust.end");
+
+ llvm::Value *IsNull = Builder.CreateIsNull(ReturnValue);
+ Builder.CreateCondBr(IsNull, AdjustNull, AdjustNotNull);
+ EmitBlock(AdjustNotNull);
+ }
+
+ ReturnValue = PerformTypeAdjustment(*this, ReturnValue,
+ Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ if (NullCheckValue) {
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustNull);
+ Builder.CreateBr(AdjustEnd);
+ EmitBlock(AdjustEnd);
+
+ llvm::PHINode *PHI = Builder.CreatePHI(ReturnValue->getType());
+ PHI->reserveOperandSpace(2);
+ PHI->addIncoming(ReturnValue, AdjustNotNull);
+ PHI->addIncoming(llvm::Constant::getNullValue(ReturnValue->getType()),
+ AdjustNull);
+ ReturnValue = PHI;
}
+
+ RV = RValue::get(ReturnValue);
}
+
+ if (!ResultType->isVoidType())
+ EmitReturnOfRValue(RV, ResultType);
+
+ FinishFunction();
+
+ // Destroy the 'this' declaration.
+ CXXThisDecl->Destroy(getContext());
- return GV;
+ // Set the right linkage.
+ Fn->setLinkage(CGM.getFunctionLinkage(MD));
+
+ // Set the right visibility.
+ CGM.setGlobalVisibility(Fn, MD);
}
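// When the return adjustment and its null check matter, sketched with an
// invented example (not from this patch):
//
//   struct Base1 { virtual ~Base1(); };
//   struct Base2 { virtual Base2 *clone(); };
//   struct Derived : Base1, Base2 { virtual Derived *clone(); };
//
// The Base2-in-Derived slot for clone() holds a thunk that calls
// Derived::clone() and then adds the Derived-to-Base2 offset to the result;
// the "adjust.null" block exists because a null result must be returned
// unadjusted.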
-void CGVtableInfo::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
- const CXXRecordDecl *RD) {
- llvm::GlobalVariable *&Vtable = Vtables[RD];
- if (Vtable) {
- assert(Vtable->getInitializer() && "Vtable doesn't have a definition!");
+void CodeGenVTables::EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk) {
+ llvm::Constant *Entry = CGM.GetAddrOfThunk(GD, Thunk);
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+
+ // Strip off a bitcast if we got one back.
+ if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Entry)) {
+ assert(CE->getOpcode() == llvm::Instruction::BitCast);
+ Entry = CE->getOperand(0);
+ }
+
+  // There's already a declaration with the same name; check if it has the
+  // same type or if we need to replace it.
+ if (cast<llvm::GlobalValue>(Entry)->getType()->getElementType() !=
+ CGM.getTypes().GetFunctionTypeForVtable(MD)) {
+ llvm::GlobalValue *OldThunkFn = cast<llvm::GlobalValue>(Entry);
+
+ // If the types mismatch then we have to rewrite the definition.
+ assert(OldThunkFn->isDeclaration() &&
+ "Shouldn't replace non-declaration");
+
+ // Remove the name from the old thunk function and get a new thunk.
+ OldThunkFn->setName(llvm::StringRef());
+ Entry = CGM.GetAddrOfThunk(GD, Thunk);
+
+ // If needed, replace the old thunk with a bitcast.
+ if (!OldThunkFn->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(Entry, OldThunkFn->getType());
+ OldThunkFn->replaceAllUsesWith(NewPtrForOldDecl);
+ }
+
+ // Remove the old thunk.
+ OldThunkFn->eraseFromParent();
+ }
+
+ // Actually generate the thunk body.
+ llvm::Function *ThunkFn = cast<llvm::Function>(Entry);
+ CodeGenFunction(CGM).GenerateThunk(ThunkFn, GD, Thunk);
+}
+
+void CodeGenVTables::EmitThunks(GlobalDecl GD) {
+ const CXXMethodDecl *MD =
+ cast<CXXMethodDecl>(GD.getDecl())->getCanonicalDecl();
+
+ // We don't need to generate thunks for the base destructor.
+ if (isa<CXXDestructorDecl>(MD) && GD.getDtorType() == Dtor_Base)
+ return;
+
+ const CXXRecordDecl *RD = MD->getParent();
+
+ // Compute VTable related info for this class.
+ ComputeVTableRelatedInformation(RD);
+
+ ThunksMapTy::const_iterator I = Thunks.find(MD);
+ if (I == Thunks.end()) {
+ // We did not find a thunk for this method.
return;
}
+
+ const ThunkInfoVectorTy &ThunkInfoVector = I->second;
+ for (unsigned I = 0, E = ThunkInfoVector.size(); I != E; ++I)
+ EmitThunk(GD, ThunkInfoVector[I]);
+}
+
+void CodeGenVTables::ComputeVTableRelatedInformation(const CXXRecordDecl *RD) {
+ uint64_t *&LayoutData = VTableLayoutMap[RD];
- AddressPointsMapTy AddressPoints;
- Vtable = GenerateVtable(Linkage, /*GenerateDefinition=*/true, RD, RD, 0,
- /*IsVirtual=*/false,
- AddressPoints);
- GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+ // Check if we've computed this information before.
+ if (LayoutData)
+ return;
+
+ VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
- for (CXXRecordDecl::method_iterator i = RD->method_begin(),
- e = RD->method_end(); i != e; ++i) {
- if (!(*i)->isVirtual())
- continue;
- if(!(*i)->hasInlineBody() && !(*i)->isImplicit())
- continue;
+ // Add the VTable layout.
+ uint64_t NumVTableComponents = Builder.getNumVTableComponents();
+ LayoutData = new uint64_t[NumVTableComponents + 1];
- if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(*i)) {
- CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Complete));
- CGM.BuildThunksForVirtual(GlobalDecl(DD, Dtor_Deleting));
- } else {
- CGM.BuildThunksForVirtual(GlobalDecl(*i));
+ // Store the number of components.
+ LayoutData[0] = NumVTableComponents;
+
+ // Store the components.
+ std::copy(Builder.vtable_components_data_begin(),
+ Builder.vtable_components_data_end(),
+ &LayoutData[1]);
+
+ // Add the known thunks.
+ Thunks.insert(Builder.thunks_begin(), Builder.thunks_end());
+
+ // Add the thunks needed in this vtable.
+  assert(!VTableThunksMap.count(RD) &&
+         "Thunks already exist for this vtable!");
+
+ VTableThunksTy &VTableThunks = VTableThunksMap[RD];
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Add the address points.
+ for (VtableBuilder::AddressPointsMapTy::const_iterator I =
+ Builder.address_points_begin(), E = Builder.address_points_end();
+ I != E; ++I) {
+
+ uint64_t &AddressPoint = AddressPoints[std::make_pair(RD, I->first)];
+
+    // Check if we already have the address point for this base.
+ assert(!AddressPoint && "Address point already exists for this base!");
+
+ AddressPoint = I->second;
+ }
+
+ // If we don't have the vbase information for this class, insert it.
+ // getVirtualBaseOffsetOffset will compute it separately without computing
+ // the rest of the vtable related information.
+ if (!RD->getNumVBases())
+ return;
+
+ const RecordType *VBaseRT =
+ RD->vbases_begin()->getType()->getAs<RecordType>();
+ const CXXRecordDecl *VBase = cast<CXXRecordDecl>(VBaseRT->getDecl());
+
+ if (VirtualBaseClassOffsetOffsets.count(std::make_pair(RD, VBase)))
+ return;
+
+ for (VtableBuilder::VBaseOffsetOffsetsMapTy::const_iterator I =
+ Builder.getVBaseOffsetOffsets().begin(),
+ E = Builder.getVBaseOffsetOffsets().end(); I != E; ++I) {
+ // Insert all types.
+ ClassPairTy ClassPair(RD, I->first);
+
+ VirtualBaseClassOffsetOffsets.insert(std::make_pair(ClassPair, I->second));
+ }
+}
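// Shape of the LayoutData allocation made above (values illustrative):
//
//   LayoutData[0] = NumVTableComponents;          // component count
//   LayoutData[1] = <offset-to-top component>;    // opaque VtableComponent
//   LayoutData[2] = <RTTI component>;             //   encodings follow
//   LayoutData[3] = <function-pointer component>;
//
// getNumVTableComponents() reads slot 0 and getVTableComponentsData()
// returns &LayoutData[1], so one allocation carries the whole layout.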
+
+llvm::Constant *
+CodeGenVTables::CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks) {
+ llvm::SmallVector<llvm::Constant *, 64> Inits;
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+
+ const llvm::Type *PtrDiffTy =
+ CGM.getTypes().ConvertType(CGM.getContext().getPointerDiffType());
+
+ QualType ClassType = CGM.getContext().getTagDeclType(RD);
+ llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(ClassType);
+
+ unsigned NextVTableThunkIndex = 0;
+
+ llvm::Constant* PureVirtualFn = 0;
+
+ for (unsigned I = 0; I != NumComponents; ++I) {
+ VtableComponent Component =
+ VtableComponent::getFromOpaqueInteger(Components[I]);
+
+ llvm::Constant *Init = 0;
+
+ switch (Component.getKind()) {
+ case VtableComponent::CK_VCallOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVCallOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VtableComponent::CK_VBaseOffset:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getVBaseOffset());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VtableComponent::CK_OffsetToTop:
+ Init = llvm::ConstantInt::get(PtrDiffTy, Component.getOffsetToTop());
+ Init = llvm::ConstantExpr::getIntToPtr(Init, Int8PtrTy);
+ break;
+ case VtableComponent::CK_RTTI:
+ Init = llvm::ConstantExpr::getBitCast(RTTI, Int8PtrTy);
+ break;
+ case VtableComponent::CK_FunctionPointer:
+ case VtableComponent::CK_CompleteDtorPointer:
+ case VtableComponent::CK_DeletingDtorPointer: {
+ GlobalDecl GD;
+
+ // Get the right global decl.
+ switch (Component.getKind()) {
+ default:
+ llvm_unreachable("Unexpected vtable component kind");
+ case VtableComponent::CK_FunctionPointer:
+ GD = Component.getFunctionDecl();
+ break;
+ case VtableComponent::CK_CompleteDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Complete);
+ break;
+ case VtableComponent::CK_DeletingDtorPointer:
+ GD = GlobalDecl(Component.getDestructorDecl(), Dtor_Deleting);
+ break;
+ }
+
+ if (cast<CXXMethodDecl>(GD.getDecl())->isPure()) {
+ // We have a pure virtual member function.
+ if (!PureVirtualFn) {
+ const llvm::FunctionType *Ty =
+ llvm::FunctionType::get(llvm::Type::getVoidTy(CGM.getLLVMContext()),
+ /*isVarArg=*/false);
+ PureVirtualFn =
+ CGM.CreateRuntimeFunction(Ty, "__cxa_pure_virtual");
+ PureVirtualFn = llvm::ConstantExpr::getBitCast(PureVirtualFn,
+ Int8PtrTy);
+ }
+
+ Init = PureVirtualFn;
+ } else {
+ // Check if we should use a thunk.
+ if (NextVTableThunkIndex < VTableThunks.size() &&
+ VTableThunks[NextVTableThunkIndex].first == I) {
+ const ThunkInfo &Thunk = VTableThunks[NextVTableThunkIndex].second;
+
+ Init = CGM.GetAddrOfThunk(GD, Thunk);
+
+ NextVTableThunkIndex++;
+ } else {
+ const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
+ const llvm::Type *Ty = CGM.getTypes().GetFunctionTypeForVtable(MD);
+
+ Init = CGM.GetAddrOfFunction(GD, Ty);
+ }
+
+ Init = llvm::ConstantExpr::getBitCast(Init, Int8PtrTy);
+ }
+ break;
+ }
+
+ case VtableComponent::CK_UnusedFunctionPointer:
+ Init = llvm::ConstantExpr::getNullValue(Int8PtrTy);
+ break;
+ };
+
+ Inits.push_back(Init);
+ }
+
+ llvm::ArrayType *ArrayType = llvm::ArrayType::get(Int8PtrTy, NumComponents);
+ return llvm::ConstantArray::get(ArrayType, Inits.data(), Inits.size());
+}
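// Resulting initializer for a simple class, as a hedged illustration (class
// and symbols invented, not taken from this patch):
//
//   struct A { virtual void f(); virtual ~A(); };
//
//   @_ZTV1A = [5 x i8*] { offset-to-top (0), &_ZTI1A (RTTI), &A::f,
//                         A's complete dtor, A's deleting dtor }
//
// Pure virtual slots become __cxa_pure_virtual, and any index listed in
// VTableThunks points at the corresponding thunk instead of the method.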
+
+/// GetGlobalVariable - Returns a global variable of the given type. If a
+/// variable with the same name but a different type already exists, it is
+/// replaced by a new variable of the right type, and all uses of the old
+/// variable are redirected to the new one via a bitcast.
+/// FIXME: We should move this to CodeGenModule and rename it to something
+/// better and then use it in CGVTT and CGRTTI.
+static llvm::GlobalVariable *
+GetGlobalVariable(llvm::Module &Module, llvm::StringRef Name,
+ const llvm::Type *Ty,
+ llvm::GlobalValue::LinkageTypes Linkage) {
+
+ llvm::GlobalVariable *GV = Module.getNamedGlobal(Name);
+ llvm::GlobalVariable *OldGV = 0;
+
+ if (GV) {
+ // Check if the variable has the right type.
+ if (GV->getType()->getElementType() == Ty)
+ return GV;
+
+ assert(GV->isDeclaration() && "Declaration has wrong type!");
+
+ OldGV = GV;
+ }
+
+ // Create a new variable.
+ GV = new llvm::GlobalVariable(Module, Ty, /*isConstant=*/true,
+ Linkage, 0, Name);
+
+ if (OldGV) {
+ // Replace occurrences of the old variable if needed.
+ GV->takeName(OldGV);
+
+ if (!OldGV->use_empty()) {
+ llvm::Constant *NewPtrForOldDecl =
+ llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
+ OldGV->replaceAllUsesWith(NewPtrForOldDecl);
}
+
+ OldGV->eraseFromParent();
}
+
+ return GV;
}
-llvm::GlobalVariable *CGVtableInfo::getVtable(const CXXRecordDecl *RD) {
- llvm::GlobalVariable *Vtable = Vtables.lookup(RD);
+llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTable(const CXXRecordDecl *RD) {
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXVtable(RD, OutName);
+ llvm::StringRef Name = OutName.str();
+
+ ComputeVTableRelatedInformation(RD);
- if (!Vtable) {
- AddressPointsMapTy AddressPoints;
- Vtable = GenerateVtable(llvm::GlobalValue::ExternalLinkage,
- /*GenerateDefinition=*/false, RD, RD, 0,
- /*IsVirtual=*/false, AddressPoints);
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, getNumVTableComponents(RD));
+
+ return GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::ExternalLinkage);
+}
+
+void
+CodeGenVTables::EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVtableLayouts) {
+ VtableBuilder Builder(*this, RD, 0, /*MostDerivedClassIsVirtual=*/0, RD);
+
+ Builder.dumpLayout(llvm::errs());
}
- return Vtable;
+ assert(VTableThunksMap.count(RD) &&
+ "No thunk status for this record decl!");
+
+ const VTableThunksTy& Thunks = VTableThunksMap[RD];
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(RD, getVTableComponentsData(RD),
+ getNumVTableComponents(RD), Thunks);
+ VTable->setInitializer(Init);
+
+ // Set the correct linkage.
+ VTable->setLinkage(Linkage);
}
-void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
+llvm::GlobalVariable *
+CodeGenVTables::GenerateConstructionVTable(const CXXRecordDecl *RD,
+ const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints) {
+ VtableBuilder Builder(*this, Base.getBase(), Base.getBaseOffset(),
+ /*MostDerivedClassIsVirtual=*/BaseIsVirtual, RD);
+
+ // Dump the vtable layout if necessary.
+ if (CGM.getLangOptions().DumpVtableLayouts)
+ Builder.dumpLayout(llvm::errs());
+
+ // Add the address points.
+ AddressPoints.insert(Builder.address_points_begin(),
+ Builder.address_points_end());
+
+ // Get the mangled construction vtable name.
+ llvm::SmallString<256> OutName;
+ CGM.getMangleContext().mangleCXXCtorVtable(RD, Base.getBaseOffset() / 8,
+ Base.getBase(), OutName);
+ llvm::StringRef Name = OutName.str();
+
+ const llvm::Type *Int8PtrTy = llvm::Type::getInt8PtrTy(CGM.getLLVMContext());
+ llvm::ArrayType *ArrayType =
+ llvm::ArrayType::get(Int8PtrTy, Builder.getNumVTableComponents());
+
+ // Create the variable that will hold the construction vtable.
+ llvm::GlobalVariable *VTable =
+ GetGlobalVariable(CGM.getModule(), Name, ArrayType,
+ llvm::GlobalValue::InternalLinkage);
+
+ // Add the thunks.
+ VTableThunksTy VTableThunks;
+ VTableThunks.append(Builder.vtable_thunks_begin(),
+ Builder.vtable_thunks_end());
+
+ // Sort them.
+ std::sort(VTableThunks.begin(), VTableThunks.end());
+
+ // Create and set the initializer.
+ llvm::Constant *Init =
+ CreateVTableInitializer(Base.getBase(),
+ Builder.vtable_components_data_begin(),
+ Builder.getNumVTableComponents(), VTableThunks);
+ VTable->setInitializer(Init);
+
+ return VTable;
+}
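// Why construction vtables exist, with an invented example (not from this
// patch): while a base-class constructor runs, virtual dispatch must behave
// as if the object were of the base type, yet virtual-base offsets must
// already match the most derived class's layout.
//
//   struct V { virtual void f(); };
//   struct B : virtual V { B(); };   // B() may call f()
//   struct D : B { D(); };
//
// D::D() installs the B-in-D construction vtable (found through the VTT)
// before running B::B(), so B's offset to V reflects D's layout.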
+
+void
+CodeGenVTables::GenerateClassData(llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD) {
+ llvm::GlobalVariable *&VTable = Vtables[RD];
+ if (VTable) {
+ assert(VTable->getInitializer() && "Vtable doesn't have a definition!");
+ return;
+ }
+
+ VTable = GetAddrOfVTable(RD);
+ EmitVTableDefinition(VTable, Linkage, RD);
+
+ GenerateVTT(Linkage, /*GenerateDefinition=*/true, RD);
+}
+
+void CodeGenVTables::EmitVTableRelatedData(GlobalDecl GD) {
const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
const CXXRecordDecl *RD = MD->getParent();
@@ -3702,20 +3131,34 @@ void CGVtableInfo::MaybeEmitVtable(GlobalDecl GD) {
if (!RD->isDynamicClass())
return;
+ // Check if we need to emit thunks for this function.
+ if (MD->isVirtual())
+ EmitThunks(GD);
+
// Get the key function.
const CXXMethodDecl *KeyFunction = CGM.getContext().getKeyFunction(RD);
+ TemplateSpecializationKind RDKind = RD->getTemplateSpecializationKind();
+ TemplateSpecializationKind MDKind = MD->getTemplateSpecializationKind();
+
if (KeyFunction) {
// We don't have the right key function.
if (KeyFunction->getCanonicalDecl() != MD->getCanonicalDecl())
return;
+ } else {
+ // If this is an explicit instantiation of a method, we don't need a vtable.
+    // Since we have no key function, we will emit the vtable when we see
+    // a use, and just defining a function is not a use.
+ if ((RDKind == TSK_ImplicitInstantiation ||
+ RDKind == TSK_ExplicitInstantiationDeclaration) &&
+ MDKind == TSK_ExplicitInstantiationDefinition)
+ return;
}
if (Vtables.count(RD))
return;
- TemplateSpecializationKind kind = RD->getTemplateSpecializationKind();
- if (kind == TSK_ImplicitInstantiation)
+ if (RDKind == TSK_ImplicitInstantiation)
CGM.DeferredVtables.push_back(RD);
else
GenerateClassData(CGM.getVtableLinkage(RD), RD);
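// Key-function behaviour, sketched with an invented example:
//
//   struct S {
//     virtual void f();    // first non-inline virtual: S's key function
//     virtual void g() {}  // inline, so not the key function
//   };
//
// Vtable, RTTI and VTT for S are emitted only in the translation unit that
// defines S::f(); other TUs reference them externally. Classes with no key
// function fall through to the checks above: a method that is an explicit
// instantiation definition triggers no emission, and implicitly instantiated
// classes have their vtable deferred until a real use is seen.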
diff --git a/lib/CodeGen/CGVtable.h b/lib/CodeGen/CGVtable.h
index 5a146ab97c99..60735554d56e 100644
--- a/lib/CodeGen/CGVtable.h
+++ b/lib/CodeGen/CGVtable.h
@@ -15,7 +15,6 @@
#define CLANG_CODEGEN_CGVTABLE_H
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/DenseSet.h"
#include "llvm/GlobalVariable.h"
#include "GlobalDecl.h"
@@ -25,41 +24,93 @@ namespace clang {
namespace CodeGen {
class CodeGenModule;
-/// ThunkAdjustment - Virtual and non-virtual adjustment for thunks.
-class ThunkAdjustment {
-public:
- ThunkAdjustment(int64_t NonVirtual, int64_t Virtual)
- : NonVirtual(NonVirtual),
- Virtual(Virtual) { }
-
- ThunkAdjustment()
- : NonVirtual(0), Virtual(0) { }
+/// ReturnAdjustment - A return adjustment.
+struct ReturnAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
+ int64_t NonVirtual;
+
+  /// VBaseOffsetOffset - The offset (in bytes), relative to the address
+  /// point, of the virtual base class offset.
+ int64_t VBaseOffsetOffset;
+
+ ReturnAdjustment() : NonVirtual(0), VBaseOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VBaseOffsetOffset; }
- // isEmpty - Return whether this thunk adjustment is empty.
- bool isEmpty() const {
- return NonVirtual == 0 && Virtual == 0;
+ friend bool operator==(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset == RHS.VBaseOffsetOffset;
}
- /// NonVirtual - The non-virtual adjustment.
+ friend bool operator<(const ReturnAdjustment &LHS,
+ const ReturnAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VBaseOffsetOffset < RHS.VBaseOffsetOffset;
+ }
+};
+
+/// ThisAdjustment - A 'this' pointer adjustment.
+struct ThisAdjustment {
+ /// NonVirtual - The non-virtual adjustment from the derived object to its
+ /// nearest virtual base.
int64_t NonVirtual;
- /// Virtual - The virtual adjustment.
- int64_t Virtual;
+ /// VCallOffsetOffset - The offset (in bytes), relative to the address point,
+ /// of the virtual call offset.
+ int64_t VCallOffsetOffset;
+
+ ThisAdjustment() : NonVirtual(0), VCallOffsetOffset(0) { }
+
+ bool isEmpty() const { return !NonVirtual && !VCallOffsetOffset; }
+
+ friend bool operator==(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset == RHS.VCallOffsetOffset;
+ }
+
+ friend bool operator<(const ThisAdjustment &LHS,
+ const ThisAdjustment &RHS) {
+ if (LHS.NonVirtual < RHS.NonVirtual)
+ return true;
+
+ return LHS.NonVirtual == RHS.NonVirtual &&
+ LHS.VCallOffsetOffset < RHS.VCallOffsetOffset;
+ }
};
-/// CovariantThunkAdjustment - Adjustment of the 'this' pointer and the
-/// return pointer for covariant thunks.
-class CovariantThunkAdjustment {
-public:
- CovariantThunkAdjustment(const ThunkAdjustment &ThisAdjustment,
- const ThunkAdjustment &ReturnAdjustment)
- : ThisAdjustment(ThisAdjustment), ReturnAdjustment(ReturnAdjustment) { }
+/// ThunkInfo - The 'this' pointer adjustment as well as an optional return
+/// adjustment for a thunk.
+struct ThunkInfo {
+ /// This - The 'this' pointer adjustment.
+ ThisAdjustment This;
+
+ /// Return - The return adjustment.
+ ReturnAdjustment Return;
- CovariantThunkAdjustment() { }
+ ThunkInfo() { }
- ThunkAdjustment ThisAdjustment;
- ThunkAdjustment ReturnAdjustment;
-};
+ ThunkInfo(const ThisAdjustment &This, const ReturnAdjustment &Return)
+ : This(This), Return(Return) { }
+
+ friend bool operator==(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ return LHS.This == RHS.This && LHS.Return == RHS.Return;
+ }
+
+ friend bool operator<(const ThunkInfo &LHS, const ThunkInfo &RHS) {
+ if (LHS.This < RHS.This)
+ return true;
+
+ return LHS.This == RHS.This && LHS.Return < RHS.Return;
+ }
+
+ bool isEmpty() const { return This.isEmpty() && Return.isEmpty(); }
+};
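// Construction sketch with invented offsets: a thunk that slides 'this' back
// 8 bytes non-virtually, then through a vcall offset stored 24 bytes before
// the address point, with no return adjustment:
//
//   ThisAdjustment TA;
//   TA.NonVirtual = -8;
//   TA.VCallOffsetOffset = -24;
//   ThunkInfo Thunk(TA, ReturnAdjustment());
//   assert(!Thunk.isEmpty() && Thunk.Return.isEmpty());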
// BaseSubobject - Uniquely identifies a direct or indirect base class.
// Stores both the base class decl and the offset from the most derived class to
@@ -126,19 +177,7 @@ template <> struct isPodLike<clang::CodeGen::BaseSubobject> {
namespace clang {
namespace CodeGen {
-class CGVtableInfo {
-public:
- typedef std::vector<std::pair<GlobalDecl, ThunkAdjustment> >
- AdjustmentVectorTy;
-
- typedef std::pair<const CXXRecordDecl *, uint64_t> CtorVtable_t;
- typedef llvm::DenseMap<CtorVtable_t, int64_t> AddrSubMap_t;
- typedef llvm::DenseMap<const CXXRecordDecl *, AddrSubMap_t *> AddrMap_t;
- llvm::DenseMap<const CXXRecordDecl *, AddrMap_t*> AddressPoints;
-
- typedef llvm::DenseMap<BaseSubobject, uint64_t> AddressPointsMapTy;
-
-private:
+class CodeGenVTables {
CodeGenModule &CGM;
/// MethodVtableIndices - Contains the index (relative to the vtable address
@@ -163,31 +202,97 @@ private:
/// pointers in the vtable for a given record decl.
llvm::DenseMap<const CXXRecordDecl *, uint64_t> NumVirtualFunctionPointers;
- typedef llvm::DenseMap<GlobalDecl, AdjustmentVectorTy> SavedAdjustmentsTy;
- SavedAdjustmentsTy SavedAdjustments;
- llvm::DenseSet<const CXXRecordDecl*> SavedAdjustmentRecords;
+ typedef llvm::SmallVector<ThunkInfo, 1> ThunkInfoVectorTy;
+ typedef llvm::DenseMap<const CXXMethodDecl *, ThunkInfoVectorTy> ThunksMapTy;
+
+ /// Thunks - Contains all thunks that a given method decl will need.
+ ThunksMapTy Thunks;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, uint64_t *> VTableLayoutMapTy;
+
+ /// VTableLayoutMap - Stores the vtable layout for all record decls.
+ /// The layout is stored as an array of 64-bit integers, where the first
+ /// integer is the number of vtable entries in the layout, and the subsequent
+ /// integers are the vtable components.
+ VTableLayoutMapTy VTableLayoutMap;
+
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *,
+ BaseSubobject>, uint64_t> AddressPointsMapTy;
+
+  /// AddressPoints - Address points for all vtables.
+ AddressPointsMapTy AddressPoints;
+
+ /// VTableAddressPointsMapTy - Address points for a single vtable.
+ typedef llvm::DenseMap<BaseSubobject, uint64_t> VTableAddressPointsMapTy;
+
+ typedef llvm::SmallVector<std::pair<uint64_t, ThunkInfo>, 1>
+ VTableThunksTy;
+
+ typedef llvm::DenseMap<const CXXRecordDecl *, VTableThunksTy>
+ VTableThunksMapTy;
+
+ /// VTableThunksMap - Contains thunks needed by vtables.
+ VTableThunksMapTy VTableThunksMap;
+
+ uint64_t getNumVTableComponents(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ return VTableLayoutMap.lookup(RD)[0];
+ }
+
+ const uint64_t *getVTableComponentsData(const CXXRecordDecl *RD) const {
+ assert(VTableLayoutMap.count(RD) && "No vtable layout for this class!");
+
+ uint64_t *Components = VTableLayoutMap.lookup(RD);
+ return &Components[1];
+ }
+
+ typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesMapTy;
+
+ /// SubVTTIndicies - Contains indices into the various sub-VTTs.
+ SubVTTIndiciesMapTy SubVTTIndicies;
+
+
+ typedef llvm::DenseMap<std::pair<const CXXRecordDecl *,
+ BaseSubobject>, uint64_t>
+ SecondaryVirtualPointerIndicesMapTy;
- typedef llvm::DenseMap<ClassPairTy, uint64_t> SubVTTIndiciesTy;
- SubVTTIndiciesTy SubVTTIndicies;
+ /// SecondaryVirtualPointerIndices - Contains the secondary virtual pointer
+ /// indices.
+ SecondaryVirtualPointerIndicesMapTy SecondaryVirtualPointerIndices;
/// getNumVirtualFunctionPointers - Return the number of virtual function
/// pointers in the vtable for a given record decl.
uint64_t getNumVirtualFunctionPointers(const CXXRecordDecl *RD);
void ComputeMethodVtableIndices(const CXXRecordDecl *RD);
-
- llvm::GlobalVariable *
- GenerateVtable(llvm::GlobalVariable::LinkageTypes Linkage,
- bool GenerateDefinition, const CXXRecordDecl *LayoutClass,
- const CXXRecordDecl *RD, uint64_t Offset, bool IsVirtual,
- AddressPointsMapTy& AddressPoints);
llvm::GlobalVariable *GenerateVTT(llvm::GlobalVariable::LinkageTypes Linkage,
bool GenerateDefinition,
const CXXRecordDecl *RD);
+ /// EmitThunk - Emit a single thunk.
+ void EmitThunk(GlobalDecl GD, const ThunkInfo &Thunk);
+
+ /// EmitThunks - Emit the associated thunks for the given global decl.
+ void EmitThunks(GlobalDecl GD);
+
+ /// ComputeVTableRelatedInformation - Compute and store all vtable related
+ /// information (vtable layout, vbase offset offsets, thunks etc) for the
+ /// given record decl.
+ void ComputeVTableRelatedInformation(const CXXRecordDecl *RD);
+
+ /// CreateVTableInitializer - Create a vtable initializer for the given record
+ /// decl.
+ /// \param Components - The vtable components; this is really an array of
+ /// VTableComponents.
+ llvm::Constant *CreateVTableInitializer(const CXXRecordDecl *RD,
+ const uint64_t *Components,
+ unsigned NumComponents,
+ const VTableThunksTy &VTableThunks);
+
public:
- CGVtableInfo(CodeGenModule &CGM)
+ CodeGenVTables(CodeGenModule &CGM)
: CGM(CGM) { }
/// needsVTTParameter - Return whether the given global decl needs a VTT
@@ -199,6 +304,11 @@ public:
/// given record decl.
uint64_t getSubVTTIndex(const CXXRecordDecl *RD, const CXXRecordDecl *Base);
+ /// getSecondaryVirtualPointerIndex - Return the index in the VTT where the
+ /// virtual pointer for the given subobject is located.
+ uint64_t getSecondaryVirtualPointerIndex(const CXXRecordDecl *RD,
+ BaseSubobject Base);
+
/// getMethodVtableIndex - Return the index (relative to the vtable address
/// point) where the function pointer for the given virtual function is
/// stored.
@@ -212,35 +322,32 @@ public:
int64_t getVirtualBaseOffsetOffset(const CXXRecordDecl *RD,
const CXXRecordDecl *VBase);
- AdjustmentVectorTy *getAdjustments(GlobalDecl GD);
-
- /// getVtableAddressPoint - returns the address point of the vtable for the
- /// given record decl.
- /// FIXME: This should return a list of address points.
- uint64_t getVtableAddressPoint(const CXXRecordDecl *RD);
-
- llvm::GlobalVariable *getVtable(const CXXRecordDecl *RD);
-
- /// CtorVtableInfo - Information about a constructor vtable.
- struct CtorVtableInfo {
- /// Vtable - The vtable itself.
- llvm::GlobalVariable *Vtable;
+ /// getAddressPoint - Get the address point of the given subobject in the
+ /// class decl.
+ uint64_t getAddressPoint(BaseSubobject Base, const CXXRecordDecl *RD);
- /// AddressPoints - The address points in this constructor vtable.
- AddressPointsMapTy AddressPoints;
-
- CtorVtableInfo() : Vtable(0) { }
- };
+ /// GetAddrOfVTable - Get the address of the vtable for the given record decl.
+ llvm::GlobalVariable *GetAddrOfVTable(const CXXRecordDecl *RD);
+
+ /// EmitVTableDefinition - Emit the definition of the given vtable.
+ void EmitVTableDefinition(llvm::GlobalVariable *VTable,
+ llvm::GlobalVariable::LinkageTypes Linkage,
+ const CXXRecordDecl *RD);
- CtorVtableInfo getCtorVtable(const CXXRecordDecl *RD,
- const BaseSubobject &Base,
- bool BaseIsVirtual);
+ /// GenerateConstructionVTable - Generate a construction vtable for the given
+ /// base subobject.
+ llvm::GlobalVariable *
+ GenerateConstructionVTable(const CXXRecordDecl *RD, const BaseSubobject &Base,
+ bool BaseIsVirtual,
+ VTableAddressPointsMapTy& AddressPoints);
llvm::GlobalVariable *getVTT(const CXXRecordDecl *RD);
- void MaybeEmitVtable(GlobalDecl GD);
+  /// EmitVTableRelatedData - Will emit any thunks that the global decl might
+  /// have, as well as the vtable itself if the global decl is the key function.
+ void EmitVTableRelatedData(GlobalDecl GD);
- /// GenerateClassData - Generate all the class data requires to be generated
+ /// GenerateClassData - Generate all the class data required to be generated
/// upon definition of a KeyFunction. This includes the vtable, the
/// rtti data structure and the VTT.
///
diff --git a/lib/CodeGen/CodeGenFunction.cpp b/lib/CodeGen/CodeGenFunction.cpp
index f45582705618..b863aff23612 100644
--- a/lib/CodeGen/CodeGenFunction.cpp
+++ b/lib/CodeGen/CodeGenFunction.cpp
@@ -199,8 +199,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
QualType FnType = getContext().getFunctionType(RetTy, 0, 0, false, 0,
false, false, 0, 0,
- /*FIXME?*/false,
- /*FIXME?*/CC_Default);
+ /*FIXME?*/
+ FunctionType::ExtInfo());
// Emit subprogram debug descriptor.
if (CGDebugInfo *DI = getDebugInfo()) {
@@ -211,7 +211,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
// FIXME: Leaked.
// CC info is ignored, hopefully?
CurFnInfo = &CGM.getTypes().getFunctionInfo(FnRetTy, Args,
- CC_Default, false);
+ FunctionType::ExtInfo());
if (RetTy->isVoidType()) {
// Void type; nothing to return.
@@ -279,7 +279,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn) {
Args.push_back(std::make_pair(CXXThisDecl, CXXThisDecl->getType()));
// Check if we need a VTT parameter as well.
- if (CGVtableInfo::needsVTTParameter(GD)) {
+ if (CodeGenVTables::needsVTTParameter(GD)) {
// FIXME: The comment about using a fake decl above applies here too.
QualType T = getContext().getPointerType(getContext().VoidPtrTy);
CXXVTTDecl =
diff --git a/lib/CodeGen/CodeGenFunction.h b/lib/CodeGen/CodeGenFunction.h
index bd12c4a87c29..f21350d0f2d9 100644
--- a/lib/CodeGen/CodeGenFunction.h
+++ b/lib/CodeGen/CodeGenFunction.h
@@ -22,7 +22,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/ValueHandle.h"
-#include <map>
#include "CodeGenModule.h"
#include "CGBlocks.h"
#include "CGBuilder.h"
@@ -254,6 +253,27 @@ public:
}
};
+ /// CXXTemporariesCleanupScope - Enters a new scope for catching live
+ /// temporaries, all of which will be popped once the scope is exited.
+ class CXXTemporariesCleanupScope {
+ CodeGenFunction &CGF;
+ size_t NumLiveTemporaries;
+
+ // DO NOT IMPLEMENT
+ CXXTemporariesCleanupScope(const CXXTemporariesCleanupScope &);
+ CXXTemporariesCleanupScope &operator=(const CXXTemporariesCleanupScope &);
+
+ public:
+ explicit CXXTemporariesCleanupScope(CodeGenFunction &CGF)
+ : CGF(CGF), NumLiveTemporaries(CGF.LiveTemporaries.size()) { }
+
+ ~CXXTemporariesCleanupScope() {
+ while (CGF.LiveTemporaries.size() > NumLiveTemporaries)
+ CGF.PopCXXTemporary();
+ }
+ };
+
+
/// EmitCleanupBlocks - Takes the old cleanup stack size and emits the cleanup
/// blocks that have been added.
void EmitCleanupBlocks(size_t OldCleanupStackSize);
@@ -504,30 +524,29 @@ public:
/// legal to call this function even if there is no current insertion point.
void FinishFunction(SourceLocation EndLoc=SourceLocation());
- /// DynamicTypeAdjust - Do the non-virtual and virtual adjustments on an
- /// object pointer to alter the dynamic type of the pointer. Used by
- /// GenerateCovariantThunk for building thunks.
- llvm::Value *DynamicTypeAdjust(llvm::Value *V,
- const ThunkAdjustment &Adjustment);
-
- /// GenerateThunk - Generate a thunk for the given method
- llvm::Constant *GenerateThunk(llvm::Function *Fn, GlobalDecl GD,
- bool Extern,
- const ThunkAdjustment &ThisAdjustment);
- llvm::Constant *
- GenerateCovariantThunk(llvm::Function *Fn, GlobalDecl GD,
- bool Extern,
- const CovariantThunkAdjustment &Adjustment);
-
+ /// GenerateThunk - Generate a thunk for the given method.
+ void GenerateThunk(llvm::Function *Fn, GlobalDecl GD, const ThunkInfo &Thunk);
+
void EmitCtorPrologue(const CXXConstructorDecl *CD, CXXCtorType Type);
- void InitializeVtablePtrs(const CXXRecordDecl *ClassDecl);
+ /// InitializeVTablePointer - Initialize the vtable pointer of the given
+ /// subobject.
+ ///
+ /// \param BaseIsMorallyVirtual - Whether the base subobject is a virtual base
+ /// or a direct or indirect base of a virtual base.
+ void InitializeVTablePointer(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass);
+
+ typedef llvm::SmallPtrSet<const CXXRecordDecl *, 4> VisitedVirtualBasesSetTy;
+ void InitializeVTablePointers(BaseSubobject Base, bool BaseIsMorallyVirtual,
+ bool BaseIsNonVirtualPrimaryBase,
+ llvm::Constant *VTable,
+ const CXXRecordDecl *VTableClass,
+ VisitedVirtualBasesSetTy& VBases);
+
+ void InitializeVTablePointers(const CXXRecordDecl *ClassDecl);
- void InitializeVtablePtrsRecursive(const CXXRecordDecl *ClassDecl,
- llvm::Constant *Vtable,
- CGVtableInfo::AddrSubMap_t& AddressPoints,
- llvm::Value *ThisPtr,
- uint64_t Offset);
void SynthesizeCXXCopyConstructor(const FunctionArgList &Args);
void SynthesizeCXXCopyAssignment(const FunctionArgList &Args);
@@ -1272,6 +1291,10 @@ public:
/// getTrapBB - Create a basic block that will call the trap intrinsic. We'll
/// generate a branch around the created basic block as necessary.
llvm::BasicBlock* getTrapBB();
+
+ /// EmitCallArg - Emit a single call argument.
+ RValue EmitCallArg(const Expr *E, QualType ArgType);
+
private:
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -1303,9 +1326,6 @@ private:
/// current cleanup scope.
void AddBranchFixup(llvm::BranchInst *BI);
- /// EmitCallArg - Emit a single call argument.
- RValue EmitCallArg(const Expr *E, QualType ArgType);
-
/// EmitCallArgs - Emit call arguments for a function.
/// The CallArgTypeInfo parameter is used for iterating over the known
/// argument types of the function being called.
diff --git a/lib/CodeGen/CodeGenModule.cpp b/lib/CodeGen/CodeGenModule.cpp
index b4b5bbdb99aa..3c872c8560d2 100644
--- a/lib/CodeGen/CodeGenModule.cpp
+++ b/lib/CodeGen/CodeGenModule.cpp
@@ -47,7 +47,7 @@ CodeGenModule::CodeGenModule(ASTContext &C, const CodeGenOptions &CGO,
Features(C.getLangOptions()), CodeGenOpts(CGO), TheModule(M),
TheTargetData(TD), TheTargetCodeGenInfo(0), Diags(diags),
Types(C, M, TD, getTargetCodeGenInfo().getABIInfo()),
- MangleCtx(C), VtableInfo(*this), Runtime(0),
+ MangleCtx(C), VTables(*this), Runtime(0),
MemCpyFn(0), MemMoveFn(0), MemSetFn(0), CFConstantStringClassRef(0),
VMContext(M.getContext()) {
@@ -79,6 +79,7 @@ void CodeGenModule::createObjCRuntime() {
}
void CodeGenModule::Release() {
+ EmitFundamentalRTTIDescriptors();
EmitDeferred();
EmitCXXGlobalInitFunc();
EmitCXXGlobalDtorFunc();
@@ -495,7 +496,7 @@ void CodeGenModule::EmitDeferred() {
if (!DeferredVtables.empty()) {
const CXXRecordDecl *RD = DeferredVtables.back();
DeferredVtables.pop_back();
- getVtableInfo().GenerateClassData(getVtableLinkage(RD), RD);
+ getVTables().GenerateClassData(getVtableLinkage(RD), RD);
continue;
}
@@ -714,20 +715,9 @@ void CodeGenModule::EmitGlobalDefinition(GlobalDecl GD) {
Context.getSourceManager(),
"Generating code for declaration");
- if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(D)) {
- getVtableInfo().MaybeEmitVtable(GD);
- if (MD->isVirtual() && MD->isOutOfLine() &&
- (!isa<CXXDestructorDecl>(D) || GD.getDtorType() != Dtor_Base)) {
- if (isa<CXXDestructorDecl>(D)) {
- GlobalDecl CanonGD(cast<CXXDestructorDecl>(D->getCanonicalDecl()),
- GD.getDtorType());
- BuildThunksForVirtual(CanonGD);
- } else {
- BuildThunksForVirtual(MD->getCanonicalDecl());
- }
- }
- }
-
+ if (isa<CXXMethodDecl>(D))
+ getVTables().EmitVTableRelatedData(GD);
+
if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(D))
EmitCXXConstructor(CD, GD.getCtorType());
else if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(D))
@@ -758,7 +748,7 @@ CodeGenModule::GetOrCreateLLVMFunction(llvm::StringRef MangledName,
if (WeakRefReferences.count(Entry)) {
const FunctionDecl *FD = cast_or_null<FunctionDecl>(D.getDecl());
if (FD && !FD->hasAttr<WeakAttr>())
- Entry->setLinkage(llvm::Function::ExternalLinkage);
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
WeakRefReferences.erase(Entry);
}
@@ -873,7 +863,7 @@ CodeGenModule::GetOrCreateLLVMGlobal(llvm::StringRef MangledName,
if (Entry) {
if (WeakRefReferences.count(Entry)) {
if (D && !D->hasAttr<WeakAttr>())
- Entry->setLinkage(llvm::Function::ExternalLinkage);
+ Entry->setLinkage(llvm::Function::ExternalLinkage);
WeakRefReferences.erase(Entry);
}
@@ -1255,9 +1245,9 @@ static void ReplaceUsesOfNonProtoTypeWithRealFunction(llvm::GlobalValue *Old,
if (!CI->use_empty())
CI->replaceAllUsesWith(NewCall);
- // Copy any custom metadata attached with CI.
- if (llvm::MDNode *DbgNode = CI->getMetadata("dbg"))
- NewCall->setMetadata("dbg", DbgNode);
+ // Copy debug location attached to CI.
+ if (!CI->getDebugLoc().isUnknown())
+ NewCall->setDebugLoc(CI->getDebugLoc());
CI->eraseFromParent();
}
}
diff --git a/lib/CodeGen/CodeGenModule.h b/lib/CodeGen/CodeGenModule.h
index febb8560367b..3c57c0b8cbfc 100644
--- a/lib/CodeGen/CodeGenModule.h
+++ b/lib/CodeGen/CodeGenModule.h
@@ -31,7 +31,6 @@
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ValueHandle.h"
-#include <list>
namespace llvm {
class Module;
@@ -93,8 +92,8 @@ class CodeGenModule : public BlockModule {
CodeGenTypes Types;
MangleContext MangleCtx;
- /// VtableInfo - Holds information about C++ vtables.
- CGVtableInfo VtableInfo;
+ /// VTables - Holds information about C++ vtables.
+ CodeGenVTables VTables;
CGObjCRuntime* Runtime;
CGDebugInfo* DebugInfo;
@@ -181,7 +180,7 @@ public:
llvm::Module &getModule() const { return TheModule; }
CodeGenTypes &getTypes() { return Types; }
MangleContext &getMangleContext() { return MangleCtx; }
- CGVtableInfo &getVtableInfo() { return VtableInfo; }
+ CodeGenVTables &getVTables() { return VTables; }
Diagnostic &getDiags() const { return Diags; }
const llvm::TargetData &getTargetData() const { return TheTargetData; }
llvm::LLVMContext &getLLVMContext() { return VMContext; }
@@ -225,36 +224,18 @@ public:
/// for the given type.
llvm::Constant *GetAddrOfRTTIDescriptor(QualType Ty);
- llvm::Constant *GetAddrOfThunk(GlobalDecl GD,
- const ThunkAdjustment &ThisAdjustment);
- llvm::Constant *GetAddrOfCovariantThunk(GlobalDecl GD,
- const CovariantThunkAdjustment &ThisAdjustment);
- void BuildThunksForVirtual(GlobalDecl GD);
- void BuildThunksForVirtualRecursive(GlobalDecl GD, GlobalDecl BaseOGD);
+ /// GetAddrOfThunk - Get the address of the thunk for the given global decl.
+ llvm::Constant *GetAddrOfThunk(GlobalDecl GD, const ThunkInfo &Thunk);
/// GetWeakRefReference - Get a reference to the target of VD.
llvm::Constant *GetWeakRefReference(const ValueDecl *VD);
- /// BuildThunk - Build a thunk for the given method.
- llvm::Constant *BuildThunk(GlobalDecl GD, bool Extern,
- const ThunkAdjustment &ThisAdjustment);
-
- /// BuildCoVariantThunk - Build a thunk for the given method
- llvm::Constant *
- BuildCovariantThunk(const GlobalDecl &GD, bool Extern,
- const CovariantThunkAdjustment &Adjustment);
-
/// GetNonVirtualBaseClassOffset - Returns the offset from a derived class to
/// its base class. Returns null if the offset is 0.
llvm::Constant *
GetNonVirtualBaseClassOffset(const CXXRecordDecl *ClassDecl,
const CXXRecordDecl *BaseClassDecl);
- /// ComputeThunkAdjustment - Returns the two parts required to compute the
- /// offset for an object.
- ThunkAdjustment ComputeThunkAdjustment(const CXXRecordDecl *ClassDecl,
- const CXXRecordDecl *BaseClassDecl);
-
/// GetStringForStringLiteral - Return the appropriate bytes for a string
/// literal, properly padded to match the literal type. If only the address of
/// a constant is needed consider using GetAddrOfConstantStringLiteral.
@@ -523,6 +504,14 @@ private:
void EmitAnnotations(void);
+ /// EmitFundamentalRTTIDescriptor - Emit the RTTI descriptors for the
+ /// given type.
+ void EmitFundamentalRTTIDescriptor(QualType Type);
+
+ /// EmitFundamentalRTTIDescriptors - Emit the RTTI descriptors for the
+ /// builtin types.
+ void EmitFundamentalRTTIDescriptors();
+
/// EmitDeferred - Emit any needed decls for which code generation
/// was deferred.
void EmitDeferred(void);
diff --git a/lib/CodeGen/CodeGenTypes.cpp b/lib/CodeGen/CodeGenTypes.cpp
index 4feca4dd7d3d..f53dd83d7035 100644
--- a/lib/CodeGen/CodeGenTypes.cpp
+++ b/lib/CodeGen/CodeGenTypes.cpp
@@ -12,6 +12,8 @@
//===----------------------------------------------------------------------===//
#include "CodeGenTypes.h"
+#include "CGCall.h"
+#include "CGRecordLayout.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclObjC.h"
#include "clang/AST/DeclCXX.h"
@@ -20,10 +22,6 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Target/TargetData.h"
-
-#include "CGCall.h"
-#include "CGRecordLayoutBuilder.h"
-
using namespace clang;
using namespace CodeGen;
@@ -400,7 +398,6 @@ const llvm::Type *CodeGenTypes::ConvertNewType(QualType T) {
/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
-
// TagDecl's are not necessarily unique, instead use the (clang)
// type connected to the decl.
const Type *Key =
@@ -449,7 +446,7 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
}
// Layout fields.
- CGRecordLayout *Layout = CGRecordLayoutBuilder::ComputeLayout(*this, RD);
+ CGRecordLayout *Layout = ComputeRecordLayout(RD);
CGRecordLayouts[Key] = Layout;
const llvm::Type *ResultType = Layout->getLLVMType();
@@ -462,42 +459,11 @@ const llvm::Type *CodeGenTypes::ConvertTagDeclType(const TagDecl *TD) {
return ResultHolder.get();
}
-/// getLLVMFieldNo - Return llvm::StructType element number
-/// that corresponds to the field FD.
-unsigned CodeGenTypes::getLLVMFieldNo(const FieldDecl *FD) {
- assert(!FD->isBitField() && "Don't use getLLVMFieldNo on bit fields!");
-
- llvm::DenseMap<const FieldDecl*, unsigned>::iterator I = FieldInfo.find(FD);
- assert (I != FieldInfo.end() && "Unable to find field info");
- return I->second;
-}
-
-/// addFieldInfo - Assign field number to field FD.
-void CodeGenTypes::addFieldInfo(const FieldDecl *FD, unsigned No) {
- FieldInfo[FD] = No;
-}
-
-/// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field FD.
-CodeGenTypes::BitFieldInfo CodeGenTypes::getBitFieldInfo(const FieldDecl *FD) {
- llvm::DenseMap<const FieldDecl *, BitFieldInfo>::iterator
- I = BitFields.find(FD);
- assert (I != BitFields.end() && "Unable to find bitfield info");
- return I->second;
-}
-
-/// addBitFieldInfo - Assign a start bit and a size to field FD.
-void CodeGenTypes::addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
- unsigned Start, unsigned Size) {
- BitFields.insert(std::make_pair(FD, BitFieldInfo(FieldNo, Start, Size)));
-}
-
/// getCGRecordLayout - Return record layout info for the given llvm::Type.
const CGRecordLayout &
-CodeGenTypes::getCGRecordLayout(const TagDecl *TD) const {
+CodeGenTypes::getCGRecordLayout(const RecordDecl *TD) const {
const Type *Key = Context.getTagDeclType(TD).getTypePtr();
- llvm::DenseMap<const Type*, CGRecordLayout *>::const_iterator I
- = CGRecordLayouts.find(Key);
- assert (I != CGRecordLayouts.end()
- && "Unable to find record layout information for type");
- return *I->second;
+ const CGRecordLayout *Layout = CGRecordLayouts.lookup(Key);
+ assert(Layout && "Unable to find record layout information for type");
+ return *Layout;
}
diff --git a/lib/CodeGen/CodeGenTypes.h b/lib/CodeGen/CodeGenTypes.h
index b2912efb3402..9b74106d61ce 100644
--- a/lib/CodeGen/CodeGenTypes.h
+++ b/lib/CodeGen/CodeGenTypes.h
@@ -16,7 +16,6 @@
#include "llvm/Module.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/SmallSet.h"
#include <vector>
#include "CGCall.h"
@@ -52,37 +51,9 @@ namespace clang {
typedef CanQual<Type> CanQualType;
namespace CodeGen {
+ class CGRecordLayout;
class CodeGenTypes;
- /// CGRecordLayout - This class handles struct and union layout info while
- /// lowering AST types to LLVM types.
- class CGRecordLayout {
- CGRecordLayout(); // DO NOT IMPLEMENT
-
- /// LLVMType - The LLVMType corresponding to this record layout.
- const llvm::Type *LLVMType;
-
- /// ContainsPointerToDataMember - Whether one of the fields in this record
- /// layout is a pointer to data member, or a struct that contains pointer to
- /// data member.
- bool ContainsPointerToDataMember;
-
- public:
- CGRecordLayout(const llvm::Type *T, bool ContainsPointerToDataMember)
- : LLVMType(T), ContainsPointerToDataMember(ContainsPointerToDataMember) { }
-
- /// getLLVMType - Return llvm type associated with this record.
- const llvm::Type *getLLVMType() const {
- return LLVMType;
- }
-
- /// containsPointerToDataMember - Whether this struct contains pointers to
- /// data members.
- bool containsPointerToDataMember() const {
- return ContainsPointerToDataMember;
- }
- };
-
/// CodeGenTypes - This class organizes the cross-module state that is used
/// while lowering AST types to LLVM types.
class CodeGenTypes {
@@ -107,32 +78,12 @@ class CodeGenTypes {
/// CGRecordLayouts - This maps llvm struct type with corresponding
/// record layout info.
- /// FIXME : If CGRecordLayout is less than 16 bytes then use
- /// inline it in the map.
llvm::DenseMap<const Type*, CGRecordLayout *> CGRecordLayouts;
- /// FieldInfo - This maps struct field with corresponding llvm struct type
- /// field no. This info is populated by record organizer.
- llvm::DenseMap<const FieldDecl *, unsigned> FieldInfo;
-
/// FunctionInfos - Hold memoized CGFunctionInfo results.
llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
-public:
- struct BitFieldInfo {
- BitFieldInfo(unsigned FieldNo,
- unsigned Start,
- unsigned Size)
- : FieldNo(FieldNo), Start(Start), Size(Size) {}
-
- unsigned FieldNo;
- unsigned Start;
- unsigned Size;
- };
-
private:
- llvm::DenseMap<const FieldDecl *, BitFieldInfo> BitFields;
-
/// TypeCache - This map keeps cache of llvm::Types (through PATypeHolder)
/// and maps llvm::Types to corresponding clang::Type. llvm::PATypeHolder is
/// used instead of llvm::Type because it allows us to bypass potential
@@ -178,11 +129,7 @@ public:
/// and/or incomplete argument types, this will return the opaque type.
const llvm::Type *GetFunctionTypeForVtable(const CXXMethodDecl *MD);
- const CGRecordLayout &getCGRecordLayout(const TagDecl*) const;
-
- /// getLLVMFieldNo - Return llvm::StructType element number
- /// that corresponds to the field FD.
- unsigned getLLVMFieldNo(const FieldDecl *FD);
+ const CGRecordLayout &getCGRecordLayout(const RecordDecl*) const;
/// UpdateCompletedType - When we find the full definition for a TagDecl,
/// replace the 'opaque' type we previously made for it if applicable.
@@ -202,7 +149,7 @@ public:
const CGFunctionInfo &getFunctionInfo(const CallArgList &Args,
const FunctionType *Ty) {
return getFunctionInfo(Ty->getResultType(), Args,
- Ty->getCallConv(), Ty->getNoReturnAttr());
+ Ty->getExtInfo());
}
const CGFunctionInfo &getFunctionInfo(CanQual<FunctionProtoType> Ty);
const CGFunctionInfo &getFunctionInfo(CanQual<FunctionNoProtoType> Ty);
@@ -216,33 +163,22 @@ public:
/// specified, the "C" calling convention will be used.
const CGFunctionInfo &getFunctionInfo(QualType ResTy,
const CallArgList &Args,
- CallingConv CC,
- bool NoReturn);
+ const FunctionType::ExtInfo &Info);
const CGFunctionInfo &getFunctionInfo(QualType ResTy,
const FunctionArgList &Args,
- CallingConv CC,
- bool NoReturn);
+ const FunctionType::ExtInfo &Info);
/// Retrieves the ABI information for the given function signature.
///
/// \param ArgTys - must all actually be canonical as params
const CGFunctionInfo &getFunctionInfo(CanQualType RetTy,
const llvm::SmallVectorImpl<CanQualType> &ArgTys,
- CallingConv CC,
- bool NoReturn);
+ const FunctionType::ExtInfo &Info);
-public: // These are internal details of CGT that shouldn't be used externally.
- /// addFieldInfo - Assign field number to field FD.
- void addFieldInfo(const FieldDecl *FD, unsigned FieldNo);
-
- /// addBitFieldInfo - Assign a start bit and a size to field FD.
- void addBitFieldInfo(const FieldDecl *FD, unsigned FieldNo,
- unsigned Start, unsigned Size);
-
- /// getBitFieldInfo - Return the BitFieldInfo that corresponds to the field
- /// FD.
- BitFieldInfo getBitFieldInfo(const FieldDecl *FD);
+ /// \brief Compute a new LLVM record layout object for the given record.
+ CGRecordLayout *ComputeRecordLayout(const RecordDecl *D);
+public: // These are internal details of CGT that shouldn't be used externally.
/// ConvertTagDeclType - Lay out a tagged decl type like struct or union or
/// enum.
const llvm::Type *ConvertTagDeclType(const TagDecl *TD);
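Note: the three getFunctionInfo overloads above now take a single FunctionType::ExtInfo in place of the old (CallingConv, bool NoReturn) pair, which is how this patch threads regparm through without growing the parameter lists again. A minimal call-site sketch, assuming the constructor order (NoReturn, RegParm, CC) that the PCHReader hunk below uses; CGT, ResultTy and Args are placeholder names:

    FunctionType::ExtInfo Info(/*NoReturn=*/false, /*RegParm=*/0, CC_Default);
    const CGFunctionInfo &FI = CGT.getFunctionInfo(ResultTy, Args, Info);
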
diff --git a/lib/CodeGen/Mangle.cpp b/lib/CodeGen/Mangle.cpp
index f2a73f1c2d12..077db7c26852 100644
--- a/lib/CodeGen/Mangle.cpp
+++ b/lib/CodeGen/Mangle.cpp
@@ -102,7 +102,7 @@ public:
llvm::raw_svector_ostream &getStream() { return Out; }
void mangle(const NamedDecl *D, llvm::StringRef Prefix = "_Z");
- void mangleCallOffset(const ThunkAdjustment &Adjustment);
+ void mangleCallOffset(int64_t NonVirtual, int64_t Virtual);
void mangleNumber(int64_t Number);
void mangleFunctionEncoding(const FunctionDecl *FD);
void mangleName(const NamedDecl *ND);
@@ -439,23 +439,23 @@ void CXXNameMangler::mangleNumber(int64_t Number) {
Out << Number;
}
-void CXXNameMangler::mangleCallOffset(const ThunkAdjustment &Adjustment) {
+void CXXNameMangler::mangleCallOffset(int64_t NonVirtual, int64_t Virtual) {
// <call-offset> ::= h <nv-offset> _
// ::= v <v-offset> _
// <nv-offset> ::= <offset number> # non-virtual base override
// <v-offset> ::= <offset number> _ <virtual offset number>
// # virtual base override, with vcall offset
- if (!Adjustment.Virtual) {
+ if (!Virtual) {
Out << 'h';
- mangleNumber(Adjustment.NonVirtual);
+ mangleNumber(NonVirtual);
Out << '_';
return;
}
Out << 'v';
- mangleNumber(Adjustment.NonVirtual);
+ mangleNumber(NonVirtual);
Out << '_';
- mangleNumber(Adjustment.Virtual);
+ mangleNumber(Virtual);
Out << '_';
}
@@ -1131,15 +1131,20 @@ void CXXNameMangler::mangleType(const ComplexType *T) {
}
// GNU extension: vector types
+// <type> ::= <vector-type>
+// <vector-type> ::= Dv <positive dimension number> _ <element type>
+// ::= Dv [<dimension expression>] _ <element type>
void CXXNameMangler::mangleType(const VectorType *T) {
- Out << "U8__vector";
+ Out << "Dv" << T->getNumElements() << '_';
mangleType(T->getElementType());
}
void CXXNameMangler::mangleType(const ExtVectorType *T) {
mangleType(static_cast<const VectorType*>(T));
}
void CXXNameMangler::mangleType(const DependentSizedExtVectorType *T) {
- Out << "U8__vector";
+ Out << "Dv";
+ mangleExpression(T->getSizeExpr());
+ Out << '_';
mangleType(T->getElementType());
}
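Note: with this hunk, vector types move from the old U8__vector vendor string to the documented Dv form. An illustration, not taken from the patch:

    typedef float __v4sf __attribute__((__vector_size__(16)));  // 4 x float
    // as a parameter type, old mangling fragment: U8__vectorf
    // new mangling fragment:                      Dv4_f   (4 elements, 'f' = float)
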
@@ -1159,7 +1164,7 @@ void CXXNameMangler::mangleType(const TemplateSpecializationType *T) {
mangleName(TD, T->getArgs(), T->getNumArgs());
}
-void CXXNameMangler::mangleType(const TypenameType *T) {
+void CXXNameMangler::mangleType(const DependentNameType *T) {
// Typename types are always nested
Out << 'N';
mangleUnresolvedScope(T->getQualifier());
@@ -1451,8 +1456,9 @@ void CXXNameMangler::mangleExpression(const Expr *E) {
// It isn't clear that we ever actually want to have such a
// nested-name-specifier; why not just represent it as a typename type?
if (!QTy && NNS->getAsIdentifier() && NNS->getPrefix()) {
- QTy = getASTContext().getTypenameType(NNS->getPrefix(),
- NNS->getAsIdentifier())
+ QTy = getASTContext().getDependentNameType(ETK_Typename,
+ NNS->getPrefix(),
+ NNS->getAsIdentifier())
.getTypePtr();
}
assert(QTy && "Qualifier was not type!");
@@ -1862,52 +1868,50 @@ void MangleContext::mangleCXXDtor(const CXXDestructorDecl *D, CXXDtorType Type,
Mangler.mangle(D);
}
-/// \brief Mangles the a thunk with the offset n for the declaration D and
-/// emits that name to the given output stream.
-void MangleContext::mangleThunk(const FunctionDecl *FD,
- const ThunkAdjustment &ThisAdjustment,
+void MangleContext::mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
llvm::SmallVectorImpl<char> &Res) {
- assert(!isa<CXXDestructorDecl>(FD) &&
- "Use mangleCXXDtor for destructor decls!");
-
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
+ // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
+ // # base is the nominal target function of thunk
+ // # first call-offset is 'this' adjustment
+ // # second call-offset is result adjustment
+
+ assert(!isa<CXXDestructorDecl>(MD) &&
+ "Use mangleCXXDtor for destructor decls!");
+
CXXNameMangler Mangler(*this, Res);
Mangler.getStream() << "_ZT";
- Mangler.mangleCallOffset(ThisAdjustment);
- Mangler.mangleFunctionEncoding(FD);
-}
-
-void MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *D,
- CXXDtorType Type,
- const ThunkAdjustment &ThisAdjustment,
- llvm::SmallVectorImpl<char> &Res) {
+ if (!Thunk.Return.isEmpty())
+ Mangler.getStream() << 'c';
+
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(Thunk.This.NonVirtual, Thunk.This.VCallOffsetOffset);
+
+ // Mangle the return pointer adjustment if there is one.
+ if (!Thunk.Return.isEmpty())
+ Mangler.mangleCallOffset(Thunk.Return.NonVirtual,
+ Thunk.Return.VBaseOffsetOffset);
+
+ Mangler.mangleFunctionEncoding(MD);
+}
+
+void
+MangleContext::mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
+ llvm::SmallVectorImpl<char> &Res) {
// <special-name> ::= T <call-offset> <base encoding>
// # base is the nominal target function of thunk
- CXXNameMangler Mangler(*this, Res, D, Type);
+
+ CXXNameMangler Mangler(*this, Res, DD, Type);
Mangler.getStream() << "_ZT";
- Mangler.mangleCallOffset(ThisAdjustment);
- Mangler.mangleFunctionEncoding(D);
-}
-/// \brief Mangles the a covariant thunk for the declaration D and emits that
-/// name to the given output stream.
-void
-MangleContext::mangleCovariantThunk(const FunctionDecl *FD,
- const CovariantThunkAdjustment& Adjustment,
- llvm::SmallVectorImpl<char> &Res) {
- assert(!isa<CXXDestructorDecl>(FD) &&
- "No such thing as a covariant thunk for a destructor!");
+ // Mangle the 'this' pointer adjustment.
+ Mangler.mangleCallOffset(ThisAdjustment.NonVirtual,
+ ThisAdjustment.VCallOffsetOffset);
- // <special-name> ::= Tc <call-offset> <call-offset> <base encoding>
- // # base is the nominal target function of thunk
- // # first call-offset is 'this' adjustment
- // # second call-offset is result adjustment
- CXXNameMangler Mangler(*this, Res);
- Mangler.getStream() << "_ZTc";
- Mangler.mangleCallOffset(Adjustment.ThisAdjustment);
- Mangler.mangleCallOffset(Adjustment.ReturnAdjustment);
- Mangler.mangleFunctionEncoding(FD);
+ Mangler.mangleFunctionEncoding(DD);
}
/// mangleGuardVariable - Returns the mangled name for a guard variable
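Note: to make the reworked thunk mangling concrete, a hedged illustration with invented names: a 'this'-adjusting thunk for void Derived::f() with a non-virtual adjustment of -8 and no return adjustment mangles as

    _ZThn8_N7Derived1fEv   // "_ZT" 'h' <nv-offset> '_' <base encoding>; mangleNumber prints -8 as "n8"

and a covariant thunk adds the 'c' prefix plus a second <call-offset> for the return adjustment, e.g. _ZTchn8_h16_N7Derived1gEv. The destructor path (mangleCXXDtorThunk) only ever mangles the 'this' adjustment, consistent with the old assertion that covariant destructor thunks do not exist.
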
diff --git a/lib/CodeGen/Mangle.h b/lib/CodeGen/Mangle.h
index 62656b95da1b..91a5e97b69c9 100644
--- a/lib/CodeGen/Mangle.h
+++ b/lib/CodeGen/Mangle.h
@@ -28,13 +28,14 @@ namespace clang {
class ASTContext;
class CXXConstructorDecl;
class CXXDestructorDecl;
+ class CXXMethodDecl;
class FunctionDecl;
class NamedDecl;
class VarDecl;
namespace CodeGen {
- class CovariantThunkAdjustment;
- class ThunkAdjustment;
+ struct ThisAdjustment;
+ struct ThunkInfo;
/// MangleBuffer - a convenient class for storing a name which is
/// either the result of a mangling or is a constant string with
@@ -91,15 +92,12 @@ public:
bool shouldMangleDeclName(const NamedDecl *D);
void mangleName(const NamedDecl *D, llvm::SmallVectorImpl<char> &);
- void mangleThunk(const FunctionDecl *FD,
- const ThunkAdjustment &ThisAdjustment,
+ void mangleThunk(const CXXMethodDecl *MD,
+ const ThunkInfo &Thunk,
llvm::SmallVectorImpl<char> &);
- void mangleCXXDtorThunk(const CXXDestructorDecl *D, CXXDtorType Type,
- const ThunkAdjustment &ThisAdjustment,
+ void mangleCXXDtorThunk(const CXXDestructorDecl *DD, CXXDtorType Type,
+ const ThisAdjustment &ThisAdjustment,
llvm::SmallVectorImpl<char> &);
- void mangleCovariantThunk(const FunctionDecl *FD,
- const CovariantThunkAdjustment& Adjustment,
- llvm::SmallVectorImpl<char> &);
void mangleGuardVariable(const VarDecl *D, llvm::SmallVectorImpl<char> &);
void mangleCXXVtable(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
void mangleCXXVTT(const CXXRecordDecl *RD, llvm::SmallVectorImpl<char> &);
diff --git a/lib/Driver/CMakeLists.txt b/lib/Driver/CMakeLists.txt
index 60d8e9c927f5..7efcd8a8dde4 100644
--- a/lib/Driver/CMakeLists.txt
+++ b/lib/Driver/CMakeLists.txt
@@ -10,8 +10,8 @@ add_clang_library(clangDriver
DriverOptions.cpp
HostInfo.cpp
Job.cpp
- OptTable.cpp
Option.cpp
+ OptTable.cpp
Phases.cpp
Tool.cpp
ToolChain.cpp
diff --git a/lib/Driver/Driver.cpp b/lib/Driver/Driver.cpp
index acfff386f485..921147f7a09b 100644
--- a/lib/Driver/Driver.cpp
+++ b/lib/Driver/Driver.cpp
@@ -45,7 +45,8 @@ using namespace clang;
Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
llvm::StringRef _DefaultHostTriple,
llvm::StringRef _DefaultImageName,
- bool IsProduction, Diagnostic &_Diags)
+ bool IsProduction, bool CXXIsProduction,
+ Diagnostic &_Diags)
: Opts(createDriverOptTable()), Diags(_Diags),
Name(_Name), Dir(_Dir), DefaultHostTriple(_DefaultHostTriple),
DefaultImageName(_DefaultImageName),
@@ -66,7 +67,8 @@ Driver::Driver(llvm::StringRef _Name, llvm::StringRef _Dir,
CCCClangArchs.insert(llvm::Triple::x86_64);
CCCClangArchs.insert(llvm::Triple::arm);
- CCCUseClangCXX = false;
+ if (!CXXIsProduction)
+ CCCUseClangCXX = false;
}
// Compute the path to the resource directory.
@@ -172,6 +174,8 @@ Compilation *Driver::BuildCompilation(int argc, const char **argv) {
HostTriple = A->getValue(*Args);
if (const Arg *A = Args->getLastArg(options::OPT_ccc_install_dir))
Dir = A->getValue(*Args);
+ if (const Arg *A = Args->getLastArg(options::OPT_B))
+ PrefixDir = A->getValue(*Args);
Host = GetHostInfo(HostTriple);
@@ -1088,6 +1092,15 @@ const char *Driver::GetNamedOutputPath(Compilation &C,
}
std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking up file paths.
+ if (!PrefixDir.empty()) {
+ llvm::sys::Path P(PrefixDir);
+ P.appendComponent(Name);
+ if (P.exists())
+ return P.str();
+ }
+
const ToolChain::path_list &List = TC.getFilePaths();
for (ToolChain::path_list::const_iterator
it = List.begin(), ie = List.end(); it != ie; ++it) {
@@ -1102,6 +1115,15 @@ std::string Driver::GetFilePath(const char *Name, const ToolChain &TC) const {
std::string Driver::GetProgramPath(const char *Name, const ToolChain &TC,
bool WantFile) const {
+ // Respect a limited subset of the '-Bprefix' functionality in GCC by
+ // attempting to use this prefix when looking up program paths.
+ if (!PrefixDir.empty()) {
+ llvm::sys::Path P(PrefixDir);
+ P.appendComponent(Name);
+ if (WantFile ? P.exists() : P.canExecute())
+ return P.str();
+ }
+
const ToolChain::path_list &List = TC.getProgramPaths();
for (ToolChain::path_list::const_iterator
it = List.begin(), ie = List.end(); it != ie; ++it) {
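Note: in practice this gives GCC-style cross-toolchain layouts a working subset of -B. With -B/opt/cross/bin (a hypothetical prefix), GetProgramPath("as", ...) probes /opt/cross/bin/as first, requiring it to be executable unless WantFile is set, and GetFilePath applies the same prefix to file lookups; both fall back to the toolchain's own path lists when the probe misses. Only the last -B prefix on the command line is honored.
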
diff --git a/lib/Driver/HostInfo.cpp b/lib/Driver/HostInfo.cpp
index d8e086db98d1..d9e2e379745f 100644
--- a/lib/Driver/HostInfo.cpp
+++ b/lib/Driver/HostInfo.cpp
@@ -144,13 +144,15 @@ ToolChain *DarwinHostInfo::CreateToolChain(const ArgList &Args,
TCTriple.setArch(Arch);
// If we recognized the arch, match it to the toolchains we support.
- if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
+ const char *UseNewToolChain = ::getenv("CCC_ENABLE_NEW_DARWIN_TOOLCHAIN");
+ if (UseNewToolChain ||
+ Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb) {
+ TC = new toolchains::DarwinClang(*this, TCTriple, DarwinVersion);
+ } else if (Arch == llvm::Triple::x86 || Arch == llvm::Triple::x86_64) {
// We still use the legacy DarwinGCC toolchain on X86.
TC = new toolchains::DarwinGCC(*this, TCTriple, DarwinVersion,
GCCVersion);
- } else if (Arch == llvm::Triple::arm || Arch == llvm::Triple::thumb)
- TC = new toolchains::DarwinClang(*this, TCTriple, DarwinVersion);
- else
+ } else
TC = new toolchains::Darwin_Generic_GCC(*this, TCTriple);
}
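Note: toolchain selection thus gains an escape hatch: if CCC_ENABLE_NEW_DARWIN_TOOLCHAIN is set in the environment (to any value), the DarwinClang toolchain is used even on x86/x86_64, which otherwise still default to the legacy DarwinGCC toolchain; ARM and Thumb now take the DarwinClang path unconditionally.
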
diff --git a/lib/Driver/Tools.cpp b/lib/Driver/Tools.cpp
index 1c34df05b1c5..2a3799152ac8 100644
--- a/lib/Driver/Tools.cpp
+++ b/lib/Driver/Tools.cpp
@@ -56,6 +56,35 @@ static void CheckCodeGenerationOptions(const Driver &D, const ArgList &Args) {
<< A->getAsString(Args) << "-static";
}
+// Quote target names for inclusion in GNU Make dependency files.
+// Only the characters '$', '#', ' ', '\t' are quoted.
+static void QuoteTarget(llvm::StringRef Target,
+ llvm::SmallVectorImpl<char> &Res) {
+ for (unsigned i = 0, e = Target.size(); i != e; ++i) {
+ switch (Target[i]) {
+ case ' ':
+ case '\t':
+ // Escape the preceding backslashes
+ for (int j = i - 1; j >= 0 && Target[j] == '\\'; --j)
+ Res.push_back('\\');
+
+ // Escape the space/tab
+ Res.push_back('\\');
+ break;
+ case '$':
+ Res.push_back('$');
+ break;
+ case '#':
+ Res.push_back('\\');
+ break;
+ default:
+ break;
+ }
+
+ Res.push_back(Target[i]);
+ }
+}
+
void Clang::AddPreprocessingOptions(const Driver &D,
const ArgList &Args,
ArgStringList &CmdArgs,
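Note: QuoteTarget implements just enough of GNU Make's quoting for dependency targets: '$' is doubled, while '#', space and tab are backslash-escaped, with any backslashes preceding a space or tab doubled first so the escape stays unambiguous. Illustrative inputs and outputs, derived from the switch above:

    // foo bar.o   ->  foo\ bar.o
    // a\ b.o      ->  a\\\ b.o     (the real backslash is doubled, then the space escaped)
    // a$b.o       ->  a$$b.o
    // lib#1.o     ->  lib\#1.o

The hunk further down reuses this to rewrite every -MQ <target> into -MT <quoted target>, which is why -MQ also disappears from the unsupported-options check later in this file.
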
@@ -91,9 +120,7 @@ void Clang::AddPreprocessingOptions(const Driver &D,
CmdArgs.push_back("-dependency-file");
CmdArgs.push_back(DepFile);
- // Add an -MT option if the user didn't specify their own.
- //
- // FIXME: This should use -MQ, when we support it.
+ // Add a default target if one wasn't specified.
if (!Args.hasArg(options::OPT_MT) && !Args.hasArg(options::OPT_MQ)) {
const char *DepTarget;
@@ -114,7 +141,9 @@ void Clang::AddPreprocessingOptions(const Driver &D,
}
CmdArgs.push_back("-MT");
- CmdArgs.push_back(DepTarget);
+ llvm::SmallString<128> Quoted;
+ QuoteTarget(DepTarget, Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
}
if (A->getOption().matches(options::OPT_M) ||
@@ -123,7 +152,25 @@ void Clang::AddPreprocessingOptions(const Driver &D,
}
Args.AddLastArg(CmdArgs, options::OPT_MP);
- Args.AddAllArgs(CmdArgs, options::OPT_MT);
+
+ // Convert all -MQ <target> args to -MT <quoted target>
+ for (arg_iterator it = Args.filtered_begin(options::OPT_MT,
+ options::OPT_MQ),
+ ie = Args.filtered_end(); it != ie; ++it) {
+
+ it->claim();
+
+ if (it->getOption().matches(options::OPT_MQ)) {
+ CmdArgs.push_back("-MT");
+ llvm::SmallString<128> Quoted;
+ QuoteTarget(it->getValue(Args), Quoted);
+ CmdArgs.push_back(Args.MakeArgString(Quoted));
+
+ // -MT flag - no change
+ } else {
+ it->render(Args, CmdArgs);
+ }
+ }
// Add -i* options, and automatically translate to
// -include-pch/-include-pth for transparent PCH support. It's
@@ -796,6 +843,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
else
CmdArgs.push_back("plist");
+ // Disable the presentation of standard compiler warnings when
+ // using --analyze. We only want to show static analyzer diagnostics
+ // or frontend errors.
+ CmdArgs.push_back("-w");
+
// Add -Xanalyzer arguments when running as analyzer.
Args.AddAllArgValues(CmdArgs, options::OPT_Xanalyzer);
}
@@ -925,7 +977,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Arg *Unsupported;
if ((Unsupported = Args.getLastArg(options::OPT_MG)) ||
- (Unsupported = Args.getLastArg(options::OPT_MQ)) ||
(Unsupported = Args.getLastArg(options::OPT_iframework)) ||
(Unsupported = Args.getLastArg(options::OPT_fshort_enums)))
D.Diag(clang::diag::err_drv_clang_unsupported)
@@ -943,6 +994,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-g");
Args.AddLastArg(CmdArgs, options::OPT_nostdinc);
+ Args.AddLastArg(CmdArgs, options::OPT_nostdincxx);
Args.AddLastArg(CmdArgs, options::OPT_nobuiltininc);
// Pass the path to compiler resource files.
@@ -1701,6 +1753,7 @@ void darwin::CC1::AddCPPUniqueOptionsArgs(const ArgList &Args,
if (!Args.hasArg(options::OPT_Q))
CmdArgs.push_back("-quiet");
Args.AddAllArgs(CmdArgs, options::OPT_nostdinc);
+ Args.AddAllArgs(CmdArgs, options::OPT_nostdincxx);
Args.AddLastArg(CmdArgs, options::OPT_v);
Args.AddAllArgs(CmdArgs, options::OPT_I_Group, options::OPT_F);
Args.AddLastArg(CmdArgs, options::OPT_P);
diff --git a/lib/Frontend/ASTUnit.cpp b/lib/Frontend/ASTUnit.cpp
index 935c41524370..7243f709cc65 100644
--- a/lib/Frontend/ASTUnit.cpp
+++ b/lib/Frontend/ASTUnit.cpp
@@ -378,7 +378,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(const char **ArgBegin,
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver("clang", "/", llvm::sys::getHostTriple(),
- "a.out", false, Diags);
+ "a.out", false, false, Diags);
// Don't check that inputs exist, they have been remapped.
TheDriver.setCheckInputsExist(false);
diff --git a/lib/Frontend/CompilerInstance.cpp b/lib/Frontend/CompilerInstance.cpp
index 7b4932d787a1..879e9f681de9 100644
--- a/lib/Frontend/CompilerInstance.cpp
+++ b/lib/Frontend/CompilerInstance.cpp
@@ -29,6 +29,7 @@
#include "llvm/LLVMContext.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/raw_ostream.h"
+#include "llvm/ADT/Statistic.h"
#include "llvm/Support/Timer.h"
#include "llvm/System/Host.h"
#include "llvm/System/Path.h"
@@ -479,6 +480,9 @@ bool CompilerInstance::ExecuteAction(FrontendAction &Act) {
if (getFrontendOpts().ShowTimers)
createFrontendTimer();
+ if (getFrontendOpts().ShowStats)
+ llvm::EnableStatistics();
+
for (unsigned i = 0, e = getFrontendOpts().Inputs.size(); i != e; ++i) {
const std::string &InFile = getFrontendOpts().Inputs[i].second;
diff --git a/lib/Frontend/CompilerInvocation.cpp b/lib/Frontend/CompilerInvocation.cpp
index 6e18f346d561..dc2c6bf3614a 100644
--- a/lib/Frontend/CompilerInvocation.cpp
+++ b/lib/Frontend/CompilerInvocation.cpp
@@ -436,6 +436,8 @@ static void HeaderSearchOptsToArgs(const HeaderSearchOptions &Opts,
}
if (!Opts.UseStandardIncludes)
Res.push_back("-nostdinc");
+ if (!Opts.UseStandardCXXIncludes)
+ Res.push_back("-nostdinc++");
if (Opts.Verbose)
Res.push_back("-v");
}
@@ -1014,6 +1016,7 @@ static void ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args) {
Opts.Verbose = Args.hasArg(OPT_v);
Opts.UseBuiltinIncludes = !Args.hasArg(OPT_nobuiltininc);
Opts.UseStandardIncludes = !Args.hasArg(OPT_nostdinc);
+ Opts.UseStandardCXXIncludes = !Args.hasArg(OPT_nostdincxx);
Opts.ResourceDir = getLastArgValue(Args, OPT_resource_dir);
// Add -I... and -F... options in order.
diff --git a/lib/Frontend/DependencyFile.cpp b/lib/Frontend/DependencyFile.cpp
index de2b056dc7ce..14aee3559c9e 100644
--- a/lib/Frontend/DependencyFile.cpp
+++ b/lib/Frontend/DependencyFile.cpp
@@ -48,14 +48,15 @@ public:
IncludeSystemHeaders(Opts.IncludeSystemHeaders),
PhonyTarget(Opts.UsePhonyTargets) {}
- ~DependencyFileCallback() {
+ virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType);
+
+ virtual void EndOfMainFile() {
OutputDependencyFile();
OS->flush();
delete OS;
+ OS = 0;
}
-
- virtual void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType);
};
}
diff --git a/lib/Frontend/FixItRewriter.cpp b/lib/Frontend/FixItRewriter.cpp
index 0b04cf2b44d3..20d452e76a64 100644
--- a/lib/Frontend/FixItRewriter.cpp
+++ b/lib/Frontend/FixItRewriter.cpp
@@ -93,7 +93,7 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
// completely ignore it, even if it's an error: fix-it locations
// are meant to perform specific fix-ups even in the presence of
// other errors.
- if (Info.getNumCodeModificationHints() == 0)
+ if (Info.getNumFixItHints() == 0)
return;
// See if the location of the error is one that matches what the
@@ -122,10 +122,10 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
// Make sure that we can perform all of the modifications in
// this diagnostic.
- bool CanRewrite = Info.getNumCodeModificationHints() > 0;
- for (unsigned Idx = 0, Last = Info.getNumCodeModificationHints();
+ bool CanRewrite = Info.getNumFixItHints() > 0;
+ for (unsigned Idx = 0, Last = Info.getNumFixItHints();
Idx < Last; ++Idx) {
- const CodeModificationHint &Hint = Info.getCodeModificationHint(Idx);
+ const FixItHint &Hint = Info.getFixItHint(Idx);
if (Hint.RemoveRange.isValid() &&
Rewrite.getRangeSize(Hint.RemoveRange) == -1) {
CanRewrite = false;
@@ -140,7 +140,7 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
}
if (!CanRewrite) {
- if (Info.getNumCodeModificationHints() > 0)
+ if (Info.getNumFixItHints() > 0)
Diag(Info.getLocation(), diag::note_fixit_in_macro);
// If this was an error, refuse to perform any rewriting.
@@ -152,9 +152,9 @@ void FixItRewriter::HandleDiagnostic(Diagnostic::Level DiagLevel,
}
bool Failed = false;
- for (unsigned Idx = 0, Last = Info.getNumCodeModificationHints();
+ for (unsigned Idx = 0, Last = Info.getNumFixItHints();
Idx < Last; ++Idx) {
- const CodeModificationHint &Hint = Info.getCodeModificationHint(Idx);
+ const FixItHint &Hint = Info.getFixItHint(Idx);
if (!Hint.RemoveRange.isValid()) {
// We're adding code.
if (Rewrite.InsertTextBefore(Hint.InsertionLoc, Hint.CodeToInsert))
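Note: the CodeModificationHint to FixItHint rename here is purely mechanical (the hint type, the counts and the accessors), and diagnostic producers follow the same naming. A hedged sketch of the producer side, assuming a CreateInsertion factory on FixItHint; the diagnostic ID is invented:

    Diag(Loc, diag::warn_missing_semicolon)      // hypothetical diagnostic ID
        << FixItHint::CreateInsertion(Loc, ";");
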
diff --git a/lib/Frontend/FrontendAction.cpp b/lib/Frontend/FrontendAction.cpp
index 66df7a61917b..110612d03b58 100644
--- a/lib/Frontend/FrontendAction.cpp
+++ b/lib/Frontend/FrontendAction.cpp
@@ -169,6 +169,10 @@ void FrontendAction::EndSourceFile() {
CI.setASTContext(0);
}
+ // Inform the preprocessor we are done.
+ if (CI.hasPreprocessor())
+ CI.getPreprocessor().EndSourceFile();
+
if (CI.getFrontendOpts().ShowStats) {
llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
CI.getPreprocessor().PrintStats();
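Note: this EndSourceFile call is what drives the DependencyFileCallback change above: preprocessor callbacks now receive a deterministic EndOfMainFile notification instead of depending on destructor order, so the dependency file is written at a well-defined point. A minimal sketch of hooking the new callback, assuming the PPCallbacks interface those overrides come from; MyCallbacks is a placeholder name:

    #include "clang/Lex/PPCallbacks.h"

    class MyCallbacks : public clang::PPCallbacks {
      virtual void EndOfMainFile() {
        // Flush per-translation-unit output here rather than in the destructor.
      }
    };
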
diff --git a/lib/Frontend/HTMLDiagnostics.cpp b/lib/Frontend/HTMLDiagnostics.cpp
index da99cb8b7b89..022a34d0bd4f 100644
--- a/lib/Frontend/HTMLDiagnostics.cpp
+++ b/lib/Frontend/HTMLDiagnostics.cpp
@@ -484,8 +484,7 @@ void HTMLDiagnostics::HandlePiece(Rewriter& R, FileID BugFileID,
// FIXME: This code is disabled because it seems to mangle the HTML
// output. I'm leaving it here because it's generally the right idea,
// but needs some help from someone more familiar with the rewriter.
- for (const CodeModificationHint *Hint = P.code_modifications_begin(),
- *HintEnd = P.code_modifications_end();
+ for (const FixItHint *Hint = P.fixit_begin(), *HintEnd = P.fixit_end();
Hint != HintEnd; ++Hint) {
if (Hint->RemoveRange.isValid()) {
HighlightRange(R, LPosInfo.first, Hint->RemoveRange,
diff --git a/lib/Frontend/InitHeaderSearch.cpp b/lib/Frontend/InitHeaderSearch.cpp
index cd749d221db6..9f5bced0d485 100644
--- a/lib/Frontend/InitHeaderSearch.cpp
+++ b/lib/Frontend/InitHeaderSearch.cpp
@@ -82,7 +82,8 @@ public:
/// AddDefaultSystemIncludePaths - Adds the default system include paths so
/// that e.g. stdio.h is found.
void AddDefaultSystemIncludePaths(const LangOptions &Lang,
- const llvm::Triple &triple);
+ const llvm::Triple &triple,
+ bool UseStandardCXXIncludes);
/// Realize - Merges all search path lists into one list and send it to
/// HeaderSearch.
@@ -594,8 +595,9 @@ void InitHeaderSearch::AddDefaultCPlusPlusIncludePaths(const llvm::Triple &tripl
}
void InitHeaderSearch::AddDefaultSystemIncludePaths(const LangOptions &Lang,
- const llvm::Triple &triple) {
- if (Lang.CPlusPlus)
+ const llvm::Triple &triple,
+ bool UseStandardCXXIncludes) {
+ if (Lang.CPlusPlus && UseStandardCXXIncludes)
AddDefaultCPlusPlusIncludePaths(triple);
AddDefaultCIncludePaths(triple);
@@ -765,7 +767,8 @@ void clang::ApplyHeaderSearchOptions(HeaderSearch &HS,
}
if (HSOpts.UseStandardIncludes)
- Init.AddDefaultSystemIncludePaths(Lang, Triple);
+ Init.AddDefaultSystemIncludePaths(Lang, Triple,
+ HSOpts.UseStandardCXXIncludes);
Init.Realize();
}
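Note: the net effect of the header-search plumbing is that -nostdinc++ (spelled -nostdincxx at the cc1 level) suppresses only the default C++ include directories while AddDefaultCIncludePaths still runs, whereas -nostdinc continues to suppress both, since AddDefaultSystemIncludePaths is not called at all in that case.
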
diff --git a/lib/Frontend/PCHReader.cpp b/lib/Frontend/PCHReader.cpp
index e659ff047d76..6d39952e9d27 100644
--- a/lib/Frontend/PCHReader.cpp
+++ b/lib/Frontend/PCHReader.cpp
@@ -905,11 +905,18 @@ PCHReader::PCHReadResult PCHReader::ReadSLocEntryRecord(unsigned ID) {
return Failure;
}
- if (Record.size() < 8) {
+ if (Record.size() < 10) {
Error("source location entry is incorrect");
return Failure;
}
+ if ((off_t)Record[4] != File->getSize() ||
+ (time_t)Record[5] != File->getModificationTime()) {
+ Diag(diag::err_fe_pch_file_modified)
+ << Filename;
+ return Failure;
+ }
+
FileID FID = SourceMgr.createFileID(File,
SourceLocation::getFromRawEncoding(Record[1]),
(SrcMgr::CharacteristicKind)Record[2],
@@ -920,10 +927,10 @@ PCHReader::PCHReadResult PCHReader::ReadSLocEntryRecord(unsigned ID) {
// Reconstruct header-search information for this file.
HeaderFileInfo HFI;
- HFI.isImport = Record[4];
- HFI.DirInfo = Record[5];
- HFI.NumIncludes = Record[6];
- HFI.ControllingMacroID = Record[7];
+ HFI.isImport = Record[6];
+ HFI.DirInfo = Record[7];
+ HFI.NumIncludes = Record[8];
+ HFI.ControllingMacroID = Record[9];
if (Listener)
Listener->ReadHeaderFileInfo(HFI, File->getUID());
break;
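Note: the source-location record grew by two fields, shifting the header-search info, and the reader now validates them before building a FileID. Roughly, matching the writer's abbreviation in the PCHWriter hunk below (index 0 is the entry's own offset; a hedged reconstruction):

    // [1] include location  [2] characteristic kind  [3] line directives
    // [4] file size         [5] modification time
    // [6] isImport  [7] DirInfo  [8] NumIncludes  [9] ControllingMacroID

User-visibly, modifying a header after its PCH was built now fails fast with err_fe_pch_file_modified instead of reading stale offsets.
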
@@ -2068,20 +2075,21 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
}
case pch::TYPE_FUNCTION_NO_PROTO: {
- if (Record.size() != 3) {
+ if (Record.size() != 4) {
Error("incorrect encoding of no-proto function type");
return QualType();
}
QualType ResultType = GetType(Record[0]);
- return Context->getFunctionNoProtoType(ResultType, Record[1],
- (CallingConv)Record[2]);
+ FunctionType::ExtInfo Info(Record[1], Record[2], (CallingConv)Record[3]);
+ return Context->getFunctionNoProtoType(ResultType, Info);
}
case pch::TYPE_FUNCTION_PROTO: {
QualType ResultType = GetType(Record[0]);
bool NoReturn = Record[1];
- CallingConv CallConv = (CallingConv)Record[2];
- unsigned Idx = 3;
+ unsigned RegParm = Record[2];
+ CallingConv CallConv = (CallingConv)Record[3];
+ unsigned Idx = 4;
unsigned NumParams = Record[Idx++];
llvm::SmallVector<QualType, 16> ParamTypes;
for (unsigned I = 0; I != NumParams; ++I)
@@ -2097,7 +2105,9 @@ QualType PCHReader::ReadTypeRecord(uint64_t Offset) {
return Context->getFunctionType(ResultType, ParamTypes.data(), NumParams,
isVariadic, Quals, hasExceptionSpec,
hasAnyExceptionSpec, NumExceptions,
- Exceptions.data(), NoReturn, CallConv);
+ Exceptions.data(),
+ FunctionType::ExtInfo(NoReturn, RegParm,
+ CallConv));
}
case pch::TYPE_UNRESOLVED_USING:
@@ -2341,7 +2351,7 @@ void TypeLocReader::VisitQualifiedNameTypeLoc(QualifiedNameTypeLoc TL) {
void TypeLocReader::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
}
-void TypeLocReader::VisitTypenameTypeLoc(TypenameTypeLoc TL) {
+void TypeLocReader::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
TL.setNameLoc(SourceLocation::getFromRawEncoding(Record[Idx++]));
}
void TypeLocReader::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
diff --git a/lib/Frontend/PCHWriter.cpp b/lib/Frontend/PCHWriter.cpp
index 4752cd3ea6e2..4dd8dc36b760 100644
--- a/lib/Frontend/PCHWriter.cpp
+++ b/lib/Frontend/PCHWriter.cpp
@@ -141,9 +141,11 @@ void PCHTypeWriter::VisitExtVectorType(const ExtVectorType *T) {
void PCHTypeWriter::VisitFunctionType(const FunctionType *T) {
Writer.AddTypeRef(T->getResultType(), Record);
- Record.push_back(T->getNoReturnAttr());
+ FunctionType::ExtInfo C = T->getExtInfo();
+ Record.push_back(C.getNoReturn());
+ Record.push_back(C.getRegParm());
// FIXME: need to stabilize encoding of calling convention...
- Record.push_back(T->getCallConv());
+ Record.push_back(C.getCC());
}
void PCHTypeWriter::VisitFunctionNoProtoType(const FunctionNoProtoType *T) {
@@ -404,7 +406,7 @@ void TypeLocWriter::VisitQualifiedNameTypeLoc(QualifiedNameTypeLoc TL) {
void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
-void TypeLocWriter::VisitTypenameTypeLoc(TypenameTypeLoc TL) {
+void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
Writer.AddSourceLocation(TL.getNameLoc(), Record);
}
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
@@ -921,6 +923,9 @@ static unsigned CreateSLocFileAbbrev(llvm::BitstreamWriter &Stream) {
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Include location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Characteristic
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Line directives
+ // FileEntry fields.
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 12)); // Size
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 32)); // Modification time
// HeaderFileInfo fields.
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isImport
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // DirInfo
@@ -1063,6 +1068,10 @@ void PCHWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
// The source location entry is a file. The blob associated
// with this entry is the file name.
+ // Emit size/modification time for this file.
+ Record.push_back(Content->Entry->getSize());
+ Record.push_back(Content->Entry->getModificationTime());
+
// Emit header-search information associated with this file.
HeaderFileInfo HFI;
HeaderSearch &HS = PP.getHeaderSearchInfo();
diff --git a/lib/Frontend/RewriteObjC.cpp b/lib/Frontend/RewriteObjC.cpp
index 79aecceb70d0..cba92987a3d6 100644
--- a/lib/Frontend/RewriteObjC.cpp
+++ b/lib/Frontend/RewriteObjC.cpp
@@ -2262,8 +2262,8 @@ void RewriteObjC::SynthSelGetUidFunctionDecl() {
QualType getFuncType = Context->getFunctionType(Context->getObjCSelType(),
&ArgTys[0], ArgTys.size(),
false /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
SelGetUidFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
SelGetUidIdent, getFuncType, 0,
@@ -2359,8 +2359,8 @@ void RewriteObjC::SynthSuperContructorFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
false, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
SuperContructorFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2380,8 +2380,8 @@ void RewriteObjC::SynthMsgSendFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
true /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
MsgSendFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2404,8 +2404,8 @@ void RewriteObjC::SynthMsgSendSuperFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
true /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
MsgSendSuperFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2425,8 +2425,8 @@ void RewriteObjC::SynthMsgSendStretFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
true /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
MsgSendStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2451,8 +2451,8 @@ void RewriteObjC::SynthMsgSendSuperStretFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
true /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
MsgSendSuperStretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2472,8 +2472,8 @@ void RewriteObjC::SynthMsgSendFpretFunctionDecl() {
QualType msgSendType = Context->getFunctionType(Context->DoubleTy,
&ArgTys[0], ArgTys.size(),
true /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
MsgSendFpretFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
msgSendIdent, msgSendType, 0,
@@ -2488,8 +2488,8 @@ void RewriteObjC::SynthGetClassFunctionDecl() {
QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
false /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
GetClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2505,8 +2505,8 @@ void RewriteObjC::SynthGetSuperClassFunctionDecl() {
QualType getClassType = Context->getFunctionType(Context->getObjCClassType(),
&ArgTys[0], ArgTys.size(),
false /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
GetSuperClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getSuperClassIdent, getClassType, 0,
@@ -2521,8 +2521,8 @@ void RewriteObjC::SynthGetMetaClassFunctionDecl() {
QualType getClassType = Context->getFunctionType(Context->getObjCIdType(),
&ArgTys[0], ArgTys.size(),
false /*isVariadic*/, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
GetMetaClassFunctionDecl = FunctionDecl::Create(*Context, TUDecl,
SourceLocation(),
getClassIdent, getClassType, 0,
@@ -2964,8 +2964,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
&ArgTypes[0], ArgTypes.size(),
// If we don't have a method decl, force a variadic cast.
Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : true, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CastExpr::CK_Unknown,
cast);
@@ -2995,8 +2995,8 @@ Stmt *RewriteObjC::SynthMessageExpr(ObjCMessageExpr *Exp,
castType = Context->getFunctionType(returnType,
&ArgTypes[0], ArgTypes.size(),
Exp->getMethodDecl() ? Exp->getMethodDecl()->isVariadic() : false, 0,
- false, false, 0, 0, false,
- CC_Default);
+ false, false, 0, 0,
+ FunctionType::ExtInfo());
castType = Context->getPointerType(castType);
cast = NoTypeInfoCStyleCastExpr(Context, castType, CastExpr::CK_Unknown,
cast);
@@ -4547,7 +4547,7 @@ Stmt *RewriteObjC::SynthesizeBlockCall(CallExpr *Exp, const Expr *BlockExp) {
QualType PtrToFuncCastType = Context->getFunctionType(Exp->getType(),
&ArgTypes[0], ArgTypes.size(), false/*no variadic*/, 0,
false, false, 0, 0,
- false, CC_Default);
+ FunctionType::ExtInfo());
PtrToFuncCastType = Context->getPointerType(PtrToFuncCastType);
@@ -5673,4 +5673,3 @@ void RewriteObjC::HandleTranslationUnit(ASTContext &C) {
}
OutFile->flush();
}
-
diff --git a/lib/Frontend/TextDiagnosticPrinter.cpp b/lib/Frontend/TextDiagnosticPrinter.cpp
index 24d51e2c78dc..4e91f8d4c221 100644
--- a/lib/Frontend/TextDiagnosticPrinter.cpp
+++ b/lib/Frontend/TextDiagnosticPrinter.cpp
@@ -276,7 +276,7 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
SourceRange *Ranges,
unsigned NumRanges,
SourceManager &SM,
- const CodeModificationHint *Hints,
+ const FixItHint *Hints,
unsigned NumHints,
unsigned Columns) {
assert(LangOpts && "Unexpected diagnostic outside source file processing");
@@ -409,7 +409,7 @@ void TextDiagnosticPrinter::EmitCaretDiagnostic(SourceLocation Loc,
std::string FixItInsertionLine;
if (NumHints && DiagOpts->ShowFixits) {
- for (const CodeModificationHint *Hint = Hints, *LastHint = Hints + NumHints;
+ for (const FixItHint *Hint = Hints, *LastHint = Hints + NumHints;
Hint != LastHint; ++Hint) {
if (Hint->InsertionLoc.isValid()) {
// We have an insertion hint. Determine whether the inserted
@@ -833,7 +833,7 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
if (DiagOpts->ShowCarets && Info.getLocation().isValid() &&
((LastLoc != Info.getLocation()) || Info.getNumRanges() ||
(LastCaretDiagnosticWasNote && Level != Diagnostic::Note) ||
- Info.getNumCodeModificationHints())) {
+ Info.getNumFixItHints())) {
// Cache the LastLoc, it allows us to omit duplicate source/caret spewage.
LastLoc = Info.getLocation();
LastCaretDiagnosticWasNote = (Level == Diagnostic::Note);
@@ -845,9 +845,9 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
for (unsigned i = 0; i != NumRanges; ++i)
Ranges[i] = Info.getRange(i);
- unsigned NumHints = Info.getNumCodeModificationHints();
+ unsigned NumHints = Info.getNumFixItHints();
for (unsigned idx = 0; idx < NumHints; ++idx) {
- const CodeModificationHint &Hint = Info.getCodeModificationHint(idx);
+ const FixItHint &Hint = Info.getFixItHint(idx);
if (Hint.RemoveRange.isValid()) {
assert(NumRanges < 20 && "Out of space");
Ranges[NumRanges++] = Hint.RemoveRange;
@@ -855,8 +855,8 @@ void TextDiagnosticPrinter::HandleDiagnostic(Diagnostic::Level Level,
}
EmitCaretDiagnostic(LastLoc, Ranges, NumRanges, LastLoc.getManager(),
- Info.getCodeModificationHints(),
- Info.getNumCodeModificationHints(),
+ Info.getFixItHints(),
+ Info.getNumFixItHints(),
DiagOpts->MessageLength);
}
diff --git a/lib/Headers/emmintrin.h b/lib/Headers/emmintrin.h
index b09a62731ce1..6b9dd2aba588 100644
--- a/lib/Headers/emmintrin.h
+++ b/lib/Headers/emmintrin.h
@@ -36,417 +36,417 @@ typedef long long __m128i __attribute__((__vector_size__(16)));
typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_add_sd(__m128d a, __m128d b)
{
a[0] += b[0];
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_add_pd(__m128d a, __m128d b)
{
return a + b;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sub_sd(__m128d a, __m128d b)
{
a[0] -= b[0];
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sub_pd(__m128d a, __m128d b)
{
return a - b;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_mul_sd(__m128d a, __m128d b)
{
a[0] *= b[0];
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_mul_pd(__m128d a, __m128d b)
{
return a * b;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_div_sd(__m128d a, __m128d b)
{
a[0] /= b[0];
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_div_pd(__m128d a, __m128d b)
{
return a / b;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_sd(__m128d a, __m128d b)
{
__m128d c = __builtin_ia32_sqrtsd(b);
return (__m128d) { c[0], a[1] };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_pd(__m128d a)
{
return __builtin_ia32_sqrtpd(a);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_min_sd(__m128d a, __m128d b)
{
return __builtin_ia32_minsd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_min_pd(__m128d a, __m128d b)
{
return __builtin_ia32_minpd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_max_sd(__m128d a, __m128d b)
{
return __builtin_ia32_maxsd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_max_pd(__m128d a, __m128d b)
{
return __builtin_ia32_maxpd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_and_pd(__m128d a, __m128d b)
{
return (__m128d)((__v4si)a & (__v4si)b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_andnot_pd(__m128d a, __m128d b)
{
return (__m128d)(~(__v4si)a & (__v4si)b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_or_pd(__m128d a, __m128d b)
{
return (__m128d)((__v4si)a | (__v4si)b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_xor_pd(__m128d a, __m128d b)
{
return (__m128d)((__v4si)a ^ (__v4si)b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 0);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(b, a, 1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(b, a, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 7);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 3);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 4);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 5);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(a, b, 6);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(b, a, 5);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_pd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmppd(b, a, 6);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 0);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmple_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(b, a, 1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(b, a, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 7);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 3);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 4);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 5);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(a, b, 6);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(b, a, 5);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_sd(__m128d a, __m128d b)
{
return (__m128d)__builtin_ia32_cmpsd(b, a, 6);
}
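
An editorial aside on the *_sd compares above (not part of the patch): they evaluate lane 0 only and pass a's upper lane through unchanged, and the third argument to the builtin is the CMPPD/CMPSD predicate code (0 = eq, 1 = lt, 2 = le, 3 = unord, 4 = neq, 5 = nlt, 6 = nle, 7 = ord), which is why cmpgt/cmpge swap their operands and reuse the lt/le codes. A minimal sketch of the lane behavior:

#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128d a = _mm_set_pd(9.0, 1.0);  /* lane1 = 9, lane0 = 1 */
    __m128d b = _mm_set_pd(0.0, 2.0);
    __m128d m = _mm_cmplt_sd(a, b);    /* lane0: 1 < 2 -> all-ones mask */
    double out[2];
    _mm_storeu_pd(out, m);
    printf("upper lane passed through: %g\n", out[1]);  /* prints 9 */
    return 0;
}
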
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comieq_sd(__m128d a, __m128d b)
{
return __builtin_ia32_comisdeq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comilt_sd(__m128d a, __m128d b)
{
return __builtin_ia32_comisdlt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comile_sd(__m128d a, __m128d b)
{
return __builtin_ia32_comisdle(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comigt_sd(__m128d a, __m128d b)
{
return __builtin_ia32_comisdgt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comineq_sd(__m128d a, __m128d b)
{
return __builtin_ia32_comisdneq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomieq_sd(__m128d a, __m128d b)
{
return __builtin_ia32_ucomisdeq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomilt_sd(__m128d a, __m128d b)
{
return __builtin_ia32_ucomisdlt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomile_sd(__m128d a, __m128d b)
{
return __builtin_ia32_ucomisdle(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomigt_sd(__m128d a, __m128d b)
{
return __builtin_ia32_ucomisdgt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomineq_sd(__m128d a, __m128d b)
{
return __builtin_ia32_ucomisdneq(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_ps(__m128d a)
{
return __builtin_ia32_cvtpd2ps(a);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pd(__m128 a)
{
return __builtin_ia32_cvtps2pd(a);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_pd(__m128i a)
{
return __builtin_ia32_cvtdq2pd((__v4si)a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_epi32(__m128d a)
{
return __builtin_ia32_cvtpd2dq(a);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_si32(__m128d a)
{
return __builtin_ia32_cvtsd2si(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_ss(__m128 a, __m128d b)
{
a[0] = b[0];
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_sd(__m128d a, int b)
{
a[0] = b;
return a;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_sd(__m128d a, __m128 b)
{
a[0] = b[0];
return a;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvttpd_epi32(__m128d a)
{
return (__m128i)__builtin_ia32_cvttpd2dq(a);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si32(__m128d a)
{
return a[0];
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpd_pi32(__m128d a)
{
return (__m64)__builtin_ia32_cvtpd2pi(a);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvttpd_pi32(__m128d a)
{
return (__m64)__builtin_ia32_cvttpd2pi(a);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32_pd(__m64 a)
{
return __builtin_ia32_cvtpi2pd((__v2si)a);
}
-static inline double __attribute__((__always_inline__, __nodebug__))
+static __inline__ double __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_f64(__m128d a)
{
return a[0];
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load_pd(double const *dp)
{
return *(__m128d*)dp;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load1_pd(double const *dp)
{
return (__m128d){ dp[0], dp[0] };
@@ -454,542 +454,542 @@ _mm_load1_pd(double const *dp)
#define _mm_load_pd1(dp) _mm_load1_pd(dp)
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadr_pd(double const *dp)
{
return (__m128d){ dp[1], dp[0] };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadu_pd(double const *dp)
{
return __builtin_ia32_loadupd(dp);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_load_sd(double const *dp)
{
return (__m128d){ *dp, 0.0 };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadh_pd(__m128d a, double const *dp)
{
return __builtin_shufflevector(a, *(__m128d *)dp, 0, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loadl_pd(__m128d a, double const *dp)
{
return __builtin_shufflevector(a, *(__m128d *)dp, 2, 1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set_sd(double w)
{
return (__m128d){ w, 0 };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set1_pd(double w)
{
return (__m128d){ w, w };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_set_pd(double w, double x)
{
return (__m128d){ x, w };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_setr_pd(double w, double x)
{
return (__m128d){ w, x };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_setzero_pd(void)
{
return (__m128d){ 0, 0 };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_move_sd(__m128d a, __m128d b)
{
return (__m128d){ b[0], a[1] };
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_sd(double *dp, __m128d a)
{
dp[0] = a[0];
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store1_pd(double *dp, __m128d a)
{
dp[0] = a[0];
dp[1] = a[0];
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_pd(double *dp, __m128d a)
{
*(__m128d *)dp = a;
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_pd(double *dp, __m128d a)
{
__builtin_ia32_storeupd(dp, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storer_pd(double *dp, __m128d a)
{
dp[0] = a[1];
dp[1] = a[0];
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeh_pd(double *dp, __m128d a)
{
dp[0] = a[1];
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_pd(double *dp, __m128d a)
{
dp[0] = a[0];
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi8(__m128i a, __m128i b)
{
return (__m128i)((__v16qi)a + (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi16(__m128i a, __m128i b)
{
return (__m128i)((__v8hi)a + (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi32(__m128i a, __m128i b)
{
return (__m128i)((__v4si)a + (__v4si)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_si64(__m64 a, __m64 b)
{
return a + b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_add_epi64(__m128i a, __m128i b)
{
return a + b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_paddsb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_paddsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epu8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_paddusb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_adds_epu16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_paddusw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_avg_epu8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pavgb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_avg_epu16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pavgw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_madd_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmaxsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmaxub128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pminsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pminub128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_epu16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi16(__m128i a, __m128i b)
{
return (__m128i)((__v8hi)a * (__v8hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mul_su32(__m64 a, __m64 b)
{
return __builtin_ia32_pmuludq((__v2si)a, (__v2si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epu32(__m128i a, __m128i b)
{
return __builtin_ia32_pmuludq128((__v4si)a, (__v4si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sad_epu8(__m128i a, __m128i b)
{
return __builtin_ia32_psadbw128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi8(__m128i a, __m128i b)
{
return (__m128i)((__v16qi)a - (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi16(__m128i a, __m128i b)
{
return (__m128i)((__v8hi)a - (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi32(__m128i a, __m128i b)
{
return (__m128i)((__v4si)a - (__v4si)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_si64(__m64 a, __m64 b)
{
return a - b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sub_epi64(__m128i a, __m128i b)
{
return a - b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psubsb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psubsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epu8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psubusb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_subs_epu16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psubusw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_and_si128(__m128i a, __m128i b)
{
return a & b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si128(__m128i a, __m128i b)
{
return ~a & b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_or_si128(__m128i a, __m128i b)
{
return a | b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_xor_si128(__m128i a, __m128i b)
{
return a ^ b;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_si128(__m128i a, int imm)
{
return __builtin_ia32_pslldqi128(a, imm * 8);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi16(__m128i a, int count)
{
return (__m128i)__builtin_ia32_psllwi128((__v8hi)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi16(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_psllw128((__v8hi)a, (__v8hi)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi32(__m128i a, int count)
{
return (__m128i)__builtin_ia32_pslldi128((__v4si)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi32(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_pslld128((__v4si)a, (__v4si)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_slli_epi64(__m128i a, int count)
{
return __builtin_ia32_psllqi128(a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sll_epi64(__m128i a, __m128i count)
{
return __builtin_ia32_psllq128(a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srai_epi16(__m128i a, int count)
{
return (__m128i)__builtin_ia32_psrawi128((__v8hi)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sra_epi16(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_psraw128((__v8hi)a, (__v8hi)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srai_epi32(__m128i a, int count)
{
return (__m128i)__builtin_ia32_psradi128((__v4si)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sra_epi32(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_psrad128((__v4si)a, (__v4si)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_si128(__m128i a, int imm)
{
return __builtin_ia32_psrldqi128(a, imm * 8);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi16(__m128i a, int count)
{
return (__m128i)__builtin_ia32_psrlwi128((__v8hi)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi16(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_psrlw128((__v8hi)a, (__v8hi)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi32(__m128i a, int count)
{
return (__m128i)__builtin_ia32_psrldi128((__v4si)a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi32(__m128i a, __m128i count)
{
return (__m128i)__builtin_ia32_psrld128((__v4si)a, (__v4si)count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srli_epi64(__m128i a, int count)
{
return __builtin_ia32_psrlqi128(a, count);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_srl_epi64(__m128i a, __m128i count)
{
return __builtin_ia32_psrlq128(a, count);
}
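
An aside on the shift family above (mine, not the patch's): srai shifts in copies of the sign bit while srli shifts in zeros, and SSE2 provides no 64-bit arithmetic shift at all, which is why the header defines _mm_srl_epi64 but no _mm_sra_epi64. A sketch of the difference, assuming SSE2 is enabled:

#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i v = _mm_set1_epi32(-8);
    int arith = _mm_cvtsi128_si32(_mm_srai_epi32(v, 1)); /* -4 */
    int logic = _mm_cvtsi128_si32(_mm_srli_epi32(v, 1)); /* 2147483644 */
    printf("%d %d\n", arith, logic);
    return 0;
}
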
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi8(__m128i a, __m128i b)
{
return (__m128i)((__v16qi)a == (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi16(__m128i a, __m128i b)
{
return (__m128i)((__v8hi)a == (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi32(__m128i a, __m128i b)
{
return (__m128i)((__v4si)a == (__v4si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi8(__m128i a, __m128i b)
{
return (__m128i)((__v16qi)a > (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi16(__m128i a, __m128i b)
{
return (__m128i)((__v8hi)a > (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi32(__m128i a, __m128i b)
{
return (__m128i)((__v4si)a > (__v4si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi8(__m128i a, __m128i b)
{
return _mm_cmpgt_epi8(b,a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi16(__m128i a, __m128i b)
{
return _mm_cmpgt_epi16(b,a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_epi32(__m128i a, __m128i b)
{
return _mm_cmpgt_epi32(b,a);
}
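
The three _mm_cmplt_epi* definitions above make the trick explicit: SSE2 only has a packed signed greater-than compare (PCMPGT), so less-than is obtained by swapping the operands. A quick check of the equivalence (an illustration, not from the patch):

#include <emmintrin.h>
#include <stdio.h>

int main(void)
{
    __m128i a  = _mm_setr_epi32(4, 3, 2, 1);
    __m128i b  = _mm_setr_epi32(1, 2, 3, 4);
    __m128i lt = _mm_cmplt_epi32(a, b);
    __m128i gt = _mm_cmpgt_epi32(b, a);   /* same lanes set */
    int same = _mm_movemask_epi8(_mm_cmpeq_epi32(lt, gt)) == 0xffff;
    printf("identical masks: %d\n", same);  /* prints 1 */
    return 0;
}
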
#ifdef __x86_64__
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_sd(__m128d a, long long b)
{
a[0] = b;
return a;
}
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtsd_si64(__m128d a)
{
return __builtin_ia32_cvtsd2si64(a);
}
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttsd_si64(__m128d a)
{
return a[0];
}
#endif
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_ps(__m128i a)
{
return __builtin_ia32_cvtdq2ps((__v4si)a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_epi32(__m128 a)
{
return (__m128i)__builtin_ia32_cvtps2dq(a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvttps_epi32(__m128 a)
{
return (__m128i)__builtin_ia32_cvttps2dq(a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_si128(int a)
{
return (__m128i)(__v4si){ a, 0, 0, 0 };
}
#ifdef __x86_64__
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_si128(long long a)
{
return (__m128i){ a, 0 };
}
#endif
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi128_si32(__m128i a)
{
__v4si b = (__v4si)a;
@@ -997,207 +997,207 @@ _mm_cvtsi128_si32(__m128i a)
}
#ifdef __x86_64__
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi128_si64(__m128i a)
{
return a[0];
}
#endif
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_load_si128(__m128i const *p)
{
return *p;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_loadu_si128(__m128i const *p)
{
return (__m128i)__builtin_ia32_loaddqu((char const *)p);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_loadl_epi64(__m128i const *p)
{
return (__m128i) { *(long long*)p, 0};
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi64x(long long q1, long long q0)
{
return (__m128i){ q0, q1 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi64(__m64 q1, __m64 q0)
{
return (__m128i){ (long long)q0, (long long)q1 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi32(int i3, int i2, int i1, int i0)
{
return (__m128i)(__v4si){ i0, i1, i2, i3};
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi16(short w7, short w6, short w5, short w4, short w3, short w2, short w1, short w0)
{
return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set_epi8(char b15, char b14, char b13, char b12, char b11, char b10, char b9, char b8, char b7, char b6, char b5, char b4, char b3, char b2, char b1, char b0)
{
return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi64x(long long q)
{
return (__m128i){ q, q };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi64(__m64 q)
{
return (__m128i){ (long long)q, (long long)q };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi32(int i)
{
return (__m128i)(__v4si){ i, i, i, i };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi16(short w)
{
return (__m128i)(__v8hi){ w, w, w, w, w, w, w, w };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_set1_epi8(char b)
{
return (__m128i)(__v16qi){ b, b, b, b, b, b, b, b, b, b, b, b, b, b, b, b };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi64(__m64 q0, __m64 q1)
{
return (__m128i){ (long long)q0, (long long)q1 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi32(int i0, int i1, int i2, int i3)
{
return (__m128i)(__v4si){ i0, i1, i2, i3};
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi16(short w0, short w1, short w2, short w3, short w4, short w5, short w6, short w7)
{
return (__m128i)(__v8hi){ w0, w1, w2, w3, w4, w5, w6, w7 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setr_epi8(char b0, char b1, char b2, char b3, char b4, char b5, char b6, char b7, char b8, char b9, char b10, char b11, char b12, char b13, char b14, char b15)
{
return (__m128i)(__v16qi){ b0, b1, b2, b3, b4, b5, b6, b7, b8, b9, b10, b11, b12, b13, b14, b15 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_setzero_si128(void)
{
return (__m128i){ 0LL, 0LL };
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_si128(__m128i *p, __m128i b)
{
*p = b;
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_si128(__m128i *p, __m128i b)
{
__builtin_ia32_storedqu((char *)p, (__v16qi)b);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmoveu_si128(__m128i d, __m128i n, char *p)
{
__builtin_ia32_maskmovdqu((__v16qi)d, (__v16qi)n, p);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storel_epi64(__m128i *p, __m128i a)
{
__builtin_ia32_storelv4si((__v2si *)p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pd(double *p, __m128d a)
{
__builtin_ia32_movntpd(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_si128(__m128i *p, __m128i a)
{
__builtin_ia32_movntdq(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_si32(int *p, int a)
{
__builtin_ia32_movnti(p, a);
}
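
One caution on the _mm_stream_* functions in this hunk (editorial, and a simplification): non-temporal stores bypass the cache and are weakly ordered, so producer code normally issues a store fence before signalling other threads. A sketch, using _mm_sfence from xmmintrin.h (which emmintrin.h includes); `publish`, `slot`, and `value` are illustrative names:

#include <emmintrin.h>

void publish(int *slot, int value)
{
    _mm_stream_si32(slot, value);  /* non-temporal, weakly ordered */
    _mm_sfence();                  /* drain it before any later flag write */
}
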
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_clflush(void const *p)
{
__builtin_ia32_clflush(p);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_lfence(void)
{
__builtin_ia32_lfence();
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_mfence(void)
{
__builtin_ia32_mfence();
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packs_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_packsswb128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packs_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_packssdw128((__v4si)a, (__v4si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_packuswb128((__v8hi)a, (__v8hi)b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_extract_epi16(__m128i a, int imm)
{
__v8hi b = (__v8hi)a;
return b[imm];
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_insert_epi16(__m128i a, int b, int imm)
{
__v8hi c = (__v8hi)a;
@@ -1205,7 +1205,7 @@ _mm_insert_epi16(__m128i a, int b, int imm)
return (__m128i)c;
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_epi8(__m128i a)
{
return __builtin_ia32_pmovmskb128((__v16qi)a);
@@ -1226,85 +1226,85 @@ _mm_movemask_epi8(__m128i a)
4 + ((imm) & 0x30) >> 4, \
4 + ((imm) & 0xc0) >> 6))
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 2, 4+2, 3, 4+3);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_epi64(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector(a, b, 1, 2+1);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v16qi)a, (__v16qi)b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v8hi)a, (__v8hi)b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector((__v4si)a, (__v4si)b, 0, 4+0, 1, 4+1);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_epi64(__m128i a, __m128i b)
{
return (__m128i)__builtin_shufflevector(a, b, 0, 2+0);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_movepi64_pi64(__m128i a)
{
return (__m64)a[0];
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_movpi64_pi64(__m64 a)
{
return (__m128i){ (long long)a, 0 };
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_move_epi64(__m128i a)
{
return __builtin_shufflevector(a, (__m128i){ 0 }, 0, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pd(__m128d a, __m128d b)
{
return __builtin_shufflevector(a, b, 1, 2+1);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pd(__m128d a, __m128d b)
{
return __builtin_shufflevector(a, b, 0, 2+0);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_pd(__m128d a)
{
return __builtin_ia32_movmskpd(a);
@@ -1313,43 +1313,43 @@ _mm_movemask_pd(__m128d a)
#define _mm_shuffle_pd(a, b, i) (__builtin_shufflevector((a), (b), (i) & 1, \
(((i) & 2) >> 1) + 2))
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castpd_ps(__m128d in)
{
return (__m128)in;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_castpd_si128(__m128d in)
{
return (__m128i)in;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_castps_pd(__m128 in)
{
return (__m128d)in;
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_castps_si128(__m128 in)
{
return (__m128i)in;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_castsi128_ps(__m128i in)
{
return (__m128)in;
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_castsi128_pd(__m128i in)
{
return (__m128d)in;
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_pause(void)
{
__asm__ volatile ("pause");
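
The whole emmintrin.h hunk is one mechanical change, and the likely reason for it (my reading; the commit message does not spell it out) is language-mode robustness: `inline` is not a keyword in C89, and nothing stops user code from defining an `inline` macro, whereas the double-underscore spelling `__inline__` is reserved for the implementation and accepted by GCC and Clang in every mode. A sketch of the failure the rename avoids:

/* demo.c -- compile with: cc -std=c89 -c demo.c
 * With `static inline`, a strict C89 compile fails at the first such
 * declaration; with the reserved spelling it parses in all modes. */
static __inline__ int twice(int x)
{
    return 2 * x;
}

int main(void)
{
    return twice(2) - 4;  /* exits 0 */
}
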
diff --git a/lib/Headers/mm_malloc.h b/lib/Headers/mm_malloc.h
index a680c47a4a90..fba865120b29 100644
--- a/lib/Headers/mm_malloc.h
+++ b/lib/Headers/mm_malloc.h
@@ -27,7 +27,8 @@
#include <errno.h>
#include <stdlib.h>
-static inline void *__attribute__((__always_inline__, __nodebug__)) _mm_malloc(size_t size, size_t align)
+static __inline__ void *__attribute__((__always_inline__, __nodebug__))
+_mm_malloc(size_t size, size_t align)
{
if (align & (align - 1)) {
errno = EINVAL;
@@ -44,13 +45,15 @@ static inline void *__attribute__((__always_inline__, __nodebug__)) _mm_malloc(s
if (!mallocedMemory)
return 0;
- void *alignedMemory = (void *)(((size_t)mallocedMemory + align) & ~((size_t)align - 1));
+ void *alignedMemory =
+ (void *)(((size_t)mallocedMemory + align) & ~((size_t)align - 1));
((void **)alignedMemory)[-1] = mallocedMemory;
return alignedMemory;
}
-static inline void __attribute__((__always_inline__, __nodebug__)) _mm_free(void *p)
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
+_mm_free(void *p)
{
if (p)
free(((void **)p)[-1]);
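
The mm_malloc.h hunk shows the bookkeeping trick worth calling out: the raw malloc pointer is stashed in the slot immediately below the aligned block, and _mm_free reads it back through ((void **)p)[-1]. Usage is plain (a sketch, not from the patch):

#include <mm_malloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    double *buf = _mm_malloc(4 * sizeof(double), 16);
    if (!buf)
        return 1;
    printf("16-byte aligned: %d\n", (int)(((uintptr_t)buf & 15) == 0)); /* 1 */
    _mm_free(buf);  /* frees the original malloc block, not buf itself */
    return 0;
}
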
diff --git a/lib/Headers/mmintrin.h b/lib/Headers/mmintrin.h
index 0f06f787b6a0..401d8a7aaede 100644
--- a/lib/Headers/mmintrin.h
+++ b/lib/Headers/mmintrin.h
@@ -34,409 +34,409 @@ typedef int __v2si __attribute__((__vector_size__(8)));
typedef short __v4hi __attribute__((__vector_size__(8)));
typedef char __v8qi __attribute__((__vector_size__(8)));
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_empty(void)
{
__builtin_ia32_emms();
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_si64(int __i)
{
return (__m64)(__v2si){__i, 0};
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_si32(__m64 __m)
{
__v2si __mmx_var2 = (__v2si)__m;
return __mmx_var2[0];
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_m64(long long __i)
{
return (__m64)__i;
}
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtm64_si64(__m64 __m)
{
return (long long)__m;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packsswb((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packssdw((__v2si)__m1, (__v2si)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_packs_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_packuswb((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 4, 8+4, 5,
8+5, 6, 8+6, 7, 8+7);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 2, 4+2, 3,
4+3);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 1, 2+1);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v8qi)__m1, (__v8qi)__m2, 0, 8+0, 1,
8+1, 2, 8+2, 3, 8+3);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v4hi)__m1, (__v4hi)__m2, 0, 4+0, 1,
4+1);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_shufflevector((__v2si)__m1, (__v2si)__m2, 0, 2+0);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)((__v8qi)__m1 + (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 + (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_add_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 + (__v2si)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddsb((__v8qi)__m1, (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddsw((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusb((__v8qi)__m1, (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_adds_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_paddusw((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)((__v8qi)__m1 - (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 - (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sub_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 - (__v2si)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubsb((__v8qi)__m1, (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubsw((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pu8(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubusb((__v8qi)__m1, (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_subs_pu16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_psubusw((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_madd_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmaddwd((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)__builtin_ia32_pmulhw((__v4hi)__m1, (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mullo_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 * (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psllw((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psllwi((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_pslld((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_pslldi((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sll_si64(__m64 __m, __m64 __count)
{
return __builtin_ia32_psllq(__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_slli_si64(__m64 __m, int __count)
{
return __builtin_ia32_psllqi(__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sra_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psraw((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srai_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrawi((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sra_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrad((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srai_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psradi((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_pi16(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrlw((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_pi16(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrlwi((__v4hi)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_pi32(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrld((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_pi32(__m64 __m, int __count)
{
return (__m64)__builtin_ia32_psrldi((__v2si)__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srl_si64(__m64 __m, __m64 __count)
{
return (__m64)__builtin_ia32_psrlq(__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_srli_si64(__m64 __m, int __count)
{
return __builtin_ia32_psrlqi(__m, __count);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_and_si64(__m64 __m1, __m64 __m2)
{
return __m1 & __m2;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_si64(__m64 __m1, __m64 __m2)
{
return ~__m1 & __m2;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_or_si64(__m64 __m1, __m64 __m2)
{
return __m1 | __m2;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_xor_si64(__m64 __m1, __m64 __m2)
{
return __m1 ^ __m2;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)((__v8qi)__m1 == (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 == (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 == (__v2si)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2)
{
return (__m64)((__v8qi)__m1 > (__v8qi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2)
{
return (__m64)((__v4hi)__m1 > (__v4hi)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2)
{
return (__m64)((__v2si)__m1 > (__v2si)__m2);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setzero_si64(void)
{
return (__m64){ 0LL };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi32(int __i1, int __i0)
{
return (__m64)(__v2si){ __i0, __i1 };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi16(short __s3, short __s2, short __s1, short __s0)
{
return (__m64)(__v4hi){ __s0, __s1, __s2, __s3 };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
char __b1, char __b0)
{
return (__m64)(__v8qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7 };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi32(int __i)
{
return (__m64)(__v2si){ __i, __i };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi16(short __s)
{
return (__m64)(__v4hi){ __s, __s, __s, __s };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_set1_pi8(char __b)
{
return (__m64)(__v8qi){ __b, __b, __b, __b, __b, __b, __b, __b };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi32(int __i1, int __i0)
{
return (__m64)(__v2si){ __i1, __i0 };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi16(short __s3, short __s2, short __s1, short __s0)
{
return (__m64)(__v4hi){ __s3, __s2, __s1, __s0 };
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_setr_pi8(char __b7, char __b6, char __b5, char __b4, char __b3, char __b2,
char __b1, char __b0)
{
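
A note on the pattern above: this hunk, like the rest of the header sweep in this commit, only respells `static inline` as `static __inline__`. Plain `inline` is not a keyword in strict C89, so headers that must survive `-std=c89 -pedantic` need the always-available GNU spelling; callers are unaffected. A minimal sketch of unchanged usage (the function name is hypothetical):

    #include <mmintrin.h>

    int mmx_demo(void)
    {
        __m64 a = _mm_set_pi16(1, 2, 3, 4);
        __m64 m = _mm_set1_pi16(3);
        __m64 r = _mm_and_si64(a, m);  // lane-wise AND, as declared above
        int lo = _mm_cvtsi64_si32(r);  // low 32 bits of the result
        _mm_empty();                   // leave MMX state before touching x87
        return lo;
    }
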
diff --git a/lib/Headers/pmmintrin.h b/lib/Headers/pmmintrin.h
index cd901662d68b..7ca386cee953 100644
--- a/lib/Headers/pmmintrin.h
+++ b/lib/Headers/pmmintrin.h
@@ -30,67 +30,67 @@
#include <emmintrin.h>
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_lddqu_si128(__m128i const *p)
{
return (__m128i)__builtin_ia32_lddqu((char const *)p);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_addsub_ps(__m128 a, __m128 b)
{
return __builtin_ia32_addsubps(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_ps(__m128 a, __m128 b)
{
return __builtin_ia32_haddps(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_ps(__m128 a, __m128 b)
{
return __builtin_ia32_hsubps(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movehdup_ps(__m128 a)
{
return __builtin_shufflevector(a, a, 1, 1, 3, 3);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_moveldup_ps(__m128 a)
{
return __builtin_shufflevector(a, a, 0, 0, 2, 2);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_addsub_pd(__m128d a, __m128d b)
{
return __builtin_ia32_addsubpd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pd(__m128d a, __m128d b)
{
return __builtin_ia32_haddpd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pd(__m128d a, __m128d b)
{
return __builtin_ia32_hsubpd(a, b);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_loaddup_pd(double const *dp)
{
return (__m128d){ *dp, *dp };
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_movedup_pd(__m128d a)
{
return __builtin_shufflevector(a, a, 0, 0);
@@ -104,13 +104,13 @@ _mm_movedup_pd(__m128d a)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_monitor(void const *p, unsigned extensions, unsigned hints)
{
__builtin_ia32_monitor((void *)p, extensions, hints);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_mwait(unsigned extensions, unsigned hints)
{
__builtin_ia32_mwait(extensions, hints);
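
The SSE3 horizontal operations respelled above combine adjacent lanes of each operand. A common idiom built on them, as a hedged sketch (the helper name is hypothetical): applying `_mm_hadd_ps` twice collapses a vector to its total in every lane.

    #include <pmmintrin.h>

    float hsum(__m128 v)
    {
        __m128 t = _mm_hadd_ps(v, v); // { v0+v1, v2+v3, v0+v1, v2+v3 }
        t = _mm_hadd_ps(t, t);        // every lane now holds v0+v1+v2+v3
        return _mm_cvtss_f32(t);      // extract lane 0
    }
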
diff --git a/lib/Headers/smmintrin.h b/lib/Headers/smmintrin.h
index 9c8d53d0e489..e271f9953cd9 100644
--- a/lib/Headers/smmintrin.h
+++ b/lib/Headers/smmintrin.h
@@ -1,25 +1,25 @@
-/*===---- smmintrin.h - SSE intrinsics -------------------------------------===
-*
-* Permission is hereby granted, free of charge, to any person obtaining a copy
-* of this software and associated documentation files (the "Software"), to deal
-* in the Software without restriction, including without limitation the rights
-* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-* copies of the Software, and to permit persons to whom the Software is
-* furnished to do so, subject to the following conditions:
-*
-* The above copyright notice and this permission notice shall be included in
-* all copies or substantial portions of the Software.
-*
-* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-* THE SOFTWARE.
-*
-*===-----------------------------------------------------------------------===
-*/
+/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H
@@ -67,53 +67,53 @@ typedef long long __v2di __attribute__ ((__vector_size__ (16)));
#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
/* SSE4 Packed Blending Intrinsics. */
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blend_pd (__m128d __V1, __m128d __V2, const int __M)
{
return (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, __M);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blend_ps (__m128 __V1, __m128 __V2, const int __M)
{
return (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, __M);
}
-static inline __m128d __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
(__v2df)__M);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
(__v4sf)__M);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
(__v16qi)__M);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blend_epi16 (__m128i __V1, __m128i __V2, const int __M)
{
return (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, __M);
}
/* SSE4 Dword Multiply Instructions. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
- return (__m128i) __builtin_ia32_pmulld128((__v4si)__V1, (__v4si)__V2);
+ return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
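
Note the functional change hidden in this hunk: `_mm_mullo_epi32` now lowers to the vector `*` operator instead of the pmulld builtin. The observable result should be identical, since pmulld is an element-wise 32-bit multiply keeping the low halves, while the generic form lets the optimizer constant-fold and simplify. The contrast with `_mm_mul_epi32`, which stays a builtin, in a hedged sketch:

    #include <smmintrin.h>

    void products(__m128i v1, __m128i v2, __m128i *lo, __m128i *wide)
    {
        *lo   = _mm_mullo_epi32(v1, v2); // four 32-bit products, high halves dropped
        *wide = _mm_mul_epi32(v1, v2);   // elements 0 and 2 only, full 64-bit products
    }
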
@@ -124,56 +124,56 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd ((X), (Y), (M))
/* SSE4 Streaming Load Hint Instruction. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
@@ -224,19 +224,19 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
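
The PTEST wrappers above map straight onto the instruction's flag outputs: `_mm_testz_si128(M, V)` yields 1 exactly when `M & V` is all zero. A hedged sketch of the usual application (the helper name is hypothetical):

    #include <smmintrin.h>

    int is_all_zero(__m128i v)
    {
        return _mm_testz_si128(v, v); // ZF of PTEST: set iff (v & v) == 0
    }
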
@@ -247,88 +247,88 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((V), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
}
/* SSE4 Packed Integer Sign-Extension. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}
/* SSE4 Packed Integer Zero-Extension. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}
/* SSE4 Pack with Unsigned Saturation. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
@@ -400,33 +400,33 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
__builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
/* SSE4.2 Compare Packed Data -- Greater Than. */
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
}
/* SSE4.2 Accumulate CRC32. */
-static inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
return __builtin_ia32_crc32qi(__C, __D);
}
-static inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
return __builtin_ia32_crc32hi(__C, __D);
}
-static inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
return __builtin_ia32_crc32si(__C, __D);
}
#ifdef __x86_64__
-static inline unsigned long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
return __builtin_ia32_crc32di(__C, __D);
@@ -434,14 +434,14 @@ _mm_crc32_u64(unsigned long long __C, unsigned long long __D)
#endif /* __x86_64__ */
/* SSE4.2 Population Count. */
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u32(unsigned int __A)
{
return __builtin_popcount(__A);
}
#ifdef __x86_64__
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u64(unsigned long long __A)
{
return __builtin_popcountll(__A);
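
The SSE4.2 CRC32 intrinsics above accumulate a CRC-32C (Castagnoli) checksum one chunk at a time. A byte-at-a-time sketch; the seed and final inversion follow the common convention, but that part is protocol-dependent rather than anything this header dictates:

    #include <smmintrin.h>

    unsigned int crc32c(const unsigned char *p, unsigned long n)
    {
        unsigned int crc = 0xFFFFFFFFu;     // conventional seed (varies by protocol)
        while (n--)
            crc = _mm_crc32_u8(crc, *p++);  // fold in one byte
        return crc ^ 0xFFFFFFFFu;           // conventional final inversion
    }
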
diff --git a/lib/Headers/tmmintrin.h b/lib/Headers/tmmintrin.h
index 7adb776fef76..09ebc2378016 100644
--- a/lib/Headers/tmmintrin.h
+++ b/lib/Headers/tmmintrin.h
@@ -30,37 +30,37 @@
#include <pmmintrin.h>
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi8(__m64 a)
{
return (__m64)__builtin_ia32_pabsb((__v8qi)a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi8(__m128i a)
{
return (__m128i)__builtin_ia32_pabsb128((__v16qi)a);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi16(__m64 a)
{
return (__m64)__builtin_ia32_pabsw((__v4hi)a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi16(__m128i a)
{
return (__m128i)__builtin_ia32_pabsw128((__v8hi)a);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_abs_pi32(__m64 a)
{
return (__m64)__builtin_ia32_pabsd((__v2si)a);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_abs_epi32(__m128i a)
{
return (__m128i)__builtin_ia32_pabsd128((__v4si)a);
@@ -69,145 +69,145 @@ _mm_abs_epi32(__m128i a)
#define _mm_alignr_epi8(a, b, n) (__builtin_ia32_palignr128((a), (b), (n)))
#define _mm_alignr_pi8(a, b, n) (__builtin_ia32_palignr((a), (b), (n*8)))
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phaddw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadd_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phaddd128((__v4si)a, (__v4si)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phaddw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadd_pi32(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phaddd((__v2si)a, (__v2si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hadds_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phaddsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hadds_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phaddsw((__v4hi)a, (__v4hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsub_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phsubw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsub_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phsubd128((__v4si)a, (__v4si)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phsubw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsub_pi32(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phsubd((__v2si)a, (__v2si)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_hsubs_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_phsubsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_hsubs_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_phsubsw((__v4hi)a, (__v4hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_maddubs_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmaddubsw128((__v16qi)a, (__v16qi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_maddubs_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pmaddubsw((__v8qi)a, (__v8qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mulhrs_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pmulhrsw128((__v8hi)a, (__v8hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhrs_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pmulhrsw((__v4hi)a, (__v4hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_shuffle_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_pshufb128((__v16qi)a, (__v16qi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_shuffle_pi8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pshufb((__v8qi)a, (__v8qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi8(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psignb128((__v16qi)a, (__v16qi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi16(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psignw128((__v8hi)a, (__v8hi)b);
}
-static inline __m128i __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_sign_epi32(__m128i a, __m128i b)
{
return (__m128i)__builtin_ia32_psignd128((__v4si)a, (__v4si)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_psignb((__v8qi)a, (__v8qi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_psignw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sign_pi32(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_psignd((__v2si)a, (__v2si)b);
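
Among the SSSE3 wrappers above, `_mm_shuffle_epi8` (pshufb) is the workhorse: each control byte selects a source byte by its low four bits, and a set high bit writes zero instead. A hedged sketch (the helper name is hypothetical):

    #include <tmmintrin.h>

    __m128i reverse_bytes(__m128i a)
    {
        const __m128i ctl = _mm_set_epi8(0, 1, 2,  3,  4,  5,  6,  7,
                                         8, 9, 10, 11, 12, 13, 14, 15);
        return _mm_shuffle_epi8(a, ctl);  // byte i of the result = byte (15 - i) of a
    }
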
diff --git a/lib/Headers/wmmintrin.h b/lib/Headers/wmmintrin.h
new file mode 100644
index 000000000000..6b2e4687d4bc
--- /dev/null
+++ b/lib/Headers/wmmintrin.h
@@ -0,0 +1,67 @@
+/*===---- wmmintrin.h - AES intrinsics ------------------------------------===
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef _WMMINTRIN_H
+#define _WMMINTRIN_H
+
+#if !defined (__AES__)
+# error "AES instructions not enabled"
+#else
+
+#include <smmintrin.h>
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenc_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenc128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesenclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesenclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdec_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdec128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesdeclast_si128(__m128i __V, __m128i __R)
+{
+ return (__m128i)__builtin_ia32_aesdeclast128(__V, __R);
+}
+
+static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
+_mm_aesimc_si128(__m128i __V)
+{
+ return (__m128i)__builtin_ia32_aesimc128(__V);
+}
+
+#define _mm_aeskeygenassist_si128(C, R) \
+ __builtin_ia32_aeskeygenassist128((C), (R))
+
+#endif /* __AES__ */
+#endif /* _WMMINTRIN_H */
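
The new wmmintrin.h wraps one AES round per intrinsic, so a full AES-128 block encryption is the textbook ten-round sequence. A hedged sketch assuming an already expanded key schedule `rk[11]` (the key expansion itself, built on `_mm_aeskeygenassist_si128`, is omitted):

    #include <wmmintrin.h>

    __m128i aes128_encrypt_block(__m128i block, const __m128i rk[11])
    {
        block = _mm_xor_si128(block, rk[0]);         // initial AddRoundKey
        for (int r = 1; r < 10; ++r)
            block = _mm_aesenc_si128(block, rk[r]);  // nine full rounds
        return _mm_aesenclast_si128(block, rk[10]);  // last round, no MixColumns
    }
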
diff --git a/lib/Headers/xmmintrin.h b/lib/Headers/xmmintrin.h
index 06e616bd19c2..4e313b23b3d0 100644
--- a/lib/Headers/xmmintrin.h
+++ b/lib/Headers/xmmintrin.h
@@ -36,365 +36,365 @@ typedef float __m128 __attribute__((__vector_size__(16)));
#include <mm_malloc.h>
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ss(__m128 a, __m128 b)
{
a[0] += b[0];
return a;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_add_ps(__m128 a, __m128 b)
{
return a + b;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sub_ss(__m128 a, __m128 b)
{
a[0] -= b[0];
return a;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sub_ps(__m128 a, __m128 b)
{
return a - b;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_mul_ss(__m128 a, __m128 b)
{
a[0] *= b[0];
return a;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_mul_ps(__m128 a, __m128 b)
{
return a * b;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_div_ss(__m128 a, __m128 b)
{
a[0] /= b[0];
return a;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_div_ps(__m128 a, __m128 b)
{
return a / b;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_ss(__m128 a)
{
return __builtin_ia32_sqrtss(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_sqrt_ps(__m128 a)
{
return __builtin_ia32_sqrtps(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp_ss(__m128 a)
{
return __builtin_ia32_rcpss(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rcp_ps(__m128 a)
{
return __builtin_ia32_rcpps(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt_ss(__m128 a)
{
return __builtin_ia32_rsqrtss(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_rsqrt_ps(__m128 a)
{
return __builtin_ia32_rsqrtps(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_min_ss(__m128 a, __m128 b)
{
return __builtin_ia32_minss(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_min_ps(__m128 a, __m128 b)
{
return __builtin_ia32_minps(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_max_ss(__m128 a, __m128 b)
{
return __builtin_ia32_maxss(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_max_ps(__m128 a, __m128 b)
{
return __builtin_ia32_maxps(a, b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_and_ps(__m128 a, __m128 b)
{
return (__m128)((__v4si)a & (__v4si)b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_andnot_ps(__m128 a, __m128 b)
{
return (__m128)(~(__v4si)a & (__v4si)b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_or_ps(__m128 a, __m128 b)
{
return (__m128)((__v4si)a | (__v4si)b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_xor_ps(__m128 a, __m128 b)
{
return (__m128)((__v4si)a ^ (__v4si)b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 0);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 0);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 1);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmplt_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 1);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 2);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmple_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 2);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(b, a, 1);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(b, a, 1);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(b, a, 2);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpge_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(b, a, 2);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 4);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpneq_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 4);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnlt_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 6);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnle_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 6);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(b, a, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpngt_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(b, a, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(b, a, 6);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpnge_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(b, a, 6);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 7);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpord_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 7);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ss(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpss(a, b, 3);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cmpunord_ps(__m128 a, __m128 b)
{
return (__m128)__builtin_ia32_cmpps(a, b, 3);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comieq_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comieq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comilt_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comilt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comile_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comile(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comigt_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comigt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comige_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comige(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_comineq_ss(__m128 a, __m128 b)
{
return __builtin_ia32_comineq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomieq_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomieq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomilt_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomilt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomile_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomile(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomigt_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomigt(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomige_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomige(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_ucomineq_ss(__m128 a, __m128 b)
{
return __builtin_ia32_ucomineq(a, b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_si32(__m128 a)
{
return __builtin_ia32_cvtss2si(a);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvt_ss2si(__m128 a)
{
return _mm_cvtss_si32(a);
@@ -402,7 +402,7 @@ _mm_cvt_ss2si(__m128 a)
#ifdef __x86_64__
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_si64(__m128 a)
{
return __builtin_ia32_cvtss2si64(a);
@@ -410,37 +410,37 @@ _mm_cvtss_si64(__m128 a)
#endif
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi32(__m128 a)
{
return (__m64)__builtin_ia32_cvtps2pi(a);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si32(__m128 a)
{
return a[0];
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_cvtt_ss2si(__m128 a)
{
return _mm_cvttss_si32(a);
}
-static inline long long __attribute__((__always_inline__, __nodebug__))
+static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_cvttss_si64(__m128 a)
{
return a[0];
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvttps_pi32(__m128 a)
{
return (__m64)__builtin_ia32_cvttps2pi(a);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi32_ss(__m128 a, int b)
{
a[0] = b;
@@ -449,7 +449,7 @@ _mm_cvtsi32_ss(__m128 a, int b)
#ifdef __x86_64__
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtsi64_ss(__m128 a, long long b)
{
a[0] = b;
@@ -458,19 +458,19 @@ _mm_cvtsi64_ss(__m128 a, long long b)
#endif
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32_ps(__m128 a, __m64 b)
{
return __builtin_ia32_cvtpi2ps(a, (__v2si)b);
}
-static inline float __attribute__((__always_inline__, __nodebug__))
+static __inline__ float __attribute__((__always_inline__, __nodebug__))
_mm_cvtss_f32(__m128 a)
{
return a[0];
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadh_pi(__m128 a, const __m64 *p)
{
__m128 b;
@@ -479,7 +479,7 @@ _mm_loadh_pi(__m128 a, const __m64 *p)
return __builtin_shufflevector(a, b, 0, 1, 4, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadl_pi(__m128 a, const __m64 *p)
{
__m128 b;
@@ -488,13 +488,13 @@ _mm_loadl_pi(__m128 a, const __m64 *p)
return __builtin_shufflevector(a, b, 4, 5, 2, 3);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load_ss(const float *p)
{
return (__m128){ *p, 0, 0, 0 };
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load1_ps(const float *p)
{
return (__m128){ *p, *p, *p, *p };
@@ -502,100 +502,100 @@ _mm_load1_ps(const float *p)
#define _mm_load_ps1(p) _mm_load1_ps(p)
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_load_ps(const float *p)
{
return *(__m128*)p;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadu_ps(const float *p)
{
return __builtin_ia32_loadups(p);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_loadr_ps(const float *p)
{
__m128 a = _mm_load_ps(p);
return __builtin_shufflevector(a, a, 3, 2, 1, 0);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ss(float w)
{
return (__m128){ w, 0, 0, 0 };
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set1_ps(float w)
{
return (__m128){ w, w, w, w };
}
// Microsoft specific.
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ps1(float w)
{
return _mm_set1_ps(w);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_set_ps(float z, float y, float x, float w)
{
return (__m128){ w, x, y, z };
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_setr_ps(float z, float y, float x, float w)
{
return (__m128){ z, y, x, w };
}
-static inline __m128 __attribute__((__always_inline__))
+static __inline__ __m128 __attribute__((__always_inline__))
_mm_setzero_ps(void)
{
return (__m128){ 0, 0, 0, 0 };
}
-static inline void __attribute__((__always_inline__))
+static __inline__ void __attribute__((__always_inline__))
_mm_storeh_pi(__m64 *p, __m128 a)
{
__builtin_ia32_storehps((__v2si *)p, a);
}
-static inline void __attribute__((__always_inline__))
+static __inline__ void __attribute__((__always_inline__))
_mm_storel_pi(__m64 *p, __m128 a)
{
__builtin_ia32_storelps((__v2si *)p, a);
}
-static inline void __attribute__((__always_inline__))
+static __inline__ void __attribute__((__always_inline__))
_mm_store_ss(float *p, __m128 a)
{
*p = a[0];
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storeu_ps(float *p, __m128 a)
{
__builtin_ia32_storeups(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store1_ps(float *p, __m128 a)
{
a = __builtin_shufflevector(a, a, 0, 0, 0, 0);
_mm_storeu_ps(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_store_ps(float *p, __m128 a)
{
*(__m128 *)p = a;
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_storer_ps(float *p, __m128 a)
{
a = __builtin_shufflevector(a, a, 3, 2, 1, 0);
@@ -612,32 +612,32 @@ _mm_storer_ps(float *p, __m128 a)
#define _mm_prefetch(a, sel) (__builtin_prefetch((void *)a, 0, sel))
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_pi(__m64 *p, __m64 a)
{
__builtin_ia32_movntq(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_stream_ps(float *p, __m128 a)
{
__builtin_ia32_movntps(p, a);
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_sfence(void)
{
__builtin_ia32_sfence();
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_extract_pi16(__m64 a, int n)
{
__v4hi b = (__v4hi)a;
return (unsigned short)b[n & 3];
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_insert_pi16(__m64 a, int d, int n)
{
__v4hi b = (__v4hi)a;
@@ -645,37 +645,37 @@ _mm_insert_pi16(__m64 a, int d, int n)
return (__m64)b;
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_max_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pmaxsw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_max_pu8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pmaxub((__v8qi)a, (__v8qi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_min_pi16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pminsw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_min_pu8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pminub((__v8qi)a, (__v8qi)b);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_pi8(__m64 a)
{
return __builtin_ia32_pmovmskb((__v8qi)a);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_mulhi_pu16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pmulhuw((__v4hi)a, (__v4hi)b);
@@ -686,37 +686,37 @@ _mm_mulhi_pu16(__m64 a, __m64 b)
(n) & 0x3, ((n) & 0xc) >> 2, \
((n) & 0x30) >> 4, ((n) & 0xc0) >> 6))
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_maskmove_si64(__m64 d, __m64 n, char *p)
{
__builtin_ia32_maskmovq((__v8qi)d, (__v8qi)n, p);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_avg_pu8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pavgb((__v8qi)a, (__v8qi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_avg_pu16(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_pavgw((__v4hi)a, (__v4hi)b);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_sad_pu8(__m64 a, __m64 b)
{
return (__m64)__builtin_ia32_psadbw((__v8qi)a, (__v8qi)b);
}
-static inline unsigned int __attribute__((__always_inline__, __nodebug__))
+static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_getcsr(void)
{
return __builtin_ia32_stmxcsr();
}
-static inline void __attribute__((__always_inline__, __nodebug__))
+static __inline__ void __attribute__((__always_inline__, __nodebug__))
_mm_setcsr(unsigned int i)
{
__builtin_ia32_ldmxcsr(i);
@@ -727,37 +727,37 @@ _mm_setcsr(unsigned int i)
(((mask) & 0x30) >> 4) + 4, \
(((mask) & 0xc0) >> 6) + 4))
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpackhi_ps(__m128 a, __m128 b)
{
return __builtin_shufflevector(a, b, 2, 6, 3, 7);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_unpacklo_ps(__m128 a, __m128 b)
{
return __builtin_shufflevector(a, b, 0, 4, 1, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_move_ss(__m128 a, __m128 b)
{
return __builtin_shufflevector(a, b, 4, 1, 2, 3);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movehl_ps(__m128 a, __m128 b)
{
return __builtin_shufflevector(a, b, 6, 7, 2, 3);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_movelh_ps(__m128 a, __m128 b)
{
return __builtin_shufflevector(a, b, 0, 1, 4, 5);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi16_ps(__m64 a)
{
__m64 b, c;
@@ -775,7 +775,7 @@ _mm_cvtpi16_ps(__m64 a)
return r;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpu16_ps(__m64 a)
{
__m64 b, c;
@@ -792,7 +792,7 @@ _mm_cvtpu16_ps(__m64 a)
return r;
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi8_ps(__m64 a)
{
__m64 b;
@@ -804,7 +804,7 @@ _mm_cvtpi8_ps(__m64 a)
return _mm_cvtpi16_ps(b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpu8_ps(__m64 a)
{
__m64 b;
@@ -815,7 +815,7 @@ _mm_cvtpu8_ps(__m64 a)
return _mm_cvtpi16_ps(b);
}
-static inline __m128 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_cvtpi32x2_ps(__m64 a, __m64 b)
{
__m128 c;
@@ -827,7 +827,7 @@ _mm_cvtpi32x2_ps(__m64 a, __m64 b)
return _mm_cvtpi32_ps(c, a);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi16(__m128 a)
{
__m64 b, c;
@@ -839,7 +839,7 @@ _mm_cvtps_pi16(__m128 a)
return _mm_packs_pi16(b, c);
}
-static inline __m64 __attribute__((__always_inline__, __nodebug__))
+static __inline__ __m64 __attribute__((__always_inline__, __nodebug__))
_mm_cvtps_pi8(__m128 a)
{
__m64 b, c;
@@ -850,7 +850,7 @@ _mm_cvtps_pi8(__m128 a)
return _mm_packs_pi16(b, c);
}
-static inline int __attribute__((__always_inline__, __nodebug__))
+static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_movemask_ps(__m128 a)
{
return __builtin_ia32_movmskps(a);
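
`_mm_movemask_ps`, the last wrapper in this sweep, packs the four lane sign bits into an int, which is how packed comparisons become scalar branches. A hedged sketch (the helper name is hypothetical):

    #include <xmmintrin.h>

    int any_less(__m128 a, __m128 b)
    {
        // cmplt sets a lane to all-ones where a[i] < b[i];
        // movemask gathers those sign bits into bits 0..3.
        return _mm_movemask_ps(_mm_cmplt_ps(a, b)) != 0;
    }
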
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 2f89142409a0..19f25ea4a8bb 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -1372,8 +1372,7 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
// a pedwarn.
if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r'))
Diag(BufferEnd, diag::ext_no_newline_eof)
- << CodeModificationHint::CreateInsertion(getSourceLocation(BufferEnd),
- "\n");
+ << FixItHint::CreateInsertion(getSourceLocation(BufferEnd), "\n");
BufferPtr = CurPtr;
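
This is the first of many hunks in this commit that mechanically rename CodeModificationHint to FixItHint; the three factory functions keep their signatures. A sketch of the renamed API as used throughout the patch (Loc, Range, and the pairing of hints below are placeholders, not from this hunk):

    // Sketch only: the location, range, and hint combination are illustrative.
    Diag(Loc, diag::ext_no_newline_eof)
        << FixItHint::CreateInsertion(Loc, "\n")       // add text at Loc
        << FixItHint::CreateRemoval(Range)             // delete a source range
        << FixItHint::CreateReplacement(Range, "::");  // swap a range for text
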
diff --git a/lib/Lex/PPDirectives.cpp b/lib/Lex/PPDirectives.cpp
index 7b601010b20d..757ba9014df6 100644
--- a/lib/Lex/PPDirectives.cpp
+++ b/lib/Lex/PPDirectives.cpp
@@ -127,10 +127,10 @@ void Preprocessor::CheckEndOfDirective(const char *DirType, bool EnableMacros) {
// Add a fixit in GNU/C99/C++ mode. Don't offer a fixit for strict-C89,
// because it is more trouble than it is worth to insert /**/ and check that
// there is no /**/ in the range also.
- CodeModificationHint FixItHint;
+ FixItHint Hint;
if (Features.GNUMode || Features.C99 || Features.CPlusPlus)
- FixItHint = CodeModificationHint::CreateInsertion(Tmp.getLocation(),"//");
- Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << FixItHint;
+ Hint = FixItHint::CreateInsertion(Tmp.getLocation(),"//");
+ Diag(Tmp, diag::ext_pp_extra_tokens_at_eol) << DirType << Hint;
DiscardUntilEndOfDirective();
}
}
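
For illustration, the fix-it this emits turns trailing directive tokens into a comment rather than deleting them; as the source comment notes, no hint is offered in strict C89, where the only safe rewrite would be a block comment plus a check for nested comment markers. A hypothetical input and the suggested rewrite:

    #endif BLAH     /* ext_pp_extra_tokens_at_eol fires on 'BLAH' */
    #endif //BLAH   /* the "//" insertion offered in GNU/C99/C++ modes */
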
diff --git a/lib/Lex/PPLexerChange.cpp b/lib/Lex/PPLexerChange.cpp
index 6d1c132fc0a9..335d3db627dd 100644
--- a/lib/Lex/PPLexerChange.cpp
+++ b/lib/Lex/PPLexerChange.cpp
@@ -255,6 +255,7 @@ bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
if (!I->second->isUsed())
Diag(I->second->getDefinitionLoc(), diag::pp_macro_not_used);
}
+
return true;
}
diff --git a/lib/Lex/PPMacroExpansion.cpp b/lib/Lex/PPMacroExpansion.cpp
index ffae8ab6afb1..1c6a5ad0ebce 100644
--- a/lib/Lex/PPMacroExpansion.cpp
+++ b/lib/Lex/PPMacroExpansion.cpp
@@ -258,10 +258,13 @@ bool Preprocessor::HandleMacroExpandedIdentifier(Token &Identifier,
InstantiationEnd,Identifier.getLength());
Identifier.setLocation(Loc);
- // If this is #define X X, we must mark the result as unexpandible.
- if (IdentifierInfo *NewII = Identifier.getIdentifierInfo())
- if (getMacroInfo(NewII) == MI)
- Identifier.setFlag(Token::DisableExpand);
+ // If this is a disabled macro or #define X X, we must mark the result as
+ // unexpandable.
+ if (IdentifierInfo *NewII = Identifier.getIdentifierInfo()) {
+ if (MacroInfo *NewMI = getMacroInfo(NewII))
+ if (!NewMI->isEnabled() || NewMI == MI)
+ Identifier.setFlag(Token::DisableExpand);
+ }
// Since this is not an identifier token, it can't be macro expanded, so
// we're done.
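
Previously only the literal `#define X X` self-reference was caught on this single-token fast path; the new check also covers a result token that names a macro currently disabled because it is mid-expansion. Hypothetical inputs for the two cases (not from this commit's tests):

    #define X X
    int X;    // produced 'X' names the macro being expanded (NewMI == MI)

    #define ONE TWO
    #define TWO ONE
    int ONE;  // assumed sketch: the produced token names a disabled macro
              // (!NewMI->isEnabled()); marking it Token::DisableExpand
              // keeps the expansion from ping-ponging indefinitely
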
diff --git a/lib/Lex/Preprocessor.cpp b/lib/Lex/Preprocessor.cpp
index 9d59300d213e..8b4b1ddf4149 100644
--- a/lib/Lex/Preprocessor.cpp
+++ b/lib/Lex/Preprocessor.cpp
@@ -519,6 +519,11 @@ bool Preprocessor::EnterMainSourceFile() {
return EnterSourceFile(FID, 0, ErrorStr);
}
+void Preprocessor::EndSourceFile() {
+ // Notify the client that we reached the end of the source file.
+ if (Callbacks)
+ Callbacks->EndOfMainFile();
+}
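
The new EndSourceFile() hook gives PPCallbacks clients a definite end-of-input notification via the existing EndOfMainFile() callback. A minimal sketch of a client (the class name and body are hypothetical):

    #include "clang/Lex/PPCallbacks.h"

    // Illustrative client, not from this commit.
    class FlushingCallbacks : public clang::PPCallbacks {
    public:
      virtual void EndOfMainFile() {
        // Runs once, via the new Preprocessor::EndSourceFile(), after the
        // main file has been fully processed: flush buffered output here.
      }
    };
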
//===----------------------------------------------------------------------===//
// Lexer Event Handling.
diff --git a/lib/Lex/TokenConcatenation.cpp b/lib/Lex/TokenConcatenation.cpp
index 07951646ffe1..51d2e2326fca 100644
--- a/lib/Lex/TokenConcatenation.cpp
+++ b/lib/Lex/TokenConcatenation.cpp
@@ -85,7 +85,7 @@ TokenConcatenation::TokenConcatenation(Preprocessor &pp) : PP(pp) {
TokenInfo[tok::star ] |= aci_avoid_equal; // *=
TokenInfo[tok::exclaim ] |= aci_avoid_equal; // !=
TokenInfo[tok::lessless ] |= aci_avoid_equal; // <<=
- TokenInfo[tok::greaterequal] |= aci_avoid_equal; // >>=
+ TokenInfo[tok::greatergreater] |= aci_avoid_equal; // >>=
TokenInfo[tok::caret ] |= aci_avoid_equal; // ^=
TokenInfo[tok::equal ] |= aci_avoid_equal; // ==
}
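
Unlike the surrounding renames, this hunk is a behavioral bug fix: the table marks tokens after which a printed '=' must be separated by a space, and the '>>=' hazard follows '>>' (tok::greatergreater), not '>=' (tok::greaterequal). Illustrative token streams, in comments only:

    // ['>>', '='] printed with the space:  a >> = b   (still two operators)
    // printed without it:                  a >>= b    (re-lexes as one '>>=')
    // '>=' never needed the entry: "a >== b" re-lexes as ['>=', '='] anyway
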
diff --git a/lib/Parse/AttributeList.cpp b/lib/Parse/AttributeList.cpp
index a66dd96b2599..3cd74d57f56d 100644
--- a/lib/Parse/AttributeList.cpp
+++ b/lib/Parse/AttributeList.cpp
@@ -92,7 +92,6 @@ AttributeList::Kind AttributeList::getKind(const IdentifierInfo *Name) {
.Case("dllimport", AT_dllimport)
.Case("dllexport", AT_dllexport)
.Case("may_alias", IgnoredAttribute) // FIXME: TBAA
- .Case("gcc_tdiag", IgnoredAttribute) // GCC diagnostics type checking.
.Case("base_check", AT_base_check)
.Case("deprecated", AT_deprecated)
.Case("visibility", AT_visibility)
diff --git a/lib/Parse/DeclSpec.cpp b/lib/Parse/DeclSpec.cpp
index 4a699e7ad5e5..11865ab97b84 100644
--- a/lib/Parse/DeclSpec.cpp
+++ b/lib/Parse/DeclSpec.cpp
@@ -481,7 +481,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
if (TypeSpecComplex != TSC_unspecified) {
if (TypeSpecType == TST_unspecified) {
Diag(D, TSCLoc, SrcMgr, diag::ext_plain_complex)
- << CodeModificationHint::CreateInsertion(
+ << FixItHint::CreateInsertion(
PP.getLocForEndOfToken(getTypeSpecComplexLoc()),
" double");
TypeSpecType = TST_double; // _Complex -> _Complex double.
@@ -507,7 +507,7 @@ void DeclSpec::Finish(Diagnostic &D, Preprocessor &PP) {
Diag(D, SCLoc, SrcMgr, diag::err_friend_storage_spec)
<< SpecName
- << CodeModificationHint::CreateRemoval(SourceRange(SCLoc, SCEndLoc));
+ << FixItHint::CreateRemoval(SourceRange(SCLoc, SCEndLoc));
ClearStorageClassSpecs();
}
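
The first DeclSpec.cpp hunk above covers the GNU extension of writing `_Complex` with no base type; plain `_Complex` defaults to `_Complex double`, and the fix-it spells that out. Illustrative:

    _Complex c;         /* ext_plain_complex: no base type given */
    _Complex double d;  /* the " double" insertion; what plain _Complex means */
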
diff --git a/lib/Parse/ParseCXXInlineMethods.cpp b/lib/Parse/ParseCXXInlineMethods.cpp
index f1e639c2957d..87e22fa9dce0 100644
--- a/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/lib/Parse/ParseCXXInlineMethods.cpp
@@ -122,6 +122,9 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
Actions.ActOnDelayedCXXMethodParameter(CurScope, LM.DefaultArgs[I].Param);
if (CachedTokens *Toks = LM.DefaultArgs[I].Toks) {
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
// Parse the default argument from its saved token stream.
Toks->push_back(Tok); // So that the current token doesn't get lost
PP.EnterTokenStream(&Toks->front(), Toks->size(), true, false);
@@ -139,6 +142,15 @@ void Parser::ParseLexedMethodDeclarations(ParsingClass &Class) {
else
Actions.ActOnParamDefaultArgument(LM.DefaultArgs[I].Param, EqualLoc,
move(DefArgResult));
+
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseAssignmentExpression went over the default arg tokens!");
+ // There could be leftover tokens (e.g. because of an error).
+ // Skip through until we reach the original token position.
+ while (Tok.getLocation() != origLoc)
+ ConsumeAnyToken();
+
delete Toks;
LM.DefaultArgs[I].Toks = 0;
}
@@ -177,6 +189,9 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
if (LM.TemplateScope)
Actions.ActOnReenterTemplateScope(CurScope, LM.D);
+ // Save the current token position.
+ SourceLocation origLoc = Tok.getLocation();
+
assert(!LM.Toks.empty() && "Empty body!");
// Append the current token at the end of the new token stream so that it
// doesn't get lost.
@@ -195,6 +210,11 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
if (Tok.is(tok::kw_try)) {
ParseFunctionTryBlock(LM.D);
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "ParseFunctionTryBlock went over the cached tokens!");
+ assert(Tok.getLocation() == origLoc &&
+ "ParseFunctionTryBlock left tokens in the token stream!");
continue;
}
if (Tok.is(tok::colon))
@@ -204,6 +224,11 @@ void Parser::ParseLexedMethodDefs(ParsingClass &Class) {
// FIXME: What if ParseConstructorInitializer doesn't leave us with a '{'??
ParseFunctionStatementBody(LM.D);
+ assert(!PP.getSourceManager().isBeforeInTranslationUnit(origLoc,
+ Tok.getLocation()) &&
+ "We consumed more than the cached tokens!");
+ assert(Tok.getLocation() == origLoc &&
+ "Tokens were left in the token stream!");
}
for (unsigned I = 0, N = Class.NestedClasses.size(); I != N; ++I)
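
All of the ParseCXXInlineMethods.cpp additions guard one invariant: default arguments and method bodies in a class are cached as raw tokens and parsed only once the class is complete, and an erroneous input can leave some of those cached tokens unconsumed. Recording origLoc and skipping forward to it pins the parser back to a known position. A hypothetical (deliberately ill-formed) input exercising the default-argument path:

    struct S {
      void f(int x = +);   // error inside the cached default argument;
                           // leftover cached tokens are consumed until the
                           // parser is back at the saved origLoc
      void g() {}          // so later members still parse normally
    };
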
diff --git a/lib/Parse/ParseDecl.cpp b/lib/Parse/ParseDecl.cpp
index cff35b72c45a..e87d052141cc 100644
--- a/lib/Parse/ParseDecl.cpp
+++ b/lib/Parse/ParseDecl.cpp
@@ -734,7 +734,7 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
if (TagName) {
Diag(Loc, diag::err_use_of_tag_name_without_tag)
<< Tok.getIdentifierInfo() << TagName << getLang().CPlusPlus
- << CodeModificationHint::CreateInsertion(Tok.getLocation(),TagName);
+ << FixItHint::CreateInsertion(Tok.getLocation(),TagName);
// Parse this as a tag as if the missing tag were present.
if (TagKind == tok::kw_enum)
@@ -1360,7 +1360,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
DS.SetRangeEnd(EndProtoLoc);
Diag(Loc, diag::warn_objc_protocol_qualifier_missing_id)
- << CodeModificationHint::CreateInsertion(Loc, "id")
+ << FixItHint::CreateInsertion(Loc, "id")
<< SourceRange(Loc, EndProtoLoc);
// Need to support trailing type qualifiers (e.g. "id<p> const").
// If a type specifier follows, it will be diagnosed elsewhere.
@@ -1756,7 +1756,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
Diag(Tok, diag::ext_extra_struct_semi)
- << CodeModificationHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
continue;
}
@@ -1999,7 +1999,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, DeclPtrTy EnumDecl) {
!(getLang().C99 || getLang().CPlusPlus0x))
Diag(CommaLoc, diag::ext_enumerator_list_comma)
<< getLang().CPlusPlus
- << CodeModificationHint::CreateRemoval(CommaLoc);
+ << FixItHint::CreateRemoval(CommaLoc);
}
// Eat the }.
@@ -3009,7 +3009,7 @@ void Parser::ParseFunctionDeclarator(SourceLocation LParenLoc, Declarator &D,
// We have ellipsis without a preceding ',', which is ill-formed
// in C. Complain and provide the fix.
Diag(EllipsisLoc, diag::err_missing_comma_before_ellipsis)
- << CodeModificationHint::CreateInsertion(EllipsisLoc, ", ");
+ << FixItHint::CreateInsertion(EllipsisLoc, ", ");
}
}
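
In C, unlike C++ where the comma is optional, the last parameter must be separated from '...' by a comma, and the fix-it supplies it. Illustrative:

    void f(int n ...);    /* ill-formed in C: err_missing_comma_before_ellipsis */
    void f(int n, ...);   /* the ", " insertion the fix-it produces */
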
diff --git a/lib/Parse/ParseDeclCXX.cpp b/lib/Parse/ParseDeclCXX.cpp
index 9e232cbf325d..813c24ce3d58 100644
--- a/lib/Parse/ParseDeclCXX.cpp
+++ b/lib/Parse/ParseDeclCXX.cpp
@@ -837,7 +837,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Diag(TemplateId->TemplateNameLoc,
diag::err_explicit_instantiation_with_definition)
<< SourceRange(TemplateInfo.TemplateLoc)
- << CodeModificationHint::CreateInsertion(LAngleLoc, "<>");
+ << FixItHint::CreateInsertion(LAngleLoc, "<>");
// Create a fake template parameter list that contains only
// "template<>", so that we treat this construct as a class
@@ -1079,7 +1079,7 @@ Parser::BaseResult Parser::ParseBaseSpecifier(DeclPtrTy ClassDecl) {
if (IsVirtual) {
// Complain about duplicate 'virtual'
Diag(VirtualLoc, diag::err_dup_virtual)
- << CodeModificationHint::CreateRemoval(VirtualLoc);
+ << FixItHint::CreateRemoval(VirtualLoc);
}
IsVirtual = true;
@@ -1554,7 +1554,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
Diag(Tok, diag::ext_extra_struct_semi)
- << CodeModificationHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
continue;
}
@@ -1579,10 +1579,11 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// If attributes exist after class contents, parse them.
llvm::OwningPtr<AttributeList> AttrList;
if (Tok.is(tok::kw___attribute))
- AttrList.reset(ParseGNUAttributes()); // FIXME: where should I put them?
+ AttrList.reset(ParseGNUAttributes());
Actions.ActOnFinishCXXMemberSpecification(CurScope, RecordLoc, TagDecl,
- LBraceLoc, RBraceLoc);
+ LBraceLoc, RBraceLoc,
+ AttrList.get());
// C++ 9.2p2: Within the class member-specification, the class is regarded as
// complete within function bodies, default arguments,
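
This hunk resolves the old FIXME: GNU attributes trailing the member specification are now forwarded to Sema instead of being parsed and dropped. A hypothetical input with such a trailing attribute:

    // Attribute after the closing brace of the class body; it is now
    // passed to ActOnFinishCXXMemberSpecification rather than discarded.
    struct S { char c; int n; } __attribute__((packed));
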
diff --git a/lib/Parse/ParseExprCXX.cpp b/lib/Parse/ParseExprCXX.cpp
index f1e989f4a7b0..8528f8fe190c 100644
--- a/lib/Parse/ParseExprCXX.cpp
+++ b/lib/Parse/ParseExprCXX.cpp
@@ -246,7 +246,7 @@ bool Parser::ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS,
// recover like this.
PP.LookAhead(1).is(tok::identifier)) {
Diag(Next, diag::err_unexected_colon_in_nested_name_spec)
- << CodeModificationHint::CreateReplacement(Next.getLocation(), "::");
+ << FixItHint::CreateReplacement(Next.getLocation(), "::");
// Recover as if the user wrote '::'.
Next.setKind(tok::coloncolon);
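
A small recovery heuristic: while parsing a nested-name-specifier, a single ':' directly followed by an identifier is almost certainly a mistyped '::'. Illustrative, in comments (the misspelled diagnostic name, err_unexected_colon_in_nested_name_spec, is the historical identifier and is kept as-is):

    // std:string s;
    //    ^ diagnosed with a '::' replacement fix-it, after which the
    //      parser recovers as if "std::string s;" had been written
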
@@ -1313,7 +1313,7 @@ bool Parser::ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
Diag(TemplateId->TemplateNameLoc,
diag::err_out_of_line_constructor_template_id)
<< TemplateId->Name
- << CodeModificationHint::CreateRemoval(
+ << FixItHint::CreateRemoval(
SourceRange(TemplateId->LAngleLoc, TemplateId->RAngleLoc));
Result.setConstructorName(Actions.getTypeName(*TemplateId->Name,
TemplateId->TemplateNameLoc,
diff --git a/lib/Parse/ParseInit.cpp b/lib/Parse/ParseInit.cpp
index c4e79cae3f54..9154d8d59987 100644
--- a/lib/Parse/ParseInit.cpp
+++ b/lib/Parse/ParseInit.cpp
@@ -76,9 +76,8 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() {
SourceLocation ColonLoc = ConsumeToken();
Diag(Tok, diag::ext_gnu_old_style_field_designator)
- << CodeModificationHint::CreateReplacement(SourceRange(NameLoc,
- ColonLoc),
- NewSyntax.str());
+ << FixItHint::CreateReplacement(SourceRange(NameLoc, ColonLoc),
+ NewSyntax.str());
Designation D;
D.AddDesignator(Designator::getField(FieldName, SourceLocation(), NameLoc));
@@ -218,7 +217,7 @@ Parser::OwningExprResult Parser::ParseInitializerWithPotentialDesignator() {
(Desig.getDesignator(0).isArrayDesignator() ||
Desig.getDesignator(0).isArrayRangeDesignator())) {
Diag(Tok, diag::ext_gnu_missing_equal_designator)
- << CodeModificationHint::CreateInsertion(Tok.getLocation(), "= ");
+ << FixItHint::CreateInsertion(Tok.getLocation(), "= ");
return Actions.ActOnDesignatedInitializer(Desig, Tok.getLocation(),
true, ParseInitializer());
}
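
For reference, the GNU old-style field designator this fix-it rewrites, with the replacement it suggests (hypothetical input):

    struct Point { int x, y; };
    struct Point p = { x: 1, y: 2 };      /* ext_gnu_old_style_field_designator */
    struct Point q = { .x = 1, .y = 2 };  /* the replacement the fix-it offers */
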
diff --git a/lib/Parse/ParseObjc.cpp b/lib/Parse/ParseObjc.cpp
index 7b2b6e855bb7..9a3473f042ed 100644
--- a/lib/Parse/ParseObjc.cpp
+++ b/lib/Parse/ParseObjc.cpp
@@ -988,7 +988,7 @@ void Parser::ParseObjCClassInstanceVariables(DeclPtrTy interfaceDecl,
// Check for extraneous top-level semicolon.
if (Tok.is(tok::semi)) {
Diag(Tok, diag::ext_extra_struct_semi)
- << CodeModificationHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
continue;
}
@@ -1236,7 +1236,7 @@ Parser::DeclPtrTy Parser::ParseObjCAtImplementationDeclaration(
if (Tok.is(tok::l_brace)) // we have ivars
ParseObjCClassInstanceVariables(ImplClsType/*FIXME*/,
- tok::objc_protected, atLoc);
+ tok::objc_private, atLoc);
ObjCImpDecl = ImplClsType;
PendingObjCImpDecl.push_back(ObjCImpDecl);
@@ -1571,7 +1571,7 @@ Parser::DeclPtrTy Parser::ParseObjCMethodDefinition() {
if (Tok.is(tok::semi)) {
if (ObjCImpDecl) {
Diag(Tok, diag::warn_semicolon_before_method_body)
- << CodeModificationHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
}
ConsumeToken();
}
@@ -1817,9 +1817,12 @@ Parser::ParseObjCMessageExpressionBody(SourceLocation LBracLoc,
SkipUntil(tok::r_square);
return ExprError();
}
-
+
if (Tok.isNot(tok::r_square)) {
- Diag(Tok, diag::err_expected_rsquare);
+ if (Tok.is(tok::identifier))
+ Diag(Tok, diag::err_expected_colon);
+ else
+ Diag(Tok, diag::err_expected_rsquare);
// We must manually skip to a ']', otherwise the expression skipper will
// stop at the ']' when it skips to the ';'. We want it to skip beyond
// the enclosing expression.
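
The refined diagnostic targets a common slip in keyword message sends: when an identifier shows up where ']' was expected, the likeliest cause is a selector piece missing its ':', so "expected ':'" is the more useful complaint. An assumed Objective-C illustration, shown in comments (not from this commit's tests):

    // [obj setX: 1 y 2];
    //              ^ identifier where ']' was expected: the parser now
    //                emits err_expected_colon instead of err_expected_rsquare
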
diff --git a/lib/Parse/ParseStmt.cpp b/lib/Parse/ParseStmt.cpp
index 9fd145dc2676..b752b48cfd49 100644
--- a/lib/Parse/ParseStmt.cpp
+++ b/lib/Parse/ParseStmt.cpp
@@ -125,10 +125,12 @@ Parser::ParseStatementOrDeclaration(bool OnlyStatement) {
// expression[opt] ';'
OwningExprResult Expr(ParseExpression());
if (Expr.isInvalid()) {
- // If the expression is invalid, skip ahead to the next semicolon. Not
- // doing this opens us up to the possibility of infinite loops if
+ // If the expression is invalid, skip ahead to the next semicolon or '}'.
+ // Not doing this opens us up to the possibility of infinite loops if
// ParseExpression does not consume any tokens.
- SkipUntil(tok::semi);
+ SkipUntil(tok::r_brace, /*StopAtSemi=*/true, /*DontConsume=*/true);
+ if (Tok.is(tok::semi))
+ ConsumeToken();
return StmtError();
}
// Otherwise, eat the semicolon.
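
Stopping at a '}' without consuming it keeps expression-error recovery from swallowing the end of the enclosing block. A hypothetical, deliberately ill-formed input:

    void f() {
      1 +* 2        // invalid expression with no ';' before the '}':
    }               // recovery now halts at the '}' instead of eating it,
    void g() { }    // so this next function still parses cleanly
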
diff --git a/lib/Parse/ParseTemplate.cpp b/lib/Parse/ParseTemplate.cpp
index 12f26bfcb94e..ff6995340151 100644
--- a/lib/Parse/ParseTemplate.cpp
+++ b/lib/Parse/ParseTemplate.cpp
@@ -660,7 +660,7 @@ Parser::ParseTemplateIdAfterTemplateName(TemplateTy Template,
ReplaceStr = "> > ";
Diag(Tok.getLocation(), diag::err_two_right_angle_brackets_need_space)
- << CodeModificationHint::CreateReplacement(
+ << FixItHint::CreateReplacement(
SourceRange(Tok.getLocation()), ReplaceStr);
}
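
The classic C++98/03 pitfall behind this diagnostic, with the "> > " replacement it offers (note the suggestion keeps a trailing space so the following token is not pasted). Illustrative:

    #include <vector>
    // std::vector<std::vector<int>> a;  // C++03 lexes '>>' as one token:
    //                                   // err_two_right_angle_brackets_need_space
    std::vector<std::vector<int> > b;    // the form the fix-it writes
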
diff --git a/lib/Parse/Parser.cpp b/lib/Parse/Parser.cpp
index d45aaed70e54..489586c36f54 100644
--- a/lib/Parse/Parser.cpp
+++ b/lib/Parse/Parser.cpp
@@ -95,8 +95,8 @@ void Parser::SuggestParentheses(SourceLocation Loc, unsigned DK,
}
Diag(Loc, DK)
- << CodeModificationHint::CreateInsertion(ParenRange.getBegin(), "(")
- << CodeModificationHint::CreateInsertion(EndLoc, ")");
+ << FixItHint::CreateInsertion(ParenRange.getBegin(), "(")
+ << FixItHint::CreateInsertion(EndLoc, ")");
}
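
SuggestParentheses emits a paired insertion: '(' at the start of the range and ')' after its end. An illustrative before/after, using the assignment-in-condition warning as an assumed caller (the pairing is not shown in this hunk):

    if (x = y) { }    // likely meant '=='; the warning fires here
    if ((x = y)) { }  // the two CreateInsertion hints produce this form
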
/// MatchRHSPunctuation - For punctuation with a LHS and RHS (e.g. '['/']'),
@@ -146,7 +146,7 @@ bool Parser::ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned DiagID,
// Show what code to insert to fix this problem.
Diag(EndLoc, DiagID)
<< Msg
- << CodeModificationHint::CreateInsertion(EndLoc, Spelling);
+ << FixItHint::CreateInsertion(EndLoc, Spelling);
} else
Diag(Tok, DiagID) << Msg;
@@ -395,7 +395,7 @@ Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(CXX0XAttributeList Attr)
case tok::semi:
if (!getLang().CPlusPlus0x)
Diag(Tok, diag::ext_top_level_semi)
- << CodeModificationHint::CreateRemoval(Tok.getLocation());
+ << FixItHint::CreateRemoval(Tok.getLocation());
ConsumeToken();
// TODO: Invoke action for top-level semicolon.
@@ -829,7 +829,7 @@ Parser::OwningExprResult Parser::ParseSimpleAsm(SourceLocation *EndLoc) {
PP.getLocForEndOfToken(Tok.getLocation()));
Diag(Tok, diag::warn_file_asm_volatile)
- << CodeModificationHint::CreateRemoval(RemovalRange);
+ << FixItHint::CreateRemoval(RemovalRange);
ConsumeToken();
}
diff --git a/lib/Sema/AnalysisBasedWarnings.cpp b/lib/Sema/AnalysisBasedWarnings.cpp
index c4ceec0f8111..d1f00ca66d0a 100644
--- a/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/lib/Sema/AnalysisBasedWarnings.cpp
@@ -154,7 +154,7 @@ static ControlFlowKind CheckFallThrough(AnalysisContext &AC) {
continue;
}
Expr *CEE = C->getCallee()->IgnoreParenCasts();
- if (CEE->getType().getNoReturnAttr()) {
+ if (getFunctionExtInfo(CEE->getType()).getNoReturn()) {
NoReturnEdge = true;
HasFakeEdge = true;
} else if (DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(CEE)) {
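
The noreturn bit has moved off the QualType accessor and into the FunctionType's ExtInfo; getFunctionExtInfo digs through pointer and reference types to find it. The property this fall-through analysis consumes, illustratively:

    extern void fatal(const char *msg) __attribute__((noreturn));

    int classify(int v) {
      if (v > 0) return 1;
      fatal("bad value");  // call to a noreturn function: treated as an edge
    }                      // that never falls through, so no "control reaches
                           // end of non-void function" warning is issued
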
@@ -189,7 +189,7 @@ struct CheckFallThroughDiagnostics {
unsigned diag_AlwaysFallThrough_ReturnsNonVoid;
unsigned diag_NeverFallThroughOrReturn;
bool funMode;
-
+
static CheckFallThroughDiagnostics MakeForFunction() {
CheckFallThroughDiagnostics D;
D.diag_MaybeFallThrough_HasNoReturn =
@@ -205,7 +205,7 @@ struct CheckFallThroughDiagnostics {
D.funMode = true;
return D;
}
-
+
static CheckFallThroughDiagnostics MakeForBlock() {
CheckFallThroughDiagnostics D;
D.diag_MaybeFallThrough_HasNoReturn =
@@ -221,7 +221,7 @@ struct CheckFallThroughDiagnostics {
D.funMode = false;
return D;
}
-
+
bool checkDiagnostics(Diagnostic &D, bool ReturnsVoid,
bool HasNoReturn) const {
if (funMode) {
@@ -232,7 +232,7 @@ struct CheckFallThroughDiagnostics {
&& (D.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
== Diagnostic::Ignored || !ReturnsVoid);
}
-
+
// For blocks.
return ReturnsVoid && !HasNoReturn
&& (D.getDiagnosticLevel(diag::warn_suggest_noreturn_block)
@@ -255,14 +255,14 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
ReturnsVoid = FD->getResultType()->isVoidType();
HasNoReturn = FD->hasAttr<NoReturnAttr>() ||
- FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
+ FD->getType()->getAs<FunctionType>()->getNoReturnAttr();
}
else if (const ObjCMethodDecl *MD = dyn_cast<ObjCMethodDecl>(D)) {
ReturnsVoid = MD->getResultType()->isVoidType();
HasNoReturn = MD->hasAttr<NoReturnAttr>();
}
else if (isa<BlockDecl>(D)) {
- if (const FunctionType *FT =
+ if (const FunctionType *FT =
BlockTy->getPointeeType()->getAs<FunctionType>()) {
if (FT->getResultType()->isVoidType())
ReturnsVoid = true;
@@ -276,7 +276,7 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
// Short circuit for compilation speed.
if (CD.checkDiagnostics(Diags, ReturnsVoid, HasNoReturn))
return;
-
+
// FIXME: Function try block
if (const CompoundStmt *Compound = dyn_cast<CompoundStmt>(Body)) {
switch (CheckFallThrough(AC)) {
@@ -312,25 +312,23 @@ static void CheckFallThroughForBody(Sema &S, const Decl *D, const Stmt *Body,
// warnings on a function, method, or block.
//===----------------------------------------------------------------------===//
-clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) : S(s) {
- Diagnostic &D = S.getDiagnostics();
-
+clang::sema::AnalysisBasedWarnings::Policy::Policy() {
enableCheckFallThrough = 1;
+ enableCheckUnreachable = 0;
+}
- enableCheckUnreachable = (unsigned)
+clang::sema::AnalysisBasedWarnings::AnalysisBasedWarnings(Sema &s) : S(s) {
+ Diagnostic &D = S.getDiagnostics();