author     Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:28 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2017-04-16 16:02:28 +0000
commit     7442d6faa2719e4e7d33a7021c406c5a4facd74d (patch)
tree       c72b9241553fc9966179aba84f90f17bfa9235c3 /lib
parent     b52119637f743680a99710ce5fdb6646da2772af (diff)
Vendor import of clang trunk r300422 (tag: vendor/clang/clang-trunk-r300422)

Notes:
    svn path=/vendor/clang/dist/; revision=317019
    svn path=/vendor/clang/clang-trunk-r300422/; revision=317020; tag=vendor/clang/clang-trunk-r300422
Diffstat (limited to 'lib')
-rw-r--r--  lib/AST/ASTContext.cpp | 207
-rw-r--r--  lib/AST/ASTDumper.cpp | 48
-rw-r--r--  lib/AST/ASTImporter.cpp | 363
-rw-r--r--  lib/AST/CMakeLists.txt | 2
-rw-r--r--  lib/AST/CXXInheritance.cpp | 4
-rw-r--r--  lib/AST/Decl.cpp | 54
-rw-r--r--  lib/AST/DeclBase.cpp | 1
-rw-r--r--  lib/AST/DeclCXX.cpp | 84
-rw-r--r--  lib/AST/DeclObjC.cpp | 4
-rw-r--r--  lib/AST/DeclPrinter.cpp | 25
-rw-r--r--  lib/AST/DeclTemplate.cpp | 37
-rw-r--r--  lib/AST/DeclarationName.cpp | 105
-rw-r--r--  lib/AST/Expr.cpp | 29
-rw-r--r--  lib/AST/ExprCXX.cpp | 10
-rw-r--r--  lib/AST/ExprClassification.cpp | 4
-rw-r--r--  lib/AST/ExprConstant.cpp | 337
-rw-r--r--  lib/AST/ExternalASTMerger.cpp | 185
-rw-r--r--  lib/AST/ExternalASTSource.cpp | 5
-rw-r--r--  lib/AST/ItaniumMangle.cpp | 38
-rw-r--r--  lib/AST/Mangle.cpp | 8
-rw-r--r--  lib/AST/MicrosoftMangle.cpp | 57
-rw-r--r--  lib/AST/NSAPI.cpp | 1
-rw-r--r--  lib/AST/ODRHash.cpp | 361
-rw-r--r--  lib/AST/OpenMPClause.cpp | 12
-rw-r--r--  lib/AST/Stmt.cpp | 2
-rw-r--r--  lib/AST/StmtCXX.cpp | 27
-rw-r--r--  lib/AST/StmtOpenMP.cpp | 40
-rw-r--r--  lib/AST/StmtPrinter.cpp | 7
-rw-r--r--  lib/AST/StmtProfile.cpp | 237
-rw-r--r--  lib/AST/TemplateBase.cpp | 4
-rw-r--r--  lib/AST/Type.cpp | 140
-rw-r--r--  lib/AST/TypeLoc.cpp | 1
-rw-r--r--  lib/AST/TypePrinter.cpp | 40
-rw-r--r--  lib/ASTMatchers/Dynamic/Registry.cpp | 7
-rw-r--r--  lib/Analysis/BodyFarm.cpp | 4
-rw-r--r--  lib/Analysis/CFG.cpp | 2
-rw-r--r--  lib/Analysis/CallGraph.cpp | 1
-rw-r--r--  lib/Analysis/CloneDetection.cpp | 887
-rw-r--r--  lib/Analysis/OSLog.cpp | 3
-rw-r--r--  lib/Analysis/ReachableCode.cpp | 19
-rw-r--r--  lib/Analysis/ThreadSafetyTIL.cpp | 4
-rw-r--r--  lib/Basic/CMakeLists.txt | 2
-rw-r--r--  lib/Basic/Diagnostic.cpp | 171
-rw-r--r--  lib/Basic/DiagnosticIDs.cpp | 5
-rw-r--r--  lib/Basic/FileManager.cpp | 7
-rw-r--r--  lib/Basic/IdentifierTable.cpp | 21
-rw-r--r--  lib/Basic/LangOptions.cpp | 2
-rw-r--r--  lib/Basic/MemoryBufferCache.cpp | 48
-rw-r--r--  lib/Basic/Module.cpp | 2
-rw-r--r--  lib/Basic/OpenMPKinds.cpp | 73
-rw-r--r--  lib/Basic/SourceManager.cpp | 13
-rw-r--r--  lib/Basic/TargetInfo.cpp | 5
-rw-r--r--  lib/Basic/Targets.cpp | 861
-rw-r--r--  lib/Basic/Version.cpp | 2
-rw-r--r--  lib/Basic/VirtualFileSystem.cpp | 24
-rw-r--r--  lib/Basic/XRayLists.cpp | 53
-rw-r--r--  lib/CodeGen/ABIInfo.h | 1
-rw-r--r--  lib/CodeGen/BackendUtil.cpp | 416
-rw-r--r--  lib/CodeGen/CGAtomic.cpp | 2
-rw-r--r--  lib/CodeGen/CGBlocks.cpp | 419
-rw-r--r--  lib/CodeGen/CGBuiltin.cpp | 261
-rw-r--r--  lib/CodeGen/CGCUDANV.cpp | 2
-rw-r--r--  lib/CodeGen/CGCXX.cpp | 2
-rw-r--r--  lib/CodeGen/CGCXXABI.h | 27
-rw-r--r--  lib/CodeGen/CGCall.cpp | 588
-rw-r--r--  lib/CodeGen/CGCall.h | 52
-rw-r--r--  lib/CodeGen/CGClass.cpp | 66
-rw-r--r--  lib/CodeGen/CGCleanup.cpp | 43
-rw-r--r--  lib/CodeGen/CGCoroutine.cpp | 277
-rw-r--r--  lib/CodeGen/CGDebugInfo.cpp | 252
-rw-r--r--  lib/CodeGen/CGDebugInfo.h | 67
-rw-r--r--  lib/CodeGen/CGDecl.cpp | 81
-rw-r--r--  lib/CodeGen/CGDeclCXX.cpp | 2
-rw-r--r--  lib/CodeGen/CGException.cpp | 17
-rw-r--r--  lib/CodeGen/CGExpr.cpp | 273
-rw-r--r--  lib/CodeGen/CGExprAgg.cpp | 9
-rw-r--r--  lib/CodeGen/CGExprCXX.cpp | 50
-rw-r--r--  lib/CodeGen/CGExprComplex.cpp | 16
-rw-r--r--  lib/CodeGen/CGExprScalar.cpp | 217
-rw-r--r--  lib/CodeGen/CGGPUBuiltin.cpp (renamed from lib/CodeGen/CGCUDABuiltin.cpp) | 13
-rw-r--r--  lib/CodeGen/CGObjC.cpp | 119
-rw-r--r--  lib/CodeGen/CGObjCGNU.cpp | 4
-rw-r--r--  lib/CodeGen/CGObjCMac.cpp | 26
-rw-r--r--  lib/CodeGen/CGOpenCLRuntime.cpp | 3
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.cpp | 306
-rw-r--r--  lib/CodeGen/CGOpenMPRuntime.h | 53
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.cpp | 1710
-rw-r--r--  lib/CodeGen/CGOpenMPRuntimeNVPTX.h | 128
-rw-r--r--  lib/CodeGen/CGStmt.cpp | 10
-rw-r--r--  lib/CodeGen/CGStmtOpenMP.cpp | 298
-rw-r--r--  lib/CodeGen/CGVTables.cpp | 20
-rw-r--r--  lib/CodeGen/CMakeLists.txt | 4
-rw-r--r--  lib/CodeGen/CodeGenAction.cpp | 293
-rw-r--r--  lib/CodeGen/CodeGenFunction.cpp | 81
-rw-r--r--  lib/CodeGen/CodeGenFunction.h | 173
-rw-r--r--  lib/CodeGen/CodeGenModule.cpp | 199
-rw-r--r--  lib/CodeGen/CodeGenModule.h | 63
-rw-r--r--  lib/CodeGen/CodeGenPGO.cpp | 34
-rw-r--r--  lib/CodeGen/CodeGenPGO.h | 3
-rw-r--r--  lib/CodeGen/CodeGenTypes.cpp | 4
-rw-r--r--  lib/CodeGen/CodeGenTypes.h | 7
-rw-r--r--  lib/CodeGen/ConstantBuilder.h | 444
-rw-r--r--  lib/CodeGen/ConstantInitBuilder.cpp | 280
-rw-r--r--  lib/CodeGen/CoverageMappingGen.cpp | 10
-rw-r--r--  lib/CodeGen/EHScopeStack.h | 2
-rw-r--r--  lib/CodeGen/ItaniumCXXABI.cpp | 86
-rw-r--r--  lib/CodeGen/MacroPPCallbacks.cpp | 207
-rw-r--r--  lib/CodeGen/MacroPPCallbacks.h | 117
-rw-r--r--  lib/CodeGen/MicrosoftCXXABI.cpp | 82
-rw-r--r--  lib/CodeGen/ModuleBuilder.cpp | 8
-rw-r--r--  lib/CodeGen/ObjectFilePCHContainerOperations.cpp | 9
-rw-r--r--  lib/CodeGen/TargetInfo.cpp | 121
-rw-r--r--  lib/Driver/CMakeLists.txt | 43
-rw-r--r--  lib/Driver/CrossWindowsToolChain.cpp | 125
-rw-r--r--  lib/Driver/Driver.cpp | 185
-rw-r--r--  lib/Driver/DriverOptions.cpp | 4
-rw-r--r--  lib/Driver/Job.cpp | 26
-rw-r--r--  lib/Driver/MSVCToolChain.cpp | 892
-rw-r--r--  lib/Driver/MinGWToolChain.cpp | 257
-rw-r--r--  lib/Driver/Multilib.cpp | 2
-rw-r--r--  lib/Driver/SanitizerArgs.cpp | 13
-rw-r--r--  lib/Driver/ToolChain.cpp | 27
-rw-r--r--  lib/Driver/ToolChains.cpp | 5342
-rw-r--r--  lib/Driver/ToolChains.h | 1388
-rw-r--r--  lib/Driver/ToolChains/AMDGPU.cpp | 45
-rw-r--r--  lib/Driver/ToolChains/AMDGPU.h | 54
-rw-r--r--  lib/Driver/ToolChains/AVR.cpp | 44
-rw-r--r--  lib/Driver/ToolChains/AVR.h | 49
-rw-r--r--  lib/Driver/ToolChains/Arch/AArch64.cpp | 199
-rw-r--r--  lib/Driver/ToolChains/Arch/AArch64.h | 35
-rw-r--r--  lib/Driver/ToolChains/Arch/ARM.cpp | 547
-rw-r--r--  lib/Driver/ToolChains/Arch/ARM.h | 60
-rw-r--r--  lib/Driver/ToolChains/Arch/Mips.cpp | 403
-rw-r--r--  lib/Driver/ToolChains/Arch/Mips.h | 62
-rw-r--r--  lib/Driver/ToolChains/Arch/PPC.cpp | 131
-rw-r--r--  lib/Driver/ToolChains/Arch/PPC.h | 45
-rw-r--r--  lib/Driver/ToolChains/Arch/Sparc.cpp | 100
-rw-r--r--  lib/Driver/ToolChains/Arch/Sparc.h | 42
-rw-r--r--  lib/Driver/ToolChains/Arch/SystemZ.cpp | 41
-rw-r--r--  lib/Driver/ToolChains/Arch/SystemZ.h | 32
-rw-r--r--  lib/Driver/ToolChains/Arch/X86.cpp | 173
-rw-r--r--  lib/Driver/ToolChains/Arch/X86.h | 37
-rw-r--r--  lib/Driver/ToolChains/Bitrig.cpp | 190
-rw-r--r--  lib/Driver/ToolChains/Bitrig.h | 79
-rw-r--r--  lib/Driver/ToolChains/Clang.cpp | 5160
-rw-r--r--  lib/Driver/ToolChains/Clang.h | 149
-rw-r--r--  lib/Driver/ToolChains/CloudABI.cpp | 145
-rw-r--r--  lib/Driver/ToolChains/CloudABI.h | 69
-rw-r--r--  lib/Driver/ToolChains/CommonArgs.cpp | 973
-rw-r--r--  lib/Driver/ToolChains/CommonArgs.h | 96
-rw-r--r--  lib/Driver/ToolChains/Contiki.cpp | 28
-rw-r--r--  lib/Driver/ToolChains/Contiki.h | 38
-rw-r--r--  lib/Driver/ToolChains/CrossWindows.cpp | 309
-rw-r--r--  lib/Driver/ToolChains/CrossWindows.h | 88
-rw-r--r--  lib/Driver/ToolChains/Cuda.cpp | 488
-rw-r--r--  lib/Driver/ToolChains/Cuda.h | 177
-rw-r--r--  lib/Driver/ToolChains/Darwin.cpp | 1910
-rw-r--r--  lib/Driver/ToolChains/Darwin.h | 488
-rw-r--r--  lib/Driver/ToolChains/DragonFly.cpp | 197
-rw-r--r--  lib/Driver/ToolChains/DragonFly.h | 68
-rw-r--r--  lib/Driver/ToolChains/FreeBSD.cpp | 395
-rw-r--r--  lib/Driver/ToolChains/FreeBSD.h | 86
-rw-r--r--  lib/Driver/ToolChains/Fuchsia.cpp | 229
-rw-r--r--  lib/Driver/ToolChains/Fuchsia.h | 79
-rw-r--r--  lib/Driver/ToolChains/Gnu.cpp | 2426
-rw-r--r--  lib/Driver/ToolChains/Gnu.h | 351
-rw-r--r--  lib/Driver/ToolChains/Haiku.cpp | 33
-rw-r--r--  lib/Driver/ToolChains/Haiku.h | 40
-rw-r--r--  lib/Driver/ToolChains/Hexagon.cpp | 457
-rw-r--r--  lib/Driver/ToolChains/Hexagon.h | 99
-rw-r--r--  lib/Driver/ToolChains/Lanai.h | 39
-rw-r--r--  lib/Driver/ToolChains/Linux.cpp | 901
-rw-r--r--  lib/Driver/ToolChains/Linux.h | 57
-rw-r--r--  lib/Driver/ToolChains/MSVC.cpp | 1421
-rw-r--r--  lib/Driver/ToolChains/MSVC.h | 141
-rw-r--r--  lib/Driver/ToolChains/MSVCSetupApi.h | 514
-rw-r--r--  lib/Driver/ToolChains/MinGW.cpp | 471
-rw-r--r--  lib/Driver/ToolChains/MinGW.h | 102
-rw-r--r--  lib/Driver/ToolChains/Minix.cpp | 109
-rw-r--r--  lib/Driver/ToolChains/Minix.h | 66
-rw-r--r--  lib/Driver/ToolChains/MipsLinux.cpp | 128
-rw-r--r--  lib/Driver/ToolChains/MipsLinux.h | 62
-rw-r--r--  lib/Driver/ToolChains/Myriad.cpp | 286
-rw-r--r--  lib/Driver/ToolChains/Myriad.h | 102
-rw-r--r--  lib/Driver/ToolChains/NaCl.cpp | 363
-rw-r--r--  lib/Driver/ToolChains/NaCl.h | 87
-rw-r--r--  lib/Driver/ToolChains/NetBSD.cpp | 412
-rw-r--r--  lib/Driver/ToolChains/NetBSD.h | 79
-rw-r--r--  lib/Driver/ToolChains/OpenBSD.cpp | 234
-rw-r--r--  lib/Driver/ToolChains/OpenBSD.h | 76
-rw-r--r--  lib/Driver/ToolChains/PS4CPU.cpp | 419
-rw-r--r--  lib/Driver/ToolChains/PS4CPU.h | 93
-rw-r--r--  lib/Driver/ToolChains/Solaris.cpp | 193
-rw-r--r--  lib/Driver/ToolChains/Solaris.h | 75
-rw-r--r--  lib/Driver/ToolChains/TCE.cpp | 47
-rw-r--r--  lib/Driver/ToolChains/TCE.h | 47
-rw-r--r--  lib/Driver/ToolChains/WebAssembly.cpp | 163
-rw-r--r--  lib/Driver/ToolChains/WebAssembly.h | 77
-rw-r--r--  lib/Driver/ToolChains/XCore.cpp | 149
-rw-r--r--  lib/Driver/ToolChains/XCore.h | 82
-rw-r--r--  lib/Driver/Tools.cpp | 12226
-rw-r--r--  lib/Driver/Tools.h | 1010
-rw-r--r--  lib/Driver/XRayArgs.cpp | 114
-rw-r--r--  lib/Format/BreakableToken.cpp | 736
-rw-r--r--  lib/Format/BreakableToken.h | 362
-rw-r--r--  lib/Format/CMakeLists.txt | 2
-rw-r--r--  lib/Format/Comments.cpp | 36
-rw-r--r--  lib/Format/Comments.h | 33
-rw-r--r--  lib/Format/ContinuationIndenter.cpp | 262
-rw-r--r--  lib/Format/ContinuationIndenter.h | 19
-rw-r--r--  lib/Format/Format.cpp | 147
-rw-r--r--  lib/Format/FormatToken.h | 22
-rw-r--r--  lib/Format/FormatTokenLexer.cpp | 38
-rw-r--r--  lib/Format/FormatTokenLexer.h | 1
-rw-r--r--  lib/Format/NamespaceEndCommentsFixer.cpp | 175
-rw-r--r--  lib/Format/NamespaceEndCommentsFixer.h | 37
-rw-r--r--  lib/Format/TokenAnnotator.cpp | 223
-rw-r--r--  lib/Format/TokenAnnotator.h | 4
-rw-r--r--  lib/Format/UnwrappedLineFormatter.cpp | 46
-rw-r--r--  lib/Format/UnwrappedLineFormatter.h | 8
-rw-r--r--  lib/Format/UnwrappedLineParser.cpp | 286
-rw-r--r--  lib/Format/UnwrappedLineParser.h | 35
-rw-r--r--  lib/Format/WhitespaceManager.cpp | 274
-rw-r--r--  lib/Format/WhitespaceManager.h | 44
-rw-r--r--  lib/Frontend/ASTConsumers.cpp | 39
-rw-r--r--  lib/Frontend/ASTUnit.cpp | 32
-rw-r--r--  lib/Frontend/CompilerInstance.cpp | 57
-rw-r--r--  lib/Frontend/CompilerInvocation.cpp | 289
-rw-r--r--  lib/Frontend/FrontendAction.cpp | 89
-rw-r--r--  lib/Frontend/FrontendActions.cpp | 10
-rw-r--r--  lib/Frontend/InitPreprocessor.cpp | 17
-rw-r--r--  lib/Frontend/Rewrite/RewriteMacros.cpp | 2
-rw-r--r--  lib/Frontend/Rewrite/RewriteModernObjC.cpp | 6
-rw-r--r--  lib/Frontend/Rewrite/RewriteObjC.cpp | 6
-rw-r--r--  lib/FrontendTool/ExecuteCompilerInvocation.cpp | 2
-rw-r--r--  lib/Headers/CMakeLists.txt | 1
-rw-r--r--  lib/Headers/altivec.h | 44
-rw-r--r--  lib/Headers/avx2intrin.h | 2
-rw-r--r--  lib/Headers/avx512bwintrin.h | 104
-rw-r--r--  lib/Headers/avx512dqintrin.h | 95
-rw-r--r--  lib/Headers/avx512fintrin.h | 189
-rw-r--r--  lib/Headers/avx512vldqintrin.h | 42
-rw-r--r--  lib/Headers/avx512vlintrin.h | 45
-rw-r--r--  lib/Headers/avxintrin.h | 296
-rw-r--r--  lib/Headers/clzerointrin.h | 50
-rw-r--r--  lib/Headers/emmintrin.h | 38
-rw-r--r--  lib/Headers/f16cintrin.h | 10
-rw-r--r--  lib/Headers/htmxlintrin.h | 14
-rw-r--r--  lib/Headers/intrin.h | 50
-rw-r--r--  lib/Headers/mmintrin.h | 2
-rw-r--r--  lib/Headers/module.modulemap | 1
-rw-r--r--  lib/Headers/opencl-c.h | 884
-rw-r--r--  lib/Headers/pmmintrin.h | 12
-rw-r--r--  lib/Headers/prfchwintrin.h | 24
-rw-r--r--  lib/Headers/smmintrin.h | 2013
-rw-r--r--  lib/Headers/stdarg.h | 3
-rw-r--r--  lib/Headers/tgmath.h | 16
-rw-r--r--  lib/Headers/x86intrin.h | 4
-rw-r--r--  lib/Headers/xmmintrin.h | 14
-rw-r--r--  lib/Headers/xopintrin.h | 4
-rw-r--r--  lib/Index/CMakeLists.txt | 1
-rw-r--r--  lib/Index/CommentToXML.cpp | 6
-rw-r--r--  lib/Index/IndexBody.cpp | 16
-rw-r--r--  lib/Index/IndexDecl.cpp | 155
-rw-r--r--  lib/Index/IndexSymbol.cpp | 70
-rw-r--r--  lib/Index/IndexTypeSourceInfo.cpp | 32
-rw-r--r--  lib/Index/IndexingAction.cpp | 17
-rw-r--r--  lib/Index/IndexingContext.cpp | 93
-rw-r--r--  lib/Index/IndexingContext.h | 3
-rw-r--r--  lib/Index/USRGeneration.cpp | 20
-rw-r--r--  lib/Lex/HeaderSearch.cpp | 6
-rw-r--r--  lib/Lex/Lexer.cpp | 20
-rw-r--r--  lib/Lex/ModuleMap.cpp | 31
-rw-r--r--  lib/Lex/PPCaching.cpp | 30
-rw-r--r--  lib/Lex/PPDirectives.cpp | 11
-rw-r--r--  lib/Lex/PPMacroExpansion.cpp | 1
-rw-r--r--  lib/Lex/Pragma.cpp | 11
-rw-r--r--  lib/Lex/Preprocessor.cpp | 10
-rw-r--r--  lib/Parse/ParseCXXInlineMethods.cpp | 8
-rw-r--r--  lib/Parse/ParseDecl.cpp | 348
-rw-r--r--  lib/Parse/ParseDeclCXX.cpp | 193
-rw-r--r--  lib/Parse/ParseExpr.cpp | 83
-rw-r--r--  lib/Parse/ParseExprCXX.cpp | 91
-rw-r--r--  lib/Parse/ParseInit.cpp | 2
-rw-r--r--  lib/Parse/ParseObjc.cpp | 101
-rw-r--r--  lib/Parse/ParseOpenMP.cpp | 11
-rw-r--r--  lib/Parse/ParsePragma.cpp | 153
-rw-r--r--  lib/Parse/ParseStmt.cpp | 25
-rw-r--r--  lib/Parse/ParseStmtAsm.cpp | 19
-rw-r--r--  lib/Parse/ParseTemplate.cpp | 33
-rw-r--r--  lib/Parse/Parser.cpp | 52
-rw-r--r--  lib/Parse/RAIIObjectsForParser.h | 447
-rw-r--r--  lib/Sema/AnalysisBasedWarnings.cpp | 17
-rw-r--r--  lib/Sema/CoroutineStmtBuilder.h | 70
-rw-r--r--  lib/Sema/MultiplexExternalSemaSource.cpp | 9
-rw-r--r--  lib/Sema/ScopeInfo.cpp | 19
-rw-r--r--  lib/Sema/Sema.cpp | 33
-rw-r--r--  lib/Sema/SemaAttr.cpp | 17
-rw-r--r--  lib/Sema/SemaCUDA.cpp | 14
-rw-r--r--  lib/Sema/SemaCXXScopeSpec.cpp | 35
-rw-r--r--  lib/Sema/SemaCast.cpp | 40
-rw-r--r--  lib/Sema/SemaChecking.cpp | 210
-rw-r--r--  lib/Sema/SemaCodeComplete.cpp | 163
-rw-r--r--  lib/Sema/SemaCoroutine.cpp | 843
-rw-r--r--  lib/Sema/SemaDecl.cpp | 680
-rw-r--r--  lib/Sema/SemaDeclAttr.cpp | 308
-rw-r--r--  lib/Sema/SemaDeclCXX.cpp | 1057
-rw-r--r--  lib/Sema/SemaDeclObjC.cpp | 16
-rw-r--r--  lib/Sema/SemaExceptionSpec.cpp | 1
-rw-r--r--  lib/Sema/SemaExpr.cpp | 450
-rw-r--r--  lib/Sema/SemaExprCXX.cpp | 345
-rw-r--r--  lib/Sema/SemaExprMember.cpp | 47
-rw-r--r--  lib/Sema/SemaExprObjC.cpp | 136
-rw-r--r--  lib/Sema/SemaInit.cpp | 357
-rw-r--r--  lib/Sema/SemaLambda.cpp | 73
-rw-r--r--  lib/Sema/SemaLookup.cpp | 87
-rw-r--r--  lib/Sema/SemaObjCProperty.cpp | 37
-rw-r--r--  lib/Sema/SemaOpenMP.cpp | 562
-rw-r--r--  lib/Sema/SemaOverload.cpp | 177
-rw-r--r--  lib/Sema/SemaPseudoObject.cpp | 33
-rw-r--r--  lib/Sema/SemaStmt.cpp | 48
-rw-r--r--  lib/Sema/SemaStmtAsm.cpp | 5
-rw-r--r--  lib/Sema/SemaStmtAttr.cpp | 27
-rw-r--r--  lib/Sema/SemaTemplate.cpp | 807
-rw-r--r--  lib/Sema/SemaTemplateDeduction.cpp | 384
-rw-r--r--  lib/Sema/SemaTemplateInstantiate.cpp | 262
-rw-r--r--  lib/Sema/SemaTemplateInstantiateDecl.cpp | 208
-rw-r--r--  lib/Sema/SemaTemplateVariadic.cpp | 1
-rw-r--r--  lib/Sema/SemaType.cpp | 248
-rw-r--r--  lib/Sema/TreeTransform.h | 552
-rw-r--r--  lib/Serialization/ASTCommon.cpp | 4
-rw-r--r--  lib/Serialization/ASTReader.cpp | 1362
-rw-r--r--  lib/Serialization/ASTReaderDecl.cpp | 197
-rw-r--r--  lib/Serialization/ASTReaderStmt.cpp | 20
-rw-r--r--  lib/Serialization/ASTWriter.cpp | 386
-rw-r--r--  lib/Serialization/ASTWriterDecl.cpp | 20
-rw-r--r--  lib/Serialization/ASTWriterStmt.cpp | 16
-rw-r--r--  lib/Serialization/GeneratePCH.cpp | 7
-rw-r--r--  lib/Serialization/GlobalModuleIndex.cpp | 76
-rw-r--r--  lib/Serialization/Module.cpp | 22
-rw-r--r--  lib/Serialization/ModuleManager.cpp | 255
-rw-r--r--  lib/StaticAnalyzer/Checkers/AnalysisOrderChecker.cpp | 49
-rw-r--r--  lib/StaticAnalyzer/Checkers/BlockInCriticalSectionChecker.cpp | 70
-rw-r--r--  lib/StaticAnalyzer/Checkers/CMakeLists.txt | 1
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringChecker.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Checkers/CStringSyntaxChecker.cpp | 32
-rw-r--r--  lib/StaticAnalyzer/Checkers/CXXSelfAssignmentChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp | 76
-rw-r--r--  lib/StaticAnalyzer/Checkers/CastToStructChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/CheckerDocumentation.cpp | 14
-rw-r--r--  lib/StaticAnalyzer/Checkers/CloneChecker.cpp | 100
-rw-r--r--  lib/StaticAnalyzer/Checkers/ConversionChecker.cpp | 42
-rw-r--r--  lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp | 7
-rw-r--r--  lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp | 85
-rw-r--r--  lib/StaticAnalyzer/Checkers/IteratorPastEndChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/MacOSKeychainAPIChecker.cpp | 172
-rw-r--r--  lib/StaticAnalyzer/Checkers/MallocChecker.cpp | 49
-rw-r--r--  lib/StaticAnalyzer/Checkers/MisusedMovedObjectChecker.cpp | 481
-rw-r--r--  lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Checkers/ObjCPropertyChecker.cpp | 3
-rw-r--r--  lib/StaticAnalyzer/Checkers/RetainCountChecker.cpp | 12
-rw-r--r--  lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp | 26
-rw-r--r--  lib/StaticAnalyzer/Checkers/ValistChecker.cpp | 85
-rw-r--r--  lib/StaticAnalyzer/Core/AnalyzerOptions.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Core/BugReporterVisitors.cpp | 8
-rw-r--r--  lib/StaticAnalyzer/Core/CMakeLists.txt | 17
-rw-r--r--  lib/StaticAnalyzer/Core/CallEvent.cpp | 81
-rw-r--r--  lib/StaticAnalyzer/Core/CheckerManager.cpp | 12
-rw-r--r--  lib/StaticAnalyzer/Core/ConstraintManager.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Core/DynamicTypeMap.cpp | 2
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngine.cpp | 156
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineC.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineCXX.cpp | 9
-rw-r--r--  lib/StaticAnalyzer/Core/ExprEngineObjC.cpp | 4
-rw-r--r--  lib/StaticAnalyzer/Core/MemRegion.cpp | 62
-rw-r--r--  lib/StaticAnalyzer/Core/ProgramState.cpp | 15
-rw-r--r--  lib/StaticAnalyzer/Core/RangeConstraintManager.cpp | 102
-rw-r--r--  lib/StaticAnalyzer/Core/RangedConstraintManager.cpp | 204
-rw-r--r--  lib/StaticAnalyzer/Core/RangedConstraintManager.h (renamed from lib/StaticAnalyzer/Core/SimpleConstraintManager.h) | 69
-rw-r--r--  lib/StaticAnalyzer/Core/RegionStore.cpp | 10
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp | 230
-rw-r--r--  lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp | 102
-rw-r--r--  lib/StaticAnalyzer/Core/Store.cpp | 35
-rw-r--r--  lib/StaticAnalyzer/Core/Z3ConstraintManager.cpp | 1618
-rw-r--r--  lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp | 4
-rw-r--r--  lib/Tooling/CMakeLists.txt | 1
-rw-r--r--  lib/Tooling/Refactoring.cpp | 15
-rw-r--r--  lib/Tooling/Refactoring/AtomicChange.cpp | 178
-rw-r--r--  lib/Tooling/Refactoring/CMakeLists.txt | 12
-rw-r--r--  lib/Tooling/Tooling.cpp | 2
392 files changed, 51438 insertions, 31344 deletions
diff --git a/lib/AST/ASTContext.cpp b/lib/AST/ASTContext.cpp
index b531a66dbab3..7b337b061a03 100644
--- a/lib/AST/ASTContext.cpp
+++ b/lib/AST/ASTContext.cpp
@@ -703,6 +703,7 @@ static const LangAS::Map *getAddressSpaceMap(const TargetInfo &T,
// The fake address space map must have a distinct entry for each
// language-specific address space.
static const unsigned FakeAddrSpaceMap[] = {
+ 0, // Default
1, // opencl_global
3, // opencl_local
2, // opencl_constant
@@ -748,6 +749,8 @@ ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
ExternCContext(nullptr), MakeIntegerSeqDecl(nullptr),
TypePackElementDecl(nullptr), SourceMgr(SM), LangOpts(LOpts),
SanitizerBL(new SanitizerBlacklist(LangOpts.SanitizerBlacklistFiles, SM)),
+ XRayFilter(new XRayFunctionFilter(LangOpts.XRayAlwaysInstrumentFiles,
+ LangOpts.XRayNeverInstrumentFiles, SM)),
AddrSpaceMap(nullptr), Target(nullptr), AuxTarget(nullptr),
PrintingPolicy(LOpts), Idents(idents), Selectors(sels),
BuiltinInfo(builtins), DeclarationNames(*this), ExternalSource(nullptr),
@@ -1167,7 +1170,6 @@ void ASTContext::InitBuiltinTypes(const TargetInfo &Target,
InitBuiltinType(OCLEventTy, BuiltinType::OCLEvent);
InitBuiltinType(OCLClkEventTy, BuiltinType::OCLClkEvent);
InitBuiltinType(OCLQueueTy, BuiltinType::OCLQueue);
- InitBuiltinType(OCLNDRangeTy, BuiltinType::OCLNDRange);
InitBuiltinType(OCLReserveIDTy, BuiltinType::OCLReserveID);
}
@@ -1474,6 +1476,8 @@ CharUnits ASTContext::getDeclAlign(const Decl *D, bool ForAlignof) const {
}
}
Align = std::max(Align, getPreferredTypeAlign(T.getTypePtr()));
+ if (BaseT.getQualifiers().hasUnaligned())
+ Align = Target->getCharWidth();
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
if (VD->hasGlobalStorage() && !ForAlignof)
Align = std::max(Align, getTargetInfo().getMinGlobalAlign());
@@ -1775,7 +1779,6 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
// Currently these types are pointers to opaque types.
Width = Target->getPointerWidth(0);
@@ -1877,8 +1880,9 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(cast<SubstTemplateTypeParmType>(T)->
getReplacementType().getTypePtr());
- case Type::Auto: {
- const AutoType *A = cast<AutoType>(T);
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization: {
+ const DeducedType *A = cast<DeducedType>(T);
assert(!A->getDeducedType().isNull() &&
"cannot request the size of an undeduced or dependent auto type");
return getTypeInfo(A->getDeducedType().getTypePtr());
@@ -2691,8 +2695,7 @@ QualType ASTContext::getConstantArrayType(QualType EltTy,
// Convert the array size into a canonical width matching the pointer size for
// the target.
llvm::APInt ArySize(ArySizeIn);
- ArySize =
- ArySize.zextOrTrunc(Target->getPointerWidth(getTargetAddressSpace(EltTy)));
+ ArySize = ArySize.zextOrTrunc(Target->getMaxPointerWidth());
llvm::FoldingSetNodeID ID;
ConstantArrayType::Profile(ID, EltTy, ArySize, ASM, IndexTypeQuals);
@@ -2765,6 +2768,7 @@ QualType ASTContext::getVariableArrayDecayedType(QualType type) const {
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
llvm_unreachable("type should never be variably-modified");
@@ -3785,12 +3789,8 @@ QualType ASTContext::getDependentNameType(ElaboratedTypeKeyword Keyword,
QualType Canon) const {
if (Canon.isNull()) {
NestedNameSpecifier *CanonNNS = getCanonicalNestedNameSpecifier(NNS);
- ElaboratedTypeKeyword CanonKeyword = Keyword;
- if (Keyword == ETK_None)
- CanonKeyword = ETK_Typename;
-
- if (CanonNNS != NNS || CanonKeyword != Keyword)
- Canon = getDependentNameType(CanonKeyword, CanonNNS, Name);
+ if (CanonNNS != NNS)
+ Canon = getDependentNameType(Keyword, CanonNNS, Name);
}
llvm::FoldingSetNodeID ID;
@@ -3874,42 +3874,45 @@ ASTContext::getDependentTemplateSpecializationType(
return QualType(T, 0);
}
+TemplateArgument ASTContext::getInjectedTemplateArg(NamedDecl *Param) {
+ TemplateArgument Arg;
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
+ QualType ArgType = getTypeDeclType(TTP);
+ if (TTP->isParameterPack())
+ ArgType = getPackExpansionType(ArgType, None);
+
+ Arg = TemplateArgument(ArgType);
+ } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
+ Expr *E = new (*this) DeclRefExpr(
+ NTTP, /*enclosing*/false,
+ NTTP->getType().getNonLValueExprType(*this),
+ Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
+
+ if (NTTP->isParameterPack())
+ E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
+ None);
+ Arg = TemplateArgument(E);
+ } else {
+ auto *TTP = cast<TemplateTemplateParmDecl>(Param);
+ if (TTP->isParameterPack())
+ Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
+ else
+ Arg = TemplateArgument(TemplateName(TTP));
+ }
+
+ if (Param->isTemplateParameterPack())
+ Arg = TemplateArgument::CreatePackCopy(*this, Arg);
+
+ return Arg;
+}
+
void
ASTContext::getInjectedTemplateArgs(const TemplateParameterList *Params,
SmallVectorImpl<TemplateArgument> &Args) {
Args.reserve(Args.size() + Params->size());
- for (NamedDecl *Param : *Params) {
- TemplateArgument Arg;
- if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(Param)) {
- QualType ArgType = getTypeDeclType(TTP);
- if (TTP->isParameterPack())
- ArgType = getPackExpansionType(ArgType, None);
-
- Arg = TemplateArgument(ArgType);
- } else if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- Expr *E = new (*this) DeclRefExpr(
- NTTP, /*enclosing*/false,
- NTTP->getType().getNonLValueExprType(*this),
- Expr::getValueKindForType(NTTP->getType()), NTTP->getLocation());
-
- if (NTTP->isParameterPack())
- E = new (*this) PackExpansionExpr(DependentTy, E, NTTP->getLocation(),
- None);
- Arg = TemplateArgument(E);
- } else {
- auto *TTP = cast<TemplateTemplateParmDecl>(Param);
- if (TTP->isParameterPack())
- Arg = TemplateArgument(TemplateName(TTP), Optional<unsigned>());
- else
- Arg = TemplateArgument(TemplateName(TTP));
- }
-
- if (Param->isTemplateParameterPack())
- Arg = TemplateArgument::CreatePackCopy(*this, Arg);
-
- Args.push_back(Arg);
- }
+ for (NamedDecl *Param : *Params)
+ Args.push_back(getInjectedTemplateArg(Param));
}
QualType ASTContext::getPackExpansionType(QualType Pattern,
@@ -4439,6 +4442,28 @@ QualType ASTContext::getAutoType(QualType DeducedType, AutoTypeKeyword Keyword,
return QualType(AT, 0);
}
+/// Return the uniqued reference to the deduced template specialization type
+/// which has been deduced to the given type, or to the canonical undeduced
+/// such type, or the canonical deduced-but-dependent such type.
+QualType ASTContext::getDeducedTemplateSpecializationType(
+ TemplateName Template, QualType DeducedType, bool IsDependent) const {
+ // Look in the folding set for an existing type.
+ void *InsertPos = nullptr;
+ llvm::FoldingSetNodeID ID;
+ DeducedTemplateSpecializationType::Profile(ID, Template, DeducedType,
+ IsDependent);
+ if (DeducedTemplateSpecializationType *DTST =
+ DeducedTemplateSpecializationTypes.FindNodeOrInsertPos(ID, InsertPos))
+ return QualType(DTST, 0);
+
+ DeducedTemplateSpecializationType *DTST = new (*this, TypeAlignment)
+ DeducedTemplateSpecializationType(Template, DeducedType, IsDependent);
+ Types.push_back(DTST);
+ if (InsertPos)
+ DeducedTemplateSpecializationTypes.InsertNode(DTST, InsertPos);
+ return QualType(DTST, 0);
+}
+
/// getAtomicType - Return the uniqued reference to the atomic type for
/// the given value type.
QualType ASTContext::getAtomicType(QualType T) const {
@@ -5922,7 +5947,6 @@ static char getObjCEncodingForPrimitiveKind(const ASTContext *C,
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::OCLSampler:
case BuiltinType::Dependent:
@@ -6337,6 +6361,7 @@ void ASTContext::getObjCEncodingForTypeImpl(QualType T, std::string& S,
// We could see an undeduced auto type here during error recovery.
// Just ignore it.
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
return;
case Type::Pipe:
@@ -8043,20 +8068,12 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
Qualifiers LQuals = LHSCan.getLocalQualifiers();
Qualifiers RQuals = RHSCan.getLocalQualifiers();
if (LQuals != RQuals) {
- if (getLangOpts().OpenCL) {
- if (LHSCan.getUnqualifiedType() != RHSCan.getUnqualifiedType() ||
- LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers())
- return QualType();
- if (LQuals.isAddressSpaceSupersetOf(RQuals))
- return LHS;
- if (RQuals.isAddressSpaceSupersetOf(LQuals))
- return RHS;
- }
// If any of these qualifiers are different, we have a type
// mismatch.
if (LQuals.getCVRQualifiers() != RQuals.getCVRQualifiers() ||
LQuals.getAddressSpace() != RQuals.getAddressSpace() ||
- LQuals.getObjCLifetime() != RQuals.getObjCLifetime())
+ LQuals.getObjCLifetime() != RQuals.getObjCLifetime() ||
+ LQuals.hasUnaligned() != RQuals.hasUnaligned())
return QualType();
// Exactly one GC qualifier difference is allowed: __strong is
@@ -8136,6 +8153,7 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
llvm_unreachable("Non-canonical and dependent types shouldn't get here");
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
case Type::LValueReference:
case Type::RValueReference:
case Type::MemberPointer:
@@ -8175,6 +8193,20 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
LHSPointee = LHSPointee.getUnqualifiedType();
RHSPointee = RHSPointee.getUnqualifiedType();
}
+ if (getLangOpts().OpenCL) {
+ Qualifiers LHSPteeQual = LHSPointee.getQualifiers();
+ Qualifiers RHSPteeQual = RHSPointee.getQualifiers();
+ // Blocks can't be an expression in a ternary operator (OpenCL v2.0
+ // 6.12.5) thus the following check is asymmetric.
+ if (!LHSPteeQual.isAddressSpaceSupersetOf(RHSPteeQual))
+ return QualType();
+ LHSPteeQual.removeAddressSpace();
+ RHSPteeQual.removeAddressSpace();
+ LHSPointee =
+ QualType(LHSPointee.getTypePtr(), LHSPteeQual.getAsOpaqueValue());
+ RHSPointee =
+ QualType(RHSPointee.getTypePtr(), RHSPteeQual.getAsOpaqueValue());
+ }
QualType ResultType = mergeTypes(LHSPointee, RHSPointee, OfBlockPointer,
Unqualified);
if (ResultType.isNull()) return QualType();
@@ -8696,7 +8728,8 @@ static QualType DecodeTypeFromStr(const char *&Str, const ASTContext &Context,
char *End;
unsigned AddrSpace = strtoul(Str, &End, 10);
if (End != Str && AddrSpace != 0) {
- Type = Context.getAddrSpaceQualType(Type, AddrSpace);
+ Type = Context.getAddrSpaceQualType(Type, AddrSpace +
+ LangAS::Count);
Str = End;
}
if (c == '*')
@@ -8788,7 +8821,7 @@ static GVALinkage basicGVALinkageForFunction(const ASTContext &Context,
if (!FD->isExternallyVisible())
return GVA_Internal;
- GVALinkage External = GVA_StrongExternal;
+ GVALinkage External;
switch (FD->getTemplateSpecializationKind()) {
case TSK_Undeclared:
case TSK_ExplicitSpecialization:
@@ -8860,8 +8893,22 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
}
GVALinkage ASTContext::GetGVALinkageForFunction(const FunctionDecl *FD) const {
- return adjustGVALinkageForAttributes(
+ auto L = adjustGVALinkageForAttributes(
*this, basicGVALinkageForFunction(*this, FD), FD);
+ auto EK = ExternalASTSource::EK_ReplyHazy;
+ if (auto *Ext = getExternalSource())
+ EK = Ext->hasExternalDefinitions(FD);
+ switch (EK) {
+ case ExternalASTSource::EK_Never:
+ if (L == GVA_DiscardableODR)
+ return GVA_StrongODR;
+ break;
+ case ExternalASTSource::EK_Always:
+ return GVA_AvailableExternally;
+ case ExternalASTSource::EK_ReplyHazy:
+ break;
+ }
+ return L;
}
static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
@@ -8870,22 +8917,30 @@ static GVALinkage basicGVALinkageForVariable(const ASTContext &Context,
return GVA_Internal;
if (VD->isStaticLocal()) {
- GVALinkage StaticLocalLinkage = GVA_DiscardableODR;
const DeclContext *LexicalContext = VD->getParentFunctionOrMethod();
while (LexicalContext && !isa<FunctionDecl>(LexicalContext))
LexicalContext = LexicalContext->getLexicalParent();
- // Let the static local variable inherit its linkage from the nearest
- // enclosing function.
- if (LexicalContext)
- StaticLocalLinkage =
- Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
+ // ObjC Blocks can create local variables that don't have a FunctionDecl
+ // LexicalContext.
+ if (!LexicalContext)
+ return GVA_DiscardableODR;
- // GVA_StrongODR function linkage is stronger than what we need,
- // downgrade to GVA_DiscardableODR.
- // This allows us to discard the variable if we never end up needing it.
- return StaticLocalLinkage == GVA_StrongODR ? GVA_DiscardableODR
- : StaticLocalLinkage;
+ // Otherwise, let the static local variable inherit its linkage from the
+ // nearest enclosing function.
+ auto StaticLocalLinkage =
+ Context.GetGVALinkageForFunction(cast<FunctionDecl>(LexicalContext));
+
+ // Itanium ABI 5.2.2: "Each COMDAT group [for a static local variable] must
+ // be emitted in any object with references to the symbol for the object it
+ // contains, whether inline or out-of-line."
+ // Similar behavior is observed with MSVC. An alternative ABI could use
+ // StrongODR/AvailableExternally to match the function, but none are
+ // known/supported currently.
+ if (StaticLocalLinkage == GVA_StrongODR ||
+ StaticLocalLinkage == GVA_AvailableExternally)
+ return GVA_DiscardableODR;
+ return StaticLocalLinkage;
}
// MSVC treats in-class initialized static data members as definitions.
@@ -9002,10 +9057,12 @@ bool ASTContext::DeclMustBeEmitted(const Decl *D) {
}
}
+ GVALinkage Linkage = GetGVALinkageForFunction(FD);
+
// static, static inline, always_inline, and extern inline functions can
// always be deferred. Normal inline functions can be deferred in C99/C++.
// Implicit template instantiations can also be deferred in C++.
- return !isDiscardableGVALinkage(GetGVALinkageForFunction(FD));
+ return !isDiscardableGVALinkage(Linkage);
}
const VarDecl *VD = cast<VarDecl>(D);
@@ -9488,6 +9545,18 @@ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
return getTargetInfo().getNullPointerValue(AS);
}
+unsigned ASTContext::getTargetAddressSpace(unsigned AS) const {
+ // For OpenCL, only function local variables are not explicitly marked with
+ // an address space in the AST, and these need to be the address space of
+ // alloca.
+ if (!AS && LangOpts.OpenCL)
+ return getTargetInfo().getDataLayout().getAllocaAddrSpace();
+ if (AS >= LangAS::Count)
+ return AS - LangAS::Count;
+ else
+ return (*AddrSpaceMap)[AS];
+}
+
// Explicitly instantiate this in case a Redeclarable<T> is used from a TU that
// doesn't include ASTContext.h
template
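[Editor's note] The DeducedTemplateSpecializationType cases threaded through the ASTContext.cpp hunks above add AST support for C++17 class template argument deduction. As a hedged, self-contained illustration (not code from the patch), this is the source-level construct the new type node models:

    // C++17 class template argument deduction: 'Pair' is written below
    // without template arguments, so until the constructor call deduces
    // Pair<int, double>, the declared type is represented as a
    // DeducedTemplateSpecializationType. Hypothetical example.
    template <typename A, typename B> struct Pair {
      A First;
      B Second;
      Pair(A F, B S) : First(F), Second(S) {}
    };

    int main() {
      Pair P(1, 2.0); // deduced as Pair<int, double>
      return P.First;
    }
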
diff --git a/lib/AST/ASTDumper.cpp b/lib/AST/ASTDumper.cpp
index 62261ccc905b..ef491ab06f8c 100644
--- a/lib/AST/ASTDumper.cpp
+++ b/lib/AST/ASTDumper.cpp
@@ -102,22 +102,26 @@ namespace {
/// Pending[i] is an action to dump an entity at level i.
llvm::SmallVector<std::function<void(bool isLastChild)>, 32> Pending;
+ /// Indicates whether we should trigger deserialization of nodes that had
+ /// not already been loaded.
+ bool Deserialize = false;
+
/// Indicates whether we're at the top level.
- bool TopLevel;
+ bool TopLevel = true;
/// Indicates if we're handling the first child after entering a new depth.
- bool FirstChild;
+ bool FirstChild = true;
/// Prefix for currently-being-dumped entity.
std::string Prefix;
/// Keep track of the last location we print out so that we can
/// print out deltas from then on out.
- const char *LastLocFilename;
- unsigned LastLocLine;
+ const char *LastLocFilename = "";
+ unsigned LastLocLine = ~0U;
/// The \c FullComment parent of the comment being dumped.
- const FullComment *FC;
+ const FullComment *FC = nullptr;
bool ShowColors;
@@ -203,15 +207,14 @@ namespace {
public:
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM)
- : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
- LastLocFilename(""), LastLocLine(~0U), FC(nullptr),
+ : OS(OS), Traits(Traits), SM(SM),
ShowColors(SM && SM->getDiagnostics().getShowColors()) { }
ASTDumper(raw_ostream &OS, const CommandTraits *Traits,
const SourceManager *SM, bool ShowColors)
- : OS(OS), Traits(Traits), SM(SM), TopLevel(true), FirstChild(true),
- LastLocFilename(""), LastLocLine(~0U),
- ShowColors(ShowColors) { }
+ : OS(OS), Traits(Traits), SM(SM), ShowColors(ShowColors) {}
+
+ void setDeserialize(bool D) { Deserialize = D; }
void dumpDecl(const Decl *D);
void dumpStmt(const Stmt *S);
@@ -764,14 +767,15 @@ bool ASTDumper::hasNodes(const DeclContext *DC) {
return false;
return DC->hasExternalLexicalStorage() ||
- DC->noload_decls_begin() != DC->noload_decls_end();
+ (Deserialize ? DC->decls_begin() != DC->decls_end()
+ : DC->noload_decls_begin() != DC->noload_decls_end());
}
void ASTDumper::dumpDeclContext(const DeclContext *DC) {
if (!DC)
return;
- for (auto *D : DC->noload_decls())
+ for (auto *D : (Deserialize ? DC->decls() : DC->noload_decls()))
dumpDecl(D);
if (DC->hasExternalLexicalStorage()) {
@@ -795,11 +799,13 @@ void ASTDumper::dumpLookups(const DeclContext *DC, bool DumpDecls) {
bool HasUndeserializedLookups = Primary->hasExternalVisibleStorage();
- DeclContext::all_lookups_iterator I = Primary->noload_lookups_begin(),
- E = Primary->noload_lookups_end();
- while (I != E) {
+ for (auto I = Deserialize ? Primary->lookups_begin()
+ : Primary->noload_lookups_begin(),
+ E = Deserialize ? Primary->lookups_end()
+ : Primary->noload_lookups_end();
+ I != E; ++I) {
DeclarationName Name = I.getLookupName();
- DeclContextLookupResult R = *I++;
+ DeclContextLookupResult R = *I;
dumpChild([=] {
OS << "DeclarationName ";
@@ -1463,6 +1469,7 @@ void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
OS << " typename";
else
OS << " class";
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
dumpName(D);
@@ -1472,6 +1479,7 @@ void ASTDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
dumpType(D->getType());
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
dumpName(D);
@@ -1481,6 +1489,7 @@ void ASTDumper::VisitNonTypeTemplateParmDecl(const NonTypeTemplateParmDecl *D) {
void ASTDumper::VisitTemplateTemplateParmDecl(
const TemplateTemplateParmDecl *D) {
+ OS << " depth " << D->getDepth() << " index " << D->getIndex();
if (D->isParameterPack())
OS << " ...";
dumpName(D);
@@ -2504,9 +2513,10 @@ LLVM_DUMP_METHOD void Type::dump(llvm::raw_ostream &OS) const {
LLVM_DUMP_METHOD void Decl::dump() const { dump(llvm::errs()); }
-LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS) const {
+LLVM_DUMP_METHOD void Decl::dump(raw_ostream &OS, bool Deserialize) const {
ASTDumper P(OS, &getASTContext().getCommentCommandTraits(),
&getASTContext().getSourceManager());
+ P.setDeserialize(Deserialize);
P.dumpDecl(this);
}
@@ -2521,12 +2531,14 @@ LLVM_DUMP_METHOD void DeclContext::dumpLookups() const {
}
LLVM_DUMP_METHOD void DeclContext::dumpLookups(raw_ostream &OS,
- bool DumpDecls) const {
+ bool DumpDecls,
+ bool Deserialize) const {
const DeclContext *DC = this;
while (!DC->isTranslationUnit())
DC = DC->getParent();
ASTContext &Ctx = cast<TranslationUnitDecl>(DC)->getASTContext();
ASTDumper P(OS, &Ctx.getCommentCommandTraits(), &Ctx.getSourceManager());
+ P.setDeserialize(Deserialize);
P.dumpLookups(this, DumpDecls);
}
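[Editor's note] The ASTDumper.cpp hunks above thread a new Deserialize flag through the dumper: when set, dumpDeclContext and dumpLookups walk decls() / lookups(), pulling declarations in from a PCH or module file, instead of the noload_* variants that only show what is already loaded. A minimal caller sketch, assuming only the Decl::dump signature visible in this diff:

    #include "clang/AST/Decl.h"
    #include "llvm/Support/raw_ostream.h"

    // Dump a translation unit including declarations that still sit in an
    // external AST file; passing false keeps the prior behavior of
    // dumping only already-deserialized declarations.
    void dumpIncludingExternal(const clang::TranslationUnitDecl *TU) {
      TU->dump(llvm::errs(), /*Deserialize=*/true);
    }
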
diff --git a/lib/AST/ASTImporter.cpp b/lib/AST/ASTImporter.cpp
index 1ccb746633a7..95492825eb9d 100644
--- a/lib/AST/ASTImporter.cpp
+++ b/lib/AST/ASTImporter.cpp
@@ -72,7 +72,7 @@ namespace clang {
QualType VisitEnumType(const EnumType *T);
QualType VisitAttributedType(const AttributedType *T);
QualType VisitTemplateTypeParmType(const TemplateTypeParmType *T);
- // FIXME: SubstTemplateTypeParmType
+ QualType VisitSubstTemplateTypeParmType(const SubstTemplateTypeParmType *T);
QualType VisitTemplateSpecializationType(const TemplateSpecializationType *T);
QualType VisitElaboratedType(const ElaboratedType *T);
// FIXME: DependentNameType
@@ -278,6 +278,8 @@ namespace clang {
Expr *VisitArrayInitIndexExpr(ArrayInitIndexExpr *E);
Expr *VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E);
Expr *VisitCXXNamedCastExpr(CXXNamedCastExpr *E);
+ Expr *VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E);
+
template<typename IIter, typename OIter>
void ImportArray(IIter Ibegin, IIter Iend, OIter Obegin) {
@@ -397,6 +399,9 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
QualType T1, QualType T2);
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2);
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateArgument &Arg1,
+ const TemplateArgument &Arg2);
/// \brief Determine structural equivalence of two expressions.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
@@ -421,8 +426,103 @@ static bool IsStructurallyEquivalent(const IdentifierInfo *Name1,
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
NestedNameSpecifier *NNS1,
NestedNameSpecifier *NNS2) {
- // FIXME: Implement!
- return true;
+ if (NNS1->getKind() != NNS2->getKind())
+ return false;
+
+ NestedNameSpecifier *Prefix1 = NNS1->getPrefix(),
+ *Prefix2 = NNS2->getPrefix();
+ if ((bool)Prefix1 != (bool)Prefix2)
+ return false;
+
+ if (Prefix1)
+ if (!IsStructurallyEquivalent(Context, Prefix1, Prefix2))
+ return false;
+
+ switch (NNS1->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ return IsStructurallyEquivalent(NNS1->getAsIdentifier(),
+ NNS2->getAsIdentifier());
+ case NestedNameSpecifier::Namespace:
+ return IsStructurallyEquivalent(Context, NNS1->getAsNamespace(),
+ NNS2->getAsNamespace());
+ case NestedNameSpecifier::NamespaceAlias:
+ return IsStructurallyEquivalent(Context, NNS1->getAsNamespaceAlias(),
+ NNS2->getAsNamespaceAlias());
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ return IsStructurallyEquivalent(Context, QualType(NNS1->getAsType(), 0),
+ QualType(NNS2->getAsType(), 0));
+ case NestedNameSpecifier::Global:
+ return true;
+ case NestedNameSpecifier::Super:
+ return IsStructurallyEquivalent(Context, NNS1->getAsRecordDecl(),
+ NNS2->getAsRecordDecl());
+ }
+ return false;
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ const TemplateName &N1,
+ const TemplateName &N2) {
+ if (N1.getKind() != N2.getKind())
+ return false;
+ switch (N1.getKind()) {
+ case TemplateName::Template:
+ return IsStructurallyEquivalent(Context, N1.getAsTemplateDecl(),
+ N2.getAsTemplateDecl());
+
+ case TemplateName::OverloadedTemplate: {
+ OverloadedTemplateStorage *OS1 = N1.getAsOverloadedTemplate(),
+ *OS2 = N2.getAsOverloadedTemplate();
+ OverloadedTemplateStorage::iterator I1 = OS1->begin(), I2 = OS2->begin(),
+ E1 = OS1->end(), E2 = OS2->end();
+ for (; I1 != E1 && I2 != E2; ++I1, ++I2)
+ if (!IsStructurallyEquivalent(Context, *I1, *I2))
+ return false;
+ return I1 == E1 && I2 == E2;
+ }
+
+ case TemplateName::QualifiedTemplate: {
+ QualifiedTemplateName *QN1 = N1.getAsQualifiedTemplateName(),
+ *QN2 = N2.getAsQualifiedTemplateName();
+ return IsStructurallyEquivalent(Context, QN1->getDecl(), QN2->getDecl()) &&
+ IsStructurallyEquivalent(Context, QN1->getQualifier(),
+ QN2->getQualifier());
+ }
+
+ case TemplateName::DependentTemplate: {
+ DependentTemplateName *DN1 = N1.getAsDependentTemplateName(),
+ *DN2 = N2.getAsDependentTemplateName();
+ if (!IsStructurallyEquivalent(Context, DN1->getQualifier(),
+ DN2->getQualifier()))
+ return false;
+ if (DN1->isIdentifier() && DN2->isIdentifier())
+ return IsStructurallyEquivalent(DN1->getIdentifier(),
+ DN2->getIdentifier());
+ else if (DN1->isOverloadedOperator() && DN2->isOverloadedOperator())
+ return DN1->getOperator() == DN2->getOperator();
+ return false;
+ }
+
+ case TemplateName::SubstTemplateTemplateParm: {
+ SubstTemplateTemplateParmStorage *TS1 = N1.getAsSubstTemplateTemplateParm(),
+ *TS2 = N2.getAsSubstTemplateTemplateParm();
+ return IsStructurallyEquivalent(Context, TS1->getParameter(),
+ TS2->getParameter()) &&
+ IsStructurallyEquivalent(Context, TS1->getReplacement(),
+ TS2->getReplacement());
+ }
+ case TemplateName::SubstTemplateTemplateParmPack: {
+ SubstTemplateTemplateParmPackStorage
+ *P1 = N1.getAsSubstTemplateTemplateParmPack(),
+ *P2 = N2.getAsSubstTemplateTemplateParmPack();
+ return IsStructurallyEquivalent(Context, P1->getArgumentPack(),
+ P2->getArgumentPack()) &&
+ IsStructurallyEquivalent(Context, P1->getParameterPack(),
+ P2->getParameterPack());
+ }
+ }
+ return false;
}
/// \brief Determine whether two template arguments are equivalent.
@@ -783,6 +883,20 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::DeducedTemplateSpecialization: {
+ auto *DT1 = cast<DeducedTemplateSpecializationType>(T1);
+ auto *DT2 = cast<DeducedTemplateSpecializationType>(T2);
+ if (!IsStructurallyEquivalent(Context,
+ DT1->getTemplateName(),
+ DT2->getTemplateName()))
+ return false;
+ if (!IsStructurallyEquivalent(Context,
+ DT1->getDeducedType(),
+ DT2->getDeducedType()))
+ return false;
+ break;
+ }
+
case Type::Record:
case Type::Enum:
if (!IsStructurallyEquivalent(Context,
@@ -1976,6 +2090,23 @@ QualType ASTNodeImporter::VisitTemplateTypeParmType(
T->getDepth(), T->getIndex(), T->isParameterPack(), ParmDecl);
}
+QualType ASTNodeImporter::VisitSubstTemplateTypeParmType(
+ const SubstTemplateTypeParmType *T) {
+ const TemplateTypeParmType *Replaced =
+ cast_or_null<TemplateTypeParmType>(Importer.Import(
+ QualType(T->getReplacedParameter(), 0)).getTypePtr());
+ if (!Replaced)
+ return QualType();
+
+ QualType Replacement = Importer.Import(T->getReplacementType());
+ if (Replacement.isNull())
+ return QualType();
+ Replacement = Replacement.getCanonicalType();
+
+ return Importer.getToContext().getSubstTemplateTypeParmType(
+ Replaced, Replacement);
+}
+
QualType ASTNodeImporter::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
TemplateName ToTemplate = Importer.Import(T->getTemplateName());
@@ -2133,6 +2264,7 @@ ASTNodeImporter::ImportDeclarationNameLoc(const DeclarationNameInfo &From,
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::CXXUsingDirective:
+ case DeclarationName::CXXDeductionGuideName:
return;
case DeclarationName::CXXOperatorName: {
@@ -2231,8 +2363,10 @@ bool ASTNodeImporter::ImportDefinition(RecordDecl *From, RecordDecl *To,
ToData.UserProvidedDefaultConstructor
= FromData.UserProvidedDefaultConstructor;
ToData.DeclaredSpecialMembers = FromData.DeclaredSpecialMembers;
- ToData.ImplicitCopyConstructorHasConstParam
- = FromData.ImplicitCopyConstructorHasConstParam;
+ ToData.ImplicitCopyConstructorCanHaveConstParamForVBase
+ = FromData.ImplicitCopyConstructorCanHaveConstParamForVBase;
+ ToData.ImplicitCopyConstructorCanHaveConstParamForNonVBase
+ = FromData.ImplicitCopyConstructorCanHaveConstParamForNonVBase;
ToData.ImplicitCopyAssignmentHasConstParam
= FromData.ImplicitCopyAssignmentHasConstParam;
ToData.HasDeclaredCopyConstructorWithConstParam
@@ -2785,7 +2919,7 @@ Decl *ASTNodeImporter::VisitEnumDecl(EnumDecl *D) {
if (!DC->isFunctionOrMethod() && SearchName) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -2874,7 +3008,7 @@ Decl *ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
if (!DC->isFunctionOrMethod()) {
SmallVector<NamedDecl *, 4> ConflictingDecls;
SmallVector<NamedDecl *, 2> FoundDecls;
- DC->getRedeclContext()->localUncachedLookup(Name, FoundDecls);
+ DC->getRedeclContext()->localUncachedLookup(SearchName, FoundDecls);
for (unsigned I = 0, N = FoundDecls.size(); I != N; ++I) {
if (!FoundDecls[I]->isInIdentifierNamespace(IDNS))
continue;
@@ -3671,6 +3805,9 @@ Decl *ASTNodeImporter::VisitVarDecl(VarDecl *D) {
if (ImportDefinition(D, ToVar))
return nullptr;
+ if (D->isConstexpr())
+ ToVar->setConstexpr(true);
+
return ToVar;
}
@@ -3724,8 +3861,27 @@ Decl *ASTNodeImporter::VisitParmVarDecl(ParmVarDecl *D) {
Importer.Import(D->getInnerLocStart()),
Loc, Name.getAsIdentifierInfo(),
T, TInfo, D->getStorageClass(),
- /*FIXME: Default argument*/nullptr);
+ /*DefaultArg*/ nullptr);
+
+ // Set the default argument.
ToParm->setHasInheritedDefaultArg(D->hasInheritedDefaultArg());
+ ToParm->setKNRPromoted(D->isKNRPromoted());
+
+ Expr *ToDefArg = nullptr;
+ Expr *FromDefArg = nullptr;
+ if (D->hasUninstantiatedDefaultArg()) {
+ FromDefArg = D->getUninstantiatedDefaultArg();
+ ToDefArg = Importer.Import(FromDefArg);
+ ToParm->setUninstantiatedDefaultArg(ToDefArg);
+ } else if (D->hasUnparsedDefaultArg()) {
+ ToParm->setUnparsedDefaultArg();
+ } else if (D->hasDefaultArg()) {
+ FromDefArg = D->getDefaultArg();
+ ToDefArg = Importer.Import(FromDefArg);
+ ToParm->setDefaultArg(ToDefArg);
+ }
+ if (FromDefArg && !ToDefArg)
+ return nullptr;
if (D->isUsed())
ToParm->setIsUsed();
@@ -4424,8 +4580,10 @@ Decl *ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
ToProperty->setPropertyAttributes(D->getPropertyAttributes());
ToProperty->setPropertyAttributesAsWritten(
D->getPropertyAttributesAsWritten());
- ToProperty->setGetterName(Importer.Import(D->getGetterName()));
- ToProperty->setSetterName(Importer.Import(D->getSetterName()));
+ ToProperty->setGetterName(Importer.Import(D->getGetterName()),
+ Importer.Import(D->getGetterNameLoc()));
+ ToProperty->setSetterName(Importer.Import(D->getSetterName()),
+ Importer.Import(D->getSetterNameLoc()));
ToProperty->setGetterMethodDecl(
cast_or_null<ObjCMethodDecl>(Importer.Import(D->getGetterMethodDecl())));
ToProperty->setSetterMethodDecl(
@@ -4753,12 +4911,46 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
}
} else {
// Create a new specialization.
- D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
- D->getTagKind(), DC,
- StartLoc, IdLoc,
- ClassTemplate,
- TemplateArgs,
- /*PrevDecl=*/nullptr);
+ if (ClassTemplatePartialSpecializationDecl *PartialSpec =
+ dyn_cast<ClassTemplatePartialSpecializationDecl>(D)) {
+
+ // Import TemplateArgumentListInfo
+ TemplateArgumentListInfo ToTAInfo;
+ auto &ASTTemplateArgs = *PartialSpec->getTemplateArgsAsWritten();
+ for (unsigned I = 0, E = ASTTemplateArgs.NumTemplateArgs; I < E; ++I) {
+ bool Error = false;
+ auto ToLoc = ImportTemplateArgumentLoc(ASTTemplateArgs[I], Error);
+ if (Error)
+ return nullptr;
+ ToTAInfo.addArgument(ToLoc);
+ }
+
+ QualType CanonInjType = Importer.Import(
+ PartialSpec->getInjectedSpecializationType());
+ if (CanonInjType.isNull())
+ return nullptr;
+ CanonInjType = CanonInjType.getCanonicalType();
+
+ TemplateParameterList *ToTPList = ImportTemplateParameterList(
+ PartialSpec->getTemplateParameters());
+ if (!ToTPList && PartialSpec->getTemplateParameters())
+ return nullptr;
+
+ D2 = ClassTemplatePartialSpecializationDecl::Create(
+ Importer.getToContext(), D->getTagKind(), DC, StartLoc, IdLoc,
+ ToTPList, ClassTemplate,
+ llvm::makeArrayRef(TemplateArgs.data(), TemplateArgs.size()),
+ ToTAInfo, CanonInjType, nullptr);
+
+ } else {
+ D2 = ClassTemplateSpecializationDecl::Create(Importer.getToContext(),
+ D->getTagKind(), DC,
+ StartLoc, IdLoc,
+ ClassTemplate,
+ TemplateArgs,
+ /*PrevDecl=*/nullptr);
+ }
+
D2->setSpecializationKind(D->getSpecializationKind());
// Add this specialization to the class template.
@@ -4766,13 +4958,31 @@ Decl *ASTNodeImporter::VisitClassTemplateSpecializationDecl(
// Import the qualifier, if any.
D2->setQualifierInfo(Importer.Import(D->getQualifierLoc()));
-
+
+ Importer.Imported(D, D2);
+
+ if (auto *TSI = D->getTypeAsWritten()) {
+ TypeSourceInfo *TInfo = Importer.Import(TSI);
+ if (!TInfo)
+ return nullptr;
+ D2->setTypeAsWritten(TInfo);
+ D2->setTemplateKeywordLoc(Importer.Import(D->getTemplateKeywordLoc()));
+ D2->setExternLoc(Importer.Import(D->getExternLoc()));
+ }
+
+ SourceLocation POI = Importer.Import(D->getPointOfInstantiation());
+ if (POI.isValid())
+ D2->setPointOfInstantiation(POI);
+ else if (D->getPointOfInstantiation().isValid())
+ return nullptr;
+
+ D2->setTemplateSpecializationKind(D->getTemplateSpecializationKind());
+
// Add the specialization to this context.
D2->setLexicalDeclContext(LexicalDC);
LexicalDC->addDeclInternal(D2);
}
Importer.Imported(D, D2);
-
if (D->isCompleteDefinition() && ImportDefinition(D, D2))
return nullptr;
@@ -5010,13 +5220,17 @@ Stmt *ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
- if (!ToII)
+ // ToII is nullptr when no symbolic name is given for output operand
+ // see ParseStmtAsm::ParseAsmOperandsOpt
+ if (!ToII && S->getOutputIdentifier(I))
return nullptr;
Names.push_back(ToII);
}
for (unsigned I = 0, E = S->getNumInputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getInputIdentifier(I));
- if (!ToII)
+ // ToII is nullptr when no symbolic name is given for input operand
+ // see ParseStmtAsm::ParseAsmOperandsOpt
+ if (!ToII && S->getInputIdentifier(I))
return nullptr;
Names.push_back(ToII);
}
@@ -5860,7 +6074,7 @@ Expr *ASTNodeImporter::VisitBinaryOperator(BinaryOperator *E) {
T, E->getValueKind(),
E->getObjectKind(),
Importer.Import(E->getOperatorLoc()),
- E->isFPContractable());
+ E->getFPFeatures());
}
Expr *ASTNodeImporter::VisitConditionalOperator(ConditionalOperator *E) {
@@ -6010,7 +6224,7 @@ Expr *ASTNodeImporter::VisitCompoundAssignOperator(CompoundAssignOperator *E) {
E->getObjectKind(),
CompLHSType, CompResultType,
Importer.Import(E->getOperatorLoc()),
- E->isFPContractable());
+ E->getFPFeatures());
}
bool ASTNodeImporter::ImportCastPath(CastExpr *CE, CXXCastPath &Path) {
@@ -6633,6 +6847,27 @@ Expr *ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
}
}
+
+Expr *ASTNodeImporter::VisitSubstNonTypeTemplateParmExpr(
+ SubstNonTypeTemplateParmExpr *E) {
+ QualType T = Importer.Import(E->getType());
+ if (T.isNull())
+ return nullptr;
+
+ NonTypeTemplateParmDecl *Param = cast_or_null<NonTypeTemplateParmDecl>(
+ Importer.Import(E->getParameter()));
+ if (!Param)
+ return nullptr;
+
+ Expr *Replacement = Importer.Import(E->getReplacement());
+ if (!Replacement)
+ return nullptr;
+
+ return new (Importer.getToContext()) SubstNonTypeTemplateParmExpr(
+ T, E->getValueKind(), Importer.Import(E->getExprLoc()), Param,
+ Replacement);
+}
+
ASTImporter::ASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
ASTContext &FromContext, FileManager &FromFileManager,
bool MinimalImport)
@@ -6839,14 +7074,14 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
case NestedNameSpecifier::Namespace:
if (NamespaceDecl *NS =
- cast<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
+ cast_or_null<NamespaceDecl>(Import(FromNNS->getAsNamespace()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NS);
}
return nullptr;
case NestedNameSpecifier::NamespaceAlias:
if (NamespaceAliasDecl *NSAD =
- cast<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
+ cast_or_null<NamespaceAliasDecl>(Import(FromNNS->getAsNamespaceAlias()))) {
return NestedNameSpecifier::Create(ToContext, prefix, NSAD);
}
return nullptr;
@@ -6856,7 +7091,7 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
case NestedNameSpecifier::Super:
if (CXXRecordDecl *RD =
- cast<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
+ cast_or_null<CXXRecordDecl>(Import(FromNNS->getAsRecordDecl()))) {
return NestedNameSpecifier::SuperSpecifier(ToContext, RD);
}
return nullptr;
@@ -6878,8 +7113,74 @@ NestedNameSpecifier *ASTImporter::Import(NestedNameSpecifier *FromNNS) {
}
NestedNameSpecifierLoc ASTImporter::Import(NestedNameSpecifierLoc FromNNS) {
- // FIXME: Implement!
- return NestedNameSpecifierLoc();
+ // Copied from NestedNameSpecifier mostly.
+ SmallVector<NestedNameSpecifierLoc , 8> NestedNames;
+ NestedNameSpecifierLoc NNS = FromNNS;
+
+ // Push each of the nested-name-specifiers's onto a stack for
+ // serialization in reverse order.
+ while (NNS) {
+ NestedNames.push_back(NNS);
+ NNS = NNS.getPrefix();
+ }
+
+ NestedNameSpecifierLocBuilder Builder;
+
+ while (!NestedNames.empty()) {
+ NNS = NestedNames.pop_back_val();
+ NestedNameSpecifier *Spec = Import(NNS.getNestedNameSpecifier());
+ if (!Spec)
+ return NestedNameSpecifierLoc();
+
+ NestedNameSpecifier::SpecifierKind Kind = Spec->getKind();
+ switch (Kind) {
+ case NestedNameSpecifier::Identifier:
+ Builder.Extend(getToContext(),
+ Spec->getAsIdentifier(),
+ Import(NNS.getLocalBeginLoc()),
+ Import(NNS.getLocalEndLoc()));
+ break;
+
+ case NestedNameSpecifier::Namespace:
+ Builder.Extend(getToContext(),
+ Spec->getAsNamespace(),
+ Import(NNS.getLocalBeginLoc()),
+ Import(NNS.getLocalEndLoc()));
+ break;
+
+ case NestedNameSpecifier::NamespaceAlias:
+ Builder.Extend(getToContext(),
+ Spec->getAsNamespaceAlias(),
+ Import(NNS.getLocalBeginLoc()),
+ Import(NNS.getLocalEndLoc()));
+ break;
+
+ case NestedNameSpecifier::TypeSpec:
+ case NestedNameSpecifier::TypeSpecWithTemplate: {
+ TypeSourceInfo *TSI = getToContext().getTrivialTypeSourceInfo(
+ QualType(Spec->getAsType(), 0));
+ Builder.Extend(getToContext(),
+ Import(NNS.getLocalBeginLoc()),
+ TSI->getTypeLoc(),
+ Import(NNS.getLocalEndLoc()));
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ Builder.MakeGlobal(getToContext(), Import(NNS.getLocalBeginLoc()));
+ break;
+
+ case NestedNameSpecifier::Super: {
+ SourceRange ToRange = Import(NNS.getSourceRange());
+ Builder.MakeSuper(getToContext(),
+ Spec->getAsRecordDecl(),
+ ToRange.getBegin(),
+ ToRange.getEnd());
+ }
+ }
+ }
+
+ return Builder.getWithLocInContext(getToContext());
}
TemplateName ASTImporter::Import(TemplateName From) {
@@ -7175,6 +7476,14 @@ DeclarationName ASTImporter::Import(DeclarationName FromName) {
ToContext.getCanonicalType(T));
}
+ case DeclarationName::CXXDeductionGuideName: {
+ TemplateDecl *Template = cast_or_null<TemplateDecl>(
+ Import(FromName.getCXXDeductionGuideTemplate()));
+ if (!Template)
+ return DeclarationName();
+ return ToContext.DeclarationNames.getCXXDeductionGuideName(Template);
+ }
+
case DeclarationName::CXXConversionFunctionName: {
QualType T = Import(FromName.getCXXNameType());
if (T.isNull())
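[Editor's note] The Visit... and Import overloads added above all hang off a single ASTImporter instance. A minimal driver sketch, using only the constructor shown near the end of this file's diff and the Import(Decl *) entry point of this era, which reported failure by returning null (later releases switched to llvm::Expected):

    #include "clang/AST/ASTImporter.h"

    // Import one declaration from a source ASTContext into a destination
    // context. A null result means some constituent (type, name, default
    // argument, ...) could not be imported.
    clang::Decl *importOneDecl(clang::ASTContext &ToCtx,
                               clang::FileManager &ToFiles,
                               clang::ASTContext &FromCtx,
                               clang::FileManager &FromFiles,
                               clang::Decl *FromDecl) {
      clang::ASTImporter Importer(ToCtx, ToFiles, FromCtx, FromFiles,
                                  /*MinimalImport=*/false);
      return Importer.Import(FromDecl);
    }
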
diff --git a/lib/AST/CMakeLists.txt b/lib/AST/CMakeLists.txt
index e28bd2e16d17..13bf352c2f21 100644
--- a/lib/AST/CMakeLists.txt
+++ b/lib/AST/CMakeLists.txt
@@ -31,6 +31,7 @@ add_clang_library(clangAST
ExprConstant.cpp
ExprCXX.cpp
ExprObjC.cpp
+ ExternalASTMerger.cpp
ExternalASTSource.cpp
InheritViz.cpp
ItaniumCXXABI.cpp
@@ -40,6 +41,7 @@ add_clang_library(clangAST
MicrosoftMangle.cpp
NestedNameSpecifier.cpp
NSAPI.cpp
+ ODRHash.cpp
OpenMPClause.cpp
ParentMap.cpp
RawCommentList.cpp
diff --git a/lib/AST/CXXInheritance.cpp b/lib/AST/CXXInheritance.cpp
index a97d6a22e7b3..56fb0464078f 100644
--- a/lib/AST/CXXInheritance.cpp
+++ b/lib/AST/CXXInheritance.cpp
@@ -88,7 +88,7 @@ bool CXXRecordDecl::isDerivedFrom(const CXXRecordDecl *Base,
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
// FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
return lookupInBases(
- [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ [BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
return FindBaseClass(Specifier, Path, BaseDecl);
},
Paths);
@@ -109,7 +109,7 @@ bool CXXRecordDecl::isVirtuallyDerivedFrom(const CXXRecordDecl *Base) const {
const CXXRecordDecl *BaseDecl = Base->getCanonicalDecl();
// FIXME: Capturing 'this' is a workaround for name lookup bugs in GCC 4.7.
return lookupInBases(
- [this, BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
+ [BaseDecl](const CXXBaseSpecifier *Specifier, CXXBasePath &Path) {
return FindVirtualBaseClass(Specifier, Path, BaseDecl);
},
Paths);
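// Illustrative sketch (not from the patch): the hunks above delete a 'this'
// capture that existed only as a GCC 4.7 name-lookup workaround. The guiding
// idiom is to capture exactly what the lambda body uses:
auto makeBaseMatcher(const void *BaseDecl) {
  // No 'this' needed; the body reads only BaseDecl.
  return [BaseDecl](const void *Specifier) { return Specifier == BaseDecl; };
}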
diff --git a/lib/AST/Decl.cpp b/lib/AST/Decl.cpp
index 81f08787d515..2b22e5bb50a5 100644
--- a/lib/AST/Decl.cpp
+++ b/lib/AST/Decl.cpp
@@ -1414,6 +1414,11 @@ void NamedDecl::printQualifiedName(raw_ostream &OS,
const PrintingPolicy &P) const {
const DeclContext *Ctx = getDeclContext();
+ // For ObjC methods, look through categories and use the interface as context.
+ if (auto *MD = dyn_cast<ObjCMethodDecl>(this))
+ if (auto *ID = MD->getClassInterface())
+ Ctx = ID;
+
if (Ctx->isFunctionOrMethod()) {
printName(OS);
return;
@@ -2143,13 +2148,6 @@ APValue *VarDecl::evaluateValue() const {
return evaluateValue(Notes);
}
-namespace {
-// Destroy an APValue that was allocated in an ASTContext.
-void DestroyAPValue(void* UntypedValue) {
- static_cast<APValue*>(UntypedValue)->~APValue();
-}
-} // namespace
-
APValue *VarDecl::evaluateValue(
SmallVectorImpl<PartialDiagnosticAt> &Notes) const {
EvaluatedStmt *Eval = ensureEvaluatedStmt();
@@ -2181,7 +2179,7 @@ APValue *VarDecl::evaluateValue(
if (!Result)
Eval->Evaluated = APValue();
else if (Eval->Evaluated.needsCleanup())
- getASTContext().AddDeallocation(DestroyAPValue, &Eval->Evaluated);
+ getASTContext().addDestruction(&Eval->Evaluated);
Eval->IsEvaluating = false;
Eval->WasEvaluated = true;
@@ -2510,7 +2508,7 @@ bool FunctionDecl::isVariadic() const {
bool FunctionDecl::hasBody(const FunctionDecl *&Definition) const {
for (auto I : redecls()) {
- if (I->Body || I->IsLateTemplateParsed) {
+ if (I->doesThisDeclarationHaveABody()) {
Definition = I;
return true;
}
@@ -3744,6 +3742,20 @@ void EnumDecl::completeDefinition(QualType NewType,
TagDecl::completeDefinition();
}
+bool EnumDecl::isClosed() const {
+ if (const auto *A = getAttr<EnumExtensibilityAttr>())
+ return A->getExtensibility() == EnumExtensibilityAttr::Closed;
+ return true;
+}
+
+bool EnumDecl::isClosedFlag() const {
+ return isClosed() && hasAttr<FlagEnumAttr>();
+}
+
+bool EnumDecl::isClosedNonFlag() const {
+ return isClosed() && !hasAttr<FlagEnumAttr>();
+}
+
TemplateSpecializationKind EnumDecl::getTemplateSpecializationKind() const {
if (MemberSpecializationInfo *MSI = getMemberSpecializationInfo())
return MSI->getTemplateSpecializationKind();
@@ -4230,6 +4242,30 @@ TagDecl *TypedefNameDecl::getAnonDeclWithTypedefName(bool AnyRedecl) const {
return nullptr;
}
+bool TypedefNameDecl::isTransparentTagSlow() const {
+ auto determineIsTransparent = [&]() {
+ if (auto *TT = getUnderlyingType()->getAs<TagType>()) {
+ if (auto *TD = TT->getDecl()) {
+ if (TD->getName() != getName())
+ return false;
+ SourceLocation TTLoc = getLocation();
+ SourceLocation TDLoc = TD->getLocation();
+ if (!TTLoc.isMacroID() || !TDLoc.isMacroID())
+ return false;
+ SourceManager &SM = getASTContext().getSourceManager();
+ return SM.getSpellingLoc(TTLoc) == SM.getSpellingLoc(TDLoc);
+ }
+ }
+ return false;
+ };
+
+ bool isTransparent = determineIsTransparent();
+ CacheIsTransparentTag = 1;
+ if (isTransparent)
+ CacheIsTransparentTag |= 0x2;
+ return isTransparent;
+}
+
TypedefDecl *TypedefDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
return new (C, ID) TypedefDecl(C, nullptr, SourceLocation(), SourceLocation(),
nullptr, nullptr);
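// Illustrative example (not from the patch) of the new EnumDecl predicates,
// assuming the enum_extensibility and flag_enum attributes they query:
enum __attribute__((enum_extensibility(open))) OpenE { O1 };
enum __attribute__((enum_extensibility(closed))) ClosedE { C1 };
enum PlainE { P1 };  // unannotated: isClosed() defaults to true
enum __attribute__((enum_extensibility(closed), flag_enum)) FlagE { F1 = 1 };
// isClosed():        false for OpenE; true for ClosedE, PlainE and FlagE
// isClosedFlag():    true only for FlagE (closed and flag_enum)
// isClosedNonFlag(): true for ClosedE and PlainE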
diff --git a/lib/AST/DeclBase.cpp b/lib/AST/DeclBase.cpp
index 6111abab646e..cda70c5edcd4 100644
--- a/lib/AST/DeclBase.cpp
+++ b/lib/AST/DeclBase.cpp
@@ -619,6 +619,7 @@ bool Decl::isWeakImported() const {
unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
switch (DeclKind) {
case Function:
+ case CXXDeductionGuide:
case CXXMethod:
case CXXConstructor:
case ConstructorUsingShadow:
diff --git a/lib/AST/DeclCXX.cpp b/lib/AST/DeclCXX.cpp
index a9db65a51518..2e5cec9c108f 100644
--- a/lib/AST/DeclCXX.cpp
+++ b/lib/AST/DeclCXX.cpp
@@ -18,6 +18,7 @@
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
+#include "clang/AST/ODRHash.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/IdentifierTable.h"
#include "llvm/ADT/STLExtras.h"
@@ -67,12 +68,14 @@ CXXRecordDecl::DefinitionData::DefinitionData(CXXRecordDecl *D)
HasConstexprDefaultConstructor(false),
HasNonLiteralTypeFieldsOrBases(false), ComputedVisibleConversions(false),
UserProvidedDefaultConstructor(false), DeclaredSpecialMembers(0),
- ImplicitCopyConstructorHasConstParam(true),
+ ImplicitCopyConstructorCanHaveConstParamForVBase(true),
+ ImplicitCopyConstructorCanHaveConstParamForNonVBase(true),
ImplicitCopyAssignmentHasConstParam(true),
HasDeclaredCopyConstructorWithConstParam(false),
HasDeclaredCopyAssignmentWithConstParam(false), IsLambda(false),
- IsParsingBaseSpecifiers(false), NumBases(0), NumVBases(0), Bases(),
- VBases(), Definition(D), FirstFriend() {}
+ IsParsingBaseSpecifiers(false), HasODRHash(false), ODRHash(0),
+ NumBases(0), NumVBases(0), Bases(), VBases(), Definition(D),
+ FirstFriend() {}
CXXBaseSpecifier *CXXRecordDecl::DefinitionData::getBasesSlowCase() const {
return Bases.get(Definition->getASTContext().getExternalSource());
@@ -226,7 +229,7 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// 'const B&' or 'const volatile B&' [...]
if (CXXRecordDecl *VBaseDecl = VBase.getType()->getAsCXXRecordDecl())
if (!VBaseDecl->hasCopyConstructorWithConstParam())
- data().ImplicitCopyConstructorHasConstParam = false;
+ data().ImplicitCopyConstructorCanHaveConstParamForVBase = false;
// C++1z [dcl.init.agg]p1:
// An aggregate is a class with [...] no virtual base classes
@@ -263,6 +266,14 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// In the definition of a constexpr constructor [...]
// -- the class shall not have any virtual base classes
data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++1z [class.copy]p8:
+ // The implicitly-declared copy constructor for a class X will have
+ // the form 'X::X(const X&)' if each potentially constructed subobject
+ // has a copy constructor whose first parameter is of type
+ // 'const B&' or 'const volatile B&' [...]
+ if (!BaseClassDecl->hasCopyConstructorWithConstParam())
+ data().ImplicitCopyConstructorCanHaveConstParamForVBase = false;
} else {
// C++ [class.ctor]p5:
// A default constructor is trivial [...] if:
@@ -305,6 +316,14 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
// default constructor is constexpr.
if (!BaseClassDecl->hasConstexprDefaultConstructor())
data().DefaultedDefaultConstructorIsConstexpr = false;
+
+ // C++1z [class.copy]p8:
+ // The implicitly-declared copy constructor for a class X will have
+ // the form 'X::X(const X&)' if each potentially constructed subobject
+ // has a copy constructor whose first parameter is of type
+ // 'const B&' or 'const volatile B&' [...]
+ if (!BaseClassDecl->hasCopyConstructorWithConstParam())
+ data().ImplicitCopyConstructorCanHaveConstParamForNonVBase = false;
}
// C++ [class.ctor]p3:
@@ -324,14 +343,6 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
if (!BaseClassDecl->hasCopyAssignmentWithConstParam())
data().ImplicitCopyAssignmentHasConstParam = false;
- // C++11 [class.copy]p8:
- // The implicitly-declared copy constructor for a class X will have
- // the form 'X::X(const X&)' if each direct [...] base class B of X
- // has a copy constructor whose first parameter is of type
- // 'const B&' or 'const volatile B&' [...]
- if (!BaseClassDecl->hasCopyConstructorWithConstParam())
- data().ImplicitCopyConstructorHasConstParam = false;
-
// A class has an Objective-C object member if... or any of its bases
// has an Objective-C object member.
if (BaseClassDecl->hasObjectMember())
@@ -371,6 +382,23 @@ CXXRecordDecl::setBases(CXXBaseSpecifier const * const *Bases,
data().IsParsingBaseSpecifiers = false;
}
+unsigned CXXRecordDecl::getODRHash() const {
+ assert(hasDefinition() && "ODRHash only for records with definitions");
+
+ // Previously calculated hash is stored in DefinitionData.
+ if (DefinitionData->HasODRHash)
+ return DefinitionData->ODRHash;
+
+ // Only calculate hash on first call of getODRHash per record.
+ ODRHash Hash;
+ Hash.AddCXXRecordDecl(getDefinition());
+ DefinitionData->HasODRHash = true;
+ DefinitionData->ODRHash = Hash.CalculateHash();
+
+ return DefinitionData->ODRHash;
+}
+
void CXXRecordDecl::addedClassSubobject(CXXRecordDecl *Subobj) {
// C++11 [class.copy]p11:
// A defaulted copy/move constructor for a class X is defined as
@@ -702,9 +730,7 @@ void CXXRecordDecl::addedMember(Decl *D) {
ASTContext &Context = getASTContext();
QualType T = Context.getBaseElementType(Field->getType());
if (T->isObjCRetainableType() || T.isObjCGCStrong()) {
- if (!Context.getLangOpts().ObjCAutoRefCount) {
- setHasObjectMember(true);
- } else if (T.getObjCLifetime() != Qualifiers::OCL_ExplicitNone) {
+ if (T.hasNonTrivialObjCLifetime()) {
// Objective-C Automatic Reference Counting:
// If a class has a non-static data member of Objective-C pointer
// type (or array thereof), it is a non-POD type and its
@@ -716,6 +742,8 @@ void CXXRecordDecl::addedMember(Decl *D) {
Data.PlainOldData = false;
Data.HasTrivialSpecialMembers = 0;
Data.HasIrrelevantDestructor = false;
+ } else if (!Context.getLangOpts().ObjCAutoRefCount) {
+ setHasObjectMember(true);
}
} else if (!T.isCXX98PODType(Context))
data().PlainOldData = false;
@@ -905,12 +933,11 @@ void CXXRecordDecl::addedMember(Decl *D) {
// C++11 [class.copy]p8:
// The implicitly-declared copy constructor for a class X will have
- // the form 'X::X(const X&)' if [...] for all the non-static data
- // members of X that are of a class type M (or array thereof), each
- // such class type has a copy constructor whose first parameter is
- // of type 'const M&' or 'const volatile M&'.
+ // the form 'X::X(const X&)' if each potentially constructed subobject
+ // of a class type M (or array thereof) has a copy constructor whose
+ // first parameter is of type 'const M&' or 'const volatile M&'.
if (!FieldRec->hasCopyConstructorWithConstParam())
- data().ImplicitCopyConstructorHasConstParam = false;
+ data().ImplicitCopyConstructorCanHaveConstParamForNonVBase = false;
// C++11 [class.copy]p18:
// The implicitly-declared copy assignment operator for a class X will
@@ -1472,6 +1499,23 @@ bool CXXRecordDecl::mayBeAbstract() const {
return false;
}
+void CXXDeductionGuideDecl::anchor() { }
+
+CXXDeductionGuideDecl *CXXDeductionGuideDecl::Create(
+ ASTContext &C, DeclContext *DC, SourceLocation StartLoc, bool IsExplicit,
+ const DeclarationNameInfo &NameInfo, QualType T, TypeSourceInfo *TInfo,
+ SourceLocation EndLocation) {
+ return new (C, DC) CXXDeductionGuideDecl(C, DC, StartLoc, IsExplicit,
+ NameInfo, T, TInfo, EndLocation);
+}
+
+CXXDeductionGuideDecl *CXXDeductionGuideDecl::CreateDeserialized(ASTContext &C,
+ unsigned ID) {
+ return new (C, ID) CXXDeductionGuideDecl(C, nullptr, SourceLocation(), false,
+ DeclarationNameInfo(), QualType(),
+ nullptr, SourceLocation());
+}
+
void CXXMethodDecl::anchor() { }
bool CXXMethodDecl::isStatic() const {
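// Illustrative example (not from the patch) of why one
// ImplicitCopyConstructorHasConstParam bit becomes two: per C++1z
// [class.copy]p8, virtual bases of an abstract class are not potentially
// constructed subobjects, so their constraint must be tracked separately.
struct V { V() = default; V(V &); };  // copy ctor takes a non-const reference
struct A : virtual V { virtual void f() = 0; };
// A is abstract, so V is ignored: A's implicit copy ctor is A(const A &).
struct B : A { void f() override {} };
// B is concrete, so V counts again: B's implicit copy ctor is B(B &).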
diff --git a/lib/AST/DeclObjC.cpp b/lib/AST/DeclObjC.cpp
index 60d05f682e6e..9218eb537a60 100644
--- a/lib/AST/DeclObjC.cpp
+++ b/lib/AST/DeclObjC.cpp
@@ -162,10 +162,10 @@ ObjCPropertyDecl::findPropertyDecl(const DeclContext *DC,
return nullptr;
}
- // If context is class, then lookup property in its extensions.
+ // If context is class, then lookup property in its visible extensions.
// This comes before property is looked up in primary class.
if (auto *IDecl = dyn_cast<ObjCInterfaceDecl>(DC)) {
- for (const auto *Ext : IDecl->known_extensions())
+ for (const auto *Ext : IDecl->visible_extensions())
if (ObjCPropertyDecl *PD = ObjCPropertyDecl::findPropertyDecl(Ext,
propertyID,
queryKind))
diff --git a/lib/AST/DeclPrinter.cpp b/lib/AST/DeclPrinter.cpp
index b8ebe1c568c7..5d841a197f26 100644
--- a/lib/AST/DeclPrinter.cpp
+++ b/lib/AST/DeclPrinter.cpp
@@ -464,6 +464,7 @@ void DeclPrinter::VisitRecordDecl(RecordDecl *D) {
void DeclPrinter::VisitEnumConstantDecl(EnumConstantDecl *D) {
Out << *D;
+ prettyPrintAttributes(D);
if (Expr *Init = D->getInitExpr()) {
Out << " = ";
Init->printPretty(Out, nullptr, Policy, Indentation);
@@ -480,6 +481,7 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
CXXConstructorDecl *CDecl = dyn_cast<CXXConstructorDecl>(D);
CXXConversionDecl *ConversionDecl = dyn_cast<CXXConversionDecl>(D);
+ CXXDeductionGuideDecl *GuideDecl = dyn_cast<CXXDeductionGuideDecl>(D);
if (!Policy.SuppressSpecifiers) {
switch (D->getStorageClass()) {
case SC_None: break;
@@ -495,13 +497,23 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (D->isModulePrivate()) Out << "__module_private__ ";
if (D->isConstexpr() && !D->isExplicitlyDefaulted()) Out << "constexpr ";
if ((CDecl && CDecl->isExplicitSpecified()) ||
- (ConversionDecl && ConversionDecl->isExplicit()))
+ (ConversionDecl && ConversionDecl->isExplicitSpecified()) ||
+ (GuideDecl && GuideDecl->isExplicitSpecified()))
Out << "explicit ";
}
PrintingPolicy SubPolicy(Policy);
SubPolicy.SuppressSpecifiers = false;
- std::string Proto = D->getNameInfo().getAsString();
+ std::string Proto;
+ if (!Policy.SuppressScope) {
+ if (const NestedNameSpecifier *NS = D->getQualifier()) {
+ llvm::raw_string_ostream OS(Proto);
+ NS->print(OS, Policy);
+ }
+ }
+ Proto += D->getNameInfo().getAsString();
+ if (GuideDecl)
+ Proto = GuideDecl->getDeducedTemplate()->getDeclName().getAsString();
if (const TemplateArgumentList *TArgs = D->getTemplateSpecializationArgs()) {
llvm::raw_string_ostream POut(Proto);
DeclPrinter TArgPrinter(POut, SubPolicy, Indentation);
@@ -651,7 +663,9 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
}
} else if (!ConversionDecl && !isa<CXXDestructorDecl>(D)) {
if (FT && FT->hasTrailingReturn()) {
- Out << "auto " << Proto << " -> ";
+ if (!GuideDecl)
+ Out << "auto ";
+ Out << Proto << " -> ";
Proto.clear();
}
AFT->getReturnType().print(Out, Policy, Proto);
@@ -1043,7 +1057,10 @@ void DeclPrinter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
prettyPrintPragmas(D->getTemplatedDecl());
VisitRedeclarableTemplateDecl(D);
- if (PrintInstantiation) {
+ // Never print "instantiations" for deduction guides (they don't really
+ // have them).
+ if (PrintInstantiation &&
+ !isa<CXXDeductionGuideDecl>(D->getTemplatedDecl())) {
FunctionDecl *PrevDecl = D->getTemplatedDecl();
const FunctionDecl *Def;
if (PrevDecl->isDefined(Def) && Def != PrevDecl)
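// Illustrative example (not from the patch) of the printer changes for
// deduction guides: the deduced template's name is used and no leading
// "auto" is emitted, even though the guide is modeled as a function with a
// trailing return type.
template <typename T> struct S { S(T); };
S(int) -> S<int>;
// Printed roughly as "S(int) -> S<int>", not "auto S(int) -> S<int>".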
diff --git a/lib/AST/DeclTemplate.cpp b/lib/AST/DeclTemplate.cpp
index a5fbb0a3baec..00a6739478bd 100644
--- a/lib/AST/DeclTemplate.cpp
+++ b/lib/AST/DeclTemplate.cpp
@@ -208,10 +208,6 @@ void RedeclarableTemplateDecl::addSpecializationImpl(
// FunctionTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-void FunctionTemplateDecl::DeallocateCommon(void *Ptr) {
- static_cast<Common *>(Ptr)->~Common();
-}
-
FunctionTemplateDecl *FunctionTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
@@ -231,7 +227,7 @@ FunctionTemplateDecl *FunctionTemplateDecl::CreateDeserialized(ASTContext &C,
RedeclarableTemplateDecl::CommonBase *
FunctionTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
+ C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -288,19 +284,23 @@ ArrayRef<TemplateArgument> FunctionTemplateDecl::getInjectedTemplateArgs() {
// ClassTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-void ClassTemplateDecl::DeallocateCommon(void *Ptr) {
- static_cast<Common *>(Ptr)->~Common();
-}
-
ClassTemplateDecl *ClassTemplateDecl::Create(ASTContext &C,
DeclContext *DC,
SourceLocation L,
DeclarationName Name,
TemplateParameterList *Params,
- NamedDecl *Decl) {
+ NamedDecl *Decl,
+ Expr *AssociatedConstraints) {
AdoptTemplateParameterList(Params, cast<DeclContext>(Decl));
- ClassTemplateDecl *New = new (C, DC) ClassTemplateDecl(C, DC, L, Name,
- Params, Decl);
+
+ if (!AssociatedConstraints) {
+ return new (C, DC) ClassTemplateDecl(C, DC, L, Name, Params, Decl);
+ }
+
+ ConstrainedTemplateDeclInfo *const CTDI = new (C) ConstrainedTemplateDeclInfo;
+ ClassTemplateDecl *const New =
+ new (C, DC) ClassTemplateDecl(CTDI, C, DC, L, Name, Params, Decl);
+ New->setAssociatedConstraints(AssociatedConstraints);
return New;
}
@@ -340,7 +340,7 @@ ClassTemplateDecl::getPartialSpecializations() {
RedeclarableTemplateDecl::CommonBase *
ClassTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
+ C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -880,13 +880,10 @@ TypeAliasTemplateDecl *TypeAliasTemplateDecl::CreateDeserialized(ASTContext &C,
DeclarationName(), nullptr, nullptr);
}
-void TypeAliasTemplateDecl::DeallocateCommon(void *Ptr) {
- static_cast<Common *>(Ptr)->~Common();
-}
RedeclarableTemplateDecl::CommonBase *
TypeAliasTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
+ C.addDestruction(CommonPtr);
return CommonPtr;
}
@@ -907,10 +904,6 @@ ClassScopeFunctionSpecializationDecl::CreateDeserialized(ASTContext &C,
// VarTemplateDecl Implementation
//===----------------------------------------------------------------------===//
-void VarTemplateDecl::DeallocateCommon(void *Ptr) {
- static_cast<Common *>(Ptr)->~Common();
-}
-
VarTemplateDecl *VarTemplateDecl::getDefinition() {
VarTemplateDecl *CurD = this;
while (CurD) {
@@ -966,7 +959,7 @@ VarTemplateDecl::getPartialSpecializations() {
RedeclarableTemplateDecl::CommonBase *
VarTemplateDecl::newCommon(ASTContext &C) const {
Common *CommonPtr = new (C) Common;
- C.AddDeallocation(DeallocateCommon, CommonPtr);
+ C.addDestruction(CommonPtr);
return CommonPtr;
}
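// Illustrative sketch (not from the patch): the per-template DeallocateCommon
// callbacks above are subsumed by ASTContext::addDestruction, which
// type-erases the destructor once. A standalone model of that pattern:
#include <utility>
#include <vector>
class DestructionQueue {
  std::vector<std::pair<void (*)(void *), void *>> Cleanups;
public:
  template <typename T> void addDestruction(T *Ptr) {
    // Record a thunk that replays ~T() on the erased pointer.
    Cleanups.push_back({[](void *P) { static_cast<T *>(P)->~T(); }, Ptr});
  }
  ~DestructionQueue() {
    for (auto It = Cleanups.rbegin(); It != Cleanups.rend(); ++It)
      It->first(It->second);  // run destructors in reverse order
  }
};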
diff --git a/lib/AST/DeclarationName.cpp b/lib/AST/DeclarationName.cpp
index 52791e51d2dc..1f8e26deda97 100644
--- a/lib/AST/DeclarationName.cpp
+++ b/lib/AST/DeclarationName.cpp
@@ -14,6 +14,7 @@
#include "clang/AST/DeclarationName.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
@@ -43,6 +44,22 @@ public:
}
};
+/// Contains extra information for the name of a C++ deduction guide.
+class CXXDeductionGuideNameExtra : public DeclarationNameExtra,
+ public llvm::FoldingSetNode {
+public:
+ /// The template named by the deduction guide.
+ TemplateDecl *Template;
+
+ /// FETokenInfo - Extra information associated with this deduction guide
+ /// name that can be used by the front end.
+ void *FETokenInfo;
+
+ void Profile(llvm::FoldingSetNodeID &ID) {
+ ID.AddPointer(Template);
+ }
+};
+
/// CXXOperatorIdName - Contains extra information for the name of an
/// overloaded operator in C++, such as "operator+".
class CXXOperatorIdName : public DeclarationNameExtra {
@@ -122,7 +139,13 @@ int DeclarationName::compare(DeclarationName LHS, DeclarationName RHS) {
if (QualTypeOrdering()(RHS.getCXXNameType(), LHS.getCXXNameType()))
return 1;
return 0;
-
+
+ case DeclarationName::CXXDeductionGuideName:
+ // We never want to compare deduction guide names for templates from
+ // different scopes, so just compare the template-name.
+ return compare(LHS.getCXXDeductionGuideTemplate()->getDeclName(),
+ RHS.getCXXDeductionGuideTemplate()->getDeclName());
+
case DeclarationName::CXXOperatorName:
return compareInt(LHS.getCXXOverloadedOperator(),
RHS.getCXXOverloadedOperator());
@@ -179,6 +202,12 @@ void DeclarationName::print(raw_ostream &OS, const PrintingPolicy &Policy) {
return printCXXConstructorDestructorName(N.getCXXNameType(), OS, Policy);
}
+ case DeclarationName::CXXDeductionGuideName:
+ OS << "<deduction guide for ";
+ getCXXDeductionGuideTemplate()->getDeclName().print(OS, Policy);
+ OS << '>';
+ return;
+
case DeclarationName::CXXOperatorName: {
static const char* const OperatorNames[NUM_OVERLOADED_OPERATORS] = {
nullptr,
@@ -243,6 +272,9 @@ DeclarationName::NameKind DeclarationName::getNameKind() const {
case DeclarationNameExtra::CXXDestructor:
return CXXDestructorName;
+ case DeclarationNameExtra::CXXDeductionGuide:
+ return CXXDeductionGuideName;
+
case DeclarationNameExtra::CXXConversionFunction:
return CXXConversionFunctionName;
@@ -268,7 +300,15 @@ DeclarationName::NameKind DeclarationName::getNameKind() const {
bool DeclarationName::isDependentName() const {
QualType T = getCXXNameType();
- return !T.isNull() && T->isDependentType();
+ if (!T.isNull() && T->isDependentType())
+ return true;
+
+ // A class-scope deduction guide in a dependent context has a dependent name.
+ auto *TD = getCXXDeductionGuideTemplate();
+ if (TD && TD->getDeclContext()->isDependentContext())
+ return true;
+
+ return false;
}
std::string DeclarationName::getAsString() const {
@@ -285,6 +325,12 @@ QualType DeclarationName::getCXXNameType() const {
return QualType();
}
+TemplateDecl *DeclarationName::getCXXDeductionGuideTemplate() const {
+ if (auto *Guide = getAsCXXDeductionGuideNameExtra())
+ return Guide->Template;
+ return nullptr;
+}
+
OverloadedOperatorKind DeclarationName::getCXXOverloadedOperator() const {
if (CXXOperatorIdName *CXXOp = getAsCXXOperatorIdName()) {
unsigned value
@@ -312,6 +358,9 @@ void *DeclarationName::getFETokenInfoAsVoidSlow() const {
case CXXConversionFunctionName:
return getAsCXXSpecialName()->FETokenInfo;
+ case CXXDeductionGuideName:
+ return getAsCXXDeductionGuideNameExtra()->FETokenInfo;
+
case CXXOperatorName:
return getAsCXXOperatorIdName()->FETokenInfo;
@@ -335,6 +384,10 @@ void DeclarationName::setFETokenInfo(void *T) {
getAsCXXSpecialName()->FETokenInfo = T;
break;
+ case CXXDeductionGuideName:
+ getAsCXXDeductionGuideNameExtra()->FETokenInfo = T;
+ break;
+
case CXXOperatorName:
getAsCXXOperatorIdName()->FETokenInfo = T;
break;
@@ -366,6 +419,7 @@ LLVM_DUMP_METHOD void DeclarationName::dump() const {
DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
CXXSpecialNamesImpl = new llvm::FoldingSet<CXXSpecialName>;
CXXLiteralOperatorNames = new llvm::FoldingSet<CXXLiteralOperatorIdName>;
+ CXXDeductionGuideNames = new llvm::FoldingSet<CXXDeductionGuideNameExtra>;
// Initialize the overloaded operator names.
CXXOperatorNames = new (Ctx) CXXOperatorIdName[NUM_OVERLOADED_OPERATORS];
@@ -377,14 +431,18 @@ DeclarationNameTable::DeclarationNameTable(const ASTContext &C) : Ctx(C) {
}
DeclarationNameTable::~DeclarationNameTable() {
- llvm::FoldingSet<CXXSpecialName> *SpecialNames =
- static_cast<llvm::FoldingSet<CXXSpecialName>*>(CXXSpecialNamesImpl);
- llvm::FoldingSet<CXXLiteralOperatorIdName> *LiteralNames
- = static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName>*>
- (CXXLiteralOperatorNames);
+ auto *SpecialNames =
+ static_cast<llvm::FoldingSet<CXXSpecialName> *>(CXXSpecialNamesImpl);
+ auto *LiteralNames =
+ static_cast<llvm::FoldingSet<CXXLiteralOperatorIdName> *>(
+ CXXLiteralOperatorNames);
+ auto *DeductionGuideNames =
+ static_cast<llvm::FoldingSet<CXXDeductionGuideNameExtra> *>(
+ CXXDeductionGuideNames);
delete SpecialNames;
delete LiteralNames;
+ delete DeductionGuideNames;
}
DeclarationName DeclarationNameTable::getCXXConstructorName(CanQualType Ty) {
@@ -398,6 +456,30 @@ DeclarationName DeclarationNameTable::getCXXDestructorName(CanQualType Ty) {
}
DeclarationName
+DeclarationNameTable::getCXXDeductionGuideName(TemplateDecl *Template) {
+ Template = cast<TemplateDecl>(Template->getCanonicalDecl());
+
+ auto *DeductionGuideNames =
+ static_cast<llvm::FoldingSet<CXXDeductionGuideNameExtra> *>(
+ CXXDeductionGuideNames);
+
+ llvm::FoldingSetNodeID ID;
+ ID.AddPointer(Template);
+
+ void *InsertPos = nullptr;
+ if (auto *Name = DeductionGuideNames->FindNodeOrInsertPos(ID, InsertPos))
+ return DeclarationName(Name);
+
+ auto *Name = new (Ctx) CXXDeductionGuideNameExtra;
+ Name->ExtraKindOrNumArgs = DeclarationNameExtra::CXXDeductionGuide;
+ Name->Template = Template;
+ Name->FETokenInfo = nullptr;
+
+ DeductionGuideNames->InsertNode(Name, InsertPos);
+ return DeclarationName(Name);
+}
+
+DeclarationName
DeclarationNameTable::getCXXConversionFunctionName(CanQualType Ty) {
return getCXXSpecialName(DeclarationName::CXXConversionFunctionName, Ty);
}
@@ -477,6 +559,7 @@ DeclarationNameTable::getCXXLiteralOperatorName(IdentifierInfo *II) {
DeclarationNameLoc::DeclarationNameLoc(DeclarationName Name) {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
+ case DeclarationName::CXXDeductionGuideName:
break;
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
@@ -509,6 +592,7 @@ bool DeclarationNameInfo::containsUnexpandedParameterPack() const {
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
+ case DeclarationName::CXXDeductionGuideName:
return false;
case DeclarationName::CXXConstructorName:
@@ -531,6 +615,7 @@ bool DeclarationNameInfo::isInstantiationDependent() const {
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
+ case DeclarationName::CXXDeductionGuideName:
return false;
case DeclarationName::CXXConstructorName:
@@ -560,6 +645,7 @@ void DeclarationNameInfo::printName(raw_ostream &OS) const {
case DeclarationName::CXXOperatorName:
case DeclarationName::CXXLiteralOperatorName:
case DeclarationName::CXXUsingDirective:
+ case DeclarationName::CXXDeductionGuideName:
OS << Name;
return;
@@ -574,7 +660,9 @@ void DeclarationNameInfo::printName(raw_ostream &OS) const {
LangOptions LO;
LO.CPlusPlus = true;
LO.Bool = true;
- OS << TInfo->getType().getAsString(PrintingPolicy(LO));
+ PrintingPolicy PP(LO);
+ PP.SuppressScope = true;
+ OS << TInfo->getType().getAsString(PP);
} else
OS << Name;
return;
@@ -585,6 +673,7 @@ void DeclarationNameInfo::printName(raw_ostream &OS) const {
SourceLocation DeclarationNameInfo::getEndLoc() const {
switch (Name.getNameKind()) {
case DeclarationName::Identifier:
+ case DeclarationName::CXXDeductionGuideName:
return NameLoc;
case DeclarationName::CXXOperatorName: {
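// Illustrative example (not from the patch) of the new name kind's observable
// behavior: getCXXDeductionGuideName uniques on the canonical template (one
// FoldingSet node per template), and print() renders the name as
// "<deduction guide for X>". For instance, given:
template <typename T> struct X { X(T); };
X(const char *) -> X<int>;  // hypothetical guide
// the guide's DeclarationName prints as: <deduction guide for X>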
diff --git a/lib/AST/Expr.cpp b/lib/AST/Expr.cpp
index 14f31d0c6b8c..d523a0f93cf6 100644
--- a/lib/AST/Expr.cpp
+++ b/lib/AST/Expr.cpp
@@ -987,7 +987,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
void StringLiteral::setString(const ASTContext &C, StringRef Str,
StringKind Kind, bool IsPascal) {
//FIXME: we assume that the string data comes from a target that uses the same
- // code unit size and endianess for the type of string.
+ // code unit size and endianness for the type of string.
this->Kind = Kind;
this->IsPascal = IsPascal;
@@ -1571,8 +1571,9 @@ bool CastExpr::CastConsistency() const {
goto CheckNoBasePath;
case CK_AddressSpaceConversion:
- assert(getType()->isPointerType());
- assert(getSubExpr()->getType()->isPointerType());
+ assert(getType()->isPointerType() || getType()->isBlockPointerType());
+ assert(getSubExpr()->getType()->isPointerType() ||
+ getSubExpr()->getType()->isBlockPointerType());
assert(getType()->getPointeeType().getAddressSpace() !=
getSubExpr()->getType()->getPointeeType().getAddressSpace());
// These should not have an inheritance path.
@@ -1881,6 +1882,11 @@ bool InitListExpr::isTransparent() const {
if (getNumInits() != 1 || !getInit(0))
return false;
+ // Don't confuse aggregate initialization of a struct X { X &x; }; with a
+ // transparent struct copy.
+ if (!getInit(0)->isRValue() && getType()->isRecordType())
+ return false;
+
return getType().getCanonicalType() ==
getInit(0)->getType().getCanonicalType();
}
@@ -2952,6 +2958,7 @@ bool Expr::HasSideEffects(const ASTContext &Ctx,
case CXXNewExprClass:
case CXXDeleteExprClass:
case CoawaitExprClass:
+ case DependentCoawaitExprClass:
case CoyieldExprClass:
// These always have a side-effect.
return true;
@@ -3880,16 +3887,22 @@ PseudoObjectExpr::PseudoObjectExpr(QualType type, ExprValueKind VK,
// UnaryExprOrTypeTraitExpr
Stmt::child_range UnaryExprOrTypeTraitExpr::children() {
+ const_child_range CCR =
+ const_cast<const UnaryExprOrTypeTraitExpr *>(this)->children();
+ return child_range(cast_away_const(CCR.begin()), cast_away_const(CCR.end()));
+}
+
+Stmt::const_child_range UnaryExprOrTypeTraitExpr::children() const {
// If this is of a type and the type is a VLA type (and not a typedef), the
// size expression of the VLA needs to be treated as an executable expression.
// Why isn't this weirdness documented better in StmtIterator?
if (isArgumentType()) {
- if (const VariableArrayType* T = dyn_cast<VariableArrayType>(
- getArgumentType().getTypePtr()))
- return child_range(child_iterator(T), child_iterator());
- return child_range(child_iterator(), child_iterator());
+ if (const VariableArrayType *T =
+ dyn_cast<VariableArrayType>(getArgumentType().getTypePtr()))
+ return const_child_range(const_child_iterator(T), const_child_iterator());
+ return const_child_range(const_child_iterator(), const_child_iterator());
}
- return child_range(&Argument.Ex, &Argument.Ex + 1);
+ return const_child_range(&Argument.Ex, &Argument.Ex + 1);
}
AtomicExpr::AtomicExpr(SourceLocation BLoc, ArrayRef<Expr*> args,
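// Illustrative sketch (not from the patch) of the const/non-const children()
// idiom used above: implement the traversal once in the const overload, then
// cast constness away in the non-const one (safe, since 'this' is known to be
// non-const there).
#include <utility>
struct Node {
  Node *Kids[2] = {nullptr, nullptr};
  std::pair<Node *const *, Node *const *> children() const {
    return {Kids, Kids + 2};
  }
  std::pair<Node **, Node **> children() {
    auto CR = static_cast<const Node *>(this)->children();
    return {const_cast<Node **>(CR.first), const_cast<Node **>(CR.second)};
  }
};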
diff --git a/lib/AST/ExprCXX.cpp b/lib/AST/ExprCXX.cpp
index ad510e0070e6..6713fca04571 100644
--- a/lib/AST/ExprCXX.cpp
+++ b/lib/AST/ExprCXX.cpp
@@ -734,23 +734,23 @@ CXXBindTemporaryExpr *CXXBindTemporaryExpr::Create(const ASTContext &C,
CXXTemporaryObjectExpr::CXXTemporaryObjectExpr(const ASTContext &C,
CXXConstructorDecl *Cons,
- TypeSourceInfo *Type,
+ QualType Type,
+ TypeSourceInfo *TSI,
ArrayRef<Expr*> Args,
SourceRange ParenOrBraceRange,
bool HadMultipleCandidates,
bool ListInitialization,
bool StdInitListInitialization,
bool ZeroInitialization)
- : CXXConstructExpr(C, CXXTemporaryObjectExprClass,
- Type->getType().getNonReferenceType(),
- Type->getTypeLoc().getBeginLoc(),
+ : CXXConstructExpr(C, CXXTemporaryObjectExprClass, Type,
+ TSI->getTypeLoc().getBeginLoc(),
Cons, false, Args,
HadMultipleCandidates,
ListInitialization,
StdInitListInitialization,
ZeroInitialization,
CXXConstructExpr::CK_Complete, ParenOrBraceRange),
- Type(Type) {
+ Type(TSI) {
}
SourceLocation CXXTemporaryObjectExpr::getLocStart() const {
diff --git a/lib/AST/ExprClassification.cpp b/lib/AST/ExprClassification.cpp
index adb74b80b198..c035a42439a3 100644
--- a/lib/AST/ExprClassification.cpp
+++ b/lib/AST/ExprClassification.cpp
@@ -129,6 +129,7 @@ static Cl::Kinds ClassifyInternal(ASTContext &Ctx, const Expr *E) {
case Expr::UnresolvedLookupExprClass:
case Expr::UnresolvedMemberExprClass:
case Expr::TypoExprClass:
+ case Expr::DependentCoawaitExprClass:
case Expr::CXXDependentScopeMemberExprClass:
case Expr::DependentScopeDeclRefExprClass:
// ObjC instance variables are lvalues
@@ -626,7 +627,8 @@ static Cl::ModifiableType IsModifiable(ASTContext &Ctx, const Expr *E,
// Const stuff is obviously not modifiable.
if (CT.isConstQualified())
return Cl::CM_ConstQualified;
- if (CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant)
+ if (Ctx.getLangOpts().OpenCL &&
+ CT.getQualifiers().getAddressSpace() == LangAS::opencl_constant)
return Cl::CM_ConstAddrSpace;
// Arrays are not modifiable, only their elements are.
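// Illustrative example (not from the patch): with the added LangOpts guard,
// the __constant address-space diagnosis applies only when compiling OpenCL:
//   __constant int c = 1;  // OpenCL source
//   c = 2;                 // classified Cl::CM_ConstAddrSpace -> error
// The same address-space value seen outside OpenCL no longer takes this path.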
diff --git a/lib/AST/ExprConstant.cpp b/lib/AST/ExprConstant.cpp
index 2c0fce91844c..2fafa4876758 100644
--- a/lib/AST/ExprConstant.cpp
+++ b/lib/AST/ExprConstant.cpp
@@ -350,36 +350,49 @@ namespace {
MostDerivedArraySize = 2;
MostDerivedPathLength = Entries.size();
}
- void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E, uint64_t N);
+ void diagnosePointerArithmetic(EvalInfo &Info, const Expr *E,
+ const APSInt &N);
/// Add N to the address of this subobject.
- void adjustIndex(EvalInfo &Info, const Expr *E, uint64_t N) {
- if (Invalid) return;
+ void adjustIndex(EvalInfo &Info, const Expr *E, APSInt N) {
+ if (Invalid || !N) return;
+ uint64_t TruncatedN = N.extOrTrunc(64).getZExtValue();
if (isMostDerivedAnUnsizedArray()) {
// Can't verify -- trust that the user is doing the right thing (or if
// not, trust that the caller will catch the bad behavior).
- Entries.back().ArrayIndex += N;
- return;
- }
- if (MostDerivedPathLength == Entries.size() &&
- MostDerivedIsArrayElement) {
- Entries.back().ArrayIndex += N;
- if (Entries.back().ArrayIndex > getMostDerivedArraySize()) {
- diagnosePointerArithmetic(Info, E, Entries.back().ArrayIndex);
- setInvalid();
- }
+ // FIXME: Should we reject if this overflows, at least?
+ Entries.back().ArrayIndex += TruncatedN;
return;
}
+
// [expr.add]p4: For the purposes of these operators, a pointer to a
// nonarray object behaves the same as a pointer to the first element of
// an array of length one with the type of the object as its element type.
- if (IsOnePastTheEnd && N == (uint64_t)-1)
- IsOnePastTheEnd = false;
- else if (!IsOnePastTheEnd && N == 1)
- IsOnePastTheEnd = true;
- else if (N != 0) {
- diagnosePointerArithmetic(Info, E, uint64_t(IsOnePastTheEnd) + N);
+ bool IsArray = MostDerivedPathLength == Entries.size() &&
+ MostDerivedIsArrayElement;
+ uint64_t ArrayIndex =
+ IsArray ? Entries.back().ArrayIndex : (uint64_t)IsOnePastTheEnd;
+ uint64_t ArraySize =
+ IsArray ? getMostDerivedArraySize() : (uint64_t)1;
+
+ if (N < -(int64_t)ArrayIndex || N > ArraySize - ArrayIndex) {
+ // Calculate the actual index in a wide enough type, so we can include
+ // it in the note.
+ N = N.extend(std::max<unsigned>(N.getBitWidth() + 1, 65));
+ (llvm::APInt&)N += ArrayIndex;
+ assert(N.ugt(ArraySize) && "bounds check failed for in-bounds index");
+ diagnosePointerArithmetic(Info, E, N);
setInvalid();
+ return;
}
+
+ ArrayIndex += TruncatedN;
+ assert(ArrayIndex <= ArraySize &&
+ "bounds check succeeded for out-of-bounds index");
+
+ if (IsArray)
+ Entries.back().ArrayIndex = ArrayIndex;
+ else
+ IsOnePastTheEnd = (ArrayIndex != 0);
}
};
@@ -413,6 +426,17 @@ namespace {
/// Index - The call index of this call.
unsigned Index;
+ // FIXME: Adding this to every 'CallStackFrame' may have a nontrivial impact
+ // on the overall stack usage of deeply-recursing constexpr evaluations.
+ // We should cache this map rather than recomputing it repeatedly, but let's
+ // try this first and look into caching it as a later change.
+
+ /// LambdaCaptureFields - Mapping from captured variables/this to
+ /// corresponding data members in the closure class.
+ llvm::DenseMap<const VarDecl *, FieldDecl *> LambdaCaptureFields;
+ FieldDecl *LambdaThisCaptureField;
+
CallStackFrame(EvalInfo &Info, SourceLocation CallLoc,
const FunctionDecl *Callee, const LValue *This,
APValue *Arguments);
@@ -1048,16 +1072,17 @@ bool SubobjectDesignator::checkSubobject(EvalInfo &Info, const Expr *E,
}
void SubobjectDesignator::diagnosePointerArithmetic(EvalInfo &Info,
- const Expr *E, uint64_t N) {
+ const Expr *E,
+ const APSInt &N) {
// If we're complaining, we must be able to statically determine the size of
// the most derived array.
if (MostDerivedPathLength == Entries.size() && MostDerivedIsArrayElement)
Info.CCEDiag(E, diag::note_constexpr_array_index)
- << static_cast<int>(N) << /*array*/ 0
+ << N << /*array*/ 0
<< static_cast<unsigned>(getMostDerivedArraySize());
else
Info.CCEDiag(E, diag::note_constexpr_array_index)
- << static_cast<int>(N) << /*non-array*/ 1;
+ << N << /*non-array*/ 1;
setInvalid();
}
@@ -1273,14 +1298,24 @@ namespace {
void clearIsNullPointer() {
IsNullPtr = false;
}
- void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E, uint64_t Index,
- CharUnits ElementSize) {
- // Compute the new offset in the appropriate width.
- Offset += Index * ElementSize;
- if (Index && checkNullPointer(Info, E, CSK_ArrayIndex))
+ void adjustOffsetAndIndex(EvalInfo &Info, const Expr *E,
+ const APSInt &Index, CharUnits ElementSize) {
+ // An index of 0 has no effect. (In C, adding 0 to a null pointer is UB,
+ // but we're not required to diagnose it and it's valid in C++.)
+ if (!Index)
+ return;
+
+ // Compute the new offset in the appropriate width, wrapping at 64 bits.
+ // FIXME: When compiling for a 32-bit target, we should use 32-bit
+ // offsets.
+ uint64_t Offset64 = Offset.getQuantity();
+ uint64_t ElemSize64 = ElementSize.getQuantity();
+ uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
+ Offset = CharUnits::fromQuantity(Offset64 + ElemSize64 * Index64);
+
+ if (checkNullPointer(Info, E, CSK_ArrayIndex))
Designator.adjustIndex(Info, E, Index);
- if (Index)
- clearIsNullPointer();
+ clearIsNullPointer();
}
void adjustOffset(CharUnits N) {
Offset += N;
@@ -1404,13 +1439,24 @@ static bool EvaluateIntegerOrLValue(const Expr *E, APValue &Result,
EvalInfo &Info);
static bool EvaluateFloat(const Expr *E, APFloat &Result, EvalInfo &Info);
static bool EvaluateComplex(const Expr *E, ComplexValue &Res, EvalInfo &Info);
-static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info);
+static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
+ EvalInfo &Info);
static bool EvaluateAsRValue(EvalInfo &Info, const Expr *E, APValue &Result);
//===----------------------------------------------------------------------===//
// Misc utilities
//===----------------------------------------------------------------------===//
+/// Negate an APSInt in place, converting it to a signed form if necessary, and
+/// preserving its value (by extending by up to one bit as needed).
+static void negateAsSigned(APSInt &Int) {
+ if (Int.isUnsigned() || Int.isMinSignedValue()) {
+ Int = Int.extend(Int.getBitWidth() + 1);
+ Int.setIsSigned(true);
+ }
+ Int = -Int;
+}
+
/// Produce a string describing the given constexpr call.
static void describeCall(CallStackFrame *Frame, raw_ostream &Out) {
unsigned ArgIndex = 0;
@@ -1458,13 +1504,6 @@ static bool EvaluateIgnoredValue(EvalInfo &Info, const Expr *E) {
return true;
}
-/// Sign- or zero-extend a value to 64 bits. If it's already 64 bits, just
-/// return its existing value.
-static int64_t getExtValue(const APSInt &Value) {
- return Value.isSigned() ? Value.getSExtValue()
- : static_cast<int64_t>(Value.getZExtValue());
-}
-
/// Should this call expression be treated as a string literal?
static bool IsStringLiteralCall(const CallExpr *E) {
unsigned Builtin = E->getBuiltinCallee();
@@ -2220,7 +2259,7 @@ static bool HandleSizeof(EvalInfo &Info, SourceLocation Loc,
/// \param Adjustment - The adjustment, in objects of type EltTy, to add.
static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
LValue &LVal, QualType EltTy,
- int64_t Adjustment) {
+ APSInt Adjustment) {
CharUnits SizeOfPointee;
if (!HandleSizeof(Info, E->getExprLoc(), EltTy, SizeOfPointee))
return false;
@@ -2229,6 +2268,13 @@ static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
return true;
}
+static bool HandleLValueArrayAdjustment(EvalInfo &Info, const Expr *E,
+ LValue &LVal, QualType EltTy,
+ int64_t Adjustment) {
+ return HandleLValueArrayAdjustment(Info, E, LVal, EltTy,
+ APSInt::get(Adjustment));
+}
+
/// Update an lvalue to refer to a component of a complex number.
/// \param Info - Information about the ongoing evaluation.
/// \param LVal - The lvalue to be updated.
@@ -2247,6 +2293,10 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
return true;
}
+static bool handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv,
+ QualType Type, const LValue &LVal,
+ APValue &RVal);
+
/// Try to evaluate the initializer for a variable declaration.
///
/// \param Info Information about the ongoing evaluation.
@@ -2258,6 +2308,7 @@ static bool HandleLValueComplexElement(EvalInfo &Info, const Expr *E,
static bool evaluateVarDeclInit(EvalInfo &Info, const Expr *E,
const VarDecl *VD, CallStackFrame *Frame,
APValue *&Result) {
+
// If this is a parameter to an active constexpr function call, perform
// argument substitution.
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD)) {
@@ -3191,9 +3242,9 @@ struct CompoundAssignSubobjectHandler {
return false;
}
- int64_t Offset = getExtValue(RHS.getInt());
+ APSInt Offset = RHS.getInt();
if (Opcode == BO_Sub)
- Offset = -Offset;
+ negateAsSigned(Offset);
LValue LVal;
LVal.setFrom(Info.Ctx, Subobj);
@@ -4148,6 +4199,10 @@ static bool HandleFunctionCall(SourceLocation CallLoc,
return false;
This->moveInto(Result);
return true;
+ } else if (MD && isLambdaCallOperator(MD)) {
+ // We're in a lambda; determine the lambda capture field maps.
+ MD->getParent()->getCaptureFields(Frame.LambdaCaptureFields,
+ Frame.LambdaThisCaptureField);
}
StmtResult Ret = {Result, ResultSlot};
@@ -4691,7 +4746,10 @@ public:
case CK_AtomicToNonAtomic: {
APValue AtomicVal;
- if (!EvaluateAtomic(E->getSubExpr(), AtomicVal, Info))
+ // This does not need to be done in place even for class/array types:
+ // atomic-to-non-atomic conversion implies copying the object
+ // representation.
+ if (!Evaluate(AtomicVal, Info, E->getSubExpr()))
return false;
return DerivedSuccess(AtomicVal, E);
}
@@ -5009,6 +5067,33 @@ bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
+
+ // If we are within a lambda's call operator, check whether the 'VD' referred
+ // to within 'E' actually represents a lambda-capture that maps to a
+ // data-member/field within the closure object, and if so, evaluate to the
+ // field or what the field refers to.
+ if (Info.CurrentCall && isLambdaCallOperator(Info.CurrentCall->Callee)) {
+ if (auto *FD = Info.CurrentCall->LambdaCaptureFields.lookup(VD)) {
+ if (Info.checkingPotentialConstantExpression())
+ return false;
+ // Start with 'Result' referring to the complete closure object...
+ Result = *Info.CurrentCall->This;
+ // ... then update it to refer to the field of the closure object
+ // that represents the capture.
+ if (!HandleLValueMember(Info, E, Result, FD))
+ return false;
+ // And if the field is of reference type, update 'Result' to refer to what
+ // the field refers to.
+ if (FD->getType()->isReferenceType()) {
+ APValue RVal;
+ if (!handleLValueToRValueConversion(Info, E, FD->getType(), Result,
+ RVal))
+ return false;
+ Result.setFrom(Info.Ctx, RVal);
+ }
+ return true;
+ }
+ }
CallStackFrame *Frame = nullptr;
if (VD->hasLocalStorage() && Info.CurrentCall->Index > 1) {
// Only if a local variable was declared in the function currently being
@@ -5162,8 +5247,7 @@ bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
if (!EvaluateInteger(E->getIdx(), Index, Info))
return false;
- return HandleLValueArrayAdjustment(Info, E, Result, E->getType(),
- getExtValue(Index));
+ return HandleLValueArrayAdjustment(Info, E, Result, E->getType(), Index);
}
bool LValueExprEvaluator::VisitUnaryDeref(const UnaryOperator *E) {
@@ -5409,6 +5493,27 @@ public:
return false;
}
Result = *Info.CurrentCall->This;
+ // If we are inside a lambda's call operator, the 'this' expression refers
+ // to the enclosing '*this' object, which is captured either by value
+ // (copied into the closure field that represents it) or by reference.
+ if (isLambdaCallOperator(Info.CurrentCall->Callee)) {
+ // Update 'Result' to refer to the data member/field of the closure object
+ // that represents the '*this' capture.
+ if (!HandleLValueMember(Info, E, Result,
+ Info.CurrentCall->LambdaThisCaptureField))
+ return false;
+ // If we captured '*this' by reference, replace the field with its referent.
+ if (Info.CurrentCall->LambdaThisCaptureField->getType()
+ ->isPointerType()) {
+ APValue RVal;
+ if (!handleLValueToRValueConversion(Info, E, E->getType(), Result,
+ RVal))
+ return false;
+
+ Result.setFrom(Info.Ctx, RVal);
+ }
+ }
return true;
}
@@ -5440,13 +5545,11 @@ bool PointerExprEvaluator::VisitBinaryOperator(const BinaryOperator *E) {
if (!EvaluateInteger(IExp, Offset, Info) || !EvalPtrOK)
return false;
- int64_t AdditionalOffset = getExtValue(Offset);
if (E->getOpcode() == BO_Sub)
- AdditionalOffset = -AdditionalOffset;
+ negateAsSigned(Offset);
QualType Pointee = PExp->getType()->castAs<PointerType>()->getPointeeType();
- return HandleLValueArrayAdjustment(Info, E, Result, Pointee,
- AdditionalOffset);
+ return HandleLValueArrayAdjustment(Info, E, Result, Pointee, Offset);
}
bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
@@ -5576,6 +5679,8 @@ static CharUnits GetAlignOfType(EvalInfo &Info, QualType T) {
T = Ref->getPointeeType();
// __alignof is defined to return the preferred alignment.
+ if (T.getQualifiers().hasUnaligned())
+ return CharUnits::One();
return Info.Ctx.toCharUnitsFromBits(
Info.Ctx.getPreferredTypeAlign(T.getTypePtr()));
}
@@ -5640,14 +5745,14 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
APSInt Alignment;
if (!EvaluateInteger(E->getArg(1), Alignment, Info))
return false;
- CharUnits Align = CharUnits::fromQuantity(getExtValue(Alignment));
+ CharUnits Align = CharUnits::fromQuantity(Alignment.getZExtValue());
if (E->getNumArgs() > 2) {
APSInt Offset;
if (!EvaluateInteger(E->getArg(2), Offset, Info))
return false;
- int64_t AdditionalOffset = -getExtValue(Offset);
+ int64_t AdditionalOffset = -Offset.getZExtValue();
OffsetResult.Offset += CharUnits::fromQuantity(AdditionalOffset);
}
@@ -5664,12 +5769,11 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
if (BaseAlignment < Align) {
Result.Designator.setInvalid();
- // FIXME: Quantities here cast to integers because the plural modifier
- // does not work on APSInts yet.
+ // FIXME: Add support to Diagnostic for long / long long.
CCEDiag(E->getArg(0),
diag::note_constexpr_baa_insufficient_alignment) << 0
- << (int) BaseAlignment.getQuantity()
- << (unsigned) getExtValue(Alignment);
+ << (unsigned)BaseAlignment.getQuantity()
+ << (unsigned)Align.getQuantity();
return false;
}
}
@@ -5677,18 +5781,14 @@ bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
// The offset must also have the correct alignment.
if (OffsetResult.Offset.alignTo(Align) != OffsetResult.Offset) {
Result.Designator.setInvalid();
- APSInt Offset(64, false);
- Offset = OffsetResult.Offset.getQuantity();
-
- if (OffsetResult.Base)
- CCEDiag(E->getArg(0),
- diag::note_constexpr_baa_insufficient_alignment) << 1
- << (int) getExtValue(Offset) << (unsigned) getExtValue(Alignment);
- else
- CCEDiag(E->getArg(0),
- diag::note_constexpr_baa_value_insufficient_alignment)
- << Offset << (unsigned) getExtValue(Alignment);
+ (OffsetResult.Base
+ ? CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_insufficient_alignment) << 1
+ : CCEDiag(E->getArg(0),
+ diag::note_constexpr_baa_value_insufficient_alignment))
+ << (int)OffsetResult.Offset.getQuantity()
+ << (unsigned)Align.getQuantity();
return false;
}
@@ -6245,14 +6345,40 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
if (ClosureClass->isInvalidDecl()) return false;
if (Info.checkingPotentialConstantExpression()) return true;
- if (E->capture_size()) {
- Info.FFDiag(E, diag::note_unimplemented_constexpr_lambda_feature_ast)
- << "can not evaluate lambda expressions with captures";
- return false;
+
+ const size_t NumFields =
+ std::distance(ClosureClass->field_begin(), ClosureClass->field_end());
+
+ assert(NumFields == (size_t)std::distance(E->capture_init_begin(),
+ E->capture_init_end()) &&
+ "The number of lambda capture initializers should equal the number of "
+ "fields within the closure type");
+
+ Result = APValue(APValue::UninitStruct(), /*NumBases*/0, NumFields);
+ // Iterate through all the lambda's closure object's fields and initialize
+ // them.
+ auto *CaptureInitIt = E->capture_init_begin();
+ const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
+ bool Success = true;
+ for (const auto *Field : ClosureClass->fields()) {
+ assert(CaptureInitIt != E->capture_init_end());
+ // Get the initializer for this field
+ Expr *const CurFieldInit = *CaptureInitIt++;
+
+ // If there is no initializer, either this is a VLA or an error has
+ // occurred.
+ if (!CurFieldInit)
+ return Error(E);
+
+ APValue &FieldVal = Result.getStructField(Field->getFieldIndex());
+ if (!EvaluateInPlace(FieldVal, Info, This, CurFieldInit)) {
+ if (!Info.keepEvaluatingAfterFailure())
+ return false;
+ Success = false;
+ }
+ ++CaptureIt;
}
- // FIXME: Implement captures.
- Result = APValue(APValue::UninitStruct(), /*NumBases*/0, /*NumFields*/0);
- return true;
+ return Success;
}
static bool EvaluateRecord(const Expr *E, const LValue &This,
@@ -6971,7 +7097,6 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::Dependent:
llvm_unreachable("CallExpr::isBuiltinClassifyType(): unimplemented type");
@@ -7030,6 +7155,7 @@ static int EvaluateBuiltinClassifyType(const CallExpr *E,
case Type::Vector:
case Type::ExtVector:
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
case Type::ObjCObject:
case Type::ObjCInterface:
case Type::ObjCObjectPointer:
@@ -7948,6 +8074,19 @@ bool DataRecursiveIntBinOpEvaluator::
return true;
}
+static void addOrSubLValueAsInteger(APValue &LVal, const APSInt &Index,
+ bool IsSub) {
+ // Compute the new offset in the appropriate width, wrapping at 64 bits.
+ // FIXME: When compiling for a 32-bit target, we should use 32-bit
+ // offsets.
+ assert(!LVal.hasLValuePath() && "have designator for integer lvalue");
+ CharUnits &Offset = LVal.getLValueOffset();
+ uint64_t Offset64 = Offset.getQuantity();
+ uint64_t Index64 = Index.extOrTrunc(64).getZExtValue();
+ Offset = CharUnits::fromQuantity(IsSub ? Offset64 - Index64
+ : Offset64 + Index64);
+}
+
bool DataRecursiveIntBinOpEvaluator::
VisitBinOp(const EvalResult &LHSResult, const EvalResult &RHSResult,
const BinaryOperator *E, APValue &Result) {
@@ -7994,12 +8133,7 @@ bool DataRecursiveIntBinOpEvaluator::
// Handle cases like (unsigned long)&a + 4.
if (E->isAdditiveOp() && LHSVal.isLValue() && RHSVal.isInt()) {
Result = LHSVal;
- CharUnits AdditionalOffset =
- CharUnits::fromQuantity(RHSVal.getInt().getZExtValue());
- if (E->getOpcode() == BO_Add)
- Result.getLValueOffset() += AdditionalOffset;
- else
- Result.getLValueOffset() -= AdditionalOffset;
+ addOrSubLValueAsInteger(Result, RHSVal.getInt(), E->getOpcode() == BO_Sub);
return true;
}
@@ -8007,8 +8141,7 @@ bool DataRecursiveIntBinOpEvaluator::
if (E->getOpcode() == BO_Add &&
RHSVal.isLValue() && LHSVal.isInt()) {
Result = RHSVal;
- Result.getLValueOffset() +=
- CharUnits::fromQuantity(LHSVal.getInt().getZExtValue());
+ addOrSubLValueAsInteger(Result, LHSVal.getInt(), /*IsSub*/false);
return true;
}
@@ -9565,10 +9698,11 @@ bool ComplexExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
namespace {
class AtomicExprEvaluator :
public ExprEvaluatorBase<AtomicExprEvaluator> {
+ const LValue *This;
APValue &Result;
public:
- AtomicExprEvaluator(EvalInfo &Info, APValue &Result)
- : ExprEvaluatorBaseTy(Info), Result(Result) {}
+ AtomicExprEvaluator(EvalInfo &Info, const LValue *This, APValue &Result)
+ : ExprEvaluatorBaseTy(Info), This(This), Result(Result) {}
bool Success(const APValue &V, const Expr *E) {
Result = V;
@@ -9578,7 +9712,10 @@ public:
bool ZeroInitialization(const Expr *E) {
ImplicitValueInitExpr VIE(
E->getType()->castAs<AtomicType>()->getValueType());
- return Evaluate(Result, Info, &VIE);
+ // For atomic-qualified class (and array) types in C++, initialize the
+ // _Atomic-wrapped subobject directly, in-place.
+ return This ? EvaluateInPlace(Result, Info, *This, &VIE)
+ : Evaluate(Result, Info, &VIE);
}
bool VisitCastExpr(const CastExpr *E) {
@@ -9586,15 +9723,17 @@ public:
default:
return ExprEvaluatorBaseTy::VisitCastExpr(E);
case CK_NonAtomicToAtomic:
- return Evaluate(Result, Info, E->getSubExpr());
+ return This ? EvaluateInPlace(Result, Info, *This, E->getSubExpr())
+ : Evaluate(Result, Info, E->getSubExpr());
}
}
};
} // end anonymous namespace
-static bool EvaluateAtomic(const Expr *E, APValue &Result, EvalInfo &Info) {
+static bool EvaluateAtomic(const Expr *E, const LValue *This, APValue &Result,
+ EvalInfo &Info) {
assert(E->isRValue() && E->getType()->isAtomicType());
- return AtomicExprEvaluator(Info, Result).Visit(E);
+ return AtomicExprEvaluator(Info, This, Result).Visit(E);
}
//===----------------------------------------------------------------------===//
@@ -9699,8 +9838,17 @@ static bool Evaluate(APValue &Result, EvalInfo &Info, const Expr *E) {
if (!EvaluateVoid(E, Info))
return false;
} else if (T->isAtomicType()) {
- if (!EvaluateAtomic(E, Result, Info))
- return false;
+ QualType Unqual = T.getAtomicUnqualifiedType();
+ if (Unqual->isArrayType() || Unqual->isRecordType()) {
+ LValue LV;
+ LV.set(E, Info.CurrentCall->Index);
+ APValue &Value = Info.CurrentCall->createTemporary(E, false);
+ if (!EvaluateAtomic(E, &LV, Value, Info))
+ return false;
+ } else {
+ if (!EvaluateAtomic(E, nullptr, Result, Info))
+ return false;
+ }
} else if (Info.getLangOpts().CPlusPlus11) {
Info.FFDiag(E, diag::note_constexpr_nonliteral) << E->getType();
return false;
@@ -9725,10 +9873,16 @@ static bool EvaluateInPlace(APValue &Result, EvalInfo &Info, const LValue &This,
if (E->isRValue()) {
// Evaluate arrays and record types in-place, so that later initializers can
// refer to earlier-initialized members of the object.
- if (E->getType()->isArrayType())
+ QualType T = E->getType();
+ if (T->isArrayType())
return EvaluateArray(E, This, Result, Info);
- else if (E->getType()->isRecordType())
+ else if (T->isRecordType())
return EvaluateRecord(E, This, Result, Info);
+ else if (T->isAtomicType()) {
+ QualType Unqual = T.getAtomicUnqualifiedType();
+ if (Unqual->isArrayType() || Unqual->isRecordType())
+ return EvaluateAtomic(E, &This, Result, Info);
+ }
}
// For any other type, in-place evaluation is unimportant.
@@ -9945,7 +10099,7 @@ bool Expr::EvalResult::isGlobalLValue() const {
// Note that to reduce code duplication, this helper does no evaluation
// itself; the caller checks whether the expression is evaluatable, and
// in the rare cases where CheckICE actually cares about the evaluated
-// value, it calls into Evalute.
+// value, it calls into Evaluate.
namespace {
@@ -10067,6 +10221,7 @@ static ICEDiag CheckICE(const Expr* E, const ASTContext &Ctx) {
case Expr::LambdaExprClass:
case Expr::CXXFoldExprClass:
case Expr::CoawaitExprClass:
+ case Expr::DependentCoawaitExprClass:
case Expr::CoyieldExprClass:
return ICEDiag(IK_NotICE, E->getLocStart());
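// Illustrative example (not from the patch): with LambdaCaptureFields wired
// into the evaluator, C++17 constexpr lambdas with captures can now fold at
// compile time instead of hitting the removed "can not evaluate lambda
// expressions with captures" diagnostic:
constexpr int addCaptured(int J) {
  int I = 3;
  return [I](int K) { return I + K; }(J);  // the capture of I is evaluated
}
static_assert(addCaptured(4) == 7, "requires lambda capture evaluation");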
diff --git a/lib/AST/ExternalASTMerger.cpp b/lib/AST/ExternalASTMerger.cpp
new file mode 100644
index 000000000000..2d4d0185ff2a
--- /dev/null
+++ b/lib/AST/ExternalASTMerger.cpp
@@ -0,0 +1,185 @@
+//===- ExternalASTMerger.cpp - Merging External AST Interface ---*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the ExternalASTMerger, which vends a combination of
+// ASTs from several different ASTContext/FileManager pairs.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclObjC.h"
+#include "clang/AST/ExternalASTMerger.h"
+
+using namespace clang;
+
+namespace {
+
+template <typename T> struct Source {
+ T t;
+ Source(T t) : t(t) {}
+ operator T() { return t; }
+ template <typename U = T> U &get() { return t; }
+ template <typename U = T> const U &get() const { return t; }
+ template <typename U> operator Source<U>() { return Source<U>(t); }
+};
+
+typedef std::pair<Source<NamedDecl *>, ASTImporter *> Candidate;
+
+class LazyASTImporter : public ASTImporter {
+public:
+ LazyASTImporter(ASTContext &ToContext, FileManager &ToFileManager,
+ ASTContext &FromContext, FileManager &FromFileManager)
+ : ASTImporter(ToContext, ToFileManager, FromContext, FromFileManager,
+ /*MinimalImport=*/true) {}
+ Decl *Imported(Decl *From, Decl *To) override {
+ if (auto ToTag = dyn_cast<TagDecl>(To)) {
+ ToTag->setHasExternalLexicalStorage();
+ } else if (auto ToNamespace = dyn_cast<NamespaceDecl>(To)) {
+ ToNamespace->setHasExternalVisibleStorage();
+ }
+ return ASTImporter::Imported(From, To);
+ }
+};
+
+Source<const DeclContext *>
+LookupSameContext(Source<TranslationUnitDecl *> SourceTU, const DeclContext *DC,
+ ASTImporter &ReverseImporter) {
+ if (DC->isTranslationUnit()) {
+ return SourceTU;
+ }
+ Source<const DeclContext *> SourceParentDC =
+ LookupSameContext(SourceTU, DC->getParent(), ReverseImporter);
+ if (!SourceParentDC) {
+ // If we couldn't find the parent DC in this TranslationUnit, give up.
+ return nullptr;
+ }
+ auto ND = cast<NamedDecl>(DC);
+ DeclarationName Name = ND->getDeclName();
+ Source<DeclarationName> SourceName = ReverseImporter.Import(Name);
+ DeclContext::lookup_result SearchResult =
+ SourceParentDC.get()->lookup(SourceName.get());
+ size_t SearchResultSize = SearchResult.size();
+ // Handle multiple candidates once we have a test for it.
+ // This may turn up when we import template specializations correctly.
+ assert(SearchResultSize < 2);
+ if (SearchResultSize == 0) {
+    // Couldn't find the name, so we have to give up.
+ return nullptr;
+ } else {
+ NamedDecl *SearchResultDecl = SearchResult[0];
+ return dyn_cast<DeclContext>(SearchResultDecl);
+ }
+}
+
+bool IsForwardDeclaration(Decl *D) {
+  assert(!isa<ObjCInterfaceDecl>(D)); // TODO: handle this case.
+ if (auto TD = dyn_cast<TagDecl>(D)) {
+ return !TD->isThisDeclarationADefinition();
+ } else if (auto FD = dyn_cast<FunctionDecl>(D)) {
+ return !FD->isThisDeclarationADefinition();
+ } else {
+ return false;
+ }
+}
+
+void ForEachMatchingDC(
+ const DeclContext *DC,
+ llvm::ArrayRef<ExternalASTMerger::ImporterPair> Importers,
+ std::function<void(const ExternalASTMerger::ImporterPair &IP,
+ Source<const DeclContext *> SourceDC)>
+ Callback) {
+ for (const ExternalASTMerger::ImporterPair &IP : Importers) {
+ Source<TranslationUnitDecl *> SourceTU(
+ IP.Forward->getFromContext().getTranslationUnitDecl());
+ Source<const DeclContext *> SourceDC =
+ LookupSameContext(SourceTU, DC, *IP.Reverse);
+ if (SourceDC.get()) {
+ Callback(IP, SourceDC);
+ }
+ }
+}
+
+bool HasDeclOfSameType(llvm::ArrayRef<Candidate> Decls, const Candidate &C) {
+ return std::any_of(Decls.begin(), Decls.end(), [&C](const Candidate &D) {
+ return C.first.get()->getKind() == D.first.get()->getKind();
+ });
+}
+} // end namespace
+
+ExternalASTMerger::ExternalASTMerger(const ImporterEndpoint &Target,
+ llvm::ArrayRef<ImporterEndpoint> Sources) {
+ for (const ImporterEndpoint &S : Sources) {
+ Importers.push_back(
+ {llvm::make_unique<LazyASTImporter>(Target.AST, Target.FM, S.AST, S.FM),
+ llvm::make_unique<ASTImporter>(S.AST, S.FM, Target.AST, Target.FM,
+ /*MinimalImport=*/true)});
+ }
+}
+
+bool ExternalASTMerger::FindExternalVisibleDeclsByName(const DeclContext *DC,
+ DeclarationName Name) {
+ llvm::SmallVector<NamedDecl *, 1> Decls;
+ llvm::SmallVector<Candidate, 4> CompleteDecls;
+ llvm::SmallVector<Candidate, 4> ForwardDecls;
+
+ auto FilterFoundDecl = [&CompleteDecls, &ForwardDecls](const Candidate &C) {
+ if (IsForwardDeclaration(C.first.get())) {
+ if (!HasDeclOfSameType(ForwardDecls, C)) {
+ ForwardDecls.push_back(C);
+ }
+ } else {
+ CompleteDecls.push_back(C);
+ }
+ };
+
+ ForEachMatchingDC(DC, Importers, [Name, &FilterFoundDecl](
+ const ImporterPair &IP,
+ Source<const DeclContext *> SourceDC) {
+ DeclarationName FromName = IP.Reverse->Import(Name);
+ DeclContextLookupResult Result = SourceDC.get()->lookup(FromName);
+ for (NamedDecl *FromD : Result) {
+ FilterFoundDecl(std::make_pair(FromD, IP.Forward.get()));
+ }
+ });
+
+ llvm::ArrayRef<Candidate> DeclsToReport =
+ CompleteDecls.empty() ? ForwardDecls : CompleteDecls;
+
+ if (DeclsToReport.empty()) {
+ return false;
+ }
+
+ Decls.reserve(DeclsToReport.size());
+ for (const Candidate &C : DeclsToReport) {
+ NamedDecl *d = cast<NamedDecl>(C.second->Import(C.first.get()));
+ assert(d);
+ Decls.push_back(d);
+ }
+ SetExternalVisibleDeclsForName(DC, Name, Decls);
+ return true;
+}
+
+void ExternalASTMerger::FindExternalLexicalDecls(
+ const DeclContext *DC, llvm::function_ref<bool(Decl::Kind)> IsKindWeWant,
+ SmallVectorImpl<Decl *> &Result) {
+ ForEachMatchingDC(
+ DC, Importers, [DC, IsKindWeWant](const ImporterPair &IP,
+ Source<const DeclContext *> SourceDC) {
+ for (const Decl *SourceDecl : SourceDC.get()->decls()) {
+ if (IsKindWeWant(SourceDecl->getKind())) {
+ Decl *ImportedDecl =
+ IP.Forward->Import(const_cast<Decl *>(SourceDecl));
+ assert(ImportedDecl->getDeclContext() == DC);
+ (void)ImportedDecl;
+ (void)DC;
+ }
+ }
+ });
+}
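
A hedged sketch of wiring up the merger defined above; ImporterEndpoint is
assumed to be an aggregate exposing the ASTContext (AST) and FileManager (FM)
references used by the constructor:

    void addMergedSource(clang::ASTContext &TargetCtx,
                         clang::FileManager &TargetFM,
                         clang::ASTContext &SourceCtx,
                         clang::FileManager &SourceFM) {
      clang::ExternalASTMerger::ImporterEndpoint Target{TargetCtx, TargetFM};
      clang::ExternalASTMerger::ImporterEndpoint Source{SourceCtx, SourceFM};
      // Internally this builds one forward and one reverse ASTImporter per
      // source; lookups against the target context then import lazily.
      clang::ExternalASTMerger Merger(Target, Source);
    }
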
diff --git a/lib/AST/ExternalASTSource.cpp b/lib/AST/ExternalASTSource.cpp
index e3de8c5fefa2..182d38242f59 100644
--- a/lib/AST/ExternalASTSource.cpp
+++ b/lib/AST/ExternalASTSource.cpp
@@ -28,6 +28,11 @@ ExternalASTSource::getSourceDescriptor(unsigned ID) {
return None;
}
+ExternalASTSource::ExtKind
+ExternalASTSource::hasExternalDefinitions(const Decl *D) {
+ return EK_ReplyHazy;
+}
+
ExternalASTSource::ASTSourceDescriptor::ASTSourceDescriptor(const Module &M)
: Signature(M.Signature), ClangModule(&M) {
if (M.Directory)
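
The new hook defaults to EK_ReplyHazy so existing sources keep their behavior.
A sketch of an override, assuming the ExtKind enum also provides EK_Always and
EK_Never alongside EK_ReplyHazy:

    class AlwaysDefiningSource : public clang::ExternalASTSource {
      // Claim that this source supplies a definition for every decl.
      ExtKind hasExternalDefinitions(const clang::Decl *D) override {
        return EK_Always;
      }
    };
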
diff --git a/lib/AST/ItaniumMangle.cpp b/lib/AST/ItaniumMangle.cpp
index ab3e49d903cf..29fcdd7be924 100644
--- a/lib/AST/ItaniumMangle.cpp
+++ b/lib/AST/ItaniumMangle.cpp
@@ -982,9 +982,8 @@ void CXXNameMangler::mangleFloat(const llvm::APFloat &f) {
unsigned digitBitIndex = 4 * (numCharacters - stringIndex - 1);
// Project out 4 bits starting at 'digitIndex'.
- llvm::integerPart hexDigit
- = valueBits.getRawData()[digitBitIndex / llvm::integerPartWidth];
- hexDigit >>= (digitBitIndex % llvm::integerPartWidth);
+ uint64_t hexDigit = valueBits.getRawData()[digitBitIndex / 64];
+ hexDigit >>= (digitBitIndex % 64);
hexDigit &= 0xF;
// Map that over to a lowercase hex digit.
@@ -1190,6 +1189,8 @@ void CXXNameMangler::mangleUnresolvedName(
llvm_unreachable("Can't mangle a constructor name!");
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
+ case DeclarationName::CXXDeductionGuideName:
+ llvm_unreachable("Can't mangle a deduction guide name!");
case DeclarationName::ObjCMultiArgSelector:
case DeclarationName::ObjCOneArgSelector:
case DeclarationName::ObjCZeroArgSelector:
@@ -1419,6 +1420,9 @@ void CXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
writeAbiTags(ND, AdditionalAbiTags);
break;
+ case DeclarationName::CXXDeductionGuideName:
+ llvm_unreachable("Can't mangle a deduction guide name!");
+
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
}
@@ -1870,6 +1874,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::Paren:
case Type::Attributed:
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
case Type::ObjCObject:
case Type::ObjCInterface:
@@ -1996,6 +2001,7 @@ void CXXNameMangler::mangleOperatorName(DeclarationName Name, unsigned Arity) {
switch (Name.getNameKind()) {
case DeclarationName::CXXConstructorName:
case DeclarationName::CXXDestructorName:
+ case DeclarationName::CXXDeductionGuideName:
case DeclarationName::CXXUsingDirective:
case DeclarationName::Identifier:
case DeclarationName::ObjCMultiArgSelector:
@@ -2152,10 +2158,12 @@ void CXXNameMangler::mangleQualifiers(Qualifiers Quals) {
} else {
switch (AS) {
default: llvm_unreachable("Not a language specific address space");
- // <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" ]
+      // <OpenCL-addrspace> ::= "CL" [ "global" | "local" | "constant" |
+ // "generic" ]
case LangAS::opencl_global: ASString = "CLglobal"; break;
case LangAS::opencl_local: ASString = "CLlocal"; break;
case LangAS::opencl_constant: ASString = "CLconstant"; break;
+ case LangAS::opencl_generic: ASString = "CLgeneric"; break;
// <CUDA-addrspace> ::= "CU" [ "device" | "constant" | "shared" ]
case LangAS::cuda_device: ASString = "CUdevice"; break;
case LangAS::cuda_constant: ASString = "CUconstant"; break;
@@ -2493,9 +2501,6 @@ void CXXNameMangler::mangleType(const BuiltinType *T) {
case BuiltinType::OCLQueue:
Out << "9ocl_queue";
break;
- case BuiltinType::OCLNDRange:
- Out << "11ocl_ndrange";
- break;
case BuiltinType::OCLReserveID:
Out << "13ocl_reserveid";
break;
@@ -3043,6 +3048,7 @@ void CXXNameMangler::mangleType(const DependentNameType *T) {
// ::= Te <name> # dependent elaborated type specifier using
// # 'enum'
switch (T->getKeyword()) {
+ case ETK_None:
case ETK_Typename:
break;
case ETK_Struct:
@@ -3056,8 +3062,6 @@ void CXXNameMangler::mangleType(const DependentNameType *T) {
case ETK_Enum:
Out << "Te";
break;
- default:
- llvm_unreachable("unexpected keyword for dependent type name");
}
// Typename types are always nested
Out << 'N';
@@ -3146,6 +3150,16 @@ void CXXNameMangler::mangleType(const AutoType *T) {
mangleType(D);
}
+void CXXNameMangler::mangleType(const DeducedTemplateSpecializationType *T) {
+ // FIXME: This is not the right mangling. We also need to include a scope
+ // here in some cases.
+ QualType D = T->getDeducedType();
+ if (D.isNull())
+ mangleUnscopedTemplateName(T->getTemplateName(), nullptr);
+ else
+ mangleType(D);
+}
+
void CXXNameMangler::mangleType(const AtomicType *T) {
// <type> ::= U <source-name> <type> # vendor extended type qualifier
// (Until there's a standardized mangling...)
@@ -4021,6 +4035,12 @@ recurse:
mangleExpression(cast<CoawaitExpr>(E)->getOperand());
break;
+ case Expr::DependentCoawaitExprClass:
+ // FIXME: Propose a non-vendor mangling.
+ Out << "v18co_await";
+ mangleExpression(cast<DependentCoawaitExpr>(E)->getOperand());
+ break;
+
case Expr::CoyieldExprClass:
// FIXME: Propose a non-vendor mangling.
Out << "v18co_yield";
diff --git a/lib/AST/Mangle.cpp b/lib/AST/Mangle.cpp
index 05dd886adcef..00d50c0e3bdf 100644
--- a/lib/AST/Mangle.cpp
+++ b/lib/AST/Mangle.cpp
@@ -262,9 +262,13 @@ void MangleContext::mangleObjCMethodNameWithoutSize(const ObjCMethodDecl *MD,
const ObjCContainerDecl *CD =
dyn_cast<ObjCContainerDecl>(MD->getDeclContext());
assert (CD && "Missing container decl in GetNameForMethod");
- OS << (MD->isInstanceMethod() ? '-' : '+') << '[' << CD->getName();
- if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD))
+ OS << (MD->isInstanceMethod() ? '-' : '+') << '[';
+ if (const ObjCCategoryImplDecl *CID = dyn_cast<ObjCCategoryImplDecl>(CD)) {
+ OS << CID->getClassInterface()->getName();
OS << '(' << *CID << ')';
+ } else {
+ OS << CD->getName();
+ }
OS << ' ';
MD->getSelector().print(OS);
OS << ']';
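
A hedged illustration of the name this now produces for a method implemented in
a category (class, category, and selector are hypothetical):

    // For -fooBar in @implementation MyClass (MyCategory), the mangled
    // method name now reads:
    //   -[MyClass(MyCategory) fooBar]
    // Previously the category container's own name stood where the class
    // interface name now appears.
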
diff --git a/lib/AST/MicrosoftMangle.cpp b/lib/AST/MicrosoftMangle.cpp
index 76c368d7f04c..6e14dd055cf8 100644
--- a/lib/AST/MicrosoftMangle.cpp
+++ b/lib/AST/MicrosoftMangle.cpp
@@ -942,6 +942,9 @@ void MicrosoftCXXNameMangler::mangleUnqualifiedName(const NamedDecl *ND,
break;
}
+ case DeclarationName::CXXDeductionGuideName:
+ llvm_unreachable("Can't mangle a deduction guide name!");
+
case DeclarationName::CXXUsingDirective:
llvm_unreachable("Can't mangle a using directive name!");
}
@@ -1797,10 +1800,6 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
Out << "PA";
mangleArtificalTagType(TTK_Struct, "ocl_queue");
break;
- case BuiltinType::OCLNDRange:
- Out << "PA";
- mangleArtificalTagType(TTK_Struct, "ocl_ndrange");
- break;
case BuiltinType::OCLReserveID:
Out << "PA";
mangleArtificalTagType(TTK_Struct, "ocl_reserveid");
@@ -1887,14 +1886,18 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// <return-type> ::= <type>
// ::= @ # structors (they have no declared return type)
if (IsStructor) {
- if (isa<CXXDestructorDecl>(D) && isStructorDecl(D) &&
- StructorType == Dtor_Deleting) {
- // The scalar deleting destructor takes an extra int argument.
- // However, the FunctionType generated has 0 arguments.
- // FIXME: This is a temporary hack.
- // Maybe should fix the FunctionType creation instead?
- Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z");
- return;
+ if (isa<CXXDestructorDecl>(D) && isStructorDecl(D)) {
+ // The scalar deleting destructor takes an extra int argument which is not
+ // reflected in the AST.
+ if (StructorType == Dtor_Deleting) {
+ Out << (PointersAre64Bit ? "PEAXI@Z" : "PAXI@Z");
+ return;
+ }
+ // The vbase destructor returns void which is not reflected in the AST.
+ if (StructorType == Dtor_Complete) {
+ Out << "XXZ";
+ return;
+ }
}
if (IsCtorClosure) {
// Default constructor closure and copy constructor closure both return
@@ -1954,7 +1957,7 @@ void MicrosoftCXXNameMangler::mangleFunctionType(const FunctionType *T,
// Happens for function pointer type arguments for example.
for (unsigned I = 0, E = Proto->getNumParams(); I != E; ++I) {
mangleArgumentType(Proto->getParamType(I), Range);
- // Mangle each pass_object_size parameter as if it's a paramater of enum
+ // Mangle each pass_object_size parameter as if it's a parameter of enum
// type passed directly after the parameter with the pass_object_size
// attribute. The aforementioned enum's name is __pass_object_size, and we
// pretend it resides in a top-level namespace called __clang.
@@ -2002,13 +2005,20 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
// <global-function> ::= Y # global near
// ::= Z # global far
if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD)) {
+ bool IsVirtual = MD->isVirtual();
+ // When mangling vbase destructor variants, ignore whether or not the
+ // underlying destructor was defined to be virtual.
+ if (isa<CXXDestructorDecl>(MD) && isStructorDecl(MD) &&
+ StructorType == Dtor_Complete) {
+ IsVirtual = false;
+ }
switch (MD->getAccess()) {
case AS_none:
llvm_unreachable("Unsupported access specifier");
case AS_private:
if (MD->isStatic())
Out << 'C';
- else if (MD->isVirtual())
+ else if (IsVirtual)
Out << 'E';
else
Out << 'A';
@@ -2016,7 +2026,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
case AS_protected:
if (MD->isStatic())
Out << 'K';
- else if (MD->isVirtual())
+ else if (IsVirtual)
Out << 'M';
else
Out << 'I';
@@ -2024,7 +2034,7 @@ void MicrosoftCXXNameMangler::mangleFunctionClass(const FunctionDecl *FD) {
case AS_public:
if (MD->isStatic())
Out << 'S';
- else if (MD->isVirtual())
+ else if (IsVirtual)
Out << 'U';
else
Out << 'Q';
@@ -2474,6 +2484,17 @@ void MicrosoftCXXNameMangler::mangleType(const AutoType *T, Qualifiers,
<< Range;
}
+void MicrosoftCXXNameMangler::mangleType(
+ const DeducedTemplateSpecializationType *T, Qualifiers, SourceRange Range) {
+ assert(T->getDeducedType().isNull() && "expecting a dependent type!");
+
+ DiagnosticsEngine &Diags = Context.getDiags();
+ unsigned DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
+ "cannot mangle this deduced class template specialization type yet");
+ Diags.Report(Range.getBegin(), DiagID)
+ << Range;
+}
+
void MicrosoftCXXNameMangler::mangleType(const AtomicType *T, Qualifiers,
SourceRange Range) {
QualType ValueType = T->getValueType();
@@ -2997,14 +3018,14 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
// N.B. The length is in terms of bytes, not characters.
Mangler.mangleNumber(SL->getByteLength() + SL->getCharByteWidth());
- auto GetLittleEndianByte = [&Mangler, &SL](unsigned Index) {
+ auto GetLittleEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = Index % CharByteWidth;
return static_cast<char>((CodeUnit >> (8 * OffsetInCodeUnit)) & 0xff);
};
- auto GetBigEndianByte = [&Mangler, &SL](unsigned Index) {
+ auto GetBigEndianByte = [&SL](unsigned Index) {
unsigned CharByteWidth = SL->getCharByteWidth();
uint32_t CodeUnit = SL->getCodeUnit(Index / CharByteWidth);
unsigned OffsetInCodeUnit = (CharByteWidth - 1) - (Index % CharByteWidth);
diff --git a/lib/AST/NSAPI.cpp b/lib/AST/NSAPI.cpp
index ac2a8d354247..e7c8c16b0145 100644
--- a/lib/AST/NSAPI.cpp
+++ b/lib/AST/NSAPI.cpp
@@ -453,7 +453,6 @@ NSAPI::getNSNumberFactoryMethodKind(QualType T) const {
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::BoundMember:
case BuiltinType::Dependent:
diff --git a/lib/AST/ODRHash.cpp b/lib/AST/ODRHash.cpp
new file mode 100644
index 000000000000..d72eebbe8e48
--- /dev/null
+++ b/lib/AST/ODRHash.cpp
@@ -0,0 +1,361 @@
+//===-- ODRHash.cpp - Hashing to diagnose ODR failures ----------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the ODRHash class, which calculates a hash of AST
+/// nodes that is stable across different runs.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/ODRHash.h"
+
+#include "clang/AST/DeclVisitor.h"
+#include "clang/AST/NestedNameSpecifier.h"
+#include "clang/AST/StmtVisitor.h"
+#include "clang/AST/TypeVisitor.h"
+
+using namespace clang;
+
+void ODRHash::AddStmt(const Stmt *S) {
+ assert(S && "Expecting non-null pointer.");
+ S->ProcessODRHash(ID, *this);
+}
+
+void ODRHash::AddIdentifierInfo(const IdentifierInfo *II) {
+ assert(II && "Expecting non-null pointer.");
+ ID.AddString(II->getName());
+}
+
+void ODRHash::AddDeclarationName(DeclarationName Name) {
+ AddBoolean(Name.isEmpty());
+ if (Name.isEmpty())
+ return;
+
+ auto Kind = Name.getNameKind();
+ ID.AddInteger(Kind);
+ switch (Kind) {
+ case DeclarationName::Identifier:
+ AddIdentifierInfo(Name.getAsIdentifierInfo());
+ break;
+ case DeclarationName::ObjCZeroArgSelector:
+ case DeclarationName::ObjCOneArgSelector:
+ case DeclarationName::ObjCMultiArgSelector: {
+ Selector S = Name.getObjCSelector();
+ AddBoolean(S.isNull());
+ AddBoolean(S.isKeywordSelector());
+ AddBoolean(S.isUnarySelector());
+ unsigned NumArgs = S.getNumArgs();
+ for (unsigned i = 0; i < NumArgs; ++i) {
+ AddIdentifierInfo(S.getIdentifierInfoForSlot(i));
+ }
+ break;
+ }
+ case DeclarationName::CXXConstructorName:
+ case DeclarationName::CXXDestructorName:
+ AddQualType(Name.getCXXNameType());
+ break;
+ case DeclarationName::CXXOperatorName:
+ ID.AddInteger(Name.getCXXOverloadedOperator());
+ break;
+ case DeclarationName::CXXLiteralOperatorName:
+ AddIdentifierInfo(Name.getCXXLiteralIdentifier());
+ break;
+ case DeclarationName::CXXConversionFunctionName:
+ AddQualType(Name.getCXXNameType());
+ break;
+ case DeclarationName::CXXUsingDirective:
+ break;
+ case DeclarationName::CXXDeductionGuideName: {
+ auto *Template = Name.getCXXDeductionGuideTemplate();
+ AddBoolean(Template);
+ if (Template) {
+ AddDecl(Template);
+ }
+ }
+ }
+}
+
+void ODRHash::AddNestedNameSpecifier(const NestedNameSpecifier *NNS) {}
+void ODRHash::AddTemplateName(TemplateName Name) {}
+void ODRHash::AddTemplateArgument(TemplateArgument TA) {}
+void ODRHash::AddTemplateParameterList(const TemplateParameterList *TPL) {}
+
+void ODRHash::clear() {
+ DeclMap.clear();
+ TypeMap.clear();
+ Bools.clear();
+ ID.clear();
+}
+
+unsigned ODRHash::CalculateHash() {
+  // Append the bools to the end of the data segment backwards.  Packing
+  // them into unsigned words makes the bool data roughly 32 times smaller
+  // than adding each one with ID.AddBoolean.
+ const unsigned unsigned_bits = sizeof(unsigned) * CHAR_BIT;
+ const unsigned size = Bools.size();
+ const unsigned remainder = size % unsigned_bits;
+ const unsigned loops = size / unsigned_bits;
+ auto I = Bools.rbegin();
+ unsigned value = 0;
+ for (unsigned i = 0; i < remainder; ++i) {
+ value <<= 1;
+ value |= *I;
+ ++I;
+ }
+ ID.AddInteger(value);
+
+ for (unsigned i = 0; i < loops; ++i) {
+ value = 0;
+ for (unsigned j = 0; j < unsigned_bits; ++j) {
+ value <<= 1;
+ value |= *I;
+ ++I;
+ }
+ ID.AddInteger(value);
+ }
+
+ assert(I == Bools.rend());
+ Bools.clear();
+ return ID.ComputeHash();
+}
+
+// Process a Decl pointer. Add* methods call back into ODRHash while Visit*
+// methods process the relevant parts of the Decl.
+class ODRDeclVisitor : public ConstDeclVisitor<ODRDeclVisitor> {
+ typedef ConstDeclVisitor<ODRDeclVisitor> Inherited;
+ llvm::FoldingSetNodeID &ID;
+ ODRHash &Hash;
+
+public:
+ ODRDeclVisitor(llvm::FoldingSetNodeID &ID, ODRHash &Hash)
+ : ID(ID), Hash(Hash) {}
+
+ void AddStmt(const Stmt *S) {
+ Hash.AddBoolean(S);
+ if (S) {
+ Hash.AddStmt(S);
+ }
+ }
+
+ void AddIdentifierInfo(const IdentifierInfo *II) {
+ Hash.AddBoolean(II);
+ if (II) {
+ Hash.AddIdentifierInfo(II);
+ }
+ }
+
+ void AddQualType(QualType T) {
+ Hash.AddQualType(T);
+ }
+
+ void Visit(const Decl *D) {
+ ID.AddInteger(D->getKind());
+ Inherited::Visit(D);
+ }
+
+ void VisitNamedDecl(const NamedDecl *D) {
+ Hash.AddDeclarationName(D->getDeclName());
+ Inherited::VisitNamedDecl(D);
+ }
+
+ void VisitValueDecl(const ValueDecl *D) {
+ AddQualType(D->getType());
+ Inherited::VisitValueDecl(D);
+ }
+
+ void VisitAccessSpecDecl(const AccessSpecDecl *D) {
+ ID.AddInteger(D->getAccess());
+ Inherited::VisitAccessSpecDecl(D);
+ }
+
+ void VisitStaticAssertDecl(const StaticAssertDecl *D) {
+ AddStmt(D->getAssertExpr());
+ AddStmt(D->getMessage());
+
+ Inherited::VisitStaticAssertDecl(D);
+ }
+
+ void VisitFieldDecl(const FieldDecl *D) {
+ const bool IsBitfield = D->isBitField();
+ Hash.AddBoolean(IsBitfield);
+
+ if (IsBitfield) {
+ AddStmt(D->getBitWidth());
+ }
+
+ Hash.AddBoolean(D->isMutable());
+ AddStmt(D->getInClassInitializer());
+
+ Inherited::VisitFieldDecl(D);
+ }
+
+ void VisitFunctionDecl(const FunctionDecl *D) {
+ ID.AddInteger(D->getStorageClass());
+ Hash.AddBoolean(D->isInlineSpecified());
+ Hash.AddBoolean(D->isVirtualAsWritten());
+ Hash.AddBoolean(D->isPure());
+ Hash.AddBoolean(D->isDeletedAsWritten());
+
+ Inherited::VisitFunctionDecl(D);
+ }
+
+ void VisitCXXMethodDecl(const CXXMethodDecl *D) {
+ Hash.AddBoolean(D->isConst());
+ Hash.AddBoolean(D->isVolatile());
+
+ Inherited::VisitCXXMethodDecl(D);
+ }
+
+ void VisitTypedefNameDecl(const TypedefNameDecl *D) {
+ AddQualType(D->getUnderlyingType());
+
+ Inherited::VisitTypedefNameDecl(D);
+ }
+
+ void VisitTypedefDecl(const TypedefDecl *D) {
+ Inherited::VisitTypedefDecl(D);
+ }
+
+ void VisitTypeAliasDecl(const TypeAliasDecl *D) {
+ Inherited::VisitTypeAliasDecl(D);
+ }
+};
+
+// Only allow a small portion of Decls to be processed.  Remove this once
+// all Decls can be handled.
+bool ODRHash::isWhitelistedDecl(const Decl *D, const CXXRecordDecl *Parent) {
+ if (D->isImplicit()) return false;
+ if (D->getDeclContext() != Parent) return false;
+
+ switch (D->getKind()) {
+ default:
+ return false;
+ case Decl::AccessSpec:
+ case Decl::CXXMethod:
+ case Decl::Field:
+ case Decl::StaticAssert:
+ case Decl::TypeAlias:
+ case Decl::Typedef:
+ return true;
+ }
+}
+
+void ODRHash::AddSubDecl(const Decl *D) {
+ assert(D && "Expecting non-null pointer.");
+ AddDecl(D);
+
+ ODRDeclVisitor(ID, *this).Visit(D);
+}
+
+void ODRHash::AddCXXRecordDecl(const CXXRecordDecl *Record) {
+ assert(Record && Record->hasDefinition() &&
+ "Expected non-null record to be a definition.");
+ AddDecl(Record);
+
+  // Filter out sub-Decls that will not be processed, in order to get an
+  // accurate count of Decls.
+ llvm::SmallVector<const Decl *, 16> Decls;
+ for (const Decl *SubDecl : Record->decls()) {
+ if (isWhitelistedDecl(SubDecl, Record)) {
+ Decls.push_back(SubDecl);
+ }
+ }
+
+ ID.AddInteger(Decls.size());
+ for (auto SubDecl : Decls) {
+ AddSubDecl(SubDecl);
+ }
+}
+
+void ODRHash::AddDecl(const Decl *D) {
+ assert(D && "Expecting non-null pointer.");
+ auto Result = DeclMap.insert(std::make_pair(D, DeclMap.size()));
+ ID.AddInteger(Result.first->second);
+ // On first encounter of a Decl pointer, process it. Every time afterwards,
+ // only the index value is needed.
+ if (!Result.second) {
+ return;
+ }
+
+ ID.AddInteger(D->getKind());
+
+ if (const NamedDecl *ND = dyn_cast<NamedDecl>(D)) {
+ AddDeclarationName(ND->getDeclName());
+ }
+}
+
+// Process a Type pointer. Add* methods call back into ODRHash while Visit*
+// methods process the relevant parts of the Type.
+class ODRTypeVisitor : public TypeVisitor<ODRTypeVisitor> {
+ typedef TypeVisitor<ODRTypeVisitor> Inherited;
+ llvm::FoldingSetNodeID &ID;
+ ODRHash &Hash;
+
+public:
+ ODRTypeVisitor(llvm::FoldingSetNodeID &ID, ODRHash &Hash)
+ : ID(ID), Hash(Hash) {}
+
+ void AddStmt(Stmt *S) {
+ Hash.AddBoolean(S);
+ if (S) {
+ Hash.AddStmt(S);
+ }
+ }
+
+ void AddDecl(Decl *D) {
+ Hash.AddBoolean(D);
+ if (D) {
+ Hash.AddDecl(D);
+ }
+ }
+
+ void Visit(const Type *T) {
+ ID.AddInteger(T->getTypeClass());
+ Inherited::Visit(T);
+ }
+
+ void VisitType(const Type *T) {}
+
+ void VisitBuiltinType(const BuiltinType *T) {
+ ID.AddInteger(T->getKind());
+ VisitType(T);
+ }
+
+ void VisitTypedefType(const TypedefType *T) {
+ AddDecl(T->getDecl());
+ Hash.AddQualType(T->getDecl()->getUnderlyingType());
+ VisitType(T);
+ }
+};
+
+void ODRHash::AddType(const Type *T) {
+ assert(T && "Expecting non-null pointer.");
+ auto Result = TypeMap.insert(std::make_pair(T, TypeMap.size()));
+ ID.AddInteger(Result.first->second);
+ // On first encounter of a Type pointer, process it. Every time afterwards,
+ // only the index value is needed.
+ if (!Result.second) {
+ return;
+ }
+
+ ODRTypeVisitor(ID, *this).Visit(T);
+}
+
+void ODRHash::AddQualType(QualType T) {
+ AddBoolean(T.isNull());
+ if (T.isNull())
+ return;
+ SplitQualType split = T.split();
+ ID.AddInteger(split.Quals.getAsOpaqueValue());
+ AddType(split.Ty);
+}
+
+void ODRHash::AddBoolean(bool Value) {
+ Bools.push_back(Value);
+}
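
A standalone sketch of the packing scheme CalculateHash uses: the bools are
consumed from the back of the vector, remainder word first, then full words of
sizeof(unsigned) * CHAR_BIT bits each:

    #include <cassert>
    #include <climits>
    #include <vector>

    std::vector<unsigned> packBools(const std::vector<bool> &Bools) {
      const unsigned Bits = sizeof(unsigned) * CHAR_BIT;
      std::vector<unsigned> Words;
      auto I = Bools.rbegin();
      // Fold the leftover bits first, exactly as CalculateHash does.
      unsigned Value = 0;
      for (unsigned i = 0, e = Bools.size() % Bits; i != e; ++i)
        Value = (Value << 1) | unsigned(*I++);
      Words.push_back(Value);
      // Then consume full words of Bits booleans each.
      for (unsigned Loop = 0, e = Bools.size() / Bits; Loop != e; ++Loop) {
        Value = 0;
        for (unsigned j = 0; j != Bits; ++j)
          Value = (Value << 1) | unsigned(*I++);
        Words.push_back(Value);
      }
      assert(I == Bools.rend() && "all bools consumed");
      return Words;
    }
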
diff --git a/lib/AST/OpenMPClause.cpp b/lib/AST/OpenMPClause.cpp
index a28b9f3b6d64..77470a9b76d0 100644
--- a/lib/AST/OpenMPClause.cpp
+++ b/lib/AST/OpenMPClause.cpp
@@ -48,11 +48,17 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
return static_cast<const OMPReductionClause *>(C);
case OMPC_linear:
return static_cast<const OMPLinearClause *>(C);
+ case OMPC_if:
+ return static_cast<const OMPIfClause *>(C);
+ case OMPC_num_threads:
+ return static_cast<const OMPNumThreadsClause *>(C);
+ case OMPC_num_teams:
+ return static_cast<const OMPNumTeamsClause *>(C);
+ case OMPC_thread_limit:
+ return static_cast<const OMPThreadLimitClause *>(C);
case OMPC_default:
case OMPC_proc_bind:
- case OMPC_if:
case OMPC_final:
- case OMPC_num_threads:
case OMPC_safelen:
case OMPC_simdlen:
case OMPC_collapse:
@@ -77,8 +83,6 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_threads:
case OMPC_simd:
case OMPC_map:
- case OMPC_num_teams:
- case OMPC_thread_limit:
case OMPC_priority:
case OMPC_grainsize:
case OMPC_nogroup:
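
With this change, if, num_threads, num_teams, and thread_limit join the clauses
that can carry a captured pre-init statement. A minimal OpenMP illustration
(the helper function is hypothetical):

    int compute_thread_count(void);

    void run(void) {
      // The clause expression may now be captured into a pre-init statement
      // for the region instead of being profiled as a bare expression.
      #pragma omp parallel num_threads(compute_thread_count())
      {
        // ...
      }
    }
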
diff --git a/lib/AST/Stmt.cpp b/lib/AST/Stmt.cpp
index 697cdc3fb360..69e65f558f89 100644
--- a/lib/AST/Stmt.cpp
+++ b/lib/AST/Stmt.cpp
@@ -1083,7 +1083,7 @@ CapturedStmt *CapturedStmt::CreateDeserialized(const ASTContext &Context,
}
Stmt::child_range CapturedStmt::children() {
- // Children are captured field initilizers.
+ // Children are captured field initializers.
return child_range(getStoredStmts(), getStoredStmts() + NumCaptures);
}
diff --git a/lib/AST/StmtCXX.cpp b/lib/AST/StmtCXX.cpp
index 4a04fc211262..aade13ed3bd4 100644
--- a/lib/AST/StmtCXX.cpp
+++ b/lib/AST/StmtCXX.cpp
@@ -86,3 +86,30 @@ VarDecl *CXXForRangeStmt::getLoopVariable() {
const VarDecl *CXXForRangeStmt::getLoopVariable() const {
return const_cast<CXXForRangeStmt *>(this)->getLoopVariable();
}
+
+CoroutineBodyStmt *CoroutineBodyStmt::Create(
+ const ASTContext &C, CoroutineBodyStmt::CtorArgs const& Args) {
+ std::size_t Size = totalSizeToAlloc<Stmt *>(
+ CoroutineBodyStmt::FirstParamMove + Args.ParamMoves.size());
+
+ void *Mem = C.Allocate(Size, alignof(CoroutineBodyStmt));
+ return new (Mem) CoroutineBodyStmt(Args);
+}
+
+CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args)
+ : Stmt(CoroutineBodyStmtClass), NumParams(Args.ParamMoves.size()) {
+ Stmt **SubStmts = getStoredStmts();
+ SubStmts[CoroutineBodyStmt::Body] = Args.Body;
+ SubStmts[CoroutineBodyStmt::Promise] = Args.Promise;
+ SubStmts[CoroutineBodyStmt::InitSuspend] = Args.InitialSuspend;
+ SubStmts[CoroutineBodyStmt::FinalSuspend] = Args.FinalSuspend;
+ SubStmts[CoroutineBodyStmt::OnException] = Args.OnException;
+ SubStmts[CoroutineBodyStmt::OnFallthrough] = Args.OnFallthrough;
+ SubStmts[CoroutineBodyStmt::Allocate] = Args.Allocate;
+ SubStmts[CoroutineBodyStmt::Deallocate] = Args.Deallocate;
+ SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue;
+ SubStmts[CoroutineBodyStmt::ReturnStmtOnAllocFailure] =
+ Args.ReturnStmtOnAllocFailure;
+ std::copy(Args.ParamMoves.begin(), Args.ParamMoves.end(),
+ const_cast<Stmt **>(getParamMoves().data()));
+}
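
CoroutineBodyStmt::Create uses the tail-allocation idiom: one block is sized for
the object plus a trailing Stmt* array, and the object is placement-newed at the
front. A standalone sketch of the same idiom with plain ints:

    #include <new>

    struct Node {
      unsigned NumElems;
      explicit Node(unsigned N) : NumElems(N) {}
      // The element array lives immediately after the object itself.
      int *elements() { return reinterpret_cast<int *>(this + 1); }
      static Node *create(unsigned N) {
        void *Mem = ::operator new(sizeof(Node) + N * sizeof(int));
        return new (Mem) Node(N);
      }
    };
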
diff --git a/lib/AST/StmtOpenMP.cpp b/lib/AST/StmtOpenMP.cpp
index 880817a1339b..a812884cd927 100644
--- a/lib/AST/StmtOpenMP.cpp
+++ b/lib/AST/StmtOpenMP.cpp
@@ -149,6 +149,8 @@ OMPForDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -201,6 +203,8 @@ OMPForSimdDirective::Create(const ASTContext &C, SourceLocation StartLoc,
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -366,6 +370,8 @@ OMPParallelForDirective *OMPParallelForDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -417,6 +423,8 @@ OMPParallelForSimdDirective *OMPParallelForSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -753,6 +761,8 @@ OMPTargetParallelForDirective *OMPTargetParallelForDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -896,6 +906,8 @@ OMPTaskLoopDirective *OMPTaskLoopDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -947,6 +959,8 @@ OMPTaskLoopSimdDirective *OMPTaskLoopSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -997,6 +1011,8 @@ OMPDistributeDirective *OMPDistributeDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1071,6 +1087,8 @@ OMPDistributeParallelForDirective *OMPDistributeParallelForDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1127,6 +1145,8 @@ OMPDistributeParallelForSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1182,6 +1202,8 @@ OMPDistributeSimdDirective *OMPDistributeSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1236,6 +1258,8 @@ OMPTargetParallelForSimdDirective *OMPTargetParallelForSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1330,6 +1354,8 @@ OMPTeamsDistributeDirective *OMPTeamsDistributeDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1383,6 +1409,8 @@ OMPTeamsDistributeSimdDirective *OMPTeamsDistributeSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1438,6 +1466,8 @@ OMPTeamsDistributeParallelForSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1496,6 +1526,8 @@ OMPTeamsDistributeParallelForDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1576,6 +1608,8 @@ OMPTargetTeamsDistributeDirective *OMPTargetTeamsDistributeDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1634,6 +1668,8 @@ OMPTargetTeamsDistributeParallelForDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1695,6 +1731,8 @@ OMPTargetTeamsDistributeParallelForSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
@@ -1753,6 +1791,8 @@ OMPTargetTeamsDistributeSimdDirective::Create(
Dir->setNumIterations(Exprs.NumIterations);
Dir->setPrevLowerBoundVariable(Exprs.PrevLB);
Dir->setPrevUpperBoundVariable(Exprs.PrevUB);
+ Dir->setDistInc(Exprs.DistInc);
+ Dir->setPrevEnsureUpperBound(Exprs.PrevEUB);
Dir->setCounters(Exprs.Counters);
Dir->setPrivateCounters(Exprs.PrivateCounters);
Dir->setInits(Exprs.Inits);
diff --git a/lib/AST/StmtPrinter.cpp b/lib/AST/StmtPrinter.cpp
index 1ba1aa40ec5c..21f5259c3ca8 100644
--- a/lib/AST/StmtPrinter.cpp
+++ b/lib/AST/StmtPrinter.cpp
@@ -2475,6 +2475,12 @@ void StmtPrinter::VisitCoawaitExpr(CoawaitExpr *S) {
PrintExpr(S->getOperand());
}
+
+void StmtPrinter::VisitDependentCoawaitExpr(DependentCoawaitExpr *S) {
+ OS << "co_await ";
+ PrintExpr(S->getOperand());
+}
+
void StmtPrinter::VisitCoyieldExpr(CoyieldExpr *S) {
OS << "co_yield ";
PrintExpr(S->getOperand());
diff --git a/lib/AST/StmtProfile.cpp b/lib/AST/StmtProfile.cpp
index bcd2e96875e7..f1fbe2806b5d 100644
--- a/lib/AST/StmtProfile.cpp
+++ b/lib/AST/StmtProfile.cpp
@@ -19,20 +19,22 @@
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/AST/ODRHash.h"
#include "clang/AST/StmtVisitor.h"
#include "llvm/ADT/FoldingSet.h"
using namespace clang;
namespace {
class StmtProfiler : public ConstStmtVisitor<StmtProfiler> {
+ protected:
llvm::FoldingSetNodeID &ID;
- const ASTContext &Context;
bool Canonical;
public:
- StmtProfiler(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
- bool Canonical)
- : ID(ID), Context(Context), Canonical(Canonical) { }
+ StmtProfiler(llvm::FoldingSetNodeID &ID, bool Canonical)
+ : ID(ID), Canonical(Canonical) {}
+
+ virtual ~StmtProfiler() {}
void VisitStmt(const Stmt *S);
@@ -41,22 +43,25 @@ namespace {
/// \brief Visit a declaration that is referenced within an expression
/// or statement.
- void VisitDecl(const Decl *D);
+ virtual void VisitDecl(const Decl *D) = 0;
/// \brief Visit a type that is referenced within an expression or
/// statement.
- void VisitType(QualType T);
+ virtual void VisitType(QualType T) = 0;
/// \brief Visit a name that occurs within an expression or statement.
- void VisitName(DeclarationName Name);
+ virtual void VisitName(DeclarationName Name) = 0;
+
+    /// \brief Visit identifiers that are not in Decls or Types.
+ virtual void VisitIdentifierInfo(IdentifierInfo *II) = 0;
/// \brief Visit a nested-name-specifier that occurs within an expression
/// or statement.
- void VisitNestedNameSpecifier(NestedNameSpecifier *NNS);
+ virtual void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) = 0;
/// \brief Visit a template name that occurs within an expression or
/// statement.
- void VisitTemplateName(TemplateName Name);
+ virtual void VisitTemplateName(TemplateName Name) = 0;
/// \brief Visit template arguments that occur within an expression or
/// statement.
@@ -66,6 +71,127 @@ namespace {
/// \brief Visit a single template argument.
void VisitTemplateArgument(const TemplateArgument &Arg);
};
+
+ class StmtProfilerWithPointers : public StmtProfiler {
+ const ASTContext &Context;
+
+ public:
+ StmtProfilerWithPointers(llvm::FoldingSetNodeID &ID,
+ const ASTContext &Context, bool Canonical)
+ : StmtProfiler(ID, Canonical), Context(Context) {}
+ private:
+ void VisitDecl(const Decl *D) override {
+ ID.AddInteger(D ? D->getKind() : 0);
+
+ if (Canonical && D) {
+ if (const NonTypeTemplateParmDecl *NTTP =
+ dyn_cast<NonTypeTemplateParmDecl>(D)) {
+ ID.AddInteger(NTTP->getDepth());
+ ID.AddInteger(NTTP->getIndex());
+ ID.AddBoolean(NTTP->isParameterPack());
+ VisitType(NTTP->getType());
+ return;
+ }
+
+ if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
+ // The Itanium C++ ABI uses the type, scope depth, and scope
+ // index of a parameter when mangling expressions that involve
+ // function parameters, so we will use the parameter's type for
+ // establishing function parameter identity. That way, our
+ // definition of "equivalent" (per C++ [temp.over.link]) is at
+ // least as strong as the definition of "equivalent" used for
+ // name mangling.
+ VisitType(Parm->getType());
+ ID.AddInteger(Parm->getFunctionScopeDepth());
+ ID.AddInteger(Parm->getFunctionScopeIndex());
+ return;
+ }
+
+ if (const TemplateTypeParmDecl *TTP =
+ dyn_cast<TemplateTypeParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+
+ if (const TemplateTemplateParmDecl *TTP =
+ dyn_cast<TemplateTemplateParmDecl>(D)) {
+ ID.AddInteger(TTP->getDepth());
+ ID.AddInteger(TTP->getIndex());
+ ID.AddBoolean(TTP->isParameterPack());
+ return;
+ }
+ }
+
+ ID.AddPointer(D ? D->getCanonicalDecl() : nullptr);
+ }
+
+ void VisitType(QualType T) override {
+ if (Canonical && !T.isNull())
+ T = Context.getCanonicalType(T);
+
+ ID.AddPointer(T.getAsOpaquePtr());
+ }
+
+ void VisitName(DeclarationName Name) override {
+ ID.AddPointer(Name.getAsOpaquePtr());
+ }
+
+ void VisitIdentifierInfo(IdentifierInfo *II) override {
+ ID.AddPointer(II);
+ }
+
+ void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
+ if (Canonical)
+ NNS = Context.getCanonicalNestedNameSpecifier(NNS);
+ ID.AddPointer(NNS);
+ }
+
+ void VisitTemplateName(TemplateName Name) override {
+ if (Canonical)
+ Name = Context.getCanonicalTemplateName(Name);
+
+ Name.Profile(ID);
+ }
+ };
+
+ class StmtProfilerWithoutPointers : public StmtProfiler {
+ ODRHash &Hash;
+ public:
+ StmtProfilerWithoutPointers(llvm::FoldingSetNodeID &ID, ODRHash &Hash)
+ : StmtProfiler(ID, false), Hash(Hash) {}
+
+ private:
+ void VisitType(QualType T) override {
+ Hash.AddQualType(T);
+ }
+
+ void VisitName(DeclarationName Name) override {
+ Hash.AddDeclarationName(Name);
+ }
+ void VisitIdentifierInfo(IdentifierInfo *II) override {
+ ID.AddBoolean(II);
+ if (II) {
+ Hash.AddIdentifierInfo(II);
+ }
+ }
+ void VisitDecl(const Decl *D) override {
+ ID.AddBoolean(D);
+ if (D) {
+ Hash.AddDecl(D);
+ }
+ }
+ void VisitTemplateName(TemplateName Name) override {
+ Hash.AddTemplateName(Name);
+ }
+ void VisitNestedNameSpecifier(NestedNameSpecifier *NNS) override {
+ ID.AddBoolean(NNS);
+ if (NNS) {
+ Hash.AddNestedNameSpecifier(NNS);
+ }
+ }
+ };
}
void StmtProfiler::VisitStmt(const Stmt *S) {
@@ -283,6 +409,7 @@ void OMPClauseProfiler::VistOMPClauseWithPostUpdate(
}
void OMPClauseProfiler::VisitOMPIfClause(const OMPIfClause *C) {
+ VistOMPClauseWithPreInit(C);
if (C->getCondition())
Profiler->VisitStmt(C->getCondition());
}
@@ -293,6 +420,7 @@ void OMPClauseProfiler::VisitOMPFinalClause(const OMPFinalClause *C) {
}
void OMPClauseProfiler::VisitOMPNumThreadsClause(const OMPNumThreadsClause *C) {
+ VistOMPClauseWithPreInit(C);
if (C->getNumThreads())
Profiler->VisitStmt(C->getNumThreads());
}
@@ -495,11 +623,13 @@ void OMPClauseProfiler::VisitOMPMapClause(const OMPMapClause *C) {
VisitOMPClauseList(C);
}
void OMPClauseProfiler::VisitOMPNumTeamsClause(const OMPNumTeamsClause *C) {
+ VistOMPClauseWithPreInit(C);
if (C->getNumTeams())
Profiler->VisitStmt(C->getNumTeams());
}
void OMPClauseProfiler::VisitOMPThreadLimitClause(
const OMPThreadLimitClause *C) {
+ VistOMPClauseWithPreInit(C);
if (C->getThreadLimit())
Profiler->VisitStmt(C->getThreadLimit());
}
@@ -849,7 +979,7 @@ void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
break;
case OffsetOfNode::Identifier:
- ID.AddPointer(ON.getFieldName());
+ VisitIdentifierInfo(ON.getFieldName());
break;
case OffsetOfNode::Base:
@@ -857,7 +987,7 @@ void StmtProfiler::VisitOffsetOfExpr(const OffsetOfExpr *S) {
break;
}
}
-
+
VisitExpr(S);
}
@@ -1447,7 +1577,7 @@ StmtProfiler::VisitCXXPseudoDestructorExpr(const CXXPseudoDestructorExpr *S) {
if (S->getDestroyedTypeInfo())
VisitType(S->getDestroyedType());
else
- ID.AddPointer(S->getDestroyedTypeIdentifier());
+ VisitIdentifierInfo(S->getDestroyedTypeIdentifier());
}
void StmtProfiler::VisitOverloadExpr(const OverloadExpr *S) {
@@ -1595,6 +1725,10 @@ void StmtProfiler::VisitCoawaitExpr(const CoawaitExpr *S) {
VisitExpr(S);
}
+void StmtProfiler::VisitDependentCoawaitExpr(const DependentCoawaitExpr *S) {
+ VisitExpr(S);
+}
+
void StmtProfiler::VisitCoyieldExpr(const CoyieldExpr *S) {
VisitExpr(S);
}
@@ -1697,77 +1831,6 @@ void StmtProfiler::VisitObjCAvailabilityCheckExpr(
VisitExpr(S);
}
-void StmtProfiler::VisitDecl(const Decl *D) {
- ID.AddInteger(D? D->getKind() : 0);
-
- if (Canonical && D) {
- if (const NonTypeTemplateParmDecl *NTTP =
- dyn_cast<NonTypeTemplateParmDecl>(D)) {
- ID.AddInteger(NTTP->getDepth());
- ID.AddInteger(NTTP->getIndex());
- ID.AddBoolean(NTTP->isParameterPack());
- VisitType(NTTP->getType());
- return;
- }
-
- if (const ParmVarDecl *Parm = dyn_cast<ParmVarDecl>(D)) {
- // The Itanium C++ ABI uses the type, scope depth, and scope
- // index of a parameter when mangling expressions that involve
- // function parameters, so we will use the parameter's type for
- // establishing function parameter identity. That way, our
- // definition of "equivalent" (per C++ [temp.over.link]) is at
- // least as strong as the definition of "equivalent" used for
- // name mangling.
- VisitType(Parm->getType());
- ID.AddInteger(Parm->getFunctionScopeDepth());
- ID.AddInteger(Parm->getFunctionScopeIndex());
- return;
- }
-
- if (const TemplateTypeParmDecl *TTP =
- dyn_cast<TemplateTypeParmDecl>(D)) {
- ID.AddInteger(TTP->getDepth());
- ID.AddInteger(TTP->getIndex());
- ID.AddBoolean(TTP->isParameterPack());
- return;
- }
-
- if (const TemplateTemplateParmDecl *TTP =
- dyn_cast<TemplateTemplateParmDecl>(D)) {
- ID.AddInteger(TTP->getDepth());
- ID.AddInteger(TTP->getIndex());
- ID.AddBoolean(TTP->isParameterPack());
- return;
- }
- }
-
- ID.AddPointer(D? D->getCanonicalDecl() : nullptr);
-}
-
-void StmtProfiler::VisitType(QualType T) {
- if (Canonical)
- T = Context.getCanonicalType(T);
-
- ID.AddPointer(T.getAsOpaquePtr());
-}
-
-void StmtProfiler::VisitName(DeclarationName Name) {
- ID.AddPointer(Name.getAsOpaquePtr());
-}
-
-void StmtProfiler::VisitNestedNameSpecifier(NestedNameSpecifier *NNS) {
- if (Canonical)
- NNS = Context.getCanonicalNestedNameSpecifier(NNS);
- ID.AddPointer(NNS);
-}
-
-void StmtProfiler::VisitTemplateName(TemplateName Name) {
- if (Canonical)
- Name = Context.getCanonicalTemplateName(Name);
-
- Name.Profile(ID);
-}
-
void StmtProfiler::VisitTemplateArguments(const TemplateArgumentLoc *Args,
unsigned NumArgs) {
ID.AddInteger(NumArgs);
@@ -1817,6 +1880,12 @@ void StmtProfiler::VisitTemplateArgument(const TemplateArgument &Arg) {
void Stmt::Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const {
- StmtProfiler Profiler(ID, Context, Canonical);
+ StmtProfilerWithPointers Profiler(ID, Context, Canonical);
+ Profiler.Visit(this);
+}
+
+void Stmt::ProcessODRHash(llvm::FoldingSetNodeID &ID,
+ class ODRHash &Hash) const {
+ StmtProfilerWithoutPointers Profiler(ID, Hash);
Profiler.Visit(this);
}
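
A hedged sketch of the two entry points this refactoring leaves in place: the
pointer-based profile used for folding within a single run, and the pointer-free
path that feeds an ODRHash so the result is stable across runs:

    void profileStmt(const clang::Stmt *S, const clang::ASTContext &Ctx,
                     clang::ODRHash &Hash) {
      llvm::FoldingSetNodeID ByPointer, Structural;
      // Canonical, pointer-based identity; only meaningful in-process.
      S->Profile(ByPointer, Ctx, /*Canonical=*/true);
      // Structural identity; no raw pointers enter the ID.
      S->ProcessODRHash(Structural, Hash);
    }
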
diff --git a/lib/AST/TemplateBase.cpp b/lib/AST/TemplateBase.cpp
index 099f939c7a75..e4998c37a4ef 100644
--- a/lib/AST/TemplateBase.cpp
+++ b/lib/AST/TemplateBase.cpp
@@ -453,10 +453,6 @@ LLVM_DUMP_METHOD void TemplateArgument::dump() const { dump(llvm::errs()); }
// TemplateArgumentLoc Implementation
//===----------------------------------------------------------------------===//
-TemplateArgumentLocInfo::TemplateArgumentLocInfo() {
- memset((void*)this, 0, sizeof(TemplateArgumentLocInfo));
-}
-
SourceRange TemplateArgumentLoc::getSourceRange() const {
switch (Argument.getKind()) {
case TemplateArgument::Expression:
diff --git a/lib/AST/Type.cpp b/lib/AST/Type.cpp
index 0d0cd2e305be..df26233b4796 100644
--- a/lib/AST/Type.cpp
+++ b/lib/AST/Type.cpp
@@ -1559,61 +1559,79 @@ TagDecl *Type::getAsTagDecl() const {
}
namespace {
- class GetContainedAutoVisitor :
- public TypeVisitor<GetContainedAutoVisitor, AutoType*> {
+ class GetContainedDeducedTypeVisitor :
+ public TypeVisitor<GetContainedDeducedTypeVisitor, Type*> {
+ bool Syntactic;
public:
- using TypeVisitor<GetContainedAutoVisitor, AutoType*>::Visit;
- AutoType *Visit(QualType T) {
+ GetContainedDeducedTypeVisitor(bool Syntactic = false)
+ : Syntactic(Syntactic) {}
+
+ using TypeVisitor<GetContainedDeducedTypeVisitor, Type*>::Visit;
+ Type *Visit(QualType T) {
if (T.isNull())
return nullptr;
return Visit(T.getTypePtr());
}
- // The 'auto' type itself.
- AutoType *VisitAutoType(const AutoType *AT) {
- return const_cast<AutoType*>(AT);
+ // The deduced type itself.
+ Type *VisitDeducedType(const DeducedType *AT) {
+ return const_cast<DeducedType*>(AT);
}
// Only these types can contain the desired 'auto' type.
- AutoType *VisitPointerType(const PointerType *T) {
+ Type *VisitElaboratedType(const ElaboratedType *T) {
+ return Visit(T->getNamedType());
+ }
+ Type *VisitPointerType(const PointerType *T) {
return Visit(T->getPointeeType());
}
- AutoType *VisitBlockPointerType(const BlockPointerType *T) {
+ Type *VisitBlockPointerType(const BlockPointerType *T) {
return Visit(T->getPointeeType());
}
- AutoType *VisitReferenceType(const ReferenceType *T) {
+ Type *VisitReferenceType(const ReferenceType *T) {
return Visit(T->getPointeeTypeAsWritten());
}
- AutoType *VisitMemberPointerType(const MemberPointerType *T) {
+ Type *VisitMemberPointerType(const MemberPointerType *T) {
return Visit(T->getPointeeType());
}
- AutoType *VisitArrayType(const ArrayType *T) {
+ Type *VisitArrayType(const ArrayType *T) {
return Visit(T->getElementType());
}
- AutoType *VisitDependentSizedExtVectorType(
+ Type *VisitDependentSizedExtVectorType(
const DependentSizedExtVectorType *T) {
return Visit(T->getElementType());
}
- AutoType *VisitVectorType(const VectorType *T) {
+ Type *VisitVectorType(const VectorType *T) {
return Visit(T->getElementType());
}
- AutoType *VisitFunctionType(const FunctionType *T) {
+ Type *VisitFunctionProtoType(const FunctionProtoType *T) {
+ if (Syntactic && T->hasTrailingReturn())
+ return const_cast<FunctionProtoType*>(T);
+ return VisitFunctionType(T);
+ }
+ Type *VisitFunctionType(const FunctionType *T) {
return Visit(T->getReturnType());
}
- AutoType *VisitParenType(const ParenType *T) {
+ Type *VisitParenType(const ParenType *T) {
return Visit(T->getInnerType());
}
- AutoType *VisitAttributedType(const AttributedType *T) {
+ Type *VisitAttributedType(const AttributedType *T) {
return Visit(T->getModifiedType());
}
- AutoType *VisitAdjustedType(const AdjustedType *T) {
+ Type *VisitAdjustedType(const AdjustedType *T) {
return Visit(T->getOriginalType());
}
};
}
-AutoType *Type::getContainedAutoType() const {
- return GetContainedAutoVisitor().Visit(this);
+DeducedType *Type::getContainedDeducedType() const {
+ return cast_or_null<DeducedType>(
+ GetContainedDeducedTypeVisitor().Visit(this));
+}
+
+bool Type::hasAutoForTrailingReturnType() const {
+ return dyn_cast_or_null<FunctionType>(
+ GetContainedDeducedTypeVisitor(true).Visit(this));
}
bool Type::hasIntegerRepresentation() const {
@@ -2005,20 +2023,8 @@ bool QualType::isCXX98PODType(const ASTContext &Context) const {
if ((*this)->isIncompleteType())
return false;
- if (Context.getLangOpts().ObjCAutoRefCount) {
- switch (getObjCLifetime()) {
- case Qualifiers::OCL_ExplicitNone:
- return true;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_None:
- break;
- }
- }
+ if (hasNonTrivialObjCLifetime())
+ return false;
QualType CanonicalType = getTypePtr()->CanonicalType;
switch (CanonicalType->getTypeClass()) {
@@ -2067,22 +2073,8 @@ bool QualType::isTrivialType(const ASTContext &Context) const {
if ((*this)->isIncompleteType())
return false;
- if (Context.getLangOpts().ObjCAutoRefCount) {
- switch (getObjCLifetime()) {
- case Qualifiers::OCL_ExplicitNone:
- return true;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_None:
- if ((*this)->isObjCLifetimeType())
- return false;
- break;
- }
- }
+ if (hasNonTrivialObjCLifetime())
+ return false;
QualType CanonicalType = getTypePtr()->CanonicalType;
if (CanonicalType->isDependentType())
@@ -2119,22 +2111,8 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
if ((*this)->isArrayType())
return Context.getBaseElementType(*this).isTriviallyCopyableType(Context);
- if (Context.getLangOpts().ObjCAutoRefCount) {
- switch (getObjCLifetime()) {
- case Qualifiers::OCL_ExplicitNone:
- return true;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_None:
- if ((*this)->isObjCLifetimeType())
- return false;
- break;
- }
- }
+ if (hasNonTrivialObjCLifetime())
+ return false;
// C++11 [basic.types]p9
// Scalar types, trivially copyable class types, arrays of such types, and
@@ -2170,7 +2148,11 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
return false;
}
-
+bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
+ return !Context.getLangOpts().ObjCAutoRefCount &&
+ Context.getLangOpts().ObjCWeak &&
+ getObjCLifetime() != Qualifiers::OCL_Weak;
+}
bool Type::isLiteralType(const ASTContext &Ctx) const {
if (isDependentType())
@@ -2280,20 +2262,8 @@ bool QualType::isCXX11PODType(const ASTContext &Context) const {
if (ty->isDependentType())
return false;
- if (Context.getLangOpts().ObjCAutoRefCount) {
- switch (getObjCLifetime()) {
- case Qualifiers::OCL_ExplicitNone:
- return true;
-
- case Qualifiers::OCL_Strong:
- case Qualifiers::OCL_Weak:
- case Qualifiers::OCL_Autoreleasing:
- return false;
-
- case Qualifiers::OCL_None:
- break;
- }
- }
+ if (hasNonTrivialObjCLifetime())
+ return false;
// C++11 [basic.types]p9:
// Scalar types, POD classes, arrays of such types, and cv-qualified
@@ -2630,8 +2600,6 @@ StringRef BuiltinType::getName(const PrintingPolicy &Policy) const {
return "clk_event_t";
case OCLQueue:
return "queue_t";
- case OCLNDRange:
- return "ndrange_t";
case OCLReserveID:
return "reserve_id_t";
case OMPArraySection:
@@ -3365,6 +3333,7 @@ static CachedProperties computeCachedProperties(const Type *T) {
return CachedProperties(ExternalLinkage, false);
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
// Give non-deduced 'auto' types external linkage. We should only see them
// here in error recovery.
return CachedProperties(ExternalLinkage, false);
@@ -3472,6 +3441,7 @@ static LinkageInfo computeLinkageInfo(const Type *T) {
return LinkageInfo::external();
case Type::Auto:
+ case Type::DeducedTemplateSpecialization:
return LinkageInfo::external();
case Type::Record:
@@ -3608,7 +3578,8 @@ bool Type::canHaveNullability() const {
// auto is considered dependent when it isn't deduced.
case Type::Auto:
- return !cast<AutoType>(type.getTypePtr())->isDeduced();
+ case Type::DeducedTemplateSpecialization:
+ return !cast<DeducedType>(type.getTypePtr())->isDeduced();
case Type::Builtin:
switch (cast<BuiltinType>(type.getTypePtr())->getKind()) {
@@ -3640,7 +3611,6 @@ bool Type::canHaveNullability() const {
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::BuiltinFn:
case BuiltinType::NullPtr:
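
The generalized visitor now stops at any DeducedType and, in syntactic mode, at
a function type with a trailing return. Pure-language examples of the cases it
distinguishes (declarations are illustrative only):

    auto x = 42;        // contains a deduced type: plain 'auto'
    auto f() -> int;    // hasAutoForTrailingReturnType() is true for f's
                        // type even though nothing remains to be deduced
    template <typename T> struct Box { Box(T v); };
    Box b{1};           // C++17: 'Box' is written as a
                        // DeducedTemplateSpecializationType until it is
                        // deduced to Box<int>
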
diff --git a/lib/AST/TypeLoc.cpp b/lib/AST/TypeLoc.cpp
index 7242858f21e6..c9a268655723 100644
--- a/lib/AST/TypeLoc.cpp
+++ b/lib/AST/TypeLoc.cpp
@@ -340,7 +340,6 @@ TypeSpecifierType BuiltinTypeLoc::getWrittenTypeSpec() const {
case BuiltinType::OCLEvent:
case BuiltinType::OCLClkEvent:
case BuiltinType::OCLQueue:
- case BuiltinType::OCLNDRange:
case BuiltinType::OCLReserveID:
case BuiltinType::BuiltinFn:
case BuiltinType::OMPArraySection:
diff --git a/lib/AST/TypePrinter.cpp b/lib/AST/TypePrinter.cpp
index cccc90876321..5268a2901ad9 100644
--- a/lib/AST/TypePrinter.cpp
+++ b/lib/AST/TypePrinter.cpp
@@ -96,7 +96,7 @@ namespace {
static bool canPrefixQualifiers(const Type *T, bool &NeedARCStrongQualifier);
void spaceBeforePlaceHolder(raw_ostream &OS);
- void printTypeSpec(const NamedDecl *D, raw_ostream &OS);
+ void printTypeSpec(NamedDecl *D, raw_ostream &OS);
void printBefore(const Type *ty, Qualifiers qs, raw_ostream &OS);
void printBefore(QualType T, raw_ostream &OS);
@@ -189,6 +189,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Elaborated:
case Type::TemplateTypeParm:
case Type::SubstTemplateTypeParmPack:
+ case Type::DeducedTemplateSpecialization:
case Type::TemplateSpecialization:
case Type::InjectedClassName:
case Type::DependentName:
@@ -797,7 +798,14 @@ void TypePrinter::printFunctionNoProtoAfter(const FunctionNoProtoType *T,
printAfter(T->getReturnType(), OS);
}
-void TypePrinter::printTypeSpec(const NamedDecl *D, raw_ostream &OS) {
+void TypePrinter::printTypeSpec(NamedDecl *D, raw_ostream &OS) {
+
+ // Compute the full nested-name-specifier for this type.
+ // In C, this will always be empty except when the type
+  // being printed is anonymous and nested inside another Record.
+ if (!Policy.SuppressScope)
+ AppendScope(D->getDeclContext(), OS);
+
IdentifierInfo *II = D->getIdentifier();
OS << II->getName();
spaceBeforePlaceHolder(OS);
@@ -888,6 +896,24 @@ void TypePrinter::printAutoAfter(const AutoType *T, raw_ostream &OS) {
printAfter(T->getDeducedType(), OS);
}
+void TypePrinter::printDeducedTemplateSpecializationBefore(
+ const DeducedTemplateSpecializationType *T, raw_ostream &OS) {
+ // If the type has been deduced, print the deduced type.
+ if (!T->getDeducedType().isNull()) {
+ printBefore(T->getDeducedType(), OS);
+ } else {
+ IncludeStrongLifetimeRAII Strong(Policy);
+ T->getTemplateName().print(OS, Policy);
+ spaceBeforePlaceHolder(OS);
+ }
+}
+void TypePrinter::printDeducedTemplateSpecializationAfter(
+ const DeducedTemplateSpecializationType *T, raw_ostream &OS) {
+ // If the type has been deduced, print the deduced type.
+ if (!T->getDeducedType().isNull())
+ printAfter(T->getDeducedType(), OS);
+}
+
void TypePrinter::printAtomicBefore(const AtomicType *T, raw_ostream &OS) {
IncludeStrongLifetimeRAII Strong(Policy);
@@ -1627,14 +1653,22 @@ void Qualifiers::print(raw_ostream &OS, const PrintingPolicy& Policy,
OS << "__local";
break;
case LangAS::opencl_constant:
+ case LangAS::cuda_constant:
OS << "__constant";
break;
case LangAS::opencl_generic:
OS << "__generic";
break;
+ case LangAS::cuda_device:
+ OS << "__device";
+ break;
+ case LangAS::cuda_shared:
+ OS << "__shared";
+ break;
default:
+ assert(addrspace >= LangAS::Count);
OS << "__attribute__((address_space(";
- OS << addrspace;
+ OS << addrspace - LangAS::Count;
OS << ")))";
}
}
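In the Qualifiers::print hunk above, the default branch now assumes language address spaces occupy [0, LangAS::Count) and target address spaces are stored biased by LangAS::Count, so the bias is subtracted before printing. A compilable sketch of that numbering scheme (enum values illustrative, not clang's actual ones):

    #include <iostream>

    enum LangAS { Default, opencl_global, cuda_device, /* ... */ Count };

    // A target-specific address space N is stored as Count + N; printing
    // shifts it back to recover the target-level number.
    void printTargetAddressSpace(unsigned AddrSpace) {
      if (AddrSpace >= Count)
        std::cout << "__attribute__((address_space("
                  << AddrSpace - Count << ")))";
    }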
diff --git a/lib/ASTMatchers/Dynamic/Registry.cpp b/lib/ASTMatchers/Dynamic/Registry.cpp
index d1cab80c1a53..9fa693038194 100644
--- a/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -330,6 +330,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isPublic);
REGISTER_MATCHER(isPure);
REGISTER_MATCHER(isSignedInteger);
+ REGISTER_MATCHER(isStaticStorageClass);
REGISTER_MATCHER(isStruct);
REGISTER_MATCHER(isTemplateInstantiation);
REGISTER_MATCHER(isUnion);
@@ -359,9 +360,14 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(nullStmt);
REGISTER_MATCHER(numSelectorArgs);
REGISTER_MATCHER(ofClass);
+ REGISTER_MATCHER(objcCategoryDecl);
REGISTER_MATCHER(objcInterfaceDecl);
+ REGISTER_MATCHER(objcIvarDecl);
REGISTER_MATCHER(objcMessageExpr);
+ REGISTER_MATCHER(objcMethodDecl);
REGISTER_MATCHER(objcObjectPointerType);
+ REGISTER_MATCHER(objcPropertyDecl);
+ REGISTER_MATCHER(objcProtocolDecl);
REGISTER_MATCHER(on);
REGISTER_MATCHER(onImplicitObjectArgument);
REGISTER_MATCHER(opaqueValueExpr);
@@ -412,6 +418,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(typedefNameDecl);
REGISTER_MATCHER(typedefType);
REGISTER_MATCHER(typeAliasDecl);
+ REGISTER_MATCHER(typeAliasTemplateDecl);
REGISTER_MATCHER(typeLoc);
REGISTER_MATCHER(unaryExprOrTypeTraitExpr);
REGISTER_MATCHER(unaryOperator);
diff --git a/lib/Analysis/BodyFarm.cpp b/lib/Analysis/BodyFarm.cpp
index 56c812c34c50..59127246105d 100644
--- a/lib/Analysis/BodyFarm.cpp
+++ b/lib/Analysis/BodyFarm.cpp
@@ -87,7 +87,7 @@ BinaryOperator *ASTMaker::makeAssignment(const Expr *LHS, const Expr *RHS,
QualType Ty) {
return new (C) BinaryOperator(const_cast<Expr*>(LHS), const_cast<Expr*>(RHS),
BO_Assign, Ty, VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), FPOptions());
}
BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
@@ -99,7 +99,7 @@ BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
Op,
C.getLogicalOperationType(),
VK_RValue,
- OK_Ordinary, SourceLocation(), false);
+ OK_Ordinary, SourceLocation(), FPOptions());
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
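The two BodyFarm call sites above change because BinaryOperator's trailing parameter is no longer a lone fp-contractable bool but an FPOptions bundle; passing FPOptions() preserves the old default. An illustrative stand-in for the shape of that change (not clang's actual FPOptions definition):

    // An options bundle replaces the single boolean, so further
    // floating-point flags can be added without touching every call site.
    class FPOptions {
      unsigned FPContract : 1;
    public:
      FPOptions() : FPContract(0) {}   // defaults match the old 'false'
      bool isFPContractable() const { return FPContract; }
    };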
diff --git a/lib/Analysis/CFG.cpp b/lib/Analysis/CFG.cpp
index d56e0e8fa1d0..2a2b3d73b5ca 100644
--- a/lib/Analysis/CFG.cpp
+++ b/lib/Analysis/CFG.cpp
@@ -1390,7 +1390,7 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
// Check if type is a C++ class with non-trivial destructor.
if (const CXXRecordDecl *CD = QT->getAsCXXRecordDecl())
- if (!CD->hasTrivialDestructor()) {
+ if (CD->hasDefinition() && !CD->hasTrivialDestructor()) {
// Add the variable to scope
Scope = createOrReuseLocalScope(Scope);
Scope->addVar(VD);
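The added hasDefinition() guard matters because a CXXRecordDecl can reach the CFG builder without a definition, and hasTrivialDestructor() may only be asked of a defined class. A hypothetical input showing such a declaration:

    class Fwd;            // forward declaration only: hasDefinition() is false

    // The AST still contains a CXXRecordDecl for 'Fwd' (reachable through
    // pointer and reference uses, or in invalid code); the builder must
    // confirm a definition exists before querying the destructor.
    void use(Fwd *P);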
diff --git a/lib/Analysis/CallGraph.cpp b/lib/Analysis/CallGraph.cpp
index 8c126b09d057..6d9530bf0c68 100644
--- a/lib/Analysis/CallGraph.cpp
+++ b/lib/Analysis/CallGraph.cpp
@@ -62,6 +62,7 @@ public:
void VisitCallExpr(CallExpr *CE) {
if (Decl *D = getDeclFromCall(CE))
addCalledDecl(D);
+ VisitChildren(CE);
}
// Adds may-call edges for the ObjC message sends.
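Without the added VisitChildren(CE), the visitor stopped at the outermost call expression and never descended into its arguments. A hypothetical input showing what the recursion recovers:

    int g();
    int f(int);

    void caller() {
      // Both caller -> f and caller -> g should become call-graph edges;
      // before this fix the nested call to g() was skipped.
      f(g());
    }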
diff --git a/lib/Analysis/CloneDetection.cpp b/lib/Analysis/CloneDetection.cpp
index e761738214c6..5bbcbe4e5722 100644
--- a/lib/Analysis/CloneDetection.cpp
+++ b/lib/Analysis/CloneDetection.cpp
@@ -24,27 +24,27 @@
using namespace clang;
-StmtSequence::StmtSequence(const CompoundStmt *Stmt, ASTContext &Context,
+StmtSequence::StmtSequence(const CompoundStmt *Stmt, const Decl *D,
unsigned StartIndex, unsigned EndIndex)
- : S(Stmt), Context(&Context), StartIndex(StartIndex), EndIndex(EndIndex) {
+ : S(Stmt), D(D), StartIndex(StartIndex), EndIndex(EndIndex) {
assert(Stmt && "Stmt must not be a nullptr");
assert(StartIndex < EndIndex && "Given array should not be empty");
assert(EndIndex <= Stmt->size() && "Given array too big for this Stmt");
}
-StmtSequence::StmtSequence(const Stmt *Stmt, ASTContext &Context)
- : S(Stmt), Context(&Context), StartIndex(0), EndIndex(0) {}
+StmtSequence::StmtSequence(const Stmt *Stmt, const Decl *D)
+ : S(Stmt), D(D), StartIndex(0), EndIndex(0) {}
StmtSequence::StmtSequence()
- : S(nullptr), Context(nullptr), StartIndex(0), EndIndex(0) {}
+ : S(nullptr), D(nullptr), StartIndex(0), EndIndex(0) {}
bool StmtSequence::contains(const StmtSequence &Other) const {
- // If both sequences reside in different translation units, they can never
- // contain each other.
- if (Context != Other.Context)
+ // If both sequences reside in different declarations, they can never contain
+ // each other.
+ if (D != Other.D)
return false;
- const SourceManager &SM = Context->getSourceManager();
+ const SourceManager &SM = getASTContext().getSourceManager();
// Otherwise check if the start and end locations of the current sequence
// surround the other sequence.
@@ -76,6 +76,11 @@ StmtSequence::iterator StmtSequence::end() const {
return CS->body_begin() + EndIndex;
}
+ASTContext &StmtSequence::getASTContext() const {
+ assert(D);
+ return D->getASTContext();
+}
+
SourceLocation StmtSequence::getStartLoc() const {
return front()->getLocStart();
}
@@ -86,168 +91,8 @@ SourceRange StmtSequence::getSourceRange() const {
return SourceRange(getStartLoc(), getEndLoc());
}
-namespace {
-
-/// \brief Analyzes the pattern of the referenced variables in a statement.
-class VariablePattern {
-
- /// \brief Describes an occurence of a variable reference in a statement.
- struct VariableOccurence {
- /// The index of the associated VarDecl in the Variables vector.
- size_t KindID;
- /// The statement in the code where the variable was referenced.
- const Stmt *Mention;
-
- VariableOccurence(size_t KindID, const Stmt *Mention)
- : KindID(KindID), Mention(Mention) {}
- };
-
- /// All occurences of referenced variables in the order of appearance.
- std::vector<VariableOccurence> Occurences;
- /// List of referenced variables in the order of appearance.
- /// Every item in this list is unique.
- std::vector<const VarDecl *> Variables;
-
- /// \brief Adds a new variable referenced to this pattern.
- /// \param VarDecl The declaration of the variable that is referenced.
- /// \param Mention The SourceRange where this variable is referenced.
- void addVariableOccurence(const VarDecl *VarDecl, const Stmt *Mention) {
- // First check if we already reference this variable
- for (size_t KindIndex = 0; KindIndex < Variables.size(); ++KindIndex) {
- if (Variables[KindIndex] == VarDecl) {
- // If yes, add a new occurence that points to the existing entry in
- // the Variables vector.
- Occurences.emplace_back(KindIndex, Mention);
- return;
- }
- }
- // If this variable wasn't already referenced, add it to the list of
- // referenced variables and add a occurence that points to this new entry.
- Occurences.emplace_back(Variables.size(), Mention);
- Variables.push_back(VarDecl);
- }
-
- /// \brief Adds each referenced variable from the given statement.
- void addVariables(const Stmt *S) {
- // Sometimes we get a nullptr (such as from IfStmts which often have nullptr
- // children). We skip such statements as they don't reference any
- // variables.
- if (!S)
- return;
-
- // Check if S is a reference to a variable. If yes, add it to the pattern.
- if (auto D = dyn_cast<DeclRefExpr>(S)) {
- if (auto VD = dyn_cast<VarDecl>(D->getDecl()->getCanonicalDecl()))
- addVariableOccurence(VD, D);
- }
-
- // Recursively check all children of the given statement.
- for (const Stmt *Child : S->children()) {
- addVariables(Child);
- }
- }
-
-public:
- /// \brief Creates an VariablePattern object with information about the given
- /// StmtSequence.
- VariablePattern(const StmtSequence &Sequence) {
- for (const Stmt *S : Sequence)
- addVariables(S);
- }
-
- /// \brief Counts the differences between this pattern and the given one.
- /// \param Other The given VariablePattern to compare with.
- /// \param FirstMismatch Output parameter that will be filled with information
- /// about the first difference between the two patterns. This parameter
- /// can be a nullptr, in which case it will be ignored.
- /// \return Returns the number of differences between the pattern this object
- /// is following and the given VariablePattern.
- ///
- /// For example, the following statements all have the same pattern and this
- /// function would return zero:
- ///
- /// if (a < b) return a; return b;
- /// if (x < y) return x; return y;
- /// if (u2 < u1) return u2; return u1;
- ///
- /// But the following statement has a different pattern (note the changed
- /// variables in the return statements) and would have two differences when
- /// compared with one of the statements above.
- ///
- /// if (a < b) return b; return a;
- ///
- /// This function should only be called if the related statements of the given
- /// pattern and the statements of this objects are clones of each other.
- unsigned countPatternDifferences(
- const VariablePattern &Other,
- CloneDetector::SuspiciousClonePair *FirstMismatch = nullptr) {
- unsigned NumberOfDifferences = 0;
-
- assert(Other.Occurences.size() == Occurences.size());
- for (unsigned i = 0; i < Occurences.size(); ++i) {
- auto ThisOccurence = Occurences[i];
- auto OtherOccurence = Other.Occurences[i];
- if (ThisOccurence.KindID == OtherOccurence.KindID)
- continue;
-
- ++NumberOfDifferences;
-
- // If FirstMismatch is not a nullptr, we need to store information about
- // the first difference between the two patterns.
- if (FirstMismatch == nullptr)
- continue;
-
- // Only proceed if we just found the first difference as we only store
- // information about the first difference.
- if (NumberOfDifferences != 1)
- continue;
-
- const VarDecl *FirstSuggestion = nullptr;
- // If there is a variable available in the list of referenced variables
- // which wouldn't break the pattern if it is used in place of the
- // current variable, we provide this variable as the suggested fix.
- if (OtherOccurence.KindID < Variables.size())
- FirstSuggestion = Variables[OtherOccurence.KindID];
-
- // Store information about the first clone.
- FirstMismatch->FirstCloneInfo =
- CloneDetector::SuspiciousClonePair::SuspiciousCloneInfo(
- Variables[ThisOccurence.KindID], ThisOccurence.Mention,
- FirstSuggestion);
-
- // Same as above but with the other clone. We do this for both clones as
- // we don't know which clone is the one containing the unintended
- // pattern error.
- const VarDecl *SecondSuggestion = nullptr;
- if (ThisOccurence.KindID < Other.Variables.size())
- SecondSuggestion = Other.Variables[ThisOccurence.KindID];
-
- // Store information about the second clone.
- FirstMismatch->SecondCloneInfo =
- CloneDetector::SuspiciousClonePair::SuspiciousCloneInfo(
- Other.Variables[OtherOccurence.KindID], OtherOccurence.Mention,
- SecondSuggestion);
-
- // SuspiciousClonePair guarantees that the first clone always has a
- // suggested variable associated with it. As we know that one of the two
- // clones in the pair always has suggestion, we swap the two clones
- // in case the first clone has no suggested variable which means that
- // the second clone has a suggested variable and should be first.
- if (!FirstMismatch->FirstCloneInfo.Suggestion)
- std::swap(FirstMismatch->FirstCloneInfo,
- FirstMismatch->SecondCloneInfo);
-
- // This ensures that we always have at least one suggestion in a pair.
- assert(FirstMismatch->FirstCloneInfo.Suggestion);
- }
-
- return NumberOfDifferences;
- }
-};
-}
-
-/// \brief Prints the macro name that contains the given SourceLocation into
-/// the given raw_string_ostream.
+/// Prints the macro name that contains the given SourceLocation into the given
+/// raw_string_ostream.
static void printMacroName(llvm::raw_string_ostream &MacroStack,
ASTContext &Context, SourceLocation Loc) {
MacroStack << Lexer::getImmediateMacroName(Loc, Context.getSourceManager(),
@@ -258,8 +103,8 @@ static void printMacroName(llvm::raw_string_ostream &MacroStack,
MacroStack << " ";
}
-/// \brief Returns a string that represents all macro expansions that
-/// expanded into the given SourceLocation.
+/// Returns a string that represents all macro expansions that expanded into the
+/// given SourceLocation.
///
/// If 'getMacroStack(A) == getMacroStack(B)' is true, then the SourceLocations
/// A and B are expanded from the same macros in the same order.
@@ -279,7 +124,9 @@ static std::string getMacroStack(SourceLocation Loc, ASTContext &Context) {
}
namespace {
-/// \brief Collects the data of a single Stmt.
+typedef unsigned DataPiece;
+
+/// Collects the data of a single Stmt.
///
/// This class defines what a code clone is: if it collects the same data for
/// two statements, then those two statements are considered to be clones of each
@@ -292,11 +139,11 @@ template <typename T>
class StmtDataCollector : public ConstStmtVisitor<StmtDataCollector<T>> {
ASTContext &Context;
- /// \brief The data sink to which all data is forwarded.
+ /// The data sink to which all data is forwarded.
T &DataConsumer;
public:
- /// \brief Collects data of the given Stmt.
+ /// Collects data of the given Stmt.
/// \param S The given statement.
/// \param Context The ASTContext of S.
/// \param DataConsumer The data sink to which all data is forwarded.
@@ -307,7 +154,7 @@ public:
// Below are utility methods for appending different data to the vector.
- void addData(CloneDetector::DataPiece Integer) {
+ void addData(DataPiece Integer) {
DataConsumer.update(
StringRef(reinterpret_cast<char *>(&Integer), sizeof(Integer)));
}
@@ -425,7 +272,7 @@ public:
})
DEF_ADD_DATA(DeclStmt, {
auto numDecls = std::distance(S->decl_begin(), S->decl_end());
- addData(static_cast<CloneDetector::DataPiece>(numDecls));
+ addData(static_cast<DataPiece>(numDecls));
for (const Decl *D : S->decls()) {
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
addData(VD->getType());
@@ -454,199 +301,131 @@ public:
};
} // end anonymous namespace
-namespace {
-/// Generates CloneSignatures for a set of statements and stores the results in
-/// a CloneDetector object.
-class CloneSignatureGenerator {
+void CloneDetector::analyzeCodeBody(const Decl *D) {
+ assert(D);
+ assert(D->hasBody());
- CloneDetector &CD;
- ASTContext &Context;
+ Sequences.push_back(StmtSequence(D->getBody(), D));
+}
- /// \brief Generates CloneSignatures for all statements in the given statement
- /// tree and stores them in the CloneDetector.
- ///
- /// \param S The root of the given statement tree.
- /// \param ParentMacroStack A string representing the macros that generated
- /// the parent statement or an empty string if no
- /// macros generated the parent statement.
- /// See getMacroStack() for generating such a string.
- /// \return The CloneSignature of the root statement.
- CloneDetector::CloneSignature
- generateSignatures(const Stmt *S, const std::string &ParentMacroStack) {
- // Create an empty signature that will be filled in this method.
- CloneDetector::CloneSignature Signature;
-
- llvm::MD5 Hash;
-
- // Collect all relevant data from S and hash it.
- StmtDataCollector<llvm::MD5>(S, Context, Hash);
-
- // Look up what macros expanded into the current statement.
- std::string StartMacroStack = getMacroStack(S->getLocStart(), Context);
- std::string EndMacroStack = getMacroStack(S->getLocEnd(), Context);
-
- // First, check if ParentMacroStack is not empty which means we are currently
- // dealing with a parent statement which was expanded from a macro.
- // If this parent statement was expanded from the same macros as this
- // statement, we reduce the initial complexity of this statement to zero.
- // This causes that a group of statements that were generated by a single
- // macro expansion will only increase the total complexity by one.
- // Note: This is not the final complexity of this statement as we still
- // add the complexity of the child statements to the complexity value.
- if (!ParentMacroStack.empty() && (StartMacroStack == ParentMacroStack &&
- EndMacroStack == ParentMacroStack)) {
- Signature.Complexity = 0;
- }
+/// Returns true if and only if \p Seq contains at least one other
+/// sequence in \p Group.
+static bool containsAnyInGroup(StmtSequence &Seq,
+ CloneDetector::CloneGroup &Group) {
+ for (StmtSequence &GroupSeq : Group) {
+ if (Seq.contains(GroupSeq))
+ return true;
+ }
+ return false;
+}
- // Storage for the signatures of the direct child statements. This is only
- // needed if the current statement is a CompoundStmt.
- std::vector<CloneDetector::CloneSignature> ChildSignatures;
- const CompoundStmt *CS = dyn_cast<const CompoundStmt>(S);
+/// Returns true if and only if all sequences in \p OtherGroup are
+/// contained by a sequence in \p Group.
+static bool containsGroup(CloneDetector::CloneGroup &Group,
+ CloneDetector::CloneGroup &OtherGroup) {
+  // We have fewer sequences in the current group than in the other,
+ // so we will never fulfill the requirement for returning true. This is only
+ // possible because we know that a sequence in Group can contain at most
+ // one sequence in OtherGroup.
+ if (Group.size() < OtherGroup.size())
+ return false;
- // The signature of a statement includes the signatures of its children.
- // Therefore we create the signatures for every child and add them to the
- // current signature.
- for (const Stmt *Child : S->children()) {
- // Some statements like 'if' can have nullptr children that we will skip.
- if (!Child)
- continue;
+ for (StmtSequence &Stmt : Group) {
+ if (!containsAnyInGroup(Stmt, OtherGroup))
+ return false;
+ }
+ return true;
+}
- // Recursive call to create the signature of the child statement. This
- // will also create and store all clone groups in this child statement.
- // We pass only the StartMacroStack along to keep things simple.
- auto ChildSignature = generateSignatures(Child, StartMacroStack);
+void OnlyLargestCloneConstraint::constrain(
+ std::vector<CloneDetector::CloneGroup> &Result) {
+ std::vector<unsigned> IndexesToRemove;
- // Add the collected data to the signature of the current statement.
- Signature.Complexity += ChildSignature.Complexity;
- Hash.update(StringRef(reinterpret_cast<char *>(&ChildSignature.Hash),
- sizeof(ChildSignature.Hash)));
+  // Compare every group in the result with the rest. If one group contains
+  // another group, we only need to return the bigger group.
+ // Note: This doesn't scale well, so if possible avoid calling any heavy
+ // function from this loop to minimize the performance impact.
+ for (unsigned i = 0; i < Result.size(); ++i) {
+ for (unsigned j = 0; j < Result.size(); ++j) {
+ // Don't compare a group with itself.
+ if (i == j)
+ continue;
- // If the current statement is a CompoundStatement, we need to store the
- // signature for the generation of the sub-sequences.
- if (CS)
- ChildSignatures.push_back(ChildSignature);
+ if (containsGroup(Result[j], Result[i])) {
+ IndexesToRemove.push_back(i);
+ break;
+ }
}
+ }
- // If the current statement is a CompoundStmt, we also need to create the
- // clone groups from the sub-sequences inside the children.
- if (CS)
- handleSubSequences(CS, ChildSignatures);
+ // Erasing a list of indexes from the vector should be done with decreasing
+ // indexes. As IndexesToRemove is constructed with increasing values, we just
+ // reverse iterate over it to get the desired order.
+ for (auto I = IndexesToRemove.rbegin(); I != IndexesToRemove.rend(); ++I) {
+ Result.erase(Result.begin() + *I);
+ }
+}
- // Create the final hash code for the current signature.
- llvm::MD5::MD5Result HashResult;
- Hash.final(HashResult);
+static size_t createHash(llvm::MD5 &Hash) {
+ size_t HashCode;
- // Copy as much of the generated hash code to the signature's hash code.
- std::memcpy(&Signature.Hash, &HashResult,
- std::min(sizeof(Signature.Hash), sizeof(HashResult)));
+ // Create the final hash code for the current Stmt.
+ llvm::MD5::MD5Result HashResult;
+ Hash.final(HashResult);
- // Save the signature for the current statement in the CloneDetector object.
- CD.add(StmtSequence(S, Context), Signature);
+ // Copy as much as possible of the generated hash code to the Stmt's hash
+ // code.
+ std::memcpy(&HashCode, &HashResult,
+ std::min(sizeof(HashCode), sizeof(HashResult)));
- return Signature;
- }
+ return HashCode;
+}
- /// \brief Adds all possible sub-sequences in the child array of the given
- /// CompoundStmt to the CloneDetector.
- /// \param CS The given CompoundStmt.
- /// \param ChildSignatures A list of calculated signatures for each child in
- /// the given CompoundStmt.
- void handleSubSequences(
- const CompoundStmt *CS,
- const std::vector<CloneDetector::CloneSignature> &ChildSignatures) {
+size_t RecursiveCloneTypeIIConstraint::saveHash(
+ const Stmt *S, const Decl *D,
+ std::vector<std::pair<size_t, StmtSequence>> &StmtsByHash) {
+ llvm::MD5 Hash;
+ ASTContext &Context = D->getASTContext();
- // FIXME: This function has quadratic runtime right now. Check if skipping
- // this function for too long CompoundStmts is an option.
+ StmtDataCollector<llvm::MD5>(S, Context, Hash);
- // The length of the sub-sequence. We don't need to handle sequences with
- // the length 1 as they are already handled in CollectData().
+ auto CS = dyn_cast<CompoundStmt>(S);
+ SmallVector<size_t, 8> ChildHashes;
+
+ for (const Stmt *Child : S->children()) {
+ if (Child == nullptr) {
+ ChildHashes.push_back(0);
+ continue;
+ }
+ size_t ChildHash = saveHash(Child, D, StmtsByHash);
+ Hash.update(
+ StringRef(reinterpret_cast<char *>(&ChildHash), sizeof(ChildHash)));
+ ChildHashes.push_back(ChildHash);
+ }
+
+ if (CS) {
for (unsigned Length = 2; Length <= CS->size(); ++Length) {
- // The start index in the body of the CompoundStmt. We increase the
- // position until the end of the sub-sequence reaches the end of the
- // CompoundStmt body.
for (unsigned Pos = 0; Pos <= CS->size() - Length; ++Pos) {
- // Create an empty signature and add the signatures of all selected
- // child statements to it.
- CloneDetector::CloneSignature SubSignature;
- llvm::MD5 SubHash;
-
+ llvm::MD5 Hash;
for (unsigned i = Pos; i < Pos + Length; ++i) {
- SubSignature.Complexity += ChildSignatures[i].Complexity;
- size_t ChildHash = ChildSignatures[i].Hash;
-
- SubHash.update(StringRef(reinterpret_cast<char *>(&ChildHash),
+ size_t ChildHash = ChildHashes[i];
+ Hash.update(StringRef(reinterpret_cast<char *>(&ChildHash),
sizeof(ChildHash)));
}
-
- // Create the final hash code for the current signature.
- llvm::MD5::MD5Result HashResult;
- SubHash.final(HashResult);
-
- // Copy as much of the generated hash code to the signature's hash code.
- std::memcpy(&SubSignature.Hash, &HashResult,
- std::min(sizeof(SubSignature.Hash), sizeof(HashResult)));
-
- // Save the signature together with the information about what children
- // sequence we selected.
- CD.add(StmtSequence(CS, Context, Pos, Pos + Length), SubSignature);
+ StmtsByHash.push_back(std::make_pair(
+ createHash(Hash), StmtSequence(CS, D, Pos, Pos + Length)));
}
}
}
-public:
- explicit CloneSignatureGenerator(CloneDetector &CD, ASTContext &Context)
- : CD(CD), Context(Context) {}
-
- /// \brief Generates signatures for all statements in the given function body.
- void consumeCodeBody(const Stmt *S) { generateSignatures(S, ""); }
-};
-} // end anonymous namespace
-
-void CloneDetector::analyzeCodeBody(const Decl *D) {
- assert(D);
- assert(D->hasBody());
- CloneSignatureGenerator Generator(*this, D->getASTContext());
- Generator.consumeCodeBody(D->getBody());
-}
-
-void CloneDetector::add(const StmtSequence &S,
- const CloneSignature &Signature) {
- Sequences.push_back(std::make_pair(Signature, S));
+ size_t HashCode = createHash(Hash);
+ StmtsByHash.push_back(std::make_pair(HashCode, StmtSequence(S, D)));
+ return HashCode;
}
namespace {
-/// \brief Returns true if and only if \p Stmt contains at least one other
-/// sequence in the \p Group.
-bool containsAnyInGroup(StmtSequence &Stmt, CloneDetector::CloneGroup &Group) {
- for (StmtSequence &GroupStmt : Group.Sequences) {
- if (Stmt.contains(GroupStmt))
- return true;
- }
- return false;
-}
-
-/// \brief Returns true if and only if all sequences in \p OtherGroup are
-/// contained by a sequence in \p Group.
-bool containsGroup(CloneDetector::CloneGroup &Group,
- CloneDetector::CloneGroup &OtherGroup) {
- // We have less sequences in the current group than we have in the other,
- // so we will never fulfill the requirement for returning true. This is only
- // possible because we know that a sequence in Group can contain at most
- // one sequence in OtherGroup.
- if (Group.Sequences.size() < OtherGroup.Sequences.size())
- return false;
-
- for (StmtSequence &Stmt : Group.Sequences) {
- if (!containsAnyInGroup(Stmt, OtherGroup))
- return false;
- }
- return true;
-}
-} // end anonymous namespace
-
-namespace {
-/// \brief Wrapper around FoldingSetNodeID that it can be used as the template
-/// argument of the StmtDataCollector.
+/// Wrapper around FoldingSetNodeID so that it can be used as the template
+/// argument of the StmtDataCollector.
class FoldingSetNodeIDWrapper {
llvm::FoldingSetNodeID &FS;
@@ -658,8 +437,8 @@ public:
};
} // end anonymous namespace
-/// \brief Writes the relevant data from all statements and child statements
-/// in the given StmtSequence into the given FoldingSetNodeID.
+/// Writes the relevant data from all statements and child statements
+/// in the given StmtSequence into the given FoldingSetNodeID.
static void CollectStmtSequenceData(const StmtSequence &Sequence,
FoldingSetNodeIDWrapper &OutputData) {
for (const Stmt *S : Sequence) {
@@ -670,13 +449,13 @@ static void CollectStmtSequenceData(const StmtSequence &Sequence,
if (!Child)
continue;
- CollectStmtSequenceData(StmtSequence(Child, Sequence.getASTContext()),
+ CollectStmtSequenceData(StmtSequence(Child, Sequence.getContainingDecl()),
OutputData);
}
}
}
-/// \brief Returns true if both sequences are clones of each other.
+/// Returns true if both sequences are clones of each other.
static bool areSequencesClones(const StmtSequence &LHS,
const StmtSequence &RHS) {
// We collect the data from all statements in the sequence as we did before
@@ -693,202 +472,272 @@ static bool areSequencesClones(const StmtSequence &LHS,
return DataLHS == DataRHS;
}
-/// \brief Finds all actual clone groups in a single group of presumed clones.
-/// \param Result Output parameter to which all found groups are added.
-/// \param Group A group of presumed clones. The clones are allowed to have a
-/// different variable pattern and may not be actual clones of each
-/// other.
-/// \param CheckVariablePattern If true, every clone in a group that was added
-/// to the output follows the same variable pattern as the other
-/// clones in its group.
-static void createCloneGroups(std::vector<CloneDetector::CloneGroup> &Result,
- const CloneDetector::CloneGroup &Group,
- bool CheckVariablePattern) {
- // We remove the Sequences one by one, so a list is more appropriate.
- std::list<StmtSequence> UnassignedSequences(Group.Sequences.begin(),
- Group.Sequences.end());
-
- // Search for clones as long as there could be clones in UnassignedSequences.
- while (UnassignedSequences.size() > 1) {
-
- // Pick the first Sequence as a protoype for a new clone group.
- StmtSequence Prototype = UnassignedSequences.front();
- UnassignedSequences.pop_front();
-
- CloneDetector::CloneGroup FilteredGroup(Prototype, Group.Signature);
-
- // Analyze the variable pattern of the prototype. Every other StmtSequence
- // needs to have the same pattern to get into the new clone group.
- VariablePattern PrototypeFeatures(Prototype);
-
- // Search all remaining StmtSequences for an identical variable pattern
- // and assign them to our new clone group.
- auto I = UnassignedSequences.begin(), E = UnassignedSequences.end();
- while (I != E) {
- // If the sequence doesn't fit to the prototype, we have encountered
- // an unintended hash code collision and we skip it.
- if (!areSequencesClones(Prototype, *I)) {
- ++I;
- continue;
- }
+void RecursiveCloneTypeIIConstraint::constrain(
+ std::vector<CloneDetector::CloneGroup> &Sequences) {
+  // FIXME: Maybe we can do this in place and avoid this additional vector.
+ std::vector<CloneDetector::CloneGroup> Result;
- // If we weren't asked to check for a matching variable pattern in clone
- // groups we can add the sequence now to the new clone group.
- // If we were asked to check for matching variable pattern, we first have
- // to check that there are no differences between the two patterns and
- // only proceed if they match.
- if (!CheckVariablePattern ||
- VariablePattern(*I).countPatternDifferences(PrototypeFeatures) == 0) {
- FilteredGroup.Sequences.push_back(*I);
- I = UnassignedSequences.erase(I);
- continue;
- }
+ for (CloneDetector::CloneGroup &Group : Sequences) {
+ // We assume in the following code that the Group is non-empty, so we
+ // skip all empty groups.
+ if (Group.empty())
+ continue;
+
+ std::vector<std::pair<size_t, StmtSequence>> StmtsByHash;
- // We didn't found a matching variable pattern, so we continue with the
- // next sequence.
- ++I;
+ // Generate hash codes for all children of S and save them in StmtsByHash.
+ for (const StmtSequence &S : Group) {
+ saveHash(S.front(), S.getContainingDecl(), StmtsByHash);
}
- // Add a valid clone group to the list of found clone groups.
- if (!FilteredGroup.isValid())
- continue;
+ // Sort hash_codes in StmtsByHash.
+ std::stable_sort(StmtsByHash.begin(), StmtsByHash.end(),
+ [](std::pair<size_t, StmtSequence> LHS,
+ std::pair<size_t, StmtSequence> RHS) {
+ return LHS.first < RHS.first;
+ });
+
+ // Check for each StmtSequence if its successor has the same hash value.
+ // We don't check the last StmtSequence as it has no successor.
+    // Note: The 'size - 1' in the condition is safe because we check for an
+ // empty Group vector at the beginning of this function.
+ for (unsigned i = 0; i < StmtsByHash.size() - 1; ++i) {
+ const auto Current = StmtsByHash[i];
+
+      // It's likely that we just found a sequence of StmtSequences that
+ // represent a CloneGroup, so we create a new group and start checking and
+ // adding the StmtSequences in this sequence.
+ CloneDetector::CloneGroup NewGroup;
+
+ size_t PrototypeHash = Current.first;
+
+ for (; i < StmtsByHash.size(); ++i) {
+ // A different hash value means we have reached the end of the sequence.
+ if (PrototypeHash != StmtsByHash[i].first ||
+ !areSequencesClones(StmtsByHash[i].second, Current.second)) {
+ // The current sequence could be the start of a new CloneGroup. So we
+ // decrement i so that we visit it again in the outer loop.
+ // Note: i can never be 0 at this point because we are just comparing
+ // the hash of the Current StmtSequence with itself in the 'if' above.
+ assert(i != 0);
+ --i;
+ break;
+ }
+ // Same hash value means we should add the StmtSequence to the current
+ // group.
+ NewGroup.push_back(StmtsByHash[i].second);
+ }
- Result.push_back(FilteredGroup);
+    // We created a new clone group with matching hash codes; move it to
+    // the result vector.
+ Result.push_back(NewGroup);
+ }
}
+ // Sequences is the output parameter, so we copy our result into it.
+ Sequences = Result;
}
-void CloneDetector::findClones(std::vector<CloneGroup> &Result,
- unsigned MinGroupComplexity,
- bool CheckPatterns) {
- // A shortcut (and necessary for the for-loop later in this function).
- if (Sequences.empty())
- return;
+size_t MinComplexityConstraint::calculateStmtComplexity(
+ const StmtSequence &Seq, const std::string &ParentMacroStack) {
+ if (Seq.empty())
+ return 0;
+
+ size_t Complexity = 1;
+
+ ASTContext &Context = Seq.getASTContext();
+
+ // Look up what macros expanded into the current statement.
+ std::string StartMacroStack = getMacroStack(Seq.getStartLoc(), Context);
+ std::string EndMacroStack = getMacroStack(Seq.getEndLoc(), Context);
+
+  // First, check if ParentMacroStack is not empty, which means we are
+  // currently dealing with a parent statement that was expanded from a macro.
+  // If this parent statement was expanded from the same macros as this
+  // statement, we reduce the initial complexity of this statement to zero.
+  // This ensures that a group of statements generated by a single macro
+  // expansion only increases the total complexity by one.
+  // Note: This is not the final complexity of this statement, as we still
+  // add the complexity of the child statements to the complexity value.
+ if (!ParentMacroStack.empty() && (StartMacroStack == ParentMacroStack &&
+ EndMacroStack == ParentMacroStack)) {
+ Complexity = 0;
+ }
- // We need to search for groups of StmtSequences with the same hash code to
- // create our initial clone groups. By sorting all known StmtSequences by
- // their hash value we make sure that StmtSequences with the same hash code
- // are grouped together in the Sequences vector.
- // Note: We stable sort here because the StmtSequences are added in the order
- // in which they appear in the source file. We want to preserve that order
- // because we also want to report them in that order in the CloneChecker.
- std::stable_sort(Sequences.begin(), Sequences.end(),
- [](std::pair<CloneSignature, StmtSequence> LHS,
- std::pair<CloneSignature, StmtSequence> RHS) {
- return LHS.first.Hash < RHS.first.Hash;
- });
-
- std::vector<CloneGroup> CloneGroups;
-
- // Check for each CloneSignature if its successor has the same hash value.
- // We don't check the last CloneSignature as it has no successor.
- // Note: The 'size - 1' in the condition is safe because we check for an empty
- // Sequences vector at the beginning of this function.
- for (unsigned i = 0; i < Sequences.size() - 1; ++i) {
- const auto Current = Sequences[i];
- const auto Next = Sequences[i + 1];
-
- if (Current.first.Hash != Next.first.Hash)
- continue;
+ // Iterate over the Stmts in the StmtSequence and add their complexity values
+ // to the current complexity value.
+ if (Seq.holdsSequence()) {
+ for (const Stmt *S : Seq) {
+ Complexity += calculateStmtComplexity(
+ StmtSequence(S, Seq.getContainingDecl()), StartMacroStack);
+ }
+ } else {
+ for (const Stmt *S : Seq.front()->children()) {
+ Complexity += calculateStmtComplexity(
+ StmtSequence(S, Seq.getContainingDecl()), StartMacroStack);
+ }
+ }
+ return Complexity;
+}
- // It's likely that we just found an sequence of CloneSignatures that
- // represent a CloneGroup, so we create a new group and start checking and
- // adding the CloneSignatures in this sequence.
- CloneGroup Group;
- Group.Signature = Current.first;
-
- for (; i < Sequences.size(); ++i) {
- const auto &Signature = Sequences[i];
-
- // A different hash value means we have reached the end of the sequence.
- if (Current.first.Hash != Signature.first.Hash) {
- // The current Signature could be the start of a new CloneGroup. So we
- // decrement i so that we visit it again in the outer loop.
- // Note: i can never be 0 at this point because we are just comparing
- // the hash of the Current CloneSignature with itself in the 'if' above.
- assert(i != 0);
- --i;
- break;
- }
+void MatchingVariablePatternConstraint::constrain(
+ std::vector<CloneDetector::CloneGroup> &CloneGroups) {
+ CloneConstraint::splitCloneGroups(
+ CloneGroups, [](const StmtSequence &A, const StmtSequence &B) {
+ VariablePattern PatternA(A);
+ VariablePattern PatternB(B);
+ return PatternA.countPatternDifferences(PatternB) == 0;
+ });
+}
- // Skip CloneSignatures that won't pass the complexity requirement.
- if (Signature.first.Complexity < MinGroupComplexity)
+void CloneConstraint::splitCloneGroups(
+ std::vector<CloneDetector::CloneGroup> &CloneGroups,
+ std::function<bool(const StmtSequence &, const StmtSequence &)> Compare) {
+ std::vector<CloneDetector::CloneGroup> Result;
+ for (auto &HashGroup : CloneGroups) {
+ // Contains all indexes in HashGroup that were already added to a
+ // CloneGroup.
+ std::vector<char> Indexes;
+ Indexes.resize(HashGroup.size());
+
+ for (unsigned i = 0; i < HashGroup.size(); ++i) {
+ // Skip indexes that are already part of a CloneGroup.
+ if (Indexes[i])
continue;
- Group.Sequences.push_back(Signature.second);
- }
+      // Pick the first unhandled StmtSequence and consider it as the
+      // beginning of a new CloneGroup for now.
+ // We don't add i to Indexes because we never iterate back.
+ StmtSequence Prototype = HashGroup[i];
+ CloneDetector::CloneGroup PotentialGroup = {Prototype};
+ ++Indexes[i];
+
+ // Check all following StmtSequences for clones.
+ for (unsigned j = i + 1; j < HashGroup.size(); ++j) {
+ // Skip indexes that are already part of a CloneGroup.
+ if (Indexes[j])
+ continue;
+
+        // If a following StmtSequence belongs to our CloneGroup, we add it.
+ const StmtSequence &Candidate = HashGroup[j];
+
+ if (!Compare(Prototype, Candidate))
+ continue;
+
+ PotentialGroup.push_back(Candidate);
+ // Make sure we never visit this StmtSequence again.
+ ++Indexes[j];
+ }
- // There is a chance that we haven't found more than two fitting
- // CloneSignature because not enough CloneSignatures passed the complexity
- // requirement. As a CloneGroup with less than two members makes no sense,
- // we ignore this CloneGroup and won't add it to the result.
- if (!Group.isValid())
- continue;
+      // Add the completed group to the result and continue searching for
+      // more groups.
+ Result.push_back(PotentialGroup);
+ }
- CloneGroups.push_back(Group);
+ assert(std::all_of(Indexes.begin(), Indexes.end(),
+ [](char c) { return c == 1; }));
}
+ CloneGroups = Result;
+}
- // Add every valid clone group that fulfills the complexity requirement.
- for (const CloneGroup &Group : CloneGroups) {
- createCloneGroups(Result, Group, CheckPatterns);
+void VariablePattern::addVariableOccurence(const VarDecl *VarDecl,
+ const Stmt *Mention) {
+ // First check if we already reference this variable
+ for (size_t KindIndex = 0; KindIndex < Variables.size(); ++KindIndex) {
+ if (Variables[KindIndex] == VarDecl) {
+      // If yes, add a new occurrence that points to the existing entry in
+ // the Variables vector.
+ Occurences.emplace_back(KindIndex, Mention);
+ return;
+ }
}
+ // If this variable wasn't already referenced, add it to the list of
+  // referenced variables and add an occurrence that points to this new entry.
+ Occurences.emplace_back(Variables.size(), Mention);
+ Variables.push_back(VarDecl);
+}
- std::vector<unsigned> IndexesToRemove;
-
- // Compare every group in the result with the rest. If one groups contains
- // another group, we only need to return the bigger group.
- // Note: This doesn't scale well, so if possible avoid calling any heavy
- // function from this loop to minimize the performance impact.
- for (unsigned i = 0; i < Result.size(); ++i) {
- for (unsigned j = 0; j < Result.size(); ++j) {
- // Don't compare a group with itself.
- if (i == j)
- continue;
+void VariablePattern::addVariables(const Stmt *S) {
+ // Sometimes we get a nullptr (such as from IfStmts which often have nullptr
+ // children). We skip such statements as they don't reference any
+ // variables.
+ if (!S)
+ return;
- if (containsGroup(Result[j], Result[i])) {
- IndexesToRemove.push_back(i);
- break;
- }
- }
+ // Check if S is a reference to a variable. If yes, add it to the pattern.
+ if (auto D = dyn_cast<DeclRefExpr>(S)) {
+ if (auto VD = dyn_cast<VarDecl>(D->getDecl()->getCanonicalDecl()))
+ addVariableOccurence(VD, D);
}
- // Erasing a list of indexes from the vector should be done with decreasing
- // indexes. As IndexesToRemove is constructed with increasing values, we just
- // reverse iterate over it to get the desired order.
- for (auto I = IndexesToRemove.rbegin(); I != IndexesToRemove.rend(); ++I) {
- Result.erase(Result.begin() + *I);
+ // Recursively check all children of the given statement.
+ for (const Stmt *Child : S->children()) {
+ addVariables(Child);
}
}
-void CloneDetector::findSuspiciousClones(
- std::vector<CloneDetector::SuspiciousClonePair> &Result,
- unsigned MinGroupComplexity) {
- std::vector<CloneGroup> Clones;
- // Reuse the normal search for clones but specify that the clone groups don't
- // need to have a common referenced variable pattern so that we can manually
- // search for the kind of pattern errors this function is supposed to find.
- findClones(Clones, MinGroupComplexity, false);
-
- for (const CloneGroup &Group : Clones) {
- for (unsigned i = 0; i < Group.Sequences.size(); ++i) {
- VariablePattern PatternA(Group.Sequences[i]);
-
- for (unsigned j = i + 1; j < Group.Sequences.size(); ++j) {
- VariablePattern PatternB(Group.Sequences[j]);
-
- CloneDetector::SuspiciousClonePair ClonePair;
- // For now, we only report clones which break the variable pattern just
- // once because multiple differences in a pattern are an indicator that
- // those differences are maybe intended (e.g. because it's actually
- // a different algorithm).
- // TODO: In very big clones even multiple variables can be unintended,
- // so replacing this number with a percentage could better handle such
- // cases. On the other hand it could increase the false-positive rate
- // for all clones if the percentage is too high.
- if (PatternA.countPatternDifferences(PatternB, &ClonePair) == 1) {
- Result.push_back(ClonePair);
- break;
- }
- }
- }
+unsigned VariablePattern::countPatternDifferences(
+ const VariablePattern &Other,
+ VariablePattern::SuspiciousClonePair *FirstMismatch) {
+ unsigned NumberOfDifferences = 0;
+
+ assert(Other.Occurences.size() == Occurences.size());
+ for (unsigned i = 0; i < Occurences.size(); ++i) {
+ auto ThisOccurence = Occurences[i];
+ auto OtherOccurence = Other.Occurences[i];
+ if (ThisOccurence.KindID == OtherOccurence.KindID)
+ continue;
+
+ ++NumberOfDifferences;
+
+ // If FirstMismatch is not a nullptr, we need to store information about
+ // the first difference between the two patterns.
+ if (FirstMismatch == nullptr)
+ continue;
+
+ // Only proceed if we just found the first difference as we only store
+ // information about the first difference.
+ if (NumberOfDifferences != 1)
+ continue;
+
+ const VarDecl *FirstSuggestion = nullptr;
+ // If there is a variable available in the list of referenced variables
+ // which wouldn't break the pattern if it is used in place of the
+ // current variable, we provide this variable as the suggested fix.
+ if (OtherOccurence.KindID < Variables.size())
+ FirstSuggestion = Variables[OtherOccurence.KindID];
+
+ // Store information about the first clone.
+ FirstMismatch->FirstCloneInfo =
+ VariablePattern::SuspiciousClonePair::SuspiciousCloneInfo(
+ Variables[ThisOccurence.KindID], ThisOccurence.Mention,
+ FirstSuggestion);
+
+ // Same as above but with the other clone. We do this for both clones as
+ // we don't know which clone is the one containing the unintended
+ // pattern error.
+ const VarDecl *SecondSuggestion = nullptr;
+ if (ThisOccurence.KindID < Other.Variables.size())
+ SecondSuggestion = Other.Variables[ThisOccurence.KindID];
+
+ // Store information about the second clone.
+ FirstMismatch->SecondCloneInfo =
+ VariablePattern::SuspiciousClonePair::SuspiciousCloneInfo(
+ Other.Variables[OtherOccurence.KindID], OtherOccurence.Mention,
+ SecondSuggestion);
+
+ // SuspiciousClonePair guarantees that the first clone always has a
+ // suggested variable associated with it. As we know that one of the two
+  // clones in the pair always has a suggestion, we swap the two clones
+ // in case the first clone has no suggested variable which means that
+ // the second clone has a suggested variable and should be first.
+ if (!FirstMismatch->FirstCloneInfo.Suggestion)
+ std::swap(FirstMismatch->FirstCloneInfo, FirstMismatch->SecondCloneInfo);
+
+ // This ensures that we always have at least one suggestion in a pair.
+ assert(FirstMismatch->FirstCloneInfo.Suggestion);
}
+
+ return NumberOfDifferences;
}
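RecursiveCloneTypeIIConstraint above amounts to: hash every statement sequence, stable-sort by hash, and turn each run of equal hashes into a candidate clone group (which the real code then re-verifies with areSequencesClones() to weed out collisions). A self-contained sketch of the sort-and-run-scan, with std::string standing in for StmtSequence:

    #include <algorithm>
    #include <cstddef>
    #include <string>
    #include <utility>
    #include <vector>

    using Seq = std::string;                    // stand-in for StmtSequence
    using HashedSeq = std::pair<std::size_t, Seq>;

    std::vector<std::vector<Seq>> groupByHash(std::vector<HashedSeq> StmtsByHash) {
      // Stable sort keeps source order within equal hashes, mirroring the
      // patch's use of std::stable_sort.
      std::stable_sort(StmtsByHash.begin(), StmtsByHash.end(),
                       [](const HashedSeq &L, const HashedSeq &R) {
                         return L.first < R.first;
                       });
      std::vector<std::vector<Seq>> Groups;
      for (std::size_t I = 0; I < StmtsByHash.size();) {
        std::size_t J = I + 1;
        while (J < StmtsByHash.size() &&
               StmtsByHash[J].first == StmtsByHash[I].first)
          ++J;
        if (J - I > 1) {                        // runs of length >= 2 only
          std::vector<Seq> Group;
          for (std::size_t K = I; K < J; ++K)
            Group.push_back(StmtsByHash[K].second);
          Groups.push_back(std::move(Group));
        }
        I = J;
      }
      return Groups;
    }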
diff --git a/lib/Analysis/OSLog.cpp b/lib/Analysis/OSLog.cpp
index 3e13a153c65f..b2983932ea22 100644
--- a/lib/Analysis/OSLog.cpp
+++ b/lib/Analysis/OSLog.cpp
@@ -10,11 +10,11 @@
#include "llvm/ADT/SmallBitVector.h"
using namespace clang;
-using llvm::APInt;
using clang::analyze_os_log::OSLogBufferItem;
using clang::analyze_os_log::OSLogBufferLayout;
+namespace {
class OSLogFormatStringHandler
: public analyze_format_string::FormatStringHandler {
private:
@@ -166,6 +166,7 @@ public:
}
}
};
+} // end anonymous namespace
bool clang::analyze_os_log::computeOSLogBufferLayout(
ASTContext &Ctx, const CallExpr *E, OSLogBufferLayout &Layout) {
diff --git a/lib/Analysis/ReachableCode.cpp b/lib/Analysis/ReachableCode.cpp
index a2f3203762f7..60724ea60b25 100644
--- a/lib/Analysis/ReachableCode.cpp
+++ b/lib/Analysis/ReachableCode.cpp
@@ -58,6 +58,14 @@ static bool isTrivialDoWhile(const CFGBlock *B, const Stmt *S) {
return false;
}
+static bool isBuiltinUnreachable(const Stmt *S) {
+ if (const auto *DRE = dyn_cast<DeclRefExpr>(S))
+ if (const auto *FDecl = dyn_cast<FunctionDecl>(DRE->getDecl()))
+ return FDecl->getIdentifier() &&
+ FDecl->getBuiltinID() == Builtin::BI__builtin_unreachable;
+ return false;
+}
+
static bool isDeadReturn(const CFGBlock *B, const Stmt *S) {
// Look to see if the current control flow ends with a 'return', and see if
// 'S' is a substatement. The 'return' may not be the last element in the
@@ -132,15 +140,21 @@ static bool isExpandedFromConfigurationMacro(const Stmt *S,
// so that we can refine it later.
SourceLocation L = S->getLocStart();
if (L.isMacroID()) {
+ SourceManager &SM = PP.getSourceManager();
if (IgnoreYES_NO) {
// The Objective-C constants 'YES' and 'NO'
// are defined as macros. Do not treat them
// as configuration values.
- SourceManager &SM = PP.getSourceManager();
SourceLocation TopL = getTopMostMacro(L, SM);
StringRef MacroName = PP.getImmediateMacroName(TopL);
if (MacroName == "YES" || MacroName == "NO")
return false;
+ } else if (!PP.getLangOpts().CPlusPlus) {
+ // Do not treat C 'false' and 'true' macros as configuration values.
+ SourceLocation TopL = getTopMostMacro(L, SM);
+ StringRef MacroName = PP.getImmediateMacroName(TopL);
+ if (MacroName == "false" || MacroName == "true")
+ return false;
}
return true;
}
@@ -586,8 +600,7 @@ void DeadCodeScan::reportDeadCode(const CFGBlock *B,
if (isa<BreakStmt>(S)) {
UK = reachable_code::UK_Break;
- }
- else if (isTrivialDoWhile(B, S)) {
+ } else if (isTrivialDoWhile(B, S) || isBuiltinUnreachable(S)) {
return;
}
else if (isDeadReturn(B, S)) {
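Two behaviors change above: a deliberate __builtin_unreachable() after control flow ends is no longer reported as dead code, and in C the <stdbool.h> macros 'true' and 'false' no longer count as configuration macros (so code guarded by them is still diagnosed). A hypothetical Clang input for the first change:

    void stop(int X) {
      if (X) return;
      return;
      __builtin_unreachable();  // intentionally unreachable: not reported
    }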
diff --git a/lib/Analysis/ThreadSafetyTIL.cpp b/lib/Analysis/ThreadSafetyTIL.cpp
index 2923f7e66a61..83aa90435e2a 100644
--- a/lib/Analysis/ThreadSafetyTIL.cpp
+++ b/lib/Analysis/ThreadSafetyTIL.cpp
@@ -186,8 +186,8 @@ int BasicBlock::topologicalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
//
// This sort assumes that (1) dominators have been computed, (2) there are no
// critical edges, and (3) the entry block is reachable from the exit block
-// and no blocks are accessable via traversal of back-edges from the exit that
-// weren't accessable via forward edges from the entry.
+// and no blocks are accessible via traversal of back-edges from the exit that
+// weren't accessible via forward edges from the entry.
int BasicBlock::topologicalFinalSort(SimpleArray<BasicBlock*>& Blocks, int ID) {
// Visited is assumed to have been set by the topologicalSort. This pass
// assumes !Visited means that we've visited this node before.
diff --git a/lib/Basic/CMakeLists.txt b/lib/Basic/CMakeLists.txt
index 8929ec30ff7b..2feb31851cfe 100644
--- a/lib/Basic/CMakeLists.txt
+++ b/lib/Basic/CMakeLists.txt
@@ -74,6 +74,7 @@ add_clang_library(clangBasic
FileSystemStatCache.cpp
IdentifierTable.cpp
LangOptions.cpp
+ MemoryBufferCache.cpp
Module.cpp
ObjCRuntime.cpp
OpenMPKinds.cpp
@@ -89,6 +90,7 @@ add_clang_library(clangBasic
VersionTuple.cpp
VirtualFileSystem.cpp
Warnings.cpp
+ XRayLists.cpp
${version_inc}
)
diff --git a/lib/Basic/Diagnostic.cpp b/lib/Basic/Diagnostic.cpp
index 7529c475d6b9..350d5477751c 100644
--- a/lib/Basic/Diagnostic.cpp
+++ b/lib/Basic/Diagnostic.cpp
@@ -16,6 +16,7 @@
#include "clang/Basic/DiagnosticOptions.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/PartialDiagnostic.h"
+#include "clang/Basic/SourceManager.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CrashRecoveryContext.h"
@@ -131,13 +132,13 @@ void DiagnosticsEngine::Reset() {
// Clear state related to #pragma diagnostic.
DiagStates.clear();
- DiagStatePoints.clear();
+ DiagStatesByLoc.clear();
DiagStateOnPushStack.clear();
// Create a DiagState and DiagStatePoint representing diagnostic changes
// through command-line.
DiagStates.emplace_back();
- DiagStatePoints.push_back(DiagStatePoint(&DiagStates.back(), FullSourceLoc()));
+ DiagStatesByLoc.appendFirst(&DiagStates.back());
}
void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
@@ -157,27 +158,94 @@ void DiagnosticsEngine::ReportDelayed() {
DelayedDiagArg2.clear();
}
-DiagnosticsEngine::DiagStatePointsTy::iterator
-DiagnosticsEngine::GetDiagStatePointForLoc(SourceLocation L) const {
- assert(!DiagStatePoints.empty());
- assert(DiagStatePoints.front().Loc.isInvalid() &&
- "Should have created a DiagStatePoint for command-line");
+void DiagnosticsEngine::DiagStateMap::appendFirst(
+ DiagState *State) {
+ assert(Files.empty() && "not first");
+ FirstDiagState = CurDiagState = State;
+ CurDiagStateLoc = SourceLocation();
+}
+
+void DiagnosticsEngine::DiagStateMap::append(SourceManager &SrcMgr,
+ SourceLocation Loc,
+ DiagState *State) {
+ CurDiagState = State;
+ CurDiagStateLoc = Loc;
+
+ std::pair<FileID, unsigned> Decomp = SrcMgr.getDecomposedLoc(Loc);
+ unsigned Offset = Decomp.second;
+ for (File *F = getFile(SrcMgr, Decomp.first); F;
+ Offset = F->ParentOffset, F = F->Parent) {
+ F->HasLocalTransitions = true;
+ auto &Last = F->StateTransitions.back();
+ assert(Last.Offset <= Offset && "state transitions added out of order");
+
+ if (Last.Offset == Offset) {
+ if (Last.State == State)
+ break;
+ Last.State = State;
+ continue;
+ }
+
+ F->StateTransitions.push_back({State, Offset});
+ }
+}
- if (!SourceMgr)
- return DiagStatePoints.end() - 1;
+DiagnosticsEngine::DiagState *
+DiagnosticsEngine::DiagStateMap::lookup(SourceManager &SrcMgr,
+ SourceLocation Loc) const {
+ // Common case: we have not seen any diagnostic pragmas.
+ if (Files.empty())
+ return FirstDiagState;
- FullSourceLoc Loc(L, *SourceMgr);
- if (Loc.isInvalid())
- return DiagStatePoints.end() - 1;
+ std::pair<FileID, unsigned> Decomp = SrcMgr.getDecomposedLoc(Loc);
+ const File *F = getFile(SrcMgr, Decomp.first);
+ return F->lookup(Decomp.second);
+}
+
+DiagnosticsEngine::DiagState *
+DiagnosticsEngine::DiagStateMap::File::lookup(unsigned Offset) const {
+ auto OnePastIt = std::upper_bound(
+ StateTransitions.begin(), StateTransitions.end(), Offset,
+ [](unsigned Offset, const DiagStatePoint &P) {
+ return Offset < P.Offset;
+ });
+ assert(OnePastIt != StateTransitions.begin() && "missing initial state");
+ return OnePastIt[-1].State;
+}
+
+DiagnosticsEngine::DiagStateMap::File *
+DiagnosticsEngine::DiagStateMap::getFile(SourceManager &SrcMgr,
+ FileID ID) const {
+ // Get or insert the File for this ID.
+ auto Range = Files.equal_range(ID);
+ if (Range.first != Range.second)
+ return &Range.first->second;
+ auto &F = Files.insert(Range.first, std::make_pair(ID, File()))->second;
+
+ // We created a new File; look up the diagnostic state at the start of it and
+ // initialize it.
+ if (ID.isValid()) {
+ std::pair<FileID, unsigned> Decomp = SrcMgr.getDecomposedIncludedLoc(ID);
+ F.Parent = getFile(SrcMgr, Decomp.first);
+ F.ParentOffset = Decomp.second;
+ F.StateTransitions.push_back({F.Parent->lookup(Decomp.second), 0});
+ } else {
+ // This is the (imaginary) root file into which we pretend all top-level
+ // files are included; it descends from the initial state.
+ //
+ // FIXME: This doesn't guarantee that we use the same ordering as
+ // isBeforeInTranslationUnit in the cases where someone invented another
+ // top-level file and added diagnostic pragmas to it. See the code at the
+ // end of isBeforeInTranslationUnit for the quirks it deals with.
+ F.StateTransitions.push_back({FirstDiagState, 0});
+ }
+ return &F;
+}
- DiagStatePointsTy::iterator Pos = DiagStatePoints.end();
- FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
- if (LastStateChangePos.isValid() &&
- Loc.isBeforeInTranslationUnitThan(LastStateChangePos))
- Pos = std::upper_bound(DiagStatePoints.begin(), DiagStatePoints.end(),
- DiagStatePoint(nullptr, Loc));
- --Pos;
- return Pos;
+void DiagnosticsEngine::PushDiagStatePoint(DiagState *State,
+ SourceLocation Loc) {
+ assert(Loc.isValid() && "Adding invalid loc point");
+ DiagStatesByLoc.append(*SourceMgr, Loc, State);
}
void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
@@ -187,65 +255,38 @@ void DiagnosticsEngine::setSeverity(diag::kind Diag, diag::Severity Map,
assert((Diags->isBuiltinWarningOrExtension(Diag) ||
(Map == diag::Severity::Fatal || Map == diag::Severity::Error)) &&
"Cannot map errors into warnings!");
- assert(!DiagStatePoints.empty());
assert((L.isInvalid() || SourceMgr) && "No SourceMgr for valid location");
- FullSourceLoc Loc = SourceMgr? FullSourceLoc(L, *SourceMgr) : FullSourceLoc();
- FullSourceLoc LastStateChangePos = DiagStatePoints.back().Loc;
// Don't allow a mapping to a warning to override an error/fatal mapping.
+ bool WasUpgradedFromWarning = false;
if (Map == diag::Severity::Warning) {
DiagnosticMapping &Info = GetCurDiagState()->getOrAddMapping(Diag);
if (Info.getSeverity() == diag::Severity::Error ||
- Info.getSeverity() == diag::Severity::Fatal)
+ Info.getSeverity() == diag::Severity::Fatal) {
Map = Info.getSeverity();
+ WasUpgradedFromWarning = true;
+ }
}
DiagnosticMapping Mapping = makeUserMapping(Map, L);
+ Mapping.setUpgradedFromWarning(WasUpgradedFromWarning);
// Common case; setting all the diagnostics of a group in one place.
- if (Loc.isInvalid() || Loc == LastStateChangePos) {
- GetCurDiagState()->setMapping(Diag, Mapping);
- return;
- }
-
- // Another common case; modifying diagnostic state in a source location
- // after the previous one.
- if ((Loc.isValid() && LastStateChangePos.isInvalid()) ||
- LastStateChangePos.isBeforeInTranslationUnitThan(Loc)) {
- // A diagnostic pragma occurred, create a new DiagState initialized with
- // the current one and a new DiagStatePoint to record at which location
- // the new state became active.
- DiagStates.push_back(*GetCurDiagState());
- PushDiagStatePoint(&DiagStates.back(), Loc);
- GetCurDiagState()->setMapping(Diag, Mapping);
- return;
- }
-
- // We allow setting the diagnostic state in random source order for
- // completeness but it should not be actually happening in normal practice.
-
- DiagStatePointsTy::iterator Pos = GetDiagStatePointForLoc(Loc);
- assert(Pos != DiagStatePoints.end());
-
- // Update all diagnostic states that are active after the given location.
- for (DiagStatePointsTy::iterator
- I = Pos+1, E = DiagStatePoints.end(); I != E; ++I) {
- I->State->setMapping(Diag, Mapping);
- }
-
- // If the location corresponds to an existing point, just update its state.
- if (Pos->Loc == Loc) {
- Pos->State->setMapping(Diag, Mapping);
+ if ((L.isInvalid() || L == DiagStatesByLoc.getCurDiagStateLoc()) &&
+ DiagStatesByLoc.getCurDiagState()) {
+ // FIXME: This is theoretically wrong: if the current state is shared with
+ // some other location (via push/pop) we will change the state for that
+ // other location as well. This cannot currently happen, as we can't update
+ // the diagnostic state at the same location at which we pop.
+ DiagStatesByLoc.getCurDiagState()->setMapping(Diag, Mapping);
return;
}
- // Create a new state/point and fit it into the vector of DiagStatePoints
- // so that the vector is always ordered according to location.
- assert(Pos->Loc.isBeforeInTranslationUnitThan(Loc));
- DiagStates.push_back(*Pos->State);
- DiagState *NewState = &DiagStates.back();
- NewState->setMapping(Diag, Mapping);
- DiagStatePoints.insert(Pos+1, DiagStatePoint(NewState,
- FullSourceLoc(Loc, *SourceMgr)));
+ // A diagnostic pragma occurred, create a new DiagState initialized with
+ // the current one and a new DiagStatePoint to record at which location
+ // the new state became active.
+ DiagStates.push_back(*GetCurDiagState());
+ DiagStates.back().setMapping(Diag, Mapping);
+ PushDiagStatePoint(&DiagStates.back(), L);
}
bool DiagnosticsEngine::setSeverityForGroup(diag::Flavor Flavor,
diff --git a/lib/Basic/DiagnosticIDs.cpp b/lib/Basic/DiagnosticIDs.cpp
index 3c370f67fa32..e0580af45b50 100644
--- a/lib/Basic/DiagnosticIDs.cpp
+++ b/lib/Basic/DiagnosticIDs.cpp
@@ -411,11 +411,8 @@ DiagnosticIDs::getDiagnosticSeverity(unsigned DiagID, SourceLocation Loc,
// to error. Errors can only be mapped to fatal.
diag::Severity Result = diag::Severity::Fatal;
- DiagnosticsEngine::DiagStatePointsTy::iterator
- Pos = Diag.GetDiagStatePointForLoc(Loc);
- DiagnosticsEngine::DiagState *State = Pos->State;
-
// Get the mapping information, or compute it lazily.
+ DiagnosticsEngine::DiagState *State = Diag.GetDiagStateForLoc(Loc);
DiagnosticMapping &Mapping = State->getOrAddMapping((diag::kind)DiagID);
// TODO: Can a null severity really get here?
diff --git a/lib/Basic/FileManager.cpp b/lib/Basic/FileManager.cpp
index 50050d0a519b..0c10b5f4d14c 100644
--- a/lib/Basic/FileManager.cpp
+++ b/lib/Basic/FileManager.cpp
@@ -50,14 +50,14 @@ using namespace clang;
FileManager::FileManager(const FileSystemOptions &FSO,
IntrusiveRefCntPtr<vfs::FileSystem> FS)
- : FS(FS), FileSystemOpts(FSO),
- SeenDirEntries(64), SeenFileEntries(64), NextFileUID(0) {
+ : FS(std::move(FS)), FileSystemOpts(FSO), SeenDirEntries(64),
+ SeenFileEntries(64), NextFileUID(0) {
NumDirLookups = NumFileLookups = 0;
NumDirCacheMisses = NumFileCacheMisses = 0;
// If the caller doesn't provide a virtual file system, just grab the real
// file system.
- if (!FS)
+ if (!this->FS)
this->FS = vfs::getRealFileSystem();
}
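The `!this->FS` change above is a moved-from fix: after `FS(std::move(FS))` runs in the initializer list, the constructor parameter `FS` is empty, so testing the parameter would always take the fallback path and overwrite a caller-provided file system. A minimal analogue using std::shared_ptr rather than IntrusiveRefCntPtr:

#include <memory>

struct Holder {
  std::shared_ptr<int> P;
  explicit Holder(std::shared_ptr<int> P) : P(std::move(P)) {
    // After the move, the parameter P is guaranteed empty, so '!P' would be
    // true even when the caller passed a real pointer; test the member.
    if (!this->P)
      this->P = std::make_shared<int>(42);
  }
};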
@@ -386,6 +386,7 @@ FileManager::getVirtualFile(StringRef Filename, off_t Size,
UFE->ModTime = ModificationTime;
UFE->Dir = DirInfo;
UFE->UID = NextFileUID++;
+ UFE->IsValid = true;
UFE->File.reset();
return UFE;
}
diff --git a/lib/Basic/IdentifierTable.cpp b/lib/Basic/IdentifierTable.cpp
index af424cd92390..74c85376c7db 100644
--- a/lib/Basic/IdentifierTable.cpp
+++ b/lib/Basic/IdentifierTable.cpp
@@ -244,7 +244,7 @@ static KeywordStatus getTokenKwStatus(const LangOptions &LangOpts,
/// \brief Returns true if the identifier represents a keyword in the
/// specified language.
-bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) {
+bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) const {
switch (getTokenKwStatus(LangOpts, getTokenID())) {
case KS_Enabled:
case KS_Extension:
@@ -254,6 +254,19 @@ bool IdentifierInfo::isKeyword(const LangOptions &LangOpts) {
}
}
+/// \brief Returns true if the identifier represents a C++ keyword in the
+/// specified language.
+bool IdentifierInfo::isCPlusPlusKeyword(const LangOptions &LangOpts) const {
+ if (!LangOpts.CPlusPlus || !isKeyword(LangOpts))
+ return false;
+ // This is a C++ keyword if this identifier is not a keyword when checked
+ // using LangOptions without C++ support.
+ LangOptions LangOptsNoCPP = LangOpts;
+ LangOptsNoCPP.CPlusPlus = false;
+ LangOptsNoCPP.CPlusPlus11 = false;
+ return !isKeyword(LangOptsNoCPP);
+}
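The function above re-queries the keyword tables with C++ support masked off, a compact way to classify C++-only keywords without maintaining a second table. A toy standalone version of the re-query technique; the keyword sets here are illustrative stand-ins, not clang's real tables:

#include <set>
#include <string>

struct Opts { bool CPlusPlus = true; };

// Stand-in for clang's token tables: C keywords plus C++-only keywords.
bool isKeyword(const std::string &Name, const Opts &O) {
  static const std::set<std::string> CKeywords = {"int", "return"};
  static const std::set<std::string> CXXKeywords = {"class", "namespace"};
  return CKeywords.count(Name) || (O.CPlusPlus && CXXKeywords.count(Name));
}

bool isCPlusPlusOnlyKeyword(const std::string &Name, Opts O) {
  if (!O.CPlusPlus || !isKeyword(Name, O))
    return false;
  O.CPlusPlus = false;         // re-query with C++ support disabled
  return !isKeyword(Name, O);  // C++-only iff it stops being a keyword
}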
+
tok::PPKeywordKind IdentifierInfo::getPPKeywordID() const {
// We use a perfect hash function here involving the length of the keyword,
// the first and third character. For preprocessor ID's there are no
@@ -487,8 +500,10 @@ ObjCMethodFamily Selector::getMethodFamilyImpl(Selector sel) {
if (name == "self") return OMF_self;
if (name == "initialize") return OMF_initialize;
}
-
- if (name == "performSelector") return OMF_performSelector;
+
+ if (name == "performSelector" || name == "performSelectorInBackground" ||
+ name == "performSelectorOnMainThread")
+ return OMF_performSelector;
// The other method families may begin with a prefix of underscores.
while (!name.empty() && name.front() == '_')
diff --git a/lib/Basic/LangOptions.cpp b/lib/Basic/LangOptions.cpp
index ff10a773a97c..c8a774311efe 100644
--- a/lib/Basic/LangOptions.cpp
+++ b/lib/Basic/LangOptions.cpp
@@ -33,6 +33,8 @@ void LangOptions::resetNonModularOptions() {
// sanitizer options (this affects __has_feature(address_sanitizer) etc).
Sanitize.clear();
SanitizerBlacklistFiles.clear();
+ XRayAlwaysInstrumentFiles.clear();
+ XRayNeverInstrumentFiles.clear();
CurrentModule.clear();
IsHeaderFile = false;
diff --git a/lib/Basic/MemoryBufferCache.cpp b/lib/Basic/MemoryBufferCache.cpp
new file mode 100644
index 000000000000..c1fc571ec9b3
--- /dev/null
+++ b/lib/Basic/MemoryBufferCache.cpp
@@ -0,0 +1,48 @@
+//===- MemoryBufferCache.cpp - Cache for loaded memory buffers ------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/MemoryBufferCache.h"
+#include "llvm/Support/MemoryBuffer.h"
+
+using namespace clang;
+
+llvm::MemoryBuffer &
+MemoryBufferCache::addBuffer(llvm::StringRef Filename,
+ std::unique_ptr<llvm::MemoryBuffer> Buffer) {
+ auto Insertion =
+ Buffers.insert({Filename, BufferEntry{std::move(Buffer), NextIndex++}});
+ assert(Insertion.second && "Already has a buffer");
+ return *Insertion.first->second.Buffer;
+}
+
+llvm::MemoryBuffer *MemoryBufferCache::lookupBuffer(llvm::StringRef Filename) {
+ auto I = Buffers.find(Filename);
+ if (I == Buffers.end())
+ return nullptr;
+ return I->second.Buffer.get();
+}
+
+bool MemoryBufferCache::isBufferFinal(llvm::StringRef Filename) {
+ auto I = Buffers.find(Filename);
+ if (I == Buffers.end())
+ return false;
+ return I->second.Index < FirstRemovableIndex;
+}
+
+bool MemoryBufferCache::tryToRemoveBuffer(llvm::StringRef Filename) {
+ auto I = Buffers.find(Filename);
+ assert(I != Buffers.end() && "No buffer to remove...");
+ if (I->second.Index < FirstRemovableIndex)
+ return true;
+
+ Buffers.erase(I);
+ return false;
+}
+
+void MemoryBufferCache::finalizeCurrentBuffers() {
+  FirstRemovableIndex = NextIndex;
+}
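A hedged usage sketch for the new cache, inferred only from the functions shown above (assumes the llvm/clang headers are available and runs inside a function body):

MemoryBufferCache Cache;
Cache.addBuffer("a.pcm", llvm::MemoryBuffer::getMemBuffer("contents"));
assert(Cache.lookupBuffer("a.pcm") != nullptr);
assert(!Cache.isBufferFinal("a.pcm"));     // added after the last finalize
Cache.finalizeCurrentBuffers();            // pin everything added so far
assert(Cache.isBufferFinal("a.pcm"));
assert(Cache.tryToRemoveBuffer("a.pcm"));  // true: final, so it was kept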
diff --git a/lib/Basic/Module.cpp b/lib/Basic/Module.cpp
index 80bbc24f3db3..a6fd931cb174 100644
--- a/lib/Basic/Module.cpp
+++ b/lib/Basic/Module.cpp
@@ -27,7 +27,7 @@ using namespace clang;
Module::Module(StringRef Name, SourceLocation DefinitionLoc, Module *Parent,
bool IsFramework, bool IsExplicit, unsigned VisibilityID)
: Name(Name), DefinitionLoc(DefinitionLoc), Parent(Parent), Directory(),
- Umbrella(), Signature(0), ASTFile(nullptr), VisibilityID(VisibilityID),
+ Umbrella(), ASTFile(nullptr), VisibilityID(VisibilityID),
IsMissingRequirement(false), HasIncompatibleModuleFile(false),
IsAvailable(true), IsFromModuleFile(false), IsFramework(IsFramework),
IsExplicit(IsExplicit), IsSystem(false), IsExternC(false),
diff --git a/lib/Basic/OpenMPKinds.cpp b/lib/Basic/OpenMPKinds.cpp
index 905c3693d378..5a8bb61eaadf 100644
--- a/lib/Basic/OpenMPKinds.cpp
+++ b/lib/Basic/OpenMPKinds.cpp
@@ -863,3 +863,76 @@ bool clang::isOpenMPLoopBoundSharingDirective(OpenMPDirectiveKind Kind) {
Kind == OMPD_target_teams_distribute_parallel_for_simd ||
Kind == OMPD_target_teams_distribute_simd;
}
+
+void clang::getOpenMPCaptureRegions(
+ SmallVectorImpl<OpenMPDirectiveKind> &CaptureRegions,
+ OpenMPDirectiveKind DKind) {
+ assert(DKind <= OMPD_unknown);
+ switch (DKind) {
+ case OMPD_parallel:
+ case OMPD_parallel_for:
+ case OMPD_parallel_for_simd:
+ case OMPD_parallel_sections:
+ case OMPD_distribute_parallel_for:
+ CaptureRegions.push_back(OMPD_parallel);
+ break;
+ case OMPD_target_teams:
+ CaptureRegions.push_back(OMPD_target);
+ CaptureRegions.push_back(OMPD_teams);
+ break;
+ case OMPD_teams:
+ case OMPD_simd:
+ case OMPD_for:
+ case OMPD_for_simd:
+ case OMPD_sections:
+ case OMPD_section:
+ case OMPD_single:
+ case OMPD_master:
+ case OMPD_critical:
+ case OMPD_taskgroup:
+ case OMPD_distribute:
+ case OMPD_ordered:
+ case OMPD_atomic:
+ case OMPD_target_data:
+ case OMPD_target:
+ case OMPD_target_parallel_for:
+ case OMPD_target_parallel_for_simd:
+ case OMPD_target_simd:
+ case OMPD_task:
+ case OMPD_taskloop:
+ case OMPD_taskloop_simd:
+ case OMPD_distribute_parallel_for_simd:
+ case OMPD_distribute_simd:
+ case OMPD_teams_distribute:
+ case OMPD_teams_distribute_simd:
+ case OMPD_teams_distribute_parallel_for_simd:
+ case OMPD_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute:
+ case OMPD_target_teams_distribute_parallel_for:
+ case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_distribute_simd:
+ CaptureRegions.push_back(DKind);
+ break;
+ case OMPD_target_parallel:
+ CaptureRegions.push_back(OMPD_target);
+ CaptureRegions.push_back(OMPD_parallel);
+ break;
+ case OMPD_threadprivate:
+ case OMPD_taskyield:
+ case OMPD_barrier:
+ case OMPD_taskwait:
+ case OMPD_cancellation_point:
+ case OMPD_cancel:
+ case OMPD_flush:
+ case OMPD_target_enter_data:
+ case OMPD_target_exit_data:
+ case OMPD_declare_reduction:
+ case OMPD_declare_simd:
+ case OMPD_declare_target:
+ case OMPD_end_declare_target:
+ case OMPD_target_update:
+ llvm_unreachable("OpenMP Directive is not allowed");
+ case OMPD_unknown:
+ llvm_unreachable("Unknown OpenMP directive");
+ }
+}
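Combined constructs decompose into one capture region per constituent directive, which is the point of the helper above. A hypothetical call, using only names visible in this hunk:

llvm::SmallVector<OpenMPDirectiveKind, 4> Regions;
getOpenMPCaptureRegions(Regions, OMPD_target_parallel);
// Regions now holds {OMPD_target, OMPD_parallel}; a plain 'parallel'
// directive would instead yield the single region {OMPD_parallel}.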
diff --git a/lib/Basic/SourceManager.cpp b/lib/Basic/SourceManager.cpp
index 380ca373e69b..156aa0b11f7d 100644
--- a/lib/Basic/SourceManager.cpp
+++ b/lib/Basic/SourceManager.cpp
@@ -1136,6 +1136,7 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
return 1;
}
+ const char *Buf = MemBuf->getBufferStart();
// See if we just calculated the line number for this FilePos and can use
// that to lookup the start of the line instead of searching for it.
if (LastLineNoFileIDQuery == FID &&
@@ -1144,11 +1145,19 @@ unsigned SourceManager::getColumnNumber(FileID FID, unsigned FilePos,
unsigned *SourceLineCache = LastLineNoContentCache->SourceLineCache;
unsigned LineStart = SourceLineCache[LastLineNoResult - 1];
unsigned LineEnd = SourceLineCache[LastLineNoResult];
- if (FilePos >= LineStart && FilePos < LineEnd)
+ if (FilePos >= LineStart && FilePos < LineEnd) {
+ // LineEnd is the LineStart of the next line.
+ // A line ends with separator LF or CR+LF on Windows.
+ // FilePos might point to the last separator,
+ // but we need a column number at most 1 + the last column.
+ if (FilePos + 1 == LineEnd && FilePos > LineStart) {
+ if (Buf[FilePos - 1] == '\r' || Buf[FilePos - 1] == '\n')
+ --FilePos;
+ }
return FilePos - LineStart + 1;
+ }
}
- const char *Buf = MemBuf->getBufferStart();
unsigned LineStart = FilePos;
while (LineStart && Buf[LineStart-1] != '\n' && Buf[LineStart-1] != '\r')
--LineStart;
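The clamp above only fires when the queried position sits on a line's trailing separator. A self-contained sketch of the same arithmetic with a worked CR+LF case (illustrative only, not the clang function):

#include <cassert>

// Column of FilePos within [LineStart, LineEnd), stepping a position that
// lands on the trailing separator back onto the last visible column.
unsigned columnOf(const char *Buf, unsigned LineStart, unsigned LineEnd,
                  unsigned FilePos) {
  if (FilePos + 1 == LineEnd && FilePos > LineStart &&
      (Buf[FilePos - 1] == '\r' || Buf[FilePos - 1] == '\n'))
    --FilePos;
  return FilePos - LineStart + 1;
}

int main() {
  // "ab\r\n": the LF at offset 3 reports column 3 (the CR), not 4.
  assert(columnOf("ab\r\n", 0, 4, 3) == 3);
}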
diff --git a/lib/Basic/TargetInfo.cpp b/lib/Basic/TargetInfo.cpp
index b1b01e5f584f..e19404dc54cb 100644
--- a/lib/Basic/TargetInfo.cpp
+++ b/lib/Basic/TargetInfo.cpp
@@ -282,8 +282,9 @@ bool TargetInfo::isTypeSigned(IntType T) {
/// adjust - Set forced language options.
/// Apply changes to the target information with respect to certain
-/// language options which change the target configuration.
-void TargetInfo::adjust(const LangOptions &Opts) {
+/// language options which change the target configuration, and adjust
+/// the language options based on the target where applicable.
+void TargetInfo::adjust(LangOptions &Opts) {
if (Opts.NoBitFieldTypeAlign)
UseBitFieldTypeAlignment = false;
if (Opts.ShortWChar)
diff --git a/lib/Basic/Targets.cpp b/lib/Basic/Targets.cpp
index 1a95ff26816e..a457f6deee75 100644
--- a/lib/Basic/Targets.cpp
+++ b/lib/Basic/Targets.cpp
@@ -545,6 +545,8 @@ protected:
Builder.defineMacro("__ELF__");
if (Opts.POSIXThreads)
Builder.defineMacro("_REENTRANT");
+ if (this->HasFloat128)
+ Builder.defineMacro("__FLOAT128__");
}
public:
OpenBSDTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
@@ -552,11 +554,11 @@ public:
this->TLSSupported = false;
switch (Triple.getArch()) {
- default:
case llvm::Triple::x86:
case llvm::Triple::x86_64:
- case llvm::Triple::arm:
- case llvm::Triple::sparc:
+ this->HasFloat128 = true;
+ // FALLTHROUGH
+ default:
this->MCountName = "__mcount";
break;
case llvm::Triple::mips64:
@@ -886,6 +888,7 @@ class PPCTargetInfo : public TargetInfo {
std::string CPU;
// Target cpu features.
+ bool HasAltivec;
bool HasVSX;
bool HasP8Vector;
bool HasP8Crypto;
@@ -901,9 +904,10 @@ protected:
public:
PPCTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
- : TargetInfo(Triple), HasVSX(false), HasP8Vector(false),
+ : TargetInfo(Triple), HasAltivec(false), HasVSX(false), HasP8Vector(false),
HasP8Crypto(false), HasDirectMove(false), HasQPX(false), HasHTM(false),
HasBPERMD(false), HasExtDiv(false), HasP9Vector(false) {
+ SuitableAlign = 128;
SimdDefaultAlign = 128;
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::PPCDoubleDouble();
@@ -930,6 +934,13 @@ public:
ArchDefineA2q = 1 << 15
} ArchDefineTypes;
+ // Set the AltiVec language option based on whether the target enables it.
+ void adjust(LangOptions &Opts) override {
+ if (HasAltivec)
+ Opts.AltiVec = 1;
+ TargetInfo::adjust(Opts);
+ }
+
// Note: GCC recognizes the following additional cpus:
// 401, 403, 405, 405fp, 440fp, 464, 464fp, 476, 476fp, 505, 740, 801,
// 821, 823, 8540, 8548, e300c2, e300c3, e500mc64, e6500, 860, cell,
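The adjust() override above exercises the new two-way hook from the TargetInfo::adjust signature change earlier in this patch: targets may now write language options back, not just read them. A simplified sketch of the pattern, reduced to the bare hook:

struct LangOpts { bool AltiVec = false; };

struct Target {
  virtual ~Target() = default;
  virtual void adjust(LangOpts &Opts) { /* common adjustments */ }
};

struct PPC : Target {
  bool HasAltivec = false;
  void adjust(LangOpts &Opts) override {
    if (HasAltivec)
      Opts.AltiVec = true;  // a target feature implies the language extension
    Target::adjust(Opts);   // then apply the generic adjustments
  }
};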
@@ -1164,7 +1175,9 @@ const Builtin::Info PPCTargetInfo::BuiltinInfo[] = {
bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
for (const auto &Feature : Features) {
- if (Feature == "+vsx") {
+ if (Feature == "+altivec") {
+ HasAltivec = true;
+ } else if (Feature == "+vsx") {
HasVSX = true;
} else if (Feature == "+bpermd") {
HasBPERMD = true;
@@ -1224,82 +1237,100 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ABI == "elfv2")
Builder.defineMacro("_CALL_ELF", "2");
+ // This typically is only needed for a new enough linker (bfd >= 2.16.2 or
+ // gold), but our support post-dates this and it should work on all 64-bit
+ // ppc linux platforms. It is guaranteed to work on all elfv2 platforms.
+ if (getTriple().getOS() == llvm::Triple::Linux && PointerWidth == 64)
+ Builder.defineMacro("_CALL_LINUX", "1");
+
// Subtarget options.
Builder.defineMacro("__NATURAL_ALIGNMENT__");
Builder.defineMacro("__REGISTER_PREFIX__", "");
// FIXME: Should be controlled by command line option.
- if (LongDoubleWidth == 128)
+ if (LongDoubleWidth == 128) {
Builder.defineMacro("__LONG_DOUBLE_128__");
-
- if (Opts.AltiVec) {
- Builder.defineMacro("__VEC__", "10206");
- Builder.defineMacro("__ALTIVEC__");
+ Builder.defineMacro("__LONGDOUBLE128");
}
+ // Define this for elfv2 (64-bit only) or 64-bit darwin.
+ if (ABI == "elfv2" ||
+ (getTriple().getOS() == llvm::Triple::Darwin && PointerWidth == 64))
+ Builder.defineMacro("__STRUCT_PARM_ALIGN__", "16");
+
// CPU identification.
- ArchDefineTypes defs = (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
- .Case("440", ArchDefineName)
- .Case("450", ArchDefineName | ArchDefine440)
- .Case("601", ArchDefineName)
- .Case("602", ArchDefineName | ArchDefinePpcgr)
- .Case("603", ArchDefineName | ArchDefinePpcgr)
- .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
- .Case("604", ArchDefineName | ArchDefinePpcgr)
- .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
- .Case("620", ArchDefineName | ArchDefinePpcgr)
- .Case("630", ArchDefineName | ArchDefinePpcgr)
- .Case("7400", ArchDefineName | ArchDefinePpcgr)
- .Case("7450", ArchDefineName | ArchDefinePpcgr)
- .Case("750", ArchDefineName | ArchDefinePpcgr)
- .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("a2", ArchDefineA2)
- .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
- .Case("pwr3", ArchDefinePpcgr)
- .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6
- | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x
- | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("pwr9", ArchDefineName | ArchDefinePwr8 | ArchDefinePwr7
- | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power3", ArchDefinePpcgr)
- .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6
- | ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4
- | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x
- | ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5
- | ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
- .Case("power9", ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7
- | ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x
- | ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr
- | ArchDefinePpcsq)
- .Default(ArchDefineNone);
+ ArchDefineTypes defs =
+ (ArchDefineTypes)llvm::StringSwitch<int>(CPU)
+ .Case("440", ArchDefineName)
+ .Case("450", ArchDefineName | ArchDefine440)
+ .Case("601", ArchDefineName)
+ .Case("602", ArchDefineName | ArchDefinePpcgr)
+ .Case("603", ArchDefineName | ArchDefinePpcgr)
+ .Case("603e", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("603ev", ArchDefineName | ArchDefine603 | ArchDefinePpcgr)
+ .Case("604", ArchDefineName | ArchDefinePpcgr)
+ .Case("604e", ArchDefineName | ArchDefine604 | ArchDefinePpcgr)
+ .Case("620", ArchDefineName | ArchDefinePpcgr)
+ .Case("630", ArchDefineName | ArchDefinePpcgr)
+ .Case("7400", ArchDefineName | ArchDefinePpcgr)
+ .Case("7450", ArchDefineName | ArchDefinePpcgr)
+ .Case("750", ArchDefineName | ArchDefinePpcgr)
+ .Case("970", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("a2", ArchDefineA2)
+ .Case("a2q", ArchDefineName | ArchDefineA2 | ArchDefineA2q)
+ .Case("pwr3", ArchDefinePpcgr)
+ .Case("pwr4", ArchDefineName | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr5", ArchDefineName | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("pwr5x", ArchDefineName | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6", ArchDefineName | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr6x", ArchDefineName | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("pwr7", ArchDefineName | ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr8", ArchDefineName | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("pwr9", ArchDefineName | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power3", ArchDefinePpcgr)
+ .Case("power4", ArchDefinePwr4 | ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power5", ArchDefinePwr5 | ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power5x", ArchDefinePwr5x | ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power6", ArchDefinePwr6 | ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power6x", ArchDefinePwr6x | ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power7", ArchDefinePwr7 | ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ .Case("power8", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Case("power9", ArchDefinePwr9 | ArchDefinePwr8 | ArchDefinePwr7 |
+ ArchDefinePwr6x | ArchDefinePwr6 |
+ ArchDefinePwr5x | ArchDefinePwr5 |
+ ArchDefinePwr4 | ArchDefinePpcgr |
+ ArchDefinePpcsq)
+ // powerpc64le automatically defaults to at least power8.
+ .Case("ppc64le", ArchDefinePwr8 | ArchDefinePwr7 | ArchDefinePwr6x |
+ ArchDefinePwr6 | ArchDefinePwr5x |
+ ArchDefinePwr5 | ArchDefinePwr4 |
+ ArchDefinePpcgr | ArchDefinePpcsq)
+ .Default(ArchDefineNone);
if (defs & ArchDefineName)
Builder.defineMacro(Twine("_ARCH_", StringRef(CPU).upper()));
@@ -1343,6 +1374,10 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__TOS_BGQ__");
}
+ if (HasAltivec) {
+ Builder.defineMacro("__VEC__", "10206");
+ Builder.defineMacro("__ALTIVEC__");
+ }
if (HasVSX)
Builder.defineMacro("__VSX__");
if (HasP8Vector)
@@ -1362,6 +1397,9 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
if (PointerWidth == 64)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ // We have support for the bswap intrinsics, so we can define this.
+ Builder.defineMacro("__HAVE_BSWAP__", "1");
+
// FIXME: The following are not yet generated here by Clang, but are
// generated by GCC:
//
@@ -1374,8 +1412,6 @@ void PPCTargetInfo::getTargetDefines(const LangOptions &Opts,
// __RSQRTEF__
// _SOFT_DOUBLE_
// __NO_LWSYNC__
- // __HAVE_BSWAP__
- // __LONGDOUBLE128
// __CMODEL_MEDIUM__
// __CMODEL_LARGE__
// _CALL_SYSV
@@ -1481,6 +1517,11 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr8", true)
.Case("pwr7", true)
.Default(false);
+ Features["htm"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
if (!ppcUserFeaturesCheck(Diags, FeaturesVec))
return false;
@@ -1490,44 +1531,47 @@ bool PPCTargetInfo::initFeatureMap(
bool PPCTargetInfo::hasFeature(StringRef Feature) const {
return llvm::StringSwitch<bool>(Feature)
- .Case("powerpc", true)
- .Case("vsx", HasVSX)
- .Case("power8-vector", HasP8Vector)
- .Case("crypto", HasP8Crypto)
- .Case("direct-move", HasDirectMove)
- .Case("qpx", HasQPX)
- .Case("htm", HasHTM)
- .Case("bpermd", HasBPERMD)
- .Case("extdiv", HasExtDiv)
- .Case("float128", HasFloat128)
- .Case("power9-vector", HasP9Vector)
- .Default(false);
+ .Case("powerpc", true)
+ .Case("altivec", HasAltivec)
+ .Case("vsx", HasVSX)
+ .Case("power8-vector", HasP8Vector)
+ .Case("crypto", HasP8Crypto)
+ .Case("direct-move", HasDirectMove)
+ .Case("qpx", HasQPX)
+ .Case("htm", HasHTM)
+ .Case("bpermd", HasBPERMD)
+ .Case("extdiv", HasExtDiv)
+ .Case("float128", HasFloat128)
+ .Case("power9-vector", HasP9Vector)
+ .Default(false);
}
void PPCTargetInfo::setFeatureEnabled(llvm::StringMap<bool> &Features,
StringRef Name, bool Enabled) const {
- // If we're enabling direct-move or power8-vector go ahead and enable vsx
- // as well. Do the inverse if we're disabling vsx. We'll diagnose any user
- // incompatible options.
if (Enabled) {
- if (Name == "direct-move" ||
- Name == "power8-vector" ||
- Name == "float128" ||
- Name == "power9-vector") {
- // power9-vector is really a superset of power8-vector so encode that.
- Features[Name] = Features["vsx"] = true;
- if (Name == "power9-vector")
- Features["power8-vector"] = true;
- } else {
- Features[Name] = true;
- }
+ // If we're enabling any of the vsx-based features, then enable vsx and
+ // altivec as well. We'll diagnose any problems later.
+ bool FeatureHasVSX = llvm::StringSwitch<bool>(Name)
+ .Case("vsx", true)
+ .Case("direct-move", true)
+ .Case("power8-vector", true)
+ .Case("power9-vector", true)
+ .Case("float128", true)
+ .Default(false);
+ if (FeatureHasVSX)
+ Features["vsx"] = Features["altivec"] = true;
+ if (Name == "power9-vector")
+ Features["power8-vector"] = true;
+ Features[Name] = true;
} else {
- if (Name == "vsx") {
- Features[Name] = Features["direct-move"] = Features["power8-vector"] =
+ // If we're disabling altivec or vsx, go ahead and disable all of the
+ // vsx-based features.
+ if ((Name == "altivec") || (Name == "vsx"))
+ Features["vsx"] = Features["direct-move"] = Features["power8-vector"] =
Features["float128"] = Features["power9-vector"] = false;
- } else {
- Features[Name] = false;
- }
+ if (Name == "power8-vector")
+ Features["power9-vector"] = false;
+ Features[Name] = false;
}
}
@@ -1718,7 +1762,6 @@ public:
BoolWidth = BoolAlign = 32; //XXX support -mone-byte-bool?
PtrDiffType = SignedInt; // for http://llvm.org/bugs/show_bug.cgi?id=15726
LongLongAlign = 32;
- SuitableAlign = 128;
resetDataLayout("E-m:o-p:32:32-f64:32:64-n32");
}
BuiltinVaListKind getBuiltinVaListKind() const override {
@@ -1731,12 +1774,12 @@ public:
DarwinPPC64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: DarwinTargetInfo<PPC64TargetInfo>(Triple, Opts) {
HasAlignMac68kSupport = true;
- SuitableAlign = 128;
resetDataLayout("E-m:o-i64:64-n32:64");
}
};
static const unsigned NVPTXAddrSpaceMap[] = {
+ 0, // Default
1, // opencl_global
3, // opencl_local
4, // opencl_constant
@@ -1990,14 +2033,25 @@ ArrayRef<const char *> NVPTXTargetInfo::getGCCRegNames() const {
return llvm::makeArrayRef(GCCRegNames);
}
-static const unsigned AMDGPUAddrSpaceMap[] = {
- 1, // opencl_global
- 3, // opencl_local
- 2, // opencl_constant
- 4, // opencl_generic
- 1, // cuda_device
- 2, // cuda_constant
- 3 // cuda_shared
+static const LangAS::Map AMDGPUPrivateIsZeroMap = {
+ 4, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 4, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
+};
+static const LangAS::Map AMDGPUGenericIsZeroMap = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_generic
+ 1, // cuda_device
+ 2, // cuda_constant
+ 3 // cuda_shared
};
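Both tables above are plain arrays indexed by the language address-space enum, with the newly added Default slot first, so translating a language address space to a target one is a single indexed load. A sketch of the indexing pattern; the enumerator names are assumptions, not clang's exact definitions:

enum LangAddrSpace { LAS_Default, LAS_Global, LAS_Local, LAS_Constant };

constexpr unsigned PrivateIsZeroMap[] = {4, 1, 3, 2};
constexpr unsigned GenericIsZeroMap[] = {0, 1, 3, 2};

unsigned targetAddrSpace(bool GenericIsZero, LangAddrSpace AS) {
  return (GenericIsZero ? GenericIsZeroMap : PrivateIsZeroMap)[AS];
}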
// If you edit the description strings, make sure you update
@@ -2007,15 +2061,39 @@ static const char *const DataLayoutStringR600 =
"e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
-static const char *const DataLayoutStringSI =
+static const char *const DataLayoutStringSIPrivateIsZero =
"e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32"
"-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
"-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64";
+static const char *const DataLayoutStringSIGenericIsZero =
+ "e-p:64:64-p1:64:64-p2:64:64-p3:32:32-p4:32:32-p5:32:32"
+ "-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128"
+ "-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64-A5";
+
class AMDGPUTargetInfo final : public TargetInfo {
static const Builtin::Info BuiltinInfo[];
static const char * const GCCRegNames[];
+ struct AddrSpace {
+ unsigned Generic, Global, Local, Constant, Private;
+ AddrSpace(bool IsGenericZero_ = false) {
+ if (IsGenericZero_) {
+ Generic = 0;
+ Global = 1;
+ Local = 3;
+ Constant = 2;
+ Private = 5;
+ } else {
+ Generic = 4;
+ Global = 1;
+ Local = 3;
+ Constant = 2;
+ Private = 0;
+ }
+ }
+ };
+
/// \brief The GPU profiles supported by the AMDGPU target.
enum GPUKind {
GK_NONE,
@@ -2029,18 +2107,27 @@ class AMDGPUTargetInfo final : public TargetInfo {
GK_CAYMAN,
GK_GFX6,
GK_GFX7,
- GK_GFX8
+ GK_GFX8,
+ GK_GFX9
} GPU;
bool hasFP64:1;
bool hasFMAF:1;
bool hasLDEXPF:1;
- bool hasFullSpeedFP32Denorms:1;
+ const AddrSpace AS;
+
+ static bool hasFullSpeedFMAF32(StringRef GPUName) {
+ return parseAMDGCNName(GPUName) >= GK_GFX9;
+ }
static bool isAMDGCN(const llvm::Triple &TT) {
return TT.getArch() == llvm::Triple::amdgcn;
}
+ static bool isGenericZero(const llvm::Triple &TT) {
+ return TT.getEnvironmentName() == "amdgiz" ||
+ TT.getEnvironmentName() == "amdgizcl";
+ }
public:
AMDGPUTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple) ,
@@ -2048,17 +2135,21 @@ public:
hasFP64(false),
hasFMAF(false),
hasLDEXPF(false),
- hasFullSpeedFP32Denorms(false){
+ AS(isGenericZero(Triple)) {
if (getTriple().getArch() == llvm::Triple::amdgcn) {
hasFP64 = true;
hasFMAF = true;
hasLDEXPF = true;
}
-
+ auto IsGenericZero = isGenericZero(Triple);
resetDataLayout(getTriple().getArch() == llvm::Triple::amdgcn ?
- DataLayoutStringSI : DataLayoutStringR600);
+ (IsGenericZero ? DataLayoutStringSIGenericIsZero :
+ DataLayoutStringSIPrivateIsZero)
+ : DataLayoutStringR600);
+ assert(DataLayout->getAllocaAddrSpace() == AS.Private);
- AddrSpaceMap = &AMDGPUAddrSpaceMap;
+ AddrSpaceMap = IsGenericZero ? &AMDGPUGenericIsZeroMap :
+ &AMDGPUPrivateIsZeroMap;
UseAddrSpaceMapMangling = true;
}
@@ -2066,14 +2157,10 @@ public:
if (GPU <= GK_CAYMAN)
return 32;
- switch(AddrSpace) {
- default:
- return 64;
- case 0:
- case 3:
- case 5:
- return 32;
+ if (AddrSpace == AS.Private || AddrSpace == AS.Local) {
+ return 32;
}
+ return 64;
}
uint64_t getMaxPointerWidth() const override {
@@ -2113,15 +2200,16 @@ public:
for (auto &I : TargetOpts.FeaturesAsWritten) {
if (I == "+fp32-denormals" || I == "-fp32-denormals")
hasFP32Denormals = true;
- if (I == "+fp64-denormals" || I == "-fp64-denormals")
+ if (I == "+fp64-fp16-denormals" || I == "-fp64-fp16-denormals")
hasFP64Denormals = true;
}
if (!hasFP32Denormals)
- TargetOpts.Features.push_back((Twine(hasFullSpeedFP32Denorms &&
+ TargetOpts.Features.push_back(
+ (Twine(hasFullSpeedFMAF32(TargetOpts.CPU) &&
!CGOpts.FlushDenorm ? '+' : '-') + Twine("fp32-denormals")).str());
- // Always do not flush fp64 denorms.
+ // Never flush fp64 or fp16 denorms.
if (!hasFP64Denormals && hasFP64)
- TargetOpts.Features.push_back("+fp64-denormals");
+ TargetOpts.Features.push_back("+fp64-fp16-denormals");
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -2206,6 +2294,8 @@ public:
.Case("gfx803", GK_GFX8)
.Case("gfx804", GK_GFX8)
.Case("gfx810", GK_GFX8)
+ .Case("gfx900", GK_GFX9)
+ .Case("gfx901", GK_GFX9)
.Default(GK_NONE);
}
@@ -2248,6 +2338,33 @@ public:
return LangAS::opencl_constant;
}
+ /// \returns Target specific vtbl ptr address space.
+ unsigned getVtblPtrAddressSpace() const override {
+ // \todo: We currently have address spaces defined in the AMDGPU backend. It
+ // would be nice if we could use them here instead of bare numbers (the same
+ // applies to getDWARFAddressSpace).
+ return 2; // constant.
+ }
+
+ /// \returns If a target requires an address within a target specific address
+ /// space \p AddressSpace to be converted in order to be used, then return the
+ /// corresponding target specific DWARF address space.
+ ///
+ /// \returns Otherwise return None and no conversion will be emitted in the
+ /// DWARF.
+ Optional<unsigned> getDWARFAddressSpace(
+ unsigned AddressSpace) const override {
+ const unsigned DWARF_Private = 1;
+ const unsigned DWARF_Local = 2;
+ if (AddressSpace == AS.Private) {
+ return DWARF_Private;
+ } else if (AddressSpace == AS.Local) {
+ return DWARF_Local;
+ } else {
+ return None;
+ }
+ }
+
CallingConvCheckResult checkCallingConvention(CallingConv CC) const override {
switch (CC) {
default:
@@ -2262,7 +2379,7 @@ public:
// address space has value 0 but in private and local address space has
// value ~0.
uint64_t getNullPointerValue(unsigned AS) const override {
- return AS != LangAS::opencl_local && AS != 0 ? 0 : ~0;
+ return AS == LangAS::opencl_local ? ~0 : 0;
}
};
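The rewrite above folds a double-negative condition into one comparison: only the local address space keeps the all-ones null representation. A toy version of the resulting rule:

#include <cstdint>

// Null pointer bit pattern: all-ones in the local address space, zero
// everywhere else.
uint64_t nullPointerValue(bool IsLocal) {
  return IsLocal ? ~0ull : 0;
}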
@@ -2345,9 +2462,13 @@ bool AMDGPUTargetInfo::initFeatureMap(
case GK_GFX7:
break;
+ case GK_GFX9:
+ Features["gfx9-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX8:
Features["s-memrealtime"] = true;
Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
break;
case GK_NONE:
@@ -2489,11 +2610,10 @@ class X86TargetInfo : public TargetInfo {
bool HasXSAVEC = false;
bool HasXSAVES = false;
bool HasMWAITX = false;
+ bool HasCLZERO = false;
bool HasPKU = false;
bool HasCLFLUSHOPT = false;
- bool HasPCOMMIT = false;
bool HasCLWB = false;
- bool HasUMIP = false;
bool HasMOVBE = false;
bool HasPREFETCHWT1 = false;
@@ -3067,8 +3187,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512ifma", true);
setFeatureEnabledImpl(Features, "avx512vbmi", true);
setFeatureEnabledImpl(Features, "sha", true);
- setFeatureEnabledImpl(Features, "umip", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SkylakeServer:
setFeatureEnabledImpl(Features, "avx512f", true);
setFeatureEnabledImpl(Features, "avx512cd", true);
@@ -3076,44 +3195,43 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx512bw", true);
setFeatureEnabledImpl(Features, "avx512vl", true);
setFeatureEnabledImpl(Features, "pku", true);
- setFeatureEnabledImpl(Features, "pcommit", true);
setFeatureEnabledImpl(Features, "clwb", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SkylakeClient:
setFeatureEnabledImpl(Features, "xsavec", true);
setFeatureEnabledImpl(Features, "xsaves", true);
setFeatureEnabledImpl(Features, "mpx", true);
setFeatureEnabledImpl(Features, "sgx", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
- // FALLTHROUGH
+ setFeatureEnabledImpl(Features, "rtm", true);
+ LLVM_FALLTHROUGH;
case CK_Broadwell:
setFeatureEnabledImpl(Features, "rdseed", true);
setFeatureEnabledImpl(Features, "adx", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Haswell:
setFeatureEnabledImpl(Features, "avx2", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
- setFeatureEnabledImpl(Features, "rtm", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "movbe", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_IvyBridge:
setFeatureEnabledImpl(Features, "rdrnd", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "fsgsbase", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_SandyBridge:
setFeatureEnabledImpl(Features, "avx", true);
setFeatureEnabledImpl(Features, "xsave", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Westmere:
case CK_Silvermont:
setFeatureEnabledImpl(Features, "aes", true);
setFeatureEnabledImpl(Features, "pclmul", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_Nehalem:
setFeatureEnabledImpl(Features, "sse4.2", true);
setFeatureEnabledImpl(Features, "fxsr", true);
@@ -3173,7 +3291,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "sse4a", true);
setFeatureEnabledImpl(Features, "lzcnt", true);
setFeatureEnabledImpl(Features, "popcnt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_K8SSE3:
case CK_OpteronSSE3:
case CK_Athlon64SSE3:
@@ -3188,7 +3306,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BTVER1:
setFeatureEnabledImpl(Features, "ssse3", true);
setFeatureEnabledImpl(Features, "sse4a", true);
@@ -3205,6 +3323,7 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "clflushopt", true);
+ setFeatureEnabledImpl(Features, "clzero", true);
setFeatureEnabledImpl(Features, "cx16", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "fma", true);
@@ -3229,17 +3348,17 @@ bool X86TargetInfo::initFeatureMap(
setFeatureEnabledImpl(Features, "avx2", true);
setFeatureEnabledImpl(Features, "bmi2", true);
setFeatureEnabledImpl(Features, "mwaitx", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER3:
setFeatureEnabledImpl(Features, "fsgsbase", true);
setFeatureEnabledImpl(Features, "xsaveopt", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER2:
setFeatureEnabledImpl(Features, "bmi", true);
setFeatureEnabledImpl(Features, "fma", true);
setFeatureEnabledImpl(Features, "f16c", true);
setFeatureEnabledImpl(Features, "tbm", true);
- // FALLTHROUGH
+ LLVM_FALLTHROUGH;
case CK_BDVER1:
// xop implies avx, sse4a and fma4.
setFeatureEnabledImpl(Features, "xop", true);
@@ -3560,14 +3679,12 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasPKU = true;
} else if (Feature == "+clflushopt") {
HasCLFLUSHOPT = true;
- } else if (Feature == "+pcommit") {
- HasPCOMMIT = true;
} else if (Feature == "+clwb") {
HasCLWB = true;
- } else if (Feature == "+umip") {
- HasUMIP = true;
} else if (Feature == "+prefetchwt1") {
HasPREFETCHWT1 = true;
+ } else if (Feature == "+clzero") {
+ HasCLZERO = true;
}
X86SSEEnum Level = llvm::StringSwitch<X86SSEEnum>(Feature)
@@ -3885,6 +4002,18 @@ void X86TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__PKU__");
if (HasCX16)
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16");
+ if (HasCLFLUSHOPT)
+ Builder.defineMacro("__CLFLUSHOPT__");
+ if (HasCLWB)
+ Builder.defineMacro("__CLWB__");
+ if (HasMPX)
+ Builder.defineMacro("__MPX__");
+ if (HasSGX)
+ Builder.defineMacro("__SGX__");
+ if (HasPREFETCHWT1)
+ Builder.defineMacro("__PREFETCHWT1__");
+ if (HasCLZERO)
+ Builder.defineMacro("__CLZERO__");
// Each case falls through to the previous one here.
switch (SSELevel) {
@@ -3971,6 +4100,7 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("bmi2", HasBMI2)
.Case("clflushopt", HasCLFLUSHOPT)
.Case("clwb", HasCLWB)
+ .Case("clzero", HasCLZERO)
.Case("cx16", HasCX16)
.Case("f16c", HasF16C)
.Case("fma", HasFMA)
@@ -3984,7 +4114,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("movbe", HasMOVBE)
.Case("mpx", HasMPX)
.Case("pclmul", HasPCLMUL)
- .Case("pcommit", HasPCOMMIT)
.Case("pku", HasPKU)
.Case("popcnt", HasPOPCNT)
.Case("prefetchwt1", HasPREFETCHWT1)
@@ -4002,7 +4131,6 @@ bool X86TargetInfo::hasFeature(StringRef Feature) const {
.Case("sse4.2", SSELevel >= SSE42)
.Case("sse4a", XOPLevel >= SSE4A)
.Case("tbm", HasTBM)
- .Case("umip", HasUMIP)
.Case("x86", true)
.Case("x86_32", getTriple().getArch() == llvm::Triple::x86)
.Case("x86_64", getTriple().getArch() == llvm::Triple::x86_64)
@@ -5056,6 +5184,8 @@ class ARMTargetInfo : public TargetInfo {
return "7M";
case llvm::ARM::AK_ARMV7EM:
return "7EM";
+ case llvm::ARM::AK_ARMV7VE:
+ return "7VE";
case llvm::ARM::AK_ARMV8A:
return "8A";
case llvm::ARM::AK_ARMV8_1A:
@@ -5144,6 +5274,8 @@ public:
default:
if (Triple.getOS() == llvm::Triple::NetBSD)
setABI("apcs-gnu");
+ else if (Triple.getOS() == llvm::Triple::OpenBSD)
+ setABI("aapcs-linux");
else
setABI("aapcs");
break;
@@ -5925,7 +6057,8 @@ class AArch64TargetInfo : public TargetInfo {
public:
AArch64TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: TargetInfo(Triple), ABI("aapcs") {
- if (getTriple().getOS() == llvm::Triple::NetBSD) {
+ if (getTriple().getOS() == llvm::Triple::NetBSD ||
+ getTriple().getOS() == llvm::Triple::OpenBSD) {
WCharType = SignedInt;
// NetBSD apparently prefers consistency across ARM targets to consistency
@@ -5960,8 +6093,9 @@ public:
// AArch64 targets default to using the ARM C++ ABI.
TheCXXABI.set(TargetCXXABI::GenericAArch64);
- if (Triple.getOS() == llvm::Triple::Linux ||
- Triple.getOS() == llvm::Triple::UnknownOS)
+ if (Triple.getOS() == llvm::Triple::Linux)
+ this->MCountName = "\01_mcount";
+ else if (Triple.getOS() == llvm::Triple::UnknownOS)
this->MCountName = Opts.EABIVersion == "gnu" ? "\01_mcount" : "mcount";
}
@@ -6408,6 +6542,7 @@ public:
.Case("hexagonv5", "5")
.Case("hexagonv55", "55")
.Case("hexagonv60", "60")
+ .Case("hexagonv62", "62")
.Default(nullptr);
}
@@ -6452,6 +6587,9 @@ void HexagonTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__HEXAGON_ARCH__", "60");
Builder.defineMacro("__QDSP6_V60__");
Builder.defineMacro("__QDSP6_ARCH__", "60");
+ } else if (CPU == "hexagonv62") {
+ Builder.defineMacro("__HEXAGON_V62__");
+ Builder.defineMacro("__HEXAGON_ARCH__", "62");
}
if (hasFeature("hvx")) {
@@ -7037,6 +7175,15 @@ public:
Builder.defineMacro("__zarch__");
Builder.defineMacro("__LONG_DOUBLE_128__");
+ const std::string ISARev = llvm::StringSwitch<std::string>(CPU)
+ .Cases("arch8", "z10", "8")
+ .Cases("arch9", "z196", "9")
+ .Cases("arch10", "zEC12", "10")
+ .Cases("arch11", "z13", "11")
+ .Default("");
+ if (!ISARev.empty())
+ Builder.defineMacro("__ARCH__", ISARev);
+
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_1");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_2");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
@@ -7044,6 +7191,8 @@ public:
if (HasTransactionalExecution)
Builder.defineMacro("__HTM__");
+ if (HasVector)
+ Builder.defineMacro("__VX__");
if (Opts.ZVector)
Builder.defineMacro("__VEC__", "10301");
}
@@ -7268,6 +7417,7 @@ ArrayRef<const char *> MSP430TargetInfo::getGCCRegNames() const {
// publicly available in http://tce.cs.tut.fi
static const unsigned TCEOpenCLAddrSpaceMap[] = {
+ 0, // Default
3, // opencl_global
4, // opencl_local
5, // opencl_constant
@@ -7434,6 +7584,8 @@ class MipsTargetInfo : public TargetInfo {
bool IsMicromips;
bool IsNan2008;
bool IsSingleFloat;
+ bool IsNoABICalls;
+ bool CanUseBSDABICalls;
enum MipsFloatABI {
HardFloat, SoftFloat
} FloatABI;
@@ -7449,8 +7601,9 @@ protected:
public:
MipsTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
: TargetInfo(Triple), IsMips16(false), IsMicromips(false),
- IsNan2008(false), IsSingleFloat(false), FloatABI(HardFloat),
- DspRev(NoDSP), HasMSA(false), HasFP64(false) {
+ IsNan2008(false), IsSingleFloat(false), IsNoABICalls(false),
+ CanUseBSDABICalls(false), FloatABI(HardFloat), DspRev(NoDSP),
+ HasMSA(false), HasFP64(false) {
TheCXXABI.set(TargetCXXABI::GenericMIPS);
setABI((getTriple().getArch() == llvm::Triple::mips ||
@@ -7459,6 +7612,9 @@ public:
: "n64");
CPU = ABI == "o32" ? "mips32r2" : "mips64r2";
+
+ CanUseBSDABICalls = Triple.getOS() == llvm::Triple::FreeBSD ||
+ Triple.getOS() == llvm::Triple::OpenBSD;
}
bool isNaN2008Default() const {
@@ -7535,7 +7691,11 @@ public:
void setN64ABITypes() {
setN32N64ABITypes();
- Int64Type = SignedLong;
+ if (getTriple().getOS() == llvm::Triple::OpenBSD) {
+ Int64Type = SignedLongLong;
+ } else {
+ Int64Type = SignedLong;
+ }
IntMaxType = Int64Type;
LongWidth = LongAlign = 64;
PointerWidth = PointerAlign = 64;
@@ -7639,6 +7799,12 @@ public:
} else
llvm_unreachable("Invalid ABI.");
+ if (!IsNoABICalls) {
+ Builder.defineMacro("__mips_abicalls");
+ if (CanUseBSDABICalls)
+ Builder.defineMacro("__ABICALLS__");
+ }
+
Builder.defineMacro("__REGISTER_PREFIX__", "");
switch (FloatABI) {
@@ -7853,6 +8019,8 @@ public:
IsNan2008 = true;
else if (Feature == "-nan2008")
IsNan2008 = false;
+ else if (Feature == "+noabicalls")
+ IsNoABICalls = true;
}
setDataLayout();
@@ -8216,6 +8384,7 @@ const Builtin::Info Le64TargetInfo::BuiltinInfo[] = {
};
static const unsigned SPIRAddrSpaceMap[] = {
+ 0, // Default
1, // opencl_global
3, // opencl_local
2, // opencl_constant
@@ -8435,6 +8604,254 @@ public:
}
};
+/// Information about a specific microcontroller.
+struct MCUInfo {
+ const char *Name;
+ const char *DefineName;
+};
+
+// This list should be kept up-to-date with AVRDevices.td in LLVM.
+static ArrayRef<MCUInfo> AVRMcus = {
+ { "at90s1200", "__AVR_AT90S1200__" },
+ { "attiny11", "__AVR_ATtiny11__" },
+ { "attiny12", "__AVR_ATtiny12__" },
+ { "attiny15", "__AVR_ATtiny15__" },
+ { "attiny28", "__AVR_ATtiny28__" },
+ { "at90s2313", "__AVR_AT90S2313__" },
+ { "at90s2323", "__AVR_AT90S2323__" },
+ { "at90s2333", "__AVR_AT90S2333__" },
+ { "at90s2343", "__AVR_AT90S2343__" },
+ { "attiny22", "__AVR_ATtiny22__" },
+ { "attiny26", "__AVR_ATtiny26__" },
+ { "at86rf401", "__AVR_AT86RF401__" },
+ { "at90s4414", "__AVR_AT90S4414__" },
+ { "at90s4433", "__AVR_AT90S4433__" },
+ { "at90s4434", "__AVR_AT90S4434__" },
+ { "at90s8515", "__AVR_AT90S8515__" },
+ { "at90c8534", "__AVR_AT90c8534__" },
+ { "at90s8535", "__AVR_AT90S8535__" },
+ { "ata5272", "__AVR_ATA5272__" },
+ { "attiny13", "__AVR_ATtiny13__" },
+ { "attiny13a", "__AVR_ATtiny13A__" },
+ { "attiny2313", "__AVR_ATtiny2313__" },
+ { "attiny2313a", "__AVR_ATtiny2313A__" },
+ { "attiny24", "__AVR_ATtiny24__" },
+ { "attiny24a", "__AVR_ATtiny24A__" },
+ { "attiny4313", "__AVR_ATtiny4313__" },
+ { "attiny44", "__AVR_ATtiny44__" },
+ { "attiny44a", "__AVR_ATtiny44A__" },
+ { "attiny84", "__AVR_ATtiny84__" },
+ { "attiny84a", "__AVR_ATtiny84A__" },
+ { "attiny25", "__AVR_ATtiny25__" },
+ { "attiny45", "__AVR_ATtiny45__" },
+ { "attiny85", "__AVR_ATtiny85__" },
+ { "attiny261", "__AVR_ATtiny261__" },
+ { "attiny261a", "__AVR_ATtiny261A__" },
+ { "attiny461", "__AVR_ATtiny461__" },
+ { "attiny461a", "__AVR_ATtiny461A__" },
+ { "attiny861", "__AVR_ATtiny861__" },
+ { "attiny861a", "__AVR_ATtiny861A__" },
+ { "attiny87", "__AVR_ATtiny87__" },
+ { "attiny43u", "__AVR_ATtiny43U__" },
+ { "attiny48", "__AVR_ATtiny48__" },
+ { "attiny88", "__AVR_ATtiny88__" },
+ { "attiny828", "__AVR_ATtiny828__" },
+ { "at43usb355", "__AVR_AT43USB355__" },
+ { "at76c711", "__AVR_AT76C711__" },
+ { "atmega103", "__AVR_ATmega103__" },
+ { "at43usb320", "__AVR_AT43USB320__" },
+ { "attiny167", "__AVR_ATtiny167__" },
+ { "at90usb82", "__AVR_AT90USB82__" },
+ { "at90usb162", "__AVR_AT90USB162__" },
+ { "ata5505", "__AVR_ATA5505__" },
+ { "atmega8u2", "__AVR_ATmega8U2__" },
+ { "atmega16u2", "__AVR_ATmega16U2__" },
+ { "atmega32u2", "__AVR_ATmega32U2__" },
+ { "attiny1634", "__AVR_ATtiny1634__" },
+ { "atmega8", "__AVR_ATmega8__" },
+ { "ata6289", "__AVR_ATA6289__" },
+ { "atmega8a", "__AVR_ATmega8A__" },
+ { "ata6285", "__AVR_ATA6285__" },
+ { "ata6286", "__AVR_ATA6286__" },
+ { "atmega48", "__AVR_ATmega48__" },
+ { "atmega48a", "__AVR_ATmega48A__" },
+ { "atmega48pa", "__AVR_ATmega48PA__" },
+ { "atmega48p", "__AVR_ATmega48P__" },
+ { "atmega88", "__AVR_ATmega88__" },
+ { "atmega88a", "__AVR_ATmega88A__" },
+ { "atmega88p", "__AVR_ATmega88P__" },
+ { "atmega88pa", "__AVR_ATmega88PA__" },
+ { "atmega8515", "__AVR_ATmega8515__" },
+ { "atmega8535", "__AVR_ATmega8535__" },
+ { "atmega8hva", "__AVR_ATmega8HVA__" },
+ { "at90pwm1", "__AVR_AT90PWM1__" },
+ { "at90pwm2", "__AVR_AT90PWM2__" },
+ { "at90pwm2b", "__AVR_AT90PWM2B__" },
+ { "at90pwm3", "__AVR_AT90PWM3__" },
+ { "at90pwm3b", "__AVR_AT90PWM3B__" },
+ { "at90pwm81", "__AVR_AT90PWM81__" },
+ { "ata5790", "__AVR_ATA5790__" },
+ { "ata5795", "__AVR_ATA5795__" },
+ { "atmega16", "__AVR_ATmega16__" },
+ { "atmega16a", "__AVR_ATmega16A__" },
+ { "atmega161", "__AVR_ATmega161__" },
+ { "atmega162", "__AVR_ATmega162__" },
+ { "atmega163", "__AVR_ATmega163__" },
+ { "atmega164a", "__AVR_ATmega164A__" },
+ { "atmega164p", "__AVR_ATmega164P__" },
+ { "atmega164pa", "__AVR_ATmega164PA__" },
+ { "atmega165", "__AVR_ATmega165__" },
+ { "atmega165a", "__AVR_ATmega165A__" },
+ { "atmega165p", "__AVR_ATmega165P__" },
+ { "atmega165pa", "__AVR_ATmega165PA__" },
+ { "atmega168", "__AVR_ATmega168__" },
+ { "atmega168a", "__AVR_ATmega168A__" },
+ { "atmega168p", "__AVR_ATmega168P__" },
+ { "atmega168pa", "__AVR_ATmega168PA__" },
+ { "atmega169", "__AVR_ATmega169__" },
+ { "atmega169a", "__AVR_ATmega169A__" },
+ { "atmega169p", "__AVR_ATmega169P__" },
+ { "atmega169pa", "__AVR_ATmega169PA__" },
+ { "atmega32", "__AVR_ATmega32__" },
+ { "atmega32a", "__AVR_ATmega32A__" },
+ { "atmega323", "__AVR_ATmega323__" },
+ { "atmega324a", "__AVR_ATmega324A__" },
+ { "atmega324p", "__AVR_ATmega324P__" },
+ { "atmega324pa", "__AVR_ATmega324PA__" },
+ { "atmega325", "__AVR_ATmega325__" },
+ { "atmega325a", "__AVR_ATmega325A__" },
+ { "atmega325p", "__AVR_ATmega325P__" },
+ { "atmega325pa", "__AVR_ATmega325PA__" },
+ { "atmega3250", "__AVR_ATmega3250__" },
+ { "atmega3250a", "__AVR_ATmega3250A__" },
+ { "atmega3250p", "__AVR_ATmega3250P__" },
+ { "atmega3250pa", "__AVR_ATmega3250PA__" },
+ { "atmega328", "__AVR_ATmega328__" },
+ { "atmega328p", "__AVR_ATmega328P__" },
+ { "atmega329", "__AVR_ATmega329__" },
+ { "atmega329a", "__AVR_ATmega329A__" },
+ { "atmega329p", "__AVR_ATmega329P__" },
+ { "atmega329pa", "__AVR_ATmega329PA__" },
+ { "atmega3290", "__AVR_ATmega3290__" },
+ { "atmega3290a", "__AVR_ATmega3290A__" },
+ { "atmega3290p", "__AVR_ATmega3290P__" },
+ { "atmega3290pa", "__AVR_ATmega3290PA__" },
+ { "atmega406", "__AVR_ATmega406__" },
+ { "atmega64", "__AVR_ATmega64__" },
+ { "atmega64a", "__AVR_ATmega64A__" },
+ { "atmega640", "__AVR_ATmega640__" },
+ { "atmega644", "__AVR_ATmega644__" },
+ { "atmega644a", "__AVR_ATmega644A__" },
+ { "atmega644p", "__AVR_ATmega644P__" },
+ { "atmega644pa", "__AVR_ATmega644PA__" },
+ { "atmega645", "__AVR_ATmega645__" },
+ { "atmega645a", "__AVR_ATmega645A__" },
+ { "atmega645p", "__AVR_ATmega645P__" },
+ { "atmega649", "__AVR_ATmega649__" },
+ { "atmega649a", "__AVR_ATmega649A__" },
+ { "atmega649p", "__AVR_ATmega649P__" },
+ { "atmega6450", "__AVR_ATmega6450__" },
+ { "atmega6450a", "__AVR_ATmega6450A__" },
+ { "atmega6450p", "__AVR_ATmega6450P__" },
+ { "atmega6490", "__AVR_ATmega6490__" },
+ { "atmega6490a", "__AVR_ATmega6490A__" },
+ { "atmega6490p", "__AVR_ATmega6490P__" },
+ { "atmega64rfr2", "__AVR_ATmega64RFR2__" },
+ { "atmega644rfr2", "__AVR_ATmega644RFR2__" },
+ { "atmega16hva", "__AVR_ATmega16HVA__" },
+ { "atmega16hva2", "__AVR_ATmega16HVA2__" },
+ { "atmega16hvb", "__AVR_ATmega16HVB__" },
+ { "atmega16hvbrevb", "__AVR_ATmega16HVBREVB__" },
+ { "atmega32hvb", "__AVR_ATmega32HVB__" },
+ { "atmega32hvbrevb", "__AVR_ATmega32HVBREVB__" },
+ { "atmega64hve", "__AVR_ATmega64HVE__" },
+ { "at90can32", "__AVR_AT90CAN32__" },
+ { "at90can64", "__AVR_AT90CAN64__" },
+ { "at90pwm161", "__AVR_AT90PWM161__" },
+ { "at90pwm216", "__AVR_AT90PWM216__" },
+ { "at90pwm316", "__AVR_AT90PWM316__" },
+ { "atmega32c1", "__AVR_ATmega32C1__" },
+ { "atmega64c1", "__AVR_ATmega64C1__" },
+ { "atmega16m1", "__AVR_ATmega16M1__" },
+ { "atmega32m1", "__AVR_ATmega32M1__" },
+ { "atmega64m1", "__AVR_ATmega64M1__" },
+ { "atmega16u4", "__AVR_ATmega16U4__" },
+ { "atmega32u4", "__AVR_ATmega32U4__" },
+ { "atmega32u6", "__AVR_ATmega32U6__" },
+ { "at90usb646", "__AVR_AT90USB646__" },
+ { "at90usb647", "__AVR_AT90USB647__" },
+ { "at90scr100", "__AVR_AT90SCR100__" },
+ { "at94k", "__AVR_AT94K__" },
+ { "m3000", "__AVR_AT000__" },
+ { "atmega128", "__AVR_ATmega128__" },
+ { "atmega128a", "__AVR_ATmega128A__" },
+ { "atmega1280", "__AVR_ATmega1280__" },
+ { "atmega1281", "__AVR_ATmega1281__" },
+ { "atmega1284", "__AVR_ATmega1284__" },
+ { "atmega1284p", "__AVR_ATmega1284P__" },
+ { "atmega128rfa1", "__AVR_ATmega128RFA1__" },
+ { "atmega128rfr2", "__AVR_ATmega128RFR2__" },
+ { "atmega1284rfr2", "__AVR_ATmega1284RFR2__" },
+ { "at90can128", "__AVR_AT90CAN128__" },
+ { "at90usb1286", "__AVR_AT90USB1286__" },
+ { "at90usb1287", "__AVR_AT90USB1287__" },
+ { "atmega2560", "__AVR_ATmega2560__" },
+ { "atmega2561", "__AVR_ATmega2561__" },
+ { "atmega256rfr2", "__AVR_ATmega256RFR2__" },
+ { "atmega2564rfr2", "__AVR_ATmega2564RFR2__" },
+ { "atxmega16a4", "__AVR_ATxmega16A4__" },
+ { "atxmega16a4u", "__AVR_ATxmega16a4U__" },
+ { "atxmega16c4", "__AVR_ATxmega16C4__" },
+ { "atxmega16d4", "__AVR_ATxmega16D4__" },
+ { "atxmega32a4", "__AVR_ATxmega32A4__" },
+ { "atxmega32a4u", "__AVR_ATxmega32A4U__" },
+ { "atxmega32c4", "__AVR_ATxmega32C4__" },
+ { "atxmega32d4", "__AVR_ATxmega32D4__" },
+ { "atxmega32e5", "__AVR_ATxmega32E5__" },
+ { "atxmega16e5", "__AVR_ATxmega16E5__" },
+ { "atxmega8e5", "__AVR_ATxmega8E5__" },
+ { "atxmega32x1", "__AVR_ATxmega32X1__" },
+ { "atxmega64a3", "__AVR_ATxmega64A3__" },
+ { "atxmega64a3u", "__AVR_ATxmega64A3U__" },
+ { "atxmega64a4u", "__AVR_ATxmega64A4U__" },
+ { "atxmega64b1", "__AVR_ATxmega64B1__" },
+ { "atxmega64b3", "__AVR_ATxmega64B3__" },
+ { "atxmega64c3", "__AVR_ATxmega64C3__" },
+ { "atxmega64d3", "__AVR_ATxmega64D3__" },
+ { "atxmega64d4", "__AVR_ATxmega64D4__" },
+ { "atxmega64a1", "__AVR_ATxmega64A1__" },
+ { "atxmega64a1u", "__AVR_ATxmega64A1U__" },
+ { "atxmega128a3", "__AVR_ATxmega128A3__" },
+ { "atxmega128a3u", "__AVR_ATxmega128A3U__" },
+ { "atxmega128b1", "__AVR_ATxmega128B1__" },
+ { "atxmega128b3", "__AVR_ATxmega128B3__" },
+ { "atxmega128c3", "__AVR_ATxmega128C3__" },
+ { "atxmega128d3", "__AVR_ATxmega128D3__" },
+ { "atxmega128d4", "__AVR_ATxmega128D4__" },
+ { "atxmega192a3", "__AVR_ATxmega192A3__" },
+ { "atxmega192a3u", "__AVR_ATxmega192A3U__" },
+ { "atxmega192c3", "__AVR_ATxmega192C3__" },
+ { "atxmega192d3", "__AVR_ATxmega192D3__" },
+ { "atxmega256a3", "__AVR_ATxmega256A3__" },
+ { "atxmega256a3u", "__AVR_ATxmega256A3U__" },
+ { "atxmega256a3b", "__AVR_ATxmega256A3B__" },
+ { "atxmega256a3bu", "__AVR_ATxmega256A3BU__" },
+ { "atxmega256c3", "__AVR_ATxmega256C3__" },
+ { "atxmega256d3", "__AVR_ATxmega256D3__" },
+ { "atxmega384c3", "__AVR_ATxmega384C3__" },
+ { "atxmega384d3", "__AVR_ATxmega384D3__" },
+ { "atxmega128a1", "__AVR_ATxmega128A1__" },
+ { "atxmega128a1u", "__AVR_ATxmega128A1U__" },
+ { "atxmega128a4u", "__AVR_ATxmega128a4U__" },
+ { "attiny4", "__AVR_ATtiny4__" },
+ { "attiny5", "__AVR_ATtiny5__" },
+ { "attiny9", "__AVR_ATtiny9__" },
+ { "attiny10", "__AVR_ATtiny10__" },
+ { "attiny20", "__AVR_ATtiny20__" },
+ { "attiny40", "__AVR_ATtiny40__" },
+ { "attiny102", "__AVR_ATtiny102__" },
+ { "attiny104", "__AVR_ATtiny104__" },
+};
// AVR Target
class AVRTargetInfo : public TargetInfo {
@@ -8476,7 +8893,17 @@ public:
void getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const override {
+ Builder.defineMacro("AVR");
+ Builder.defineMacro("__AVR");
Builder.defineMacro("__AVR__");
+
+ if (!this->CPU.empty()) {
+ auto It = std::find_if(AVRMcus.begin(), AVRMcus.end(),
+ [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
+
+ if (It != AVRMcus.end())
+ Builder.defineMacro(It->DefineName);
+ }
}
ArrayRef<Builtin::Info> getTargetBuiltins() const override {
@@ -8517,6 +8944,57 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &Info) const override {
+    // There aren't any multi-character AVR-specific constraints.
+ if (StringRef(Name).size() > 1) return false;
+
+ switch (*Name) {
+ default: return false;
+ case 'a': // Simple upper registers
+    case 'b': // Base pointer register pairs
+ case 'd': // Upper register
+ case 'l': // Lower registers
+ case 'e': // Pointer register pairs
+ case 'q': // Stack pointer register
+ case 'r': // Any register
+ case 'w': // Special upper register pairs
+ case 't': // Temporary register
+ case 'x': case 'X': // Pointer register pair X
+ case 'y': case 'Y': // Pointer register pair Y
+ case 'z': case 'Z': // Pointer register pair Z
+ Info.setAllowsRegister();
+ return true;
+ case 'I': // 6-bit positive integer constant
+ Info.setRequiresImmediate(0, 63);
+ return true;
+ case 'J': // 6-bit negative integer constant
+ Info.setRequiresImmediate(-63, 0);
+ return true;
+ case 'K': // Integer constant (Range: 2)
+ Info.setRequiresImmediate(2);
+ return true;
+ case 'L': // Integer constant (Range: 0)
+ Info.setRequiresImmediate(0);
+ return true;
+ case 'M': // 8-bit integer constant
+ Info.setRequiresImmediate(0, 0xff);
+ return true;
+ case 'N': // Integer constant (Range: -1)
+ Info.setRequiresImmediate(-1);
+ return true;
+ case 'O': // Integer constant (Range: 8, 16, 24)
+ Info.setRequiresImmediate({8, 16, 24});
+ return true;
+ case 'P': // Integer constant (Range: 1)
+ Info.setRequiresImmediate(1);
+ return true;
+ case 'R': // Integer constant (Range: -6 to 5)
+ Info.setRequiresImmediate(-6, 5);
+ return true;
+ case 'G': // Floating point constant
+ case 'Q': // A memory address based on Y or Z pointer with displacement.
+ return true;
+ }
+
return false;
}
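The constraint letters above mirror avr-gcc's machine constraints. As a hedged illustration (not part of the patch, and compilable only with an AVR-targeted compiler), inline asm like the following would pass this validation:

    // 'e' selects one of the X/Y/Z pointer register pairs; 'I' demands an
    // immediate in [0, 63], which setRequiresImmediate(0, 63) enforces.
    void advance(unsigned char **p) {
      asm volatile("adiw %0, %1" : "+e"(*p) : "I"(4));
    }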
@@ -8534,6 +9012,41 @@ public:
? (IsSigned ? SignedInt : UnsignedInt)
: TargetInfo::getLeastIntTypeByWidth(BitWidth, IsSigned);
}
+
+ bool setCPU(const std::string &Name) override {
+ bool IsFamily = llvm::StringSwitch<bool>(Name)
+ .Case("avr1", true)
+ .Case("avr2", true)
+ .Case("avr25", true)
+ .Case("avr3", true)
+ .Case("avr31", true)
+ .Case("avr35", true)
+ .Case("avr4", true)
+ .Case("avr5", true)
+ .Case("avr51", true)
+ .Case("avr6", true)
+ .Case("avrxmega1", true)
+ .Case("avrxmega2", true)
+ .Case("avrxmega3", true)
+ .Case("avrxmega4", true)
+ .Case("avrxmega5", true)
+ .Case("avrxmega6", true)
+ .Case("avrxmega7", true)
+ .Case("avrtiny", true)
+ .Default(false);
+
+ if (IsFamily) this->CPU = Name;
+
+ bool IsMCU = std::find_if(AVRMcus.begin(), AVRMcus.end(),
+ [&](const MCUInfo &Info) { return Info.Name == Name; }) != AVRMcus.end();
+
+ if (IsMCU) this->CPU = Name;
+
+ return IsFamily || IsMCU;
+ }
+
+protected:
+ std::string CPU;
};
} // end anonymous namespace
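The MCU support above performs two linear scans over the same table: getTargetDefines maps -mmcu=<name> to its __AVR_...__ macro, and setCPU accepts either a family name or a concrete MCU. A minimal standalone sketch of that lookup pattern, with a simplified MCUInfo and illustrative entries:

    #include <algorithm>
    #include <cstdio>
    #include <string>

    struct MCUInfo {
      const char *Name;
      const char *DefineName;
    };

    static const MCUInfo AVRMcus[] = {
        {"atmega328p", "__AVR_ATmega328P__"},
        {"attiny85", "__AVR_ATtiny85__"},
    };

    int main() {
      std::string CPU = "atmega328p"; // e.g. from -mmcu=atmega328p
      auto It = std::find_if(std::begin(AVRMcus), std::end(AVRMcus),
                             [&](const MCUInfo &I) { return CPU == I.Name; });
      if (It != std::end(AVRMcus))
        std::printf("#define %s 1\n", It->DefineName); // the CPU macro
      return 0;
    }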
@@ -8574,6 +9087,8 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<AArch64leTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
+ case llvm::Triple::OpenBSD:
+ return new OpenBSDTargetInfo<AArch64leTargetInfo>(Triple, Opts);
default:
return new AArch64leTargetInfo(Triple, Opts);
}
@@ -8604,8 +9119,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMleTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8642,8 +9155,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new LinuxTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::NetBSD:
return new NetBSDTargetInfo<ARMbeTargetInfo>(Triple, Opts);
case llvm::Triple::OpenBSD:
@@ -8879,8 +9390,6 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new BitrigI386TargetInfo(Triple, Opts);
case llvm::Triple::FreeBSD:
return new FreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
- case llvm::Triple::Fuchsia:
- return new FuchsiaTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::KFreeBSD:
return new KFreeBSDTargetInfo<X86_32TargetInfo>(Triple, Opts);
case llvm::Triple::Minix:
@@ -8976,11 +9485,19 @@ static TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new SPIR64TargetInfo(Triple, Opts);
}
case llvm::Triple::wasm32:
- if (!(Triple == llvm::Triple("wasm32-unknown-unknown")))
+ if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
+ Triple.getVendor() != llvm::Triple::UnknownVendor ||
+ Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
+ !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly32TargetInfo>(Triple, Opts);
case llvm::Triple::wasm64:
- if (!(Triple == llvm::Triple("wasm64-unknown-unknown")))
+ if (Triple.getSubArch() != llvm::Triple::NoSubArch ||
+ Triple.getVendor() != llvm::Triple::UnknownVendor ||
+ Triple.getOS() != llvm::Triple::UnknownOS ||
+ Triple.getEnvironment() != llvm::Triple::UnknownEnvironment ||
+ !(Triple.isOSBinFormatELF() || Triple.isOSBinFormatWasm()))
return nullptr;
return new WebAssemblyOSTargetInfo<WebAssembly64TargetInfo>(Triple, Opts);
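The rewritten wasm checks accept any triple whose non-arch components are all unknown and whose object format is ELF or Wasm, instead of string-comparing against a single spelling. A hedged sketch of the same predicate, assuming LLVM's Triple API:

    #include "llvm/ADT/Triple.h"
    #include <cassert>

    static bool isPlainWasm32(const llvm::Triple &T) {
      return T.getArch() == llvm::Triple::wasm32 &&
             T.getSubArch() == llvm::Triple::NoSubArch &&
             T.getVendor() == llvm::Triple::UnknownVendor &&
             T.getOS() == llvm::Triple::UnknownOS &&
             T.getEnvironment() == llvm::Triple::UnknownEnvironment &&
             (T.isOSBinFormatELF() || T.isOSBinFormatWasm());
    }

    int main() {
      assert(isPlainWasm32(llvm::Triple("wasm32-unknown-unknown")));
      assert(!isPlainWasm32(llvm::Triple("wasm32-unknown-linux"))); // OS pinned
      return 0;
    }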
diff --git a/lib/Basic/Version.cpp b/lib/Basic/Version.cpp
index 97e75a9cd45e..a1a67c2bc144 100644
--- a/lib/Basic/Version.cpp
+++ b/lib/Basic/Version.cpp
@@ -36,7 +36,7 @@ std::string getClangRepositoryPath() {
// If the SVN_REPOSITORY is empty, try to use the SVN keyword. This helps us
// pick up a tag in an SVN export, for example.
- StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/tags/RELEASE_400/final/lib/Basic/Version.cpp $");
+ StringRef SVNRepository("$URL: https://llvm.org/svn/llvm-project/cfe/trunk/lib/Basic/Version.cpp $");
if (URL.empty()) {
URL = SVNRepository.slice(SVNRepository.find(':'),
SVNRepository.find("/lib/Basic"));
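The slice() call trims the expanded $URL$ keyword down to the repository path. A rough equivalent in portable C++, for readers unfamiliar with StringRef::slice:

    #include <iostream>
    #include <string>

    int main() {
      std::string SVN = "$URL: https://llvm.org/svn/llvm-project/cfe/trunk"
                        "/lib/Basic/Version.cpp $";
      std::string::size_type Begin = SVN.find(':');
      std::string::size_type End = SVN.find("/lib/Basic");
      // Prints ": https://llvm.org/svn/llvm-project/cfe/trunk", exactly what
      // StringRef::slice(find(':'), find("/lib/Basic")) yields.
      std::cout << SVN.substr(Begin, End - Begin) << "\n";
      return 0;
    }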
diff --git a/lib/Basic/VirtualFileSystem.cpp b/lib/Basic/VirtualFileSystem.cpp
index 50fcb22faf53..f5db717866a9 100644
--- a/lib/Basic/VirtualFileSystem.cpp
+++ b/lib/Basic/VirtualFileSystem.cpp
@@ -27,13 +27,6 @@
#include <memory>
#include <utility>
-// For chdir.
-#ifdef LLVM_ON_WIN32
-# include <direct.h>
-#else
-# include <unistd.h>
-#endif
-
using namespace clang;
using namespace clang::vfs;
using namespace llvm;
@@ -235,11 +228,7 @@ std::error_code RealFileSystem::setCurrentWorkingDirectory(const Twine &Path) {
// difference for example on network filesystems, where symlinks might be
// switched during runtime of the tool. Fixing this depends on having a
// file system abstraction that allows openat() style interactions.
- SmallString<256> Storage;
- StringRef Dir = Path.toNullTerminatedStringRef(Storage);
- if (int Err = ::chdir(Dir.data()))
- return std::error_code(Err, std::generic_category());
- return std::error_code();
+ return llvm::sys::fs::set_current_path(Path);
}
IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
@@ -249,16 +238,13 @@ IntrusiveRefCntPtr<FileSystem> vfs::getRealFileSystem() {
namespace {
class RealFSDirIter : public clang::vfs::detail::DirIterImpl {
- std::string Path;
llvm::sys::fs::directory_iterator Iter;
public:
- RealFSDirIter(const Twine &_Path, std::error_code &EC)
- : Path(_Path.str()), Iter(Path, EC) {
+ RealFSDirIter(const Twine &Path, std::error_code &EC) : Iter(Path, EC) {
if (!EC && Iter != llvm::sys::fs::directory_iterator()) {
llvm::sys::fs::file_status S;
EC = Iter->status(S);
- if (!EC)
- CurrentEntry = Status::copyWithNewName(S, Iter->path());
+ CurrentEntry = Status::copyWithNewName(S, Iter->path());
}
}
@@ -1869,7 +1855,7 @@ vfs::recursive_directory_iterator::recursive_directory_iterator(FileSystem &FS_,
std::error_code &EC)
: FS(&FS_) {
directory_iterator I = FS->dir_begin(Path, EC);
- if (!EC && I != directory_iterator()) {
+ if (I != directory_iterator()) {
State = std::make_shared<IterState>();
State->push(I);
}
@@ -1882,8 +1868,6 @@ recursive_directory_iterator::increment(std::error_code &EC) {
vfs::directory_iterator End;
if (State->top()->isDirectory()) {
vfs::directory_iterator I = FS->dir_begin(State->top()->getName(), EC);
- if (EC)
- return *this;
if (I != End) {
State->push(I);
return *this;
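A behavioral note on the iterator changes: an errored dir_begin no longer aborts construction or increment early; the iterator simply compares equal to end, and EC carries the error. The stack-of-iterators recursion itself looks roughly like this std::filesystem analogue (not the VFS API):

    #include <filesystem>
    #include <iostream>
    #include <stack>
    #include <system_error>

    namespace fs = std::filesystem;

    int main() {
      std::error_code EC;
      std::stack<fs::directory_iterator> State;
      fs::directory_iterator I(".", EC);
      if (I != fs::directory_iterator()) // an errored begin is simply empty
        State.push(I);
      while (!State.empty()) {
        if (State.top() == fs::directory_iterator()) {
          State.pop(); // finished this level, resume the parent
          continue;
        }
        fs::directory_entry E = *State.top();
        ++State.top();
        std::cout << E.path() << '\n';
        if (E.is_directory(EC)) {
          fs::directory_iterator Child(E.path(), EC);
          if (Child != fs::directory_iterator())
            State.push(Child);
        }
      }
      return 0;
    }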
diff --git a/lib/Basic/XRayLists.cpp b/lib/Basic/XRayLists.cpp
new file mode 100644
index 000000000000..dccf3baa75e2
--- /dev/null
+++ b/lib/Basic/XRayLists.cpp
@@ -0,0 +1,53 @@
+//===--- XRayLists.cpp - XRay automatic-attribution ------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// User-provided filters for always/never XRay instrumenting certain functions.
+//
+//===----------------------------------------------------------------------===//
+#include "clang/Basic/XRayLists.h"
+
+using namespace clang;
+
+XRayFunctionFilter::XRayFunctionFilter(
+ ArrayRef<std::string> AlwaysInstrumentPaths,
+ ArrayRef<std::string> NeverInstrumentPaths, SourceManager &SM)
+ : AlwaysInstrument(
+ llvm::SpecialCaseList::createOrDie(AlwaysInstrumentPaths)),
+ NeverInstrument(llvm::SpecialCaseList::createOrDie(NeverInstrumentPaths)),
+ SM(SM) {}
+
+XRayFunctionFilter::ImbueAttribute
+XRayFunctionFilter::shouldImbueFunction(StringRef FunctionName) const {
+  // First apply the always-instrument list; then, if it isn't an "always",
+  // see whether it's treated as a "never" instrument function.
+ if (AlwaysInstrument->inSection("fun", FunctionName))
+ return ImbueAttribute::ALWAYS;
+ if (NeverInstrument->inSection("fun", FunctionName))
+ return ImbueAttribute::NEVER;
+ return ImbueAttribute::NONE;
+}
+
+XRayFunctionFilter::ImbueAttribute
+XRayFunctionFilter::shouldImbueFunctionsInFile(StringRef Filename,
+ StringRef Category) const {
+ if (AlwaysInstrument->inSection("src", Filename, Category))
+ return ImbueAttribute::ALWAYS;
+ if (NeverInstrument->inSection("src", Filename, Category))
+ return ImbueAttribute::NEVER;
+ return ImbueAttribute::NONE;
+}
+
+XRayFunctionFilter::ImbueAttribute
+XRayFunctionFilter::shouldImbueLocation(SourceLocation Loc,
+ StringRef Category) const {
+ if (!Loc.isValid())
+ return ImbueAttribute::NONE;
+ return this->shouldImbueFunctionsInFile(SM.getFilename(SM.getFileLoc(Loc)),
+ Category);
+}
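The two SpecialCaseLists are queried with the section names "fun" and "src". A hedged usage sketch (file contents and the helper name are hypothetical), assuming a tool that has already constructed the filter:

    #include "clang/Basic/XRayLists.h"

    // A file passed in AlwaysInstrumentPaths uses llvm::SpecialCaseList
    // syntax, e.g.:
    //   fun:*hot_path*
    //   src:perf_critical/*
    clang::XRayFunctionFilter::ImbueAttribute
    classify(const clang::XRayFunctionFilter &Filter, llvm::StringRef Fn) {
      // ALWAYS takes precedence over NEVER, per shouldImbueFunction above.
      return Filter.shouldImbueFunction(Fn);
    }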
diff --git a/lib/CodeGen/ABIInfo.h b/lib/CodeGen/ABIInfo.h
index ac31dfdaf3e4..c0be60ef53bc 100644
--- a/lib/CodeGen/ABIInfo.h
+++ b/lib/CodeGen/ABIInfo.h
@@ -10,6 +10,7 @@
#ifndef LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
#define LLVM_CLANG_LIB_CODEGEN_ABIINFO_H
+#include "clang/AST/CharUnits.h"
#include "clang/AST/Type.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Type.h"
diff --git a/lib/CodeGen/BackendUtil.cpp b/lib/CodeGen/BackendUtil.cpp
index d2ce6ea48e41..855d6795b9d6 100644
--- a/lib/CodeGen/BackendUtil.cpp
+++ b/lib/CodeGen/BackendUtil.cpp
@@ -61,6 +61,9 @@ using namespace llvm;
namespace {
+// Default filename used for profile generation.
+static constexpr StringLiteral DefaultProfileGenName = "default_%m.profraw";
+
class EmitAssemblyHelper {
DiagnosticsEngine &Diags;
const HeaderSearchOptions &HSOpts;
@@ -73,7 +76,6 @@ class EmitAssemblyHelper {
std::unique_ptr<raw_pwrite_stream> OS;
-private:
TargetIRAnalysis getTargetIRAnalysis() const {
if (TM)
return TM->getTargetIRAnalysis();
@@ -262,7 +264,7 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
TLII->disableAllFunctions();
else {
// Disable individual libc/libm calls in TargetLibraryInfo.
- LibFunc::Func F;
+ LibFunc F;
for (auto &FuncName : CodeGenOpts.getNoBuiltinFuncs())
if (TLII->getLibFunc(FuncName, F))
TLII->setUnavailable(F);
@@ -292,6 +294,140 @@ static void addSymbolRewriterPass(const CodeGenOptions &Opts,
MPM->add(createRewriteSymbolsPass(DL));
}
+static CodeGenOpt::Level getCGOptLevel(const CodeGenOptions &CodeGenOpts) {
+ switch (CodeGenOpts.OptimizationLevel) {
+ default:
+ llvm_unreachable("Invalid optimization level!");
+ case 0:
+ return CodeGenOpt::None;
+ case 1:
+ return CodeGenOpt::Less;
+ case 2:
+ return CodeGenOpt::Default; // O2/Os/Oz
+ case 3:
+ return CodeGenOpt::Aggressive;
+ }
+}
+
+static llvm::CodeModel::Model getCodeModel(const CodeGenOptions &CodeGenOpts) {
+ unsigned CodeModel =
+ llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel)
+ .Case("small", llvm::CodeModel::Small)
+ .Case("kernel", llvm::CodeModel::Kernel)
+ .Case("medium", llvm::CodeModel::Medium)
+ .Case("large", llvm::CodeModel::Large)
+ .Case("default", llvm::CodeModel::Default)
+ .Default(~0u);
+ assert(CodeModel != ~0u && "invalid code model!");
+ return static_cast<llvm::CodeModel::Model>(CodeModel);
+}
+
+static llvm::Reloc::Model getRelocModel(const CodeGenOptions &CodeGenOpts) {
+ // Keep this synced with the equivalent code in
+ // lib/Frontend/CompilerInvocation.cpp
+ llvm::Optional<llvm::Reloc::Model> RM;
+ RM = llvm::StringSwitch<llvm::Reloc::Model>(CodeGenOpts.RelocationModel)
+ .Case("static", llvm::Reloc::Static)
+ .Case("pic", llvm::Reloc::PIC_)
+ .Case("ropi", llvm::Reloc::ROPI)
+ .Case("rwpi", llvm::Reloc::RWPI)
+ .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
+ .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC);
+ assert(RM.hasValue() && "invalid PIC model!");
+ return *RM;
+}
+
+static TargetMachine::CodeGenFileType getCodeGenFileType(BackendAction Action) {
+ if (Action == Backend_EmitObj)
+ return TargetMachine::CGFT_ObjectFile;
+ else if (Action == Backend_EmitMCNull)
+ return TargetMachine::CGFT_Null;
+ else {
+ assert(Action == Backend_EmitAssembly && "Invalid action!");
+ return TargetMachine::CGFT_AssemblyFile;
+ }
+}
+
+static void initTargetOptions(llvm::TargetOptions &Options,
+ const CodeGenOptions &CodeGenOpts,
+ const clang::TargetOptions &TargetOpts,
+ const LangOptions &LangOpts,
+ const HeaderSearchOptions &HSOpts) {
+ Options.ThreadModel =
+ llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
+ .Case("posix", llvm::ThreadModel::POSIX)
+ .Case("single", llvm::ThreadModel::Single);
+
+ // Set float ABI type.
+ assert((CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp" ||
+ CodeGenOpts.FloatABI == "hard" || CodeGenOpts.FloatABI.empty()) &&
+ "Invalid Floating Point ABI!");
+ Options.FloatABIType =
+ llvm::StringSwitch<llvm::FloatABI::ABIType>(CodeGenOpts.FloatABI)
+ .Case("soft", llvm::FloatABI::Soft)
+ .Case("softfp", llvm::FloatABI::Soft)
+ .Case("hard", llvm::FloatABI::Hard)
+ .Default(llvm::FloatABI::Default);
+
+ // Set FP fusion mode.
+ switch (LangOpts.getDefaultFPContractMode()) {
+ case LangOptions::FPC_Off:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
+ break;
+ case LangOptions::FPC_On:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
+ break;
+ case LangOptions::FPC_Fast:
+ Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
+ break;
+ }
+
+ Options.UseInitArray = CodeGenOpts.UseInitArray;
+ Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
+ Options.CompressDebugSections = CodeGenOpts.CompressDebugSections;
+ Options.RelaxELFRelocations = CodeGenOpts.RelaxELFRelocations;
+
+ // Set EABI version.
+ Options.EABIVersion = llvm::StringSwitch<llvm::EABI>(TargetOpts.EABIVersion)
+ .Case("4", llvm::EABI::EABI4)
+ .Case("5", llvm::EABI::EABI5)
+ .Case("gnu", llvm::EABI::GNU)
+ .Default(llvm::EABI::Default);
+
+ if (LangOpts.SjLjExceptions)
+ Options.ExceptionModel = llvm::ExceptionHandling::SjLj;
+
+ Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
+ Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
+ Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
+ Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
+ Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
+ Options.FunctionSections = CodeGenOpts.FunctionSections;
+ Options.DataSections = CodeGenOpts.DataSections;
+ Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
+ Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
+ Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
+
+ Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
+ Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
+ Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
+ Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
+ Options.MCOptions.MCIncrementalLinkerCompatible =
+ CodeGenOpts.IncrementalLinkerCompatible;
+ Options.MCOptions.MCPIECopyRelocations = CodeGenOpts.PIECopyRelocations;
+ Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
+ Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
+ Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
+ Options.MCOptions.ABIName = TargetOpts.ABI;
+ for (const auto &Entry : HSOpts.UserEntries)
+ if (!Entry.IsFramework &&
+ (Entry.Group == frontend::IncludeDirGroup::Quoted ||
+ Entry.Group == frontend::IncludeDirGroup::Angled ||
+ Entry.Group == frontend::IncludeDirGroup::System))
+ Options.MCOptions.IASSearchPaths.push_back(
+ Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
+}
+
void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
legacy::FunctionPassManager &FPM) {
// Handle disabling of all LLVM passes, where we want to preserve the
@@ -316,8 +452,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
!CodeGenOpts.DisableLifetimeMarkers);
PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
} else {
+ // We do not want to inline hot callsites for SamplePGO module-summary build
+ // because profile annotation will happen again in ThinLTO backend, and we
+ // want the IR of the hot path to match the profile.
PMBuilder.Inliner = createFunctionInliningPass(
- CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize);
+ CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize,
+ (!CodeGenOpts.SampleProfileFile.empty() &&
+ CodeGenOpts.EmitSummaryIndex));
}
PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
@@ -334,16 +475,13 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
MPM.add(new TargetLibraryInfoWrapperPass(*TLII));
- // Add target-specific passes that need to run as early as possible.
if (TM)
- PMBuilder.addExtension(
- PassManagerBuilder::EP_EarlyAsPossible,
- [&](const PassManagerBuilder &, legacy::PassManagerBase &PM) {
- TM->addEarlyAsPossiblePasses(PM);
- });
+ TM->adjustPassManager(PMBuilder);
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addAddDiscriminatorsPass);
+ if (CodeGenOpts.DebugInfoForProfiling ||
+ !CodeGenOpts.SampleProfileFile.empty())
+ PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
+ addAddDiscriminatorsPass);
// In ObjC ARC mode, add the main ARC optimization passes.
if (LangOpts.ObjCAutoRefCount) {
@@ -454,7 +592,7 @@ void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
if (!CodeGenOpts.InstrProfileOutput.empty())
PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
else
- PMBuilder.PGOInstrGen = "default_%m.profraw";
+ PMBuilder.PGOInstrGen = DefaultProfileGenName;
}
if (CodeGenOpts.hasProfileIRUse())
PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
@@ -495,126 +633,14 @@ void EmitAssemblyHelper::CreateTargetMachine(bool MustCreateTM) {
return;
}
- unsigned CodeModel =
- llvm::StringSwitch<unsigned>(CodeGenOpts.CodeModel)
- .Case("small", llvm::CodeModel::Small)
- .Case("kernel", llvm::CodeModel::Kernel)
- .Case("medium", llvm::CodeModel::Medium)
- .Case("large", llvm::CodeModel::Large)
- .Case("default", llvm::CodeModel::Default)
- .Default(~0u);
- assert(CodeModel != ~0u && "invalid code model!");
- llvm::CodeModel::Model CM = static_cast<llvm::CodeModel::Model>(CodeModel);
-
+ llvm::CodeModel::Model CM = getCodeModel(CodeGenOpts);
std::string FeaturesStr =
llvm::join(TargetOpts.Features.begin(), TargetOpts.Features.end(), ",");
-
- // Keep this synced with the equivalent code in tools/driver/cc1as_main.cpp.
- llvm::Optional<llvm::Reloc::Model> RM;
- RM = llvm::StringSwitch<llvm::Reloc::Model>(CodeGenOpts.RelocationModel)
- .Case("static", llvm::Reloc::Static)
- .Case("pic", llvm::Reloc::PIC_)
- .Case("ropi", llvm::Reloc::ROPI)
- .Case("rwpi", llvm::Reloc::RWPI)
- .Case("ropi-rwpi", llvm::Reloc::ROPI_RWPI)
- .Case("dynamic-no-pic", llvm::Reloc::DynamicNoPIC);
- assert(RM.hasValue() && "invalid PIC model!");
-
- CodeGenOpt::Level OptLevel;
- switch (CodeGenOpts.OptimizationLevel) {
- default:
- llvm_unreachable("Invalid optimization level!");
- case 0:
- OptLevel = CodeGenOpt::None;
- break;
- case 1:
- OptLevel = CodeGenOpt::Less;
- break;
- case 2:
- OptLevel = CodeGenOpt::Default;
- break; // O2/Os/Oz
- case 3:
- OptLevel = CodeGenOpt::Aggressive;
- break;
- }
+ llvm::Reloc::Model RM = getRelocModel(CodeGenOpts);
+ CodeGenOpt::Level OptLevel = getCGOptLevel(CodeGenOpts);
llvm::TargetOptions Options;
-
- Options.ThreadModel =
- llvm::StringSwitch<llvm::ThreadModel::Model>(CodeGenOpts.ThreadModel)
- .Case("posix", llvm::ThreadModel::POSIX)
- .Case("single", llvm::ThreadModel::Single);
-
- // Set float ABI type.
- assert((CodeGenOpts.FloatABI == "soft" || CodeGenOpts.FloatABI == "softfp" ||
- CodeGenOpts.FloatABI == "hard" || CodeGenOpts.FloatABI.empty()) &&
- "Invalid Floating Point ABI!");
- Options.FloatABIType =
- llvm::StringSwitch<llvm::FloatABI::ABIType>(CodeGenOpts.FloatABI)
- .Case("soft", llvm::FloatABI::Soft)
- .Case("softfp", llvm::FloatABI::Soft)
- .Case("hard", llvm::FloatABI::Hard)
- .Default(llvm::FloatABI::Default);
-
- // Set FP fusion mode.
- switch (CodeGenOpts.getFPContractMode()) {
- case CodeGenOptions::FPC_Off:
- Options.AllowFPOpFusion = llvm::FPOpFusion::Strict;
- break;
- case CodeGenOptions::FPC_On:
- Options.AllowFPOpFusion = llvm::FPOpFusion::Standard;
- break;
- case CodeGenOptions::FPC_Fast:
- Options.AllowFPOpFusion = llvm::FPOpFusion::Fast;
- break;
- }
-
- Options.UseInitArray = CodeGenOpts.UseInitArray;
- Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
- Options.CompressDebugSections = CodeGenOpts.CompressDebugSections;
- Options.RelaxELFRelocations = CodeGenOpts.RelaxELFRelocations;
-
- // Set EABI version.
- Options.EABIVersion = llvm::StringSwitch<llvm::EABI>(TargetOpts.EABIVersion)
- .Case("4", llvm::EABI::EABI4)
- .Case("5", llvm::EABI::EABI5)
- .Case("gnu", llvm::EABI::GNU)
- .Default(llvm::EABI::Default);
-
- if (LangOpts.SjLjExceptions)
- Options.ExceptionModel = llvm::ExceptionHandling::SjLj;
-
- Options.LessPreciseFPMADOption = CodeGenOpts.LessPreciseFPMAD;
- Options.NoInfsFPMath = CodeGenOpts.NoInfsFPMath;
- Options.NoNaNsFPMath = CodeGenOpts.NoNaNsFPMath;
- Options.NoZerosInBSS = CodeGenOpts.NoZeroInitializedInBSS;
- Options.UnsafeFPMath = CodeGenOpts.UnsafeFPMath;
- Options.StackAlignmentOverride = CodeGenOpts.StackAlignment;
- Options.FunctionSections = CodeGenOpts.FunctionSections;
- Options.DataSections = CodeGenOpts.DataSections;
- Options.UniqueSectionNames = CodeGenOpts.UniqueSectionNames;
- Options.EmulatedTLS = CodeGenOpts.EmulatedTLS;
- Options.DebuggerTuning = CodeGenOpts.getDebuggerTuning();
-
- Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
- Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
- Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
- Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
- Options.MCOptions.MCIncrementalLinkerCompatible =
- CodeGenOpts.IncrementalLinkerCompatible;
- Options.MCOptions.MCPIECopyRelocations = CodeGenOpts.PIECopyRelocations;
- Options.MCOptions.MCFatalWarnings = CodeGenOpts.FatalWarnings;
- Options.MCOptions.AsmVerbose = CodeGenOpts.AsmVerbose;
- Options.MCOptions.PreserveAsmComments = CodeGenOpts.PreserveAsmComments;
- Options.MCOptions.ABIName = TargetOpts.ABI;
- for (const auto &Entry : HSOpts.UserEntries)
- if (!Entry.IsFramework &&
- (Entry.Group == frontend::IncludeDirGroup::Quoted ||
- Entry.Group == frontend::IncludeDirGroup::Angled ||
- Entry.Group == frontend::IncludeDirGroup::System))
- Options.MCOptions.IASSearchPaths.push_back(
- Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
-
+ initTargetOptions(Options, CodeGenOpts, TargetOpts, LangOpts, HSOpts);
TM.reset(TheTarget->createTargetMachine(Triple, TargetOpts.CPU, FeaturesStr,
Options, RM, CM, OptLevel));
}
@@ -630,13 +656,7 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
// Normal mode, emit a .s or .o file by running the code generator. Note,
// this also adds codegenerator level optimization passes.
- TargetMachine::CodeGenFileType CGFT = TargetMachine::CGFT_AssemblyFile;
- if (Action == Backend_EmitObj)
- CGFT = TargetMachine::CGFT_ObjectFile;
- else if (Action == Backend_EmitMCNull)
- CGFT = TargetMachine::CGFT_Null;
- else
- assert(Action == Backend_EmitAssembly && "Invalid action!");
+ TargetMachine::CodeGenFileType CGFT = getCodeGenFileType(Action);
// Add ObjC ARC final-cleanup optimizations. This is done as part of the
// "codegen" passes so that it isn't run multiple times when there is
@@ -683,14 +703,31 @@ void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
CodeGenPasses.add(
createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
+ std::unique_ptr<raw_fd_ostream> ThinLinkOS;
+
switch (Action) {
case Backend_EmitNothing:
break;
case Backend_EmitBC:
- PerModulePasses.add(createBitcodeWriterPass(
- *OS, CodeGenOpts.EmitLLVMUseLists, CodeGenOpts.EmitSummaryIndex,
- CodeGenOpts.EmitSummaryIndex));
+ if (CodeGenOpts.EmitSummaryIndex) {
+ if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
+ std::error_code EC;
+ ThinLinkOS.reset(new llvm::raw_fd_ostream(
+ CodeGenOpts.ThinLinkBitcodeFile, EC,
+ llvm::sys::fs::F_None));
+ if (EC) {
+          Diags.Report(diag::err_fe_unable_to_open_output)
+              << CodeGenOpts.ThinLinkBitcodeFile << EC.message();
+ return;
+ }
+ }
+ PerModulePasses.add(
+ createWriteThinLTOBitcodePass(*OS, ThinLinkOS.get()));
+      } else
+ PerModulePasses.add(
+ createBitcodeWriterPass(*OS, CodeGenOpts.EmitLLVMUseLists));
break;
case Backend_EmitLL:
@@ -779,7 +816,24 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
return;
TheModule->setDataLayout(TM->createDataLayout());
- PassBuilder PB(TM.get());
+ PGOOptions PGOOpt;
+
+ // -fprofile-generate.
+ PGOOpt.RunProfileGen = CodeGenOpts.hasProfileIRInstr();
+ if (PGOOpt.RunProfileGen)
+ PGOOpt.ProfileGenFile = CodeGenOpts.InstrProfileOutput.empty() ?
+ DefaultProfileGenName : CodeGenOpts.InstrProfileOutput;
+
+ // -fprofile-use.
+ if (CodeGenOpts.hasProfileIRUse())
+ PGOOpt.ProfileUseFile = CodeGenOpts.ProfileInstrumentUsePath;
+
+ // Only pass a PGO options struct if -fprofile-generate or
+ // -fprofile-use were passed on the cmdline.
+ PassBuilder PB(TM.get(),
+ (PGOOpt.RunProfileGen ||
+ !PGOOpt.ProfileUseFile.empty()) ?
+ Optional<PGOOptions>(PGOOpt) : None);
LoopAnalysisManager LAM;
FunctionAnalysisManager FAM;
@@ -861,8 +915,31 @@ void EmitAssemblyHelper::EmitAssemblyWithNewPassManager(
}
}
+Expected<BitcodeModule> clang::FindThinLTOModule(MemoryBufferRef MBRef) {
+ Expected<std::vector<BitcodeModule>> BMsOrErr = getBitcodeModuleList(MBRef);
+ if (!BMsOrErr)
+ return BMsOrErr.takeError();
+
+  // The bitcode file may contain multiple modules; we want the one with a
+  // summary.
+ for (BitcodeModule &BM : *BMsOrErr) {
+ Expected<bool> HasSummary = BM.hasSummary();
+ if (HasSummary && *HasSummary)
+ return BM;
+ }
+
+ return make_error<StringError>("Could not find module summary",
+ inconvertibleErrorCode());
+}
+
static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
- std::unique_ptr<raw_pwrite_stream> OS) {
+ const HeaderSearchOptions &HeaderOpts,
+ const CodeGenOptions &CGOpts,
+ const clang::TargetOptions &TOpts,
+ const LangOptions &LOpts,
+ std::unique_ptr<raw_pwrite_stream> OS,
+ std::string SampleProfile,
+ BackendAction Action) {
StringMap<std::map<GlobalValue::GUID, GlobalValueSummary *>>
ModuleToDefinedGVSummaries;
CombinedIndex->collectDefinedGVSummariesPerModule(ModuleToDefinedGVSummaries);
@@ -897,32 +974,15 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
return;
}
- Expected<std::vector<BitcodeModule>> BMsOrErr =
- getBitcodeModuleList(**MBOrErr);
- if (!BMsOrErr) {
- handleAllErrors(BMsOrErr.takeError(), [&](ErrorInfoBase &EIB) {
+ Expected<BitcodeModule> BMOrErr = FindThinLTOModule(**MBOrErr);
+ if (!BMOrErr) {
+ handleAllErrors(BMOrErr.takeError(), [&](ErrorInfoBase &EIB) {
errs() << "Error loading imported file '" << I.first()
<< "': " << EIB.message() << '\n';
});
return;
}
-
- // The bitcode file may contain multiple modules, we want the one with a
- // summary.
- bool FoundModule = false;
- for (BitcodeModule &BM : *BMsOrErr) {
- Expected<bool> HasSummary = BM.hasSummary();
- if (HasSummary && *HasSummary) {
- ModuleMap.insert({I.first(), BM});
- FoundModule = true;
- break;
- }
- }
- if (!FoundModule) {
- errs() << "Error loading imported file '" << I.first()
- << "': Could not find module summary\n";
- return;
- }
+ ModuleMap.insert({I.first(), *BMOrErr});
OwnedImports.push_back(std::move(*MBOrErr));
}
@@ -930,6 +990,35 @@ static void runThinLTOBackend(ModuleSummaryIndex *CombinedIndex, Module *M,
return llvm::make_unique<lto::NativeObjectStream>(std::move(OS));
};
lto::Config Conf;
+ Conf.CPU = TOpts.CPU;
+ Conf.CodeModel = getCodeModel(CGOpts);
+ Conf.MAttrs = TOpts.Features;
+ Conf.RelocModel = getRelocModel(CGOpts);
+ Conf.CGOptLevel = getCGOptLevel(CGOpts);
+ initTargetOptions(Conf.Options, CGOpts, TOpts, LOpts, HeaderOpts);
+ Conf.SampleProfile = std::move(SampleProfile);
+ switch (Action) {
+ case Backend_EmitNothing:
+ Conf.PreCodeGenModuleHook = [](size_t Task, const Module &Mod) {
+ return false;
+ };
+ break;
+ case Backend_EmitLL:
+ Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
+ M->print(*OS, nullptr, CGOpts.EmitLLVMUseLists);
+ return false;
+ };
+ break;
+ case Backend_EmitBC:
+ Conf.PreCodeGenModuleHook = [&](size_t Task, const Module &Mod) {
+ WriteBitcodeToFile(M, *OS, CGOpts.EmitLLVMUseLists);
+ return false;
+ };
+ break;
+ default:
+ Conf.CGFileType = getCodeGenFileType(Action);
+ break;
+ }
if (Error E = thinBackend(
Conf, 0, AddStream, *M, *CombinedIndex, ImportList,
ModuleToDefinedGVSummaries[M->getModuleIdentifier()], ModuleMap)) {
@@ -965,7 +1054,8 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
// of an error).
bool DoThinLTOBackend = CombinedIndex != nullptr;
if (DoThinLTOBackend) {
- runThinLTOBackend(CombinedIndex.get(), M, std::move(OS));
+ runThinLTOBackend(CombinedIndex.get(), M, HeaderOpts, CGOpts, TOpts,
+ LOpts, std::move(OS), CGOpts.SampleProfileFile, Action);
return;
}
}
@@ -996,6 +1086,7 @@ static const char* getSectionNameForBitcode(const Triple &T) {
return "__LLVM,__bitcode";
case Triple::COFF:
case Triple::ELF:
+ case Triple::Wasm:
case Triple::UnknownObjectFormat:
return ".llvmbc";
}
@@ -1008,6 +1099,7 @@ static const char* getSectionNameForCommandline(const Triple &T) {
return "__LLVM,__cmdline";
case Triple::COFF:
case Triple::ELF:
+ case Triple::Wasm:
case Triple::UnknownObjectFormat:
return ".llvmcmd";
}
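The getCodeModel/getRelocModel helpers factored out above are simple string-to-enum maps that assert on impossible values. A standalone sketch of the same pattern without llvm::StringSwitch:

    #include <cassert>
    #include <cstring>

    enum class CodeModel { Small, Kernel, Medium, Large, Default };

    static CodeModel getCodeModel(const char *Name) {
      static const struct {
        const char *Key;
        CodeModel Value;
      } Table[] = {
          {"small", CodeModel::Small},   {"kernel", CodeModel::Kernel},
          {"medium", CodeModel::Medium}, {"large", CodeModel::Large},
          {"default", CodeModel::Default},
      };
      for (const auto &Entry : Table)
        if (std::strcmp(Name, Entry.Key) == 0)
          return Entry.Value;
      assert(false && "invalid code model!"); // the frontend validated it
      return CodeModel::Default;
    }

    int main() { return getCodeModel("small") == CodeModel::Small ? 0 : 1; }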
diff --git a/lib/CodeGen/CGAtomic.cpp b/lib/CodeGen/CGAtomic.cpp
index 9287e46127bd..28e20b53d656 100644
--- a/lib/CodeGen/CGAtomic.cpp
+++ b/lib/CodeGen/CGAtomic.cpp
@@ -1181,7 +1181,7 @@ RValue AtomicInfo::convertAtomicTempToRValue(Address addr,
if (LVal.isBitField())
return CGF.EmitLoadOfBitfieldLValue(
LValue::MakeBitfield(addr, LVal.getBitFieldInfo(), LVal.getType(),
- LVal.getAlignmentSource()));
+ LVal.getAlignmentSource()), loc);
if (LVal.isVectorElt())
return CGF.EmitLoadOfLValue(
LValue::MakeVectorElt(addr, LVal.getVectorIdx(), LVal.getType(),
diff --git a/lib/CodeGen/CGBlocks.cpp b/lib/CodeGen/CGBlocks.cpp
index b250b9a32b18..1a57b3e6608d 100644
--- a/lib/CodeGen/CGBlocks.cpp
+++ b/lib/CodeGen/CGBlocks.cpp
@@ -16,7 +16,7 @@
#include "CGObjCRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "ConstantBuilder.h"
+#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/DeclObjC.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/IR/CallSite.h"
@@ -266,7 +266,7 @@ static bool isSafeForCXXConstantCapture(QualType type) {
static llvm::Constant *tryCaptureAsConstant(CodeGenModule &CGM,
CodeGenFunction *CGF,
const VarDecl *var) {
- // Return if this is a function paramter. We shouldn't try to
+ // Return if this is a function parameter. We shouldn't try to
// rematerialize default arguments of function parameters.
if (isa<ParmVarDecl>(var))
return nullptr;
@@ -318,6 +318,19 @@ static void initializeForBlockHeader(CodeGenModule &CGM, CGBlockInfo &info,
elementTypes.push_back(CGM.getBlockDescriptorType());
}
+static QualType getCaptureFieldType(const CodeGenFunction &CGF,
+ const BlockDecl::Capture &CI) {
+ const VarDecl *VD = CI.getVariable();
+
+ // If the variable is captured by an enclosing block or lambda expression,
+ // use the type of the capture field.
+ if (CGF.BlockInfo && CI.isNested())
+ return CGF.BlockInfo->getCapture(VD).fieldType();
+ if (auto *FD = CGF.LambdaCaptureFields.lookup(VD))
+ return FD->getType();
+ return VD->getType();
+}
+
/// Compute the layout of the given block. Attempts to lay the block
/// out with minimal space requirements.
static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
@@ -432,15 +445,7 @@ static void computeBlockInfo(CodeGenModule &CGM, CodeGenFunction *CGF,
}
}
- QualType VT = variable->getType();
-
- // If the variable is captured by an enclosing block or lambda expression,
- // use the type of the capture field.
- if (CGF->BlockInfo && CI.isNested())
- VT = CGF->BlockInfo->getCapture(variable).fieldType();
- else if (auto *FD = CGF->LambdaCaptureFields.lookup(variable))
- VT = FD->getType();
-
+ QualType VT = getCaptureFieldType(*CGF, CI);
CharUnits size = C.getTypeSizeInChars(VT);
CharUnits align = C.getDeclAlign(variable);
@@ -606,8 +611,8 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
if (capture.isConstant()) continue;
// Ignore objects that aren't destructed.
- QualType::DestructionKind dtorKind =
- variable->getType().isDestructedType();
+ QualType VT = getCaptureFieldType(CGF, CI);
+ QualType::DestructionKind dtorKind = VT.isDestructedType();
if (dtorKind == QualType::DK_none) continue;
CodeGenFunction::Destroyer *destroyer;
@@ -634,7 +639,7 @@ static void enterBlockScope(CodeGenFunction &CGF, BlockDecl *block) {
if (useArrayEHCleanup)
cleanupKind = InactiveNormalAndEHCleanup;
- CGF.pushDestroy(cleanupKind, addr, variable->getType(),
+ CGF.pushDestroy(cleanupKind, addr, VT,
destroyer, useArrayEHCleanup);
// Remember where that cleanup was.
@@ -718,7 +723,12 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Otherwise, we have to emit this as a local block.
- llvm::Constant *isa = CGM.getNSConcreteStackBlock();
+ llvm::Constant *isa =
+ (!CGM.getContext().getLangOpts().OpenCL)
+ ? CGM.getNSConcreteStackBlock()
+ : CGM.getNullPointer(cast<llvm::PointerType>(
+ CGM.getNSConcreteStackBlock()->getType()),
+ QualType(getContext().VoidPtrTy));
isa = llvm::ConstantExpr::getBitCast(isa, VoidPtrTy);
// Build the block descriptor.
@@ -906,9 +916,8 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
// Cast to the converted block-pointer type, which happens (somewhat
// unfortunately) to be a pointer to function type.
- llvm::Value *result =
- Builder.CreateBitCast(blockAddr.getPointer(),
- ConvertType(blockInfo.getBlockExpr()->getType()));
+ llvm::Value *result = Builder.CreatePointerCast(
+ blockAddr.getPointer(), ConvertType(blockInfo.getBlockExpr()->getType()));
return result;
}
@@ -976,21 +985,41 @@ RValue CodeGenFunction::EmitBlockCallExpr(const CallExpr *E,
llvm::Value *BlockPtr = EmitScalarExpr(E->getCallee());
// Get a pointer to the generic block literal.
+  // For OpenCL we generate a generic AS void pointer so that the same block
+  // definition can be reused both for blocks with captures (emitted as
+  // private AS local variables) and for blocks without captures (emitted as
+  // global AS program scope variables).
+ unsigned AddrSpace = 0;
+ if (getLangOpts().OpenCL)
+ AddrSpace = getContext().getTargetAddressSpace(LangAS::opencl_generic);
+
llvm::Type *BlockLiteralTy =
- llvm::PointerType::getUnqual(CGM.getGenericBlockLiteralType());
+ llvm::PointerType::get(CGM.getGenericBlockLiteralType(), AddrSpace);
// Bitcast the callee to a block literal.
- BlockPtr = Builder.CreateBitCast(BlockPtr, BlockLiteralTy, "block.literal");
+ BlockPtr =
+ Builder.CreatePointerCast(BlockPtr, BlockLiteralTy, "block.literal");
// Get the function pointer from the literal.
llvm::Value *FuncPtr =
Builder.CreateStructGEP(CGM.getGenericBlockLiteralType(), BlockPtr, 3);
- BlockPtr = Builder.CreateBitCast(BlockPtr, VoidPtrTy);
// Add the block literal.
CallArgList Args;
- Args.add(RValue::get(BlockPtr), getContext().VoidPtrTy);
+
+ QualType VoidPtrQualTy = getContext().VoidPtrTy;
+ llvm::Type *GenericVoidPtrTy = VoidPtrTy;
+ if (getLangOpts().OpenCL) {
+ GenericVoidPtrTy = Builder.getInt8PtrTy(
+ getContext().getTargetAddressSpace(LangAS::opencl_generic));
+ VoidPtrQualTy =
+ getContext().getPointerType(getContext().getAddrSpaceQualType(
+ getContext().VoidTy, LangAS::opencl_generic));
+ }
+
+ BlockPtr = Builder.CreatePointerCast(BlockPtr, GenericVoidPtrTy);
+ Args.add(RValue::get(BlockPtr), VoidPtrQualTy);
QualType FnType = BPT->getPointeeType();
@@ -1097,7 +1126,12 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
auto fields = builder.beginStruct();
// isa
- fields.add(CGM.getNSConcreteGlobalBlock());
+ fields.add(
+ (!CGM.getContext().getLangOpts().OpenCL)
+ ? CGM.getNSConcreteGlobalBlock()
+ : CGM.getNullPointer(cast<llvm::PointerType>(
+ CGM.getNSConcreteGlobalBlock()->getType()),
+ QualType(CGM.getContext().VoidPtrTy)));
// __flags
BlockFlags flags = BLOCK_IS_GLOBAL | BLOCK_HAS_SIGNATURE;
@@ -1114,16 +1148,19 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
// Descriptor
fields.add(buildBlockDescriptor(CGM, blockInfo));
- llvm::Constant *literal =
- fields.finishAndCreateGlobal("__block_literal_global",
- blockInfo.BlockAlign,
- /*constant*/ true);
+ unsigned AddrSpace = 0;
+ if (CGM.getContext().getLangOpts().OpenCL)
+ AddrSpace = CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);
+
+ llvm::Constant *literal = fields.finishAndCreateGlobal(
+ "__block_literal_global", blockInfo.BlockAlign,
+ /*constant*/ true, llvm::GlobalVariable::InternalLinkage, AddrSpace);
// Return a constant of the appropriately-casted type.
llvm::Type *RequiredType =
CGM.getTypes().ConvertType(blockInfo.getBlockExpr()->getType());
llvm::Constant *Result =
- llvm::ConstantExpr::getBitCast(literal, RequiredType);
+ llvm::ConstantExpr::getPointerCast(literal, RequiredType);
CGM.setAddrOfGlobalBlock(blockInfo.BlockExpression, Result);
return Result;
}
@@ -1155,9 +1192,13 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
// Instead of messing around with LocalDeclMap, just set the value
// directly as BlockPointer.
- BlockPointer = Builder.CreateBitCast(arg,
- BlockInfo->StructureType->getPointerTo(),
- "block");
+ BlockPointer = Builder.CreatePointerCast(
+ arg,
+ BlockInfo->StructureType->getPointerTo(
+ getContext().getLangOpts().OpenCL
+ ? getContext().getTargetAddressSpace(LangAS::opencl_generic)
+ : 0),
+ "block");
}
Address CodeGenFunction::LoadBlockStruct() {
@@ -1196,6 +1237,15 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
// The first argument is the block pointer. Just take it as a void*
// and cast it later.
QualType selfTy = getContext().VoidPtrTy;
+
+  // For OpenCL, the passed block pointer can be a private AS local variable
+  // (the case with captures) or a global AS program scope variable (the case
+  // without captures). The generic AS is therefore used to accommodate both
+  // in one implementation.
+ if (getLangOpts().OpenCL)
+ selfTy = getContext().getPointerType(getContext().getAddrSpaceQualType(
+ getContext().VoidTy, LangAS::opencl_generic));
+
IdentifierInfo *II = &CGM.getContext().Idents.get(".block_descriptor");
ImplicitParamDecl selfDecl(getContext(), const_cast<BlockDecl*>(blockDecl),
@@ -1323,23 +1373,102 @@ CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
return fn;
}
-/*
- notes.push_back(HelperInfo());
- HelperInfo &note = notes.back();
- note.index = capture.getIndex();
- note.RequiresCopying = (ci->hasCopyExpr() || BlockRequiresCopying(type));
- note.cxxbar_import = ci->getCopyExpr();
-
- if (ci->isByRef()) {
- note.flag = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak())
- note.flag |= BLOCK_FIELD_IS_WEAK;
- } else if (type->isBlockPointerType()) {
- note.flag = BLOCK_FIELD_IS_BLOCK;
- } else {
- note.flag = BLOCK_FIELD_IS_OBJECT;
- }
- */
+namespace {
+
+/// Represents a type of copy/destroy operation that should be performed for an
+/// entity that's captured by a block.
+enum class BlockCaptureEntityKind {
+ CXXRecord, // Copy or destroy
+ ARCWeak,
+ ARCStrong,
+ BlockObject, // Assign or release
+ None
+};
+
+/// Represents a captured entity that requires extra operations in order for
+/// this entity to be copied or destroyed correctly.
+struct BlockCaptureManagedEntity {
+ BlockCaptureEntityKind Kind;
+ BlockFieldFlags Flags;
+ const BlockDecl::Capture &CI;
+ const CGBlockInfo::Capture &Capture;
+
+ BlockCaptureManagedEntity(BlockCaptureEntityKind Type, BlockFieldFlags Flags,
+ const BlockDecl::Capture &CI,
+ const CGBlockInfo::Capture &Capture)
+ : Kind(Type), Flags(Flags), CI(CI), Capture(Capture) {}
+};
+
+} // end anonymous namespace
+
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeCopyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts) {
+ if (CI.getCopyExpr()) {
+ assert(!CI.isByRef());
+ // don't bother computing flags
+ return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags());
+ }
+ BlockFieldFlags Flags;
+ if (CI.isByRef()) {
+ Flags = BLOCK_FIELD_IS_BYREF;
+ if (T.isObjCGCWeak())
+ Flags |= BLOCK_FIELD_IS_WEAK;
+ return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+ }
+ if (!T->isObjCRetainableType())
+ // For all other types, the memcpy is fine.
+ return std::make_pair(BlockCaptureEntityKind::None, Flags);
+
+ Flags = BLOCK_FIELD_IS_OBJECT;
+ bool isBlockPointer = T->isBlockPointerType();
+ if (isBlockPointer)
+ Flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures:
+ Qualifiers QS = T.getQualifiers();
+
+ // We need to register __weak direct captures with the runtime.
+ if (QS.getObjCLifetime() == Qualifiers::OCL_Weak)
+ return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags);
+
+ // We need to retain the copied value for __strong direct captures.
+ if (QS.getObjCLifetime() == Qualifiers::OCL_Strong) {
+ // If it's a block pointer, we have to copy the block and
+ // assign that to the destination pointer, so we might as
+ // well use _Block_object_assign. Otherwise we can avoid that.
+ return std::make_pair(!isBlockPointer ? BlockCaptureEntityKind::ARCStrong
+ : BlockCaptureEntityKind::BlockObject,
+ Flags);
+ }
+
+ // Non-ARC captures of retainable pointers are strong and
+ // therefore require a call to _Block_object_assign.
+ if (!QS.getObjCLifetime() && !LangOpts.ObjCAutoRefCount)
+ return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+
+ // Otherwise the memcpy is fine.
+ return std::make_pair(BlockCaptureEntityKind::None, Flags);
+}
+
+/// Find the set of block captures that need to be explicitly copied or destroyed.
+static void findBlockCapturedManagedEntities(
+ const CGBlockInfo &BlockInfo, const LangOptions &LangOpts,
+ SmallVectorImpl<BlockCaptureManagedEntity> &ManagedCaptures,
+ llvm::function_ref<std::pair<BlockCaptureEntityKind, BlockFieldFlags>(
+ const BlockDecl::Capture &, QualType, const LangOptions &)>
+ Predicate) {
+ for (const auto &CI : BlockInfo.getBlockDecl()->captures()) {
+ const VarDecl *Variable = CI.getVariable();
+ const CGBlockInfo::Capture &Capture = BlockInfo.getCapture(Variable);
+ if (Capture.isConstant())
+ continue;
+
+ auto Info = Predicate(CI, Variable->getType(), LangOpts);
+ if (Info.first != BlockCaptureEntityKind::None)
+ ManagedCaptures.emplace_back(Info.first, Info.second, CI, Capture);
+ }
+}
/// Generate the copy-helper function for a block closure object:
/// static void block_copy_helper(block_t *dst, block_t *src);
@@ -1399,78 +1528,28 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
- const BlockDecl *blockDecl = blockInfo.getBlockDecl();
+ SmallVector<BlockCaptureManagedEntity, 4> CopiedCaptures;
+ findBlockCapturedManagedEntities(blockInfo, getLangOpts(), CopiedCaptures,
+ computeCopyInfoForBlockCapture);
- for (const auto &CI : blockDecl->captures()) {
- const VarDecl *variable = CI.getVariable();
- QualType type = variable->getType();
-
- const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) continue;
-
- const Expr *copyExpr = CI.getCopyExpr();
- BlockFieldFlags flags;
-
- bool useARCWeakCopy = false;
- bool useARCStrongCopy = false;
-
- if (copyExpr) {
- assert(!CI.isByRef());
- // don't bother computing flags
-
- } else if (CI.isByRef()) {
- flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak())
- flags |= BLOCK_FIELD_IS_WEAK;
-
- } else if (type->isObjCRetainableType()) {
- flags = BLOCK_FIELD_IS_OBJECT;
- bool isBlockPointer = type->isBlockPointerType();
- if (isBlockPointer)
- flags = BLOCK_FIELD_IS_BLOCK;
-
- // Special rules for ARC captures:
- Qualifiers qs = type.getQualifiers();
-
- // We need to register __weak direct captures with the runtime.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
- useARCWeakCopy = true;
-
- // We need to retain the copied value for __strong direct captures.
- } else if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
- // If it's a block pointer, we have to copy the block and
- // assign that to the destination pointer, so we might as
- // well use _Block_object_assign. Otherwise we can avoid that.
- if (!isBlockPointer)
- useARCStrongCopy = true;
-
- // Non-ARC captures of retainable pointers are strong and
- // therefore require a call to _Block_object_assign.
- } else if (!qs.getObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
- // fall through
-
- // Otherwise the memcpy is fine.
- } else {
- continue;
- }
-
- // For all other types, the memcpy is fine.
- } else {
- continue;
- }
+ for (const auto &CopiedCapture : CopiedCaptures) {
+ const BlockDecl::Capture &CI = CopiedCapture.CI;
+ const CGBlockInfo::Capture &capture = CopiedCapture.Capture;
+ BlockFieldFlags flags = CopiedCapture.Flags;
unsigned index = capture.getIndex();
Address srcField = Builder.CreateStructGEP(src, index, capture.getOffset());
Address dstField = Builder.CreateStructGEP(dst, index, capture.getOffset());
// If there's an explicit copy expression, we do that.
- if (copyExpr) {
- EmitSynthesizedCXXCopyCtor(dstField, srcField, copyExpr);
- } else if (useARCWeakCopy) {
+ if (CI.getCopyExpr()) {
+ assert(CopiedCapture.Kind == BlockCaptureEntityKind::CXXRecord);
+ EmitSynthesizedCXXCopyCtor(dstField, srcField, CI.getCopyExpr());
+ } else if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCWeak) {
EmitARCCopyWeak(dstField, srcField);
} else {
llvm::Value *srcValue = Builder.CreateLoad(srcField, "blockcopy.src");
- if (useARCStrongCopy) {
+ if (CopiedCapture.Kind == BlockCaptureEntityKind::ARCStrong) {
// At -O0, store null into the destination field (so that the
// storeStrong doesn't over-release) and then call storeStrong.
// This is a workaround to not having an initStrong call.
@@ -1491,6 +1570,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
cast<llvm::Instruction>(dstField.getPointer())->eraseFromParent();
}
} else {
+ assert(CopiedCapture.Kind == BlockCaptureEntityKind::BlockObject);
srcValue = Builder.CreateBitCast(srcValue, VoidPtrTy);
llvm::Value *dstAddr =
Builder.CreateBitCast(dstField.getPointer(), VoidPtrTy);
@@ -1498,6 +1578,7 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
dstAddr, srcValue, llvm::ConstantInt::get(Int32Ty, flags.getBitMask())
};
+ const VarDecl *variable = CI.getVariable();
bool copyCanThrow = false;
if (CI.isByRef() && variable->getType()->getAsCXXRecordDecl()) {
const Expr *copyExpr =
@@ -1521,6 +1602,52 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
return llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
}
+static std::pair<BlockCaptureEntityKind, BlockFieldFlags>
+computeDestroyInfoForBlockCapture(const BlockDecl::Capture &CI, QualType T,
+ const LangOptions &LangOpts) {
+ BlockFieldFlags Flags;
+ if (CI.isByRef()) {
+ Flags = BLOCK_FIELD_IS_BYREF;
+ if (T.isObjCGCWeak())
+ Flags |= BLOCK_FIELD_IS_WEAK;
+ return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+ }
+
+ if (const CXXRecordDecl *Record = T->getAsCXXRecordDecl()) {
+ if (Record->hasTrivialDestructor())
+ return std::make_pair(BlockCaptureEntityKind::None, BlockFieldFlags());
+ return std::make_pair(BlockCaptureEntityKind::CXXRecord, BlockFieldFlags());
+ }
+
+  // Other types don't need to be destroyed explicitly.
+ if (!T->isObjCRetainableType())
+ return std::make_pair(BlockCaptureEntityKind::None, Flags);
+
+ Flags = BLOCK_FIELD_IS_OBJECT;
+ if (T->isBlockPointerType())
+ Flags = BLOCK_FIELD_IS_BLOCK;
+
+ // Special rules for ARC captures.
+ Qualifiers QS = T.getQualifiers();
+
+ // Use objc_storeStrong for __strong direct captures; the
+ // dynamic tools really like it when we do this.
+ if (QS.getObjCLifetime() == Qualifiers::OCL_Strong)
+ return std::make_pair(BlockCaptureEntityKind::ARCStrong, Flags);
+
+ // Support __weak direct captures.
+ if (QS.getObjCLifetime() == Qualifiers::OCL_Weak)
+ return std::make_pair(BlockCaptureEntityKind::ARCWeak, Flags);
+
+ // Non-ARC captures are strong, and we need to use
+ // _Block_object_dispose.
+ if (!QS.hasObjCLifetime() && !LangOpts.ObjCAutoRefCount)
+ return std::make_pair(BlockCaptureEntityKind::BlockObject, Flags);
+
+ // Otherwise, we have nothing to do.
+ return std::make_pair(BlockCaptureEntityKind::None, Flags);
+}
+
/// Generate the destroy-helper function for a block closure object:
/// static void block_destroy_helper(block_t *theBlock);
///
@@ -1570,79 +1697,39 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
src = Builder.CreateBitCast(src, structPtrTy, "block");
- const BlockDecl *blockDecl = blockInfo.getBlockDecl();
-
CodeGenFunction::RunCleanupsScope cleanups(*this);
- for (const auto &CI : blockDecl->captures()) {
- const VarDecl *variable = CI.getVariable();
- QualType type = variable->getType();
-
- const CGBlockInfo::Capture &capture = blockInfo.getCapture(variable);
- if (capture.isConstant()) continue;
-
- BlockFieldFlags flags;
- const CXXDestructorDecl *dtor = nullptr;
+ SmallVector<BlockCaptureManagedEntity, 4> DestroyedCaptures;
+ findBlockCapturedManagedEntities(blockInfo, getLangOpts(), DestroyedCaptures,
+ computeDestroyInfoForBlockCapture);
- bool useARCWeakDestroy = false;
- bool useARCStrongDestroy = false;
-
- if (CI.isByRef()) {
- flags = BLOCK_FIELD_IS_BYREF;
- if (type.isObjCGCWeak())
- flags |= BLOCK_FIELD_IS_WEAK;
- } else if (const CXXRecordDecl *record = type->getAsCXXRecordDecl()) {
- if (record->hasTrivialDestructor())
- continue;
- dtor = record->getDestructor();
- } else if (type->isObjCRetainableType()) {
- flags = BLOCK_FIELD_IS_OBJECT;
- if (type->isBlockPointerType())
- flags = BLOCK_FIELD_IS_BLOCK;
-
- // Special rules for ARC captures.
- Qualifiers qs = type.getQualifiers();
-
- // Use objc_storeStrong for __strong direct captures; the
- // dynamic tools really like it when we do this.
- if (qs.getObjCLifetime() == Qualifiers::OCL_Strong) {
- useARCStrongDestroy = true;
-
- // Support __weak direct captures.
- } else if (qs.getObjCLifetime() == Qualifiers::OCL_Weak) {
- useARCWeakDestroy = true;
-
- // Non-ARC captures are strong, and we need to use _Block_object_dispose.
- } else if (!qs.hasObjCLifetime() && !getLangOpts().ObjCAutoRefCount) {
- // fall through
-
- // Otherwise, we have nothing to do.
- } else {
- continue;
- }
- } else {
- continue;
- }
+ for (const auto &DestroyedCapture : DestroyedCaptures) {
+ const BlockDecl::Capture &CI = DestroyedCapture.CI;
+ const CGBlockInfo::Capture &capture = DestroyedCapture.Capture;
+ BlockFieldFlags flags = DestroyedCapture.Flags;
Address srcField =
Builder.CreateStructGEP(src, capture.getIndex(), capture.getOffset());
- // If there's an explicit copy expression, we do that.
- if (dtor) {
- PushDestructorCleanup(dtor, srcField);
+ // If the captured record has a destructor then call it.
+ if (DestroyedCapture.Kind == BlockCaptureEntityKind::CXXRecord) {
+ const auto *Dtor =
+ CI.getVariable()->getType()->getAsCXXRecordDecl()->getDestructor();
+ PushDestructorCleanup(Dtor, srcField);
- // If this is a __weak capture, emit the release directly.
- } else if (useARCWeakDestroy) {
+ // If this is a __weak capture, emit the release directly.
+ } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCWeak) {
EmitARCDestroyWeak(srcField);
// Destroy strong objects with a call if requested.
- } else if (useARCStrongDestroy) {
+ } else if (DestroyedCapture.Kind == BlockCaptureEntityKind::ARCStrong) {
EmitARCDestroyStrong(srcField, ARCImpreciseLifetime);
// Otherwise we call _Block_object_dispose. It wouldn't be too
// hard to just emit this as a cleanup if we wanted to make sure
// that things were done in reverse.
} else {
+ assert(DestroyedCapture.Kind == BlockCaptureEntityKind::BlockObject);
llvm::Value *value = Builder.CreateLoad(srcField);
value = Builder.CreateBitCast(value, VoidPtrTy);
BuildBlockRelease(value, flags);
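The CGBlocks refactoring replaces two long if/else chains with a predicate that classifies each capture and a helper that collects only the captures needing work. A standalone sketch of the shape of that refactoring, with simplified stand-ins for the clang types:

    #include <functional>
    #include <utility>
    #include <vector>

    enum class EntityKind { CXXRecord, ARCWeak, ARCStrong, BlockObject, None };

    struct Capture { bool ByRef; };
    struct ManagedEntity { EntityKind Kind; unsigned Flags; const Capture *C; };

    // Collect only the captures the predicate says need extra work, mirroring
    // findBlockCapturedManagedEntities above.
    static void findManagedEntities(
        const std::vector<Capture> &Captures, std::vector<ManagedEntity> &Out,
        const std::function<std::pair<EntityKind, unsigned>(const Capture &)>
            &Pred) {
      for (const Capture &C : Captures) {
        auto Info = Pred(C);
        if (Info.first != EntityKind::None)
          Out.push_back({Info.first, Info.second, &C});
      }
    }

    int main() {
      std::vector<Capture> Caps = {{true}, {false}};
      std::vector<ManagedEntity> Managed;
      findManagedEntities(Caps, Managed, [](const Capture &C) {
        return C.ByRef ? std::make_pair(EntityKind::BlockObject, 1u)
                       : std::make_pair(EntityKind::None, 0u);
      });
      return Managed.size() == 1 ? 0 : 1;
    }

The copy and destroy helpers then share one iteration scheme and differ only in the predicate they pass, which is the design payoff of the change.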
diff --git a/lib/CodeGen/CGBuiltin.cpp b/lib/CodeGen/CGBuiltin.cpp
index b3d02f1f51c6..6ea0a325a429 100644
--- a/lib/CodeGen/CGBuiltin.cpp
+++ b/lib/CodeGen/CGBuiltin.cpp
@@ -420,10 +420,11 @@ getDefaultBuiltinObjectSizeResult(unsigned Type, llvm::IntegerType *ResType) {
llvm::Value *
CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType) {
+ llvm::IntegerType *ResType,
+ llvm::Value *EmittedE) {
uint64_t ObjectSize;
if (!E->tryEvaluateObjectSize(ObjectSize, getContext(), Type))
- return emitBuiltinObjectSize(E, Type, ResType);
+ return emitBuiltinObjectSize(E, Type, ResType, EmittedE);
return ConstantInt::get(ResType, ObjectSize, /*isSigned=*/true);
}
@@ -432,9 +433,14 @@ CodeGenFunction::evaluateOrEmitBuiltinObjectSize(const Expr *E, unsigned Type,
/// - A llvm::Argument (if E is a param with the pass_object_size attribute on
/// it)
/// - A call to the @llvm.objectsize intrinsic
+///
+/// EmittedE is the result of emitting `E` as a scalar expr. If it's non-null
+/// and we wouldn't otherwise try to reference a pass_object_size parameter,
+/// we'll call @llvm.objectsize on EmittedE, rather than emitting E.
llvm::Value *
CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
- llvm::IntegerType *ResType) {
+ llvm::IntegerType *ResType,
+ llvm::Value *EmittedE) {
// We need to reference an argument if the pointer is a parameter with the
// pass_object_size attribute.
if (auto *D = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts())) {
@@ -457,16 +463,20 @@ CodeGenFunction::emitBuiltinObjectSize(const Expr *E, unsigned Type,
// LLVM can't handle Type=3 appropriately, and __builtin_object_size shouldn't
// evaluate E for side-effects. In either case, we shouldn't lower to
// @llvm.objectsize.
- if (Type == 3 || E->HasSideEffects(getContext()))
+ if (Type == 3 || (!EmittedE && E->HasSideEffects(getContext())))
return getDefaultBuiltinObjectSizeResult(Type, ResType);
- // LLVM only supports 0 and 2, make sure that we pass along that
- // as a boolean.
- auto *CI = ConstantInt::get(Builder.getInt1Ty(), (Type & 2) >> 1);
- // FIXME: Get right address space.
- llvm::Type *Tys[] = {ResType, Builder.getInt8PtrTy(0)};
- Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
- return Builder.CreateCall(F, {EmitScalarExpr(E), CI});
+ Value *Ptr = EmittedE ? EmittedE : EmitScalarExpr(E);
+ assert(Ptr->getType()->isPointerTy() &&
+ "Non-pointer passed to __builtin_object_size?");
+
+ Value *F = CGM.getIntrinsic(Intrinsic::objectsize, {ResType, Ptr->getType()});
+
+  // LLVM only supports 0 and 2; make sure that we pass that along as a boolean.
+ Value *Min = Builder.getInt1((Type & 2) != 0);
+  // For GCC compatibility, __builtin_object_size treats NULL as unknown size.
+ Value *NullIsUnknown = Builder.getTrue();
+ return Builder.CreateCall(F, {Ptr, Min, NullIsUnknown});
}
// Many of MSVC builtins are on both x64 and ARM; to avoid repeating code, we
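
A minimal sketch of what the revised lowering produces for a non-constant
pointer: __builtin_object_size now becomes an @llvm.objectsize call carrying
both the min flag and the new null-is-unknown flag (function and variable
names below are illustrative, not from this patch):

    #include <cstddef>
    std::size_t remaining(char *buf, std::size_t off) {
      // Emitted roughly as:
      //   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 false, i1 true)
      return __builtin_object_size(buf + off, 0);
    }
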
@@ -482,10 +492,12 @@ enum class CodeGenFunction::MSVCIntrin {
_InterlockedIncrement,
_InterlockedOr,
_InterlockedXor,
+ _interlockedbittestandset,
+ __fastfail,
};
Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
- const CallExpr *E) {
+ const CallExpr *E) {
switch (BuiltinID) {
case MSVCIntrin::_BitScanForward:
case MSVCIntrin::_BitScanReverse: {
@@ -548,6 +560,22 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
case MSVCIntrin::_InterlockedXor:
return MakeBinaryAtomicValue(*this, AtomicRMWInst::Xor, E);
+ case MSVCIntrin::_interlockedbittestandset: {
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Bit = EmitScalarExpr(E->getArg(1));
+ AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
+ AtomicRMWInst::Or, Addr,
+ Builder.CreateShl(ConstantInt::get(Bit->getType(), 1), Bit),
+ llvm::AtomicOrdering::SequentiallyConsistent);
+ // Shift the relevant bit to the least significant position, truncate to
+ // the result type, and test the low bit.
+ llvm::Value *Shifted = Builder.CreateLShr(RMWI, Bit);
+ llvm::Value *Truncated =
+ Builder.CreateTrunc(Shifted, ConvertType(E->getType()));
+ return Builder.CreateAnd(Truncated,
+ ConstantInt::get(Truncated->getType(), 1));
+ }
+
case MSVCIntrin::_InterlockedDecrement: {
llvm::Type *IntTy = ConvertType(E->getType());
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
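
What the new _interlockedbittestandset lowering computes, as a portable
sketch (GNU-style atomics stand in for the seq_cst atomicrmw `or` emitted
above):

    unsigned char bts_sketch(long *base, long bit) {
      long mask = 1L << bit;
      long old = __atomic_fetch_or(base, mask, __ATOMIC_SEQ_CST);
      return (unsigned char)((old >> bit) & 1);  // prior value of the bit
    }
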
@@ -566,6 +594,37 @@ Value *CodeGenFunction::EmitMSVCBuiltinExpr(MSVCIntrin BuiltinID,
llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.CreateAdd(RMWI, ConstantInt::get(IntTy, 1));
}
+
+ case MSVCIntrin::__fastfail: {
+ // Request immediate process termination from the kernel. The instruction
+ // sequences to do this are documented on MSDN:
+ // https://msdn.microsoft.com/en-us/library/dn774154.aspx
+ llvm::Triple::ArchType ISA = getTarget().getTriple().getArch();
+ StringRef Asm, Constraints;
+ switch (ISA) {
+ default:
+ ErrorUnsupported(E, "__fastfail call for this architecture");
+ break;
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ Asm = "int $$0x29";
+ Constraints = "{cx}";
+ break;
+ case llvm::Triple::thumb:
+ Asm = "udf #251";
+ Constraints = "{r0}";
+ break;
+ }
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, {Int32Ty}, false);
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, Asm, Constraints, /*SideEffects=*/true);
+ llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoReturn);
+ CallSite CS = Builder.CreateCall(IA, EmitScalarExpr(E->getArg(0)));
+ CS.setAttributes(NoReturnAttr);
+ return CS.getInstruction();
+ }
}
llvm_unreachable("Incorrect MSVC intrinsic!");
}
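
A hedged usage sketch for the new __fastfail support; on x86/x86-64 the call
lowers to `mov ecx, <code>` followed by `int 0x29` and is marked noreturn
(the code value 7 below is purely illustrative):

    extern "C" void __fastfail(unsigned int);  // normally from <intrin.h>
    void die_fast() { __fastfail(7); }         // never returns
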
@@ -932,7 +991,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// We pass this builtin onto the optimizer so that it can figure out the
// object size in more complex cases.
- return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType));
+ return RValue::get(emitBuiltinObjectSize(E->getArg(0), Type, ResType,
+ /*EmittedE=*/nullptr));
}
case Builtin::BI__builtin_prefetch: {
Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
@@ -2195,16 +2255,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_InterlockedXor16:
case Builtin::BI_InterlockedXor:
return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::_InterlockedXor, E));
- case Builtin::BI__readfsdword: {
- llvm::Type *IntTy = ConvertType(E->getType());
- Value *IntToPtr =
- Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
- llvm::PointerType::get(IntTy, 257));
- LoadInst *Load = Builder.CreateAlignedLoad(
- IntTy, IntToPtr, getContext().getTypeAlignInChars(E->getType()));
- Load->setVolatile(true);
- return RValue::get(Load);
- }
+ case Builtin::BI_interlockedbittestandset:
+ return RValue::get(
+ EmitMSVCBuiltinExpr(MSVCIntrin::_interlockedbittestandset, E));
case Builtin::BI__exception_code:
case Builtin::BI_exception_code:
@@ -2218,9 +2271,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
case Builtin::BI_setjmpex: {
if (getTarget().getTriple().isOSMSVCRT()) {
llvm::Type *ArgTypes[] = {Int8PtrTy, Int8PtrTy};
- llvm::AttributeSet ReturnsTwiceAttr =
- AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::ReturnsTwice);
+ llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReturnsTwice);
llvm::Constant *SetJmpEx = CGM.CreateRuntimeFunction(
llvm::FunctionType::get(IntTy, ArgTypes, /*isVarArg=*/false),
"_setjmpex", ReturnsTwiceAttr, /*Local=*/true);
@@ -2238,9 +2291,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
}
case Builtin::BI_setjmp: {
if (getTarget().getTriple().isOSMSVCRT()) {
- llvm::AttributeSet ReturnsTwiceAttr =
- AttributeSet::get(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
- llvm::Attribute::ReturnsTwice);
+ llvm::AttributeList ReturnsTwiceAttr = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::ReturnsTwice);
llvm::Value *Buf = Builder.CreateBitOrPointerCast(
EmitScalarExpr(E->getArg(0)), Int8PtrTy);
llvm::CallSite CS;
@@ -2276,6 +2329,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
break;
}
+ case Builtin::BI__fastfail:
+ return RValue::get(EmitMSVCBuiltinExpr(MSVCIntrin::__fastfail, E));
+
case Builtin::BI__builtin_coro_size: {
auto & Context = getContext();
auto SizeTy = Context.getSizeType();
@@ -2492,25 +2548,36 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
unsigned NumArgs = E->getNumArgs();
llvm::Type *QueueTy = ConvertType(getContext().OCLQueueTy);
- llvm::Type *RangeTy = ConvertType(getContext().OCLNDRangeTy);
+ llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ getContext().getTargetAddressSpace(LangAS::opencl_generic));
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
- llvm::Value *Range = EmitScalarExpr(E->getArg(2));
+ LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
+ llvm::Value *Range = NDRangeL.getAddress().getPointer();
+ llvm::Type *RangeTy = NDRangeL.getAddress().getType();
if (NumArgs == 4) {
// The most basic form of the call with parameters:
// queue_t, kernel_enqueue_flags_t, ndrange_t, block(void)
Name = "__enqueue_kernel_basic";
- llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, Int8PtrTy};
+ llvm::Type *ArgTys[] = {QueueTy, Int32Ty, RangeTy, GenericVoidPtrTy};
llvm::FunctionType *FTy = llvm::FunctionType::get(
Int32Ty, llvm::ArrayRef<llvm::Type *>(ArgTys, 4), false);
- llvm::Value *Block =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy);
+ llvm::Value *Block = Builder.CreatePointerCast(
+ EmitScalarExpr(E->getArg(3)), GenericVoidPtrTy);
- return RValue::get(Builder.CreateCall(
- CGM.CreateRuntimeFunction(FTy, Name), {Queue, Flags, Range, Block}));
+ AttrBuilder B;
+ B.addAttribute(Attribute::ByVal);
+ llvm::AttributeList ByValAttrSet =
+ llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
+
+ auto RTCall =
+ Builder.CreateCall(CGM.CreateRuntimeFunction(FTy, Name, ByValAttrSet),
+ {Queue, Flags, Range, Block});
+ RTCall->setAttributes(ByValAttrSet);
+ return RValue::get(RTCall);
}
assert(NumArgs >= 5 && "Invalid enqueue_kernel signature");
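
Shape of the runtime call the basic form now emits, as an illustrative C
declaration (the exact runtime typedefs vary by OpenCL implementation; the
parameter types below are assumptions):

    extern "C" int __enqueue_kernel_basic(void *queue, int flags,
                                          void *ndrange /* passed byval */,
                                          void *block /* generic i8* */);
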
@@ -2518,14 +2585,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
if (E->getArg(3)->getType()->isBlockPointerType()) {
// No events passed, but has variadic arguments.
Name = "__enqueue_kernel_vaargs";
- llvm::Value *Block =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(3)), Int8PtrTy);
+ llvm::Value *Block = Builder.CreatePointerCast(
+ EmitScalarExpr(E->getArg(3)), GenericVoidPtrTy);
// Create a vector of the arguments, as well as a constant value to
// express to the runtime the number of variadic arguments.
std::vector<llvm::Value *> Args = {Queue, Flags, Range, Block,
ConstantInt::get(IntTy, NumArgs - 4)};
- std::vector<llvm::Type *> ArgTys = {QueueTy, IntTy, RangeTy, Int8PtrTy,
- IntTy};
+ std::vector<llvm::Type *> ArgTys = {QueueTy, IntTy, RangeTy,
+ GenericVoidPtrTy, IntTy};
// Each of the following arguments specifies the size of the corresponding
// argument passed to the enqueued block.
@@ -2555,12 +2622,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// Convert to generic address space.
EventList = Builder.CreatePointerCast(EventList, EventPtrTy);
ClkEvent = Builder.CreatePointerCast(ClkEvent, EventPtrTy);
- llvm::Value *Block =
- Builder.CreateBitCast(EmitScalarExpr(E->getArg(6)), Int8PtrTy);
+ llvm::Value *Block = Builder.CreatePointerCast(
+ EmitScalarExpr(E->getArg(6)), GenericVoidPtrTy);
- std::vector<llvm::Type *> ArgTys = {QueueTy, Int32Ty, RangeTy,
- Int32Ty, EventPtrTy, EventPtrTy,
- Int8PtrTy};
+ std::vector<llvm::Type *> ArgTys = {
+ QueueTy, Int32Ty, RangeTy, Int32Ty,
+ EventPtrTy, EventPtrTy, GenericVoidPtrTy};
std::vector<llvm::Value *> Args = {Queue, Flags, Range, NumEvents,
EventList, ClkEvent, Block};
@@ -2596,26 +2663,30 @@ RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
// OpenCL v2.0 s6.13.17.6 - Kernel query functions need bitcast of block
// parameter.
case Builtin::BIget_kernel_work_group_size: {
+ llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ getContext().getTargetAddressSpace(LangAS::opencl_generic));
Value *Arg = EmitScalarExpr(E->getArg(0));
- Arg = Builder.CreateBitCast(Arg, Int8PtrTy);
- return RValue::get(
- Builder.CreateCall(CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, Int8PtrTy, false),
- "__get_kernel_work_group_size_impl"),
- Arg));
+ Arg = Builder.CreatePointerCast(Arg, GenericVoidPtrTy);
+ return RValue::get(Builder.CreateCall(
+ CGM.CreateRuntimeFunction(
+ llvm::FunctionType::get(IntTy, GenericVoidPtrTy, false),
+ "__get_kernel_work_group_size_impl"),
+ Arg));
}
case Builtin::BIget_kernel_preferred_work_group_size_multiple: {
+ llvm::Type *GenericVoidPtrTy = Builder.getInt8PtrTy(
+ getContext().getTargetAddressSpace(LangAS::opencl_generic));
Value *Arg = EmitScalarExpr(E->getArg(0));
- Arg = Builder.CreateBitCast(Arg, Int8PtrTy);
+ Arg = Builder.CreatePointerCast(Arg, GenericVoidPtrTy);
return RValue::get(Builder.CreateCall(
CGM.CreateRuntimeFunction(
- llvm::FunctionType::get(IntTy, Int8PtrTy, false),
+ llvm::FunctionType::get(IntTy, GenericVoidPtrTy, false),
"__get_kernel_preferred_work_group_multiple_impl"),
Arg));
}
case Builtin::BIprintf:
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
- return EmitCUDADevicePrintfCallExpr(E, ReturnValue);
+ if (getTarget().getTriple().isNVPTX())
+ return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
break;
case Builtin::BI__builtin_canonicalize:
case Builtin::BI__builtin_canonicalizef:
@@ -7115,6 +7186,13 @@ static Value *EmitX86MinMax(CodeGenFunction &CGF, ICmpInst::Predicate Pred,
return EmitX86Select(CGF, Ops[3], Res, Ops[2]);
}
+static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
+ llvm::Type *DstTy) {
+ unsigned NumberOfElements = DstTy->getVectorNumElements();
+ Value *Mask = getMaskVecValue(CGF, Op, NumberOfElements);
+ return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
+}
+
Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
if (BuiltinID == X86::BI__builtin_ms_va_start ||
@@ -7321,7 +7399,12 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_undef128:
case X86::BI__builtin_ia32_undef256:
case X86::BI__builtin_ia32_undef512:
- return UndefValue::get(ConvertType(E->getType()));
+ // The x86 definition of "undef" is not the same as the LLVM definition
+ // (PR32176). We leave optimizing away an unnecessary zero constant to the
+ // IR optimizer and backend.
+ // TODO: If we had a "freeze" IR instruction to generate a fixed undef
+ // value, we should use that here instead of a zero.
+ return llvm::Constant::getNullValue(ConvertType(E->getType()));
case X86::BI__builtin_ia32_vec_init_v8qi:
case X86::BI__builtin_ia32_vec_init_v4hi:
case X86::BI__builtin_ia32_vec_init_v2si:
@@ -7408,6 +7491,21 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
case X86::BI__builtin_ia32_storesd128_mask: {
return EmitX86MaskedStore(*this, Ops, 16);
}
+
+ case X86::BI__builtin_ia32_cvtmask2b128:
+ case X86::BI__builtin_ia32_cvtmask2b256:
+ case X86::BI__builtin_ia32_cvtmask2b512:
+ case X86::BI__builtin_ia32_cvtmask2w128:
+ case X86::BI__builtin_ia32_cvtmask2w256:
+ case X86::BI__builtin_ia32_cvtmask2w512:
+ case X86::BI__builtin_ia32_cvtmask2d128:
+ case X86::BI__builtin_ia32_cvtmask2d256:
+ case X86::BI__builtin_ia32_cvtmask2d512:
+ case X86::BI__builtin_ia32_cvtmask2q128:
+ case X86::BI__builtin_ia32_cvtmask2q256:
+ case X86::BI__builtin_ia32_cvtmask2q512:
+ return EmitX86SExtMask(*this, Ops[0], ConvertType(E->getType()));
+
case X86::BI__builtin_ia32_movdqa32store128_mask:
case X86::BI__builtin_ia32_movdqa64store128_mask:
case X86::BI__builtin_ia32_storeaps128_mask:
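
Scalar sketch of what the cvtmask2* builtins compute via EmitX86SExtMask:
each mask bit is sign-extended across its destination lane (all-ones when
set, zero when clear), e.g. for a four-lane i32 result:

    void movm_epi32_sketch(unsigned char k, int out[4]) {
      for (int i = 0; i < 4; ++i)
        out[i] = ((k >> i) & 1) ? -1 : 0;  // -1 == all-ones lane
    }
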
@@ -7922,6 +8020,45 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
// instruction, but it will create a memset that won't be optimized away.
return Builder.CreateMemSet(Ops[0], Ops[1], Ops[2], 1, true);
}
+ case X86::BI__ud2:
+ // llvm.trap makes a ud2a instruction on x86.
+ return EmitTrapCall(Intrinsic::trap);
+ case X86::BI__int2c: {
+ // This syscall signals a driver assertion failure in x86 NT kernels.
+ llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);
+ llvm::InlineAsm *IA =
+ llvm::InlineAsm::get(FTy, "int $$0x2c", "", /*SideEffects=*/true);
+ llvm::AttributeList NoReturnAttr = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex,
+ llvm::Attribute::NoReturn);
+ CallSite CS = Builder.CreateCall(IA);
+ CS.setAttributes(NoReturnAttr);
+ return CS.getInstruction();
+ }
+ case X86::BI__readfsbyte:
+ case X86::BI__readfsword:
+ case X86::BI__readfsdword:
+ case X86::BI__readfsqword: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
+ llvm::PointerType::get(IntTy, 257));
+ LoadInst *Load = Builder.CreateAlignedLoad(
+ IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
+ Load->setVolatile(true);
+ return Load;
+ }
+ case X86::BI__readgsbyte:
+ case X86::BI__readgsword:
+ case X86::BI__readgsdword:
+ case X86::BI__readgsqword: {
+ llvm::Type *IntTy = ConvertType(E->getType());
+ Value *Ptr = Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
+ llvm::PointerType::get(IntTy, 256));
+ LoadInst *Load = Builder.CreateAlignedLoad(
+ IntTy, Ptr, getContext().getTypeAlignInChars(E->getType()));
+ Load->setVolatile(true);
+ return Load;
+ }
}
}
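
Typical use of the relocated segment-load builtins, as a sketch; 0x30 is the
documented NT_TIB Self-pointer offset on x64 Windows, stated here only as an
assumption for illustration:

    void *teb_self() {
      return (void *)__readgsqword(0x30);  // volatile load via addrspace(256)
    }
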
@@ -8326,6 +8463,14 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_ds_swizzle:
return emitBinaryBuiltin(*this, E, Intrinsic::amdgcn_ds_swizzle);
+ case AMDGPU::BI__builtin_amdgcn_mov_dpp: {
+ llvm::SmallVector<llvm::Value *, 5> Args;
+ for (unsigned I = 0; I != 5; ++I)
+ Args.push_back(EmitScalarExpr(E->getArg(I)));
+ Value *F = CGM.getIntrinsic(Intrinsic::amdgcn_mov_dpp,
+ Args[0]->getType());
+ return Builder.CreateCall(F, Args);
+ }
case AMDGPU::BI__builtin_amdgcn_div_fixup:
case AMDGPU::BI__builtin_amdgcn_div_fixupf:
case AMDGPU::BI__builtin_amdgcn_div_fixuph:
@@ -8391,7 +8536,9 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_classf:
case AMDGPU::BI__builtin_amdgcn_classh:
return emitFPIntBuiltin(*this, E, Intrinsic::amdgcn_class);
-
+ case AMDGPU::BI__builtin_amdgcn_fmed3f:
+ case AMDGPU::BI__builtin_amdgcn_fmed3h:
+ return emitTernaryBuiltin(*this, E, Intrinsic::amdgcn_fmed3);
case AMDGPU::BI__builtin_amdgcn_read_exec: {
CallInst *CI = cast<CallInst>(
EmitSpecialRegisterBuiltin(*this, E, Int64Ty, Int64Ty, true, "exec"));
diff --git a/lib/CodeGen/CGCUDANV.cpp b/lib/CodeGen/CGCUDANV.cpp
index 83febcb4af8c..813cd7400186 100644
--- a/lib/CodeGen/CGCUDANV.cpp
+++ b/lib/CodeGen/CGCUDANV.cpp
@@ -15,7 +15,7 @@
#include "CGCUDARuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
-#include "ConstantBuilder.h"
+#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/AST/Decl.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
diff --git a/lib/CodeGen/CGCXX.cpp b/lib/CodeGen/CGCXX.cpp
index 59010f4407c2..0f3141ab76d0 100644
--- a/lib/CodeGen/CGCXX.cpp
+++ b/lib/CodeGen/CGCXX.cpp
@@ -256,7 +256,7 @@ llvm::Constant *CodeGenModule::getAddrOfCXXStructor(
return GetOrCreateLLVMFunction(
getMangledName(GD), FnType, GD, /*ForVTable=*/false, DontDefer,
- /*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeSet(), IsForDefinition);
+ /*isThunk=*/false, /*ExtraAttrs=*/llvm::AttributeList(), IsForDefinition);
}
static CGCallee BuildAppleKextVirtualCall(CodeGenFunction &CGF,
diff --git a/lib/CodeGen/CGCXXABI.h b/lib/CodeGen/CGCXXABI.h
index d53fd4cb63b2..7b912e3aca57 100644
--- a/lib/CodeGen/CGCXXABI.h
+++ b/lib/CodeGen/CGCXXABI.h
@@ -291,11 +291,26 @@ public:
/// Emit constructor variants required by this ABI.
virtual void EmitCXXConstructors(const CXXConstructorDecl *D) = 0;
+ /// Notes how many arguments were added to the beginning (Prefix) and ending
+ /// (Suffix) of an arg list.
+ ///
+ /// Note that Prefix actually refers to the number of args *after* the first
+ /// one: `this` arguments always come first.
+ struct AddedStructorArgs {
+ unsigned Prefix = 0;
+ unsigned Suffix = 0;
+ AddedStructorArgs() = default;
+ AddedStructorArgs(unsigned P, unsigned S) : Prefix(P), Suffix(S) {}
+ static AddedStructorArgs prefix(unsigned N) { return {N, 0}; }
+ static AddedStructorArgs suffix(unsigned N) { return {0, N}; }
+ };
+
/// Build the signature of the given constructor or destructor variant by
/// adding any required parameters. For convenience, ArgTys has been
/// initialized with the type of 'this'.
- virtual void buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
- SmallVectorImpl<CanQualType> &ArgTys) = 0;
+ virtual AddedStructorArgs
+ buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
+ SmallVectorImpl<CanQualType> &ArgTys) = 0;
/// Returns true if the given destructor type should be emitted as a linkonce
/// delegating thunk, regardless of whether the dtor is defined in this TU or
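
A sketch of how an ABI implementation might use the new return type;
`MyABI`, `NeedsVTT`, and `VTTTy` are placeholders, not names from this patch:

    CGCXXABI::AddedStructorArgs
    MyABI::buildStructorSignature(const CXXMethodDecl *MD, StructorType T,
                                  SmallVectorImpl<CanQualType> &ArgTys) {
      if (NeedsVTT) {                              // hypothetical condition
        ArgTys.insert(ArgTys.begin() + 1, VTTTy);  // right after `this`
        return AddedStructorArgs::prefix(1);
      }
      return AddedStructorArgs{};                  // nothing added
    }
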
@@ -355,9 +370,9 @@ public:
/// Add any ABI-specific implicit arguments needed to call a constructor.
///
- /// \return The number of args added to the call, which is typically zero or
- /// one.
- virtual unsigned
+ /// \return The number of arguments added at the beginning and end of the
+ /// call, which is typically zero or one.
+ virtual AddedStructorArgs
addImplicitConstructorArgs(CodeGenFunction &CGF, const CXXConstructorDecl *D,
CXXCtorType Type, bool ForVirtualBase,
bool Delegating, CallArgList &Args) = 0;
@@ -377,7 +392,7 @@ public:
isVirtualOffsetNeededForVTableField(CodeGenFunction &CGF,
CodeGenFunction::VPtr Vptr) = 0;
- /// Checks if ABI requires to initilize vptrs for given dynamic class.
+ /// Checks if the ABI requires initializing vptrs for the given dynamic class.
virtual bool doStructorsInitializeVPtrs(const CXXRecordDecl *VTableClass) = 0;
/// Get the address point of the vtable for the given base subobject.
diff --git a/lib/CodeGen/CGCall.cpp b/lib/CodeGen/CGCall.cpp
index c7c61e0c8ecb..8af32055fc4c 100644
--- a/lib/CodeGen/CGCall.cpp
+++ b/lib/CodeGen/CGCall.cpp
@@ -101,39 +101,64 @@ CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
FTNP->getExtInfo(), {}, RequiredArgs(0));
}
+static void addExtParameterInfosForCall(
+ llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
+ const FunctionProtoType *proto,
+ unsigned prefixArgs,
+ unsigned totalArgs) {
+ assert(proto->hasExtParameterInfos());
+ assert(paramInfos.size() <= prefixArgs);
+ assert(proto->getNumParams() + prefixArgs <= totalArgs);
+
+ paramInfos.reserve(totalArgs);
+
+ // Add default infos for any prefix args that don't already have infos.
+ paramInfos.resize(prefixArgs);
+
+ // Add infos for the prototype.
+ for (const auto &ParamInfo : proto->getExtParameterInfos()) {
+ paramInfos.push_back(ParamInfo);
+ // pass_object_size params have no parameter info.
+ if (ParamInfo.hasPassObjectSize())
+ paramInfos.emplace_back();
+ }
+
+ assert(paramInfos.size() <= totalArgs &&
+ "Did we forget to insert pass_object_size args?");
+ // Add default infos for the variadic and/or suffix arguments.
+ paramInfos.resize(totalArgs);
+}
+
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
SmallVectorImpl<CanQualType> &prefix,
SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
- CanQual<FunctionProtoType> FPT,
- const FunctionDecl *FD) {
- // Fill out paramInfos.
- if (FPT->hasExtParameterInfos() || !paramInfos.empty()) {
- assert(paramInfos.size() <= prefix.size());
- auto protoParamInfos = FPT->getExtParameterInfos();
- paramInfos.reserve(prefix.size() + protoParamInfos.size());
- paramInfos.resize(prefix.size());
- paramInfos.append(protoParamInfos.begin(), protoParamInfos.end());
- }
-
- // Fast path: unknown target.
- if (FD == nullptr) {
+ CanQual<FunctionProtoType> FPT) {
+ // Fast path: don't touch param info if we don't need to.
+ if (!FPT->hasExtParameterInfos()) {
+ assert(paramInfos.empty() &&
+ "We have paramInfos, but the prototype doesn't?");
prefix.append(FPT->param_type_begin(), FPT->param_type_end());
return;
}
- // In the vast majority cases, we'll have precisely FPT->getNumParams()
+ unsigned PrefixSize = prefix.size();
+ // In the vast majority of cases, we'll have precisely FPT->getNumParams()
// parameters; the only thing that can change this is the presence of
// pass_object_size. So, we preallocate for the common case.
prefix.reserve(prefix.size() + FPT->getNumParams());
- assert(FD->getNumParams() == FPT->getNumParams());
+ auto ExtInfos = FPT->getExtParameterInfos();
+ assert(ExtInfos.size() == FPT->getNumParams());
for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
prefix.push_back(FPT->getParamType(I));
- if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
+ if (ExtInfos[I].hasPassObjectSize())
prefix.push_back(CGT.getContext().getSizeType());
}
+
+ addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
+ prefix.size());
}
/// Arrange the LLVM function layout for a value of the given function
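
Why appendParameterTypes can push more entries than the prototype declares,
as a sketch: a pass_object_size parameter lowers to two IR arguments, so its
ExtParameterInfo is followed by a default-constructed entry.

    void fill(void *const p __attribute__((pass_object_size(0))), int v);
    // arranges roughly as (i8* %p, i64 %p.objsize, i32 %v)
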
@@ -147,7 +172,7 @@ arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
RequiredArgs Required =
RequiredArgs::forPrototypePlus(FTP, prefix.size(), FD);
// FIXME: Kill copy.
- appendParameterTypes(CGT, prefix, paramInfos, FTP, FD);
+ appendParameterTypes(CGT, prefix, paramInfos, FTP);
CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
@@ -286,9 +311,19 @@ CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
// Add the formal parameters.
if (PassParams)
- appendParameterTypes(*this, argTypes, paramInfos, FTP, MD);
-
- TheCXXABI.buildStructorSignature(MD, Type, argTypes);
+ appendParameterTypes(*this, argTypes, paramInfos, FTP);
+
+ CGCXXABI::AddedStructorArgs AddedArgs =
+ TheCXXABI.buildStructorSignature(MD, Type, argTypes);
+ if (!paramInfos.empty()) {
+ // Note: Prefix args are inserted after the first param (`this`).
+ if (AddedArgs.Prefix)
+ paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
+ FunctionProtoType::ExtParameterInfo{});
+ if (AddedArgs.Suffix)
+ paramInfos.append(AddedArgs.Suffix,
+ FunctionProtoType::ExtParameterInfo{});
+ }
RequiredArgs required =
(PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
@@ -321,26 +356,6 @@ getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
return argTypes;
}
-static void addExtParameterInfosForCall(
- llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
- const FunctionProtoType *proto,
- unsigned prefixArgs,
- unsigned totalArgs) {
- assert(proto->hasExtParameterInfos());
- assert(paramInfos.size() <= prefixArgs);
- assert(proto->getNumParams() + prefixArgs <= totalArgs);
-
- // Add default infos for any prefix args that don't already have infos.
- paramInfos.resize(prefixArgs);
-
- // Add infos for the prototype.
- auto protoInfos = proto->getExtParameterInfos();
- paramInfos.append(protoInfos.begin(), protoInfos.end());
-
- // Add default infos for the variadic arguments.
- paramInfos.resize(totalArgs);
-}
-
static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
unsigned prefixArgs, unsigned totalArgs) {
@@ -352,18 +367,31 @@ getExtParameterInfosForCall(const FunctionProtoType *proto,
}
/// Arrange a call to a C++ method, passing the given arguments.
+///
+/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
+/// parameter.
+/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
+/// args.
+/// PassProtoArgs indicates whether `args` has args for the parameters in the
+/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
const CXXConstructorDecl *D,
CXXCtorType CtorKind,
- unsigned ExtraArgs) {
+ unsigned ExtraPrefixArgs,
+ unsigned ExtraSuffixArgs,
+ bool PassProtoArgs) {
// FIXME: Kill copy.
SmallVector<CanQualType, 16> ArgTypes;
for (const auto &Arg : args)
ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
+ // +1 for implicit this, which should always be args[0].
+ unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;
+
CanQual<FunctionProtoType> FPT = GetFormalType(D);
- RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs, D);
+ RequiredArgs Required =
+ RequiredArgs::forPrototypePlus(FPT, TotalPrefixArgs + ExtraSuffixArgs, D);
GlobalDecl GD(D, CtorKind);
CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
? ArgTypes.front()
@@ -372,8 +400,14 @@ CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
: Context.VoidTy;
FunctionType::ExtInfo Info = FPT->getExtInfo();
- auto ParamInfos = getExtParameterInfosForCall(FPT.getTypePtr(), 1 + ExtraArgs,
- ArgTypes.size());
+ llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
+ // If the prototype args are elided, we should only have ABI-specific args,
+ // which never have param info.
+ if (PassProtoArgs && FPT->hasExtParameterInfos()) {
+ // ABI-specific suffix arguments are treated the same as variadic arguments.
+ addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
+ ArgTypes.size());
+ }
return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
/*chainCall=*/false, ArgTypes, Info,
ParamInfos, Required);
@@ -617,15 +651,20 @@ CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
}
/// Arrange a call to a C++ method, passing the given arguments.
+///
+/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
+/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
const FunctionProtoType *proto,
- RequiredArgs required) {
- unsigned numRequiredArgs =
- (proto->isVariadic() ? required.getNumRequiredArgs() : args.size());
- unsigned numPrefixArgs = numRequiredArgs - proto->getNumParams();
+ RequiredArgs required,
+ unsigned numPrefixArgs) {
+ assert(numPrefixArgs + 1 <= args.size() &&
+ "Emitting a call with less args than the required prefix?");
+ // Add one to account for `this`. It's a bit awkward here, but we don't count
+ // `this` in similar places elsewhere.
auto paramInfos =
- getExtParameterInfosForCall(proto, numPrefixArgs, args.size());
+ getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());
// FIXME: Kill copy.
auto argTypes = getArgTypesForCall(Context, args);
@@ -680,7 +719,7 @@ CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
RequiredArgs required) {
assert(std::all_of(argTypes.begin(), argTypes.end(),
- std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));
+ [](CanQualType T) { return T.isCanonicalAsParam(); }));
// Lookup or create unique function info.
llvm::FoldingSetNodeID ID;
@@ -1620,15 +1659,113 @@ static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
+void CodeGenModule::ConstructDefaultFnAttrList(StringRef Name, bool HasOptnone,
+ bool AttrOnCallSite,
+ llvm::AttrBuilder &FuncAttrs) {
+ // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
+ if (!HasOptnone) {
+ if (CodeGenOpts.OptimizeSize)
+ FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
+ if (CodeGenOpts.OptimizeSize == 2)
+ FuncAttrs.addAttribute(llvm::Attribute::MinSize);
+ }
+
+ if (CodeGenOpts.DisableRedZone)
+ FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
+ if (CodeGenOpts.NoImplicitFloat)
+ FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
+
+ if (AttrOnCallSite) {
+ // Attributes that should go on the call site only.
+ if (!CodeGenOpts.SimplifyLibCalls ||
+ CodeGenOpts.isNoBuiltinFunc(Name.data()))
+ FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
+ if (!CodeGenOpts.TrapFuncName.empty())
+ FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
+ } else {
+ // Attributes that should go on the function, but not the call site.
+ if (!CodeGenOpts.DisableFPElim) {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ } else if (CodeGenOpts.OmitLeafFramePointer) {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ } else {
+ FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
+ FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
+ }
+
+ FuncAttrs.addAttribute("less-precise-fpmad",
+ llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
+
+ if (!CodeGenOpts.FPDenormalMode.empty())
+ FuncAttrs.addAttribute("denormal-fp-math", CodeGenOpts.FPDenormalMode);
+
+ FuncAttrs.addAttribute("no-trapping-math",
+ llvm::toStringRef(CodeGenOpts.NoTrappingMath));
+
+ // TODO: Are these all needed?
+ // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
+ FuncAttrs.addAttribute("no-infs-fp-math",
+ llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
+ FuncAttrs.addAttribute("no-nans-fp-math",
+ llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
+ FuncAttrs.addAttribute("unsafe-fp-math",
+ llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
+ FuncAttrs.addAttribute("use-soft-float",
+ llvm::toStringRef(CodeGenOpts.SoftFloat));
+ FuncAttrs.addAttribute("stack-protector-buffer-size",
+ llvm::utostr(CodeGenOpts.SSPBufferSize));
+ FuncAttrs.addAttribute("no-signed-zeros-fp-math",
+ llvm::toStringRef(CodeGenOpts.NoSignedZeros));
+ FuncAttrs.addAttribute(
+ "correctly-rounded-divide-sqrt-fp-math",
+ llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
+
+ // TODO: Reciprocal estimate codegen options should apply to instructions?
+ std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
+ if (!Recips.empty())
+ FuncAttrs.addAttribute("reciprocal-estimates",
+ llvm::join(Recips.begin(), Recips.end(), ","));
+
+ if (CodeGenOpts.StackRealignment)
+ FuncAttrs.addAttribute("stackrealign");
+ if (CodeGenOpts.Backchain)
+ FuncAttrs.addAttribute("backchain");
+ }
+
+ if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
+ // Conservatively, mark all functions and calls in CUDA as convergent
+ // (meaning, they may call an intrinsically convergent op, such as
+ // __syncthreads(), and so can't have certain optimizations applied around
+ // them). LLVM will remove this attribute where it safely can.
+ FuncAttrs.addAttribute(llvm::Attribute::Convergent);
+
+ // Exceptions aren't supported in CUDA device code.
+ FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
+
+ // Respect -fcuda-flush-denormals-to-zero.
+ if (getLangOpts().CUDADeviceFlushDenormalsToZero)
+ FuncAttrs.addAttribute("nvptx-f32ftz", "true");
+ }
+}
+
+void CodeGenModule::AddDefaultFnAttrs(llvm::Function &F) {
+ llvm::AttrBuilder FuncAttrs;
+ ConstructDefaultFnAttrList(F.getName(),
+ F.hasFnAttribute(llvm::Attribute::OptimizeNone),
+ /* AttrOnCallSite = */ false, FuncAttrs);
+ llvm::AttributeList AS = llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs);
+ F.addAttributes(llvm::AttributeList::FunctionIndex, AS);
+}
+
void CodeGenModule::ConstructAttributeList(
StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
llvm::AttrBuilder FuncAttrs;
llvm::AttrBuilder RetAttrs;
- bool HasOptnone = false;
CallingConv = FI.getEffectiveCallingConvention();
-
if (FI.isNoReturn())
FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
@@ -1639,7 +1776,7 @@ void CodeGenModule::ConstructAttributeList(
const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
- bool HasAnyX86InterruptAttr = false;
+ bool HasOptnone = false;
// FIXME: handle sseregparm someday...
if (TargetDecl) {
if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
@@ -1679,7 +1816,6 @@ void CodeGenModule::ConstructAttributeList(
if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
RetAttrs.addAttribute(llvm::Attribute::NonNull);
- HasAnyX86InterruptAttr = TargetDecl->hasAttr<AnyX86InterruptAttr>();
HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
Optional<unsigned> NumElemsParam;
@@ -1691,86 +1827,19 @@ void CodeGenModule::ConstructAttributeList(
}
}
- // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
- if (!HasOptnone) {
- if (CodeGenOpts.OptimizeSize)
- FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
- if (CodeGenOpts.OptimizeSize == 2)
- FuncAttrs.addAttribute(llvm::Attribute::MinSize);
- }
+ ConstructDefaultFnAttrList(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
- if (CodeGenOpts.DisableRedZone)
- FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
- if (CodeGenOpts.NoImplicitFloat)
- FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
if (CodeGenOpts.EnableSegmentedStacks &&
!(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
FuncAttrs.addAttribute("split-stack");
- if (AttrOnCallSite) {
- // Attributes that should go on the call site only.
- if (!CodeGenOpts.SimplifyLibCalls ||
- CodeGenOpts.isNoBuiltinFunc(Name.data()))
- FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
- if (!CodeGenOpts.TrapFuncName.empty())
- FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
- } else {
- // Attributes that should go on the function, but not the call site.
- if (!CodeGenOpts.DisableFPElim) {
- FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
- } else if (CodeGenOpts.OmitLeafFramePointer) {
- FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
- } else {
- FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
- FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
- }
-
+ if (!AttrOnCallSite) {
bool DisableTailCalls =
- CodeGenOpts.DisableTailCalls || HasAnyX86InterruptAttr ||
- (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
- FuncAttrs.addAttribute(
- "disable-tail-calls",
- llvm::toStringRef(DisableTailCalls));
-
- FuncAttrs.addAttribute("less-precise-fpmad",
- llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
-
- if (!CodeGenOpts.FPDenormalMode.empty())
- FuncAttrs.addAttribute("denormal-fp-math",
- CodeGenOpts.FPDenormalMode);
-
- FuncAttrs.addAttribute("no-trapping-math",
- llvm::toStringRef(CodeGenOpts.NoTrappingMath));
-
- // TODO: Are these all needed?
- // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
- FuncAttrs.addAttribute("no-infs-fp-math",
- llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
- FuncAttrs.addAttribute("no-nans-fp-math",
- llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
- FuncAttrs.addAttribute("unsafe-fp-math",
- llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
- FuncAttrs.addAttribute("use-soft-float",
- llvm::toStringRef(CodeGenOpts.SoftFloat));
- FuncAttrs.addAttribute("stack-protector-buffer-size",
- llvm::utostr(CodeGenOpts.SSPBufferSize));
- FuncAttrs.addAttribute("no-signed-zeros-fp-math",
- llvm::toStringRef(CodeGenOpts.NoSignedZeros));
- FuncAttrs.addAttribute(
- "correctly-rounded-divide-sqrt-fp-math",
- llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));
-
- // TODO: Reciprocal estimate codegen options should apply to instructions?
- std::vector<std::string> &Recips = getTarget().getTargetOpts().Reciprocals;
- if (!Recips.empty())
- FuncAttrs.addAttribute("reciprocal-estimates",
- llvm::join(Recips.begin(), Recips.end(), ","));
-
- if (CodeGenOpts.StackRealignment)
- FuncAttrs.addAttribute("stackrealign");
- if (CodeGenOpts.Backchain)
- FuncAttrs.addAttribute("backchain");
+ CodeGenOpts.DisableTailCalls ||
+ (TargetDecl && (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
+ TargetDecl->hasAttr<AnyX86InterruptAttr>()));
+ FuncAttrs.addAttribute("disable-tail-calls",
+ llvm::toStringRef(DisableTailCalls));
// Add target-cpu and target-features attributes to functions. If
// we have a decl for the function and it has a target attribute then
@@ -1819,21 +1888,6 @@ void CodeGenModule::ConstructAttributeList(
}
}
- if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
- // Conservatively, mark all functions and calls in CUDA as convergent
- // (meaning, they may call an intrinsically convergent op, such as
- // __syncthreads(), and so can't have certain optimizations applied around
- // them). LLVM will remove this attribute where it safely can.
- FuncAttrs.addAttribute(llvm::Attribute::Convergent);
-
- // Exceptions aren't supported in CUDA device code.
- FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
-
- // Respect -fcuda-flush-denormals-to-zero.
- if (getLangOpts().CUDADeviceFlushDenormalsToZero)
- FuncAttrs.addAttribute("nvptx-f32ftz", "true");
- }
-
ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
QualType RetTy = FI.getReturnType();
@@ -1878,8 +1932,8 @@ void CodeGenModule::ConstructAttributeList(
// Attach return attributes.
if (RetAttrs.hasAttributes()) {
- PAL.push_back(llvm::AttributeSet::get(
- getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
+ PAL.push_back(llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::ReturnIndex, RetAttrs));
}
bool hasUsedSRet = false;
@@ -1891,7 +1945,7 @@ void CodeGenModule::ConstructAttributeList(
hasUsedSRet = true;
if (RetAI.getInReg())
SRETAttrs.addAttribute(llvm::Attribute::InReg);
- PAL.push_back(llvm::AttributeSet::get(
+ PAL.push_back(llvm::AttributeList::get(
getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
}
@@ -1899,7 +1953,7 @@ void CodeGenModule::ConstructAttributeList(
if (IRFunctionArgs.hasInallocaArg()) {
llvm::AttrBuilder Attrs;
Attrs.addAttribute(llvm::Attribute::InAlloca);
- PAL.push_back(llvm::AttributeSet::get(
+ PAL.push_back(llvm::AttributeList::get(
getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
}
@@ -1914,7 +1968,7 @@ void CodeGenModule::ConstructAttributeList(
// Add attribute for padding argument, if necessary.
if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
if (AI.getPaddingInReg())
- PAL.push_back(llvm::AttributeSet::get(
+ PAL.push_back(llvm::AttributeList::get(
getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
llvm::Attribute::InReg));
}
@@ -2031,17 +2085,15 @@ void CodeGenModule::ConstructAttributeList(
unsigned FirstIRArg, NumIRArgs;
std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
for (unsigned i = 0; i < NumIRArgs; i++)
- PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
- FirstIRArg + i + 1, Attrs));
+ PAL.push_back(llvm::AttributeList::get(getLLVMContext(),
+ FirstIRArg + i + 1, Attrs));
}
}
assert(ArgNo == FI.arg_size());
if (FuncAttrs.hasAttributes())
- PAL.push_back(llvm::
- AttributeSet::get(getLLVMContext(),
- llvm::AttributeSet::FunctionIndex,
- FuncAttrs));
+ PAL.push_back(llvm::AttributeList::get(
+ getLLVMContext(), llvm::AttributeList::FunctionIndex, FuncAttrs));
}
/// An argument came in as a promoted argument; demote it back to its
@@ -2152,8 +2204,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (IRFunctionArgs.hasSRetArg()) {
auto AI = cast<llvm::Argument>(FnArgs[IRFunctionArgs.getSRetArgNo()]);
AI->setName("agg.result");
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
- llvm::Attribute::NoAlias));
+ AI->addAttr(llvm::AttributeList::get(getLLVMContext(), AI->getArgNo() + 1,
+ llvm::Attribute::NoAlias));
}
// Track if we received the parameter as a pointer (indirect, byval, or
@@ -2244,9 +2296,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
PVD->getFunctionScopeIndex()))
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
QualType OTy = PVD->getOriginalType();
if (const auto *ArrTy =
@@ -2263,12 +2315,12 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::AttrBuilder Attrs;
Attrs.addDereferenceableAttr(
getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1, Attrs));
+ AI->addAttr(llvm::AttributeList::get(
+ getLLVMContext(), AI->getArgNo() + 1, Attrs));
} else if (getContext().getTargetAddressSpace(ETy) == 0) {
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
}
}
} else if (const auto *ArrTy =
@@ -2278,9 +2330,9 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// we know that it must be nonnull.
if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
!getContext().getTargetAddressSpace(ArrTy->getElementType()))
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NonNull));
+ AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
+ AI->getArgNo() + 1,
+ llvm::Attribute::NonNull));
}
const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
@@ -2298,15 +2350,14 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
llvm::AttrBuilder Attrs;
Attrs.addAlignmentAttr(Alignment);
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1, Attrs));
+ AI->addAttr(llvm::AttributeList::get(getLLVMContext(),
+ AI->getArgNo() + 1, Attrs));
}
}
if (Arg->getType().isRestrictQualified())
- AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
- AI->getArgNo() + 1,
- llvm::Attribute::NoAlias));
+ AI->addAttr(llvm::AttributeList::get(
+ getLLVMContext(), AI->getArgNo() + 1, llvm::Attribute::NoAlias));
// LLVM expects swifterror parameters to be used in very restricted
// ways. Copy the value into a less-restricted temporary.
@@ -2858,19 +2909,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
llvm::Instruction *Ret;
if (RV) {
- if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
- if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
- SanitizerScope SanScope(this);
- llvm::Value *Cond = Builder.CreateICmpNE(
- RV, llvm::Constant::getNullValue(RV->getType()));
- llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(EndLoc),
- EmitCheckSourceLocation(RetNNAttr->getLocation()),
- };
- EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
- SanitizerHandler::NonnullReturn, StaticData, None);
- }
- }
+ EmitReturnValueCheck(RV, EndLoc);
Ret = Builder.CreateRet(RV);
} else {
Ret = Builder.CreateRetVoid();
@@ -2880,6 +2919,63 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
Ret->setDebugLoc(std::move(RetDbgLoc));
}
+void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV,
+ SourceLocation EndLoc) {
+ // A current decl may not be available when emitting vtable thunks.
+ if (!CurCodeDecl)
+ return;
+
+ ReturnsNonNullAttr *RetNNAttr = nullptr;
+ if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
+ RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
+
+ if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
+ return;
+
+ // Prefer the returns_nonnull attribute if it's present.
+ SourceLocation AttrLoc;
+ SanitizerMask CheckKind;
+ SanitizerHandler Handler;
+ if (RetNNAttr) {
+ assert(!requiresReturnValueNullabilityCheck() &&
+ "Cannot check nullability and the nonnull attribute");
+ AttrLoc = RetNNAttr->getLocation();
+ CheckKind = SanitizerKind::ReturnsNonnullAttribute;
+ Handler = SanitizerHandler::NonnullReturn;
+ } else {
+ if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
+ if (auto *TSI = DD->getTypeSourceInfo())
+ if (auto FTL = TSI->getTypeLoc().castAs<FunctionTypeLoc>())
+ AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
+ CheckKind = SanitizerKind::NullabilityReturn;
+ Handler = SanitizerHandler::NullabilityReturn;
+ }
+
+ SanitizerScope SanScope(this);
+
+ llvm::BasicBlock *Check = nullptr;
+ llvm::BasicBlock *NoCheck = nullptr;
+ if (requiresReturnValueNullabilityCheck()) {
+ // Before doing the nullability check, make sure that the preconditions for
+ // the check are met.
+ Check = createBasicBlock("nullcheck");
+ NoCheck = createBasicBlock("no.nullcheck");
+ Builder.CreateCondBr(RetValNullabilityPrecondition, Check, NoCheck);
+ EmitBlock(Check);
+ }
+
+ // Now do the null check. If the returns_nonnull attribute is present, this
+ // is done unconditionally.
+ llvm::Value *Cond = Builder.CreateIsNotNull(RV);
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(EndLoc), EmitCheckSourceLocation(AttrLoc),
+ };
+ EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
+
+ if (requiresReturnValueNullabilityCheck())
+ EmitBlock(NoCheck);
+}
+
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
@@ -3188,50 +3284,63 @@ void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
SourceLocation ArgLoc,
- const FunctionDecl *FD,
+ AbstractCallee AC,
unsigned ParmNum) {
- if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
+ if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
+ SanOpts.has(SanitizerKind::NullabilityArg)))
return;
- auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
+
+ // The param decl may be missing in a variadic function.
+ auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
- auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
- if (!NNAttr)
+
+ // Prefer the nonnull attribute if it's present.
+ const NonNullAttr *NNAttr = nullptr;
+ if (SanOpts.has(SanitizerKind::NonnullAttribute))
+ NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
+
+ bool CanCheckNullability = false;
+ if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
+ auto Nullability = PVD->getType()->getNullability(getContext());
+ CanCheckNullability = Nullability &&
+ *Nullability == NullabilityKind::NonNull &&
+ PVD->getTypeSourceInfo();
+ }
+
+ if (!NNAttr && !CanCheckNullability)
return;
+
+ SourceLocation AttrLoc;
+ SanitizerMask CheckKind;
+ SanitizerHandler Handler;
+ if (NNAttr) {
+ AttrLoc = NNAttr->getLocation();
+ CheckKind = SanitizerKind::NonnullAttribute;
+ Handler = SanitizerHandler::NonnullArg;
+ } else {
+ AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
+ CheckKind = SanitizerKind::NullabilityArg;
+ Handler = SanitizerHandler::NullabilityArg;
+ }
+
SanitizerScope SanScope(this);
assert(RV.isScalar());
llvm::Value *V = RV.getScalarVal();
llvm::Value *Cond =
Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
llvm::Constant *StaticData[] = {
- EmitCheckSourceLocation(ArgLoc),
- EmitCheckSourceLocation(NNAttr->getLocation()),
+ EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
};
- EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
- SanitizerHandler::NonnullArg, StaticData, None);
+ EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
}
void CodeGenFunction::EmitCallArgs(
CallArgList &Args, ArrayRef<QualType> ArgTypes,
llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
- const FunctionDecl *CalleeDecl, unsigned ParamsToSkip,
- EvaluationOrder Order) {
+ AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
- auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
- if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
- return;
- auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
- if (PS == nullptr)
- return;
-
- const auto &Context = getContext();
- auto SizeTy = Context.getSizeType();
- auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
- llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
- Args.add(RValue::get(V), SizeTy);
- };
-
// We *have* to evaluate arguments from right to left in the MS C++ ABI,
// because arguments are destroyed left to right in the callee. As a special
// case, there are certain language constructs that require left-to-right
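
The argument-side counterpart, instrumented under -fsanitize=nullability-arg
(again a sketch): passing a possibly-null pointer to a _Nonnull parameter.

    void consume(int *_Nonnull p);
    void forward(int *q) { consume(q); }  // q is null-checked at the call
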
@@ -3242,6 +3351,27 @@ void CodeGenFunction::EmitCallArgs(
? Order == EvaluationOrder::ForceLeftToRight
: Order != EvaluationOrder::ForceRightToLeft;
+ auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
+ RValue EmittedArg) {
+ if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
+ return;
+ auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
+ if (PS == nullptr)
+ return;
+
+ const auto &Context = getContext();
+ auto SizeTy = Context.getSizeType();
+ auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
+ assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
+ llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
+ EmittedArg.getScalarVal());
+ Args.add(RValue::get(V), SizeTy);
+ // If we're emitting args in reverse, be sure to do so with
+ // pass_object_size, as well.
+ if (!LeftToRight)
+ std::swap(Args.back(), *(&Args.back() - 1));
+ };
+
// Insert a stack save if we're going to need any inalloca args.
bool HasInAllocaArgs = false;
if (CGM.getTarget().getCXXABI().isMicrosoft()) {
@@ -3259,11 +3389,20 @@ void CodeGenFunction::EmitCallArgs(
for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
unsigned Idx = LeftToRight ? I : E - I - 1;
CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
- if (!LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ unsigned InitialArgSize = Args.size();
EmitCallArg(Args, *Arg, ArgTypes[Idx]);
- EmitNonNullArgCheck(Args.back().RV, ArgTypes[Idx], (*Arg)->getExprLoc(),
- CalleeDecl, ParamsToSkip + Idx);
- if (LeftToRight) MaybeEmitImplicitObjectSize(Idx, *Arg);
+ // The code below depends on the emitted argument being the last one in
+ // Args, and the objectsize bits depend on there only being one arg if
+ // !LeftToRight.
+ assert(InitialArgSize + 1 == Args.size() &&
+ "The code below depends on only adding one arg per EmitCallArg");
+ (void)InitialArgSize;
+ RValue RVArg = Args.back().RV;
+ EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
+ ParamsToSkip + Idx);
+ // @llvm.objectsize should never have side-effects and shouldn't need
+ // destruction/cleanups, so we can safely "emit" it after its arg,
+ // regardless of right-to-leftness.
+ MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
}
if (!LeftToRight) {
@@ -3571,12 +3710,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address ArgMemory = Address::invalid();
const llvm::StructLayout *ArgMemoryLayout = nullptr;
if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
- ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
+ const llvm::DataLayout &DL = CGM.getDataLayout();
+ ArgMemoryLayout = DL.getStructLayout(ArgStruct);
llvm::Instruction *IP = CallArgs.getStackBase();
llvm::AllocaInst *AI;
if (IP) {
IP = IP->getNextNode();
- AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
+ AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
+ "argmem", IP);
} else {
AI = CreateTempAlloca(ArgStruct, "argmem");
}
@@ -3977,8 +4118,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Callee.getAbstractInfo(),
AttributeList, CallingConv,
/*AttrOnCallSite=*/true);
- llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
- AttributeList);
+ llvm::AttributeList Attrs =
+ llvm::AttributeList::get(getLLVMContext(), AttributeList);
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
@@ -3989,15 +4130,14 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
!(Callee.getAbstractInfo().getCalleeDecl() &&
Callee.getAbstractInfo().getCalleeDecl()->hasAttr<NoInlineAttr>())) {
Attrs =
- Attrs.addAttribute(getLLVMContext(),
- llvm::AttributeSet::FunctionIndex,
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::AlwaysInline);
}
// Disable inlining inside SEH __try blocks.
if (isSEHTryScope()) {
Attrs =
- Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
+ Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoInline);
}
@@ -4014,7 +4154,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CannotThrow = true;
} else {
// Otherwise, nounwind call sites will never throw.
- CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
+ CannotThrow = Attrs.hasAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::NoUnwind);
}
llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
@@ -4210,6 +4350,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
OffsetValue);
+ } else if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
+ llvm::Value *ParamVal =
+ CallArgs[AA->getParamIndex() - 1].RV.getScalarVal();
+ EmitAlignmentAssumption(Ret.getScalarVal(), ParamVal);
}
}
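
Pattern the new AllocAlignAttr branch handles, as a sketch: allocators
annotated with alloc_align get an alignment assumption keyed to the named
argument at every call site.

    void *aligned_alloc_like(unsigned align, unsigned size)
        __attribute__((alloc_align(1)));
    void *make(unsigned a) {
      return aligned_alloc_like(a, 64);  // result assumed aligned to `a`
    }
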
diff --git a/lib/CodeGen/CGCall.h b/lib/CodeGen/CGCall.h
index 031ce831cb37..97221e20c195 100644
--- a/lib/CodeGen/CGCall.h
+++ b/lib/CodeGen/CGCall.h
@@ -25,10 +25,10 @@
#include "ABIInfo.h"
namespace llvm {
- class AttributeSet;
- class Function;
- class Type;
- class Value;
+class AttributeList;
+class Function;
+class Type;
+class Value;
}
namespace clang {
@@ -39,28 +39,28 @@ namespace clang {
class VarDecl;
namespace CodeGen {
- typedef SmallVector<llvm::AttributeSet, 8> AttributeListType;
-
- /// Abstract information about a function or function prototype.
- class CGCalleeInfo {
- /// \brief The function prototype of the callee.
- const FunctionProtoType *CalleeProtoTy;
- /// \brief The function declaration of the callee.
- const Decl *CalleeDecl;
-
- public:
- explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
- CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
- CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
- : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
- CGCalleeInfo(const Decl *calleeDecl)
- : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
-
- const FunctionProtoType *getCalleeFunctionProtoType() const {
- return CalleeProtoTy;
- }
- const Decl *getCalleeDecl() const { return CalleeDecl; }
+typedef SmallVector<llvm::AttributeList, 8> AttributeListType;
+
+/// Abstract information about a function or function prototype.
+class CGCalleeInfo {
+ /// \brief The function prototype of the callee.
+ const FunctionProtoType *CalleeProtoTy;
+ /// \brief The function declaration of the callee.
+ const Decl *CalleeDecl;
+
+public:
+ explicit CGCalleeInfo() : CalleeProtoTy(nullptr), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy, const Decl *calleeDecl)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(calleeDecl) {}
+ CGCalleeInfo(const FunctionProtoType *calleeProtoTy)
+ : CalleeProtoTy(calleeProtoTy), CalleeDecl(nullptr) {}
+ CGCalleeInfo(const Decl *calleeDecl)
+ : CalleeProtoTy(nullptr), CalleeDecl(calleeDecl) {}
+
+ const FunctionProtoType *getCalleeFunctionProtoType() const {
+ return CalleeProtoTy;
+ }
+ const Decl *getCalleeDecl() const { return CalleeDecl; }
};
/// All available information about a concrete callee.
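A minimal usage sketch for the reindented CGCalleeInfo above (hypothetical caller; FPT and FD stand for a FunctionProtoType* and a Decl* already in scope). Each constructor fills in the half it is given and leaves the other null:

CGCalleeInfo FromProto(FPT);        // prototype only, decl == nullptr
CGCalleeInfo FromDecl(FD);          // decl only, prototype == nullptr
CGCalleeInfo Both(FPT, FD);         // both available
const Decl *Callee = Both.getCalleeDecl();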
diff --git a/lib/CodeGen/CGClass.cpp b/lib/CodeGen/CGClass.cpp
index 05d056739524..7ba03a0d42dd 100644
--- a/lib/CodeGen/CGClass.cpp
+++ b/lib/CodeGen/CGClass.cpp
@@ -309,8 +309,10 @@ Address CodeGenFunction::GetAddressOfBaseClass(
// just do a bitcast; null checks are unnecessary.
if (NonVirtualOffset.isZero() && !VBase) {
if (sanitizePerformTypeCheck()) {
+ SanitizerSet SkippedChecks;
+ SkippedChecks.set(SanitizerKind::Null, !NullCheckValue);
EmitTypeCheck(TCK_Upcast, Loc, Value.getPointer(),
- DerivedTy, DerivedAlign, !NullCheckValue);
+ DerivedTy, DerivedAlign, SkippedChecks);
}
return Builder.CreateBitCast(Value, BasePtrTy);
}
@@ -331,8 +333,10 @@ Address CodeGenFunction::GetAddressOfBaseClass(
}
if (sanitizePerformTypeCheck()) {
+ SanitizerSet SkippedChecks;
+ SkippedChecks.set(SanitizerKind::Null, true);
EmitTypeCheck(VBase ? TCK_UpcastToVirtualBase : TCK_Upcast, Loc,
- Value.getPointer(), DerivedTy, DerivedAlign, true);
+ Value.getPointer(), DerivedTy, DerivedAlign, SkippedChecks);
}
// Compute the virtual offset.
@@ -685,7 +689,8 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
/// complete-to-base constructor delegation optimization, i.e.
/// emitting the complete constructor as a simple call to the base
/// constructor.
-static bool IsConstructorDelegationValid(const CXXConstructorDecl *Ctor) {
+bool CodeGenFunction::IsConstructorDelegationValid(
+ const CXXConstructorDecl *Ctor) {
// Currently we disable the optimization for classes with virtual
// bases because (1) the addresses of parameter variables need to be
@@ -1131,10 +1136,11 @@ namespace {
RHS = EC->getSubExpr();
if (!RHS)
return nullptr;
- MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS);
- if (dyn_cast<FieldDecl>(ME2->getMemberDecl()) != Field)
- return nullptr;
- return Field;
+ if (MemberExpr *ME2 = dyn_cast<MemberExpr>(RHS)) {
+ if (ME2->getMemberDecl() == Field)
+ return Field;
+ }
+ return nullptr;
} else if (CXXMemberCallExpr *MCE = dyn_cast<CXXMemberCallExpr>(S)) {
CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(MCE->getCalleeDecl());
if (!(MD && isMemcpyEquivalentSpecialMember(MD)))
@@ -1384,6 +1390,20 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
const CXXDestructorDecl *Dtor = cast<CXXDestructorDecl>(CurGD.getDecl());
CXXDtorType DtorType = CurGD.getDtorType();
+ // For an abstract class, non-base destructors are never used (and can't
+ // be emitted in general, because vbase dtors may not have been validated
+ // by Sema), but the Itanium ABI doesn't make them optional and Clang may
+ // in fact emit references to them from other compilations, so emit them
+ // as functions containing a trap instruction.
+ if (DtorType != Dtor_Base && Dtor->getParent()->isAbstract()) {
+ llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
+ TrapCall->setDoesNotReturn();
+ TrapCall->setDoesNotThrow();
+ Builder.CreateUnreachable();
+ Builder.ClearInsertionPoint();
+ return;
+ }
+
Stmt *Body = Dtor->getBody();
if (Body)
incrementProfileCounter(Body);
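A hedged sketch of what the new abstract-class path above applies to; `Abstract` is hypothetical. Because no most-derived object of an abstract class can exist, its complete destructor can never legitimately run, so it is emitted as a trap:

struct Abstract {
  virtual void f() = 0;    // abstract: no most-derived objects exist
  virtual ~Abstract();
};
// The base (D2) variant is emitted normally; the complete (D1) variant
// becomes trap + unreachable per the code above.
Abstract::~Abstract() {}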
@@ -1416,9 +1436,7 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
// we'd introduce *two* handler blocks. In the Microsoft ABI, we
// always delegate because we might not have a definition in this TU.
switch (DtorType) {
- case Dtor_Comdat:
- llvm_unreachable("not expecting a COMDAT");
-
+ case Dtor_Comdat: llvm_unreachable("not expecting a COMDAT");
case Dtor_Deleting: llvm_unreachable("already handled deleting case");
case Dtor_Complete:
@@ -1433,7 +1451,9 @@ void CodeGenFunction::EmitDestructorBody(FunctionArgList &Args) {
/*Delegating=*/false, LoadCXXThisAddress());
break;
}
+
// Fallthrough: act like we're in the base variant.
+ LLVM_FALLTHROUGH;
case Dtor_Base:
assert(Body);
@@ -1950,7 +1970,11 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
// Add the rest of the user-supplied arguments.
const FunctionProtoType *FPT = D->getType()->castAs<FunctionProtoType>();
- EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor());
+ EvaluationOrder Order = E->isListInitialization()
+ ? EvaluationOrder::ForceLeftToRight
+ : EvaluationOrder::Default;
+ EmitCallArgs(Args, FPT, E->arguments(), E->getConstructor(),
+ /*ParamsToSkip*/ 0, Order);
EmitCXXConstructorCall(D, Type, ForVirtualBase, Delegating, This, Args);
}
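A hedged sketch of the language rule behind ForceLeftToRight above; S, f, and g are hypothetical. Braced constructor arguments are sequenced left to right, while parenthesized ones remain unsequenced:

S a{f(), g()};   // f() is evaluated before g()
S b(f(), g());   // evaluation order is unspecified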
@@ -1970,7 +1994,7 @@ static bool canEmitDelegateCallArgs(CodeGenFunction &CGF,
// Likewise if they're inalloca.
const CGFunctionInfo &Info =
- CGF.CGM.getTypes().arrangeCXXConstructorCall(Args, Ctor, Type, 0);
+ CGF.CGM.getTypes().arrangeCXXConstructorCall(Args, Ctor, Type, 0, 0);
if (Info.usesInAlloca())
return false;
}
@@ -2012,10 +2036,11 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
return;
}
+ bool PassPrototypeArgs = true;
// Check whether we can actually emit the constructor before trying to do so.
if (auto Inherited = D->getInheritedConstructor()) {
- if (getTypes().inheritingCtorHasParams(Inherited, Type) &&
- !canEmitDelegateCallArgs(*this, D, Type, Args)) {
+ PassPrototypeArgs = getTypes().inheritingCtorHasParams(Inherited, Type);
+ if (PassPrototypeArgs && !canEmitDelegateCallArgs(*this, D, Type, Args)) {
EmitInlinedInheritingCXXConstructorCall(D, Type, ForVirtualBase,
Delegating, Args);
return;
@@ -2023,14 +2048,15 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
}
// Insert any ABI-specific implicit constructor arguments.
- unsigned ExtraArgs = CGM.getCXXABI().addImplicitConstructorArgs(
- *this, D, Type, ForVirtualBase, Delegating, Args);
+ CGCXXABI::AddedStructorArgs ExtraArgs =
+ CGM.getCXXABI().addImplicitConstructorArgs(*this, D, Type, ForVirtualBase,
+ Delegating, Args);
// Emit the call.
llvm::Constant *CalleePtr =
CGM.getAddrOfCXXStructor(D, getFromCtorType(Type));
- const CGFunctionInfo &Info =
- CGM.getTypes().arrangeCXXConstructorCall(Args, D, Type, ExtraArgs);
+ const CGFunctionInfo &Info = CGM.getTypes().arrangeCXXConstructorCall(
+ Args, D, Type, ExtraArgs.Prefix, ExtraArgs.Suffix, PassPrototypeArgs);
CGCallee Callee = CGCallee::forDirect(CalleePtr, D);
EmitCall(Info, Callee, ReturnValueSlot(), Args);
@@ -2102,7 +2128,9 @@ void CodeGenFunction::EmitInheritedCXXConstructorCall(
void CodeGenFunction::EmitInlinedInheritingCXXConstructorCall(
const CXXConstructorDecl *Ctor, CXXCtorType CtorType, bool ForVirtualBase,
bool Delegating, CallArgList &Args) {
- InlinedInheritingConstructorScope Scope(*this, GlobalDecl(Ctor, CtorType));
+ GlobalDecl GD(Ctor, CtorType);
+ InlinedInheritingConstructorScope Scope(*this, GD);
+ ApplyInlineDebugLocation DebugScope(*this, GD);
// Save the arguments to be passed to the inherited constructor.
CXXInheritedCtorInitExprArgs = Args;
diff --git a/lib/CodeGen/CGCleanup.cpp b/lib/CodeGen/CGCleanup.cpp
index 3666858e63d2..437ab7dd4649 100644
--- a/lib/CodeGen/CGCleanup.cpp
+++ b/lib/CodeGen/CGCleanup.cpp
@@ -418,11 +418,15 @@ void CodeGenFunction::ResolveBranchFixups(llvm::BasicBlock *Block) {
}
/// Pops cleanup blocks until the given savepoint is reached.
-void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
+void CodeGenFunction::PopCleanupBlocks(
+ EHScopeStack::stable_iterator Old,
+ std::initializer_list<llvm::Value **> ValuesToReload) {
assert(Old.isValid());
+ bool HadBranches = false;
while (EHStack.stable_begin() != Old) {
EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
+ HadBranches |= Scope.hasBranches();
// As long as Old strictly encloses the scope's enclosing normal
// cleanup, we're going to emit another normal cleanup which
@@ -432,14 +436,41 @@ void CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old) {
PopCleanupBlock(FallThroughIsBranchThrough);
}
+
+ // If we didn't have any branches, the insertion point before cleanups must
+ // dominate the current insertion point and we don't need to reload any
+ // values.
+ if (!HadBranches)
+ return;
+
+ // Spill and reload all values that the caller wants to be live at the current
+ // insertion point.
+ for (llvm::Value **ReloadedValue : ValuesToReload) {
+ auto *Inst = dyn_cast_or_null<llvm::Instruction>(*ReloadedValue);
+ if (!Inst)
+ continue;
+ Address Tmp =
+ CreateDefaultAlignTempAlloca(Inst->getType(), "tmp.exprcleanup");
+
+ // Find an insertion point after Inst and spill it to the temporary.
+ llvm::BasicBlock::iterator InsertBefore;
+ if (auto *Invoke = dyn_cast<llvm::InvokeInst>(Inst))
+ InsertBefore = Invoke->getNormalDest()->getFirstInsertionPt();
+ else
+ InsertBefore = std::next(Inst->getIterator());
+ CGBuilderTy(CGM, &*InsertBefore).CreateStore(Inst, Tmp);
+
+ // Reload the value at the current insertion point.
+ *ReloadedValue = Builder.CreateLoad(Tmp);
+ }
}
/// Pops cleanup blocks until the given savepoint is reached, then add the
/// cleanups from the given savepoint in the lifetime-extended cleanups stack.
-void
-CodeGenFunction::PopCleanupBlocks(EHScopeStack::stable_iterator Old,
- size_t OldLifetimeExtendedSize) {
- PopCleanupBlocks(Old);
+void CodeGenFunction::PopCleanupBlocks(
+ EHScopeStack::stable_iterator Old, size_t OldLifetimeExtendedSize,
+ std::initializer_list<llvm::Value **> ValuesToReload) {
+ PopCleanupBlocks(Old, ValuesToReload);
// Move our deferred cleanups onto the EH stack.
for (size_t I = OldLifetimeExtendedSize,
@@ -578,7 +609,7 @@ static void destroyOptimisticNormalEntry(CodeGenFunction &CGF,
llvm::SwitchInst *si = cast<llvm::SwitchInst>(use.getUser());
if (si->getNumCases() == 1 && si->getDefaultDest() == unreachableBB) {
// Replace the switch with a branch.
- llvm::BranchInst::Create(si->case_begin().getCaseSuccessor(), si);
+ llvm::BranchInst::Create(si->case_begin()->getCaseSuccessor(), si);
// The switch operand is a load from the cleanup-dest alloca.
llvm::LoadInst *condition = cast<llvm::LoadInst>(si->getCondition());
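A hedged caller-side sketch of the new ValuesToReload parameter (hypothetical names). A value produced while cleanups were active can be kept usable after they are popped; when any popped cleanup had branches, PopCleanupBlocks spills it to a temporary and rewrites the pointer to a load:

llvm::Value *Result = /* emitted before the cleanups run */ nullptr;
PopCleanupBlocks(Savepoint, {&Result});  // Result may now point at a reload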
diff --git a/lib/CodeGen/CGCoroutine.cpp b/lib/CodeGen/CGCoroutine.cpp
index 2fdb1279ece9..0ef680ef6609 100644
--- a/lib/CodeGen/CGCoroutine.cpp
+++ b/lib/CodeGen/CGCoroutine.cpp
@@ -12,28 +12,58 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "llvm/ADT/ScopeExit.h"
#include "clang/AST/StmtCXX.h"
using namespace clang;
using namespace CodeGen;
-namespace clang {
-namespace CodeGen {
+using llvm::Value;
+using llvm::BasicBlock;
+
+namespace {
+enum class AwaitKind { Init, Normal, Yield, Final };
+static constexpr llvm::StringLiteral AwaitKindStr[] = {"init", "await", "yield",
+ "final"};
+}
+
+struct clang::CodeGen::CGCoroData {
+  // The current await expression kind and how many await/yield expressions
+  // have been encountered so far. These are used to generate pretty labels
+  // for await expressions in LLVM IR.
+ AwaitKind CurrentAwaitKind = AwaitKind::Init;
+ unsigned AwaitNum = 0;
+ unsigned YieldNum = 0;
+
+  // How many co_return statements are in the coroutine. Used to decide
+  // whether we need to add a co_return; equivalent at the end of the
+  // user-authored body.
+ unsigned CoreturnCount = 0;
+
+  // A branch to this block is emitted when the coroutine needs to suspend.
+ llvm::BasicBlock *SuspendBB = nullptr;
+
+ // Stores the jump destination just before the coroutine memory is freed.
+ // This is the destination that every suspend point jumps to for the cleanup
+ // branch.
+ CodeGenFunction::JumpDest CleanupJD;
+
+  // Stores the jump destination just before the final suspend. Co_return
+  // statements jump to this point after calling the return_xxx promise member.
+ CodeGenFunction::JumpDest FinalJD;
-struct CGCoroData {
// Stores the llvm.coro.id emitted in the function so that we can supply it
// as the first argument to coro.begin, coro.alloc and coro.free intrinsics.
// Note: llvm.coro.id returns a token that cannot be directly expressed in a
// builtin.
llvm::CallInst *CoroId = nullptr;
+
// If coro.id came from the builtin, remember the expression to give better
// diagnostic. If CoroIdExpr is nullptr, the coro.id was created by
// EmitCoroutineBody.
CallExpr const *CoroIdExpr = nullptr;
};
-}
-}
+// Defining these here allows us to keep CGCoroData private to this file.
clang::CodeGen::CodeGenFunction::CGCoroInfo::CGCoroInfo() {}
CodeGenFunction::CGCoroInfo::~CGCoroInfo() {}
@@ -59,19 +89,250 @@ static void createCoroData(CodeGenFunction &CGF,
CurCoro.Data->CoroIdExpr = CoroIdExpr;
}
+// Synthesize a pretty name for a suspend point.
+static SmallString<32> buildSuspendPrefixStr(CGCoroData &Coro, AwaitKind Kind) {
+ unsigned No = 0;
+ switch (Kind) {
+ case AwaitKind::Init:
+ case AwaitKind::Final:
+ break;
+ case AwaitKind::Normal:
+ No = ++Coro.AwaitNum;
+ break;
+ case AwaitKind::Yield:
+ No = ++Coro.YieldNum;
+ break;
+ }
+ SmallString<32> Prefix(AwaitKindStr[static_cast<unsigned>(Kind)]);
+ if (No > 1) {
+ Twine(No).toVector(Prefix);
+ }
+ return Prefix;
+}
+
+// Emit a suspend expression, which roughly looks like:
+//
+// auto && x = CommonExpr();
+// if (!x.await_ready()) {
+// llvm_coro_save();
+// x.await_suspend(...); (*)
+// llvm_coro_suspend(); (**)
+// }
+// x.await_resume();
+//
+// where the result of the entire expression is the result of x.await_resume()
+//
+// (*) If x.await_suspend's return type is bool, it can veto the suspend:
+// if (x.await_suspend(...))
+// llvm_coro_suspend();
+//
+// (**) llvm_coro_suspend() encodes three possible continuations as
+// a switch instruction:
+//
+// %where-to = call i8 @llvm.coro.suspend(...)
+// switch i8 %where-to, label %coro.ret [ ; jump to epilogue to suspend
+// i8 0, label %yield.ready ; go here when resumed
+// i8 1, label %yield.cleanup ; go here when destroyed
+// ]
+//
+// See llvm's docs/Coroutines.rst for more details.
+//
+static RValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Coro,
+ CoroutineSuspendExpr const &S,
+ AwaitKind Kind, AggValueSlot aggSlot,
+ bool ignoreResult) {
+ auto *E = S.getCommonExpr();
+ auto Binder =
+ CodeGenFunction::OpaqueValueMappingData::bind(CGF, S.getOpaqueValue(), E);
+ auto UnbindOnExit = llvm::make_scope_exit([&] { Binder.unbind(CGF); });
+
+ auto Prefix = buildSuspendPrefixStr(Coro, Kind);
+ BasicBlock *ReadyBlock = CGF.createBasicBlock(Prefix + Twine(".ready"));
+ BasicBlock *SuspendBlock = CGF.createBasicBlock(Prefix + Twine(".suspend"));
+ BasicBlock *CleanupBlock = CGF.createBasicBlock(Prefix + Twine(".cleanup"));
+
+ // If expression is ready, no need to suspend.
+ CGF.EmitBranchOnBoolExpr(S.getReadyExpr(), ReadyBlock, SuspendBlock, 0);
+
+ // Otherwise, emit suspend logic.
+ CGF.EmitBlock(SuspendBlock);
+
+ auto &Builder = CGF.Builder;
+ llvm::Function *CoroSave = CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_save);
+ auto *NullPtr = llvm::ConstantPointerNull::get(CGF.CGM.Int8PtrTy);
+ auto *SaveCall = Builder.CreateCall(CoroSave, {NullPtr});
+
+ auto *SuspendRet = CGF.EmitScalarExpr(S.getSuspendExpr());
+ if (SuspendRet != nullptr) {
+ // Veto suspension if requested by bool returning await_suspend.
+ assert(SuspendRet->getType()->isIntegerTy(1) &&
+ "Sema should have already checked that it is void or bool");
+ BasicBlock *RealSuspendBlock =
+ CGF.createBasicBlock(Prefix + Twine(".suspend.bool"));
+ CGF.Builder.CreateCondBr(SuspendRet, RealSuspendBlock, ReadyBlock);
+ SuspendBlock = RealSuspendBlock;
+ CGF.EmitBlock(RealSuspendBlock);
+ }
+
+ // Emit the suspend point.
+ const bool IsFinalSuspend = (Kind == AwaitKind::Final);
+ llvm::Function *CoroSuspend =
+ CGF.CGM.getIntrinsic(llvm::Intrinsic::coro_suspend);
+ auto *SuspendResult = Builder.CreateCall(
+ CoroSuspend, {SaveCall, Builder.getInt1(IsFinalSuspend)});
+
+ // Create a switch capturing three possible continuations.
+ auto *Switch = Builder.CreateSwitch(SuspendResult, Coro.SuspendBB, 2);
+ Switch->addCase(Builder.getInt8(0), ReadyBlock);
+ Switch->addCase(Builder.getInt8(1), CleanupBlock);
+
+ // Emit cleanup for this suspend point.
+ CGF.EmitBlock(CleanupBlock);
+ CGF.EmitBranchThroughCleanup(Coro.CleanupJD);
+
+ // Emit await_resume expression.
+ CGF.EmitBlock(ReadyBlock);
+ return CGF.EmitAnyExpr(S.getResumeExpr(), aggSlot, ignoreResult);
+}
+
+RValue CodeGenFunction::EmitCoawaitExpr(const CoawaitExpr &E,
+ AggValueSlot aggSlot,
+ bool ignoreResult) {
+ return emitSuspendExpression(*this, *CurCoro.Data, E,
+ CurCoro.Data->CurrentAwaitKind, aggSlot,
+ ignoreResult);
+}
+RValue CodeGenFunction::EmitCoyieldExpr(const CoyieldExpr &E,
+ AggValueSlot aggSlot,
+ bool ignoreResult) {
+ return emitSuspendExpression(*this, *CurCoro.Data, E, AwaitKind::Yield,
+ aggSlot, ignoreResult);
+}
+
+void CodeGenFunction::EmitCoreturnStmt(CoreturnStmt const &S) {
+ ++CurCoro.Data->CoreturnCount;
+ EmitStmt(S.getPromiseCall());
+ EmitBranchThroughCleanup(CurCoro.Data->FinalJD);
+}
+
+// For the WinEH exception representation, the backend needs to know which
+// funclet coro.end belongs to. That information is passed in a funclet bundle.
+static SmallVector<llvm::OperandBundleDef, 1>
+getBundlesForCoroEnd(CodeGenFunction &CGF) {
+ SmallVector<llvm::OperandBundleDef, 1> BundleList;
+
+ if (llvm::Instruction *EHPad = CGF.CurrentFuncletPad)
+ BundleList.emplace_back("funclet", EHPad);
+
+ return BundleList;
+}
+
+namespace {
+// We insert coro.end to cut short any destructors for objects that do not
+// need to be destroyed once the coroutine is resumed.
+// See llvm/docs/Coroutines.rst for more details about coro.end.
+struct CallCoroEnd final : public EHScopeStack::Cleanup {
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ auto &CGM = CGF.CGM;
+ auto *NullPtr = llvm::ConstantPointerNull::get(CGF.Int8PtrTy);
+ llvm::Function *CoroEndFn = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
+ // See if we have a funclet bundle to associate coro.end with. (WinEH)
+ auto Bundles = getBundlesForCoroEnd(CGF);
+ auto *CoroEnd = CGF.Builder.CreateCall(
+ CoroEndFn, {NullPtr, CGF.Builder.getTrue()}, Bundles);
+ if (Bundles.empty()) {
+      // Otherwise (landingpad model), create a conditional branch that leads
+      // either to a cleanup block or to a block with the EH resume instruction.
+ auto *ResumeBB = CGF.getEHResumeBlock(/*cleanup=*/true);
+ auto *CleanupContBB = CGF.createBasicBlock("cleanup.cont");
+ CGF.Builder.CreateCondBr(CoroEnd, ResumeBB, CleanupContBB);
+ CGF.EmitBlock(CleanupContBB);
+ }
+ }
+};
+}
+
+namespace {
+// Make sure to call coro.delete on scope exit.
+struct CallCoroDelete final : public EHScopeStack::Cleanup {
+ Stmt *Deallocate;
+
+ // TODO: Wrap deallocate in if(coro.free(...)) Deallocate.
+ void Emit(CodeGenFunction &CGF, Flags) override {
+    // Note that the deallocation will be emitted twice: once for a normal
+    // exit and once for an exceptional exit. This usage is safe because
+    // Deallocate does not contain any declarations. The
+    // SubStmtBuilder::makeNewAndDeleteExpr()
+ // builds a single call to a deallocation function which is safe to emit
+ // multiple times.
+ CGF.EmitStmt(Deallocate);
+ }
+ explicit CallCoroDelete(Stmt *DeallocStmt) : Deallocate(DeallocStmt) {}
+};
+}
+
void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
auto *NullPtr = llvm::ConstantPointerNull::get(Builder.getInt8PtrTy());
auto &TI = CGM.getContext().getTargetInfo();
unsigned NewAlign = TI.getNewAlign() / TI.getCharWidth();
+ auto *FinalBB = createBasicBlock("coro.final");
+ auto *RetBB = createBasicBlock("coro.ret");
+
auto *CoroId = Builder.CreateCall(
CGM.getIntrinsic(llvm::Intrinsic::coro_id),
{Builder.getInt32(NewAlign), NullPtr, NullPtr, NullPtr});
createCoroData(*this, CurCoro, CoroId);
+ CurCoro.Data->SuspendBB = RetBB;
+
+ auto *AllocateCall = EmitScalarExpr(S.getAllocate());
+
+ // Handle allocation failure if 'ReturnStmtOnAllocFailure' was provided.
+ if (auto *RetOnAllocFailure = S.getReturnStmtOnAllocFailure()) {
+ auto *RetOnFailureBB = createBasicBlock("coro.ret.on.failure");
+ auto *InitBB = createBasicBlock("coro.init");
+
+ // See if allocation was successful.
+ auto *NullPtr = llvm::ConstantPointerNull::get(Int8PtrTy);
+ auto *Cond = Builder.CreateICmpNE(AllocateCall, NullPtr);
+ Builder.CreateCondBr(Cond, InitBB, RetOnFailureBB);
+
+ // If not, return OnAllocFailure object.
+ EmitBlock(RetOnFailureBB);
+ EmitStmt(RetOnAllocFailure);
+
+ EmitBlock(InitBB);
+ }
+
+ CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
+ {
+ CodeGenFunction::RunCleanupsScope ResumeScope(*this);
+ EHStack.pushCleanup<CallCoroDelete>(NormalAndEHCleanup, S.getDeallocate());
+
+ EmitStmt(S.getPromiseDeclStmt());
+
+ EHStack.pushCleanup<CallCoroEnd>(EHCleanup);
+
+ CurCoro.Data->FinalJD = getJumpDestInCurrentScope(FinalBB);
+
+ // FIXME: Emit initial suspend and more before the body.
+
+ CurCoro.Data->CurrentAwaitKind = AwaitKind::Normal;
+ EmitStmt(S.getBody());
+
+    // See if we need to generate the final suspend.
+ const bool CanFallthrough = Builder.GetInsertBlock();
+ const bool HasCoreturns = CurCoro.Data->CoreturnCount > 0;
+ if (CanFallthrough || HasCoreturns) {
+ EmitBlock(FinalBB);
+ // FIXME: Emit final suspend.
+ }
+ }
+
+ EmitBlock(RetBB);
+ llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
+ Builder.CreateCall(CoroEnd, {NullPtr, Builder.getFalse()});
- EmitScalarExpr(S.getAllocate());
- // FIXME: Emit the rest of the coroutine.
- EmitStmt(S.getDeallocate());
+ // FIXME: Emit return for the coroutine return object.
}
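A hedged source-level sketch of the constructs EmitCoroutineBody now handles (assumes a suitable coroutine type `task` with the usual promise members; all names hypothetical):

task demo() {
  co_await ready_thing(); // emitSuspendExpression; labels "await", "await2", ...
  co_yield 42;            // AwaitKind::Yield; labels "yield", "yield2", ...
  co_return;              // bumps CoreturnCount, branches to FinalJD
}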
// Emit coroutine intrinsic and patch up arguments of the token type.
diff --git a/lib/CodeGen/CGDebugInfo.cpp b/lib/CodeGen/CGDebugInfo.cpp
index 12a68036b09c..818b51543d30 100644
--- a/lib/CodeGen/CGDebugInfo.cpp
+++ b/lib/CodeGen/CGDebugInfo.cpp
@@ -107,8 +107,8 @@ void ApplyDebugLocation::init(SourceLocation TemporaryLocation,
// Construct a location that has a valid scope, but no line info.
assert(!DI->LexicalBlockStack.empty());
- CGF->Builder.SetCurrentDebugLocation(
- llvm::DebugLoc::get(0, 0, DI->LexicalBlockStack.back()));
+ CGF->Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
+ 0, 0, DI->LexicalBlockStack.back(), DI->getInlinedAt()));
}
ApplyDebugLocation::ApplyDebugLocation(CodeGenFunction &CGF, const Expr *E)
@@ -134,6 +134,30 @@ ApplyDebugLocation::~ApplyDebugLocation() {
CGF->Builder.SetCurrentDebugLocation(std::move(OriginalLocation));
}
+ApplyInlineDebugLocation::ApplyInlineDebugLocation(CodeGenFunction &CGF,
+ GlobalDecl InlinedFn)
+ : CGF(&CGF) {
+ if (!CGF.getDebugInfo()) {
+ this->CGF = nullptr;
+ return;
+ }
+ auto &DI = *CGF.getDebugInfo();
+ SavedLocation = DI.getLocation();
+ assert((DI.getInlinedAt() ==
+ CGF.Builder.getCurrentDebugLocation()->getInlinedAt()) &&
+ "CGDebugInfo and IRBuilder are out of sync");
+
+ DI.EmitInlineFunctionStart(CGF.Builder, InlinedFn);
+}
+
+ApplyInlineDebugLocation::~ApplyInlineDebugLocation() {
+ if (!CGF)
+ return;
+ auto &DI = *CGF->getDebugInfo();
+ DI.EmitInlineFunctionEnd(CGF->Builder);
+ DI.EmitLocation(CGF->Builder, SavedLocation);
+}
+
void CGDebugInfo::setLocation(SourceLocation Loc) {
// If the new location isn't valid return.
if (Loc.isInvalid())
@@ -249,8 +273,8 @@ StringRef CGDebugInfo::getObjCMethodName(const ObjCMethodDecl *OMD) {
<< OC->getIdentifier()->getNameStart() << ')';
}
} else if (const auto *OCD = dyn_cast<ObjCCategoryImplDecl>(DC)) {
- OS << ((const NamedDecl *)OCD)->getIdentifier()->getNameStart() << '('
- << OCD->getIdentifier()->getNameStart() << ')';
+ OS << OCD->getClassInterface()->getName() << '('
+ << OCD->getName() << ')';
} else if (isa<ObjCProtocolDecl>(DC)) {
// We can extract the type of the class from the self pointer.
if (ImplicitParamDecl *SelfDecl = OMD->getSelfDecl()) {
@@ -509,7 +533,8 @@ void CGDebugInfo::CreateCompileUnit() {
Checksum),
Producer, LO.Optimize, CGM.getCodeGenOpts().DwarfDebugFlags, RuntimeVers,
CGM.getCodeGenOpts().SplitDwarfFile, EmissionKind, 0 /* DWOid */,
- CGM.getCodeGenOpts().SplitDwarfInlining);
+ CGM.getCodeGenOpts().SplitDwarfInlining,
+ CGM.getCodeGenOpts().DebugInfoForProfiling);
}
llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
@@ -581,8 +606,6 @@ llvm::DIType *CGDebugInfo::CreateType(const BuiltinType *BT) {
return getOrCreateStructPtrType("opencl_clk_event_t", OCLClkEventDITy);
case BuiltinType::OCLQueue:
return getOrCreateStructPtrType("opencl_queue_t", OCLQueueDITy);
- case BuiltinType::OCLNDRange:
- return getOrCreateStructPtrType("opencl_ndrange_t", OCLNDRangeDITy);
case BuiltinType::OCLReserveID:
return getOrCreateStructPtrType("opencl_reserve_id_t", OCLReserveIDDITy);
@@ -793,17 +816,19 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
- unsigned AS = CGM.getContext().getTargetAddressSpace(PointeeTy);
- uint64_t Size = CGM.getTarget().getPointerWidth(AS);
+ unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(PointeeTy);
+ uint64_t Size = CGM.getTarget().getPointerWidth(AddressSpace);
auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
+ Optional<unsigned> DWARFAddressSpace =
+ CGM.getTarget().getDWARFAddressSpace(AddressSpace);
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit),
- Size, Align);
+ Size, Align, DWARFAddressSpace);
else
return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
- Align);
+ Align, DWARFAddressSpace);
}
llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
@@ -1608,8 +1633,13 @@ llvm::DIType *CGDebugInfo::getOrCreateVTablePtrType(llvm::DIFile *Unit) {
llvm::DITypeRefArray SElements = DBuilder.getOrCreateTypeArray(STy);
llvm::DIType *SubTy = DBuilder.createSubroutineType(SElements);
unsigned Size = Context.getTypeSize(Context.VoidPtrTy);
+ unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace();
+ Optional<unsigned> DWARFAddressSpace =
+ CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
+
llvm::DIType *vtbl_ptr_type =
- DBuilder.createPointerType(SubTy, Size, 0, "__vtbl_ptr_type");
+ DBuilder.createPointerType(SubTy, Size, 0, DWARFAddressSpace,
+ "__vtbl_ptr_type");
VTablePtrType = DBuilder.createPointerType(vtbl_ptr_type, Size);
return VTablePtrType;
}
@@ -1648,10 +1678,14 @@ void CGDebugInfo::CollectVTableInfo(const CXXRecordDecl *RD, llvm::DIFile *Unit,
unsigned VSlotCount =
VFTLayout.vtable_components().size() - CGM.getLangOpts().RTTIData;
unsigned VTableWidth = PtrWidth * VSlotCount;
+ unsigned VtblPtrAddressSpace = CGM.getTarget().getVtblPtrAddressSpace();
+ Optional<unsigned> DWARFAddressSpace =
+ CGM.getTarget().getDWARFAddressSpace(VtblPtrAddressSpace);
// Create a very wide void* type and insert it directly in the element list.
llvm::DIType *VTableType =
- DBuilder.createPointerType(nullptr, VTableWidth, 0, "__vtbl_ptr_type");
+ DBuilder.createPointerType(nullptr, VTableWidth, 0, DWARFAddressSpace,
+ "__vtbl_ptr_type");
EltTys.push_back(VTableType);
// The vptr is a pointer to this special vtable type.
@@ -1714,7 +1748,27 @@ void CGDebugInfo::completeType(const RecordDecl *RD) {
completeRequiredType(RD);
}
+/// Return true if the class or any of its methods are marked dllimport.
+static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<DLLImportAttr>())
+ return true;
+ for (const CXXMethodDecl *MD : RD->methods())
+ if (MD->hasAttr<DLLImportAttr>())
+ return true;
+ return false;
+}
+
void CGDebugInfo::completeClassData(const RecordDecl *RD) {
+ if (auto *CXXRD = dyn_cast<CXXRecordDecl>(RD))
+ if (CXXRD->isDynamicClass() &&
+ CGM.getVTableLinkage(CXXRD) ==
+ llvm::GlobalValue::AvailableExternallyLinkage &&
+ !isClassOrMethodDLLImport(CXXRD))
+ return;
+ completeClass(RD);
+}
+
+void CGDebugInfo::completeClass(const RecordDecl *RD) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
QualType Ty = CGM.getContext().getRecordType(RD);
@@ -1760,22 +1814,16 @@ static bool isDefinedInClangModule(const RecordDecl *RD) {
return true;
}
-/// Return true if the class or any of its methods are marked dllimport.
-static bool isClassOrMethodDLLImport(const CXXRecordDecl *RD) {
- if (RD->hasAttr<DLLImportAttr>())
- return true;
- for (const CXXMethodDecl *MD : RD->methods())
- if (MD->hasAttr<DLLImportAttr>())
- return true;
- return false;
-}
-
static bool shouldOmitDefinition(codegenoptions::DebugInfoKind DebugKind,
bool DebugTypeExtRefs, const RecordDecl *RD,
const LangOptions &LangOpts) {
if (DebugTypeExtRefs && isDefinedInClangModule(RD->getDefinition()))
return true;
+ if (auto *ES = RD->getASTContext().getExternalSource())
+ if (ES->hasExternalDefinitions(RD) == ExternalASTSource::EK_Always)
+ return true;
+
if (DebugKind > codegenoptions::LimitedDebugInfo)
return false;
@@ -2009,7 +2057,11 @@ CGDebugInfo::getOrCreateModuleRef(ExternalASTSource::ASTSourceDescriptor Mod,
if (CreateSkeletonCU && IsRootModule) {
// PCH files don't have a signature field in the control block,
// but LLVM detects skeleton CUs by looking for a non-zero DWO id.
- uint64_t Signature = Mod.getSignature() ? Mod.getSignature() : ~1ULL;
+ // We use the lower 64 bits for debug info.
+ uint64_t Signature =
+ Mod.getSignature()
+ ? (uint64_t)Mod.getSignature()[1] << 32 | Mod.getSignature()[0]
+ : ~1ULL;
llvm::DIBuilder DIB(CGM.getModule());
DIB.createCompileUnit(TheCU->getSourceLanguage(),
DIB.createFile(Mod.getModuleName(), Mod.getPath()),
@@ -2408,6 +2460,21 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const EnumType *Ty) {
FullName);
}
+llvm::DIMacro *CGDebugInfo::CreateMacro(llvm::DIMacroFile *Parent,
+ unsigned MType, SourceLocation LineLoc,
+ StringRef Name, StringRef Value) {
+ unsigned Line = LineLoc.isInvalid() ? 0 : getLineNumber(LineLoc);
+ return DBuilder.createMacro(Parent, Line, MType, Name, Value);
+}
+
+llvm::DIMacroFile *CGDebugInfo::CreateTempMacroFile(llvm::DIMacroFile *Parent,
+ SourceLocation LineLoc,
+ SourceLocation FileLoc) {
+ llvm::DIFile *FName = getOrCreateFile(FileLoc);
+ unsigned Line = LineLoc.isInvalid() ? 0 : getLineNumber(LineLoc);
+ return DBuilder.createTempMacroFile(Parent, Line, FName);
+}
+
static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
Qualifiers Quals;
do {
@@ -2451,8 +2518,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::SubstTemplateTypeParm:
T = cast<SubstTemplateTypeParmType>(T)->getReplacementType();
break;
- case Type::Auto: {
- QualType DT = cast<AutoType>(T)->getDeducedType();
+ case Type::Auto:
+ case Type::DeducedTemplateSpecialization: {
+ QualType DT = cast<DeducedType>(T)->getDeducedType();
assert(!DT.isNull() && "Undeduced types shouldn't reach here.");
T = DT;
break;
@@ -2488,11 +2556,17 @@ void CGDebugInfo::completeTemplateDefinition(
const ClassTemplateSpecializationDecl &SD) {
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
+ completeUnusedClass(SD);
+}
+
+void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
+ if (DebugKind <= codegenoptions::DebugLineTablesOnly)
+ return;
- completeClassData(&SD);
+ completeClassData(&D);
// In case this type has no member function definitions being emitted, ensure
// it is retained
- RetainedTypes.push_back(CGM.getContext().getRecordType(&SD).getAsOpaquePtr());
+ RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
}
llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
@@ -2618,6 +2692,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
case Type::Attributed:
case Type::Adjusted:
case Type::Decayed:
+ case Type::DeducedTemplateSpecialization:
case Type::Elaborated:
case Type::Paren:
case Type::SubstTemplateTypeParm:
@@ -2774,9 +2849,10 @@ void CGDebugInfo::collectFunctionDeclProps(GlobalDecl GD, llvm::DIFile *Unit,
}
// No need to replicate the linkage name if it isn't different from the
// subprogram name, no need to have it at all unless coverage is enabled or
- // debug is set to more than just line tables.
+ // debug is set to more than just line tables or extra debug info is needed.
if (LinkageName == Name || (!CGM.getCodeGenOpts().EmitGcovArcs &&
!CGM.getCodeGenOpts().EmitGcovNotes &&
+ !CGM.getCodeGenOpts().DebugInfoForProfiling &&
DebugKind <= codegenoptions::DebugLineTablesOnly))
LinkageName = StringRef();
@@ -2844,28 +2920,40 @@ void CGDebugInfo::collectVarDeclProps(const VarDecl *VD, llvm::DIFile *&Unit,
VDContext = getContextDescriptor(cast<Decl>(DC), Mod ? Mod : TheCU);
}
-llvm::DISubprogram *
-CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
+llvm::DISubprogram *CGDebugInfo::getFunctionFwdDeclOrStub(GlobalDecl GD,
+ bool Stub) {
llvm::DINodeArray TParamsArray;
StringRef Name, LinkageName;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
- SourceLocation Loc = FD->getLocation();
+ SourceLocation Loc = GD.getDecl()->getLocation();
llvm::DIFile *Unit = getOrCreateFile(Loc);
llvm::DIScope *DContext = Unit;
unsigned Line = getLineNumber(Loc);
-
- collectFunctionDeclProps(FD, Unit, Name, LinkageName, DContext,
+ collectFunctionDeclProps(GD, Unit, Name, LinkageName, DContext,
TParamsArray, Flags);
+ auto *FD = dyn_cast<FunctionDecl>(GD.getDecl());
+
// Build function type.
SmallVector<QualType, 16> ArgTypes;
- for (const ParmVarDecl *Parm: FD->parameters())
- ArgTypes.push_back(Parm->getType());
+ if (FD)
+ for (const ParmVarDecl *Parm : FD->parameters())
+ ArgTypes.push_back(Parm->getType());
CallingConv CC = FD->getType()->castAs<FunctionType>()->getCallConv();
QualType FnType = CGM.getContext().getFunctionType(
FD->getReturnType(), ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
+ if (Stub) {
+ return DBuilder.createFunction(
+ DContext, Name, LinkageName, Unit, Line,
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
+ !FD->isExternallyVisible(),
+ /* isDefinition = */ true, 0, Flags, CGM.getLangOpts().Optimize,
+ TParamsArray.get(), getFunctionDeclaration(FD));
+ }
+
llvm::DISubprogram *SP = DBuilder.createTempFunctionFwdDecl(
DContext, Name, LinkageName, Unit, Line,
- getOrCreateFunctionType(FD, FnType, Unit), !FD->isExternallyVisible(),
+ getOrCreateFunctionType(GD.getDecl(), FnType, Unit),
+ !FD->isExternallyVisible(),
/* isDefinition = */ false, 0, Flags, CGM.getLangOpts().Optimize,
TParamsArray.get(), getFunctionDeclaration(FD));
const auto *CanonDecl = cast<FunctionDecl>(FD->getCanonicalDecl());
@@ -2875,6 +2963,16 @@ CGDebugInfo::getFunctionForwardDeclaration(const FunctionDecl *FD) {
return SP;
}
+llvm::DISubprogram *
+CGDebugInfo::getFunctionForwardDeclaration(GlobalDecl GD) {
+ return getFunctionFwdDeclOrStub(GD, /* Stub = */ false);
+}
+
+llvm::DISubprogram *
+CGDebugInfo::getFunctionStub(GlobalDecl GD) {
+ return getFunctionFwdDeclOrStub(GD, /* Stub = */ true);
+}
+
llvm::DIGlobalVariable *
CGDebugInfo::getGlobalVariableForwardDeclaration(const VarDecl *VD) {
QualType T;
@@ -3146,6 +3244,27 @@ void CGDebugInfo::EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc,
TParamsArray.get(), getFunctionDeclaration(D)));
}
+void CGDebugInfo::EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ // If there is a subprogram for this function available then use it.
+ auto FI = SPCache.find(FD->getCanonicalDecl());
+ llvm::DISubprogram *SP = nullptr;
+ if (FI != SPCache.end())
+ SP = dyn_cast_or_null<llvm::DISubprogram>(FI->second);
+ if (!SP)
+ SP = getFunctionStub(GD);
+ FnBeginRegionCount.push_back(LexicalBlockStack.size());
+ LexicalBlockStack.emplace_back(SP);
+ setInlinedAt(Builder.getCurrentDebugLocation());
+ EmitLocation(Builder, FD->getLocation());
+}
+
+void CGDebugInfo::EmitInlineFunctionEnd(CGBuilderTy &Builder) {
+ assert(CurInlinedAt && "unbalanced inline scope stack");
+ EmitFunctionEnd(Builder);
+ setInlinedAt(llvm::DebugLoc(CurInlinedAt).getInlinedAt());
+}
+
void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
// Update our current location
setLocation(Loc);
@@ -3155,7 +3274,7 @@ void CGDebugInfo::EmitLocation(CGBuilderTy &Builder, SourceLocation Loc) {
llvm::MDNode *Scope = LexicalBlockStack.back();
Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
- getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope));
+ getLineNumber(CurLoc), getColumnNumber(CurLoc), Scope, CurInlinedAt));
}
void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
@@ -3167,14 +3286,29 @@ void CGDebugInfo::CreateLexicalBlock(SourceLocation Loc) {
getColumnNumber(CurLoc)));
}
+void CGDebugInfo::AppendAddressSpaceXDeref(
+ unsigned AddressSpace,
+ SmallVectorImpl<int64_t> &Expr) const {
+ Optional<unsigned> DWARFAddressSpace =
+ CGM.getTarget().getDWARFAddressSpace(AddressSpace);
+ if (!DWARFAddressSpace)
+ return;
+
+ Expr.push_back(llvm::dwarf::DW_OP_constu);
+ Expr.push_back(DWARFAddressSpace.getValue());
+ Expr.push_back(llvm::dwarf::DW_OP_swap);
+ Expr.push_back(llvm::dwarf::DW_OP_xderef);
+}
+
void CGDebugInfo::EmitLexicalBlockStart(CGBuilderTy &Builder,
SourceLocation Loc) {
// Set our current location.
setLocation(Loc);
// Emit a line table change for the current location inside the new scope.
- Builder.SetCurrentDebugLocation(llvm::DebugLoc::get(
- getLineNumber(Loc), getColumnNumber(Loc), LexicalBlockStack.back()));
+ Builder.SetCurrentDebugLocation(
+ llvm::DebugLoc::get(getLineNumber(Loc), getColumnNumber(Loc),
+ LexicalBlockStack.back(), CurInlinedAt));
if (DebugKind <= codegenoptions::DebugLineTablesOnly)
return;
@@ -3316,13 +3450,16 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
Line = getLineNumber(VD->getLocation());
Column = getColumnNumber(VD->getLocation());
}
- SmallVector<int64_t, 9> Expr;
+ SmallVector<int64_t, 13> Expr;
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
if (VD->isImplicit())
Flags |= llvm::DINode::FlagArtificial;
auto Align = getDeclAlignIfRequired(VD, CGM.getContext());
+ unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(VD->getType());
+ AppendAddressSpaceXDeref(AddressSpace, Expr);
+
// If this is the first argument and it is implicit then
// give it an object pointer flag.
// FIXME: There has to be a better way to do this, but for static
@@ -3360,9 +3497,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
Line, Ty, Align);
// Insert an llvm.dbg.declare into the current block.
- DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope),
- Builder.GetInsertBlock());
+ DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(Expr),
+ llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
+ Builder.GetInsertBlock());
return;
} else if (isa<VariableArrayType>(VD->getType()))
Expr.push_back(llvm::dwarf::DW_OP_deref);
@@ -3393,9 +3531,10 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
Flags | llvm::DINode::FlagArtificial, FieldAlign);
// Insert an llvm.dbg.declare into the current block.
- DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope),
- Builder.GetInsertBlock());
+ DBuilder.insertDeclare(
+ Storage, D, DBuilder.createExpression(Expr),
+ llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
+ Builder.GetInsertBlock());
}
}
}
@@ -3411,7 +3550,7 @@ void CGDebugInfo::EmitDeclare(const VarDecl *VD, llvm::Value *Storage,
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
- llvm::DebugLoc::get(Line, Column, Scope),
+ llvm::DebugLoc::get(Line, Column, Scope, CurInlinedAt),
Builder.GetInsertBlock());
}
@@ -3492,7 +3631,8 @@ void CGDebugInfo::EmitDeclareOfBlockDeclRefVariable(
Line, Ty, false, llvm::DINode::FlagZero, Align);
// Insert an llvm.dbg.declare into the current block.
- auto DL = llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back());
+ auto DL =
+ llvm::DebugLoc::get(Line, Column, LexicalBlockStack.back(), CurInlinedAt);
if (InsertPoint)
DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(addr), DL,
InsertPoint);
@@ -3660,12 +3800,13 @@ void CGDebugInfo::EmitDeclareOfBlockLiteralArgVariable(const CGBlockInfo &block,
// Insert an llvm.dbg.value into the current block.
DBuilder.insertDbgValueIntrinsic(
LocalAddr, 0, debugVar, DBuilder.createExpression(),
- llvm::DebugLoc::get(line, column, scope), Builder.GetInsertBlock());
+ llvm::DebugLoc::get(line, column, scope, CurInlinedAt),
+ Builder.GetInsertBlock());
}
// Insert an llvm.dbg.declare into the current block.
DBuilder.insertDeclare(Arg, debugVar, DBuilder.createExpression(),
- llvm::DebugLoc::get(line, column, scope),
+ llvm::DebugLoc::get(line, column, scope, CurInlinedAt),
Builder.GetInsertBlock());
}
@@ -3747,9 +3888,16 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
GVE = CollectAnonRecordDecls(RD, Unit, LineNo, LinkageName, Var, DContext);
} else {
auto Align = getDeclAlignIfRequired(D, CGM.getContext());
+
+ SmallVector<int64_t, 4> Expr;
+ unsigned AddressSpace =
+ CGM.getContext().getTargetAddressSpace(D->getType());
+ AppendAddressSpaceXDeref(AddressSpace, Expr);
+
GVE = DBuilder.createGlobalVariableExpression(
DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
- Var->hasLocalLinkage(), /*Expr=*/nullptr,
+ Var->hasLocalLinkage(),
+ Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
getOrCreateStaticDataMemberDeclarationOrNull(D), Align);
Var->addDebugInfo(GVE);
}
diff --git a/lib/CodeGen/CGDebugInfo.h b/lib/CodeGen/CGDebugInfo.h
index ac2e8dd2e0a4..5050ca0ad3fa 100644
--- a/lib/CodeGen/CGDebugInfo.h
+++ b/lib/CodeGen/CGDebugInfo.h
@@ -61,6 +61,7 @@ class CGDebugInfo {
ModuleMap *ClangModuleMap = nullptr;
ExternalASTSource::ASTSourceDescriptor PCHDescriptor;
SourceLocation CurLoc;
+ llvm::MDNode *CurInlinedAt = nullptr;
llvm::DIType *VTablePtrType = nullptr;
llvm::DIType *ClassTy = nullptr;
llvm::DICompositeType *ObjTy = nullptr;
@@ -292,6 +293,15 @@ class CGDebugInfo {
/// Create a new lexical block node and push it on the stack.
void CreateLexicalBlock(SourceLocation Loc);
+  /// If the target-specific LLVM \p AddressSpace directly maps to a
+  /// target-specific DWARF address space, appends the extended dereferencing
+  /// mechanism to the complex expression \p Expr. Otherwise, does nothing.
+  ///
+  /// The extended dereferencing mechanism has the following format:
+  ///     DW_OP_constu <DWARF Address Space> DW_OP_swap DW_OP_xderef
+ void AppendAddressSpaceXDeref(unsigned AddressSpace,
+ SmallVectorImpl<int64_t> &Expr) const;
+
public:
CGDebugInfo(CodeGenModule &CGM);
~CGDebugInfo();
@@ -320,6 +330,17 @@ public:
/// ignored.
void setLocation(SourceLocation Loc);
+ /// Return the current source location. This does not necessarily correspond
+ /// to the IRBuilder's current DebugLoc.
+ SourceLocation getLocation() const { return CurLoc; }
+
+ /// Update the current inline scope. All subsequent calls to \p EmitLocation
+ /// will create a location with this inlinedAt field.
+ void setInlinedAt(llvm::MDNode *InlinedAt) { CurInlinedAt = InlinedAt; }
+
+ /// \return the current inline scope.
+ llvm::MDNode *getInlinedAt() const { return CurInlinedAt; }
+
// Converts a SourceLocation to a DebugLoc
llvm::DebugLoc SourceLocToDebugLoc(SourceLocation Loc);
@@ -336,6 +357,11 @@ public:
SourceLocation ScopeLoc, QualType FnType,
llvm::Function *Fn, CGBuilderTy &Builder);
+ /// Start a new scope for an inlined function.
+ void EmitInlineFunctionStart(CGBuilderTy &Builder, GlobalDecl GD);
+ /// End an inlined function scope.
+ void EmitInlineFunctionEnd(CGBuilderTy &Builder);
+
/// Emit debug info for a function declaration.
void EmitFunctionDecl(GlobalDecl GD, SourceLocation Loc, QualType FnType);
@@ -409,9 +435,21 @@ public:
void completeType(const RecordDecl *RD);
void completeRequiredType(const RecordDecl *RD);
void completeClassData(const RecordDecl *RD);
+ void completeClass(const RecordDecl *RD);
void completeTemplateDefinition(const ClassTemplateSpecializationDecl &SD);
-
+ void completeUnusedClass(const CXXRecordDecl &D);
+
+ /// Create debug info for a macro defined by a #define directive or a macro
+ /// undefined by a #undef directive.
+ llvm::DIMacro *CreateMacro(llvm::DIMacroFile *Parent, unsigned MType,
+ SourceLocation LineLoc, StringRef Name,
+ StringRef Value);
+
+ /// Create debug info for a file referenced by an #include directive.
+ llvm::DIMacroFile *CreateTempMacroFile(llvm::DIMacroFile *Parent,
+ SourceLocation LineLoc,
+ SourceLocation FileLoc);
private:
/// Emit call to llvm.dbg.declare for a variable declaration.
void EmitDeclare(const VarDecl *decl, llvm::Value *AI,
@@ -491,11 +529,18 @@ private:
llvm::DIDerivedType *
getOrCreateStaticDataMemberDeclarationOrNull(const VarDecl *D);
+ /// Helper that either creates a forward declaration or a stub.
+ llvm::DISubprogram *getFunctionFwdDeclOrStub(GlobalDecl GD, bool Stub);
+
/// Create a subprogram describing the forward declaration
- /// represented in the given FunctionDecl.
- llvm::DISubprogram *getFunctionForwardDeclaration(const FunctionDecl *FD);
+ /// represented in the given FunctionDecl wrapped in a GlobalDecl.
+ llvm::DISubprogram *getFunctionForwardDeclaration(GlobalDecl GD);
+
+ /// Create a DISubprogram describing the function
+ /// represented in the given FunctionDecl wrapped in a GlobalDecl.
+ llvm::DISubprogram *getFunctionStub(GlobalDecl GD);
- /// Create a global variable describing the forward decalration
+ /// Create a global variable describing the forward declaration
/// represented in the given VarDecl.
llvm::DIGlobalVariable *
getGlobalVariableForwardDeclaration(const VarDecl *VD);
@@ -622,6 +667,20 @@ public:
};
+/// A scoped helper to set the current debug location to an inlined location.
+class ApplyInlineDebugLocation {
+ SourceLocation SavedLocation;
+ CodeGenFunction *CGF;
+
+public:
+ /// Set up the CodeGenFunction's DebugInfo to produce inline locations for the
+ /// function \p InlinedFn. The current debug location becomes the inlined call
+ /// site of the inlined function.
+ ApplyInlineDebugLocation(CodeGenFunction &CGF, GlobalDecl InlinedFn);
+  /// Restore everything to its original state.
+ ~ApplyInlineDebugLocation();
+};
+
} // namespace CodeGen
} // namespace clang
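A usage sketch for ApplyInlineDebugLocation, mirroring the call added in EmitInlinedInheritingCXXConstructorCall above:

GlobalDecl GD(Ctor, CtorType);
ApplyInlineDebugLocation DebugScope(*this, GD); // locations now carry inlinedAt
// ... emit the inlined body ...
// the destructor restores the previous inline scope and source location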
diff --git a/lib/CodeGen/CGDecl.cpp b/lib/CodeGen/CGDecl.cpp
index 0a88b2310beb..0f959043a22e 100644
--- a/lib/CodeGen/CGDecl.cpp
+++ b/lib/CodeGen/CGDecl.cpp
@@ -50,6 +50,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::TemplateTypeParm:
case Decl::UnresolvedUsingValue:
case Decl::NonTypeTemplateParm:
+ case Decl::CXXDeductionGuide:
case Decl::CXXMethod:
case Decl::CXXConstructor:
case Decl::CXXDestructor:
@@ -671,6 +672,27 @@ static void drillIntoBlockVariable(CodeGenFunction &CGF,
lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
+void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
+ SourceLocation Loc) {
+ if (!SanOpts.has(SanitizerKind::NullabilityAssign))
+ return;
+
+ auto Nullability = LHS.getType()->getNullability(getContext());
+ if (!Nullability || *Nullability != NullabilityKind::NonNull)
+ return;
+
+  // If the left-hand side must be nonnull, check that the right-hand side
+  // of the assignment is nonnull.
+ SanitizerScope SanScope(this);
+ llvm::Value *IsNotNull = Builder.CreateIsNotNull(RHS);
+ llvm::Constant *StaticData[] = {
+ EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(LHS.getType()),
+ llvm::ConstantInt::get(Int8Ty, 0), // The LogAlignment info is unused.
+ llvm::ConstantInt::get(Int8Ty, TCK_NonnullAssign)};
+ EmitCheck({{IsNotNull, SanitizerKind::NullabilityAssign}},
+ SanitizerHandler::TypeMismatch, StaticData, RHS);
+}
+
void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
LValue lvalue, bool capturedByInit) {
Qualifiers::ObjCLifetime lifetime = lvalue.getObjCLifetime();
@@ -678,6 +700,7 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
llvm::Value *value = EmitScalarExpr(init);
if (capturedByInit)
drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitNullabilityCheck(lvalue, value, init->getExprLoc());
EmitStoreThroughLValue(RValue::get(value), lvalue, true);
return;
}
@@ -766,6 +789,8 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
+ EmitNullabilityCheck(lvalue, value, init->getExprLoc());
+
// If the variable might have been accessed by its initializer, we
// might have to initialize with a barrier. We have to do this for
// both __weak and __strong, but __weak got filtered out above.
@@ -1022,11 +1047,21 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
// Emit a lifetime intrinsic if meaningful. There's no point in doing this
// if we don't have a valid insertion point (?).
if (HaveInsertPoint() && !IsMSCatchParam) {
- // goto or switch-case statements can break lifetime into several
- // regions which need more efforts to handle them correctly. PR28267
- // This is rare case, but it's better just omit intrinsics than have
- // them incorrectly placed.
- if (!Bypasses.IsBypassed(&D)) {
+ // If there's a jump into the lifetime of this variable, its lifetime
+ // gets broken up into several regions in IR, which requires more work
+ // to handle correctly. For now, just omit the intrinsics; this is a
+ // rare case, and it's better to just be conservatively correct.
+ // PR28267.
+ //
+ // We have to do this in all language modes if there's a jump past the
+ // declaration. We also have to do it in C if there's a jump to an
+ // earlier point in the current block because non-VLA lifetimes begin as
+ // soon as the containing block is entered, not when its variables
+ // actually come into scope; suppressing the lifetime annotations
+ // completely in this case is unnecessarily pessimistic, but again, this
+ // is rare.
+ if (!Bypasses.IsBypassed(&D) &&
+ !(!getLangOpts().CPlusPlus && hasLabelBeenSeenInCurrentScope())) {
uint64_t size = CGM.getDataLayout().getTypeAllocSize(allocaTy);
emission.SizeForLifetimeMarkers =
EmitLifetimeStart(size, address.getPointer());
@@ -1083,6 +1118,12 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, address.getPointer());
+ // Make sure we call @llvm.lifetime.end.
+ if (emission.useLifetimeMarkers())
+ EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
+ emission.getAllocatedAddress(),
+ emission.getSizeForLifetimeMarkers());
+
return emission;
}
@@ -1373,13 +1414,6 @@ void CodeGenFunction::EmitAutoVarCleanups(const AutoVarEmission &emission) {
const VarDecl &D = *emission.Variable;
- // Make sure we call @llvm.lifetime.end. This needs to happen
- // *last*, so the cleanup needs to be pushed *first*.
- if (emission.useLifetimeMarkers())
- EHStack.pushCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker,
- emission.getAllocatedAddress(),
- emission.getSizeForLifetimeMarkers());
-
// Check the type for a cleanup.
if (QualType::DestructionKind dtorKind = D.getType().isDestructedType())
emitAutoVarTypeCleanup(emission, dtorKind);
@@ -1691,17 +1725,19 @@ void CodeGenFunction::pushRegularPartialArrayCleanup(llvm::Value *arrayBegin,
/// Lazily declare the @llvm.lifetime.start intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeStartFn() {
- if (LifetimeStartFn) return LifetimeStartFn;
+ if (LifetimeStartFn)
+ return LifetimeStartFn;
LifetimeStartFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_start);
+ llvm::Intrinsic::lifetime_start, Int8PtrTy);
return LifetimeStartFn;
}
/// Lazily declare the @llvm.lifetime.end intrinsic.
llvm::Constant *CodeGenModule::getLLVMLifetimeEndFn() {
- if (LifetimeEndFn) return LifetimeEndFn;
+ if (LifetimeEndFn)
+ return LifetimeEndFn;
LifetimeEndFn = llvm::Intrinsic::getDeclaration(&getModule(),
- llvm::Intrinsic::lifetime_end);
+ llvm::Intrinsic::lifetime_end, Int8PtrTy);
return LifetimeEndFn;
}
@@ -1869,6 +1905,19 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
if (D.hasAttr<AnnotateAttr>())
EmitVarAnnotations(&D, DeclPtr.getPointer());
+
+ // We can only check return value nullability if all arguments to the
+ // function satisfy their nullability preconditions. This makes it necessary
+ // to emit null checks for args in the function body itself.
+ if (requiresReturnValueNullabilityCheck()) {
+ auto Nullability = Ty->getNullability(getContext());
+ if (Nullability && *Nullability == NullabilityKind::NonNull) {
+ SanitizerScope SanScope(this);
+ RetValNullabilityPrecondition =
+ Builder.CreateAnd(RetValNullabilityPrecondition,
+ Builder.CreateIsNotNull(Arg.getAnyValue()));
+ }
+ }
}
void CodeGenModule::EmitOMPDeclareReduction(const OMPDeclareReductionDecl *D,
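A hedged sketch of what EmitNullabilityCheck above guards (requires -fsanitize=nullability-assign; make_ptr is hypothetical). The store of the initializer into a _Nonnull lvalue is preceded by a runtime nonnull check on the value:

int *_Nonnull p = make_ptr();  // make_ptr()'s result is null-checked first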
diff --git a/lib/CodeGen/CGDeclCXX.cpp b/lib/CodeGen/CGDeclCXX.cpp
index f56e18216931..f61d60a63a6a 100644
--- a/lib/CodeGen/CGDeclCXX.cpp
+++ b/lib/CodeGen/CGDeclCXX.cpp
@@ -237,7 +237,7 @@ void CodeGenFunction::registerGlobalDtorWithAtExit(const VarDecl &VD,
llvm::FunctionType::get(IntTy, dtorStub->getType(), false);
llvm::Constant *atexit =
- CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeSet(),
+ CGM.CreateRuntimeFunction(atexitTy, "atexit", llvm::AttributeList(),
/*Local=*/true);
if (llvm::Function *atexitFn = dyn_cast<llvm::Function>(atexit))
atexitFn->setDoesNotThrow();
diff --git a/lib/CodeGen/CGException.cpp b/lib/CodeGen/CGException.cpp
index f908bf2b3b0a..ca1535182ec1 100644
--- a/lib/CodeGen/CGException.cpp
+++ b/lib/CodeGen/CGException.cpp
@@ -180,8 +180,8 @@ static const EHPersonality &getObjCXXPersonality(const llvm::Triple &T,
// The GCC runtime's personality function inherently doesn't support
// mixed EH. Use the C++ personality just to avoid returning null.
case ObjCRuntime::GCC:
- case ObjCRuntime::ObjFW: // XXX: this will change soon
- return EHPersonality::GNU_ObjC;
+ case ObjCRuntime::ObjFW:
+ return getObjCPersonality(T, L);
case ObjCRuntime::GNUstep:
return EHPersonality::GNU_ObjCXX;
}
@@ -231,7 +231,7 @@ static llvm::Constant *getPersonalityFn(CodeGenModule &CGM,
const EHPersonality &Personality) {
return CGM.CreateRuntimeFunction(llvm::FunctionType::get(CGM.Int32Ty, true),
Personality.PersonalityFn,
- llvm::AttributeSet(), /*Local=*/true);
+ llvm::AttributeList(), /*Local=*/true);
}
static llvm::Constant *getOpaquePersonalityFn(CodeGenModule &CGM,
@@ -1666,23 +1666,12 @@ void CodeGenFunction::startOutlinedSEHHelper(CodeGenFunction &ParentCGF,
QualType RetTy = IsFilter ? getContext().LongTy : getContext().VoidTy;
- llvm::Function *ParentFn = ParentCGF.CurFn;
const CGFunctionInfo &FnInfo =
CGM.getTypes().arrangeBuiltinFunctionDeclaration(RetTy, Args);
llvm::FunctionType *FnTy = CGM.getTypes().GetFunctionType(FnInfo);
llvm::Function *Fn = llvm::Function::Create(
FnTy, llvm::GlobalValue::InternalLinkage, Name.str(), &CGM.getModule());
- // The filter is either in the same comdat as the function, or it's internal.
- if (llvm::Comdat *C = ParentFn->getComdat()) {
- Fn->setComdat(C);
- } else if (ParentFn->hasWeakLinkage() || ParentFn->hasLinkOnceLinkage()) {
- llvm::Comdat *C = CGM.getModule().getOrInsertComdat(ParentFn->getName());
- ParentFn->setComdat(C);
- Fn->setComdat(C);
- } else {
- Fn->setLinkage(llvm::GlobalValue::InternalLinkage);
- }
IsOutlinedSEHHelper = true;
diff --git a/lib/CodeGen/CGExpr.cpp b/lib/CodeGen/CGExpr.cpp
index e5e34a5f3ed6..265ef27a46b0 100644
--- a/lib/CodeGen/CGExpr.cpp
+++ b/lib/CodeGen/CGExpr.cpp
@@ -71,7 +71,8 @@ Address CodeGenFunction::CreateTempAlloca(llvm::Type *Ty, CharUnits Align,
/// block.
llvm::AllocaInst *CodeGenFunction::CreateTempAlloca(llvm::Type *Ty,
const Twine &Name) {
- return new llvm::AllocaInst(Ty, nullptr, Name, AllocaInsertPt);
+ return new llvm::AllocaInst(Ty, CGM.getDataLayout().getAllocaAddrSpace(),
+ nullptr, Name, AllocaInsertPt);
}
/// CreateDefaultAlignTempAlloca - This creates an alloca with the
@@ -534,7 +535,8 @@ bool CodeGenFunction::sanitizePerformTypeCheck() const {
void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Value *Ptr, QualType Ty,
- CharUnits Alignment, bool SkipNullCheck) {
+ CharUnits Alignment,
+ SanitizerSet SkippedChecks) {
if (!sanitizePerformTypeCheck())
return;
@@ -552,7 +554,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
bool AllowNullPointers = TCK == TCK_DowncastPointer || TCK == TCK_Upcast ||
TCK == TCK_UpcastToVirtualBase;
if ((SanOpts.has(SanitizerKind::Null) || AllowNullPointers) &&
- !SkipNullCheck) {
+ !SkippedChecks.has(SanitizerKind::Null)) {
// The glvalue must not be an empty glvalue.
llvm::Value *IsNonNull = Builder.CreateIsNotNull(Ptr);
@@ -568,7 +570,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
}
}
- if (SanOpts.has(SanitizerKind::ObjectSize) && !Ty->isIncompleteType()) {
+ if (SanOpts.has(SanitizerKind::ObjectSize) &&
+ !SkippedChecks.has(SanitizerKind::ObjectSize) &&
+ !Ty->isIncompleteType()) {
uint64_t Size = getContext().getTypeSizeInChars(Ty).getQuantity();
// The glvalue must refer to a large enough storage region.
@@ -578,22 +582,24 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
llvm::Type *Tys[2] = { IntPtrTy, Int8PtrTy };
llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::objectsize, Tys);
llvm::Value *Min = Builder.getFalse();
+ llvm::Value *NullIsUnknown = Builder.getFalse();
llvm::Value *CastAddr = Builder.CreateBitCast(Ptr, Int8PtrTy);
- llvm::Value *LargeEnough =
- Builder.CreateICmpUGE(Builder.CreateCall(F, {CastAddr, Min}),
- llvm::ConstantInt::get(IntPtrTy, Size));
+ llvm::Value *LargeEnough = Builder.CreateICmpUGE(
+ Builder.CreateCall(F, {CastAddr, Min, NullIsUnknown}),
+ llvm::ConstantInt::get(IntPtrTy, Size));
Checks.push_back(std::make_pair(LargeEnough, SanitizerKind::ObjectSize));
}
uint64_t AlignVal = 0;
- if (SanOpts.has(SanitizerKind::Alignment)) {
+ if (SanOpts.has(SanitizerKind::Alignment) &&
+ !SkippedChecks.has(SanitizerKind::Alignment)) {
AlignVal = Alignment.getQuantity();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = getContext().getTypeAlignInChars(Ty).getQuantity();
// The glvalue must be suitably aligned.
- if (AlignVal) {
+ if (AlignVal > 1) {
llvm::Value *Align =
Builder.CreateAnd(Builder.CreatePtrToInt(Ptr, IntPtrTy),
llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
@@ -624,6 +630,7 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// or call a non-static member function
CXXRecordDecl *RD = Ty->getAsCXXRecordDecl();
if (SanOpts.has(SanitizerKind::Vptr) &&
+ !SkippedChecks.has(SanitizerKind::Vptr) &&
(TCK == TCK_MemberAccess || TCK == TCK_MemberCall ||
TCK == TCK_DowncastPointer || TCK == TCK_DowncastReference ||
TCK == TCK_UpcastToVirtualBase) &&
@@ -947,15 +954,47 @@ LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
E->getType());
}
+bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
+ const Expr *Base = Obj;
+ while (!isa<CXXThisExpr>(Base)) {
+ // The result of a dynamic_cast can be null.
+ if (isa<CXXDynamicCastExpr>(Base))
+ return false;
+
+ if (const auto *CE = dyn_cast<CastExpr>(Base)) {
+ Base = CE->getSubExpr();
+ } else if (const auto *PE = dyn_cast<ParenExpr>(Base)) {
+ Base = PE->getSubExpr();
+ } else if (const auto *UO = dyn_cast<UnaryOperator>(Base)) {
+ if (UO->getOpcode() == UO_Extension)
+ Base = UO->getSubExpr();
+ else
+ return false;
+ } else {
+ return false;
+ }
+ }
+ return true;
+}
+
LValue CodeGenFunction::EmitCheckedLValue(const Expr *E, TypeCheckKind TCK) {
LValue LV;
if (SanOpts.has(SanitizerKind::ArrayBounds) && isa<ArraySubscriptExpr>(E))
LV = EmitArraySubscriptExpr(cast<ArraySubscriptExpr>(E), /*Accessed*/true);
else
LV = EmitLValue(E);
- if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple())
+ if (!isa<DeclRefExpr>(E) && !LV.isBitField() && LV.isSimple()) {
+ SanitizerSet SkippedChecks;
+ if (const auto *ME = dyn_cast<MemberExpr>(E)) {
+ bool IsBaseCXXThis = IsWrappedCXXThis(ME->getBase());
+ if (IsBaseCXXThis)
+ SkippedChecks.set(SanitizerKind::Alignment, true);
+ if (IsBaseCXXThis || isa<DeclRefExpr>(ME->getBase()))
+ SkippedChecks.set(SanitizerKind::Null, true);
+ }
EmitTypeCheck(TCK, E->getExprLoc(), LV.getPointer(),
- E->getType(), LV.getAlignment());
+ E->getType(), LV.getAlignment(), SkippedChecks);
+ }
return LV;
}
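
EmitTypeCheck now takes a SanitizerSet of checks to skip instead of a single SkipNullCheck flag, and the caller above uses it to drop provably redundant UBSan checks: when the base of a member access is `this`, possibly wrapped in parens, no-op casts, or __extension__ (per IsWrappedCXXThis), it is known non-null and suitably aligned, and a plain DeclRefExpr base likewise cannot be null. The alignment check is also skipped when the required alignment is one byte (the AlignVal > 1 change above), where it was vacuously true. The same pattern recurs in EmitMemberExpr and in the member-call emission in CGExprCXX.cpp below. Roughly the kind of code affected, with -fsanitize=null,alignment:

    struct S {
      int x;
      int direct() const { return x; }          // this->x: both checks skipped
      int wrapped() const { return (this)->x; } // wrapped `this`: also skipped
    };

dynamic_cast results are deliberately excluded from the wrapping walk, since they can be null.
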
@@ -1033,7 +1072,19 @@ LValue CodeGenFunction::EmitLValue(const Expr *E) {
const auto *cleanups = cast<ExprWithCleanups>(E);
enterFullExpression(cleanups);
RunCleanupsScope Scope(*this);
- return EmitLValue(cleanups->getSubExpr());
+ LValue LV = EmitLValue(cleanups->getSubExpr());
+ if (LV.isSimple()) {
+ // Defend against branches out of gnu statement expressions surrounded by
+ // cleanups.
+ llvm::Value *V = LV.getPointer();
+ Scope.ForceCleanup({&V});
+ return LValue::MakeAddr(Address(V, LV.getAlignment()), LV.getType(),
+ getContext(), LV.getAlignmentSource(),
+ LV.getTBAAInfo());
+ }
+ // FIXME: Is it possible to create an ExprWithCleanups that produces a
+ // bitfield lvalue or some other non-simple lvalue?
+ return LV;
}
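
When the sub-expression of an ExprWithCleanups yields a simple lvalue, the cleanups are now forced before returning and the pointer is rewritten through ForceCleanup, so it remains valid on paths that leave the expression through the shared cleanup block; otherwise a branch out of a GNU statement expression could leave a use that is not dominated by its definition. A hypothetical sketch of the affected shape, a statement expression with a pending cleanup and an early branch out:

    struct Guard { Guard(); ~Guard(); };
    int f(bool b) {
      int r = ({ Guard g; if (b) goto out; 1; });
      return r;
    out:
      return -1;
    }

The complex and scalar emitters receive the same treatment later in this patch (see CGExprComplex.cpp and CGExprScalar.cpp).
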
case Expr::CXXDefaultArgExprClass:
@@ -1265,6 +1316,53 @@ llvm::MDNode *CodeGenFunction::getRangeForLoadFromType(QualType Ty) {
return MDHelper.createRange(Min, End);
}
+bool CodeGenFunction::EmitScalarRangeCheck(llvm::Value *Value, QualType Ty,
+ SourceLocation Loc) {
+ bool HasBoolCheck = SanOpts.has(SanitizerKind::Bool);
+ bool HasEnumCheck = SanOpts.has(SanitizerKind::Enum);
+ if (!HasBoolCheck && !HasEnumCheck)
+ return false;
+
+ bool IsBool = hasBooleanRepresentation(Ty) ||
+ NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
+ bool NeedsBoolCheck = HasBoolCheck && IsBool;
+ bool NeedsEnumCheck = HasEnumCheck && Ty->getAs<EnumType>();
+ if (!NeedsBoolCheck && !NeedsEnumCheck)
+ return false;
+
+ // Single-bit booleans don't need to be checked. Special-case this to avoid
+ // a bit width mismatch when handling bitfield values. This is handled by
+ // EmitFromMemory for the non-bitfield case.
+ if (IsBool &&
+ cast<llvm::IntegerType>(Value->getType())->getBitWidth() == 1)
+ return false;
+
+ llvm::APInt Min, End;
+ if (!getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool))
+ return true;
+
+ SanitizerScope SanScope(this);
+ llvm::Value *Check;
+ --End;
+ if (!Min) {
+ Check = Builder.CreateICmpULE(
+ Value, llvm::ConstantInt::get(getLLVMContext(), End));
+ } else {
+ llvm::Value *Upper = Builder.CreateICmpSLE(
+ Value, llvm::ConstantInt::get(getLLVMContext(), End));
+ llvm::Value *Lower = Builder.CreateICmpSGE(
+ Value, llvm::ConstantInt::get(getLLVMContext(), Min));
+ Check = Builder.CreateAnd(Upper, Lower);
+ }
+ llvm::Constant *StaticArgs[] = {EmitCheckSourceLocation(Loc),
+ EmitCheckTypeDescriptor(Ty)};
+ SanitizerMask Kind =
+ NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
+ EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
+ StaticArgs, EmitCheckValue(Value));
+ return true;
+}
+
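
The bool/enum load check is factored out of EmitLoadOfScalar into EmitScalarRangeCheck so that bitfield loads (EmitLoadOfBitfieldLValue, below) can share it. It returns true when a check was emitted, in which case the caller must not attach !range metadata, since that metadata would let the optimizer fold the just-emitted comparison away. Roughly what becomes checkable, with -fsanitize=bool,enum:

    enum E { A, B };           // loads checked against the declared value range
    struct P { E e : 8; };     // enum bitfield loads are now checked
    struct Q { bool b : 8; };  // multi-bit bool bitfields too
    struct R { bool b : 1; };  // single-bit bool: exempt, cannot hold a bad value
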
llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
QualType Ty,
SourceLocation Loc,
@@ -1273,26 +1371,28 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
QualType TBAABaseType,
uint64_t TBAAOffset,
bool isNontemporal) {
- // For better performance, handle vector loads differently.
- if (Ty->isVectorType()) {
- const llvm::Type *EltTy = Addr.getElementType();
-
- const auto *VTy = cast<llvm::VectorType>(EltTy);
-
- // Handle vectors of size 3 like size 4 for better performance.
- if (VTy->getNumElements() == 3) {
-
- // Bitcast to vec4 type.
- llvm::VectorType *vec4Ty = llvm::VectorType::get(VTy->getElementType(),
- 4);
- Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
- // Now load value.
- llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
-
- // Shuffle vector to get vec3.
- V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
- {0, 1, 2}, "extractVec");
- return EmitFromMemory(V, Ty);
+ if (!CGM.getCodeGenOpts().PreserveVec3Type) {
+ // For better performance, handle vector loads differently.
+ if (Ty->isVectorType()) {
+ const llvm::Type *EltTy = Addr.getElementType();
+
+ const auto *VTy = cast<llvm::VectorType>(EltTy);
+
+ // Handle vectors of size 3 like size 4 for better performance.
+ if (VTy->getNumElements() == 3) {
+
+ // Bitcast to vec4 type.
+ llvm::VectorType *vec4Ty =
+ llvm::VectorType::get(VTy->getElementType(), 4);
+ Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
+ // Now load value.
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+
+ // Shuffle vector to get vec3.
+ V = Builder.CreateShuffleVector(V, llvm::UndefValue::get(vec4Ty),
+ {0, 1, 2}, "extractVec");
+ return EmitFromMemory(V, Ty);
+ }
}
}
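
Both the vec3 load widening here and the matching store path below are now gated on the new PreserveVec3Type codegen flag (surfaced as the cc1 option -fpreserve-vec3-type, added for OpenCL-style targets where a true <3 x T> in memory matters). By default nothing changes; with the flag set, a float3 access stays <3 x float> instead of being round-tripped through <4 x float>. A sketch of an affected access, using clang's vector extension:

    typedef float float3 __attribute__((ext_vector_type(3)));
    float3 load3(const float3 *p) { return *p; }  // widened to vec4 unless preserved
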
@@ -1317,35 +1417,9 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
false /*ConvertTypeToTag*/);
}
- bool IsBool = hasBooleanRepresentation(Ty) ||
- NSAPI(CGM.getContext()).isObjCBOOLType(Ty);
- bool NeedsBoolCheck = SanOpts.has(SanitizerKind::Bool) && IsBool;
- bool NeedsEnumCheck =
- SanOpts.has(SanitizerKind::Enum) && Ty->getAs<EnumType>();
- if (NeedsBoolCheck || NeedsEnumCheck) {
- SanitizerScope SanScope(this);
- llvm::APInt Min, End;
- if (getRangeForType(*this, Ty, Min, End, /*StrictEnums=*/true, IsBool)) {
- --End;
- llvm::Value *Check;
- if (!Min)
- Check = Builder.CreateICmpULE(
- Load, llvm::ConstantInt::get(getLLVMContext(), End));
- else {
- llvm::Value *Upper = Builder.CreateICmpSLE(
- Load, llvm::ConstantInt::get(getLLVMContext(), End));
- llvm::Value *Lower = Builder.CreateICmpSGE(
- Load, llvm::ConstantInt::get(getLLVMContext(), Min));
- Check = Builder.CreateAnd(Upper, Lower);
- }
- llvm::Constant *StaticArgs[] = {
- EmitCheckSourceLocation(Loc),
- EmitCheckTypeDescriptor(Ty)
- };
- SanitizerMask Kind = NeedsEnumCheck ? SanitizerKind::Enum : SanitizerKind::Bool;
- EmitCheck(std::make_pair(Check, Kind), SanitizerHandler::LoadInvalidValue,
- StaticArgs, EmitCheckValue(Load));
- }
+ if (EmitScalarRangeCheck(Load, Ty, Loc)) {
+ // In order to prevent the optimizer from throwing away the check, don't
+ // attach range metadata to the load.
} else if (CGM.getCodeGenOpts().OptimizationLevel > 0)
if (llvm::MDNode *RangeInfo = getRangeForLoadFromType(Ty))
Load->setMetadata(llvm::LLVMContext::MD_range, RangeInfo);
@@ -1386,24 +1460,25 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
uint64_t TBAAOffset,
bool isNontemporal) {
- // Handle vectors differently to get better performance.
- if (Ty->isVectorType()) {
- llvm::Type *SrcTy = Value->getType();
- auto *VecTy = cast<llvm::VectorType>(SrcTy);
- // Handle vec3 special.
- if (VecTy->getNumElements() == 3) {
- // Our source is a vec3, do a shuffle vector to make it a vec4.
- llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
- Builder.getInt32(2),
- llvm::UndefValue::get(Builder.getInt32Ty())};
- llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
- Value = Builder.CreateShuffleVector(Value,
- llvm::UndefValue::get(VecTy),
- MaskV, "extractVec");
- SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
- }
- if (Addr.getElementType() != SrcTy) {
- Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
+ if (!CGM.getCodeGenOpts().PreserveVec3Type) {
+ // Handle vectors differently to get better performance.
+ if (Ty->isVectorType()) {
+ llvm::Type *SrcTy = Value->getType();
+ auto *VecTy = cast<llvm::VectorType>(SrcTy);
+ // Handle vec3 special.
+ if (VecTy->getNumElements() == 3) {
+ // Our source is a vec3, do a shuffle vector to make it a vec4.
+ llvm::Constant *Mask[] = {Builder.getInt32(0), Builder.getInt32(1),
+ Builder.getInt32(2),
+ llvm::UndefValue::get(Builder.getInt32Ty())};
+ llvm::Value *MaskV = llvm::ConstantVector::get(Mask);
+ Value = Builder.CreateShuffleVector(Value, llvm::UndefValue::get(VecTy),
+ MaskV, "extractVec");
+ SrcTy = llvm::VectorType::get(VecTy->getElementType(), 4);
+ }
+ if (Addr.getElementType() != SrcTy) {
+ Addr = Builder.CreateElementBitCast(Addr, SrcTy, "storetmp");
+ }
}
}
@@ -1487,10 +1562,11 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
return EmitLoadOfGlobalRegLValue(LV);
assert(LV.isBitField() && "Unknown LValue type!");
- return EmitLoadOfBitfieldLValue(LV);
+ return EmitLoadOfBitfieldLValue(LV, Loc);
}
-RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
+RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV,
+ SourceLocation Loc) {
const CGBitFieldInfo &Info = LV.getBitFieldInfo();
// Get the output type.
@@ -1515,7 +1591,7 @@ RValue CodeGenFunction::EmitLoadOfBitfieldLValue(LValue LV) {
"bf.clear");
}
Val = Builder.CreateIntCast(Val, ResLTy, Info.IsSigned, "bf.cast");
-
+ EmitScalarRangeCheck(Val, LV.getType(), Loc);
return RValue::get(Val);
}
@@ -2545,8 +2621,8 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
llvm::Value *Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
- llvm::AttributeSet::get(CGF.getLLVMContext(),
- llvm::AttributeSet::FunctionIndex, B),
+ llvm::AttributeList::get(CGF.getLLVMContext(),
+ llvm::AttributeList::FunctionIndex, B),
/*Local=*/true);
llvm::CallInst *HandlerCall = CGF.EmitNounwindRuntimeCall(Fn, FnArgs);
if (!MayReturn) {
@@ -2709,6 +2785,24 @@ void CodeGenFunction::EmitCfiSlowPathCheck(
EmitBlock(Cont);
}
+// Emit a stub for __cfi_check function so that the linker knows about this
+// symbol in LTO mode.
+void CodeGenFunction::EmitCfiCheckStub() {
+ llvm::Module *M = &CGM.getModule();
+ auto &Ctx = M->getContext();
+ llvm::Function *F = llvm::Function::Create(
+ llvm::FunctionType::get(VoidTy, {Int64Ty, Int8PtrTy, Int8PtrTy}, false),
+ llvm::GlobalValue::WeakAnyLinkage, "__cfi_check", M);
+ llvm::BasicBlock *BB = llvm::BasicBlock::Create(Ctx, "entry", F);
+ // FIXME: consider emitting an intrinsic call like
+ // call void @llvm.cfi_check(i64 %0, i8* %1, i8* %2)
+ // which can be lowered in CrossDSOCFI pass to the actual contents of
+ // __cfi_check. This would allow inlining of __cfi_check calls.
+ llvm::CallInst::Create(
+ llvm::Intrinsic::getDeclaration(M, llvm::Intrinsic::trap), "", BB);
+ llvm::ReturnInst::Create(Ctx, nullptr, BB);
+}
+
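
EmitCfiCheckStub gives every module built with cross-DSO CFI a weak __cfi_check definition so the symbol is visible to the linker during LTO; the CrossDSOCFI pass later replaces the trapping body with the real dispatch. A rough C-level analogue of what is emitted, under that assumption:

    __attribute__((weak))
    void __cfi_check(unsigned long CallSiteTypeId, void *Addr, void *DiagData) {
      __builtin_trap();
    }

The FIXME above sketches the longer-term plan of routing this through an intrinsic so that __cfi_check calls can be inlined.
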
// This function is basically a switch over the CFI failure kind, which is
// extracted from CFICheckFailData (1st function argument). Each case is either
// llvm.trap or a call to one of the two runtime handlers, based on
@@ -2821,7 +2915,7 @@ llvm::CallInst *CodeGenFunction::EmitTrapCall(llvm::Intrinsic::ID IntrID) {
if (!CGM.getCodeGenOpts().TrapFuncName.empty()) {
auto A = llvm::Attribute::get(getLLVMContext(), "trap-func-name",
CGM.getCodeGenOpts().TrapFuncName);
- TrapCall->addAttribute(llvm::AttributeSet::FunctionIndex, A);
+ TrapCall->addAttribute(llvm::AttributeList::FunctionIndex, A);
}
return TrapCall;
@@ -3335,7 +3429,14 @@ LValue CodeGenFunction::EmitMemberExpr(const MemberExpr *E) {
AlignmentSource AlignSource;
Address Addr = EmitPointerWithAlignment(BaseExpr, &AlignSource);
QualType PtrTy = BaseExpr->getType()->getPointeeType();
- EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy);
+ SanitizerSet SkippedChecks;
+ bool IsBaseCXXThis = IsWrappedCXXThis(BaseExpr);
+ if (IsBaseCXXThis)
+ SkippedChecks.set(SanitizerKind::Alignment, true);
+ if (IsBaseCXXThis || isa<DeclRefExpr>(BaseExpr))
+ SkippedChecks.set(SanitizerKind::Null, true);
+ EmitTypeCheck(TCK_MemberAccess, E->getExprLoc(), Addr.getPointer(), PtrTy,
+ /*Alignment=*/CharUnits::Zero(), SkippedChecks);
BaseLV = MakeAddrLValue(Addr, PtrTy, AlignSource);
} else
BaseLV = EmitCheckedLValue(BaseExpr, TCK_MemberAccess);
diff --git a/lib/CodeGen/CGExprAgg.cpp b/lib/CodeGen/CGExprAgg.cpp
index 009244784e50..49bbb4808eaa 100644
--- a/lib/CodeGen/CGExprAgg.cpp
+++ b/lib/CodeGen/CGExprAgg.cpp
@@ -111,6 +111,13 @@ public:
void VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
Visit(GE->getResultExpr());
}
+ void VisitCoawaitExpr(CoawaitExpr *E) {
+ CGF.EmitCoawaitExpr(*E, Dest, IsResultUnused);
+ }
+ void VisitCoyieldExpr(CoyieldExpr *E) {
+ CGF.EmitCoyieldExpr(*E, Dest, IsResultUnused);
+ }
+ void VisitUnaryCoawait(UnaryOperator *E) { Visit(E->getSubExpr()); }
void VisitUnaryExtension(UnaryOperator *E) { Visit(E->getSubExpr()); }
void VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *E) {
return Visit(E->getReplacement());
@@ -1267,7 +1274,7 @@ void AggExprEmitter::VisitInitListExpr(InitListExpr *E) {
// Store the initializer into the field.
EmitInitializationToLValue(E->getInit(curInitIndex++), LV);
} else {
- // We're out of initalizers; default-initialize to null
+ // We're out of initializers; default-initialize to null
EmitNullInitializationToLValue(LV);
}
diff --git a/lib/CodeGen/CGExprCXX.cpp b/lib/CodeGen/CGExprCXX.cpp
index 71c8fb8b7ae3..d02f074dd605 100644
--- a/lib/CodeGen/CGExprCXX.cpp
+++ b/lib/CodeGen/CGExprCXX.cpp
@@ -24,7 +24,15 @@
using namespace clang;
using namespace CodeGen;
-static RequiredArgs
+namespace {
+struct MemberCallInfo {
+ RequiredArgs ReqArgs;
+ // Number of prefix arguments for the call. Ignores the `this` pointer.
+ unsigned PrefixSize;
+};
+}
+
+static MemberCallInfo
commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
llvm::Value *This, llvm::Value *ImplicitParam,
QualType ImplicitParamTy, const CallExpr *CE,
@@ -48,6 +56,7 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
RequiredArgs required = RequiredArgs::forPrototypePlus(FPT, Args.size(), MD);
+ unsigned PrefixSize = Args.size() - 1;
// And the rest of the call args.
if (RtlArgs) {
@@ -65,7 +74,7 @@ commonEmitCXXMemberOrOperatorCall(CodeGenFunction &CGF, const CXXMethodDecl *MD,
FPT->getNumParams() == 0 &&
"No CallExpr specified for function with non-zero number of arguments");
}
- return required;
+ return {required, PrefixSize};
}
RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
@@ -75,9 +84,10 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorCall(
const CallExpr *CE, CallArgList *RtlArgs) {
const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
CallArgList Args;
- RequiredArgs required = commonEmitCXXMemberOrOperatorCall(
+ MemberCallInfo CallInfo = commonEmitCXXMemberOrOperatorCall(
*this, MD, This, ImplicitParam, ImplicitParamTy, CE, Args, RtlArgs);
- auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required);
+ auto &FnInfo = CGM.getTypes().arrangeCXXMethodCall(
+ Args, FPT, CallInfo.ReqArgs, CallInfo.PrefixSize);
return EmitCall(FnInfo, Callee, ReturnValue, Args);
}
@@ -290,10 +300,20 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (CE)
CallLoc = CE->getExprLoc();
- EmitTypeCheck(isa<CXXConstructorDecl>(CalleeDecl)
- ? CodeGenFunction::TCK_ConstructorCall
- : CodeGenFunction::TCK_MemberCall,
- CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()));
+ SanitizerSet SkippedChecks;
+ if (const auto *CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
+ auto *IOA = CMCE->getImplicitObjectArgument();
+ bool IsImplicitObjectCXXThis = IsWrappedCXXThis(IOA);
+ if (IsImplicitObjectCXXThis)
+ SkippedChecks.set(SanitizerKind::Alignment, true);
+ if (IsImplicitObjectCXXThis || isa<DeclRefExpr>(IOA))
+ SkippedChecks.set(SanitizerKind::Null, true);
+ }
+ EmitTypeCheck(
+ isa<CXXConstructorDecl>(CalleeDecl) ? CodeGenFunction::TCK_ConstructorCall
+ : CodeGenFunction::TCK_MemberCall,
+ CallLoc, This.getPointer(), C.getRecordType(CalleeDecl->getParent()),
+ /*Alignment=*/CharUnits::Zero(), SkippedChecks);
// FIXME: Uses of 'MD' past this point need to be audited. We may need to use
// 'CalleeDecl' instead.
@@ -420,7 +440,8 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
// And the rest of the call args
EmitCallArgs(Args, FPT, E->arguments());
- return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required),
+ return EmitCall(CGM.getTypes().arrangeCXXMethodCall(Args, FPT, required,
+ /*PrefixSize=*/0),
Callee, ReturnValue, Args);
}
@@ -659,7 +680,10 @@ static llvm::Value *EmitCXXNewAllocSize(CodeGenFunction &CGF,
// Emit the array size expression.
// We multiply the size of all dimensions for NumElements.
// e.g. for 'int[2][3]', ElemType is 'int' and NumElements is 6.
- numElements = CGF.EmitScalarExpr(e->getArraySize());
+ numElements = CGF.CGM.EmitConstantExpr(e->getArraySize(),
+ CGF.getContext().getSizeType(), &CGF);
+ if (!numElements)
+ numElements = CGF.EmitScalarExpr(e->getArraySize());
assert(isa<llvm::IntegerType>(numElements->getType()));
// The number of elements can have an arbitrary integer type;
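
The array bound of a new-expression is now emitted via EmitConstantExpr first, falling back to EmitScalarExpr only when the bound does not fold; a constant element count lets the subsequent size computation and its overflow checks fold away entirely. For example:

    extern int f();
    int *p = new int[8];    // bound is emitted as a constant
    int *q = new int[f()];  // non-constant: falls back to EmitScalarExpr
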
@@ -1256,10 +1280,10 @@ static RValue EmitNewDeleteCall(CodeGenFunction &CGF,
Fn && Fn->hasFnAttribute(llvm::Attribute::NoBuiltin)) {
// FIXME: Add addAttribute to CallSite.
if (llvm::CallInst *CI = dyn_cast<llvm::CallInst>(CallOrInvoke))
- CI->addAttribute(llvm::AttributeSet::FunctionIndex,
+ CI->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::Builtin);
else if (llvm::InvokeInst *II = dyn_cast<llvm::InvokeInst>(CallOrInvoke))
- II->addAttribute(llvm::AttributeSet::FunctionIndex,
+ II->addAttribute(llvm::AttributeList::FunctionIndex,
llvm::Attribute::Builtin);
else
llvm_unreachable("unexpected kind of call instruction");
@@ -1560,7 +1584,7 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
// FIXME: Why do we not pass a CalleeDecl here?
EmitCallArgs(allocatorArgs, allocatorType, E->placement_arguments(),
- /*CalleeDecl*/nullptr, /*ParamsToSkip*/ParamsToSkip);
+ /*AC*/AbstractCallee(), /*ParamsToSkip*/ParamsToSkip);
RValue RV =
EmitNewDeleteCall(*this, allocator, allocatorType, allocatorArgs);
diff --git a/lib/CodeGen/CGExprComplex.cpp b/lib/CodeGen/CGExprComplex.cpp
index 59bc9cdbc056..980972370dc2 100644
--- a/lib/CodeGen/CGExprComplex.cpp
+++ b/lib/CodeGen/CGExprComplex.cpp
@@ -110,6 +110,16 @@ public:
VisitSubstNonTypeTemplateParmExpr(SubstNonTypeTemplateParmExpr *PE) {
return Visit(PE->getReplacement());
}
+ ComplexPairTy VisitCoawaitExpr(CoawaitExpr *S) {
+ return CGF.EmitCoawaitExpr(*S).getComplexVal();
+ }
+ ComplexPairTy VisitCoyieldExpr(CoyieldExpr *S) {
+ return CGF.EmitCoyieldExpr(*S).getComplexVal();
+ }
+ ComplexPairTy VisitUnaryCoawait(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
+
// l-values.
ComplexPairTy VisitDeclRefExpr(DeclRefExpr *E) {
@@ -198,7 +208,11 @@ public:
ComplexPairTy VisitExprWithCleanups(ExprWithCleanups *E) {
CGF.enterFullExpression(E);
CodeGenFunction::RunCleanupsScope Scope(CGF);
- return Visit(E->getSubExpr());
+ ComplexPairTy Vals = Visit(E->getSubExpr());
+ // Defend against dominance problems caused by jumps out of expression
+ // evaluation through the shared cleanup block.
+ Scope.ForceCleanup({&Vals.first, &Vals.second});
+ return Vals;
}
ComplexPairTy VisitCXXScalarValueInitExpr(CXXScalarValueInitExpr *E) {
assert(E->getType()->isAnyComplexType() && "Expected complex type!");
diff --git a/lib/CodeGen/CGExprScalar.cpp b/lib/CodeGen/CGExprScalar.cpp
index 1b85c45cd4be..a64303831171 100644
--- a/lib/CodeGen/CGExprScalar.cpp
+++ b/lib/CodeGen/CGExprScalar.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "CodeGenFunction.h"
+#include "CGCleanup.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGObjCRuntime.h"
@@ -24,6 +25,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
+#include "llvm/ADT/Optional.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
@@ -47,7 +49,7 @@ struct BinOpInfo {
Value *RHS;
QualType Ty; // Computation Type.
BinaryOperator::Opcode Opcode; // Opcode of BinOp to perform
- bool FPContractable;
+ FPOptions FPFeatures;
const Expr *E; // Entire expr, for error unsupported. May not be binop.
};
@@ -58,6 +60,75 @@ static bool MustVisitNullValue(const Expr *E) {
return E->getType()->isNullPtrType();
}
+/// If \p E is a widened promoted integer, get its base (unpromoted) type.
+static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
+ const Expr *E) {
+ const Expr *Base = E->IgnoreImpCasts();
+ if (E == Base)
+ return llvm::None;
+
+ QualType BaseTy = Base->getType();
+ if (!BaseTy->isPromotableIntegerType() ||
+ Ctx.getTypeSize(BaseTy) >= Ctx.getTypeSize(E->getType()))
+ return llvm::None;
+
+ return BaseTy;
+}
+
+/// Check if \p E is a widened promoted integer.
+static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
+ return getUnwidenedIntegerType(Ctx, E).hasValue();
+}
+
+/// Check if we can skip the overflow check for \p Op.
+static bool CanElideOverflowCheck(const ASTContext &Ctx, const BinOpInfo &Op) {
+ assert((isa<UnaryOperator>(Op.E) || isa<BinaryOperator>(Op.E)) &&
+ "Expected a unary or binary operator");
+
+ if (const auto *UO = dyn_cast<UnaryOperator>(Op.E))
+ return IsWidenedIntegerOp(Ctx, UO->getSubExpr());
+
+ const auto *BO = cast<BinaryOperator>(Op.E);
+ auto OptionalLHSTy = getUnwidenedIntegerType(Ctx, BO->getLHS());
+ if (!OptionalLHSTy)
+ return false;
+
+ auto OptionalRHSTy = getUnwidenedIntegerType(Ctx, BO->getRHS());
+ if (!OptionalRHSTy)
+ return false;
+
+ QualType LHSTy = *OptionalLHSTy;
+ QualType RHSTy = *OptionalRHSTy;
+
+ // We usually don't need overflow checks for binary operations with widened
+ // operands. Multiplication with promoted unsigned operands is a special case.
+ if ((Op.Opcode != BO_Mul && Op.Opcode != BO_MulAssign) ||
+ !LHSTy->isUnsignedIntegerType() || !RHSTy->isUnsignedIntegerType())
+ return true;
+
+  // The overflow check can be skipped if either of the unpromoted types
+  // is less than half the size of the promoted type.
+ unsigned PromotedSize = Ctx.getTypeSize(Op.E->getType());
+ return (2 * Ctx.getTypeSize(LHSTy)) < PromotedSize ||
+ (2 * Ctx.getTypeSize(RHSTy)) < PromotedSize;
+}
+
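
CanElideOverflowCheck encodes the observation that arithmetic on operands widened from smaller types by the usual promotions cannot overflow the promoted type, so -fsanitize=signed-integer-overflow (and the unsigned variant) can skip instrumentation; EmitMul, EmitAdd, and EmitSub below all consult it. The one exception is multiplication of promoted unsigned operands, since e.g. 0xFFFF * 0xFFFF does overflow a 32-bit int; that case keeps its check unless one operand type is less than half the promoted width. Illustrative inputs:

    int add(short a, short b) { return a + b; }                    // check elided
    int mul(unsigned short a, unsigned short b) { return a * b; }  // check kept
    int mix(unsigned char a, unsigned short b) { return a * b; }   // elided again
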
+/// Update the FastMathFlags of LLVM IR from the FPOptions in LangOptions.
+static void updateFastMathFlags(llvm::FastMathFlags &FMF,
+ FPOptions FPFeatures) {
+ FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
+}
+
+/// Propagate fast-math flags from \p Op to the instruction in \p V.
+static Value *propagateFMFlags(Value *V, const BinOpInfo &Op) {
+ if (auto *I = dyn_cast<llvm::Instruction>(V)) {
+ llvm::FastMathFlags FMF = I->getFastMathFlags();
+ updateFastMathFlags(FMF, Op.FPFeatures);
+ I->setFastMathFlags(FMF);
+ }
+ return V;
+}
+
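
BinOpInfo now carries the full per-expression FPOptions instead of a single FPContractable bit, and propagateFMFlags transfers the result onto the emitted instruction's fast-math flags. Concretely, -ffp-contract=fast (contraction across statements) now tags each fmul/fadd/fsub with the 'contract' flag for the backend to fuse, while #pragma STDC FP_CONTRACT ON (contraction within a statement) continues to form llvm.fmuladd in tryEmitFMulAdd below. For example:

    // -ffp-contract=fast: the fmul/fadd carry 'contract'; the backend may fuse.
    // FP_CONTRACT ON: the same expression becomes a call to llvm.fmuladd.
    float f(float a, float b, float c) { return a * b + c; }
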
class ScalarExprEmitter
: public StmtVisitor<ScalarExprEmitter, Value*> {
CodeGenFunction &CGF;
@@ -221,6 +292,15 @@ public:
Value *VisitGenericSelectionExpr(GenericSelectionExpr *GE) {
return Visit(GE->getResultExpr());
}
+ Value *VisitCoawaitExpr(CoawaitExpr *S) {
+ return CGF.EmitCoawaitExpr(*S).getScalarVal();
+ }
+ Value *VisitCoyieldExpr(CoyieldExpr *S) {
+ return CGF.EmitCoyieldExpr(*S).getScalarVal();
+ }
+ Value *VisitUnaryCoawait(const UnaryOperator *E) {
+ return Visit(E->getSubExpr());
+ }
// Leaves.
Value *VisitIntegerLiteral(const IntegerLiteral *E) {
@@ -300,6 +380,24 @@ public:
return V;
}
+ Value *VisitObjCAvailabilityCheckExpr(ObjCAvailabilityCheckExpr *E) {
+ VersionTuple Version = E->getVersion();
+
+ // If we're checking for a platform older than our minimum deployment
+ // target, we can fold the check away.
+ if (Version <= CGF.CGM.getTarget().getPlatformMinVersion())
+ return llvm::ConstantInt::get(Builder.getInt1Ty(), 1);
+
+ Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
+ llvm::Value *Args[] = {
+ llvm::ConstantInt::get(CGF.CGM.Int32Ty, Version.getMajor()),
+ llvm::ConstantInt::get(CGF.CGM.Int32Ty, Min ? *Min : 0),
+ llvm::ConstantInt::get(CGF.CGM.Int32Ty, SMin ? *SMin : 0),
+ };
+
+ return CGF.EmitBuiltinAvailable(Args);
+ }
+
Value *VisitArraySubscriptExpr(ArraySubscriptExpr *E);
Value *VisitShuffleVectorExpr(ShuffleVectorExpr *E);
Value *VisitConvertVectorExpr(ConvertVectorExpr *E);
@@ -405,11 +503,7 @@ public:
return CGF.LoadCXXThis();
}
- Value *VisitExprWithCleanups(ExprWithCleanups *E) {
- CGF.enterFullExpression(E);
- CodeGenFunction::RunCleanupsScope Scope(CGF);
- return Visit(E->getSubExpr());
- }
+ Value *VisitExprWithCleanups(ExprWithCleanups *E);
Value *VisitCXXNewExpr(const CXXNewExpr *E) {
return CGF.EmitCXXNewExpr(E);
}
@@ -464,16 +558,21 @@ public:
return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
// Fall through.
case LangOptions::SOB_Trapping:
+ if (CanElideOverflowCheck(CGF.getContext(), Ops))
+ return Builder.CreateNSWMul(Ops.LHS, Ops.RHS, "mul");
return EmitOverflowCheckedBinOp(Ops);
}
}
if (Ops.Ty->isUnsignedIntegerType() &&
- CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+ !CanElideOverflowCheck(CGF.getContext(), Ops))
return EmitOverflowCheckedBinOp(Ops);
- if (Ops.LHS->getType()->isFPOrFPVectorTy())
- return Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ if (Ops.LHS->getType()->isFPOrFPVectorTy()) {
+ Value *V = Builder.CreateFMul(Ops.LHS, Ops.RHS, "mul");
+ return propagateFMFlags(V, Ops);
+ }
return Builder.CreateMul(Ops.LHS, Ops.RHS, "mul");
}
/// Create a binary op that checks for overflow.
@@ -1616,6 +1715,16 @@ Value *ScalarExprEmitter::VisitStmtExpr(const StmtExpr *E) {
E->getExprLoc());
}
+Value *ScalarExprEmitter::VisitExprWithCleanups(ExprWithCleanups *E) {
+ CGF.enterFullExpression(E);
+ CodeGenFunction::RunCleanupsScope Scope(CGF);
+ Value *V = Visit(E->getSubExpr());
+ // Defend against dominance problems caused by jumps out of expression
+ // evaluation through the shared cleanup block.
+ Scope.ForceCleanup({&V});
+ return V;
+}
+
//===----------------------------------------------------------------------===//
// Unary Operators
//===----------------------------------------------------------------------===//
@@ -1627,7 +1736,7 @@ static BinOpInfo createBinOpInfoFromIncDec(const UnaryOperator *E,
BinOp.RHS = llvm::ConstantInt::get(InVal->getType(), 1, false);
BinOp.Ty = E->getType();
BinOp.Opcode = IsInc ? BO_Add : BO_Sub;
- BinOp.FPContractable = false;
+ // FIXME: once UnaryOperator carries FPFeatures, copy it here.
BinOp.E = E;
return BinOp;
}
@@ -1645,6 +1754,8 @@ llvm::Value *ScalarExprEmitter::EmitIncDecConsiderOverflowBehavior(
return Builder.CreateNSWAdd(InVal, Amount, Name);
// Fall through.
case LangOptions::SOB_Trapping:
+ if (IsWidenedIntegerOp(CGF.getContext(), E->getSubExpr()))
+ return Builder.CreateNSWAdd(InVal, Amount, Name);
return EmitOverflowCheckedBinOp(createBinOpInfoFromIncDec(E, InVal, IsInc));
}
llvm_unreachable("Unknown SignedOverflowBehaviorTy");
@@ -1891,7 +2002,7 @@ Value *ScalarExprEmitter::VisitUnaryMinus(const UnaryOperator *E) {
BinOp.LHS = llvm::Constant::getNullValue(BinOp.RHS->getType());
BinOp.Ty = E->getType();
BinOp.Opcode = BO_Sub;
- BinOp.FPContractable = false;
+ // FIXME: once UnaryOperator carries FPFeatures, copy it here.
BinOp.E = E;
return EmitSub(BinOp);
}
@@ -2112,7 +2223,7 @@ BinOpInfo ScalarExprEmitter::EmitBinOps(const BinaryOperator *E) {
Result.RHS = Visit(E->getRHS());
Result.Ty = E->getType();
Result.Opcode = E->getOpcode();
- Result.FPContractable = E->isFPContractable();
+ Result.FPFeatures = E->getFPFeatures();
Result.E = E;
return Result;
}
@@ -2132,7 +2243,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
OpInfo.RHS = Visit(E->getRHS());
OpInfo.Ty = E->getComputationResultType();
OpInfo.Opcode = E->getOpcode();
- OpInfo.FPContractable = E->isFPContractable();
+ OpInfo.FPFeatures = E->getFPFeatures();
OpInfo.E = E;
// Load/convert the LHS.
LValue LHSLV = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
@@ -2263,8 +2374,10 @@ void ScalarExprEmitter::EmitUndefinedBehaviorIntegerDivAndRemCheck(
SanitizerKind::IntegerDivideByZero));
}
+ const auto *BO = cast<BinaryOperator>(Ops.E);
if (CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow) &&
- Ops.Ty->hasSignedIntegerRepresentation()) {
+ Ops.Ty->hasSignedIntegerRepresentation() &&
+ !IsWidenedIntegerOp(CGF.getContext(), BO->getLHS())) {
llvm::IntegerType *Ty = cast<llvm::IntegerType>(Zero->getType());
llvm::Value *IntMin =
@@ -2324,12 +2437,12 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
Value *ScalarExprEmitter::EmitRem(const BinOpInfo &Ops) {
// Rem in C can't be a floating point type: C99 6.5.5p2.
- if (CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero)) {
+ if ((CGF.SanOpts.has(SanitizerKind::IntegerDivideByZero) ||
+ CGF.SanOpts.has(SanitizerKind::SignedIntegerOverflow)) &&
+ Ops.Ty->isIntegerType()) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
llvm::Value *Zero = llvm::Constant::getNullValue(ConvertType(Ops.Ty));
-
- if (Ops.Ty->isIntegerType())
- EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
+ EmitUndefinedBehaviorIntegerDivAndRemCheck(Ops, Zero, false);
}
if (Ops.Ty->hasUnsignedIntegerRepresentation())
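
The division-and-remainder UB check is now also emitted for '%' when -fsanitize=signed-integer-overflow is enabled, not only under integer-divide-by-zero; this catches INT_MIN % -1, whose intermediate quotient INT_MIN / -1 is unrepresentable. For example:

    int rem(int a, int b) { return a % b; }
    // instrumented: b != 0, and !(a == INT_MIN && b == -1) for signed overflow
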
@@ -2577,12 +2690,7 @@ static Value* tryEmitFMulAdd(const BinOpInfo &op,
"Only fadd/fsub can be the root of an fmuladd.");
// Check whether this op is marked as fusable.
- if (!op.FPContractable)
- return nullptr;
-
- // Check whether -ffp-contract=on. (If -ffp-contract=off/fast, fusing is
- // either disabled, or handled entirely by the LLVM backend).
- if (CGF.CGM.getCodeGenOpts().getFPContractMode() != CodeGenOptions::FPC_On)
+ if (!op.FPFeatures.allowFPContractWithinStatement())
return nullptr;
// We have a potentially fusable op. Look for a mul on one of the operands.
@@ -2616,12 +2724,15 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
// Fall through.
case LangOptions::SOB_Trapping:
+ if (CanElideOverflowCheck(CGF.getContext(), op))
+ return Builder.CreateNSWAdd(op.LHS, op.RHS, "add");
return EmitOverflowCheckedBinOp(op);
}
}
if (op.Ty->isUnsignedIntegerType() &&
- CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+ !CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
@@ -2629,7 +2740,8 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder))
return FMulAdd;
- return Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ Value *V = Builder.CreateFAdd(op.LHS, op.RHS, "add");
+ return propagateFMFlags(V, op);
}
return Builder.CreateAdd(op.LHS, op.RHS, "add");
@@ -2647,19 +2759,23 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
// Fall through.
case LangOptions::SOB_Trapping:
+ if (CanElideOverflowCheck(CGF.getContext(), op))
+ return Builder.CreateNSWSub(op.LHS, op.RHS, "sub");
return EmitOverflowCheckedBinOp(op);
}
}
if (op.Ty->isUnsignedIntegerType() &&
- CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow))
+ CGF.SanOpts.has(SanitizerKind::UnsignedIntegerOverflow) &&
+ !CanElideOverflowCheck(CGF.getContext(), op))
return EmitOverflowCheckedBinOp(op);
if (op.LHS->getType()->isFPOrFPVectorTy()) {
// Try to form an fmuladd.
if (Value *FMulAdd = tryEmitFMulAdd(op, CGF, Builder, true))
return FMulAdd;
- return Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ Value *V = Builder.CreateFSub(op.LHS, op.RHS, "sub");
+ return propagateFMFlags(V, op);
}
return Builder.CreateSub(op.LHS, op.RHS, "sub");
@@ -2751,8 +2867,8 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
isa<llvm::IntegerType>(Ops.LHS->getType())) {
CodeGenFunction::SanitizerScope SanScope(&CGF);
SmallVector<std::pair<Value *, SanitizerMask>, 2> Checks;
- llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, RHS);
- llvm::Value *ValidExponent = Builder.CreateICmpULE(RHS, WidthMinusOne);
+ llvm::Value *WidthMinusOne = GetWidthMinusOneValue(Ops.LHS, Ops.RHS);
+ llvm::Value *ValidExponent = Builder.CreateICmpULE(Ops.RHS, WidthMinusOne);
if (SanitizeExponent) {
Checks.push_back(
@@ -2767,12 +2883,14 @@ Value *ScalarExprEmitter::EmitShl(const BinOpInfo &Ops) {
llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
llvm::BasicBlock *CheckShiftBase = CGF.createBasicBlock("check");
Builder.CreateCondBr(ValidExponent, CheckShiftBase, Cont);
+ llvm::Value *PromotedWidthMinusOne =
+ (RHS == Ops.RHS) ? WidthMinusOne
+ : GetWidthMinusOneValue(Ops.LHS, RHS);
CGF.EmitBlock(CheckShiftBase);
- llvm::Value *BitsShiftedOff =
- Builder.CreateLShr(Ops.LHS,
- Builder.CreateSub(WidthMinusOne, RHS, "shl.zeros",
- /*NUW*/true, /*NSW*/true),
- "shl.check");
+ llvm::Value *BitsShiftedOff = Builder.CreateLShr(
+ Ops.LHS, Builder.CreateSub(PromotedWidthMinusOne, RHS, "shl.zeros",
+ /*NUW*/ true, /*NSW*/ true),
+ "shl.check");
if (CGF.getLangOpts().CPlusPlus) {
// In C99, we are not permitted to shift a 1 bit into the sign bit.
// Under C++11's rules, shifting a 1 bit into the sign bit is
@@ -3038,10 +3156,12 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// because the result is altered by the store, i.e., [C99 6.5.16p1]
// 'An assignment expression has the value of the left operand after
// the assignment...'.
- if (LHS.isBitField())
+ if (LHS.isBitField()) {
CGF.EmitStoreThroughBitfieldLValue(RValue::get(RHS), LHS, &RHS);
- else
+ } else {
+ CGF.EmitNullabilityCheck(LHS, RHS, E->getExprLoc());
CGF.EmitStoreThroughLValue(RValue::get(RHS), LHS);
+ }
}
// If the result is clearly ignored, return now.
@@ -3327,9 +3447,11 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
// safe to evaluate the LHS and RHS unconditionally.
if (isCheapEnoughToEvaluateUnconditionally(lhsExpr, CGF) &&
isCheapEnoughToEvaluateUnconditionally(rhsExpr, CGF)) {
- CGF.incrementProfileCounter(E);
-
llvm::Value *CondV = CGF.EvaluateExprAsBool(condExpr);
+ llvm::Value *StepV = Builder.CreateZExtOrBitCast(CondV, CGF.Int64Ty);
+
+ CGF.incrementProfileCounter(E, StepV);
+
llvm::Value *LHS = Visit(lhsExpr);
llvm::Value *RHS = Visit(rhsExpr);
if (!LHS) {
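
When both arms of a conditional are cheap enough to evaluate unconditionally, no branch is emitted, so the PGO counter for the true edge used to be incremented on every execution; it is now incremented by the zero-extended condition value (StepV), so the profile still records how often the condition was actually true. For example:

    int sel(int a, int b, bool c) { return c ? a : b; }
    // counter += (c ? 1 : 0) instead of counter += 1
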
@@ -3491,8 +3613,12 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// vector to get a vec4, then a bitcast if the target type is different.
if (NumElementsSrc == 3 && NumElementsDst != 3) {
Src = ConvertVec3AndVec4(Builder, CGF, Src, 4);
- Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
- DstTy);
+
+ if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ DstTy);
+ }
+
Src->setName("astype");
return Src;
}
@@ -3501,9 +3627,12 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
// to vec4 if the original type is not vec4, then a shuffle vector to
// get a vec3.
if (NumElementsSrc != 3 && NumElementsDst == 3) {
- auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
- Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
- Vec4Ty);
+ if (!CGF.CGM.getCodeGenOpts().PreserveVec3Type) {
+ auto Vec4Ty = llvm::VectorType::get(DstTy->getVectorElementType(), 4);
+ Src = createCastsForTypeOfSameSize(Builder, CGF.CGM.getDataLayout(), Src,
+ Vec4Ty);
+ }
+
Src = ConvertVec3AndVec4(Builder, CGF, Src, 3);
Src->setName("astype");
return Src;
diff --git a/lib/CodeGen/CGCUDABuiltin.cpp b/lib/CodeGen/CGGPUBuiltin.cpp
index 44dd003757ad..48156b1b26b7 100644
--- a/lib/CodeGen/CGCUDABuiltin.cpp
+++ b/lib/CodeGen/CGGPUBuiltin.cpp
@@ -1,4 +1,4 @@
-//===----- CGCUDABuiltin.cpp - Codegen for CUDA builtins ------------------===//
+//===------ CGGPUBuiltin.cpp - Codegen for GPU builtins -------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
//
-// Generates code for built-in CUDA calls which are not runtime-specific.
-// (Runtime-specific codegen lives in CGCUDARuntime.)
+// Generates code for built-in GPU calls which are not runtime-specific.
+// (Runtime-specific codegen lives in programming model specific files.)
//
//===----------------------------------------------------------------------===//
@@ -67,10 +67,9 @@ static llvm::Function *GetVprintfDeclaration(llvm::Module &M) {
// Note that by the time this function runs, E's args have already undergone the
// standard C vararg promotion (short -> int, float -> double, etc.).
RValue
-CodeGenFunction::EmitCUDADevicePrintfCallExpr(const CallExpr *E,
- ReturnValueSlot ReturnValue) {
- assert(getLangOpts().CUDA);
- assert(getLangOpts().CUDAIsDevice);
+CodeGenFunction::EmitNVPTXDevicePrintfCallExpr(const CallExpr *E,
+ ReturnValueSlot ReturnValue) {
+ assert(getTarget().getTriple().isNVPTX());
assert(E->getBuiltinCallee() == Builtin::BIprintf);
assert(E->getNumArgs() >= 1); // printf always has at least one arg.
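
The CUDA-specific printf lowering is generalized: the file becomes CGGPUBuiltin.cpp and the entry point is renamed to EmitNVPTXDevicePrintfCallExpr, keyed on the NVPTX target triple rather than CUDA language mode, presumably so that other GPU programming models targeting NVPTX (OpenMP offloading, for instance) can reuse the vprintf lowering. A sketch of the assumed call-site dispatch (the actual switch lives in CGBuiltin.cpp, not shown in this hunk):

    case Builtin::BIprintf:
      if (getTarget().getTriple().isNVPTX())
        return EmitNVPTXDevicePrintfCallExpr(E, ReturnValue);
      break;
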
diff --git a/lib/CodeGen/CGObjC.cpp b/lib/CodeGen/CGObjC.cpp
index 932b8a129e6e..3a09a15dbc15 100644
--- a/lib/CodeGen/CGObjC.cpp
+++ b/lib/CodeGen/CGObjC.cpp
@@ -1,4 +1,4 @@
-//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
+//===---- CGObjC.cpp - Emit LLVM Code for Objective-C ---------------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -117,10 +117,22 @@ llvm::Value *CodeGenFunction::EmitObjCCollectionLiteral(const Expr *E,
const ObjCArrayLiteral *ALE = dyn_cast<ObjCArrayLiteral>(E);
if (!ALE)
DLE = cast<ObjCDictionaryLiteral>(E);
-
- // Compute the type of the array we're initializing.
+
+ // Optimize empty collections by referencing constants, when available.
uint64_t NumElements =
ALE ? ALE->getNumElements() : DLE->getNumElements();
+ if (NumElements == 0 && CGM.getLangOpts().ObjCRuntime.hasEmptyCollections()) {
+ StringRef ConstantName = ALE ? "__NSArray0__" : "__NSDictionary0__";
+ QualType IdTy(CGM.getContext().getObjCIdType());
+ llvm::Constant *Constant =
+ CGM.CreateRuntimeVariable(ConvertType(IdTy), ConstantName);
+ Address Addr(Constant, Context.getTypeAlignInChars(IdTy));
+ LValue LV = MakeAddrLValue(Addr, IdTy);
+ return Builder.CreateBitCast(EmitLoadOfScalar(LV, E->getLocStart()),
+ ConvertType(E->getType()));
+ }
+
+ // Compute the type of the array we're initializing.
llvm::APInt APNumElements(Context.getTypeSize(Context.getSizeType()),
NumElements);
QualType ElementType = Context.getObjCIdType().withConst();
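
Empty Objective-C collection literals are optimized to reference the runtime's shared immutable singletons instead of messaging a constructor with zero elements, guarded by ObjCRuntime::hasEmptyCollections() for runtimes that export them. The effect, with the Objective-C shown in comments:

    // NSArray *a = @[];       -> load of the shared __NSArray0__ constant
    // NSDictionary *d = @{};  -> load of __NSDictionary0__
    // Runtimes without these exported singletons keep the message-send path.
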
@@ -427,7 +439,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
QualType ResultType = method ? method->getReturnType() : E->getType();
CallArgList Args;
- EmitCallArgs(Args, method, E->arguments());
+ EmitCallArgs(Args, method, E->arguments(), /*AC*/AbstractCallee(method));
// For delegate init calls in ARC, do an unsafe store of null into
// self. This represents the call taking direct ownership of that
@@ -1316,7 +1328,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
BinaryOperator assign(&ivarRef, finalArg, BO_Assign,
ivarRef.getType(), VK_RValue, OK_Ordinary,
- SourceLocation(), false);
+ SourceLocation(), FPOptions());
EmitStmt(&assign);
}
@@ -1469,6 +1481,8 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
if (DI)
DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());
+ RunCleanupsScope ForScope(*this);
+
// The local variable comes into scope immediately.
AutoVarEmission variable = AutoVarEmission::invalid();
if (const DeclStmt *SD = dyn_cast<DeclStmt>(S.getElement()))
@@ -1499,8 +1513,6 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
ArrayType::Normal, 0);
Address ItemsPtr = CreateMemTemp(ItemsTy, "items.ptr");
- RunCleanupsScope ForScope(*this);
-
// Emit the collection pointer. In ARC, we do a retain.
llvm::Value *Collection;
if (getLangOpts().ObjCAutoRefCount) {
@@ -1802,26 +1814,49 @@ void CodeGenFunction::EmitARCIntrinsicUse(ArrayRef<llvm::Value*> values) {
}
+static bool IsForwarding(StringRef Name) {
+ return llvm::StringSwitch<bool>(Name)
+ .Cases("objc_autoreleaseReturnValue", // ARCInstKind::AutoreleaseRV
+ "objc_autorelease", // ARCInstKind::Autorelease
+ "objc_retainAutoreleaseReturnValue", // ARCInstKind::FusedRetainAutoreleaseRV
+ "objc_retainAutoreleasedReturnValue", // ARCInstKind::RetainRV
+ "objc_retainAutorelease", // ARCInstKind::FusedRetainAutorelease
+ "objc_retainedObject", // ARCInstKind::NoopCast
+ "objc_retain", // ARCInstKind::Retain
+ "objc_unretainedObject", // ARCInstKind::NoopCast
+ "objc_unretainedPointer", // ARCInstKind::NoopCast
+ "objc_unsafeClaimAutoreleasedReturnValue", // ARCInstKind::ClaimRV
+ true)
+ .Default(false);
+}
+
static llvm::Constant *createARCRuntimeFunction(CodeGenModule &CGM,
- llvm::FunctionType *type,
- StringRef fnName) {
- llvm::Constant *fn = CGM.CreateRuntimeFunction(type, fnName);
+ llvm::FunctionType *FTy,
+ StringRef Name) {
+ llvm::Constant *RTF = CGM.CreateRuntimeFunction(FTy, Name);
- if (llvm::Function *f = dyn_cast<llvm::Function>(fn)) {
+ if (auto *F = dyn_cast<llvm::Function>(RTF)) {
// If the target runtime doesn't naturally support ARC, emit weak
// references to the runtime support library. We don't really
// permit this to fail, but we need a particular relocation style.
if (!CGM.getLangOpts().ObjCRuntime.hasNativeARC() &&
!CGM.getTriple().isOSBinFormatCOFF()) {
- f->setLinkage(llvm::Function::ExternalWeakLinkage);
- } else if (fnName == "objc_retain" || fnName == "objc_release") {
+ F->setLinkage(llvm::Function::ExternalWeakLinkage);
+ } else if (Name == "objc_retain" || Name == "objc_release") {
// If we have Native ARC, set nonlazybind attribute for these APIs for
// performance.
- f->addFnAttr(llvm::Attribute::NonLazyBind);
+ F->addFnAttr(llvm::Attribute::NonLazyBind);
+ }
+
+ if (IsForwarding(Name)) {
+ llvm::AttrBuilder B;
+ B.addAttribute(llvm::Attribute::Returned);
+
+ F->arg_begin()->addAttr(llvm::AttributeList::get(F->getContext(), 1, B));
}
}
- return fn;
+ return RTF;
}
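
IsForwarding lists the ARC runtime entry points that return their argument unchanged (the trailing comments give their ObjCARC ARCInstKind classifications), and createARCRuntimeFunction now tags the first parameter of each with the 'returned' attribute. That tells LLVM the call's result is the argument itself, enabling folding and better call lowering without knowledge of the callee body. A sketch of the resulting declaration, shown at the IR level in comments:

    // declare i8* @objc_retain(i8* returned)
    // => the optimizer may rewrite uses of the call result to use the
    //    argument directly.
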
/// Perform an operation having the signature
@@ -1832,7 +1867,8 @@ static llvm::Value *emitARCValueOperation(CodeGenFunction &CGF,
llvm::Constant *&fn,
StringRef fnName,
bool isTailCall = false) {
- if (isa<llvm::ConstantPointerNull>(value)) return value;
+ if (isa<llvm::ConstantPointerNull>(value))
+ return value;
if (!fn) {
llvm::FunctionType *fnType =
@@ -3239,7 +3275,7 @@ CodeGenFunction::GenerateObjCAtomicSetterCopyHelperFunction(
CallExpr *CalleeExp = cast<CallExpr>(PID->getSetterCXXAssignment());
CXXOperatorCallExpr TheCall(C, OO_Equal, CalleeExp->getCallee(),
Args, DestTy->g