author    Dimitry Andric <dim@FreeBSD.org>  2017-06-16 21:03:24 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2017-06-16 21:03:24 +0000
commit    7c7aba6e5fef47a01a136be655b0a92cfd7090f6 (patch)
tree      99ec531924f6078534b100ab9d7696abce848099
parent    7ab83427af0f77b59941ceba41d509d7d097b065 (diff)
Vendor import of llvm trunk r305575 (tag: vendor/llvm/llvm-trunk-r305575)

Notes:
    svn path=/vendor/llvm/dist/; revision=320013
    svn path=/vendor/llvm/llvm-trunk-r305575/; revision=320014; tag=vendor/llvm/llvm-trunk-r305575
-rw-r--r--cmake/modules/TableGen.cmake50
-rw-r--r--docs/BranchWeightMetadata.rst14
-rw-r--r--docs/LangRef.rst241
-rw-r--r--docs/Lexicon.rst7
-rw-r--r--docs/Phabricator.rst3
-rw-r--r--include/llvm/ADT/AllocatorList.h61
-rw-r--r--include/llvm/ADT/ArrayRef.h44
-rw-r--r--include/llvm/ADT/BreadthFirstIterator.h11
-rw-r--r--include/llvm/ADT/DAGDeltaAlgorithm.h10
-rw-r--r--include/llvm/ADT/DeltaAlgorithm.h10
-rw-r--r--include/llvm/ADT/DenseMap.h74
-rw-r--r--include/llvm/ADT/DenseMapInfo.h40
-rw-r--r--include/llvm/ADT/DenseSet.h46
-rw-r--r--include/llvm/ADT/DepthFirstIterator.h23
-rw-r--r--include/llvm/ADT/EquivalenceClasses.h22
-rw-r--r--include/llvm/ADT/FoldingSet.h233
-rw-r--r--include/llvm/ADT/GraphTraits.h15
-rw-r--r--include/llvm/ADT/ImmutableList.h8
-rw-r--r--include/llvm/ADT/ImmutableMap.h62
-rw-r--r--include/llvm/ADT/ImmutableSet.h121
-rw-r--r--include/llvm/ADT/IndexedMap.h18
-rw-r--r--include/llvm/ADT/IntervalMap.h49
-rw-r--r--include/llvm/ADT/IntrusiveRefCntPtr.h18
-rw-r--r--include/llvm/ADT/MapVector.h24
-rw-r--r--include/llvm/ADT/Optional.h33
-rw-r--r--include/llvm/ADT/PackedVector.h6
-rw-r--r--include/llvm/ADT/PointerEmbeddedInt.h30
-rw-r--r--include/llvm/ADT/PointerUnion.h2
-rw-r--r--include/llvm/ADT/ScopedHashTable.h12
-rw-r--r--include/llvm/ADT/SmallBitVector.h58
-rw-r--r--include/llvm/ADT/SmallSet.h7
-rw-r--r--include/llvm/ADT/StringExtras.h12
-rw-r--r--include/llvm/ADT/Triple.h4
-rw-r--r--include/llvm/ADT/ilist_base.h6
-rw-r--r--include/llvm/ADT/ilist_iterator.h59
-rw-r--r--include/llvm/ADT/ilist_node.h50
-rw-r--r--include/llvm/ADT/iterator.h8
-rw-r--r--include/llvm/ADT/simple_ilist.h33
-rw-r--r--include/llvm/Analysis/MemorySSA.h6
-rw-r--r--include/llvm/Analysis/ScalarEvolution.h32
-rw-r--r--include/llvm/Analysis/TargetTransformInfo.h14
-rw-r--r--include/llvm/Analysis/TargetTransformInfoImpl.h4
-rw-r--r--include/llvm/Analysis/TypeMetadataUtils.h7
-rw-r--r--include/llvm/Analysis/ValueTracking.h4
-rw-r--r--include/llvm/BinaryFormat/ELF.h38
-rw-r--r--include/llvm/Bitcode/BitcodeReader.h20
-rw-r--r--include/llvm/Bitcode/BitcodeWriter.h4
-rw-r--r--include/llvm/Bitcode/LLVMBitCodes.h8
-rw-r--r--include/llvm/CodeGen/BasicTTIImpl.h4
-rw-r--r--include/llvm/CodeGen/FunctionLoweringInfo.h12
-rw-r--r--include/llvm/CodeGen/GlobalISel/LegalizerHelper.h8
-rw-r--r--include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h24
-rw-r--r--include/llvm/CodeGen/RuntimeLibcalls.h19
-rw-r--r--include/llvm/CodeGen/SelectionDAG.h6
-rw-r--r--include/llvm/CodeGen/TargetLoweringObjectFileImpl.h15
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeView.h5
-rw-r--r--include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h1
-rw-r--r--include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h2
-rw-r--r--include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h7
-rw-r--r--include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h80
-rw-r--r--include/llvm/DebugInfo/CodeView/Formatters.h17
-rw-r--r--include/llvm/DebugInfo/CodeView/StringsAndChecksums.h106
-rw-r--r--include/llvm/DebugInfo/CodeView/SymbolRecord.h10
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeIndex.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h4
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFVerifier.h28
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleList.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h9
-rw-r--r--include/llvm/DebugInfo/PDB/Native/InfoStream.h1
-rw-r--r--include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h9
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PDBFile.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PDBStringTable.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h5
-rw-r--r--include/llvm/DebugInfo/PDB/Native/PublicsStream.h1
-rw-r--r--include/llvm/DebugInfo/PDB/Native/RawConstants.h6
-rw-r--r--include/llvm/DebugInfo/PDB/Native/SymbolStream.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h2
-rw-r--r--include/llvm/IR/Constants.h9
-rw-r--r--include/llvm/IR/DebugInfoMetadata.h3
-rw-r--r--include/llvm/IR/GlobalVariable.h2
-rw-r--r--include/llvm/IR/IRBuilder.h24
-rw-r--r--include/llvm/IR/InstrTypes.h8
-rw-r--r--include/llvm/IR/Instructions.h29
-rw-r--r--include/llvm/IR/IntrinsicInst.h90
-rw-r--r--include/llvm/IR/Intrinsics.td15
-rw-r--r--include/llvm/IR/ModuleSummaryIndex.h26
-rw-r--r--include/llvm/IR/ModuleSummaryIndexYAML.h18
-rw-r--r--include/llvm/IR/Operator.h1
-rw-r--r--include/llvm/IR/PatternMatch.h36
-rw-r--r--include/llvm/LTO/LTO.h33
-rw-r--r--include/llvm/LTO/legacy/LTOModule.h2
-rw-r--r--include/llvm/MC/MCSymbolWasm.h6
-rw-r--r--include/llvm/MC/MCWasmObjectWriter.h15
-rw-r--r--include/llvm/Object/ArchiveWriter.h1
-rw-r--r--include/llvm/Object/WindowsResource.h12
-rw-r--r--include/llvm/ObjectYAML/COFFYAML.h4
-rw-r--r--include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h25
-rw-r--r--include/llvm/ObjectYAML/CodeViewYAMLTypes.h3
-rw-r--r--include/llvm/Option/Arg.h15
-rw-r--r--include/llvm/Option/ArgList.h52
-rw-r--r--include/llvm/Option/OptSpecifier.h40
-rw-r--r--include/llvm/Option/OptTable.h19
-rw-r--r--include/llvm/Option/Option.h15
-rw-r--r--include/llvm/Support/BinaryStreamArray.h6
-rw-r--r--include/llvm/Support/DebugCounter.h6
-rw-r--r--include/llvm/Support/FormatAdapters.h13
-rw-r--r--include/llvm/Support/FormatCommon.h20
-rw-r--r--include/llvm/Support/MathExtras.h107
-rw-r--r--include/llvm/Support/ThreadPool.h28
-rw-r--r--include/llvm/TableGen/Main.h10
-rw-r--r--include/llvm/TableGen/Record.h150
-rw-r--r--include/llvm/TableGen/SetTheory.h6
-rw-r--r--include/llvm/TableGen/StringMatcher.h9
-rw-r--r--include/llvm/Target/TargetLoweringObjectFile.h7
-rw-r--r--include/llvm/Target/TargetRegisterInfo.h10
-rw-r--r--include/llvm/Testing/Support/Error.h69
-rw-r--r--include/llvm/Testing/Support/SupportHelpers.h47
-rw-r--r--include/llvm/Transforms/Scalar/GVNExpression.h5
-rw-r--r--include/llvm/Transforms/Utils/CodeExtractor.h21
-rw-r--r--include/llvm/Transforms/Utils/Mem2Reg.h2
-rw-r--r--lib/Analysis/BasicAliasAnalysis.cpp22
-rw-r--r--lib/Analysis/CallGraphSCCPass.cpp8
-rw-r--r--lib/Analysis/DivergenceAnalysis.cpp2
-rw-r--r--lib/Analysis/MemorySSA.cpp2
-rw-r--r--lib/Analysis/ScalarEvolution.cpp129
-rw-r--r--lib/Analysis/TargetTransformInfo.cpp4
-rw-r--r--lib/Analysis/ValueTracking.cpp5
-rw-r--r--lib/Bitcode/Reader/BitcodeReader.cpp87
-rw-r--r--lib/Bitcode/Reader/MetadataLoader.cpp123
-rw-r--r--lib/Bitcode/Writer/BitcodeWriter.cpp52
-rw-r--r--lib/CMakeLists.txt1
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinter.cpp6
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp6
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfDebug.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfDebug.h7
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfExpression.cpp33
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfExpression.h3
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfUnit.cpp6
-rw-r--r--lib/CodeGen/CodeGenPrepare.cpp2
-rw-r--r--lib/CodeGen/GlobalISel/LegalizerHelper.cpp81
-rw-r--r--lib/CodeGen/GlobalISel/MachineIRBuilder.cpp18
-rw-r--r--lib/CodeGen/MachineBlockPlacement.cpp10
-rw-r--r--lib/CodeGen/MachineLICM.cpp7
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp62
-rw-r--r--lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp26
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeDAG.cpp15
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp21
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp18
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp61
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp79
-rw-r--r--lib/CodeGen/SplitKit.cpp3
-rw-r--r--lib/CodeGen/StackColoring.cpp237
-rw-r--r--lib/CodeGen/TargetLoweringBase.cpp28
-rw-r--r--lib/CodeGen/TargetLoweringObjectFileImpl.cpp49
-rw-r--r--lib/DebugInfo/CodeView/CMakeLists.txt3
-rw-r--r--lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp5
-rw-r--r--lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp2
-rw-r--r--lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp37
-rw-r--r--lib/DebugInfo/CodeView/StringsAndChecksums.cpp55
-rw-r--r--lib/DebugInfo/CodeView/SymbolDumper.cpp8
-rw-r--r--lib/DebugInfo/CodeView/SymbolRecordMapping.cpp4
-rw-r--r--lib/DebugInfo/CodeView/TypeDatabase.cpp71
-rw-r--r--lib/DebugInfo/CodeView/TypeIndex.cpp81
-rw-r--r--lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp2
-rw-r--r--lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp7
-rw-r--r--lib/DebugInfo/DWARF/DWARFContext.cpp249
-rw-r--r--lib/DebugInfo/DWARF/DWARFDebugFrame.cpp23
-rw-r--r--lib/DebugInfo/DWARF/DWARFVerifier.cpp34
-rw-r--r--lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp2
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp34
-rw-r--r--lib/DebugInfo/PDB/Native/InfoStream.cpp4
-rw-r--r--lib/DebugInfo/PDB/Native/PDBFile.cpp10
-rw-r--r--lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp10
-rw-r--r--lib/DebugInfo/PDB/Native/PDBStringTable.cpp3
-rw-r--r--lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp5
-rw-r--r--lib/DebugInfo/PDB/Native/PublicsStream.cpp9
-rw-r--r--lib/DebugInfo/PDB/Native/TpiHashing.cpp1
-rw-r--r--lib/DebugInfo/PDB/UDTLayout.cpp5
-rw-r--r--lib/Fuzzer/FuzzerDriver.cpp4
-rw-r--r--lib/Fuzzer/FuzzerLoop.cpp4
-rw-r--r--lib/Fuzzer/FuzzerTracePC.cpp23
-rw-r--r--lib/Fuzzer/FuzzerTracePC.h35
-rw-r--r--lib/Fuzzer/test/AbsNegAndConstant64Test.cpp2
-rw-r--r--lib/Fuzzer/test/CMakeLists.txt3
-rw-r--r--lib/Fuzzer/test/FourIndependentBranchesTest.cpp1
-rw-r--r--lib/Fuzzer/test/FuzzerUnittest.cpp12
-rw-r--r--lib/Fuzzer/test/ShrinkControlFlowTest.cpp1
-rw-r--r--lib/Fuzzer/test/SimpleHashTest.cpp2
-rw-r--r--lib/Fuzzer/test/SingleStrncmpTest.cpp1
-rw-r--r--lib/Fuzzer/test/TableLookupTest.cpp3
-rw-r--r--lib/Fuzzer/test/fuzzer-dirs.test8
-rw-r--r--lib/Fuzzer/test/inline-8bit-counters.test4
-rw-r--r--lib/Fuzzer/test/inline-8bit-counters/CMakeLists.txt12
-rw-r--r--lib/Fuzzer/test/trace-pc/CMakeLists.txt3
-rw-r--r--lib/IR/ConstantFold.cpp9
-rw-r--r--lib/IR/ConstantsContext.h18
-rw-r--r--lib/IR/DebugInfoMetadata.cpp28
-rw-r--r--lib/IR/IRBuilder.cpp15
-rw-r--r--lib/IR/Metadata.cpp2
-rw-r--r--lib/IR/ModuleSummaryIndex.cpp13
-rw-r--r--lib/IR/Verifier.cpp39
-rw-r--r--lib/LLVMBuild.txt1
-rw-r--r--lib/LTO/LTO.cpp200
-rw-r--r--lib/LTO/LTOModule.cpp12
-rw-r--r--lib/MC/MCParser/ELFAsmParser.cpp2
-rw-r--r--lib/MC/MCSectionELF.cpp2
-rw-r--r--lib/MC/WasmObjectWriter.cpp43
-rw-r--r--lib/Object/ArchiveWriter.cpp14
-rw-r--r--lib/Object/ELF.cpp1
-rw-r--r--lib/Object/IRSymtab.cpp6
-rw-r--r--lib/Object/WindowsResource.cpp156
-rw-r--r--lib/ObjectYAML/COFFYAML.cpp11
-rw-r--r--lib/ObjectYAML/CodeViewYAMLDebugSections.cpp338
-rw-r--r--lib/ObjectYAML/CodeViewYAMLSymbols.cpp64
-rw-r--r--lib/ObjectYAML/CodeViewYAMLTypes.cpp40
-rw-r--r--lib/ObjectYAML/ELFYAML.cpp1
-rw-r--r--lib/Option/Arg.cpp10
-rw-r--r--lib/Option/ArgList.cpp21
-rw-r--r--lib/Option/OptTable.cpp40
-rw-r--r--lib/Option/Option.cpp10
-rw-r--r--lib/Passes/PassBuilder.cpp6
-rw-r--r--lib/Support/BinaryStreamWriter.cpp4
-rw-r--r--lib/Support/DebugCounter.cpp6
-rw-r--r--lib/Support/FoldingSet.cpp42
-rw-r--r--lib/Support/ThreadPool.cpp19
-rw-r--r--lib/Support/Unix/Program.inc10
-rw-r--r--lib/TableGen/Record.cpp43
-rw-r--r--lib/TableGen/SetTheory.cpp20
-rw-r--r--lib/Target/AArch64/AArch64.td1
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp8
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp2
-rw-r--r--lib/Target/AArch64/AArch64SchedFalkorDetails.td61
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp4
-rw-r--r--lib/Target/AArch64/AArch64TargetTransformInfo.h2
-rw-r--r--lib/Target/AMDGPU/AMDGPUAlwaysInlinePass.cpp2
-rw-r--r--lib/Target/AMDGPU/AMDGPUAnnotateUniformValues.cpp2
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp34
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstructionSelector.cpp10
-rw-r--r--lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp2
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp15
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.h3
-rw-r--r--lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp21
-rw-r--r--lib/Target/AMDGPU/FLATInstructions.td77
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp14
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.cpp19
-rw-r--r--lib/Target/AMDGPU/SIInstrInfo.td12
-rw-r--r--lib/Target/ARM/ARMCallLowering.cpp55
-rw-r--r--lib/Target/ARM/ARMInstrVFP.td18
-rw-r--r--lib/Target/ARM/ARMLegalizerInfo.cpp45
-rw-r--r--lib/Target/ARM/ARMTargetTransformInfo.h2
-rw-r--r--lib/Target/BPF/BPFAsmPrinter.cpp5
-rw-r--r--lib/Target/BPF/BPFISelDAGToDAG.cpp240
-rw-r--r--lib/Target/BPF/BPFInstrInfo.td14
-rw-r--r--lib/Target/Hexagon/HexagonGenMux.cpp52
-rw-r--r--lib/Target/Hexagon/HexagonISelDAGToDAG.cpp15
-rw-r--r--lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonPatterns.td19
-rw-r--r--lib/Target/Hexagon/HexagonTargetMachine.cpp8
-rw-r--r--lib/Target/Mips/MipsISelLowering.cpp135
-rw-r--r--lib/Target/Mips/MipsInstrInfo.cpp9
-rw-r--r--lib/Target/Mips/MipsLongBranch.cpp12
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.cpp99
-rw-r--r--lib/Target/Mips/MipsSEISelDAGToDAG.h3
-rw-r--r--lib/Target/Mips/MipsSEISelLowering.cpp176
-rw-r--r--lib/Target/Mips/MipsSubtarget.h2
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp26
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp118
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h22
-rw-r--r--lib/Target/PowerPC/PPCInstr64Bit.td10
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.cpp2
-rw-r--r--lib/Target/PowerPC/PPCInstrInfo.td13
-rw-r--r--lib/Target/PowerPC/PPCInstrVSX.td10
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.cpp14
-rw-r--r--lib/Target/PowerPC/PPCRegisterInfo.h1
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.cpp2
-rw-r--r--lib/Target/PowerPC/PPCTargetTransformInfo.h2
-rw-r--r--lib/Target/SystemZ/SystemZTargetTransformInfo.cpp2
-rw-r--r--lib/Target/SystemZ/SystemZTargetTransformInfo.h2
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyTargetStreamer.h4
-rw-r--r--lib/Target/WebAssembly/MCTargetDesc/WebAssemblyWasmObjectWriter.cpp25
-rw-r--r--lib/Target/WebAssembly/WebAssemblyAsmPrinter.cpp10
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyTargetTransformInfo.h2
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp218
-rw-r--r--lib/Target/X86/X86InstrAVX512.td2
-rw-r--r--lib/Target/X86/X86InstrFragmentsSIMD.td12
-rw-r--r--lib/Target/X86/X86InstrInfo.cpp134
-rw-r--r--lib/Testing/CMakeLists.txt1
-rw-r--r--lib/Testing/LLVMBuild.txt19
-rw-r--r--lib/Testing/Support/CMakeLists.txt12
-rw-r--r--lib/Testing/Support/Error.cpp22
-rw-r--r--lib/Testing/Support/LLVMBuild.txt22
-rw-r--r--lib/Transforms/IPO/CrossDSOCFI.cpp11
-rw-r--r--lib/Transforms/IPO/Inliner.cpp53
-rw-r--r--lib/Transforms/IPO/LowerTypeTests.cpp166
-rw-r--r--lib/Transforms/IPO/PartialInlining.cpp414
-rw-r--r--lib/Transforms/IPO/PassManagerBuilder.cpp6
-rw-r--r--lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp50
-rw-r--r--lib/Transforms/InstCombine/InstCombineAndOrXor.cpp113
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp121
-rw-r--r--lib/Transforms/InstCombine/InstCombineInternal.h9
-rw-r--r--lib/Transforms/InstCombine/InstCombineShifts.cpp14
-rw-r--r--lib/Transforms/Instrumentation/CMakeLists.txt1
-rw-r--r--lib/Transforms/Instrumentation/IndirectCallPromotion.cpp353
-rw-r--r--lib/Transforms/Instrumentation/InstrProfiling.cpp12
-rw-r--r--lib/Transforms/Instrumentation/PGOMemOPSizeOpt.cpp419
-rw-r--r--lib/Transforms/Instrumentation/SanitizerCoverage.cpp2
-rw-r--r--lib/Transforms/Scalar/CorrelatedValuePropagation.cpp2
-rw-r--r--lib/Transforms/Scalar/EarlyCSE.cpp5
-rw-r--r--lib/Transforms/Scalar/GVNSink.cpp11
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp24
-rw-r--r--lib/Transforms/Scalar/NewGVN.cpp4
-rw-r--r--lib/Transforms/Scalar/RewriteStatepointsForGC.cpp71
-rw-r--r--lib/Transforms/Scalar/SCCP.cpp11
-rw-r--r--lib/Transforms/Utils/CodeExtractor.cpp219
-rw-r--r--lib/Transforms/Utils/PredicateInfo.cpp3
-rw-r--r--lib/Transforms/Utils/SimplifyIndVar.cpp14
-rw-r--r--test/Analysis/ScalarEvolution/limit-depth.ll44
-rw-r--r--test/Assembler/diexpression.ll14
-rw-r--r--test/Bitcode/DIExpression-deref.ll6
-rw-r--r--test/Bitcode/DIExpression-minus-upgrade.ll16
-rw-r--r--test/Bitcode/DIExpression-minus-upgrade.ll.bcbin0 -> 988 bytes
-rw-r--r--test/Bitcode/DIGlobalVariableExpression.ll2
-rw-r--r--test/Bitcode/upgrade-linker-options.ll15
-rw-r--r--test/CodeGen/AArch64/GlobalISel/legalize-load-store.mir8
-rw-r--r--test/CodeGen/AArch64/arm64-sincos.ll50
-rw-r--r--test/CodeGen/AArch64/fast-isel-sp-adjust.ll288
-rw-r--r--test/CodeGen/AArch64/misched-fusion-aes.ll77
-rw-r--r--test/CodeGen/AArch64/sincos-expansion.ll42
-rw-r--r--test/CodeGen/AArch64/swifterror.ll27
-rw-r--r--test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir2
-rw-r--r--test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir2
-rw-r--r--test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir22
-rw-r--r--test/CodeGen/AMDGPU/always-uniform.ll21
-rw-r--r--test/CodeGen/AMDGPU/cgp-addressing-modes-flat.ll123
-rw-r--r--test/CodeGen/AMDGPU/code-object-metadata-kernel-debug-props.ll4
-rw-r--r--test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir70
-rw-r--r--test/CodeGen/AMDGPU/flat-address-space.ll56
-rw-r--r--test/CodeGen/AMDGPU/flat_atomics.ll168
-rw-r--r--test/CodeGen/AMDGPU/global_smrd_cfg.ll33
-rw-r--r--test/CodeGen/AMDGPU/inserted-wait-states.mir10
-rw-r--r--test/CodeGen/AMDGPU/limit-coalesce.mir6
-rw-r--r--test/CodeGen/AMDGPU/rename-independent-subregs-invalid-mac-operands.mir4
-rw-r--r--test/CodeGen/AMDGPU/sdwa-scalar-ops.mir16
-rw-r--r--test/CodeGen/AMDGPU/waitcnt.mir22
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll195
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-isel-divmod.ll21
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-legalize-divmod.mir75
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-unsupported.ll8
-rw-r--r--test/CodeGen/ARM/cortex-a57-misched-vfma.ll38
-rw-r--r--test/CodeGen/ARM/debug-info-blocks.ll6
-rw-r--r--test/CodeGen/ARM/sincos.ll67
-rw-r--r--test/CodeGen/ARM/swifterror.ll28
-rw-r--r--test/CodeGen/BPF/rodata_1.ll52
-rw-r--r--test/CodeGen/BPF/rodata_2.ll51
-rw-r--r--test/CodeGen/BPF/rodata_3.ll41
-rw-r--r--test/CodeGen/BPF/rodata_4.ll43
-rw-r--r--test/CodeGen/Hexagon/loop-idiom/pmpy-shiftconv-fail.ll48
-rw-r--r--test/CodeGen/Hexagon/mulh.ll27
-rw-r--r--test/CodeGen/Hexagon/mux-kill.mir15
-rw-r--r--test/CodeGen/Hexagon/mux-kill2.mir17
-rw-r--r--test/CodeGen/Hexagon/store-imm-stack-object.ll86
-rw-r--r--test/CodeGen/Mips/2008-06-05-Carry.ll13
-rw-r--r--test/CodeGen/Mips/brundef.ll26
-rw-r--r--test/CodeGen/Mips/dsp-patterns.ll4
-rw-r--r--test/CodeGen/Mips/llcarry.ll11
-rw-r--r--test/CodeGen/Mips/llvm-ir/add.ll394
-rw-r--r--test/CodeGen/Mips/llvm-ir/sub.ll174
-rw-r--r--test/CodeGen/Mips/longbranch.ll40
-rw-r--r--test/CodeGen/Mips/madd-msub.ll81
-rw-r--r--test/CodeGen/PowerPC/atomic-2.ll2
-rw-r--r--test/CodeGen/PowerPC/atomics-constant.ll2
-rw-r--r--test/CodeGen/PowerPC/atomics-regression.ll20
-rw-r--r--test/CodeGen/PowerPC/licm-tocReg.ll110
-rw-r--r--test/CodeGen/PowerPC/logic-ops-on-compares.ll73
-rw-r--r--test/CodeGen/PowerPC/ppc64-P9-mod.ll263
-rw-r--r--test/CodeGen/PowerPC/testComparesinesll.ll125
-rw-r--r--test/CodeGen/PowerPC/testComparesineull.ll125
-rw-r--r--test/CodeGen/PowerPC/testComparesllnesll.ll125
-rw-r--r--test/CodeGen/PowerPC/testComparesllneull.ll125
-rw-r--r--test/CodeGen/PowerPC/vec_revb.ll54
-rw-r--r--test/CodeGen/SystemZ/fp-sincos-01.ll55
-rw-r--r--test/CodeGen/X86/2012-01-11-split-cv.ll3
-rw-r--r--test/CodeGen/X86/StackColoring.ll64
-rw-r--r--test/CodeGen/X86/add-sub-nsw-nuw.ll3
-rw-r--r--test/CodeGen/X86/addcarry.ll24
-rw-r--r--test/CodeGen/X86/avx-vperm2x128.ll20
-rw-r--r--test/CodeGen/X86/bt.ll209
-rw-r--r--test/CodeGen/X86/cmov-into-branch.ll24
-rw-r--r--test/CodeGen/X86/combine-64bit-vec-binop.ll23
-rw-r--r--test/CodeGen/X86/element-wise-atomic-memory-intrinsics.ll45
-rw-r--r--test/CodeGen/X86/fast-isel-select-sse.ll26
-rw-r--r--test/CodeGen/X86/fp-logic-replace.ll2
-rw-r--r--test/CodeGen/X86/fp-logic.ll23
-rw-r--r--test/CodeGen/X86/fp-select-cmp-and.ll20
-rw-r--r--test/CodeGen/X86/immediate_merging64.ll4
-rw-r--r--test/CodeGen/X86/lea-opt-with-debug.mir2
-rw-r--r--test/CodeGen/X86/loop-search.ll3
-rw-r--r--test/CodeGen/X86/mask-negated-bool.ll8
-rw-r--r--test/CodeGen/X86/memset-2.ll6
-rw-r--r--test/CodeGen/X86/memset-nonzero.ll12
-rw-r--r--test/CodeGen/X86/memset64-on-x86-32.ll3
-rw-r--r--test/CodeGen/X86/negate-i1.ll10
-rw-r--r--test/CodeGen/X86/negate-shift.ll6
-rw-r--r--test/CodeGen/X86/negate.ll8
-rw-r--r--test/CodeGen/X86/negative-sin.ll14
-rw-r--r--test/CodeGen/X86/no-sse2-avg.ll3
-rw-r--r--test/CodeGen/X86/not-and-simplify.ll1
-rw-r--r--test/CodeGen/X86/pr13577.ll8
-rw-r--r--test/CodeGen/X86/pr18014.ll3
-rw-r--r--test/CodeGen/X86/pr32368.ll153
-rw-r--r--test/CodeGen/X86/rem.ll8
-rw-r--r--test/CodeGen/X86/sar_fold64.ll8
-rw-r--r--test/CodeGen/X86/select-with-and-or.ll16
-rw-r--r--test/CodeGen/X86/sext-setcc-self.ll8
-rw-r--r--test/CodeGen/X86/shift-pcmp.ll4
-rw-r--r--test/CodeGen/X86/sincos-opt.ll137
-rw-r--r--test/CodeGen/X86/sse-intrinsics-x86-upgrade.ll2
-rw-r--r--test/CodeGen/X86/sse41-intrinsics-x86-upgrade.ll10
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx512.ll93
-rw-r--r--test/CodeGen/X86/stack-folding-int-avx512vl.ll74
-rw-r--r--test/CodeGen/X86/statepoint-live-in.ll8
-rw-r--r--test/CodeGen/X86/swifterror.ll108
-rw-r--r--test/CodeGen/X86/urem-i8-constant.ll3
-rw-r--r--test/CodeGen/X86/urem-power-of-two.ll7
-rw-r--r--test/CodeGen/X86/vec3.ll4
-rw-r--r--test/CodeGen/X86/vector-compare-combines.ll4
-rw-r--r--test/CodeGen/X86/vector-shuffle-256-v16.ll18
-rw-r--r--test/CodeGen/X86/vzero-excess.ll2
-rw-r--r--test/CodeGen/X86/x86-interleaved-access.ll44
-rw-r--r--test/DebugInfo/COFF/array-odr-violation.ll6
-rw-r--r--test/DebugInfo/COFF/inlining-same-name.ll3
-rw-r--r--test/DebugInfo/Generic/block-asan.ll4
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-str-offsets-dwp.x86_64.obin0 -> 3328 bytes
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-str-offsets.s250
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-test-zlib.cc3
-rw-r--r--test/DebugInfo/Inputs/dwarfdump-test-zlib.o.elf-x86-64bin0 -> 4688 bytes
-rw-r--r--test/DebugInfo/MIR/ARM/split-superreg-complex.mir2
-rw-r--r--test/DebugInfo/PDB/Inputs/unknown-symbol.yaml10
-rw-r--r--test/DebugInfo/PDB/pdb-unknown-symbol.test6
-rw-r--r--test/DebugInfo/PDB/pdb-yaml-types.test74
-rw-r--r--test/DebugInfo/PDB/pdbdump-debug-subsections.test146
-rw-r--r--test/DebugInfo/PDB/pdbdump-headers.test3971
-rw-r--r--test/DebugInfo/PDB/pdbdump-merge-ids-and-types.test106
-rw-r--r--test/DebugInfo/PDB/pdbdump-mergeids.test43
-rw-r--r--test/DebugInfo/PDB/pdbdump-mergetypes.test60
-rw-r--r--test/DebugInfo/PDB/pdbdump-raw-blocks.test64
-rw-r--r--test/DebugInfo/PDB/pdbdump-raw-stream.test51
-rw-r--r--test/DebugInfo/PDB/pdbdump-readwrite.test73
-rw-r--r--test/DebugInfo/X86/block-capture.ll2
-rw-r--r--test/DebugInfo/X86/debug-info-block-captured-self.ll4
-rw-r--r--test/DebugInfo/X86/debug-info-blocks.ll2
-rw-r--r--test/DebugInfo/X86/double-declare.ll44
-rw-r--r--test/DebugInfo/X86/dw_op_minus.ll4
-rw-r--r--test/DebugInfo/X86/dw_op_minus_direct.ll2
-rw-r--r--test/DebugInfo/X86/safestack-byval.ll4
-rw-r--r--test/DebugInfo/X86/stack-value-dwarf2.ll2
-rw-r--r--test/DebugInfo/X86/unattached-global.ll2
-rw-r--r--test/DebugInfo/dwarfdump-str-offsets-dwp.test56
-rw-r--r--test/DebugInfo/dwarfdump-zlib.test5
-rw-r--r--test/Instrumentation/InstrProfiling/always_inline.ll28
-rw-r--r--test/Instrumentation/SanitizerCoverage/inline-8bit-counters.ll2
-rw-r--r--test/LTO/Resolution/X86/Inputs/dead-strip-fulllto.ll16
-rw-r--r--test/LTO/Resolution/X86/dead-strip-fulllto.ll37
-rw-r--r--test/LTO/Resolution/X86/symtab-elf.ll4
-rw-r--r--test/LTO/Resolution/X86/symtab.ll4
-rw-r--r--test/LibDriver/use-paths.test24
-rw-r--r--test/MC/AMDGPU/flat-gfx9.s40
-rw-r--r--test/MC/AMDGPU/flat.s3
-rw-r--r--test/MC/COFF/cv-compiler-info.ll6
-rwxr-xr-xtest/MC/COFF/linker-options.ll8
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64-encoding.txt12
-rw-r--r--test/MC/Disassembler/PowerPC/ppc64le-encoding.txt12
-rw-r--r--test/MC/ELF/section.s12
-rw-r--r--test/MC/MachO/linker-options.ll7
-rw-r--r--test/MC/PowerPC/ppc64-encoding.s13
-rw-r--r--test/MC/WebAssembly/external-data.ll4
-rw-r--r--test/MC/WebAssembly/external-func-address.ll25
-rw-r--r--test/MC/WebAssembly/func-address.ll48
-rw-r--r--test/ThinLTO/X86/cfi-icall.ll29
-rw-r--r--test/Transforms/CodeExtractor/live_shrink.ll67
-rw-r--r--test/Transforms/CodeExtractor/live_shrink_gep.ll66
-rw-r--r--test/Transforms/CodeExtractor/live_shrink_hoist.ll66
-rw-r--r--test/Transforms/CodeExtractor/live_shrink_multiple.ll66
-rw-r--r--test/Transforms/CodeExtractor/live_shrink_unsafe.ll94
-rw-r--r--test/Transforms/CrossDSOCFI/cfi_functions.ll23
-rw-r--r--test/Transforms/EarlyCSE/pr33406.ll26
-rw-r--r--test/Transforms/GVN/pr32314.ll53
-rw-r--r--test/Transforms/GlobalMerge/debug-info.ll2
-rw-r--r--test/Transforms/Inline/always-inline.ll11
-rw-r--r--test/Transforms/InstCombine/debuginfo-dce.ll12
-rw-r--r--test/Transforms/InstCombine/element-atomic-memcpy-to-loads.ll30
-rw-r--r--test/Transforms/InstCombine/ffs-1.ll156
-rw-r--r--test/Transforms/InstCombine/lshr.ll19
-rw-r--r--test/Transforms/InstCombine/onehot_merge.ll76
-rw-r--r--test/Transforms/InstCombine/or-xor.ll44
-rw-r--r--test/Transforms/InstCombine/select-with-bitwise-ops.ll268
-rw-r--r--test/Transforms/InstCombine/shift.ll10
-rw-r--r--test/Transforms/InstCombine/xor2.ll33
-rw-r--r--test/Transforms/LoopIdiom/X86/unordered-atomic-memcpy.ll36
-rw-r--r--test/Transforms/LoopIdiom/unordered-atomic-memcpy-noarch.ll2
-rw-r--r--test/Transforms/LowerTypeTests/Inputs/import-icall.yaml19
-rw-r--r--test/Transforms/LowerTypeTests/export-icall.ll70
-rw-r--r--test/Transforms/LowerTypeTests/import-icall.ll40
-rw-r--r--test/Transforms/PGOProfile/memop_size_opt.ll9
-rw-r--r--test/Transforms/RewriteStatepointsForGC/drop-invalid-metadata.ll92
-rw-r--r--test/Transforms/SLPVectorizer/X86/arith-add.ll58
-rw-r--r--test/Transforms/SLPVectorizer/X86/arith-fp.ll180
-rw-r--r--test/Transforms/SLPVectorizer/X86/arith-mul.ll74
-rw-r--r--test/Transforms/SLPVectorizer/X86/arith-sub.ll58
-rw-r--r--test/Transforms/SafeStack/X86/debug-loc.ll4
-rw-r--r--test/Transforms/SafeStack/X86/debug-loc2.ll6
-rw-r--r--test/Transforms/Util/PredicateInfo/pr33456.ll68
-rw-r--r--test/Transforms/Util/PredicateInfo/pr33457.ll93
-rw-r--r--test/Verifier/element-wise-atomic-memory-intrinsics.ll20
-rw-r--r--test/lit.cfg2
-rw-r--r--test/tools/llvm-cvtres/Inputs/combined.obj.coffbin0 -> 4040 bytes
-rw-r--r--test/tools/llvm-cvtres/Inputs/languages.rc36
-rw-r--r--test/tools/llvm-cvtres/Inputs/languages.resbin0 -> 452 bytes
-rw-r--r--test/tools/llvm-cvtres/Inputs/test_resource.obj.coff.armbin0 -> 3472 bytes
-rw-r--r--test/tools/llvm-cvtres/Inputs/test_resource.obj.coff.x64bin0 -> 3472 bytes
-rw-r--r--test/tools/llvm-cvtres/basic.test4
-rw-r--r--test/tools/llvm-cvtres/combined.test313
-rw-r--r--test/tools/llvm-cvtres/help.test13
-rw-r--r--test/tools/llvm-cvtres/machine.test59
-rw-r--r--test/tools/llvm-cvtres/object.test227
-rw-r--r--test/tools/llvm-cvtres/parse.test2
-rw-r--r--test/tools/llvm-dwarfdump/X86/apple_names_verify_buckets.s192
-rw-r--r--test/tools/llvm-pdbdump/raw-stream-data.test47
-rw-r--r--test/tools/llvm-readobj/resources.test230
-rw-r--r--tools/llvm-ar/llvm-ar.cpp12
-rw-r--r--tools/llvm-bcanalyzer/llvm-bcanalyzer.cpp2
-rw-r--r--tools/llvm-cvtres/llvm-cvtres.cpp16
-rw-r--r--tools/llvm-dwarfdump/llvm-dwarfdump.cpp1
-rw-r--r--tools/llvm-pdbutil/CMakeLists.txt5
-rw-r--r--tools/llvm-pdbutil/FormatUtil.cpp49
-rw-r--r--tools/llvm-pdbutil/FormatUtil.h120
-rw-r--r--tools/llvm-pdbutil/LLVMOutputStyle.cpp1188
-rw-r--r--tools/llvm-pdbutil/LinePrinter.cpp33
-rw-r--r--tools/llvm-pdbutil/LinePrinter.h31
-rw-r--r--tools/llvm-pdbutil/MinimalSymbolDumper.cpp749
-rw-r--r--tools/llvm-pdbutil/MinimalSymbolDumper.h47
-rw-r--r--tools/llvm-pdbutil/MinimalTypeDumper.cpp543
-rw-r--r--tools/llvm-pdbutil/MinimalTypeDumper.h61
-rw-r--r--tools/llvm-pdbutil/PrettyClassLayoutGraphicalDumper.cpp24
-rw-r--r--tools/llvm-pdbutil/RawOutputStyle.cpp1064
-rw-r--r--tools/llvm-pdbutil/RawOutputStyle.h (renamed from tools/llvm-pdbutil/LLVMOutputStyle.h)40
-rw-r--r--tools/llvm-pdbutil/YAMLOutputStyle.cpp34
-rw-r--r--tools/llvm-pdbutil/llvm-pdbutil.cpp217
-rw-r--r--tools/llvm-pdbutil/llvm-pdbutil.h49
-rw-r--r--tools/llvm-readobj/COFFDumper.cpp45
-rw-r--r--tools/llvm-readobj/ELFDumper.cpp2
-rw-r--r--tools/llvm-stress/llvm-stress.cpp20
-rw-r--r--tools/obj2yaml/CMakeLists.txt1
-rw-r--r--tools/obj2yaml/coff2yaml.cpp57
-rw-r--r--tools/yaml2obj/CMakeLists.txt1
-rw-r--r--tools/yaml2obj/yaml2coff.cpp56
-rw-r--r--tools/yaml2obj/yaml2obj.cpp1
-rw-r--r--unittests/ADT/SCCIteratorTest.cpp6
-rw-r--r--unittests/ADT/SmallVectorTest.cpp20
-rw-r--r--unittests/ADT/StringRefTest.cpp3
-rw-r--r--unittests/CodeGen/LowLevelTypeTest.cpp3
-rw-r--r--unittests/DebugInfo/CodeView/CMakeLists.txt2
-rw-r--r--unittests/DebugInfo/CodeView/ErrorChecking.h70
-rw-r--r--unittests/DebugInfo/CodeView/RandomAccessVisitorTest.cpp18
-rw-r--r--unittests/DebugInfo/CodeView/TypeIndexDiscoveryTest.cpp1
-rw-r--r--unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp12
-rw-r--r--unittests/DebugInfo/PDB/CMakeLists.txt2
-rw-r--r--unittests/DebugInfo/PDB/ErrorChecking.h61
-rw-r--r--unittests/DebugInfo/PDB/HashTableTest.cpp10
-rw-r--r--unittests/DebugInfo/PDB/MSFBuilderTest.cpp101
-rw-r--r--unittests/DebugInfo/PDB/MappedBlockStreamTest.cpp119
-rw-r--r--unittests/DebugInfo/PDB/StringTableBuilderTest.cpp19
-rw-r--r--unittests/DebugInfo/PDB/TypeServerHandlerTest.cpp32
-rw-r--r--unittests/IR/MetadataTest.cpp14
-rw-r--r--unittests/IR/PatternMatch.cpp77
-rw-r--r--unittests/MI/LiveIntervalTest.cpp3
-rw-r--r--unittests/Support/BinaryStreamTest.cpp163
-rw-r--r--unittests/Support/CMakeLists.txt2
-rw-r--r--unittests/Support/CommandLineTest.cpp9
-rw-r--r--unittests/Support/DynamicLibrary/CMakeLists.txt4
-rw-r--r--unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp2
-rw-r--r--unittests/Support/DynamicLibrary/ExportedFuncs.cxx16
-rw-r--r--unittests/Support/DynamicLibrary/PipSqueak.cxx3
-rw-r--r--unittests/Support/DynamicLibrary/PipSqueak.h2
-rw-r--r--unittests/Support/FormatVariadicTest.cpp2
-rw-r--r--unittests/Target/AArch64/InstSizes.cpp15
-rw-r--r--unittests/Transforms/Scalar/LoopPassManagerTest.cpp11
-rw-r--r--utils/TableGen/CodeGenDAGPatterns.cpp4
-rwxr-xr-xutils/opt-viewer/opt-diff.py2
-rwxr-xr-xutils/opt-viewer/opt-stats.py2
-rwxr-xr-xutils/opt-viewer/opt-viewer.py2
-rwxr-xr-xutils/release/test-release.sh5
-rwxr-xr-xutils/update_test_checks.py96
595 files changed, 17845 insertions, 11359 deletions
diff --git a/cmake/modules/TableGen.cmake b/cmake/modules/TableGen.cmake
index 17ae1c9e7717..21421e4fdbd2 100644
--- a/cmake/modules/TableGen.cmake
+++ b/cmake/modules/TableGen.cmake
@@ -35,38 +35,24 @@ function(tablegen project ofn)
# a tablegen change, as cmake does not propagate file-level dependencies
# of custom targets. See the following ticket for more information:
# https://cmake.org/Bug/view.php?id=15858
- # We could always have just one dependency on both the target and
- # the file, but these 2 cases would produce cleaner cmake files.
- if (${${project}_TABLEGEN_TARGET} STREQUAL ${${project}_TABLEGEN_EXE})
- add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
- # Generate tablegen output in a temporary file.
- COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
- ${LLVM_TABLEGEN_FLAGS}
- ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
- -o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
- # The file in LLVM_TARGET_DEFINITIONS may be not in the current
- # directory and local_tds may not contain it, so we must
- # explicitly list it here:
- DEPENDS ${${project}_TABLEGEN_TARGET} ${local_tds} ${global_tds}
- ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
- COMMENT "Building ${ofn}..."
- )
- else()
- add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
- # Generate tablegen output in a temporary file.
- COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
- ${LLVM_TABLEGEN_FLAGS}
- ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
- -o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
- # The file in LLVM_TARGET_DEFINITIONS may be not in the current
- # directory and local_tds may not contain it, so we must
- # explicitly list it here:
- DEPENDS ${${project}_TABLEGEN_TARGET} ${${project}_TABLEGEN_EXE}
- ${local_tds} ${global_tds}
- ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
- COMMENT "Building ${ofn}..."
- )
- endif()
+ # The dependency on both the target and the file produces the same
+ # dependency twice in the result file when
+ # ("${${project}_TABLEGEN_TARGET}" STREQUAL "${${project}_TABLEGEN_EXE}"),
+ # but lets us have smaller and cleaner code here.
+ add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
+ # Generate tablegen output in a temporary file.
+ COMMAND ${${project}_TABLEGEN_EXE} ${ARGN} -I ${CMAKE_CURRENT_SOURCE_DIR}
+ ${LLVM_TABLEGEN_FLAGS}
+ ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
+ -o ${CMAKE_CURRENT_BINARY_DIR}/${ofn}.tmp
+ # The file in LLVM_TARGET_DEFINITIONS may be not in the current
+ # directory and local_tds may not contain it, so we must
+ # explicitly list it here:
+ DEPENDS ${${project}_TABLEGEN_TARGET} ${${project}_TABLEGEN_EXE}
+ ${local_tds} ${global_tds}
+ ${LLVM_TARGET_DEFINITIONS_ABSOLUTE}
+ COMMENT "Building ${ofn}..."
+ )
add_custom_command(OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${ofn}
# Only update the real output file if there are any differences.
# This prevents recompilation of all the files depending on it if there
diff --git a/docs/BranchWeightMetadata.rst b/docs/BranchWeightMetadata.rst
index b941d0d15050..9bd8bd4ae744 100644
--- a/docs/BranchWeightMetadata.rst
+++ b/docs/BranchWeightMetadata.rst
@@ -64,6 +64,20 @@ Branch weights are assigned to every destination.
[ , i32 <LABEL_BRANCH_WEIGHT> ... ]
}
+``CallInst``
+^^^^^^^^^^^^^^^^^^
+
+Calls may have branch weight metadata, containing the execution count of
+the call. It is currently used in SamplePGO mode only, to augment the
+block and entry counts which may not be accurate with sampling.
+
+.. code-block:: none
+
+ !0 = metadata !{
+ metadata !"branch_weights",
+ i32 <CALL_BRANCH_WEIGHT>
+ }
+
Other
^^^^^
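
To make the new ``CallInst`` case concrete, a minimal module might attach the call count like this; the function names and the count of 5000 are hypothetical, chosen only for illustration:

.. code-block:: llvm

    define void @caller() {
    entry:
      ; SamplePGO-style execution count of this call site.
      call void @callee(), !prof !0
      ret void
    }

    declare void @callee()

    !0 = !{!"branch_weights", i32 5000}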
diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index e063f6bd35fe..68aa500150ae 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -4033,26 +4033,26 @@ DICompileUnit
"""""""""""""
``DICompileUnit`` nodes represent a compile unit. The ``enums:``,
-``retainedTypes:``, ``subprograms:``, ``globals:``, ``imports:`` and ``macros:``
-fields are tuples containing the debug info to be emitted along with the compile
-unit, regardless of code optimizations (some nodes are only emitted if there are
-references to them from instructions). The ``debugInfoForProfiling:`` field is a
-boolean indicating whether or not line-table discriminators are updated to
-provide more-accurate debug info for profiling results.
+``retainedTypes:``, ``globals:``, ``imports:`` and ``macros:`` fields are tuples
+containing the debug info to be emitted along with the compile unit, regardless
+of code optimizations (some nodes are only emitted if there are references to
+them from instructions). The ``debugInfoForProfiling:`` field is a boolean
+indicating whether or not line-table discriminators are updated to provide
+more-accurate debug info for profiling results.
.. code-block:: text
!0 = !DICompileUnit(language: DW_LANG_C99, file: !1, producer: "clang",
isOptimized: true, flags: "-O2", runtimeVersion: 2,
splitDebugFilename: "abc.debug", emissionKind: FullDebug,
- enums: !2, retainedTypes: !3, subprograms: !4,
- globals: !5, imports: !6, macros: !7, dwoId: 0x0abcd)
+ enums: !2, retainedTypes: !3, globals: !4, imports: !5,
+ macros: !6, dwoId: 0x0abcd)
Compile unit descriptors provide the root scope for objects declared in a
-specific compilation unit. File descriptors are defined using this scope.
-These descriptors are collected by a named metadata ``!llvm.dbg.cu``. They
-keep track of subprograms, global variables, type information, and imported
-entities (declarations and namespaces).
+specific compilation unit. File descriptors are defined using this scope. These
+descriptors are collected by a named metadata node ``!llvm.dbg.cu``. They keep
+track of global variables, type information, and imported entities (declarations
+and namespaces).
.. _DIFile:
@@ -4326,8 +4326,8 @@ and ``scope:``.
containingType: !4,
virtuality: DW_VIRTUALITY_pure_virtual,
virtualIndex: 10, flags: DIFlagPrototyped,
- isOptimized: true, templateParams: !5,
- declaration: !6, variables: !7)
+ isOptimized: true, unit: !5, templateParams: !6,
+ declaration: !7, variables: !8, thrownTypes: !9)
.. _DILexicalBlock:
@@ -4404,7 +4404,12 @@ referenced LLVM variable relates to the source language variable.
The current supported vocabulary is limited:
- ``DW_OP_deref`` dereferences the top of the expression stack.
-- ``DW_OP_plus, 93`` adds ``93`` to the working expression.
+- ``DW_OP_plus`` pops the last two entries from the expression stack, adds
+ them together and appends the result to the expression stack.
+- ``DW_OP_minus`` pops the last two entries from the expression stack, subtracts
+ the last entry from the second last entry and appends the result to the
+ expression stack.
+- ``DW_OP_plus_uconst, 93`` adds ``93`` to the working expression.
- ``DW_OP_LLVM_fragment, 16, 8`` specifies the offset and size (``16`` and ``8``
here, respectively) of the variable fragment from the working expression. Note
that contrary to DW_OP_bit_piece, the offset is describing the location
@@ -4426,9 +4431,10 @@ combined with a concrete location.
.. code-block:: llvm
!0 = !DIExpression(DW_OP_deref)
- !1 = !DIExpression(DW_OP_plus, 3)
+ !1 = !DIExpression(DW_OP_plus_uconst, 3)
+ !1 = !DIExpression(DW_OP_constu, 3, DW_OP_plus)
!2 = !DIExpression(DW_OP_bit_piece, 3, 7)
- !3 = !DIExpression(DW_OP_deref, DW_OP_plus, 3, DW_OP_LLVM_fragment, 3, 7)
+ !3 = !DIExpression(DW_OP_deref, DW_OP_constu, 3, DW_OP_plus, DW_OP_LLVM_fragment, 3, 7)
!4 = !DIExpression(DW_OP_constu, 2, DW_OP_swap, DW_OP_xderef)
!5 = !DIExpression(DW_OP_constu, 42, DW_OP_stack_value)
@@ -5186,6 +5192,72 @@ Example:
!0 = !{i32* @a}
+'``prof``' Metadata
+^^^^^^^^^^^^^^^^^^^
+
+The ``prof`` metadata is used to record profile data in the IR.
+The first operand of the metadata node indicates the profile metadata
+type. There are currently 3 types:
+:ref:`branch_weights<prof_node_branch_weights>`,
+:ref:`function_entry_count<prof_node_function_entry_count>`, and
+:ref:`VP<prof_node_VP>`.
+
+.. _prof_node_branch_weights:
+
+branch_weights
+""""""""""""""
+
+Branch weight metadata attached to a branch, select, switch or call instruction
+represents the likeliness of the associated branch being taken.
+For more information, see :doc:`BranchWeightMetadata`.
+
+.. _prof_node_function_entry_count:
+
+function_entry_count
+""""""""""""""""""""
+
+Function entry count metadata can be attached to function definitions
+to record the number of times the function is called. Used with BFI
+information, it is also used to derive the basic block profile count.
+For more information, see :doc:`BranchWeightMetadata`.
+
+.. _prof_node_VP:
+
+VP
+""
+
+VP (value profile) metadata can be attached to instructions that have
+value profile information. Currently this is indirect calls (where it
+records the hottest callees) and calls to memory intrinsics such as memcpy,
+memmove, and memset (where it records the hottest byte lengths).
+
+Each VP metadata node contains "VP" string, then a uint32_t value for the value
+profiling kind, a uint64_t value for the total number of times the instruction
+is executed, followed by uint64_t value and execution count pairs.
+The value profiling kind is 0 for indirect call targets and 1 for memory
+operations. For indirect call targets, each profile value is a hash
+of the callee function name, and for memory operations each value is the
+byte length.
+
+Note that the value counts do not need to add up to the total count
+listed in the third operand (in practice only the top hottest values
+are tracked and reported).
+
+Indirect call example:
+
+.. code-block:: llvm
+
+ call void %f(), !prof !1
+ !1 = !{!"VP", i32 0, i64 1600, i64 7651369219802541373, i64 1030, i64 -4377547752858689819, i64 410}
+
+Note that the VP type is 0 (the second operand), which indicates this is
+an indirect call value profile data. The third operand indicates that the
+indirect call executed 1600 times. The 4th and 6th operands give the
+hashes of the 2 hottest target functions' names (this is the same hash used
+to represent function names in the profile database), and the 5th and 7th
+operands give the execution count that each of the respective prior target
+functions was called.
+
Module Flags Metadata
=====================
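
A sketch of how the ``function_entry_count`` and ``VP`` forms described above might appear together in one module; all counts and byte lengths here are invented for illustration:

.. code-block:: llvm

    declare void @llvm.memcpy.p0i8.p0i8.i64(i8*, i8*, i64, i32, i1)

    define void @copy(i8* %dst, i8* %src, i64 %len) !prof !0 {
    entry:
      ; VP kind 1 (memory op): the hottest byte lengths were 64 (900 times)
      ; and 128 (100 times) out of 1000 executions.
      call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src, i64 %len, i32 1, i1 false), !prof !1
      ret void
    }

    !0 = !{!"function_entry_count", i64 1000}
    !1 = !{!"VP", i32 1, i64 1000, i64 64, i64 900, i64 128, i64 100}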
@@ -5352,40 +5424,6 @@ Some important flag interactions:
- A module with ``Objective-C Garbage Collection`` set to 0 cannot be
merged with a module with ``Objective-C GC Only`` set to 6.
-Automatic Linker Flags Module Flags Metadata
---------------------------------------------
-
-Some targets support embedding flags to the linker inside individual object
-files. Typically this is used in conjunction with language extensions which
-allow source files to explicitly declare the libraries they depend on, and have
-these automatically be transmitted to the linker via object files.
-
-These flags are encoded in the IR using metadata in the module flags section,
-using the ``Linker Options`` key. The merge behavior for this flag is required
-to be ``AppendUnique``, and the value for the key is expected to be a metadata
-node which should be a list of other metadata nodes, each of which should be a
-list of metadata strings defining linker options.
-
-For example, the following metadata section specifies two separate sets of
-linker options, presumably to link against ``libz`` and the ``Cocoa``
-framework::
-
- !0 = !{ i32 6, !"Linker Options",
- !{
- !{ !"-lz" },
- !{ !"-framework", !"Cocoa" } } }
- !llvm.module.flags = !{ !0 }
-
-The metadata encoding as lists of lists of options, as opposed to a collapsed
-list of options, is chosen so that the IR encoding can use multiple option
-strings to specify e.g., a single library, while still having that specifier be
-preserved as an atomic element that can be recognized by a target specific
-assembly writer or object file emitter.
-
-Each individual option is required to be either a valid option for the target's
-linker, or an option that is reserved by the target specific assembly writer or
-object file emitter. No other aspect of these options is defined by the IR.
-
C type width Module Flags Metadata
----------------------------------
@@ -5422,6 +5460,37 @@ enum is the smallest type which can represent all of its values::
!0 = !{i32 1, !"short_wchar", i32 1}
!1 = !{i32 1, !"short_enum", i32 0}
+Automatic Linker Flags Named Metadata
+=====================================
+
+Some targets support embedding flags to the linker inside individual object
+files. Typically this is used in conjunction with language extensions which
+allow source files to explicitly declare the libraries they depend on, and have
+these automatically be transmitted to the linker via object files.
+
+These flags are encoded in the IR using named metadata with the name
+``!llvm.linker.options``. Each operand is expected to be a metadata node
+which should be a list of other metadata nodes, each of which should be a
+list of metadata strings defining linker options.
+
+For example, the following metadata section specifies two separate sets of
+linker options, presumably to link against ``libz`` and the ``Cocoa``
+framework::
+
+ !0 = !{ !"-lz" },
+ !1 = !{ !"-framework", !"Cocoa" } } }
+ !llvm.linker.options = !{ !0, !1 }
+
+The metadata encoding as lists of lists of options, as opposed to a collapsed
+list of options, is chosen so that the IR encoding can use multiple option
+strings to specify e.g., a single library, while still having that specifier be
+preserved as an atomic element that can be recognized by a target specific
+assembly writer or object file emitter.
+
+Each individual option is required to be either a valid option for the target's
+linker, or an option that is reserved by the target specific assembly writer or
+object file emitter. No other aspect of these options is defined by the IR.
+
.. _intrinsicglobalvariables:
Intrinsic Global Variables
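
A minimal sketch of the new named-metadata encoding, requesting ``libz`` and the ``Cocoa`` framework as in the example above:

.. code-block:: llvm

    ; One operand per option group; each group is preserved as an atomic unit.
    !llvm.linker.options = !{!0, !1}

    !0 = !{!"-lz"}
    !1 = !{!"-framework", !"Cocoa"}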
@@ -13999,62 +14068,66 @@ Element Wise Atomic Memory Intrinsics
These intrinsics are similar to the standard library memory intrinsics except
that they perform memory transfer as a sequence of atomic memory accesses.
-.. _int_memcpy_element_atomic:
+.. _int_memcpy_element_unordered_atomic:
-'``llvm.memcpy.element.atomic``' Intrinsic
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+'``llvm.memcpy.element.unordered.atomic``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Syntax:
"""""""
-This is an overloaded intrinsic. You can use ``llvm.memcpy.element.atomic`` on
+This is an overloaded intrinsic. You can use ``llvm.memcpy.element.unordered.atomic`` on
any integer bit width and for different address spaces. Not all targets
support all bit widths however.
::
- declare void @llvm.memcpy.element.atomic.p0i8.p0i8(i8* <dest>, i8* <src>,
- i64 <num_elements>, i32 <element_size>)
+ declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* <dest>,
+ i8* <src>,
+ i32 <len>,
+ i32 <element_size>)
+ declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* <dest>,
+ i8* <src>,
+ i64 <len>,
+ i32 <element_size>)
Overview:
"""""""""
-The '``llvm.memcpy.element.atomic.*``' intrinsic performs copy of a block of
-memory from the source location to the destination location as a sequence of
-unordered atomic memory accesses where each access is a multiple of
-``element_size`` bytes wide and aligned at an element size boundary. For example
-each element is accessed atomically in source and destination buffers.
+The '``llvm.memcpy.element.unordered.atomic.*``' intrinsic is a specialization of the
+'``llvm.memcpy.*``' intrinsic. It differs in that the ``dest`` and ``src`` are treated
+as arrays with elements that are exactly ``element_size`` bytes, and the copy between
+buffers uses a sequence of :ref:`unordered atomic <ordering>` load/store operations
+that are a positive integer multiple of the ``element_size`` in size.
Arguments:
""""""""""
-The first argument is a pointer to the destination, the second is a
-pointer to the source. The third argument is an integer argument
-specifying the number of elements to copy, the fourth argument is size of
-the single element in bytes.
+The first three arguments are the same as they are in the :ref:`@llvm.memcpy <int_memcpy>`
+intrinsic, with the added constraint that ``len`` is required to be a positive integer
+multiple of the ``element_size``. If ``len`` is not a positive integer multiple of
+``element_size``, then the behaviour of the intrinsic is undefined.
-``element_size`` should be a power of two, greater than zero and less than
-a target-specific atomic access size limit.
+``element_size`` must be a compile-time constant positive power of two no greater than
+target-specific atomic access size limit.
-For each of the input pointers ``align`` parameter attribute must be specified.
-It must be a power of two and greater than or equal to the ``element_size``.
-Caller guarantees that both the source and destination pointers are aligned to
-that boundary.
+For each of the input pointers ``align`` parameter attribute must be specified. It
+must be a power of two no less than the ``element_size``. Caller guarantees that
+both the source and destination pointers are aligned to that boundary.
Semantics:
""""""""""
-The '``llvm.memcpy.element.atomic.*``' intrinsic copies
-'``num_elements`` * ``element_size``' bytes of memory from the source location to
-the destination location. These locations are not allowed to overlap. Memory copy
-is performed as a sequence of unordered atomic memory accesses where each access
-is guaranteed to be a multiple of ``element_size`` bytes wide and aligned at an
-element size boundary.
+The '``llvm.memcpy.element.unordered.atomic.*``' intrinsic copies ``len`` bytes of
+memory from the source location to the destination location. These locations are not
+allowed to overlap. The memory copy is performed as a sequence of load/store operations
+where each access is guaranteed to be a multiple of ``element_size`` bytes wide and
+aligned at an ``element_size`` boundary.
The order of the copy is unspecified. The same value may be read from the source
buffer many times, but only one write is issued to the destination buffer per
-element. It is well defined to have concurrent reads and writes to both source
-and destination provided those reads and writes are at least unordered atomic.
+element. It is well defined to have concurrent reads and writes to both source and
+destination provided those reads and writes are unordered atomic when specified.
This intrinsic does not provide any additional ordering guarantees over those
provided by a set of unordered loads from the source location and stores to the
@@ -14063,8 +14136,8 @@ destination.
Lowering:
"""""""""
-In the most general case call to the '``llvm.memcpy.element.atomic.*``' is lowered
-to a call to the symbol ``__llvm_memcpy_element_atomic_*``. Where '*' is replaced
-with an actual element size.
+In the most general case call to the '``llvm.memcpy.element.unordered.atomic.*``' is
+lowered to a call to the symbol ``__llvm_memcpy_element_unordered_atomic_*``. Where '*'
+is replaced with an actual element size.
-Optimizer is allowed to inline memory copy when it's profitable to do so.
+The optimizer is allowed to inline the memory copy when it's profitable to do so.
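
A sketch of a call to the renamed intrinsic under the constraints spelled out above: ``len`` (64) is a positive integer multiple of ``element_size`` (4), and both pointers carry an ``align`` attribute no smaller than ``element_size``. The function and argument names are hypothetical:

.. code-block:: llvm

    declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8*, i8*, i32, i32)

    define void @copy_atomic(i8* align 4 %dst, i8* align 4 %src) {
    entry:
      ; Copies 64 bytes as a sequence of unordered-atomic accesses, each a
      ; multiple of 4 bytes wide and 4-byte aligned.
      call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* align 4 %dst, i8* align 4 %src, i32 64, i32 4)
      ret void
    }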
diff --git a/docs/Lexicon.rst b/docs/Lexicon.rst
index ebc3fb772e81..ce7ed318fe4b 100644
--- a/docs/Lexicon.rst
+++ b/docs/Lexicon.rst
@@ -109,6 +109,13 @@ G
Garbage Collection. The practice of using reachability analysis instead of
explicit memory management to reclaim unused memory.
+**GVN**
+ Global Value Numbering. GVN is a pass that partitions values computed by a
+ function into congruence classes. Values ending up in the same congruence
+ class are guaranteed to be the same for every execution of the program.
+ In that respect, congruency is a compile-time approximation of equivalence
+ of values at runtime.
+
H
-
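
As a toy illustration of congruence classes, ``%a`` and ``%b`` below compute the same value from the same operands, so GVN can place them in one class and rewrite uses of ``%b`` to ``%a``:

.. code-block:: llvm

    define i32 @f(i32 %x, i32 %y) {
    entry:
      %a = add i32 %x, %y
      %b = add i32 %x, %y   ; congruent to %a for every execution
      %c = mul i32 %a, %b
      ret i32 %c
    }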
diff --git a/docs/Phabricator.rst b/docs/Phabricator.rst
index 8d1984b65cd9..cc8484cc1e3e 100644
--- a/docs/Phabricator.rst
+++ b/docs/Phabricator.rst
@@ -54,7 +54,8 @@ reviewer understand your code.
To get a full diff, use one of the following commands (or just use Arcanist
to upload your patch):
-* ``git diff -U999999 other-branch``
+* ``git show HEAD -U999999 > mypatch.patch``
+* ``git format-patch -U999999 @{u}``
* ``svn diff --diff-cmd=diff -x -U999999``
To upload a new patch:
diff --git a/include/llvm/ADT/AllocatorList.h b/include/llvm/ADT/AllocatorList.h
index 05a549f96ec7..178c6742a87b 100644
--- a/include/llvm/ADT/AllocatorList.h
+++ b/include/llvm/ADT/AllocatorList.h
@@ -10,10 +10,16 @@
#ifndef LLVM_ADT_ALLOCATORLIST_H
#define LLVM_ADT_ALLOCATORLIST_H
+#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/simple_ilist.h"
#include "llvm/Support/Allocator.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
#include <type_traits>
+#include <utility>
namespace llvm {
@@ -39,7 +45,8 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
T V;
};
- typedef simple_ilist<Node> list_type;
+ using list_type = simple_ilist<Node>;
+
list_type List;
AllocatorT &getAlloc() { return *this; }
@@ -51,13 +58,17 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
struct Cloner {
AllocatorList &AL;
+
Cloner(AllocatorList &AL) : AL(AL) {}
+
Node *operator()(const Node &N) const { return AL.create(N.V); }
};
struct Disposer {
AllocatorList &AL;
+
Disposer(AllocatorList &AL) : AL(AL) {}
+
void operator()(Node *N) const {
N->~Node();
AL.getAlloc().Deallocate(N);
@@ -65,13 +76,13 @@ template <class T, class AllocatorT> class AllocatorList : AllocatorT {
};
public:
- typedef T value_type;
- typedef T *pointer;
- typedef T &reference;
- typedef const T *const_pointer;
- typedef const T &const_reference;
- typedef typename list_type::size_type size_type;
- typedef typename list_type::difference_type difference_type;
+ using value_type = T;
+ using pointer = T *;
+ using reference = T &;
+ using const_pointer = const T *;
+ using const_reference = const T &;
+ using size_type = typename list_type::size_type;
+ using difference_type = typename list_type::difference_type;
private:
template <class ValueT, class IteratorBase>
@@ -83,20 +94,18 @@ private:
friend class IteratorImpl;
friend AllocatorList;
- typedef iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>,
- IteratorBase, std::bidirectional_iterator_tag,
- ValueT>
- base_type;
+ using base_type =
+ iterator_adaptor_base<IteratorImpl<ValueT, IteratorBase>, IteratorBase,
+ std::bidirectional_iterator_tag, ValueT>;
public:
- typedef ValueT value_type;
- typedef ValueT *pointer;
- typedef ValueT &reference;
+ using value_type = ValueT;
+ using pointer = ValueT *;
+ using reference = ValueT &;
IteratorImpl() = default;
IteratorImpl(const IteratorImpl &) = default;
IteratorImpl &operator=(const IteratorImpl &) = default;
- ~IteratorImpl() = default;
explicit IteratorImpl(const IteratorBase &I) : base_type(I) {}
@@ -106,6 +115,8 @@ private:
OtherIteratorBase, IteratorBase>::value>::type * = nullptr)
: base_type(X.wrapped()) {}
+ ~IteratorImpl() = default;
+
reference operator*() const { return base_type::wrapped()->V; }
pointer operator->() const { return &operator*(); }
@@ -118,30 +129,34 @@ private:
};
public:
- typedef IteratorImpl<T, typename list_type::iterator> iterator;
- typedef IteratorImpl<T, typename list_type::reverse_iterator>
- reverse_iterator;
- typedef IteratorImpl<const T, typename list_type::const_iterator>
- const_iterator;
- typedef IteratorImpl<const T, typename list_type::const_reverse_iterator>
- const_reverse_iterator;
+ using iterator = IteratorImpl<T, typename list_type::iterator>;
+ using reverse_iterator =
+ IteratorImpl<T, typename list_type::reverse_iterator>;
+ using const_iterator =
+ IteratorImpl<const T, typename list_type::const_iterator>;
+ using const_reverse_iterator =
+ IteratorImpl<const T, typename list_type::const_reverse_iterator>;
AllocatorList() = default;
AllocatorList(AllocatorList &&X)
: AllocatorT(std::move(X.getAlloc())), List(std::move(X.List)) {}
+
AllocatorList(const AllocatorList &X) {
List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
}
+
AllocatorList &operator=(AllocatorList &&X) {
clear(); // Dispose of current nodes explicitly.
List = std::move(X.List);
getAlloc() = std::move(X.getAlloc());
return *this;
}
+
AllocatorList &operator=(const AllocatorList &X) {
List.cloneFrom(X.List, Cloner(*this), Disposer(*this));
return *this;
}
+
~AllocatorList() { clear(); }
void swap(AllocatorList &RHS) {
diff --git a/include/llvm/ADT/ArrayRef.h b/include/llvm/ADT/ArrayRef.h
index 6b35d0aec8b2..925ebafc3fed 100644
--- a/include/llvm/ADT/ArrayRef.h
+++ b/include/llvm/ADT/ArrayRef.h
@@ -1,4 +1,4 @@
-//===--- ArrayRef.h - Array Reference Wrapper -------------------*- C++ -*-===//
+//===- ArrayRef.h - Array Reference Wrapper ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,12 +12,21 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/Support/Compiler.h"
+#include <algorithm>
#include <array>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
#include <vector>
namespace llvm {
+
/// ArrayRef - Represent a constant reference to an array (0 or more elements
/// consecutively in memory), i.e. a start pointer and a length. It allows
/// various APIs to take consecutive elements easily and conveniently.
@@ -32,28 +41,27 @@ namespace llvm {
template<typename T>
class LLVM_NODISCARD ArrayRef {
public:
- typedef const T *iterator;
- typedef const T *const_iterator;
- typedef size_t size_type;
-
- typedef std::reverse_iterator<iterator> reverse_iterator;
+ using iterator = const T *;
+ using const_iterator = const T *;
+ using size_type = size_t;
+ using reverse_iterator = std::reverse_iterator<iterator>;
private:
/// The start of the array, in an external buffer.
- const T *Data;
+ const T *Data = nullptr;
/// The number of elements.
- size_type Length;
+ size_type Length = 0;
public:
/// @name Constructors
/// @{
/// Construct an empty ArrayRef.
- /*implicit*/ ArrayRef() : Data(nullptr), Length(0) {}
+ /*implicit*/ ArrayRef() = default;
/// Construct an empty ArrayRef from None.
- /*implicit*/ ArrayRef(NoneType) : Data(nullptr), Length(0) {}
+ /*implicit*/ ArrayRef(NoneType) {}
/// Construct an ArrayRef from a single element.
/*implicit*/ ArrayRef(const T &OneElt)
@@ -282,9 +290,8 @@ namespace llvm {
template<typename T>
class LLVM_NODISCARD MutableArrayRef : public ArrayRef<T> {
public:
- typedef T *iterator;
-
- typedef std::reverse_iterator<iterator> reverse_iterator;
+ using iterator = T *;
+ using reverse_iterator = std::reverse_iterator<iterator>;
/// Construct an empty MutableArrayRef.
/*implicit*/ MutableArrayRef() : ArrayRef<T>() {}
@@ -416,19 +423,23 @@ namespace llvm {
/// This is a MutableArrayRef that owns its array.
template <typename T> class OwningArrayRef : public MutableArrayRef<T> {
public:
- OwningArrayRef() {}
+ OwningArrayRef() = default;
OwningArrayRef(size_t Size) : MutableArrayRef<T>(new T[Size], Size) {}
+
OwningArrayRef(ArrayRef<T> Data)
: MutableArrayRef<T>(new T[Data.size()], Data.size()) {
std::copy(Data.begin(), Data.end(), this->begin());
}
+
OwningArrayRef(OwningArrayRef &&Other) { *this = Other; }
+
OwningArrayRef &operator=(OwningArrayRef &&Other) {
delete[] this->data();
this->MutableArrayRef<T>::operator=(Other);
Other.MutableArrayRef<T>::operator=(MutableArrayRef<T>());
return *this;
}
+
~OwningArrayRef() { delete[] this->data(); }
};
@@ -517,13 +528,14 @@ namespace llvm {
// ArrayRefs can be treated like a POD type.
template <typename T> struct isPodLike;
- template <typename T> struct isPodLike<ArrayRef<T> > {
+ template <typename T> struct isPodLike<ArrayRef<T>> {
static const bool value = true;
};
template <typename T> hash_code hash_value(ArrayRef<T> S) {
return hash_combine_range(S.begin(), S.end());
}
+
} // end namespace llvm
#endif // LLVM_ADT_ARRAYREF_H
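The ArrayRef changes above are cosmetic (typedef-to-using, default member initializers); the type is still a non-owning (pointer, length) view. A usage sketch, relying only on the long-standing ArrayRef interface:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"

// ArrayRef accepts a C array, a SmallVector, a std::vector, or an
// initializer list without copying the elements.
int sumAll(llvm::ArrayRef<int> Vals) {
  int Sum = 0;
  for (int V : Vals)
    Sum += V;
  return Sum;
}

int caller() {
  int Raw[] = {1, 2, 3};
  llvm::SmallVector<int, 4> SV = {4, 5, 6};
  return sumAll(Raw) + sumAll(SV) + sumAll({7, 8});
}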
diff --git a/include/llvm/ADT/BreadthFirstIterator.h b/include/llvm/ADT/BreadthFirstIterator.h
index eaeecb6e057f..6bc63c283b09 100644
--- a/include/llvm/ADT/BreadthFirstIterator.h
+++ b/include/llvm/ADT/BreadthFirstIterator.h
@@ -25,7 +25,6 @@
#include "llvm/ADT/iterator_range.h"
#include <iterator>
#include <queue>
-#include <set>
#include <utility>
namespace llvm {
@@ -49,13 +48,13 @@ template <class GraphT,
class bf_iterator
: public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
public bf_iterator_storage<SetType> {
- typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
+ using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
- typedef typename GT::NodeRef NodeRef;
- typedef typename GT::ChildIteratorType ChildItTy;
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
// First element is the node reference, second is the next child to visit.
- typedef std::pair<NodeRef, Optional<ChildItTy>> QueueElement;
+ using QueueElement = std::pair<NodeRef, Optional<ChildItTy>>;
// Visit queue - used to maintain BFS ordering.
// Optional<> because we need markers for levels.
@@ -109,7 +108,7 @@ private:
}
public:
- typedef typename super::pointer pointer;
+ using pointer = typename super::pointer;
// Provide static begin and end methods as our public "constructors"
static bf_iterator begin(const GraphT &G) {
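bf_iterator is rarely constructed directly; the usual entry points are the begin/end helpers referenced above or the breadth_first range adapter declared later in this header. A sketch (assuming that adapter and the CFG GraphTraits from llvm/IR/CFG.h — neither appears in the hunks shown):

#include "llvm/ADT/BreadthFirstIterator.h"
#include "llvm/IR/CFG.h"       // GraphTraits<BasicBlock *> for the CFG
#include "llvm/IR/Function.h"

// Visit basic blocks level by level starting from the entry block.
unsigned countReachableBlocks(llvm::Function &F) {
  unsigned N = 0;
  for (llvm::BasicBlock *BB : llvm::breadth_first(&F.getEntryBlock())) {
    (void)BB; // visit BB here
    ++N;
  }
  return N;
}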
diff --git a/include/llvm/ADT/DAGDeltaAlgorithm.h b/include/llvm/ADT/DAGDeltaAlgorithm.h
index 5ea0fe872868..41fdd43efb8a 100644
--- a/include/llvm/ADT/DAGDeltaAlgorithm.h
+++ b/include/llvm/ADT/DAGDeltaAlgorithm.h
@@ -1,4 +1,4 @@
-//===--- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ----*- C++ -*--===//
+//===- DAGDeltaAlgorithm.h - A DAG Minimization Algorithm ------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -40,12 +40,12 @@ class DAGDeltaAlgorithm {
virtual void anchor();
public:
- typedef unsigned change_ty;
- typedef std::pair<change_ty, change_ty> edge_ty;
+ using change_ty = unsigned;
+ using edge_ty = std::pair<change_ty, change_ty>;
// FIXME: Use a decent data structure.
- typedef std::set<change_ty> changeset_ty;
- typedef std::vector<changeset_ty> changesetlist_ty;
+ using changeset_ty = std::set<change_ty>;
+ using changesetlist_ty = std::vector<changeset_ty>;
public:
virtual ~DAGDeltaAlgorithm() = default;
diff --git a/include/llvm/ADT/DeltaAlgorithm.h b/include/llvm/ADT/DeltaAlgorithm.h
index a26f37dfdc7d..6becb2a60104 100644
--- a/include/llvm/ADT/DeltaAlgorithm.h
+++ b/include/llvm/ADT/DeltaAlgorithm.h
@@ -1,4 +1,4 @@
-//===--- DeltaAlgorithm.h - A Set Minimization Algorithm -------*- C++ -*--===//
+//===- DeltaAlgorithm.h - A Set Minimization Algorithm ---------*- C++ -*--===//
//
// The LLVM Compiler Infrastructure
//
@@ -35,10 +35,10 @@ namespace llvm {
/// predicate.
class DeltaAlgorithm {
public:
- typedef unsigned change_ty;
+ using change_ty = unsigned;
// FIXME: Use a decent data structure.
- typedef std::set<change_ty> changeset_ty;
- typedef std::vector<changeset_ty> changesetlist_ty;
+ using changeset_ty = std::set<change_ty>;
+ using changesetlist_ty = std::vector<changeset_ty>;
private:
/// Cache of failed test results. Successful test results are never cached
@@ -90,4 +90,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DELTAALGORITHM_H
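The typedef-to-using change above leaves the delta-debugging interface intact. As a reminder of how the class is meant to be driven (a sketch assuming the usual Run entry point and ExecuteOneTest override point, which lie outside the hunks shown):

#include "llvm/ADT/DeltaAlgorithm.h"

// Delta debugging: shrink a set of "changes" to a smaller set that still
// makes the test fail. Subclasses supply the failing-test predicate.
class CrashMinimizer : public llvm::DeltaAlgorithm {
  bool ExecuteOneTest(const changeset_ty &S) override {
    // Hypothetical predicate: the bug reproduces whenever change #3 is kept.
    return S.count(3);
  }
};

void minimize() {
  CrashMinimizer M;
  llvm::DeltaAlgorithm::changeset_ty All = {1, 2, 3, 4, 5};
  llvm::DeltaAlgorithm::changeset_ty Minimal = M.Run(All); // ideally just {3}
  (void)Minimal;
}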
diff --git a/include/llvm/ADT/DenseMap.h b/include/llvm/ADT/DenseMap.h
index fd8d3bf368a8..b311e69ec9d3 100644
--- a/include/llvm/ADT/DenseMap.h
+++ b/include/llvm/ADT/DenseMap.h
@@ -25,8 +25,8 @@
#include <cstddef>
#include <cstring>
#include <iterator>
-#include <limits>
#include <new>
+#include <type_traits>
#include <utility>
namespace llvm {
@@ -57,14 +57,15 @@ class DenseMapBase : public DebugEpochBase {
using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
public:
- typedef unsigned size_type;
- typedef KeyT key_type;
- typedef ValueT mapped_type;
- typedef BucketT value_type;
-
- typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT> iterator;
- typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>
- const_iterator;
+ using size_type = unsigned;
+ using key_type = KeyT;
+ using mapped_type = ValueT;
+ using value_type = BucketT;
+
+ using iterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT>;
+ using const_iterator =
+ DenseMapIterator<KeyT, ValueT, KeyInfoT, BucketT, true>;
+
inline iterator begin() {
// When the map is empty, avoid the overhead of AdvancePastEmptyBuckets().
return empty() ? end() : iterator(getBuckets(), getBucketsEnd(), *this);
@@ -387,15 +388,18 @@ protected:
static unsigned getHashValue(const KeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
+
template<typename LookupKeyT>
static unsigned getHashValue(const LookupKeyT &Val) {
return KeyInfoT::getHashValue(Val);
}
+
static const KeyT getEmptyKey() {
static_assert(std::is_base_of<DenseMapBase, DerivedT>::value,
"Must pass the derived type to this template!");
return KeyInfoT::getEmptyKey();
}
+
static const KeyT getTombstoneKey() {
return KeyInfoT::getTombstoneKey();
}
@@ -404,39 +408,51 @@ private:
unsigned getNumEntries() const {
return static_cast<const DerivedT *>(this)->getNumEntries();
}
+
void setNumEntries(unsigned Num) {
static_cast<DerivedT *>(this)->setNumEntries(Num);
}
+
void incrementNumEntries() {
setNumEntries(getNumEntries() + 1);
}
+
void decrementNumEntries() {
setNumEntries(getNumEntries() - 1);
}
+
unsigned getNumTombstones() const {
return static_cast<const DerivedT *>(this)->getNumTombstones();
}
+
void setNumTombstones(unsigned Num) {
static_cast<DerivedT *>(this)->setNumTombstones(Num);
}
+
void incrementNumTombstones() {
setNumTombstones(getNumTombstones() + 1);
}
+
void decrementNumTombstones() {
setNumTombstones(getNumTombstones() - 1);
}
+
const BucketT *getBuckets() const {
return static_cast<const DerivedT *>(this)->getBuckets();
}
+
BucketT *getBuckets() {
return static_cast<DerivedT *>(this)->getBuckets();
}
+
unsigned getNumBuckets() const {
return static_cast<const DerivedT *>(this)->getNumBuckets();
}
+
BucketT *getBucketsEnd() {
return getBuckets() + getNumBuckets();
}
+
const BucketT *getBucketsEnd() const {
return getBuckets() + getNumBuckets();
}
@@ -587,10 +603,11 @@ template <typename KeyT, typename ValueT,
typename BucketT = detail::DenseMapPair<KeyT, ValueT>>
class DenseMap : public DenseMapBase<DenseMap<KeyT, ValueT, KeyInfoT, BucketT>,
KeyT, ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
- typedef DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
- friend class DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+ using BaseT = DenseMapBase<DenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
BucketT *Buckets;
unsigned NumEntries;
@@ -705,6 +722,7 @@ private:
unsigned getNumEntries() const {
return NumEntries;
}
+
void setNumEntries(unsigned Num) {
NumEntries = Num;
}
@@ -712,6 +730,7 @@ private:
unsigned getNumTombstones() const {
return NumTombstones;
}
+
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
@@ -743,10 +762,12 @@ class SmallDenseMap
: public DenseMapBase<
SmallDenseMap<KeyT, ValueT, InlineBuckets, KeyInfoT, BucketT>, KeyT,
ValueT, KeyInfoT, BucketT> {
+ friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
// Lift some types from the dependent base class into this class for
// simplicity of referring to them.
- typedef DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT> BaseT;
- friend class DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+ using BaseT = DenseMapBase<SmallDenseMap, KeyT, ValueT, KeyInfoT, BucketT>;
+
static_assert(isPowerOf2_64(InlineBuckets),
"InlineBuckets must be a power of 2.");
@@ -972,6 +993,7 @@ private:
unsigned getNumEntries() const {
return NumEntries;
}
+
void setNumEntries(unsigned Num) {
// NumEntries is hardcoded to be 31 bits wide.
assert(Num < (1U << 31) && "Cannot support more than 1<<31 entries");
@@ -981,6 +1003,7 @@ private:
unsigned getNumTombstones() const {
return NumTombstones;
}
+
void setNumTombstones(unsigned Num) {
NumTombstones = Num;
}
@@ -992,15 +1015,18 @@ private:
// 'storage.buffer' static type is 'char *'.
return reinterpret_cast<const BucketT *>(storage.buffer);
}
+
BucketT *getInlineBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getInlineBuckets());
}
+
const LargeRep *getLargeRep() const {
assert(!Small);
// Note, same rule about aliasing as with getInlineBuckets.
return reinterpret_cast<const LargeRep *>(storage.buffer);
}
+
LargeRep *getLargeRep() {
return const_cast<LargeRep *>(
const_cast<const SmallDenseMap *>(this)->getLargeRep());
@@ -1009,10 +1035,12 @@ private:
const BucketT *getBuckets() const {
return Small ? getInlineBuckets() : getLargeRep()->Buckets;
}
+
BucketT *getBuckets() {
return const_cast<BucketT *>(
const_cast<const SmallDenseMap *>(this)->getBuckets());
}
+
unsigned getNumBuckets() const {
return Small ? InlineBuckets : getLargeRep()->NumBuckets;
}
@@ -1037,23 +1065,25 @@ private:
template <typename KeyT, typename ValueT, typename KeyInfoT, typename Bucket,
bool IsConst>
class DenseMapIterator : DebugEpochBase::HandleBase {
- typedef DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true> ConstIterator;
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
friend class DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, false>;
+ using ConstIterator = DenseMapIterator<KeyT, ValueT, KeyInfoT, Bucket, true>;
+
public:
- typedef ptrdiff_t difference_type;
- typedef typename std::conditional<IsConst, const Bucket, Bucket>::type
- value_type;
- typedef value_type *pointer;
- typedef value_type &reference;
- typedef std::forward_iterator_tag iterator_category;
+ using difference_type = ptrdiff_t;
+ using value_type =
+ typename std::conditional<IsConst, const Bucket, Bucket>::type;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
private:
- pointer Ptr, End;
+ pointer Ptr = nullptr;
+ pointer End = nullptr;
public:
- DenseMapIterator() : Ptr(nullptr), End(nullptr) {}
+ DenseMapIterator() = default;
DenseMapIterator(pointer Pos, pointer E, const DebugEpochBase &Epoch,
bool NoAdvance = false)
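None of the DenseMap.h changes above alter behavior; they modernize typedefs, hoist the friend declaration, and default-initialize the iterator members. For orientation, typical client code looks like this (a sketch using only the standard DenseMap interface):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"

unsigned lookupLine(const llvm::DenseMap<llvm::StringRef, unsigned> &Lines,
                    llvm::StringRef Name) {
  // find() avoids creating an entry; lookup() would return a
  // value-initialized result (0 here) on a miss.
  auto It = Lines.find(Name);
  return It == Lines.end() ? 0 : It->second;
}

void buildTable() {
  llvm::DenseMap<llvm::StringRef, unsigned> Lines;
  Lines["entry"] = 10;              // operator[] inserts a default, then assigns
  Lines.insert({"exit", 42});       // insert() refuses to overwrite an existing key
  (void)lookupLine(Lines, "entry"); // 10
}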
diff --git a/include/llvm/ADT/DenseMapInfo.h b/include/llvm/ADT/DenseMapInfo.h
index bb973ac65063..a96904c7dbbf 100644
--- a/include/llvm/ADT/DenseMapInfo.h
+++ b/include/llvm/ADT/DenseMapInfo.h
@@ -18,7 +18,10 @@
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
-#include "llvm/Support/type_traits.h"
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <utility>
namespace llvm {
@@ -38,15 +41,18 @@ struct DenseMapInfo<T*> {
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
+
static inline T* getTombstoneKey() {
uintptr_t Val = static_cast<uintptr_t>(-2);
Val <<= PointerLikeTypeTraits<T*>::NumLowBitsAvailable;
return reinterpret_cast<T*>(Val);
}
+
static unsigned getHashValue(const T *PtrVal) {
return (unsigned((uintptr_t)PtrVal) >> 4) ^
(unsigned((uintptr_t)PtrVal) >> 9);
}
+
static bool isEqual(const T *LHS, const T *RHS) { return LHS == RHS; }
};
@@ -55,6 +61,7 @@ template<> struct DenseMapInfo<char> {
static inline char getEmptyKey() { return ~0; }
static inline char getTombstoneKey() { return ~0 - 1; }
static unsigned getHashValue(const char& Val) { return Val * 37U; }
+
static bool isEqual(const char &LHS, const char &RHS) {
return LHS == RHS;
}
@@ -65,6 +72,7 @@ template <> struct DenseMapInfo<unsigned short> {
static inline unsigned short getEmptyKey() { return 0xFFFF; }
static inline unsigned short getTombstoneKey() { return 0xFFFF - 1; }
static unsigned getHashValue(const unsigned short &Val) { return Val * 37U; }
+
static bool isEqual(const unsigned short &LHS, const unsigned short &RHS) {
return LHS == RHS;
}
@@ -75,6 +83,7 @@ template<> struct DenseMapInfo<unsigned> {
static inline unsigned getEmptyKey() { return ~0U; }
static inline unsigned getTombstoneKey() { return ~0U - 1; }
static unsigned getHashValue(const unsigned& Val) { return Val * 37U; }
+
static bool isEqual(const unsigned& LHS, const unsigned& RHS) {
return LHS == RHS;
}
@@ -84,9 +93,11 @@ template<> struct DenseMapInfo<unsigned> {
template<> struct DenseMapInfo<unsigned long> {
static inline unsigned long getEmptyKey() { return ~0UL; }
static inline unsigned long getTombstoneKey() { return ~0UL - 1L; }
+
static unsigned getHashValue(const unsigned long& Val) {
return (unsigned)(Val * 37UL);
}
+
static bool isEqual(const unsigned long& LHS, const unsigned long& RHS) {
return LHS == RHS;
}
@@ -96,9 +107,11 @@ template<> struct DenseMapInfo<unsigned long> {
template<> struct DenseMapInfo<unsigned long long> {
static inline unsigned long long getEmptyKey() { return ~0ULL; }
static inline unsigned long long getTombstoneKey() { return ~0ULL - 1ULL; }
+
static unsigned getHashValue(const unsigned long long& Val) {
return (unsigned)(Val * 37ULL);
}
+
static bool isEqual(const unsigned long long& LHS,
const unsigned long long& RHS) {
return LHS == RHS;
@@ -118,6 +131,7 @@ template<> struct DenseMapInfo<int> {
static inline int getEmptyKey() { return 0x7fffffff; }
static inline int getTombstoneKey() { return -0x7fffffff - 1; }
static unsigned getHashValue(const int& Val) { return (unsigned)(Val * 37U); }
+
static bool isEqual(const int& LHS, const int& RHS) {
return LHS == RHS;
}
@@ -128,10 +142,13 @@ template<> struct DenseMapInfo<long> {
static inline long getEmptyKey() {
return (1UL << (sizeof(long) * 8 - 1)) - 1UL;
}
+
static inline long getTombstoneKey() { return getEmptyKey() - 1L; }
+
static unsigned getHashValue(const long& Val) {
return (unsigned)(Val * 37UL);
}
+
static bool isEqual(const long& LHS, const long& RHS) {
return LHS == RHS;
}
@@ -141,9 +158,11 @@ template<> struct DenseMapInfo<long> {
template<> struct DenseMapInfo<long long> {
static inline long long getEmptyKey() { return 0x7fffffffffffffffLL; }
static inline long long getTombstoneKey() { return -0x7fffffffffffffffLL-1; }
+
static unsigned getHashValue(const long long& Val) {
return (unsigned)(Val * 37ULL);
}
+
static bool isEqual(const long long& LHS,
const long long& RHS) {
return LHS == RHS;
@@ -152,19 +171,21 @@ template<> struct DenseMapInfo<long long> {
// Provide DenseMapInfo for all pairs whose members have info.
template<typename T, typename U>
-struct DenseMapInfo<std::pair<T, U> > {
- typedef std::pair<T, U> Pair;
- typedef DenseMapInfo<T> FirstInfo;
- typedef DenseMapInfo<U> SecondInfo;
+struct DenseMapInfo<std::pair<T, U>> {
+ using Pair = std::pair<T, U>;
+ using FirstInfo = DenseMapInfo<T>;
+ using SecondInfo = DenseMapInfo<U>;
static inline Pair getEmptyKey() {
return std::make_pair(FirstInfo::getEmptyKey(),
SecondInfo::getEmptyKey());
}
+
static inline Pair getTombstoneKey() {
return std::make_pair(FirstInfo::getTombstoneKey(),
SecondInfo::getTombstoneKey());
}
+
static unsigned getHashValue(const Pair& PairVal) {
uint64_t key = (uint64_t)FirstInfo::getHashValue(PairVal.first) << 32
| (uint64_t)SecondInfo::getHashValue(PairVal.second);
@@ -178,6 +199,7 @@ struct DenseMapInfo<std::pair<T, U> > {
key ^= (key >> 31);
return (unsigned)key;
}
+
static bool isEqual(const Pair &LHS, const Pair &RHS) {
return FirstInfo::isEqual(LHS.first, RHS.first) &&
SecondInfo::isEqual(LHS.second, RHS.second);
@@ -190,16 +212,19 @@ template <> struct DenseMapInfo<StringRef> {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(0)),
0);
}
+
static inline StringRef getTombstoneKey() {
return StringRef(reinterpret_cast<const char *>(~static_cast<uintptr_t>(1)),
0);
}
+
static unsigned getHashValue(StringRef Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
+
static bool isEqual(StringRef LHS, StringRef RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
@@ -215,16 +240,19 @@ template <typename T> struct DenseMapInfo<ArrayRef<T>> {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(0)),
size_t(0));
}
+
static inline ArrayRef<T> getTombstoneKey() {
return ArrayRef<T>(reinterpret_cast<const T *>(~static_cast<uintptr_t>(1)),
size_t(0));
}
+
static unsigned getHashValue(ArrayRef<T> Val) {
assert(Val.data() != getEmptyKey().data() && "Cannot hash the empty key!");
assert(Val.data() != getTombstoneKey().data() &&
"Cannot hash the tombstone key!");
return (unsigned)(hash_value(Val));
}
+
static bool isEqual(ArrayRef<T> LHS, ArrayRef<T> RHS) {
if (RHS.data() == getEmptyKey().data())
return LHS.data() == getEmptyKey().data();
@@ -236,4 +264,4 @@ template <typename T> struct DenseMapInfo<ArrayRef<T>> {
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DENSEMAPINFO_H
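The specializations above are the template users copy when keying a DenseMap on their own type: two sentinel keys that can never be real data, a hash, and an equality test. A minimal sketch for a hypothetical struct (FileId and its fields are invented for illustration):

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include <utility>

struct FileId { unsigned Device, Inode; }; // hypothetical key type

namespace llvm {
template <> struct DenseMapInfo<FileId> {
  // Sentinels reserved for empty and tombstone buckets; they must never
  // collide with real keys.
  static inline FileId getEmptyKey() { return {~0U, ~0U}; }
  static inline FileId getTombstoneKey() { return {~0U - 1, ~0U}; }

  static unsigned getHashValue(const FileId &V) {
    // Reuse the std::pair specialization shown above.
    return DenseMapInfo<std::pair<unsigned, unsigned>>::getHashValue(
        {V.Device, V.Inode});
  }

  static bool isEqual(const FileId &L, const FileId &R) {
    return L.Device == R.Device && L.Inode == R.Inode;
  }
};
} // end namespace llvm

With that specialization in scope, llvm::DenseMap<FileId, unsigned> and llvm::DenseSet<FileId> work out of the box.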
diff --git a/include/llvm/ADT/DenseSet.h b/include/llvm/ADT/DenseSet.h
index fcf304c3ecc4..7e5171c3f3a4 100644
--- a/include/llvm/ADT/DenseSet.h
+++ b/include/llvm/ADT/DenseSet.h
@@ -15,11 +15,18 @@
#define LLVM_ADT_DENSESET_H
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseMapInfo.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
+#include <cstddef>
#include <initializer_list>
+#include <iterator>
+#include <utility>
namespace llvm {
namespace detail {
+
struct DenseSetEmpty {};
// Use the empty base class trick so we can create a DenseMap where the buckets
@@ -48,13 +55,14 @@ class DenseSetImpl {
static_assert(sizeof(typename MapTy::value_type) == sizeof(ValueT),
"DenseMap buckets unexpectedly large!");
MapTy TheMap;
+
template <typename T>
using const_arg_type_t = typename const_pointer_or_const_ref<T>::type;
public:
- typedef ValueT key_type;
- typedef ValueT value_type;
- typedef unsigned size_type;
+ using key_type = ValueT;
+ using value_type = ValueT;
+ using size_type = unsigned;
explicit DenseSetImpl(unsigned InitialReserve = 0) : TheMap(InitialReserve) {}
@@ -100,11 +108,11 @@ public:
friend class ConstIterator;
public:
- typedef typename MapTy::iterator::difference_type difference_type;
- typedef ValueT value_type;
- typedef value_type *pointer;
- typedef value_type &reference;
- typedef std::forward_iterator_tag iterator_category;
+ using difference_type = typename MapTy::iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
Iterator() = default;
Iterator(const typename MapTy::iterator &i) : I(i) {}
@@ -126,16 +134,14 @@ public:
friend class Iterator;
public:
- typedef typename MapTy::const_iterator::difference_type difference_type;
- typedef ValueT value_type;
- typedef value_type *pointer;
- typedef value_type &reference;
- typedef std::forward_iterator_tag iterator_category;
-
- ConstIterator(const Iterator &B) : I(B.I) {}
+ using difference_type = typename MapTy::const_iterator::difference_type;
+ using value_type = ValueT;
+ using pointer = value_type *;
+ using reference = value_type &;
+ using iterator_category = std::forward_iterator_tag;
ConstIterator() = default;
-
+ ConstIterator(const Iterator &B) : I(B.I) {}
ConstIterator(const typename MapTy::const_iterator &i) : I(i) {}
const ValueT &operator*() const { return I->getFirst(); }
@@ -147,8 +153,8 @@ public:
bool operator!=(const ConstIterator& X) const { return I != X.I; }
};
- typedef Iterator iterator;
- typedef ConstIterator const_iterator;
+ using iterator = Iterator;
+ using const_iterator = ConstIterator;
iterator begin() { return Iterator(TheMap.begin()); }
iterator end() { return Iterator(TheMap.end()); }
@@ -208,7 +214,7 @@ public:
}
};
-} // namespace detail
+} // end namespace detail
/// Implements a dense probed hash-table based set.
template <typename ValueT, typename ValueInfoT = DenseMapInfo<ValueT>>
@@ -246,4 +252,4 @@ public:
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_DENSESET_H
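DenseSet is the set counterpart of the map above and shares its bucket layout; typical use is a one-liner (sketch):

#include "llvm/ADT/DenseSet.h"

bool firstSighting(llvm::DenseSet<unsigned> &Seen, unsigned ID) {
  // insert() returns {iterator, bool}; the bool is true when ID was new.
  return Seen.insert(ID).second;
}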
diff --git a/include/llvm/ADT/DepthFirstIterator.h b/include/llvm/ADT/DepthFirstIterator.h
index b020d48cb3f0..e964d7fa2391 100644
--- a/include/llvm/ADT/DepthFirstIterator.h
+++ b/include/llvm/ADT/DepthFirstIterator.h
@@ -68,13 +68,14 @@ public:
// cross edges in the spanning tree but is not used in the common case.
template <typename NodeRef, unsigned SmallSize=8>
struct df_iterator_default_set : public SmallPtrSet<NodeRef, SmallSize> {
- typedef SmallPtrSet<NodeRef, SmallSize> BaseSet;
- typedef typename BaseSet::iterator iterator;
- std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N) ; }
+ using BaseSet = SmallPtrSet<NodeRef, SmallSize>;
+ using iterator = typename BaseSet::iterator;
+
+ std::pair<iterator,bool> insert(NodeRef N) { return BaseSet::insert(N); }
template <typename IterT>
void insert(IterT Begin, IterT End) { BaseSet::insert(Begin,End); }
- void completed(NodeRef) { }
+ void completed(NodeRef) {}
};
// Generic Depth First Iterator
@@ -85,15 +86,14 @@ template <class GraphT,
class df_iterator
: public std::iterator<std::forward_iterator_tag, typename GT::NodeRef>,
public df_iterator_storage<SetType, ExtStorage> {
- typedef std::iterator<std::forward_iterator_tag, typename GT::NodeRef> super;
-
- typedef typename GT::NodeRef NodeRef;
- typedef typename GT::ChildIteratorType ChildItTy;
+ using super = std::iterator<std::forward_iterator_tag, typename GT::NodeRef>;
+ using NodeRef = typename GT::NodeRef;
+ using ChildItTy = typename GT::ChildIteratorType;
// First element is node reference, second is the 'next child' to visit.
// The second child is initialized lazily to pick up graph changes during the
// DFS.
- typedef std::pair<NodeRef, Optional<ChildItTy>> StackElement;
+ using StackElement = std::pair<NodeRef, Optional<ChildItTy>>;
// VisitStack - Used to maintain the ordering. Top = current block
std::vector<StackElement> VisitStack;
@@ -103,12 +103,15 @@ private:
this->Visited.insert(Node);
VisitStack.push_back(StackElement(Node, None));
}
+
inline df_iterator() = default; // End is when stack is empty
+
inline df_iterator(NodeRef Node, SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
if (this->Visited.insert(Node).second)
VisitStack.push_back(StackElement(Node, None));
}
+
inline df_iterator(SetType &S)
: df_iterator_storage<SetType, ExtStorage>(S) {
// End is when stack is empty
@@ -142,7 +145,7 @@ private:
}
public:
- typedef typename super::pointer pointer;
+ using pointer = typename super::pointer;
// Provide static begin and end methods as our public "constructors"
static df_iterator begin(const GraphT &G) {
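As with the breadth-first iterator earlier, df_iterator is normally consumed through range helpers rather than the static begin/end shown above. A sketch (assuming the depth_first and depth_first_ext adapters declared later in this header, plus the CFG GraphTraits):

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Function.h"

void walkCFG(llvm::Function &F) {
  // Pre-order DFS over the CFG; the default df_iterator_default_set above
  // tracks visited nodes internally.
  for (llvm::BasicBlock *BB : llvm::depth_first(&F.getEntryBlock()))
    (void)BB; // visit BB here

  // depth_first_ext lets the caller own (and reuse) the visited set.
  llvm::df_iterator_default_set<llvm::BasicBlock *> Visited;
  for (llvm::BasicBlock *BB : llvm::depth_first_ext(&F.getEntryBlock(), Visited))
    (void)BB;
}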
diff --git a/include/llvm/ADT/EquivalenceClasses.h b/include/llvm/ADT/EquivalenceClasses.h
index 8fcac178ffc9..af293d4c1422 100644
--- a/include/llvm/ADT/EquivalenceClasses.h
+++ b/include/llvm/ADT/EquivalenceClasses.h
@@ -1,4 +1,4 @@
-//===-- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes --*- C++ -*-===//
+//===- llvm/ADT/EquivalenceClasses.h - Generic Equiv. Classes ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -69,6 +69,7 @@ class EquivalenceClasses {
/// leader is determined by a bit stolen from one of the pointers.
class ECValue {
friend class EquivalenceClasses;
+
mutable const ECValue *Leader, *Next;
ElemTy Data;
@@ -141,14 +142,14 @@ public:
//
/// iterator* - Provides a way to iterate over all values in the set.
- typedef typename std::set<ECValue>::const_iterator iterator;
+ using iterator = typename std::set<ECValue>::const_iterator;
+
iterator begin() const { return TheMapping.begin(); }
iterator end() const { return TheMapping.end(); }
bool empty() const { return TheMapping.empty(); }
/// member_* Iterate over the members of an equivalence class.
- ///
class member_iterator;
member_iterator member_begin(iterator I) const {
// Only leaders provide anything to iterate over.
@@ -204,7 +205,6 @@ public:
/// equivalence class it is in. This does the path-compression part that
/// makes union-find "union findy". This returns an end iterator if the value
/// is not in the equivalence class.
- ///
member_iterator findLeader(iterator I) const {
if (I == TheMapping.end()) return member_end();
return member_iterator(I->getLeader());
@@ -241,15 +241,17 @@ public:
class member_iterator : public std::iterator<std::forward_iterator_tag,
const ElemTy, ptrdiff_t> {
- typedef std::iterator<std::forward_iterator_tag,
- const ElemTy, ptrdiff_t> super;
- const ECValue *Node;
friend class EquivalenceClasses;
+ using super = std::iterator<std::forward_iterator_tag,
+ const ElemTy, ptrdiff_t>;
+
+ const ECValue *Node;
+
public:
- typedef size_t size_type;
- typedef typename super::pointer pointer;
- typedef typename super::reference reference;
+ using size_type = size_t;
+ using pointer = typename super::pointer;
+ using reference = typename super::reference;
explicit member_iterator() = default;
explicit member_iterator(const ECValue *N) : Node(N) {}
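The member_iterator rearranged above walks one equivalence class of the union-find structure. A sketch of the surrounding interface (assuming the usual insert/unionSets/getLeaderValue entry points, which sit outside the hunks shown):

#include "llvm/ADT/EquivalenceClasses.h"

bool sameClass() {
  llvm::EquivalenceClasses<int> EC;
  EC.unionSets(1, 2); // 1 and 2 now share a leader
  EC.unionSets(2, 3); // transitively, so does 3
  EC.insert(7);       // singleton class

  // getLeaderValue returns the class representative; member_begin/member_end
  // (via findLeader) iterate the members of one class.
  return EC.getLeaderValue(1) == EC.getLeaderValue(3);
}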
diff --git a/include/llvm/ADT/FoldingSet.h b/include/llvm/ADT/FoldingSet.h
index dab18297dd3b..c5987a947e18 100644
--- a/include/llvm/ADT/FoldingSet.h
+++ b/include/llvm/ADT/FoldingSet.h
@@ -40,7 +40,7 @@ namespace llvm {
/// FoldingSetNode. The node class must also define a Profile method used to
/// establish the unique bits of data for the node. The Profile method is
/// passed a FoldingSetNodeID object which is used to gather the bits. Just
-/// call one of the Add* functions defined in the FoldingSetImpl::NodeID class.
+/// call one of the Add* functions defined in the FoldingSetBase::NodeID class.
/// NOTE: The folding set does not own the nodes and it is the
/// responsibility of the user to dispose of the nodes.
///
@@ -104,13 +104,13 @@ class FoldingSetNodeID;
class StringRef;
//===----------------------------------------------------------------------===//
-/// FoldingSetImpl - Implements the folding set functionality. The main
+/// FoldingSetBase - Implements the folding set functionality. The main
/// structure is an array of buckets. Each bucket is indexed by the hash of
/// the nodes it contains. The bucket itself points to the nodes contained
/// in the bucket via a singly linked list. The last node in the list points
/// back to the bucket to facilitate node removal.
///
-class FoldingSetImpl {
+class FoldingSetBase {
virtual void anchor(); // Out of line virtual method.
protected:
@@ -126,10 +126,10 @@ protected:
/// is greater than twice the number of buckets.
unsigned NumNodes;
- explicit FoldingSetImpl(unsigned Log2InitSize = 6);
- FoldingSetImpl(FoldingSetImpl &&Arg);
- FoldingSetImpl &operator=(FoldingSetImpl &&RHS);
- ~FoldingSetImpl();
+ explicit FoldingSetBase(unsigned Log2InitSize = 6);
+ FoldingSetBase(FoldingSetBase &&Arg);
+ FoldingSetBase &operator=(FoldingSetBase &&RHS);
+ ~FoldingSetBase();
public:
//===--------------------------------------------------------------------===//
@@ -152,33 +152,6 @@ public:
/// clear - Remove all nodes from the folding set.
void clear();
- /// RemoveNode - Remove a node from the folding set, returning true if one
- /// was removed or false if the node was not in the folding set.
- bool RemoveNode(Node *N);
-
- /// GetOrInsertNode - If there is an existing simple Node exactly
- /// equal to the specified node, return it. Otherwise, insert 'N' and return
- /// it instead.
- Node *GetOrInsertNode(Node *N);
-
- /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
- /// return it. If not, return the insertion token that will make insertion
- /// faster.
- Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
-
- /// InsertNode - Insert the specified node into the folding set, knowing that
- /// it is not already in the folding set. InsertPos must be obtained from
- /// FindNodeOrInsertPos.
- void InsertNode(Node *N, void *InsertPos);
-
- /// InsertNode - Insert the specified node into the folding set, knowing that
- /// it is not already in the folding set.
- void InsertNode(Node *N) {
- Node *Inserted = GetOrInsertNode(N);
- (void)Inserted;
- assert(Inserted == N && "Node already inserted!");
- }
-
/// size - Returns the number of nodes in the folding set.
unsigned size() const { return NumNodes; }
@@ -220,6 +193,28 @@ protected:
/// ComputeNodeHash - Instantiations of the FoldingSet template implement
/// this function to compute a hash value for the given node.
virtual unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const = 0;
+
+ // The below methods are protected to encourage subclasses to provide a more
+ // type-safe API.
+
+ /// RemoveNode - Remove a node from the folding set, returning true if one
+ /// was removed or false if the node was not in the folding set.
+ bool RemoveNode(Node *N);
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and return
+ /// it instead.
+ Node *GetOrInsertNode(Node *N);
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ Node *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos);
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(Node *N, void *InsertPos);
};
//===----------------------------------------------------------------------===//
@@ -293,7 +288,7 @@ public:
FoldingSetNodeIDRef(const unsigned *D, size_t S) : Data(D), Size(S) {}
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeIDRef,
- /// used to lookup the node in the FoldingSetImpl.
+ /// used to lookup the node in the FoldingSetBase.
unsigned ComputeHash() const;
bool operator==(FoldingSetNodeIDRef) const;
@@ -345,7 +340,7 @@ public:
inline void clear() { Bits.clear(); }
/// ComputeHash - Compute a strong hash value for this FoldingSetNodeID, used
- /// to lookup the node in the FoldingSetImpl.
+ /// to lookup the node in the FoldingSetBase.
unsigned ComputeHash() const;
/// operator== - Used to compare two nodes to each other.
@@ -368,7 +363,7 @@ public:
};
// Convenience type to hide the implementation of the folding set.
-typedef FoldingSetImpl::Node FoldingSetNode;
+typedef FoldingSetBase::Node FoldingSetNode;
template<class T> class FoldingSetIterator;
template<class T> class FoldingSetBucketIterator;
@@ -408,6 +403,71 @@ DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
}
//===----------------------------------------------------------------------===//
+/// FoldingSetImpl - An implementation detail that lets us share code between
+/// FoldingSet and ContextualFoldingSet.
+template <class T> class FoldingSetImpl : public FoldingSetBase {
+protected:
+ explicit FoldingSetImpl(unsigned Log2InitSize)
+ : FoldingSetBase(Log2InitSize) {}
+
+ FoldingSetImpl(FoldingSetImpl &&Arg) = default;
+ FoldingSetImpl &operator=(FoldingSetImpl &&RHS) = default;
+ ~FoldingSetImpl() = default;
+
+public:
+ typedef FoldingSetIterator<T> iterator;
+ iterator begin() { return iterator(Buckets); }
+ iterator end() { return iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetIterator<const T> const_iterator;
+ const_iterator begin() const { return const_iterator(Buckets); }
+ const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
+
+ typedef FoldingSetBucketIterator<T> bucket_iterator;
+
+ bucket_iterator bucket_begin(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
+ }
+
+ bucket_iterator bucket_end(unsigned hash) {
+ return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
+ }
+
+ /// RemoveNode - Remove a node from the folding set, returning true if one
+ /// was removed or false if the node was not in the folding set.
+ bool RemoveNode(T *N) { return FoldingSetBase::RemoveNode(N); }
+
+ /// GetOrInsertNode - If there is an existing simple Node exactly
+ /// equal to the specified node, return it. Otherwise, insert 'N' and
+ /// return it instead.
+ T *GetOrInsertNode(T *N) {
+ return static_cast<T *>(FoldingSetBase::GetOrInsertNode(N));
+ }
+
+ /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
+ /// return it. If not, return the insertion token that will make insertion
+ /// faster.
+ T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
+ return static_cast<T *>(FoldingSetBase::FindNodeOrInsertPos(ID, InsertPos));
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set. InsertPos must be obtained from
+ /// FindNodeOrInsertPos.
+ void InsertNode(T *N, void *InsertPos) {
+ FoldingSetBase::InsertNode(N, InsertPos);
+ }
+
+ /// InsertNode - Insert the specified node into the folding set, knowing that
+ /// it is not already in the folding set.
+ void InsertNode(T *N) {
+ T *Inserted = GetOrInsertNode(N);
+ (void)Inserted;
+ assert(Inserted == N && "Node already inserted!");
+ }
+};
+
+//===----------------------------------------------------------------------===//
/// FoldingSet - This template class is used to instantiate a specialized
/// implementation of the folding set to the node class T. T must be a
/// subclass of FoldingSetNode and implement a Profile function.
@@ -416,8 +476,10 @@ DefaultContextualFoldingSetTrait<T, Ctx>::ComputeHash(T &X,
/// moved-from state is not a valid state for anything other than
/// move-assigning and destroying. This is primarily to enable movable APIs
/// that incorporate these objects.
-template <class T> class FoldingSet final : public FoldingSetImpl {
-private:
+template <class T> class FoldingSet final : public FoldingSetImpl<T> {
+ using Super = FoldingSetImpl<T>;
+ using Node = typename Super::Node;
+
/// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
@@ -442,45 +504,10 @@ private:
public:
explicit FoldingSet(unsigned Log2InitSize = 6)
- : FoldingSetImpl(Log2InitSize) {}
-
- FoldingSet(FoldingSet &&Arg) : FoldingSetImpl(std::move(Arg)) {}
- FoldingSet &operator=(FoldingSet &&RHS) {
- (void)FoldingSetImpl::operator=(std::move(RHS));
- return *this;
- }
-
- typedef FoldingSetIterator<T> iterator;
- iterator begin() { return iterator(Buckets); }
- iterator end() { return iterator(Buckets+NumBuckets); }
-
- typedef FoldingSetIterator<const T> const_iterator;
- const_iterator begin() const { return const_iterator(Buckets); }
- const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
-
- typedef FoldingSetBucketIterator<T> bucket_iterator;
-
- bucket_iterator bucket_begin(unsigned hash) {
- return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
- }
-
- bucket_iterator bucket_end(unsigned hash) {
- return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
- }
+ : Super(Log2InitSize) {}
- /// GetOrInsertNode - If there is an existing simple Node exactly
- /// equal to the specified node, return it. Otherwise, insert 'N' and
- /// return it instead.
- T *GetOrInsertNode(Node *N) {
- return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
- }
-
- /// FindNodeOrInsertPos - Look up the node specified by ID. If it exists,
- /// return it. If not, return the insertion token that will make insertion
- /// faster.
- T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
- return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
- }
+ FoldingSet(FoldingSet &&Arg) = default;
+ FoldingSet &operator=(FoldingSet &&RHS) = default;
};
//===----------------------------------------------------------------------===//
@@ -493,74 +520,42 @@ public:
/// function with signature
/// void Profile(FoldingSetNodeID &, Ctx);
template <class T, class Ctx>
-class ContextualFoldingSet final : public FoldingSetImpl {
+class ContextualFoldingSet final : public FoldingSetImpl<T> {
// Unfortunately, this can't derive from FoldingSet<T> because the
- // construction vtable for FoldingSet<T> requires
+ // construction of the vtable for FoldingSet<T> requires
// FoldingSet<T>::GetNodeProfile to be instantiated, which in turn
// requires a single-argument T::Profile().
-private:
+ using Super = FoldingSetImpl<T>;
+ using Node = typename Super::Node;
+
Ctx Context;
/// GetNodeProfile - Each instantiation of the FoldingSet needs to provide a
/// way to convert nodes into a unique specifier.
- void GetNodeProfile(FoldingSetImpl::Node *N,
- FoldingSetNodeID &ID) const override {
+ void GetNodeProfile(Node *N, FoldingSetNodeID &ID) const override {
T *TN = static_cast<T *>(N);
ContextualFoldingSetTrait<T, Ctx>::Profile(*TN, ID, Context);
}
- bool NodeEquals(FoldingSetImpl::Node *N, const FoldingSetNodeID &ID,
- unsigned IDHash, FoldingSetNodeID &TempID) const override {
+ bool NodeEquals(Node *N, const FoldingSetNodeID &ID, unsigned IDHash,
+ FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::Equals(*TN, ID, IDHash, TempID,
Context);
}
- unsigned ComputeNodeHash(FoldingSetImpl::Node *N,
- FoldingSetNodeID &TempID) const override {
+ unsigned ComputeNodeHash(Node *N, FoldingSetNodeID &TempID) const override {
T *TN = static_cast<T *>(N);
return ContextualFoldingSetTrait<T, Ctx>::ComputeHash(*TN, TempID, Context);
}
public:
explicit ContextualFoldingSet(Ctx Context, unsigned Log2InitSize = 6)
- : FoldingSetImpl(Log2InitSize), Context(Context)
+ : Super(Log2InitSize), Context(Context)
{}
Ctx getContext() const { return Context; }
-
- typedef FoldingSetIterator<T> iterator;
- iterator begin() { return iterator(Buckets); }
- iterator end() { return iterator(Buckets+NumBuckets); }
-
- typedef FoldingSetIterator<const T> const_iterator;
- const_iterator begin() const { return const_iterator(Buckets); }
- const_iterator end() const { return const_iterator(Buckets+NumBuckets); }
-
- typedef FoldingSetBucketIterator<T> bucket_iterator;
-
- bucket_iterator bucket_begin(unsigned hash) {
- return bucket_iterator(Buckets + (hash & (NumBuckets-1)));
- }
-
- bucket_iterator bucket_end(unsigned hash) {
- return bucket_iterator(Buckets + (hash & (NumBuckets-1)), true);
- }
-
- /// GetOrInsertNode - If there is an existing simple Node exactly
- /// equal to the specified node, return it. Otherwise, insert 'N'
- /// and return it instead.
- T *GetOrInsertNode(Node *N) {
- return static_cast<T *>(FoldingSetImpl::GetOrInsertNode(N));
- }
-
- /// FindNodeOrInsertPos - Look up the node specified by ID. If it
- /// exists, return it. If not, return the insertion token that will
- /// make insertion faster.
- T *FindNodeOrInsertPos(const FoldingSetNodeID &ID, void *&InsertPos) {
- return static_cast<T *>(FoldingSetImpl::FindNodeOrInsertPos(ID, InsertPos));
- }
};
//===----------------------------------------------------------------------===//
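The restructuring above moves the type-unsafe entry points into FoldingSetBase and re-exposes typed wrappers from the new FoldingSetImpl<T>, so client code is unchanged. It still follows the Profile/FindNodeOrInsertPos/InsertNode pattern described at the top of the header; a sketch with an invented node type:

#include "llvm/ADT/FoldingSet.h"

// A uniqued node: Profile() feeds the identity-defining bits to the set.
class ConstantNode : public llvm::FoldingSetNode {
  int Value;
public:
  explicit ConstantNode(int V) : Value(V) {}
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Value); }
};

ConstantNode *getOrCreate(llvm::FoldingSet<ConstantNode> &Set, int V) {
  llvm::FoldingSetNodeID ID;
  ID.AddInteger(V);

  void *InsertPos = nullptr;
  if (ConstantNode *Existing = Set.FindNodeOrInsertPos(ID, InsertPos))
    return Existing;                     // already uniqued

  ConstantNode *N = new ConstantNode(V); // the folding set does not own nodes
  Set.InsertNode(N, InsertPos);
  return N;
}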
diff --git a/include/llvm/ADT/GraphTraits.h b/include/llvm/ADT/GraphTraits.h
index 68149d9e3bf5..225d9eb847f0 100644
--- a/include/llvm/ADT/GraphTraits.h
+++ b/include/llvm/ADT/GraphTraits.h
@@ -1,4 +1,4 @@
-//===-- llvm/ADT/GraphTraits.h - Graph traits template ----------*- C++ -*-===//
+//===- llvm/ADT/GraphTraits.h - Graph traits template -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -41,7 +41,6 @@ struct GraphTraits {
// static ChildIteratorType child_end (NodeRef)
// Return iterators that point to the beginning and ending of the child
// node list for the specified node.
- //
// typedef ...iterator nodes_iterator; - dereference to a NodeRef
// static nodes_iterator nodes_begin(GraphType *G)
@@ -50,7 +49,6 @@ struct GraphTraits {
// static unsigned size (GraphType *G)
// Return total number of nodes in the graph
- //
// If anyone tries to use this class without having an appropriate
// specialization, make an error. If you get this error, it's because you
@@ -58,11 +56,9 @@ struct GraphTraits {
// graph, or you need to define it for a new graph type. Either that or
// your argument to XXX_begin(...) is unknown or needs to have the proper .h
// file #include'd.
- //
- typedef typename GraphType::UnknownGraphTypeError NodeRef;
+ using NodeRef = typename GraphType::UnknownGraphTypeError;
};
-
// Inverse - This class is used as a little marker class to tell the graph
// iterator to iterate over the graph in a graph defined "Inverse" ordering.
// Not all graphs define an inverse ordering, and if they do, it depends on
@@ -73,7 +69,7 @@ struct GraphTraits {
// for (; I != E; ++I) { ... }
//
// Which is equivalent to:
-// df_iterator<Inverse<Method*> > I = idf_begin(M), E = idf_end(M);
+// df_iterator<Inverse<Method*>> I = idf_begin(M), E = idf_end(M);
// for (; I != E; ++I) { ... }
//
template <class GraphType>
@@ -114,6 +110,7 @@ inverse_children(const typename GraphTraits<GraphType>::NodeRef &G) {
return make_range(GraphTraits<Inverse<GraphType>>::child_begin(G),
GraphTraits<Inverse<GraphType>>::child_end(G));
}
-} // End llvm namespace
-#endif
+} // end namespace llvm
+
+#endif // LLVM_ADT_GRAPHTRAITS_H
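The comment block above lists what a GraphTraits specialization must provide (NodeRef, ChildIteratorType, getEntryNode, child_begin/child_end). A minimal sketch for a toy adjacency-list graph, with all names invented for illustration:

#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallVector.h"

// Toy graph: each node owns the list of its successors.
struct ToyNode {
  llvm::SmallVector<ToyNode *, 4> Succs;
};

struct ToyGraph {
  ToyNode *Entry;
};

namespace llvm {
template <> struct GraphTraits<ToyGraph *> {
  using NodeRef = ToyNode *;
  using ChildIteratorType = SmallVector<ToyNode *, 4>::const_iterator;

  static NodeRef getEntryNode(ToyGraph *G) { return G->Entry; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Succs.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Succs.end(); }
};
} // end namespace llvm

With that specialization in scope, the generic algorithms (depth_first, breadth_first, and friends) accept a ToyGraph* directly.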
diff --git a/include/llvm/ADT/ImmutableList.h b/include/llvm/ADT/ImmutableList.h
index e5f51bafe995..60d63e09d426 100644
--- a/include/llvm/ADT/ImmutableList.h
+++ b/include/llvm/ADT/ImmutableList.h
@@ -63,8 +63,8 @@ public:
template <typename T>
class ImmutableList {
public:
- typedef T value_type;
- typedef ImmutableListFactory<T> Factory;
+ using value_type = T;
+ using Factory = ImmutableListFactory<T>;
private:
const ImmutableListImpl<T>* X;
@@ -141,8 +141,8 @@ public:
template <typename T>
class ImmutableListFactory {
- typedef ImmutableListImpl<T> ListTy;
- typedef FoldingSet<ListTy> CacheTy;
+ using ListTy = ImmutableListImpl<T>;
+ using CacheTy = FoldingSet<ListTy>;
CacheTy Cache;
uintptr_t Allocator;
diff --git a/include/llvm/ADT/ImmutableMap.h b/include/llvm/ADT/ImmutableMap.h
index f197d407ba3b..10d1e1f0139b 100644
--- a/include/llvm/ADT/ImmutableMap.h
+++ b/include/llvm/ADT/ImmutableMap.h
@@ -26,12 +26,12 @@ namespace llvm {
/// only the first element (the key) is used by isEqual and isLess.
template <typename T, typename S>
struct ImutKeyValueInfo {
- typedef const std::pair<T,S> value_type;
- typedef const value_type& value_type_ref;
- typedef const T key_type;
- typedef const T& key_type_ref;
- typedef const S data_type;
- typedef const S& data_type_ref;
+ using value_type = const std::pair<T,S>;
+ using value_type_ref = const value_type&;
+ using key_type = const T;
+ using key_type_ref = const T&;
+ using data_type = const S;
+ using data_type_ref = const S&;
static inline key_type_ref KeyOfValue(value_type_ref V) {
return V.first;
@@ -62,13 +62,13 @@ template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMap {
public:
- typedef typename ValInfo::value_type value_type;
- typedef typename ValInfo::value_type_ref value_type_ref;
- typedef typename ValInfo::key_type key_type;
- typedef typename ValInfo::key_type_ref key_type_ref;
- typedef typename ValInfo::data_type data_type;
- typedef typename ValInfo::data_type_ref data_type_ref;
- typedef ImutAVLTree<ValInfo> TreeTy;
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
protected:
TreeTy* Root;
@@ -86,6 +86,10 @@ public:
if (Root) { Root->retain(); }
}
+ ~ImmutableMap() {
+ if (Root) { Root->release(); }
+ }
+
ImmutableMap &operator=(const ImmutableMap &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@@ -95,10 +99,6 @@ public:
return *this;
}
- ~ImmutableMap() {
- if (Root) { Root->release(); }
- }
-
class Factory {
typename TreeTy::Factory F;
const bool Canonicalize;
@@ -166,12 +166,14 @@ private:
template <typename Callback>
struct CBWrapper {
Callback C;
+
void operator()(value_type_ref V) { C(V.first,V.second); }
};
template <typename Callback>
struct CBWrapperRef {
Callback &C;
+
CBWrapperRef(Callback& c) : C(c) {}
void operator()(value_type_ref V) { C(V.first,V.second); }
@@ -254,14 +256,14 @@ template <typename KeyT, typename ValT,
typename ValInfo = ImutKeyValueInfo<KeyT,ValT>>
class ImmutableMapRef {
public:
- typedef typename ValInfo::value_type value_type;
- typedef typename ValInfo::value_type_ref value_type_ref;
- typedef typename ValInfo::key_type key_type;
- typedef typename ValInfo::key_type_ref key_type_ref;
- typedef typename ValInfo::data_type data_type;
- typedef typename ValInfo::data_type_ref data_type_ref;
- typedef ImutAVLTree<ValInfo> TreeTy;
- typedef typename TreeTy::Factory FactoryTy;
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using key_type = typename ValInfo::key_type;
+ using key_type_ref = typename ValInfo::key_type_ref;
+ using data_type = typename ValInfo::data_type;
+ using data_type_ref = typename ValInfo::data_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
protected:
TreeTy *Root;
@@ -292,6 +294,11 @@ public:
}
}
+ ~ImmutableMapRef() {
+ if (Root)
+ Root->release();
+ }
+
ImmutableMapRef &operator=(const ImmutableMapRef &X) {
if (Root != X.Root) {
if (X.Root)
@@ -306,11 +313,6 @@ public:
return *this;
}
- ~ImmutableMapRef() {
- if (Root)
- Root->release();
- }
-
static inline ImmutableMapRef getEmptyMap(FactoryTy *F) {
return ImmutableMapRef(0, F);
}
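The hunks above only reorder the destructor relative to the assignment operators; the persistent-map usage they support is unchanged. A sketch (assuming the Factory add/remove interface, which lies outside the shown hunks):

#include "llvm/ADT/ImmutableMap.h"

void immutableMapSketch() {
  llvm::ImmutableMap<int, int>::Factory F;

  // Every "mutation" returns a new map; the AVL nodes are shared and
  // reference-counted, which is what the retain/release calls manage.
  llvm::ImmutableMap<int, int> Empty = F.getEmptyMap();
  llvm::ImmutableMap<int, int> One = F.add(Empty, 1, 10);
  llvm::ImmutableMap<int, int> Two = F.add(One, 2, 20);

  const int *V = Two.lookup(1); // nullptr if the key is absent
  (void)V;
  llvm::ImmutableMap<int, int> Back = F.remove(Two, 2);
  (void)Back;
}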
diff --git a/include/llvm/ADT/ImmutableSet.h b/include/llvm/ADT/ImmutableSet.h
index 9c9bcb81f76b..9d580c5a3d41 100644
--- a/include/llvm/ADT/ImmutableSet.h
+++ b/include/llvm/ADT/ImmutableSet.h
@@ -41,18 +41,16 @@ template <typename ImutInfo> class ImutAVLTreeGenericIterator;
template <typename ImutInfo >
class ImutAVLTree {
public:
- typedef typename ImutInfo::key_type_ref key_type_ref;
- typedef typename ImutInfo::value_type value_type;
- typedef typename ImutInfo::value_type_ref value_type_ref;
+ using key_type_ref = typename ImutInfo::key_type_ref;
+ using value_type = typename ImutInfo::value_type;
+ using value_type_ref = typename ImutInfo::value_type_ref;
+ using Factory = ImutAVLFactory<ImutInfo>;
+ using iterator = ImutAVLTreeInOrderIterator<ImutInfo>;
- typedef ImutAVLFactory<ImutInfo> Factory;
friend class ImutAVLFactory<ImutInfo>;
friend class ImutIntervalAVLFactory<ImutInfo>;
-
friend class ImutAVLTreeGenericIterator<ImutInfo>;
- typedef ImutAVLTreeInOrderIterator<ImutInfo> iterator;
-
//===----------------------------------------------------===//
// Public Interface.
//===----------------------------------------------------===//
@@ -225,17 +223,17 @@ private:
Factory *factory;
ImutAVLTree *left;
ImutAVLTree *right;
- ImutAVLTree *prev;
- ImutAVLTree *next;
+ ImutAVLTree *prev = nullptr;
+ ImutAVLTree *next = nullptr;
- unsigned height : 28;
- unsigned IsMutable : 1;
- unsigned IsDigestCached : 1;
- unsigned IsCanonicalized : 1;
+ unsigned height : 28;
+ bool IsMutable : 1;
+ bool IsDigestCached : 1;
+ bool IsCanonicalized : 1;
value_type value;
- uint32_t digest;
- uint32_t refCount;
+ uint32_t digest = 0;
+ uint32_t refCount = 0;
//===----------------------------------------------------===//
// Internal methods (node manipulation; used by Factory).
@@ -246,9 +244,8 @@ private:
/// ImutAVLFactory.
ImutAVLTree(Factory *f, ImutAVLTree* l, ImutAVLTree* r, value_type_ref v,
unsigned height)
- : factory(f), left(l), right(r), prev(nullptr), next(nullptr),
- height(height), IsMutable(true), IsDigestCached(false),
- IsCanonicalized(0), value(v), digest(0), refCount(0)
+ : factory(f), left(l), right(r), height(height), IsMutable(true),
+ IsDigestCached(false), IsCanonicalized(false), value(v)
{
if (left) left->retain();
if (right) right->retain();
@@ -369,11 +366,11 @@ public:
template <typename ImutInfo >
class ImutAVLFactory {
friend class ImutAVLTree<ImutInfo>;
- typedef ImutAVLTree<ImutInfo> TreeTy;
- typedef typename TreeTy::value_type_ref value_type_ref;
- typedef typename TreeTy::key_type_ref key_type_ref;
- typedef DenseMap<unsigned, TreeTy*> CacheTy;
+ using TreeTy = ImutAVLTree<ImutInfo>;
+ using value_type_ref = typename TreeTy::value_type_ref;
+ using key_type_ref = typename TreeTy::key_type_ref;
+ using CacheTy = DenseMap<unsigned, TreeTy*>;
CacheTy Cache;
uintptr_t Allocator;
@@ -659,7 +656,7 @@ public:
enum VisitFlag { VisitedNone=0x0, VisitedLeft=0x1, VisitedRight=0x3,
Flags=0x3 };
- typedef ImutAVLTree<ImutInfo> TreeTy;
+ using TreeTy = ImutAVLTree<ImutInfo>;
ImutAVLTreeGenericIterator() = default;
ImutAVLTreeGenericIterator(const TreeTy *Root) {
@@ -764,11 +761,12 @@ template <typename ImutInfo>
class ImutAVLTreeInOrderIterator
: public std::iterator<std::bidirectional_iterator_tag,
ImutAVLTree<ImutInfo>> {
- typedef ImutAVLTreeGenericIterator<ImutInfo> InternalIteratorTy;
+ using InternalIteratorTy = ImutAVLTreeGenericIterator<ImutInfo>;
+
InternalIteratorTy InternalItr;
public:
- typedef ImutAVLTree<ImutInfo> TreeTy;
+ using TreeTy = ImutAVLTree<ImutInfo>;
ImutAVLTreeInOrderIterator(const TreeTy* Root) : InternalItr(Root) {
if (Root)
@@ -840,8 +838,8 @@ struct ImutAVLValueIterator
/// and generic handling of pointers is done below.
template <typename T>
struct ImutProfileInfo {
- typedef const T value_type;
- typedef const T& value_type_ref;
+ using value_type = const T;
+ using value_type_ref = const T&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
FoldingSetTrait<T>::Profile(X,ID);
@@ -851,8 +849,8 @@ struct ImutProfileInfo {
/// Profile traits for integers.
template <typename T>
struct ImutProfileInteger {
- typedef const T value_type;
- typedef const T& value_type_ref;
+ using value_type = const T;
+ using value_type_ref = const T&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddInteger(X);
@@ -878,8 +876,8 @@ PROFILE_INTEGER_INFO(unsigned long long)
/// Profile traits for booleans.
template <>
struct ImutProfileInfo<bool> {
- typedef const bool value_type;
- typedef const bool& value_type_ref;
+ using value_type = const bool;
+ using value_type_ref = const bool&;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddBoolean(X);
@@ -890,8 +888,8 @@ struct ImutProfileInfo<bool> {
/// references to unique objects.
template <typename T>
struct ImutProfileInfo<T*> {
- typedef const T* value_type;
- typedef value_type value_type_ref;
+ using value_type = const T*;
+ using value_type_ref = value_type;
static void Profile(FoldingSetNodeID &ID, value_type_ref X) {
ID.AddPointer(X);
@@ -910,12 +908,12 @@ struct ImutProfileInfo<T*> {
/// std::equal_to<> and std::less<> to perform comparison of elements.
template <typename T>
struct ImutContainerInfo : public ImutProfileInfo<T> {
- typedef typename ImutProfileInfo<T>::value_type value_type;
- typedef typename ImutProfileInfo<T>::value_type_ref value_type_ref;
- typedef value_type key_type;
- typedef value_type_ref key_type_ref;
- typedef bool data_type;
- typedef bool data_type_ref;
+ using value_type = typename ImutProfileInfo<T>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
static key_type_ref KeyOfValue(value_type_ref D) { return D; }
static data_type_ref DataOfValue(value_type_ref) { return true; }
@@ -936,12 +934,12 @@ struct ImutContainerInfo : public ImutProfileInfo<T> {
/// their addresses.
template <typename T>
struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
- typedef typename ImutProfileInfo<T*>::value_type value_type;
- typedef typename ImutProfileInfo<T*>::value_type_ref value_type_ref;
- typedef value_type key_type;
- typedef value_type_ref key_type_ref;
- typedef bool data_type;
- typedef bool data_type_ref;
+ using value_type = typename ImutProfileInfo<T*>::value_type;
+ using value_type_ref = typename ImutProfileInfo<T*>::value_type_ref;
+ using key_type = value_type;
+ using key_type_ref = value_type_ref;
+ using data_type = bool;
+ using data_type_ref = bool;
static key_type_ref KeyOfValue(value_type_ref D) { return D; }
static data_type_ref DataOfValue(value_type_ref) { return true; }
@@ -960,9 +958,9 @@ struct ImutContainerInfo<T*> : public ImutProfileInfo<T*> {
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSet {
public:
- typedef typename ValInfo::value_type value_type;
- typedef typename ValInfo::value_type_ref value_type_ref;
- typedef ImutAVLTree<ValInfo> TreeTy;
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
private:
TreeTy *Root;
@@ -980,6 +978,10 @@ public:
if (Root) { Root->retain(); }
}
+ ~ImmutableSet() {
+ if (Root) { Root->release(); }
+ }
+
ImmutableSet &operator=(const ImmutableSet &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@@ -989,10 +991,6 @@ public:
return *this;
}
- ~ImmutableSet() {
- if (Root) { Root->release(); }
- }
-
class Factory {
typename TreeTy::Factory F;
const bool Canonicalize;
@@ -1084,7 +1082,7 @@ public:
// Iterators.
//===--------------------------------------------------===//
- typedef ImutAVLValueIterator<ImmutableSet> iterator;
+ using iterator = ImutAVLValueIterator<ImmutableSet>;
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
@@ -1112,10 +1110,10 @@ public:
template <typename ValT, typename ValInfo = ImutContainerInfo<ValT>>
class ImmutableSetRef {
public:
- typedef typename ValInfo::value_type value_type;
- typedef typename ValInfo::value_type_ref value_type_ref;
- typedef ImutAVLTree<ValInfo> TreeTy;
- typedef typename TreeTy::Factory FactoryTy;
+ using value_type = typename ValInfo::value_type;
+ using value_type_ref = typename ValInfo::value_type_ref;
+ using TreeTy = ImutAVLTree<ValInfo>;
+ using FactoryTy = typename TreeTy::Factory;
private:
TreeTy *Root;
@@ -1138,6 +1136,10 @@ public:
if (Root) { Root->retain(); }
}
+ ~ImmutableSetRef() {
+ if (Root) { Root->release(); }
+ }
+
ImmutableSetRef &operator=(const ImmutableSetRef &X) {
if (Root != X.Root) {
if (X.Root) { X.Root->retain(); }
@@ -1147,9 +1149,6 @@ public:
}
return *this;
}
- ~ImmutableSetRef() {
- if (Root) { Root->release(); }
- }
static ImmutableSetRef getEmptySet(FactoryTy *F) {
return ImmutableSetRef(0, F);
@@ -1196,7 +1195,7 @@ public:
// Iterators.
//===--------------------------------------------------===//
- typedef ImutAVLValueIterator<ImmutableSetRef> iterator;
+ using iterator = ImutAVLValueIterator<ImmutableSetRef>;
iterator begin() const { return iterator(Root); }
iterator end() const { return iterator(); }
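Since the hunks above only move the destructor next to the copy constructor and modernize the typedefs, the ownership model is hard to see from the diff alone. A minimal sketch of the public Factory interface, not part of the patch and assuming only what is visible in this header:

  #include "llvm/ADT/ImmutableSet.h"

  void immutableSetSketch() {
    llvm::ImmutableSet<int>::Factory F;            // owns the shared AVL tree nodes
    llvm::ImmutableSet<int> Empty = F.getEmptySet();
    llvm::ImmutableSet<int> S1 = F.add(Empty, 3);  // returns a new set; Empty is untouched
    llvm::ImmutableSet<int> S2 = F.remove(S1, 3);  // trees are shared via retain()/release()
    (void)S2;
  }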
diff --git a/include/llvm/ADT/IndexedMap.h b/include/llvm/ADT/IndexedMap.h
index 5ba85c027920..2ee80d2cde63 100644
--- a/include/llvm/ADT/IndexedMap.h
+++ b/include/llvm/ADT/IndexedMap.h
@@ -20,28 +20,28 @@
#ifndef LLVM_ADT_INDEXEDMAP_H
#define LLVM_ADT_INDEXEDMAP_H
-#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
+#include "llvm/ADT/STLExtras.h"
#include <cassert>
-#include <functional>
namespace llvm {
-template <typename T, typename ToIndexT = llvm::identity<unsigned> >
+template <typename T, typename ToIndexT = identity<unsigned>>
class IndexedMap {
- typedef typename ToIndexT::argument_type IndexT;
+ using IndexT = typename ToIndexT::argument_type;
// Prefer SmallVector with zero inline storage over std::vector. IndexedMaps
// can grow very large and SmallVector grows more efficiently as long as T
// is trivially copyable.
- typedef SmallVector<T, 0> StorageT;
+ using StorageT = SmallVector<T, 0>;
+
StorageT storage_;
T nullVal_;
ToIndexT toIndex_;
public:
- IndexedMap() : nullVal_(T()) { }
+ IndexedMap() : nullVal_(T()) {}
- explicit IndexedMap(const T& val) : nullVal_(val) { }
+ explicit IndexedMap(const T& val) : nullVal_(val) {}
typename StorageT::reference operator[](IndexT n) {
assert(toIndex_(n) < storage_.size() && "index out of bounds!");
@@ -80,6 +80,6 @@ template <typename T, typename ToIndexT = llvm::identity<unsigned> >
}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_INDEXEDMAP_H
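The comment above motivates the SmallVector-backed storage; a short usage sketch, not from the patch, using the default identity<unsigned> index functor and an arbitrary -1 as the "missing" value:

  #include "llvm/ADT/IndexedMap.h"

  void indexedMapSketch() {
    llvm::IndexedMap<int> Map(-1);  // slots that were grown but never written read back as -1
    Map.grow(41);                   // ensure index 41 is in bounds; new slots get the null value
    Map[41] = 7;                    // operator[] asserts that the index is in bounds
  }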
diff --git a/include/llvm/ADT/IntervalMap.h b/include/llvm/ADT/IntervalMap.h
index 430b9671bd1d..f71366811218 100644
--- a/include/llvm/ADT/IntervalMap.h
+++ b/include/llvm/ADT/IntervalMap.h
@@ -106,6 +106,7 @@
#include "llvm/Support/RecyclingAllocator.h"
#include <algorithm>
#include <cassert>
+#include <cstdint>
#include <iterator>
#include <new>
#include <utility>
@@ -186,7 +187,7 @@ struct IntervalMapHalfOpenInfo {
/// It should be considered private to the implementation.
namespace IntervalMapImpl {
-typedef std::pair<unsigned,unsigned> IdxPair;
+using IdxPair = std::pair<unsigned,unsigned>;
//===----------------------------------------------------------------------===//
//--- IntervalMapImpl::NodeBase ---//
@@ -445,7 +446,7 @@ struct NodeSizer {
LeafSize = DesiredLeafSize > MinLeafSize ? DesiredLeafSize : MinLeafSize
};
- typedef NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize> LeafBase;
+ using LeafBase = NodeBase<std::pair<KeyT, KeyT>, ValT, LeafSize>;
enum {
// Now that we have the leaf branching factor, compute the actual allocation
@@ -461,8 +462,8 @@ struct NodeSizer {
/// This typedef is very likely to be identical for all IntervalMaps with
/// reasonably sized entries, so the same allocator can be shared among
/// different kinds of maps.
- typedef RecyclingAllocator<BumpPtrAllocator, char,
- AllocBytes, CacheLineBytes> Allocator;
+ using Allocator =
+ RecyclingAllocator<BumpPtrAllocator, char, AllocBytes, CacheLineBytes>;
};
//===----------------------------------------------------------------------===//
@@ -930,12 +931,12 @@ template <typename KeyT, typename ValT,
unsigned N = IntervalMapImpl::NodeSizer<KeyT, ValT>::LeafSize,
typename Traits = IntervalMapInfo<KeyT>>
class IntervalMap {
- typedef IntervalMapImpl::NodeSizer<KeyT, ValT> Sizer;
- typedef IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits> Leaf;
- typedef IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>
- Branch;
- typedef IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits> RootLeaf;
- typedef IntervalMapImpl::IdxPair IdxPair;
+ using Sizer = IntervalMapImpl::NodeSizer<KeyT, ValT>;
+ using Leaf = IntervalMapImpl::LeafNode<KeyT, ValT, Sizer::LeafSize, Traits>;
+ using Branch =
+ IntervalMapImpl::BranchNode<KeyT, ValT, Sizer::BranchSize, Traits>;
+ using RootLeaf = IntervalMapImpl::LeafNode<KeyT, ValT, N, Traits>;
+ using IdxPair = IntervalMapImpl::IdxPair;
// The RootLeaf capacity is given as a template parameter. We must compute the
// corresponding RootBranch capacity.
@@ -945,8 +946,8 @@ class IntervalMap {
RootBranchCap = DesiredRootBranchCap ? DesiredRootBranchCap : 1
};
- typedef IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>
- RootBranch;
+ using RootBranch =
+ IntervalMapImpl::BranchNode<KeyT, ValT, RootBranchCap, Traits>;
// When branched, we store a global start key as well as the branch node.
struct RootBranchData {
@@ -955,10 +956,10 @@ class IntervalMap {
};
public:
- typedef typename Sizer::Allocator Allocator;
- typedef KeyT KeyType;
- typedef ValT ValueType;
- typedef Traits KeyTraits;
+ using Allocator = typename Sizer::Allocator;
+ using KeyType = KeyT;
+ using ValueType = ValT;
+ using KeyTraits = Traits;
private:
// The root data is either a RootLeaf or a RootBranchData instance.
@@ -1290,7 +1291,7 @@ protected:
friend class IntervalMap;
// The map referred to.
- IntervalMap *map;
+ IntervalMap *map = nullptr;
// We store a full path from the root to the current position.
// The path may be partially filled, but never between iterator calls.
@@ -1338,7 +1339,7 @@ protected:
public:
/// const_iterator - Create an iterator that isn't pointing anywhere.
- const_iterator() : map(nullptr) {}
+ const_iterator() = default;
/// setMap - Change the map iterated over. This call must be followed by a
/// call to goToBegin(), goToEnd(), or find()
@@ -1509,7 +1510,8 @@ const_iterator::treeAdvanceTo(KeyT x) {
template <typename KeyT, typename ValT, unsigned N, typename Traits>
class IntervalMap<KeyT, ValT, N, Traits>::iterator : public const_iterator {
friend class IntervalMap;
- typedef IntervalMapImpl::IdxPair IdxPair;
+
+ using IdxPair = IntervalMapImpl::IdxPair;
explicit iterator(IntervalMap &map) : const_iterator(map) {}
@@ -2003,7 +2005,7 @@ iterator::overflow(unsigned Level) {
// Elements have been rearranged, now update node sizes and stops.
bool SplitRoot = false;
unsigned Pos = 0;
- for (;;) {
+ while (true) {
KeyT Stop = Node[Pos]->stop(NewSize[Pos]-1);
if (NewNode && Pos == NewNode) {
SplitRoot = insertNode(Level, NodeRef(Node[Pos], NewSize[Pos]), Stop);
@@ -2045,8 +2047,9 @@ iterator::overflow(unsigned Level) {
///
template <typename MapA, typename MapB>
class IntervalMapOverlaps {
- typedef typename MapA::KeyType KeyType;
- typedef typename MapA::KeyTraits Traits;
+ using KeyType = typename MapA::KeyType;
+ using Traits = typename MapA::KeyTraits;
+
typename MapA::const_iterator posA;
typename MapB::const_iterator posB;
@@ -2071,7 +2074,7 @@ class IntervalMapOverlaps {
// Already overlapping.
return;
- for (;;) {
+ while (true) {
// Make a.end > b.start.
posA.advanceTo(posB.start());
if (!posA.valid() || !Traits::stopLess(posB.stop(), posA.start()))
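The Allocator typedef above is documented as shareable across maps with similar entry sizes; a hedged sketch of that pattern (key and value types are arbitrary examples, and the default traits treat intervals as closed and coalescing):

  #include "llvm/ADT/IntervalMap.h"

  void intervalMapSketch() {
    using Map = llvm::IntervalMap<unsigned, unsigned>;
    Map::Allocator Alloc;       // one RecyclingAllocator shared by both maps below
    Map A(Alloc), B(Alloc);
    A.insert(10, 20, 1);        // map the interval [10, 20] to the value 1
    unsigned V = A.lookup(15);  // 1; keys outside every interval yield ValueT()
    (void)V; (void)B;
  }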
diff --git a/include/llvm/ADT/IntrusiveRefCntPtr.h b/include/llvm/ADT/IntrusiveRefCntPtr.h
index a77cf04ea4d1..430ef86afbd9 100644
--- a/include/llvm/ADT/IntrusiveRefCntPtr.h
+++ b/include/llvm/ADT/IntrusiveRefCntPtr.h
@@ -1,4 +1,4 @@
-//== llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer ---*- C++ -*-==//
+//==- llvm/ADT/IntrusiveRefCntPtr.h - Smart Refcounting Pointer --*- C++ -*-==//
//
// The LLVM Compiler Infrastructure
//
@@ -73,9 +73,10 @@ template <class Derived> class RefCountedBase {
public:
RefCountedBase() = default;
- RefCountedBase(const RefCountedBase &) : RefCount(0) {}
+ RefCountedBase(const RefCountedBase &) {}
void Retain() const { ++RefCount; }
+
void Release() const {
assert(RefCount > 0 && "Reference count is already zero.");
if (--RefCount == 0)
@@ -136,7 +137,7 @@ template <typename T> class IntrusiveRefCntPtr {
T *Obj = nullptr;
public:
- typedef T element_type;
+ using element_type = T;
explicit IntrusiveRefCntPtr() = default;
IntrusiveRefCntPtr(T *obj) : Obj(obj) { retain(); }
@@ -153,13 +154,13 @@ public:
retain();
}
+ ~IntrusiveRefCntPtr() { release(); }
+
IntrusiveRefCntPtr &operator=(IntrusiveRefCntPtr S) {
swap(S);
return *this;
}
- ~IntrusiveRefCntPtr() { release(); }
-
T &operator*() const { return *Obj; }
T *operator->() const { return Obj; }
T *get() const { return Obj; }
@@ -183,6 +184,7 @@ private:
if (Obj)
IntrusiveRefCntPtrInfo<T>::retain(Obj);
}
+
void release() {
if (Obj)
IntrusiveRefCntPtrInfo<T>::release(Obj);
@@ -248,14 +250,16 @@ bool operator!=(const IntrusiveRefCntPtr<T> &A, std::nullptr_t B) {
template <typename From> struct simplify_type;
template <class T> struct simplify_type<IntrusiveRefCntPtr<T>> {
- typedef T *SimpleType;
+ using SimpleType = T *;
+
static SimpleType getSimplifiedValue(IntrusiveRefCntPtr<T> &Val) {
return Val.get();
}
};
template <class T> struct simplify_type<const IntrusiveRefCntPtr<T>> {
- typedef /*const*/ T *SimpleType;
+ using SimpleType = /*const*/ T *;
+
static SimpleType getSimplifiedValue(const IntrusiveRefCntPtr<T> &Val) {
return Val.get();
}
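The copy-constructor change above stops re-initializing the count explicitly, presumably because RefCount now carries an in-class initializer; either way a copied object starts at zero references. A minimal sketch of the intrusive ownership model (MyNode is a made-up payload type):

  #include "llvm/ADT/IntrusiveRefCntPtr.h"

  struct MyNode : llvm::RefCountedBase<MyNode> {};   // hypothetical example type

  void refCntSketch() {
    llvm::IntrusiveRefCntPtr<MyNode> A(new MyNode()); // Retain(): count is 1
    {
      llvm::IntrusiveRefCntPtr<MyNode> B = A;         // Retain(): count is 2
    }                                                 // B released: count back to 1
  }                                                   // A released: count hits 0, node deleted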
diff --git a/include/llvm/ADT/MapVector.h b/include/llvm/ADT/MapVector.h
index ac1885758cb9..26a555ee1d3b 100644
--- a/include/llvm/ADT/MapVector.h
+++ b/include/llvm/ADT/MapVector.h
@@ -19,6 +19,12 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <iterator>
+#include <type_traits>
+#include <utility>
#include <vector>
namespace llvm {
@@ -27,20 +33,20 @@ namespace llvm {
/// in a deterministic order. The values are kept in a std::vector and the
/// mapping is done with DenseMap from Keys to indexes in that vector.
template<typename KeyT, typename ValueT,
- typename MapType = llvm::DenseMap<KeyT, unsigned>,
- typename VectorType = std::vector<std::pair<KeyT, ValueT> > >
+ typename MapType = DenseMap<KeyT, unsigned>,
+ typename VectorType = std::vector<std::pair<KeyT, ValueT>>>
class MapVector {
- typedef typename VectorType::value_type value_type;
- typedef typename VectorType::size_type size_type;
+ using value_type = typename VectorType::value_type;
+ using size_type = typename VectorType::size_type;
MapType Map;
VectorType Vector;
public:
- typedef typename VectorType::iterator iterator;
- typedef typename VectorType::const_iterator const_iterator;
- typedef typename VectorType::reverse_iterator reverse_iterator;
- typedef typename VectorType::const_reverse_iterator const_reverse_iterator;
+ using iterator = typename VectorType::iterator;
+ using const_iterator = typename VectorType::const_iterator;
+ using reverse_iterator = typename VectorType::reverse_iterator;
+ using const_reverse_iterator = typename VectorType::const_reverse_iterator;
/// Clear the MapVector and return the underlying vector.
VectorType takeVector() {
@@ -220,4 +226,4 @@ struct SmallMapVector
} // end namespace llvm
-#endif
+#endif // LLVM_ADT_MAPVECTOR_H
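A hedged sketch of the deterministic-order behaviour the class comment describes (keys and values chosen arbitrarily; StringRef keys rely on the DenseMapInfo specialization from StringRef.h):

  #include "llvm/ADT/MapVector.h"
  #include "llvm/ADT/StringRef.h"

  void mapVectorSketch() {
    llvm::MapVector<llvm::StringRef, int> MV;
    MV["b"] = 2;                // the DenseMap stores the index, the vector stores the pair
    MV["a"] = 1;
    for (auto &KV : MV)         // iterates in insertion order: "b" first, then "a"
      (void)KV;
    auto Vec = MV.takeVector(); // hands back the underlying vector and clears the map
    (void)Vec;
  }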
diff --git a/include/llvm/ADT/Optional.h b/include/llvm/ADT/Optional.h
index 701872c9f63f..b782d9da17ac 100644
--- a/include/llvm/ADT/Optional.h
+++ b/include/llvm/ADT/Optional.h
@@ -1,4 +1,4 @@
-//===-- Optional.h - Simple variant for passing optional values ---*- C++ -*-=//
+//===- Optional.h - Simple variant for passing optional values --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -19,6 +19,8 @@
#include "llvm/ADT/None.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/Compiler.h"
+#include "llvm/Support/type_traits.h"
+#include <algorithm>
#include <cassert>
#include <new>
#include <utility>
@@ -28,15 +30,18 @@ namespace llvm {
template<typename T>
class Optional {
AlignedCharArrayUnion<T> storage;
- bool hasVal;
+ bool hasVal = false;
+
public:
- typedef T value_type;
+ using value_type = T;
+
+ Optional(NoneType) {}
+ explicit Optional() {}
- Optional(NoneType) : hasVal(false) {}
- explicit Optional() : hasVal(false) {}
Optional(const T &y) : hasVal(true) {
new (storage.buffer) T(y);
}
+
Optional(const Optional &O) : hasVal(O.hasVal) {
if (hasVal)
new (storage.buffer) T(*O);
@@ -45,12 +50,18 @@ public:
Optional(T &&y) : hasVal(true) {
new (storage.buffer) T(std::forward<T>(y));
}
+
Optional(Optional<T> &&O) : hasVal(O) {
if (O) {
new (storage.buffer) T(std::move(*O));
O.reset();
}
}
+
+ ~Optional() {
+ reset();
+ }
+
Optional &operator=(T &&y) {
if (hasVal)
**this = std::move(y);
@@ -60,6 +71,7 @@ public:
}
return *this;
}
+
Optional &operator=(Optional &&O) {
if (!O)
reset();
@@ -112,10 +124,6 @@ public:
}
}
- ~Optional() {
- reset();
- }
-
const T* getPointer() const { assert(hasVal); return reinterpret_cast<const T*>(storage.buffer); }
T* getPointer() { assert(hasVal); return reinterpret_cast<T*>(storage.buffer); }
const T& getValue() const LLVM_LVALUE_FUNCTION { assert(hasVal); return *getPointer(); }
@@ -144,8 +152,7 @@ public:
#endif
};
-template <typename T> struct isPodLike;
-template <typename T> struct isPodLike<Optional<T> > {
+template <typename T> struct isPodLike<Optional<T>> {
// An Optional<T> is pod-like if T is.
static const bool value = isPodLike<T>::value;
};
@@ -284,6 +291,6 @@ template <typename T> bool operator>=(const T &X, const Optional<T> &Y) {
return !(X < Y);
}
-} // end llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_OPTIONAL_H
diff --git a/include/llvm/ADT/PackedVector.h b/include/llvm/ADT/PackedVector.h
index 8f925f1ff5cb..95adc2926813 100644
--- a/include/llvm/ADT/PackedVector.h
+++ b/include/llvm/ADT/PackedVector.h
@@ -76,8 +76,8 @@ template <typename T, unsigned BitNum, typename BitVectorTy = BitVector>
class PackedVector : public PackedVectorBase<T, BitNum, BitVectorTy,
std::numeric_limits<T>::is_signed> {
BitVectorTy Bits;
- typedef PackedVectorBase<T, BitNum, BitVectorTy,
- std::numeric_limits<T>::is_signed> base;
+ using base = PackedVectorBase<T, BitNum, BitVectorTy,
+ std::numeric_limits<T>::is_signed>;
public:
class reference {
@@ -99,7 +99,7 @@ public:
};
PackedVector() = default;
- explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) { }
+ explicit PackedVector(unsigned size) : Bits(size << (BitNum-1)) {}
bool empty() const { return Bits.empty(); }
diff --git a/include/llvm/ADT/PointerEmbeddedInt.h b/include/llvm/ADT/PointerEmbeddedInt.h
index 2279d43405fa..34323b5b8af4 100644
--- a/include/llvm/ADT/PointerEmbeddedInt.h
+++ b/include/llvm/ADT/PointerEmbeddedInt.h
@@ -13,7 +13,10 @@
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/PointerLikeTypeTraits.h"
+#include <cassert>
#include <climits>
+#include <cstdint>
+#include <type_traits>
namespace llvm {
@@ -29,7 +32,7 @@ namespace llvm {
/// Also, the default constructed value zero initializes the integer.
template <typename IntT, int Bits = sizeof(IntT) * CHAR_BIT>
class PointerEmbeddedInt {
- uintptr_t Value;
+ uintptr_t Value = 0;
// Note: This '<' is correct; using '<=' would result in some shifts
// overflowing their storage types.
@@ -54,15 +57,12 @@ class PointerEmbeddedInt {
explicit PointerEmbeddedInt(uintptr_t Value, RawValueTag) : Value(Value) {}
public:
- PointerEmbeddedInt() : Value(0) {}
+ PointerEmbeddedInt() = default;
- PointerEmbeddedInt(IntT I) {
- *this = I;
- }
+ PointerEmbeddedInt(IntT I) { *this = I; }
PointerEmbeddedInt &operator=(IntT I) {
- assert((std::is_signed<IntT>::value ? llvm::isInt<Bits>(I)
- : llvm::isUInt<Bits>(I)) &&
+ assert((std::is_signed<IntT>::value ? isInt<Bits>(I) : isUInt<Bits>(I)) &&
"Integer has bits outside those preserved!");
Value = static_cast<uintptr_t>(I) << Shift;
return *this;
@@ -81,15 +81,17 @@ public:
// types.
template <typename IntT, int Bits>
class PointerLikeTypeTraits<PointerEmbeddedInt<IntT, Bits>> {
- typedef PointerEmbeddedInt<IntT, Bits> T;
+ using T = PointerEmbeddedInt<IntT, Bits>;
public:
static inline void *getAsVoidPointer(const T &P) {
return reinterpret_cast<void *>(P.Value);
}
+
static inline T getFromVoidPointer(void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
+
static inline T getFromVoidPointer(const void *P) {
return T(reinterpret_cast<uintptr_t>(P), typename T::RawValueTag());
}
@@ -101,17 +103,19 @@ public:
// itself can be a key.
template <typename IntT, int Bits>
struct DenseMapInfo<PointerEmbeddedInt<IntT, Bits>> {
- typedef PointerEmbeddedInt<IntT, Bits> T;
-
- typedef DenseMapInfo<IntT> IntInfo;
+ using T = PointerEmbeddedInt<IntT, Bits>;
+ using IntInfo = DenseMapInfo<IntT>;
static inline T getEmptyKey() { return IntInfo::getEmptyKey(); }
static inline T getTombstoneKey() { return IntInfo::getTombstoneKey(); }
+
static unsigned getHashValue(const T &Arg) {
return IntInfo::getHashValue(Arg);
}
+
static bool isEqual(const T &LHS, const T &RHS) { return LHS == RHS; }
};
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_ADT_POINTEREMBEDDEDINT_H
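The defaulted constructor above relies on the new in-class `Value = 0` initializer, so default construction still yields a zero integer. A round-trip sketch (the 16-bit width is just an example):

  #include "llvm/ADT/PointerEmbeddedInt.h"

  void embeddedIntSketch() {
    llvm::PointerEmbeddedInt<int, 16> E;  // default constructed: holds 0
    E = 42;                               // asserts that 42 fits in the 16 preserved bits
    int V = E;                            // converts back to the underlying IntT
    (void)V;
  }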
diff --git a/include/llvm/ADT/PointerUnion.h b/include/llvm/ADT/PointerUnion.h
index 7ce70ebc8ce0..aeab641f5715 100644
--- a/include/llvm/ADT/PointerUnion.h
+++ b/include/llvm/ADT/PointerUnion.h
@@ -158,7 +158,7 @@ public:
assert(
get<PT1>() == Val.getPointer() &&
"Can't get the address because PointerLikeTypeTraits changes the ptr");
- return (PT1 *)Val.getAddrOfPointer();
+ return const_cast<PT1 *>(reinterpret_cast<const PT1 *>(Val.getAddrOfPointer()));
}
/// Assignment from nullptr which just clears the union.
diff --git a/include/llvm/ADT/ScopedHashTable.h b/include/llvm/ADT/ScopedHashTable.h
index d52128e294a3..22b0c1bdaf4d 100644
--- a/include/llvm/ADT/ScopedHashTable.h
+++ b/include/llvm/ADT/ScopedHashTable.h
@@ -109,6 +109,7 @@ private:
ScopedHashTableVal<K, V> *getLastValInScope() {
return LastValInScope;
}
+
void setLastValInScope(ScopedHashTableVal<K, V> *Val) {
LastValInScope = Val;
}
@@ -151,13 +152,14 @@ class ScopedHashTable {
public:
/// ScopeTy - This is a helpful typedef that allows clients to get easy access
/// to the name of the scope for this hash table.
- typedef ScopedHashTableScope<K, V, KInfo, AllocatorTy> ScopeTy;
- typedef unsigned size_type;
+ using ScopeTy = ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
+ using size_type = unsigned;
private:
friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
- typedef ScopedHashTableVal<K, V> ValTy;
+ using ValTy = ScopedHashTableVal<K, V>;
+
DenseMap<K, ValTy*, KInfo> TopLevelMap;
ScopeTy *CurScope = nullptr;
@@ -165,7 +167,7 @@ private:
public:
ScopedHashTable() = default;
- ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
+ ScopedHashTable(AllocatorTy A) : Allocator(A) {}
ScopedHashTable(const ScopedHashTable &) = delete;
ScopedHashTable &operator=(const ScopedHashTable &) = delete;
@@ -194,7 +196,7 @@ public:
insertIntoScope(CurScope, Key, Val);
}
- typedef ScopedHashTableIterator<K, V, KInfo> iterator;
+ using iterator = ScopedHashTableIterator<K, V, KInfo>;
iterator end() { return iterator(0); }
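ScopeTy, as documented above, is the handle clients open to delimit a scope; a brief sketch of that pattern (names and values are illustrative only):

  #include "llvm/ADT/ScopedHashTable.h"
  #include "llvm/ADT/StringRef.h"

  void scopedTableSketch() {
    llvm::ScopedHashTable<llvm::StringRef, int> Symbols;
    llvm::ScopedHashTable<llvm::StringRef, int>::ScopeTy Outer(Symbols);
    Symbols.insert("x", 1);
    {
      llvm::ScopedHashTable<llvm::StringRef, int>::ScopeTy Inner(Symbols);
      Symbols.insert("x", 2);        // shadows the outer binding
      (void)Symbols.lookup("x");     // 2
    }                                // Inner popped: the shadowing entry is removed
    (void)Symbols.lookup("x");       // 1 again
  }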
diff --git a/include/llvm/ADT/SmallBitVector.h b/include/llvm/ADT/SmallBitVector.h
index 0ff427066959..b6391746639b 100644
--- a/include/llvm/ADT/SmallBitVector.h
+++ b/include/llvm/ADT/SmallBitVector.h
@@ -15,8 +15,15 @@
#define LLVM_ADT_SMALLBITVECTOR_H
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/MathExtras.h"
+#include <algorithm>
#include <cassert>
+#include <climits>
+#include <cstddef>
+#include <cstdint>
+#include <limits>
+#include <utility>
namespace llvm {
@@ -29,7 +36,7 @@ class SmallBitVector {
// TODO: In "large" mode, a pointer to a BitVector is used, leading to an
// unnecessary level of indirection. It would be more efficient to use a
// pointer to memory containing size, allocation size, and the array of bits.
- uintptr_t X;
+ uintptr_t X = 1;
enum {
// The number of bits in this class.
@@ -54,7 +61,8 @@ class SmallBitVector {
"Unsupported word size");
public:
- typedef unsigned size_type;
+ using size_type = unsigned;
+
// Encapsulation of a single bit.
class reference {
SmallBitVector &TheVector;
@@ -134,21 +142,8 @@ private:
}
public:
- typedef const_set_bits_iterator_impl<SmallBitVector> const_set_bits_iterator;
- typedef const_set_bits_iterator set_iterator;
-
- const_set_bits_iterator set_bits_begin() const {
- return const_set_bits_iterator(*this);
- }
- const_set_bits_iterator set_bits_end() const {
- return const_set_bits_iterator(*this, -1);
- }
- iterator_range<const_set_bits_iterator> set_bits() const {
- return make_range(set_bits_begin(), set_bits_end());
- }
-
/// Creates an empty bitvector.
- SmallBitVector() : X(1) {}
+ SmallBitVector() = default;
/// Creates a bitvector of specified number of bits. All bits are initialized
/// to the specified value.
@@ -176,6 +171,21 @@ public:
delete getPointer();
}
+ using const_set_bits_iterator = const_set_bits_iterator_impl<SmallBitVector>;
+ using set_iterator = const_set_bits_iterator;
+
+ const_set_bits_iterator set_bits_begin() const {
+ return const_set_bits_iterator(*this);
+ }
+
+ const_set_bits_iterator set_bits_end() const {
+ return const_set_bits_iterator(*this, -1);
+ }
+
+ iterator_range<const_set_bits_iterator> set_bits() const {
+ return make_range(set_bits_begin(), set_bits_end());
+ }
+
/// Tests whether there are no bits in this bitvector.
bool empty() const {
return isSmall() ? getSmallSize() == 0 : getPointer()->empty();
@@ -677,14 +687,16 @@ operator^(const SmallBitVector &LHS, const SmallBitVector &RHS) {
return Result;
}
-} // End llvm namespace
+} // end namespace llvm
namespace std {
- /// Implement std::swap in terms of BitVector swap.
- inline void
- swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
- LHS.swap(RHS);
- }
+
+/// Implement std::swap in terms of BitVector swap.
+inline void
+swap(llvm::SmallBitVector &LHS, llvm::SmallBitVector &RHS) {
+ LHS.swap(RHS);
}
-#endif
+} // end namespace std
+
+#endif // LLVM_ADT_SMALLBITVECTOR_H
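The relocated set_bits() helpers expose the indices of set bits as an iterable range; a sketch of the usual consumption pattern (sizes and indices are arbitrary):

  #include "llvm/ADT/SmallBitVector.h"

  unsigned sumOfSetBits() {
    llvm::SmallBitVector BV(64);        // 64 bits, all initially clear
    BV.set(3);
    BV.set(40);
    unsigned Sum = 0;
    for (unsigned Idx : BV.set_bits())  // visits 3, then 40
      Sum += Idx;
    return Sum;                         // 43
  }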
diff --git a/include/llvm/ADT/SmallSet.h b/include/llvm/ADT/SmallSet.h
index 6dac1677b7a2..d52d0f07f9a6 100644
--- a/include/llvm/ADT/SmallSet.h
+++ b/include/llvm/ADT/SmallSet.h
@@ -39,8 +39,9 @@ class SmallSet {
/// we will never use.
SmallVector<T, N> Vector;
std::set<T, C> Set;
- typedef typename SmallVector<T, N>::const_iterator VIterator;
- typedef typename SmallVector<T, N>::iterator mutable_iterator;
+
+ using VIterator = typename SmallVector<T, N>::const_iterator;
+ using mutable_iterator = typename SmallVector<T, N>::iterator;
// In small mode SmallPtrSet uses linear search for the elements, so it is
// not a good idea to choose this value too high. You may consider using a
@@ -48,7 +49,7 @@ class SmallSet {
static_assert(N <= 32, "N should be small");
public:
- typedef size_t size_type;
+ using size_type = size_t;
SmallSet() = default;
diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h
index bbea8619a673..ffcf998a3d32 100644
--- a/include/llvm/ADT/StringExtras.h
+++ b/include/llvm/ADT/StringExtras.h
@@ -14,6 +14,7 @@
#ifndef LLVM_ADT_STRINGEXTRAS_H
#define LLVM_ADT_STRINGEXTRAS_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <cassert>
#include <cstddef>
@@ -40,6 +41,11 @@ static inline StringRef toStringRef(bool B) {
return StringRef(B ? "true" : "false");
}
+/// Construct a string ref from an array ref of unsigned chars.
+static inline StringRef toStringRef(ArrayRef<uint8_t> Input) {
+ return StringRef(reinterpret_cast<const char *>(Input.begin()), Input.size());
+}
+
/// Interpret the given character \p C as a hexadecimal digit and return its
/// value.
///
@@ -68,7 +74,7 @@ static inline std::string utohexstr(uint64_t X, bool LowerCase = false) {
/// Convert buffer \p Input to its hexadecimal representation.
/// The returned string is double the size of \p Input.
-static inline std::string toHex(StringRef Input) {
+inline std::string toHex(StringRef Input) {
static const char *const LUT = "0123456789ABCDEF";
size_t Length = Input.size();
@@ -82,6 +88,10 @@ static inline std::string toHex(StringRef Input) {
return Output;
}
+inline std::string toHex(ArrayRef<uint8_t> Input) {
+ return toHex(toStringRef(Input));
+}
+
static inline uint8_t hexFromNibbles(char MSB, char LSB) {
unsigned U1 = hexDigitValue(MSB);
unsigned U2 = hexDigitValue(LSB);
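The new overloads above simply view the raw bytes as a StringRef and reuse the existing conversion; a small round-trip sketch (byte values are arbitrary):

  #include "llvm/ADT/ArrayRef.h"
  #include "llvm/ADT/StringExtras.h"
  #include <cstdint>
  #include <string>

  std::string bytesToHexSketch() {
    const uint8_t Bytes[] = {0xDE, 0xAD, 0xBE, 0xEF};
    return llvm::toHex(llvm::ArrayRef<uint8_t>(Bytes));  // "DEADBEEF": twice the input length
  }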
diff --git a/include/llvm/ADT/Triple.h b/include/llvm/ADT/Triple.h
index 07626982d289..26a991812a3a 100644
--- a/include/llvm/ADT/Triple.h
+++ b/include/llvm/ADT/Triple.h
@@ -239,7 +239,9 @@ public:
/// Default constructor is the same as an empty string and leaves all
/// triple fields unknown.
- Triple() : Data(), Arch(), Vendor(), OS(), Environment(), ObjectFormat() {}
+ Triple()
+ : Data(), Arch(), SubArch(), Vendor(), OS(), Environment(),
+ ObjectFormat() {}
explicit Triple(const Twine &Str);
Triple(const Twine &ArchStr, const Twine &VendorStr, const Twine &OSStr);
diff --git a/include/llvm/ADT/ilist_base.h b/include/llvm/ADT/ilist_base.h
index 1ffc864bea2f..3d818a48d41d 100644
--- a/include/llvm/ADT/ilist_base.h
+++ b/include/llvm/ADT/ilist_base.h
@@ -1,4 +1,4 @@
-//===- llvm/ADT/ilist_base.h - Intrusive List Base ---------------*- C++ -*-==//
+//===- llvm/ADT/ilist_base.h - Intrusive List Base --------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,15 +12,13 @@
#include "llvm/ADT/ilist_node_base.h"
#include <cassert>
-#include <cstddef>
-#include <type_traits>
namespace llvm {
/// Implementations of list algorithms using ilist_node_base.
template <bool EnableSentinelTracking> class ilist_base {
public:
- typedef ilist_node_base<EnableSentinelTracking> node_base_type;
+ using node_base_type = ilist_node_base<EnableSentinelTracking>;
static void insertBeforeImpl(node_base_type &Next, node_base_type &N) {
node_base_type &Prev = *Next.getPrev();
diff --git a/include/llvm/ADT/ilist_iterator.h b/include/llvm/ADT/ilist_iterator.h
index c848d1a134f1..671e644e0154 100644
--- a/include/llvm/ADT/ilist_iterator.h
+++ b/include/llvm/ADT/ilist_iterator.h
@@ -1,4 +1,4 @@
-//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator -------*- C++ -*-==//
+//===- llvm/ADT/ilist_iterator.h - Intrusive List Iterator ------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -23,28 +23,30 @@ namespace ilist_detail {
/// Find const-correct node types.
template <class OptionsT, bool IsConst> struct IteratorTraits;
template <class OptionsT> struct IteratorTraits<OptionsT, false> {
- typedef typename OptionsT::value_type value_type;
- typedef typename OptionsT::pointer pointer;
- typedef typename OptionsT::reference reference;
- typedef ilist_node_impl<OptionsT> *node_pointer;
- typedef ilist_node_impl<OptionsT> &node_reference;
+ using value_type = typename OptionsT::value_type;
+ using pointer = typename OptionsT::pointer;
+ using reference = typename OptionsT::reference;
+ using node_pointer = ilist_node_impl<OptionsT> *;
+ using node_reference = ilist_node_impl<OptionsT> &;
};
template <class OptionsT> struct IteratorTraits<OptionsT, true> {
- typedef const typename OptionsT::value_type value_type;
- typedef typename OptionsT::const_pointer pointer;
- typedef typename OptionsT::const_reference reference;
- typedef const ilist_node_impl<OptionsT> *node_pointer;
- typedef const ilist_node_impl<OptionsT> &node_reference;
+ using value_type = const typename OptionsT::value_type;
+ using pointer = typename OptionsT::const_pointer;
+ using reference = typename OptionsT::const_reference;
+ using node_pointer = const ilist_node_impl<OptionsT> *;
+ using node_reference = const ilist_node_impl<OptionsT> &;
};
template <bool IsReverse> struct IteratorHelper;
template <> struct IteratorHelper<false> : ilist_detail::NodeAccess {
- typedef ilist_detail::NodeAccess Access;
+ using Access = ilist_detail::NodeAccess;
+
template <class T> static void increment(T *&I) { I = Access::getNext(*I); }
template <class T> static void decrement(T *&I) { I = Access::getPrev(*I); }
};
template <> struct IteratorHelper<true> : ilist_detail::NodeAccess {
- typedef ilist_detail::NodeAccess Access;
+ using Access = ilist_detail::NodeAccess;
+
template <class T> static void increment(T *&I) { I = Access::getPrev(*I); }
template <class T> static void decrement(T *&I) { I = Access::getNext(*I); }
};
@@ -58,24 +60,23 @@ class ilist_iterator : ilist_detail::SpecificNodeAccess<OptionsT> {
friend ilist_iterator<OptionsT, !IsReverse, IsConst>;
friend ilist_iterator<OptionsT, !IsReverse, !IsConst>;
- typedef ilist_detail::IteratorTraits<OptionsT, IsConst> Traits;
- typedef ilist_detail::SpecificNodeAccess<OptionsT> Access;
+ using Traits = ilist_detail::IteratorTraits<OptionsT, IsConst>;
+ using Access = ilist_detail::SpecificNodeAccess<OptionsT>;
public:
- typedef typename Traits::value_type value_type;
- typedef typename Traits::pointer pointer;
- typedef typename Traits::reference reference;
- typedef ptrdiff_t difference_type;
- typedef std::bidirectional_iterator_tag iterator_category;
-
- typedef typename OptionsT::const_pointer const_pointer;
- typedef typename OptionsT::const_reference const_reference;
+ using value_type = typename Traits::value_type;
+ using pointer = typename Traits::pointer;
+ using reference = typename Traits::reference;
+ using difference_type = ptrdiff_t;
+ using iterator_category = std::bidirectional_iterator_tag;
+ using const_pointer = typename OptionsT::const_pointer;
+ using const_reference = typename OptionsT::const_reference;
private:
- typedef typename Traits::node_pointer node_pointer;
- typedef typename Traits::node_reference node_reference;
+ using node_pointer = typename Traits::node_pointer;
+ using node_reference = typename Traits::node_reference;
- node_pointer NodePtr;
+ node_pointer NodePtr = nullptr;
public:
/// Create from an ilist_node.
@@ -83,7 +84,7 @@ public:
explicit ilist_iterator(pointer NP) : NodePtr(Access::getNodePtr(NP)) {}
explicit ilist_iterator(reference NR) : NodePtr(Access::getNodePtr(&NR)) {}
- ilist_iterator() : NodePtr(nullptr) {}
+ ilist_iterator() = default;
// This is templated so that we can allow constructing a const iterator from
// a nonconst iterator...
@@ -184,8 +185,8 @@ template <typename From> struct simplify_type;
/// FIXME: remove this, since there is no implicit conversion to NodeTy.
template <class OptionsT, bool IsConst>
struct simplify_type<ilist_iterator<OptionsT, false, IsConst>> {
- typedef ilist_iterator<OptionsT, false, IsConst> iterator;
- typedef typename iterator::pointer SimpleType;
+ using iterator = ilist_iterator<OptionsT, false, IsConst>;
+ using SimpleType = typename iterator::pointer;
static SimpleType getSimplifiedValue(const iterator &Node) { return &*Node; }
};
diff --git a/include/llvm/ADT/ilist_node.h b/include/llvm/ADT/ilist_node.h
index 7244d0f40586..3362611697cb 100644
--- a/include/llvm/ADT/ilist_node.h
+++ b/include/llvm/ADT/ilist_node.h
@@ -1,4 +1,4 @@
-//==-- llvm/ADT/ilist_node.h - Intrusive Linked List Helper ------*- C++ -*-==//
+//===- llvm/ADT/ilist_node.h - Intrusive Linked List Helper -----*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,11 +21,10 @@
namespace llvm {
namespace ilist_detail {
+
struct NodeAccess;
-} // end namespace ilist_detail
-template<typename NodeTy>
-struct ilist_traits;
+} // end namespace ilist_detail
template <class OptionsT, bool IsReverse, bool IsConst> class ilist_iterator;
template <class OptionsT> class ilist_sentinel;
@@ -39,9 +38,9 @@ template <class OptionsT> class ilist_sentinel;
/// provide type safety: you can't insert nodes of \a ilist_node_impl into the
/// wrong \a simple_ilist or \a iplist.
template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
- typedef typename OptionsT::value_type value_type;
- typedef typename OptionsT::node_base_type node_base_type;
- typedef typename OptionsT::list_base_type list_base_type;
+ using value_type = typename OptionsT::value_type;
+ using node_base_type = typename OptionsT::node_base_type;
+ using list_base_type = typename OptionsT::list_base_type;
friend typename OptionsT::list_base_type;
friend struct ilist_detail::NodeAccess;
@@ -52,17 +51,18 @@ template <class OptionsT> class ilist_node_impl : OptionsT::node_base_type {
friend class ilist_iterator<OptionsT, true, true>;
protected:
- ilist_node_impl() = default;
+ using self_iterator = ilist_iterator<OptionsT, false, false>;
+ using const_self_iterator = ilist_iterator<OptionsT, false, true>;
+ using reverse_self_iterator = ilist_iterator<OptionsT, true, false>;
+ using const_reverse_self_iterator = ilist_iterator<OptionsT, true, true>;
- typedef ilist_iterator<OptionsT, false, false> self_iterator;
- typedef ilist_iterator<OptionsT, false, true> const_self_iterator;
- typedef ilist_iterator<OptionsT, true, false> reverse_self_iterator;
- typedef ilist_iterator<OptionsT, true, true> const_reverse_self_iterator;
+ ilist_node_impl() = default;
private:
ilist_node_impl *getPrev() {
return static_cast<ilist_node_impl *>(node_base_type::getPrev());
}
+
ilist_node_impl *getNext() {
return static_cast<ilist_node_impl *>(node_base_type::getNext());
}
@@ -70,6 +70,7 @@ private:
const ilist_node_impl *getPrev() const {
return static_cast<ilist_node_impl *>(node_base_type::getPrev());
}
+
const ilist_node_impl *getNext() const {
return static_cast<ilist_node_impl *>(node_base_type::getNext());
}
@@ -80,9 +81,11 @@ private:
public:
self_iterator getIterator() { return self_iterator(*this); }
const_self_iterator getIterator() const { return const_self_iterator(*this); }
+
reverse_self_iterator getReverseIterator() {
return reverse_self_iterator(*this);
}
+
const_reverse_self_iterator getReverseIterator() const {
return const_reverse_self_iterator(*this);
}
@@ -151,6 +154,7 @@ class ilist_node
};
namespace ilist_detail {
+
/// An access class for ilist_node private API.
///
/// This gives access to the private parts of ilist nodes. Nodes for an ilist
@@ -163,15 +167,18 @@ protected:
static ilist_node_impl<OptionsT> *getNodePtr(typename OptionsT::pointer N) {
return N;
}
+
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getNodePtr(typename OptionsT::const_pointer N) {
return N;
}
+
template <class OptionsT>
static typename OptionsT::pointer getValuePtr(ilist_node_impl<OptionsT> *N) {
return static_cast<typename OptionsT::pointer>(N);
}
+
template <class OptionsT>
static typename OptionsT::const_pointer
getValuePtr(const ilist_node_impl<OptionsT> *N) {
@@ -182,15 +189,18 @@ protected:
static ilist_node_impl<OptionsT> *getPrev(ilist_node_impl<OptionsT> &N) {
return N.getPrev();
}
+
template <class OptionsT>
static ilist_node_impl<OptionsT> *getNext(ilist_node_impl<OptionsT> &N) {
return N.getNext();
}
+
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getPrev(const ilist_node_impl<OptionsT> &N) {
return N.getPrev();
}
+
template <class OptionsT>
static const ilist_node_impl<OptionsT> *
getNext(const ilist_node_impl<OptionsT> &N) {
@@ -200,23 +210,27 @@ protected:
template <class OptionsT> struct SpecificNodeAccess : NodeAccess {
protected:
- typedef typename OptionsT::pointer pointer;
- typedef typename OptionsT::const_pointer const_pointer;
- typedef ilist_node_impl<OptionsT> node_type;
+ using pointer = typename OptionsT::pointer;
+ using const_pointer = typename OptionsT::const_pointer;
+ using node_type = ilist_node_impl<OptionsT>;
static node_type *getNodePtr(pointer N) {
return NodeAccess::getNodePtr<OptionsT>(N);
}
+
static const node_type *getNodePtr(const_pointer N) {
return NodeAccess::getNodePtr<OptionsT>(N);
}
+
static pointer getValuePtr(node_type *N) {
return NodeAccess::getValuePtr<OptionsT>(N);
}
+
static const_pointer getValuePtr(const node_type *N) {
return NodeAccess::getValuePtr<OptionsT>(N);
}
};
+
} // end namespace ilist_detail
template <class OptionsT>
@@ -265,6 +279,7 @@ public:
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getPrevNode(*static_cast<NodeTy *>(this));
}
+
/// \brief Get the previous node, or \c nullptr for the list head.
const NodeTy *getPrevNode() const {
return const_cast<ilist_node_with_parent *>(this)->getPrevNode();
@@ -278,6 +293,7 @@ public:
getNodeParent()->*(ParentTy::getSublistAccess((NodeTy *)nullptr));
return List.getNextNode(*static_cast<NodeTy *>(this));
}
+
/// \brief Get the next node, or \c nullptr for the list tail.
const NodeTy *getNextNode() const {
return const_cast<ilist_node_with_parent *>(this)->getNextNode();
@@ -285,6 +301,6 @@ public:
/// @}
};
-} // End llvm namespace
+} // end namespace llvm
-#endif
+#endif // LLVM_ADT_ILIST_NODE_H
diff --git a/include/llvm/ADT/iterator.h b/include/llvm/ADT/iterator.h
index 28dcdf9613ef..15720a67c047 100644
--- a/include/llvm/ADT/iterator.h
+++ b/include/llvm/ADT/iterator.h
@@ -11,9 +11,11 @@
#define LLVM_ADT_ITERATOR_H
#include "llvm/ADT/iterator_range.h"
+#include <algorithm>
#include <cstddef>
#include <iterator>
#include <type_traits>
+#include <utility>
namespace llvm {
@@ -206,7 +208,7 @@ template <
class iterator_adaptor_base
: public iterator_facade_base<DerivedT, IteratorCategoryT, T,
DifferenceTypeT, PointerT, ReferenceT> {
- typedef typename iterator_adaptor_base::iterator_facade_base BaseT;
+ using BaseT = typename iterator_adaptor_base::iterator_facade_base;
protected:
WrappedIteratorT I;
@@ -221,7 +223,7 @@ protected:
const WrappedIteratorT &wrapped() const { return I; }
public:
- typedef DifferenceTypeT difference_type;
+ using difference_type = DifferenceTypeT;
DerivedT &operator+=(difference_type n) {
static_assert(
@@ -279,7 +281,7 @@ public:
/// which is implemented with some iterator over T*s:
///
/// \code
-/// typedef pointee_iterator<SmallVectorImpl<T *>::iterator> iterator;
+/// using iterator = pointee_iterator<SmallVectorImpl<T *>::iterator>;
/// \endcode
template <typename WrappedIteratorT,
typename T = typename std::remove_reference<
diff --git a/include/llvm/ADT/simple_ilist.h b/include/llvm/ADT/simple_ilist.h
index a1ab59170840..4c7598a1acb4 100644
--- a/include/llvm/ADT/simple_ilist.h
+++ b/include/llvm/ADT/simple_ilist.h
@@ -13,9 +13,14 @@
#include "llvm/ADT/ilist_base.h"
#include "llvm/ADT/ilist_iterator.h"
#include "llvm/ADT/ilist_node.h"
+#include "llvm/ADT/ilist_node_options.h"
+#include "llvm/Support/Compiler.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
+#include <functional>
+#include <iterator>
+#include <utility>
namespace llvm {
@@ -77,23 +82,23 @@ class simple_ilist
typename ilist_detail::compute_node_options<T, Options...>::type> {
static_assert(ilist_detail::check_options<Options...>::value,
"Unrecognized node option!");
- typedef
- typename ilist_detail::compute_node_options<T, Options...>::type OptionsT;
- typedef typename OptionsT::list_base_type list_base_type;
+ using OptionsT =
+ typename ilist_detail::compute_node_options<T, Options...>::type;
+ using list_base_type = typename OptionsT::list_base_type;
ilist_sentinel<OptionsT> Sentinel;
public:
- typedef typename OptionsT::value_type value_type;
- typedef typename OptionsT::pointer pointer;
- typedef typename OptionsT::reference reference;
- typedef typename OptionsT::const_pointer const_pointer;
- typedef typename OptionsT::const_reference const_reference;
- typedef ilist_iterator<OptionsT, false, false> iterator;
- typedef ilist_iterator<OptionsT, false, true> const_iterator;
- typedef ilist_iterator<OptionsT, true, false> reverse_iterator;
- typedef ilist_iterator<OptionsT, true, true> const_reverse_iterator;
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
+ using value_type = typename OptionsT::value_type;
+ using pointer = typename OptionsT::pointer;
+ using reference = typename OptionsT::reference;
+ using const_pointer = typename OptionsT::const_pointer;
+ using const_reference = typename OptionsT::const_reference;
+ using iterator = ilist_iterator<OptionsT, false, false>;
+ using const_iterator = ilist_iterator<OptionsT, false, true>;
+ using reverse_iterator = ilist_iterator<OptionsT, true, false>;
+ using const_reverse_iterator = ilist_iterator<OptionsT, true, true>;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
simple_ilist() = default;
~simple_ilist() = default;
diff --git a/include/llvm/Analysis/MemorySSA.h b/include/llvm/Analysis/MemorySSA.h
index f0bba8c4c020..462e4594266e 100644
--- a/include/llvm/Analysis/MemorySSA.h
+++ b/include/llvm/Analysis/MemorySSA.h
@@ -147,7 +147,6 @@ public:
MemoryAccess(const MemoryAccess &) = delete;
MemoryAccess &operator=(const MemoryAccess &) = delete;
- void *operator new(size_t, unsigned) = delete;
void *operator new(size_t) = delete;
BasicBlock *getBlock() const { return Block; }
@@ -232,7 +231,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
public:
- void *operator new(size_t, unsigned) = delete;
void *operator new(size_t) = delete;
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);
@@ -298,7 +296,6 @@ public:
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
- void *operator new(size_t, unsigned) = delete;
static inline bool classof(const Value *MA) {
return MA->getValueID() == MemoryUseVal;
@@ -355,7 +352,6 @@ public:
// allocate space for exactly one operand
void *operator new(size_t s) { return User::operator new(s, 1); }
- void *operator new(size_t, unsigned) = delete;
static inline bool classof(const Value *MA) {
return MA->getValueID() == MemoryDefVal;
@@ -438,8 +434,6 @@ public:
allocHungoffUses(ReservedSpace);
}
- void *operator new(size_t, unsigned) = delete;
-
// Block iterator interface. This provides access to the list of incoming
// basic blocks, which parallels the list of incoming values.
typedef BasicBlock **block_iterator;
diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 8ee9712b93d8..2a4b768256d1 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -1214,26 +1214,31 @@ public:
SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
unsigned Depth = 0);
const SCEV *getAddExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getAddExpr(Ops, Flags);
+ return getAddExpr(Ops, Flags, Depth);
}
const SCEV *getAddExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
- return getAddExpr(Ops, Flags);
+ return getAddExpr(Ops, Flags, Depth);
}
const SCEV *getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
const SCEV *getMulExpr(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
SmallVector<const SCEV *, 2> Ops = {LHS, RHS};
- return getMulExpr(Ops, Flags);
+ return getMulExpr(Ops, Flags, Depth);
}
const SCEV *getMulExpr(const SCEV *Op0, const SCEV *Op1, const SCEV *Op2,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap) {
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0) {
SmallVector<const SCEV *, 3> Ops = {Op0, Op1, Op2};
- return getMulExpr(Ops, Flags);
+ return getMulExpr(Ops, Flags, Depth);
}
const SCEV *getUDivExpr(const SCEV *LHS, const SCEV *RHS);
const SCEV *getUDivExactExpr(const SCEV *LHS, const SCEV *RHS);
@@ -1287,7 +1292,8 @@ public:
/// Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
const SCEV *getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap);
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap,
+ unsigned Depth = 0);
/// Return a SCEV corresponding to a conversion of the input value to the
/// specified type. If the type must be extended, it is zero extended.
@@ -1693,10 +1699,14 @@ private:
bool doesIVOverflowOnGT(const SCEV *RHS, const SCEV *Stride, bool IsSigned,
bool NoWrap);
- /// Get add expr already created or create a new one
+ /// Get add expr already created or create a new one.
const SCEV *getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SCEV::NoWrapFlags Flags);
+ /// Get mul expr already created or create a new one.
+ const SCEV *getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags);
+
private:
FoldingSet<SCEV> UniqueSCEVs;
FoldingSet<SCEVPredicate> UniquePreds;
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index cd8c2cd24244..af2ebb7b6b44 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -235,6 +235,11 @@ public:
/// starting with the sources of divergence.
bool isSourceOfDivergence(const Value *V) const;
+ // \brief Returns true for the target-specific
+ // set of operations that produce a uniform result
+ // even when given non-uniform arguments.
+ bool isAlwaysUniform(const Value *V) const;
+
/// Returns the address space ID for a target's 'flat' address space. Note
/// this is not necessarily the same as addrspace(0), which LLVM sometimes
/// refers to as the generic address space. The flat address space is a
@@ -821,6 +826,7 @@ public:
virtual int getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
+ virtual bool isAlwaysUniform(const Value *V) = 0;
virtual unsigned getFlatAddressSpace() = 0;
virtual bool isLoweredToCall(const Function *F) = 0;
virtual void getUnrollingPreferences(Loop *L, UnrollingPreferences &UP) = 0;
@@ -873,7 +879,7 @@ public:
virtual int getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
- virtual unsigned getRegisterBitWidth(bool Vector) = 0;
+ virtual unsigned getRegisterBitWidth(bool Vector) const = 0;
virtual unsigned getMinVectorRegisterBitWidth() = 0;
virtual bool shouldConsiderAddressTypePromotion(
const Instruction &I, bool &AllowPromotionWithoutCommonHeader) = 0;
@@ -998,6 +1004,10 @@ public:
return Impl.isSourceOfDivergence(V);
}
+ bool isAlwaysUniform(const Value *V) override {
+ return Impl.isAlwaysUniform(V);
+ }
+
unsigned getFlatAddressSpace() override {
return Impl.getFlatAddressSpace();
}
@@ -1119,7 +1129,7 @@ public:
unsigned getNumberOfRegisters(bool Vector) override {
return Impl.getNumberOfRegisters(Vector);
}
- unsigned getRegisterBitWidth(bool Vector) override {
+ unsigned getRegisterBitWidth(bool Vector) const override {
return Impl.getRegisterBitWidth(Vector);
}
unsigned getMinVectorRegisterBitWidth() override {
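The new isAlwaysUniform hook is the dual of isSourceOfDivergence: it lets a target name operations whose result stays uniform even when their operands are not (a cross-lane "read first lane" style intrinsic is the typical case). A hedged sketch of how client code might consult both hooks; the helper itself is hypothetical and not part of the patch:

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Instruction.h"

  // Hypothetical helper: ask the target hooks whether I's result must be
  // treated as divergent. A real divergence analysis would also propagate
  // divergence through uses; this only shows the two queries.
  bool mayBeDivergent(const llvm::Instruction &I,
                      const llvm::TargetTransformInfo &TTI) {
    if (TTI.isAlwaysUniform(&I))
      return false;
    return TTI.isSourceOfDivergence(&I);
  }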
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 72de7c12eb3e..24ac3b1213e1 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -177,6 +177,8 @@ public:
bool isSourceOfDivergence(const Value *V) { return false; }
+ bool isAlwaysUniform(const Value *V) { return false; }
+
unsigned getFlatAddressSpace () {
return -1;
}
@@ -320,7 +322,7 @@ public:
unsigned getNumberOfRegisters(bool Vector) { return 8; }
- unsigned getRegisterBitWidth(bool Vector) { return 32; }
+ unsigned getRegisterBitWidth(bool Vector) const { return 32; }
unsigned getMinVectorRegisterBitWidth() { return 128; }
diff --git a/include/llvm/Analysis/TypeMetadataUtils.h b/include/llvm/Analysis/TypeMetadataUtils.h
index 17906ba4e392..422e153a5a78 100644
--- a/include/llvm/Analysis/TypeMetadataUtils.h
+++ b/include/llvm/Analysis/TypeMetadataUtils.h
@@ -20,6 +20,13 @@
namespace llvm {
+/// The type of CFI jumptable needed for a function.
+enum CfiFunctionLinkage {
+ CFL_Definition = 0,
+ CFL_Declaration = 1,
+ CFL_WeakDeclaration = 2
+};
+
/// A call site that could be devirtualized.
struct DevirtCallSite {
/// The offset from the address point to the virtual function.
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 612779b1ce86..e953ec8ab6ab 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -249,8 +249,8 @@ template <typename T> class ArrayRef;
};
/// Returns true if the value \p V is a pointer into a ContantDataArray.
- /// If successfull \p Index will point to a ConstantDataArray info object
- /// with an apropriate offset.
+ /// If successful \p Index will point to a ConstantDataArray info object
+ /// with an appropriate offset.
bool getConstantDataArrayInfo(const Value *V, ConstantDataArraySlice &Slice,
unsigned ElementSize, uint64_t Offset = 0);
diff --git a/include/llvm/BinaryFormat/ELF.h b/include/llvm/BinaryFormat/ELF.h
index 3724f555c283..a4450ee13b40 100644
--- a/include/llvm/BinaryFormat/ELF.h
+++ b/include/llvm/BinaryFormat/ELF.h
@@ -1,4 +1,4 @@
-//===-- llvm/BinaryFormat/ELF.h - ELF constants and structures --*- C++ -*-===//
+//===- llvm/BinaryFormat/ELF.h - ELF constants and structures ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -20,27 +20,25 @@
#ifndef LLVM_BINARYFORMAT_ELF_H
#define LLVM_BINARYFORMAT_ELF_H
-#include "llvm/Support/Compiler.h"
-#include "llvm/Support/DataTypes.h"
+#include <cstdint>
#include <cstring>
namespace llvm {
-
namespace ELF {
-typedef uint32_t Elf32_Addr; // Program address
-typedef uint32_t Elf32_Off; // File offset
-typedef uint16_t Elf32_Half;
-typedef uint32_t Elf32_Word;
-typedef int32_t Elf32_Sword;
+using Elf32_Addr = uint32_t; // Program address
+using Elf32_Off = uint32_t; // File offset
+using Elf32_Half = uint16_t;
+using Elf32_Word = uint32_t;
+using Elf32_Sword = int32_t;
-typedef uint64_t Elf64_Addr;
-typedef uint64_t Elf64_Off;
-typedef uint16_t Elf64_Half;
-typedef uint32_t Elf64_Word;
-typedef int32_t Elf64_Sword;
-typedef uint64_t Elf64_Xword;
-typedef int64_t Elf64_Sxword;
+using Elf64_Addr = uint64_t;
+using Elf64_Off = uint64_t;
+using Elf64_Half = uint16_t;
+using Elf64_Word = uint32_t;
+using Elf64_Sword = int32_t;
+using Elf64_Xword = uint64_t;
+using Elf64_Sxword = int64_t;
// Object file magic string.
static const char ElfMagic[] = {0x7f, 'E', 'L', 'F', '\0'};
@@ -75,9 +73,11 @@ struct Elf32_Ehdr {
Elf32_Half e_shentsize; // Size of an entry in the section header table
Elf32_Half e_shnum; // Number of entries in the section header table
Elf32_Half e_shstrndx; // Sect hdr table index of sect name string table
+
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
+
unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
@@ -99,9 +99,11 @@ struct Elf64_Ehdr {
Elf64_Half e_shentsize;
Elf64_Half e_shnum;
Elf64_Half e_shstrndx;
+
bool checkMagic() const {
return (memcmp(e_ident, ElfMagic, strlen(ElfMagic))) == 0;
}
+
unsigned char getFileClass() const { return e_ident[EI_CLASS]; }
unsigned char getDataEncoding() const { return e_ident[EI_DATA]; }
};
@@ -683,6 +685,7 @@ enum : unsigned {
SHT_GROUP = 17, // Section group.
SHT_SYMTAB_SHNDX = 18, // Indices for SHN_XINDEX entries.
SHT_LOOS = 0x60000000, // Lowest operating system-specific type.
+ SHT_LLVM_ODRTAB = 0x6fff4c00, // LLVM ODR table.
SHT_GNU_ATTRIBUTES = 0x6ffffff5, // Object attributes.
SHT_GNU_HASH = 0x6ffffff6, // GNU-style hash table.
SHT_GNU_verdef = 0x6ffffffd, // GNU version definitions.
@@ -1356,7 +1359,6 @@ enum {
};
} // end namespace ELF
-
} // end namespace llvm
-#endif
+#endif // LLVM_BINARYFORMAT_ELF_H
diff --git a/include/llvm/Bitcode/BitcodeReader.h b/include/llvm/Bitcode/BitcodeReader.h
index 61e4f6351b19..0e17e9a0a278 100644
--- a/include/llvm/Bitcode/BitcodeReader.h
+++ b/include/llvm/Bitcode/BitcodeReader.h
@@ -42,6 +42,12 @@ namespace llvm {
struct BitcodeFileContents;
+ /// Basic information extracted from a bitcode module to be used for LTO.
+ struct BitcodeLTOInfo {
+ bool IsThinLTO;
+ bool HasSummary;
+ };
+
/// Represents a module in a bitcode file.
class BitcodeModule {
// This covers the identification (if present) and module blocks.
@@ -90,15 +96,17 @@ namespace llvm {
/// Read the entire bitcode module and return it.
Expected<std::unique_ptr<Module>> parseModule(LLVMContext &Context);
- /// Check if the given bitcode buffer contains a summary block.
- Expected<bool> hasSummary();
+ /// Returns information about the module to be used for LTO: whether to
+ /// compile with ThinLTO, and whether it has a summary.
+ Expected<BitcodeLTOInfo> getLTOInfo();
/// Parse the specified bitcode buffer, returning the module summary index.
Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();
/// Parse the specified bitcode buffer and merge its module summary index
/// into CombinedIndex.
- Error readSummary(ModuleSummaryIndex &CombinedIndex, unsigned ModuleId);
+ Error readSummary(ModuleSummaryIndex &CombinedIndex, StringRef ModulePath,
+ uint64_t ModuleId);
};
struct BitcodeFileContents {
@@ -147,8 +155,8 @@ namespace llvm {
Expected<std::unique_ptr<Module>> parseBitcodeFile(MemoryBufferRef Buffer,
LLVMContext &Context);
- /// Check if the given bitcode buffer contains a summary block.
- Expected<bool> hasGlobalValueSummary(MemoryBufferRef Buffer);
+ /// Returns LTO information for the specified bitcode file.
+ Expected<BitcodeLTOInfo> getBitcodeLTOInfo(MemoryBufferRef Buffer);
/// Parse the specified bitcode buffer, returning the module summary index.
Expected<std::unique_ptr<ModuleSummaryIndex>>
@@ -157,7 +165,7 @@ namespace llvm {
/// Parse the specified bitcode buffer and merge the index into CombinedIndex.
Error readModuleSummaryIndex(MemoryBufferRef Buffer,
ModuleSummaryIndex &CombinedIndex,
- unsigned ModuleId);
+ uint64_t ModuleId);
/// Parse the module summary index out of an IR file and return the module
/// summary index object if found, or an empty summary if not. If Path refers
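Callers that previously only asked whether a summary block was present now get both facts from one query; a hedged sketch of the replacement call (the surrounding function is illustrative):

  #include "llvm/Bitcode/BitcodeReader.h"
  #include "llvm/Support/Error.h"
  #include "llvm/Support/MemoryBuffer.h"

  llvm::Error classifyModule(llvm::MemoryBufferRef Buffer) {
    llvm::Expected<llvm::BitcodeLTOInfo> Info = llvm::getBitcodeLTOInfo(Buffer);
    if (!Info)
      return Info.takeError();
    if (Info->IsThinLTO) {
      // hand the module to the thin link
    } else if (Info->HasSummary) {
      // regular LTO module that still carries a summary
    }
    return llvm::Error::success();
  }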
diff --git a/include/llvm/Bitcode/BitcodeWriter.h b/include/llvm/Bitcode/BitcodeWriter.h
index 23b5ae87b278..7c3c4b2e0cbd 100644
--- a/include/llvm/Bitcode/BitcodeWriter.h
+++ b/include/llvm/Bitcode/BitcodeWriter.h
@@ -67,6 +67,10 @@ namespace llvm {
void writeModule(const Module *M, bool ShouldPreserveUseListOrder = false,
const ModuleSummaryIndex *Index = nullptr,
bool GenerateHash = false, ModuleHash *ModHash = nullptr);
+
+ void writeIndex(
+ const ModuleSummaryIndex *Index,
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex);
};
/// \brief Write the specified module to the specified raw output stream.
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index a643bfd1dcea..4e3e177cac8f 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -240,6 +240,14 @@ enum GlobalValueSummarySymtabCodes {
// summaries, but it can also appear in per-module summaries for PGO data.
// [valueid, guid]
FS_VALUE_GUID = 16,
+ // The list of local functions with CFI jump tables. Function names are
+ // strings in strtab.
+ // [n * name]
+ FS_CFI_FUNCTION_DEFS = 17,
+ // The list of external functions with CFI jump tables. Function names are
+ // strings in strtab.
+ // [n * name]
+ FS_CFI_FUNCTION_DECLS = 18,
};
enum MetadataCodes {
diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h
index 9e33df6b55ec..5eb7a0f61eec 100644
--- a/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/include/llvm/CodeGen/BasicTTIImpl.h
@@ -93,6 +93,8 @@ public:
bool isSourceOfDivergence(const Value *V) { return false; }
+ bool isAlwaysUniform(const Value *V) { return false; }
+
unsigned getFlatAddressSpace() {
// Return an invalid address space.
return -1;
@@ -346,7 +348,7 @@ public:
unsigned getNumberOfRegisters(bool Vector) { return Vector ? 0 : 1; }
- unsigned getRegisterBitWidth(bool Vector) { return 32; }
+ unsigned getRegisterBitWidth(bool Vector) const { return 32; }
/// Estimate the overhead of scalarizing an instruction. Insert and Extract
/// are set if the result needs to be inserted and/or extracted from vectors.
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 7d7c3e8cfd22..f32a58915118 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -82,6 +82,11 @@ public:
DenseMap<std::pair<const MachineBasicBlock *, const Value *>, unsigned>
SwiftErrorVRegUpwardsUse;
+ /// A map from instructions that define/use a swifterror value to the virtual
+ /// register that represents that def/use.
+ llvm::DenseMap<PointerIntPair<const Instruction *, 1, bool>, unsigned>
+ SwiftErrorVRegDefUses;
+
/// The swifterror argument of the current function.
const Value *SwiftErrorArg;
@@ -101,6 +106,13 @@ public:
void setCurrentSwiftErrorVReg(const MachineBasicBlock *MBB, const Value *,
unsigned);
+ /// Get or create the swifterror value virtual register for a def of a
+ /// swifterror by an instruction.
+ std::pair<unsigned, bool> getOrCreateSwiftErrorVRegDefAt(const Instruction *);
+ std::pair<unsigned, bool>
+ getOrCreateSwiftErrorVRegUseAt(const Instruction *, const MachineBasicBlock *,
+ const Value *);
+
/// ValueMap - Since we emit code for the function a basic block at a time,
/// we must remember which virtual registers hold the values for
/// cross-basic-block values.
diff --git a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
index 3148e70b56f8..5197ba869c0a 100644
--- a/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
+++ b/include/llvm/CodeGen/GlobalISel/LegalizerHelper.h
@@ -21,9 +21,11 @@
#ifndef LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
#define LLVM_CODEGEN_GLOBALISEL_MACHINELEGALIZEHELPER_H
+#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
+#include "llvm/CodeGen/RuntimeLibcalls.h"
namespace llvm {
// Forward declarations.
@@ -99,6 +101,12 @@ private:
const LegalizerInfo &LI;
};
+/// Helper function that replaces \p MI with a libcall.
+LegalizerHelper::LegalizeResult
+replaceWithLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder,
+ RTLIB::Libcall Libcall, const CallLowering::ArgInfo &Result,
+ ArrayRef<CallLowering::ArgInfo> Args);
+
} // End namespace llvm.
#endif
diff --git a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
index db72f78c8321..4e7b8350038b 100644
--- a/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
+++ b/include/llvm/CodeGen/GlobalISel/MachineIRBuilder.h
@@ -40,8 +40,8 @@ class MachineIRBuilder {
MachineFunction *MF;
/// Information used to access the description of the opcodes.
const TargetInstrInfo *TII;
- /// Information used to verify types are consistent.
- const MachineRegisterInfo *MRI;
+ /// Information used to verify types are consistent and to create virtual registers.
+ MachineRegisterInfo *MRI;
/// Debug location to be set to any instruction we create.
DebugLoc DL;
@@ -229,6 +229,26 @@ public:
MachineInstrBuilder buildGEP(unsigned Res, unsigned Op0,
unsigned Op1);
+ /// Materialize and insert \p Res<def> = G_GEP \p Op0, (G_CONSTANT \p Value)
+ ///
+ /// G_GEP adds \p Value bytes to the pointer specified by \p Op0,
+ /// storing the resulting pointer in \p Res. If \p Value is zero then no
+ /// G_GEP or G_CONSTANT will be created and \p Op0 will be assigned to
+ /// \p Res.
+ ///
+ /// \pre setBasicBlock or setMI must have been called.
+ /// \pre \p Op0 must be a generic virtual register with pointer type.
+ /// \pre \p ValueTy must be a scalar type.
+ /// \pre \p Res must be 0. This is to detect confusion between
+ /// materializeGEP() and buildGEP().
+ /// \post \p Res will either be a new generic virtual register of the same
+ /// type as \p Op0 or \p Op0 itself.
+ ///
+ /// \return a MachineInstrBuilder for the newly created instruction.
+ Optional<MachineInstrBuilder> materializeGEP(unsigned &Res, unsigned Op0,
+ const LLT &ValueTy,
+ uint64_t Value);
+
/// Build and insert \p Res<def> = G_PTR_MASK \p Op0, \p NumBits
///
/// G_PTR_MASK clears the low bits of a pointer operand without destroying its
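// Editor's illustrative sketch, not part of the vendor patch: one way a caller
// could use the materializeGEP() contract documented above. addByteOffset is a
// hypothetical helper; only MachineIRBuilder APIs declared in this header are
// assumed.
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
using namespace llvm;
static unsigned addByteOffset(MachineIRBuilder &MIRBuilder, unsigned Ptr,
                              const LLT &OffsetTy, uint64_t Offset) {
  unsigned Res = 0; // must be 0 on entry, per the precondition above
  MIRBuilder.materializeGEP(Res, Ptr, OffsetTy, Offset);
  // If Offset was zero, Res is simply Ptr; otherwise it is a fresh generic
  // vreg defined by a G_CONSTANT + G_GEP pair.
  return Res;
}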
diff --git a/include/llvm/CodeGen/RuntimeLibcalls.h b/include/llvm/CodeGen/RuntimeLibcalls.h
index ddfabb0c44d6..8c3aacaa8efc 100644
--- a/include/llvm/CodeGen/RuntimeLibcalls.h
+++ b/include/llvm/CodeGen/RuntimeLibcalls.h
@@ -333,12 +333,12 @@ namespace RTLIB {
MEMSET,
MEMMOVE,
- // ELEMENT-WISE ATOMIC MEMORY
- MEMCPY_ELEMENT_ATOMIC_1,
- MEMCPY_ELEMENT_ATOMIC_2,
- MEMCPY_ELEMENT_ATOMIC_4,
- MEMCPY_ELEMENT_ATOMIC_8,
- MEMCPY_ELEMENT_ATOMIC_16,
+ // ELEMENT-WISE UNORDERED-ATOMIC MEMORY of different element sizes
+ MEMCPY_ELEMENT_UNORDERED_ATOMIC_1,
+ MEMCPY_ELEMENT_UNORDERED_ATOMIC_2,
+ MEMCPY_ELEMENT_UNORDERED_ATOMIC_4,
+ MEMCPY_ELEMENT_UNORDERED_ATOMIC_8,
+ MEMCPY_ELEMENT_UNORDERED_ATOMIC_16,
// EXCEPTION HANDLING
UNWIND_RESUME,
@@ -511,9 +511,10 @@ namespace RTLIB {
/// UNKNOWN_LIBCALL if there is none.
Libcall getSYNC(unsigned Opc, MVT VT);
- /// getMEMCPY_ELEMENT_ATOMIC - Return MEMCPY_ELEMENT_ATOMIC_* value for the
- /// given element size or UNKNOW_LIBCALL if there is none.
- Libcall getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize);
+ /// getMEMCPY_ELEMENT_UNORDERED_ATOMIC - Return
+ /// MEMCPY_ELEMENT_UNORDERED_ATOMIC_* value for the given element size or
+ /// UNKNOWN_LIBCALL if there is none.
+ Libcall getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize);
}
}
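// Editor's illustrative sketch, not part of the vendor patch: probing the
// renamed element-wise unordered-atomic memcpy libcalls. The helper name is
// hypothetical; RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC and UNKNOWN_LIBCALL
// come from this header.
#include "llvm/CodeGen/RuntimeLibcalls.h"
using namespace llvm;
static bool hasUnorderedAtomicMemcpyLibcall(uint64_t ElementSize) {
  // Only element sizes 1, 2, 4, 8 and 16 have dedicated entries; anything else
  // is reported as UNKNOWN_LIBCALL.
  return RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElementSize) !=
         RTLIB::UNKNOWN_LIBCALL;
}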
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index 2ef7796a4a07..f3f3003b7e20 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -1217,6 +1217,12 @@ public:
void ReplaceAllUsesOfValuesWith(const SDValue *From, const SDValue *To,
unsigned Num);
+ /// If an existing load has uses of its chain, create a token factor node with
+ /// that chain and the new memory node's chain and update users of the old
+ /// chain to the token factor. This ensures that the new memory node will have
+ /// the same relative memory dependency position as the old load.
+ void makeEquivalentMemoryOrdering(LoadSDNode *Old, SDValue New);
+
/// Topological-sort the AllNodes list and
/// assign a unique node id for each node in the DAG based on their
/// topological order. Returns the number of nodes.
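// Editor's illustrative sketch, not part of the vendor patch: how target code
// might use the new makeEquivalentMemoryOrdering() helper after it replaces a
// load. widenLoad and WideVT are hypothetical; the SelectionDAG and LoadSDNode
// APIs used are pre-existing.
#include "llvm/CodeGen/SelectionDAG.h"
using namespace llvm;
static SDValue widenLoad(SelectionDAG &DAG, LoadSDNode *OldLoad, EVT WideVT) {
  SDLoc DL(OldLoad);
  SDValue NewLoad = DAG.getLoad(WideVT, DL, OldLoad->getChain(),
                                OldLoad->getBasePtr(), OldLoad->getMemOperand());
  // Splice the new load into the chain at the old load's position: users of
  // OldLoad's chain are redirected to a TokenFactor of both chains.
  DAG.makeEquivalentMemoryOrdering(OldLoad, NewLoad);
  return NewLoad;
}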
diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
index 106a084a95c0..e4d3cc9cecfc 100644
--- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
+++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h
@@ -42,9 +42,8 @@ public:
~TargetLoweringObjectFileELF() override = default;
/// Emit Obj-C garbage collection and linker options.
- void emitModuleFlags(MCStreamer &Streamer,
- ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const override;
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+ const TargetMachine &TM) const override;
void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
const MCSymbol *Sym) const override;
@@ -99,9 +98,8 @@ public:
void Initialize(MCContext &Ctx, const TargetMachine &TM) override;
/// Emit the module flags that specify the garbage collection information.
- void emitModuleFlags(MCStreamer &Streamer,
- ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const override;
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+ const TargetMachine &TM) const override;
MCSection *SelectSectionForGlobal(const GlobalObject *GO, SectionKind Kind,
const TargetMachine &TM) const override;
@@ -155,9 +153,8 @@ public:
const TargetMachine &TM) const override;
/// Emit Obj-C garbage collection and linker options.
- void emitModuleFlags(MCStreamer &Streamer,
- ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const override;
+ void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+ const TargetMachine &TM) const override;
MCSection *getStaticCtorSection(unsigned Priority,
const MCSymbol *KeySym) const override;
diff --git a/include/llvm/DebugInfo/CodeView/CodeView.h b/include/llvm/DebugInfo/CodeView/CodeView.h
index 251c9d1ae62c..6820e26b754c 100644
--- a/include/llvm/DebugInfo/CodeView/CodeView.h
+++ b/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -418,6 +418,8 @@ CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ProcSymFlags)
/// Corresponds to COMPILESYM2::Flags bitfield.
enum class CompileSym2Flags : uint32_t {
+ None = 0,
+ SourceLanguageMask = 0xFF,
EC = 1 << 8,
NoDbgInfo = 1 << 9,
LTCG = 1 << 10,
@@ -432,6 +434,8 @@ CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym2Flags)
/// Corresponds to COMPILESYM3::Flags bitfield.
enum class CompileSym3Flags : uint32_t {
+ None = 0,
+ SourceLanguageMask = 0xFF,
EC = 1 << 8,
NoDbgInfo = 1 << 9,
LTCG = 1 << 10,
@@ -448,6 +452,7 @@ enum class CompileSym3Flags : uint32_t {
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(CompileSym3Flags)
enum class ExportFlags : uint16_t {
+ None = 0,
IsConstant = 1 << 0,
IsData = 1 << 1,
IsPrivate = 1 << 2,
diff --git a/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h b/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
index 686b5c4f242e..1e329c7c3f14 100644
--- a/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
+++ b/include/llvm/DebugInfo/CodeView/DebugFrameDataSubsection.h
@@ -49,6 +49,7 @@ public:
Error commit(BinaryStreamWriter &Writer) const override;
void addFrameData(const FrameData &Frame);
+ void setFrames(ArrayRef<FrameData> Frames);
private:
std::vector<FrameData> Frames;
diff --git a/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h b/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h
index c9b062717baa..7484af663105 100644
--- a/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h
+++ b/include/llvm/DebugInfo/CodeView/DebugInlineeLinesSubsection.h
@@ -19,7 +19,7 @@
namespace llvm {
namespace codeview {
-class DebugInlineeLinesSubsectionsRef;
+class DebugInlineeLinesSubsectionRef;
class DebugChecksumsSubsection;
enum class InlineeLinesSignature : uint32_t {
diff --git a/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h b/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h
index 49a269d92e35..694731742064 100644
--- a/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h
+++ b/include/llvm/DebugInfo/CodeView/DebugSubsectionRecord.h
@@ -49,13 +49,13 @@ private:
class DebugSubsectionRecordBuilder {
public:
- DebugSubsectionRecordBuilder(std::unique_ptr<DebugSubsection> Subsection,
+ DebugSubsectionRecordBuilder(std::shared_ptr<DebugSubsection> Subsection,
CodeViewContainer Container);
uint32_t calculateSerializedLength();
Error commit(BinaryStreamWriter &Writer) const;
private:
- std::unique_ptr<DebugSubsection> Subsection;
+ std::shared_ptr<DebugSubsection> Subsection;
CodeViewContainer Container;
};
@@ -64,6 +64,9 @@ private:
template <> struct VarStreamArrayExtractor<codeview::DebugSubsectionRecord> {
Error operator()(BinaryStreamRef Stream, uint32_t &Length,
codeview::DebugSubsectionRecord &Info) {
+ // FIXME: We need to pass the container type through to this function. In
+ // practice this isn't super important since the subsection header describes
+ // its length and we can just skip it. It's more important when writing.
if (auto EC = codeview::DebugSubsectionRecord::initialize(
Stream, Info, codeview::CodeViewContainer::Pdb))
return EC;
diff --git a/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h b/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h
index d4a3d9195a36..75f749dfa933 100644
--- a/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h
+++ b/include/llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h
@@ -12,6 +12,7 @@
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
#include "llvm/Support/Error.h"
#include <cstdint>
@@ -30,56 +31,7 @@ class DebugStringTableSubsectionRef;
class DebugSymbolRVASubsectionRef;
class DebugSymbolsSubsectionRef;
class DebugUnknownSubsectionRef;
-
-struct DebugSubsectionState {
-public:
- // If no subsections are known about initially, we find as much as we can.
- DebugSubsectionState();
-
- // If only a string table subsection is given, we find a checksums subsection.
- explicit DebugSubsectionState(const DebugStringTableSubsectionRef &Strings);
-
- // If both subsections are given, we don't need to find anything.
- DebugSubsectionState(const DebugStringTableSubsectionRef &Strings,
- const DebugChecksumsSubsectionRef &Checksums);
-
- template <typename T> void initialize(T &&FragmentRange) {
- for (const DebugSubsectionRecord &R : FragmentRange) {
- if (Strings && Checksums)
- return;
- if (R.kind() == DebugSubsectionKind::FileChecksums) {
- initializeChecksums(R);
- continue;
- }
- if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
- // While in practice we should never encounter a string table even
- // though the string table is already initialized, in theory it's
- // possible. PDBs are supposed to have one global string table and
- // then this subsection should not appear. Whereas object files are
- // supposed to have this subsection appear exactly once. However,
- // for testing purposes it's nice to be able to test this subsection
- // independently of one format or the other, so for some tests we
- // manually construct a PDB that contains this subsection in addition
- // to a global string table.
- initializeStrings(R);
- continue;
- }
- }
- }
-
- const DebugStringTableSubsectionRef &strings() const { return *Strings; }
- const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }
-
-private:
- void initializeStrings(const DebugSubsectionRecord &SR);
- void initializeChecksums(const DebugSubsectionRecord &FCR);
-
- std::unique_ptr<DebugStringTableSubsectionRef> OwnedStrings;
- std::unique_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;
-
- const DebugStringTableSubsectionRef *Strings = nullptr;
- const DebugChecksumsSubsectionRef *Checksums = nullptr;
-};
+class StringsAndChecksumsRef;
class DebugSubsectionVisitor {
public:
@@ -89,38 +41,38 @@ public:
return Error::success();
}
virtual Error visitLines(DebugLinesSubsectionRef &Lines,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitFileChecksums(DebugChecksumsSubsectionRef &Checksums,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitInlineeLines(DebugInlineeLinesSubsectionRef &Inlinees,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error
visitCrossModuleExports(DebugCrossModuleExportsSubsectionRef &CSE,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error
visitCrossModuleImports(DebugCrossModuleImportsSubsectionRef &CSE,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitStringTable(DebugStringTableSubsectionRef &ST,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitSymbols(DebugSymbolsSubsectionRef &CSE,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitFrameData(DebugFrameDataSubsectionRef &FD,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
virtual Error visitCOFFSymbolRVAs(DebugSymbolRVASubsectionRef &RVAs,
- const DebugSubsectionState &State) = 0;
+ const StringsAndChecksumsRef &State) = 0;
};
Error visitDebugSubsection(const DebugSubsectionRecord &R,
DebugSubsectionVisitor &V,
- const DebugSubsectionState &State);
+ const StringsAndChecksumsRef &State);
namespace detail {
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
- DebugSubsectionState &State) {
+ StringsAndChecksumsRef &State) {
State.initialize(std::forward<T>(FragmentRange));
for (const DebugSubsectionRecord &L : FragmentRange) {
@@ -133,7 +85,7 @@ Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
- DebugSubsectionState State;
+ StringsAndChecksumsRef State;
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}
@@ -141,7 +93,7 @@ Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V) {
template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
const DebugStringTableSubsectionRef &Strings) {
- DebugSubsectionState State(Strings);
+ StringsAndChecksumsRef State(Strings);
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}
@@ -150,7 +102,7 @@ template <typename T>
Error visitDebugSubsections(T &&FragmentRange, DebugSubsectionVisitor &V,
const DebugStringTableSubsectionRef &Strings,
const DebugChecksumsSubsectionRef &Checksums) {
- DebugSubsectionState State(Strings, Checksums);
+ StringsAndChecksumsRef State(Strings, Checksums);
return detail::visitDebugSubsections(std::forward<T>(FragmentRange), V,
State);
}
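// Editor's illustrative sketch, not part of the vendor patch: driving a
// DebugSubsectionVisitor with the renamed StringsAndChecksumsRef state. The
// wrapper function is hypothetical; DebugSubsectionArray comes from
// DebugSubsectionRecord.h and the visitDebugSubsections overload from above.
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/CodeView/DebugSubsectionVisitor.h"
using namespace llvm;
using namespace llvm::codeview;
static Error visitAll(const DebugSubsectionArray &Subsections,
                      DebugSubsectionVisitor &V,
                      const DebugStringTableSubsectionRef &Strings,
                      const DebugChecksumsSubsectionRef &Checksums) {
  // Internally this builds a StringsAndChecksumsRef(Strings, Checksums) and
  // passes it to every visitXXX callback as the State argument.
  return visitDebugSubsections(Subsections, V, Strings, Checksums);
}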
diff --git a/include/llvm/DebugInfo/CodeView/Formatters.h b/include/llvm/DebugInfo/CodeView/Formatters.h
index 37a91098a8b6..1fbb0dd6f9b0 100644
--- a/include/llvm/DebugInfo/CodeView/Formatters.h
+++ b/include/llvm/DebugInfo/CodeView/Formatters.h
@@ -12,7 +12,10 @@
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/TypeIndex.h"
#include "llvm/Support/FormatAdapters.h"
+#include "llvm/Support/FormatProviders.h"
+#include "llvm/Support/FormatVariadic.h"
namespace llvm {
namespace codeview {
@@ -35,6 +38,20 @@ inline detail::GuidAdapter fmt_guid(ArrayRef<uint8_t> Item) {
return detail::GuidAdapter(Item);
}
}
+
+template <> struct format_provider<codeview::TypeIndex> {
+public:
+ static void format(const codeview::TypeIndex &V, llvm::raw_ostream &Stream,
+ StringRef Style) {
+ if (V.isNoneType())
+ Stream << "<no type>";
+ else {
+ Stream << formatv("{0:X+4}", V.getIndex());
+ if (V.isSimple())
+ Stream << " (" << codeview::TypeIndex::simpleTypeName(V) << ")";
+ }
+ }
+};
}
#endif
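// Editor's illustrative sketch, not part of the vendor patch: the new
// format_provider<TypeIndex> lets TypeIndex values be used directly with
// formatv. printTypeIndex is a hypothetical helper.
#include "llvm/DebugInfo/CodeView/Formatters.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static void printTypeIndex(raw_ostream &OS, codeview::TypeIndex TI) {
  // Prints "<no type>", a hex index, or a hex index plus the simple-type name,
  // depending on TI, as implemented by the provider above.
  OS << formatv("type = {0}\n", TI);
}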
diff --git a/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h b/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h
new file mode 100644
index 000000000000..708b317164fc
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/StringsAndChecksums.h
@@ -0,0 +1,106 @@
+//===- StringsAndChecksums.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_STRINGS_AND_CHECKSUMS_H
+#define LLVM_DEBUGINFO_CODEVIEW_STRINGS_AND_CHECKSUMS_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+
+#include <memory>
+
+namespace llvm {
+namespace codeview {
+
+class DebugSubsectionRecord;
+class DebugChecksumsSubsectionRef;
+class DebugStringTableSubsectionRef;
+class DebugChecksumsSubsection;
+class DebugStringTableSubsection;
+
+class StringsAndChecksumsRef {
+public:
+ // If no subsections are known about initially, we find as much as we can.
+ StringsAndChecksumsRef();
+
+ // If only a string table subsection is given, we find a checksums subsection.
+ explicit StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings);
+
+ // If both subsections are given, we don't need to find anything.
+ StringsAndChecksumsRef(const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums);
+
+ void setChecksums(const DebugChecksumsSubsectionRef &CS);
+
+ template <typename T> void initialize(T &&FragmentRange) {
+ for (const DebugSubsectionRecord &R : FragmentRange) {
+ if (Strings && Checksums)
+ return;
+ if (R.kind() == DebugSubsectionKind::FileChecksums) {
+ initializeChecksums(R);
+ continue;
+ }
+ if (R.kind() == DebugSubsectionKind::StringTable && !Strings) {
+ // While in practice we should never encounter a string table subsection
+ // when the string table is already initialized, in theory it's
+ // possible. PDBs are supposed to have one global string table and
+ // then this subsection should not appear. Whereas object files are
+ // supposed to have this subsection appear exactly once. However,
+ // for testing purposes it's nice to be able to test this subsection
+ // independently of one format or the other, so for some tests we
+ // manually construct a PDB that contains this subsection in addition
+ // to a global string table.
+ initializeStrings(R);
+ continue;
+ }
+ }
+ }
+
+ const DebugStringTableSubsectionRef &strings() const { return *Strings; }
+ const DebugChecksumsSubsectionRef &checksums() const { return *Checksums; }
+
+ bool hasStrings() const { return Strings != nullptr; }
+ bool hasChecksums() const { return Checksums != nullptr; }
+
+private:
+ void initializeStrings(const DebugSubsectionRecord &SR);
+ void initializeChecksums(const DebugSubsectionRecord &FCR);
+
+ std::unique_ptr<DebugStringTableSubsectionRef> OwnedStrings;
+ std::unique_ptr<DebugChecksumsSubsectionRef> OwnedChecksums;
+
+ const DebugStringTableSubsectionRef *Strings = nullptr;
+ const DebugChecksumsSubsectionRef *Checksums = nullptr;
+};
+
+class StringsAndChecksums {
+public:
+ using StringsPtr = std::shared_ptr<DebugStringTableSubsection>;
+ using ChecksumsPtr = std::shared_ptr<DebugChecksumsSubsection>;
+ // If no subsections are known about initially, we find as much as we can.
+ StringsAndChecksums() {}
+
+ void setStrings(const StringsPtr &SP) { Strings = SP; }
+ void setChecksums(const ChecksumsPtr &CP) { Checksums = CP; }
+
+ const StringsPtr &strings() const { return Strings; }
+ const ChecksumsPtr &checksums() const { return Checksums; }
+
+ bool hasStrings() const { return Strings != nullptr; }
+ bool hasChecksums() const { return Checksums != nullptr; }
+
+private:
+ StringsPtr Strings;
+ ChecksumsPtr Checksums;
+};
+
+} // namespace codeview
+} // namespace llvm
+
+#endif
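// Editor's illustrative sketch, not part of the vendor patch: using the new
// StringsAndChecksumsRef to discover the string table and checksum subsections
// in a subsection stream. findStringsAndChecksums is a hypothetical helper.
#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
using namespace llvm;
using namespace llvm::codeview;
static bool findStringsAndChecksums(const DebugSubsectionArray &Subsections) {
  StringsAndChecksumsRef SC;    // nothing known up front
  SC.initialize(Subsections);   // scans the range for the two subsections
  return SC.hasStrings() && SC.hasChecksums();
}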
diff --git a/include/llvm/DebugInfo/CodeView/SymbolRecord.h b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
index a3e4dff647bd..5f85ed28cb3a 100644
--- a/include/llvm/DebugInfo/CodeView/SymbolRecord.h
+++ b/include/llvm/DebugInfo/CodeView/SymbolRecord.h
@@ -363,7 +363,7 @@ public:
: SymbolRecord(SymbolRecordKind::PublicSym32),
RecordOffset(RecordOffset) {}
- uint32_t Index;
+ TypeIndex Index;
uint32_t Offset;
uint16_t Segment;
StringRef Name;
@@ -379,7 +379,7 @@ public:
: SymbolRecord(SymbolRecordKind::RegisterSym),
RecordOffset(RecordOffset) {}
- uint32_t Index;
+ TypeIndex Index;
RegisterId Register;
StringRef Name;
@@ -679,7 +679,7 @@ public:
: SymbolRecord(SymbolRecordKind::FileStaticSym),
RecordOffset(RecordOffset) {}
- uint32_t Index;
+ TypeIndex Index;
uint32_t ModFilenameOffset;
LocalSymFlags Flags;
StringRef Name;
@@ -814,7 +814,7 @@ public:
uint32_t CodeOffset;
uint16_t Register;
- uint8_t CookieKind;
+ FrameCookieKind CookieKind;
uint8_t Flags;
uint32_t RecordOffset;
@@ -871,7 +871,7 @@ public:
uint32_t Offset;
TypeIndex Type;
- uint16_t Register;
+ RegisterId Register;
StringRef Name;
uint32_t RecordOffset;
diff --git a/include/llvm/DebugInfo/CodeView/TypeIndex.h b/include/llvm/DebugInfo/CodeView/TypeIndex.h
index 31eed7d3e877..10d51c2d6244 100644
--- a/include/llvm/DebugInfo/CodeView/TypeIndex.h
+++ b/include/llvm/DebugInfo/CodeView/TypeIndex.h
@@ -248,6 +248,8 @@ public:
return A.toArrayIndex() - B.toArrayIndex();
}
+ static StringRef simpleTypeName(TypeIndex TI);
+
private:
support::ulittle32_t Index;
};
diff --git a/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h b/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
index 72793e97b60d..3012b39dcc52 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h
@@ -50,6 +50,10 @@ public:
: AccelSection(AccelSection), StringSection(StringSection), Relocs(Relocs) {}
bool extract();
+ uint32_t getNumBuckets();
+ uint32_t getNumHashes();
+ uint32_t getSizeHdr();
+ uint32_t getHeaderDataLength();
void dump(raw_ostream &OS) const;
};
diff --git a/include/llvm/DebugInfo/DWARF/DWARFVerifier.h b/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
index b9f14be85926..9eb5c45faba8 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFVerifier.h
@@ -20,6 +20,7 @@ struct DWARFAttribute;
class DWARFContext;
class DWARFDie;
class DWARFUnit;
+class DWARFAcceleratorTable;
/// A class that verifies DWARF debug information given a DWARF Context.
class DWARFVerifier {
@@ -29,8 +30,9 @@ class DWARFVerifier {
/// can verify each reference points to a valid DIE and not an offset that
/// lies between two valid DIEs.
std::map<uint64_t, std::set<uint32_t>> ReferenceToDIEOffsets;
- uint32_t NumDebugInfoErrors;
- uint32_t NumDebugLineErrors;
+ uint32_t NumDebugInfoErrors = 0;
+ uint32_t NumDebugLineErrors = 0;
+ uint32_t NumAppleNamesErrors = 0;
/// Verifies the attribute's DWARF attribute and its value.
///
@@ -38,8 +40,8 @@ class DWARFVerifier {
/// - DW_AT_ranges values is a valid .debug_ranges offset
/// - DW_AT_stmt_list is a valid .debug_line offset
///
- /// @param Die The DWARF DIE that owns the attribute value
- /// @param AttrValue The DWARF attribute value to check
+ /// \param Die The DWARF DIE that owns the attribute value
+ /// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoAttribute(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies the attribute's DWARF form.
@@ -49,8 +51,8 @@ class DWARFVerifier {
/// - All DW_FORM_ref_addr values have valid .debug_info offsets
/// - All DW_FORM_strp values have valid .debug_str offsets
///
- /// @param Die The DWARF DIE that owns the attribute value
- /// @param AttrValue The DWARF attribute value to check
+ /// \param Die The DWARF DIE that owns the attribute value
+ /// \param AttrValue The DWARF attribute value to check
void verifyDebugInfoForm(const DWARFDie &Die, DWARFAttribute &AttrValue);
/// Verifies all valid references that were found when iterating through
@@ -75,13 +77,13 @@ class DWARFVerifier {
public:
DWARFVerifier(raw_ostream &S, DWARFContext &D)
- : OS(S), DCtx(D), NumDebugInfoErrors(0), NumDebugLineErrors(0) {}
+ : OS(S), DCtx(D) {}
/// Verify the information in the .debug_info section.
///
/// Any errors are reported to the stream that this object was
/// constructed with.
///
- /// @return True if the .debug_info verifies successfully, false otherwise.
+ /// \returns true if the .debug_info verifies successfully, false otherwise.
bool handleDebugInfo();
/// Verify the information in the .debug_line section.
@@ -89,8 +91,16 @@ public:
/// Any errors are reported to the stream that this object was
/// constructed with.
///
- /// @return True if the .debug_line verifies successfully, false otherwise.
+ /// \returns true if the .debug_line verifies successfully, false otherwise.
bool handleDebugLine();
+
+ /// Verify the information in the .apple_names accelerator table.
+ ///
+ /// Any errors are reported to the stream that this object was
+ /// constructed with.
+ ///
+ /// \returns true if the .apple_names verifies successfully, false otherwise.
+ bool handleAppleNames();
};
} // end namespace llvm
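// Editor's illustrative sketch, not part of the vendor patch: running all
// three verification passes, including the new .apple_names handler. verifyAll
// is a hypothetical helper mirroring what a dwarfdump-style driver might do.
#include "llvm/DebugInfo/DWARF/DWARFContext.h"
#include "llvm/DebugInfo/DWARF/DWARFVerifier.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
static bool verifyAll(raw_ostream &OS, DWARFContext &DCtx) {
  DWARFVerifier Verifier(OS, DCtx);
  bool Ok = Verifier.handleDebugInfo();
  Ok &= Verifier.handleDebugLine();
  Ok &= Verifier.handleAppleNames(); // added by this change
  return Ok;
}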
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
index 2ff166b24e68..a89e26ae943c 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -50,12 +50,14 @@ public:
void addSymbol(codeview::CVSymbol Symbol);
void
- addDebugSubsection(std::unique_ptr<codeview::DebugSubsection> Subsection);
+ addDebugSubsection(std::shared_ptr<codeview::DebugSubsection> Subsection);
uint16_t getStreamIndex() const;
StringRef getModuleName() const { return ModuleName; }
StringRef getObjFileName() const { return ObjFileName; }
+ unsigned getModuleIndex() const { return Layout.Mod; }
+
ArrayRef<std::string> source_files() const {
return makeArrayRef(SourceFiles);
}
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
index bcf1cff8f6e5..2885081628f6 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleList.h
@@ -12,6 +12,7 @@
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/Support/BinaryStreamArray.h"
#include "llvm/Support/BinaryStreamRef.h"
@@ -21,6 +22,7 @@
#include <vector>
namespace llvm {
+namespace codeview {}
namespace pdb {
class DbiModuleList;
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index e116f314ac0e..aeb2e2ab026a 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -49,7 +49,6 @@ public:
void setPdbDllRbld(uint16_t R);
void setFlags(uint16_t F);
void setMachineType(PDB_Machine M);
- void setSectionContribs(ArrayRef<SectionContrib> SecMap);
void setSectionMap(ArrayRef<SecMapEntry> SecMap);
// Add given bytes as a new stream.
@@ -65,10 +64,8 @@ public:
Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef MsfBuffer);
- // A helper function to create Section Contributions from COFF input
- // section headers.
- static std::vector<SectionContrib>
- createSectionContribs(ArrayRef<llvm::object::coff_section> SecHdrs);
+ void addSectionContrib(DbiModuleDescriptorBuilder *ModuleDbi,
+ const llvm::object::coff_section *SecHdr);
// A helper function to create a Section Map from a COFF section header.
static std::vector<SecMapEntry>
@@ -112,7 +109,7 @@ private:
WritableBinaryStreamRef NamesBuffer;
MutableBinaryByteStream FileInfoBuffer;
- ArrayRef<SectionContrib> SectionContribs;
+ std::vector<SectionContrib> SectionContribs;
ArrayRef<SecMapEntry> SectionMap;
llvm::SmallVector<DebugStream, (int)DbgHeaderType::Max> DbgStreams;
};
diff --git a/include/llvm/DebugInfo/PDB/Native/InfoStream.h b/include/llvm/DebugInfo/PDB/Native/InfoStream.h
index 1c38c2b6194f..fc91fc7097bd 100644
--- a/include/llvm/DebugInfo/PDB/Native/InfoStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/InfoStream.h
@@ -35,6 +35,7 @@ public:
uint32_t getStreamSize() const;
+ bool containsIdStream() const;
PdbRaw_ImplVer getVersion() const;
uint32_t getSignature() const;
uint32_t getAge() const;
diff --git a/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
index a8121978d882..c744696ae250 100644
--- a/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
@@ -31,6 +31,7 @@ class ModuleDebugStreamRef {
public:
ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
std::unique_ptr<msf::MappedBlockStream> Stream);
+ ModuleDebugStreamRef(ModuleDebugStreamRef &&Other) = default;
~ModuleDebugStreamRef();
Error reload();
@@ -40,6 +41,12 @@ public:
iterator_range<codeview::CVSymbolArray::Iterator>
symbols(bool *HadError) const;
+ const codeview::CVSymbolArray &getSymbolArray() const {
+ return SymbolsSubstream;
+ }
+
+ ModuleDebugStreamRef &operator=(ModuleDebugStreamRef &&Other) = default;
+
llvm::iterator_range<DebugSubsectionIterator> subsections() const;
bool hasDebugSubsections() const;
@@ -54,7 +61,7 @@ private:
uint32_t Signature;
- std::unique_ptr<msf::MappedBlockStream> Stream;
+ std::shared_ptr<msf::MappedBlockStream> Stream;
codeview::CVSymbolArray SymbolsSubstream;
BinaryStreamRef C11LinesSubstream;
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBFile.h b/include/llvm/DebugInfo/PDB/Native/PDBFile.h
index 3bed67141c56..4d3c569c3cdf 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBFile.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBFile.h
@@ -108,6 +108,8 @@ public:
bool hasPDBTpiStream() const;
bool hasPDBStringTable();
+ uint32_t getPointerSize();
+
private:
Expected<std::unique_ptr<msf::MappedBlockStream>>
safelyCreateIndexedStream(const msf::MSFLayout &Layout,
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
index 28a14d7356d2..86ef1136b41d 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBStringTable.h
@@ -45,7 +45,7 @@ public:
FixedStreamArray<support::ulittle32_t> name_ids() const;
- codeview::DebugStringTableSubsectionRef getStringTable() const;
+ const codeview::DebugStringTableSubsectionRef &getStringTable() const;
private:
Error readHeader(BinaryStreamReader &Reader);
diff --git a/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h b/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h
index 0faa02dc4525..b57707ee7923 100644
--- a/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/PDBStringTableBuilder.h
@@ -41,10 +41,7 @@ public:
uint32_t calculateSerializedSize() const;
Error commit(BinaryStreamWriter &Writer) const;
- codeview::DebugStringTableSubsection &getStrings() { return Strings; }
- const codeview::DebugStringTableSubsection &getStrings() const {
- return Strings;
- }
+ void setStrings(const codeview::DebugStringTableSubsection &Strings);
private:
uint32_t calculateHashTableSize() const;
diff --git a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
index 4a541edd6a7b..4570c80c76d7 100644
--- a/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/PublicsStream.h
@@ -35,6 +35,7 @@ public:
uint32_t getSymHash() const;
uint32_t getAddrMap() const;
uint32_t getNumBuckets() const { return NumBuckets; }
+ Expected<const codeview::CVSymbolArray &> getSymbolArray() const;
iterator_range<codeview::CVSymbolArray::Iterator>
getSymbols(bool *HadError) const;
FixedStreamArray<support::ulittle32_t> getHashBuckets() const {
diff --git a/include/llvm/DebugInfo/PDB/Native/RawConstants.h b/include/llvm/DebugInfo/PDB/Native/RawConstants.h
index e1bd86b2870b..bb1d097b5123 100644
--- a/include/llvm/DebugInfo/PDB/Native/RawConstants.h
+++ b/include/llvm/DebugInfo/PDB/Native/RawConstants.h
@@ -98,15 +98,19 @@ enum class DbgHeaderType : uint16_t {
};
enum class OMFSegDescFlags : uint16_t {
+ None = 0,
Read = 1 << 0, // Segment is readable.
Write = 1 << 1, // Segment is writable.
Execute = 1 << 2, // Segment is executable.
AddressIs32Bit = 1 << 3, // Descriptor describes a 32-bit linear address.
IsSelector = 1 << 8, // Frame represents a selector.
IsAbsoluteAddress = 1 << 9, // Frame represents an absolute address.
- IsGroup = 1 << 10 // If set, descriptor represents a group.
+ IsGroup = 1 << 10, // If set, descriptor represents a group.
+ LLVM_MARK_AS_BITMASK_ENUM(/* LargestValue = */ IsGroup)
};
+LLVM_ENABLE_BITMASK_ENUMS_IN_NAMESPACE();
+
} // end namespace pdb
} // end namespace llvm
diff --git a/include/llvm/DebugInfo/PDB/Native/SymbolStream.h b/include/llvm/DebugInfo/PDB/Native/SymbolStream.h
index 41d5e6ad64a0..17695f587849 100644
--- a/include/llvm/DebugInfo/PDB/Native/SymbolStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/SymbolStream.h
@@ -27,6 +27,10 @@ public:
~SymbolStream();
Error reload();
+ const codeview::CVSymbolArray &getSymbolArray() const {
+ return SymbolRecords;
+ }
+
iterator_range<codeview::CVSymbolArray::Iterator>
getSymbols(bool *HadError) const;
diff --git a/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h
index 21cfa83e6af4..411720d6f56b 100644
--- a/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/TpiStreamBuilder.h
@@ -58,6 +58,8 @@ public:
Error finalizeMsfLayout();
+ uint32_t getRecordCount() const { return TypeRecords.size(); }
+
Error commit(const msf::MSFLayout &Layout, WritableBinaryStreamRef Buffer);
uint32_t calculateSerializedLength();
diff --git a/include/llvm/IR/Constants.h b/include/llvm/IR/Constants.h
index bb5e1931393b..003a6d5d075d 100644
--- a/include/llvm/IR/Constants.h
+++ b/include/llvm/IR/Constants.h
@@ -68,11 +68,8 @@ protected:
void *operator new(size_t s) { return User::operator new(s, 0); }
public:
- ConstantData() = delete;
ConstantData(const ConstantData &) = delete;
- void *operator new(size_t, unsigned) = delete;
-
/// Methods to support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return V->getValueID() >= ConstantDataFirstVal &&
@@ -691,8 +688,6 @@ class ConstantDataArray final : public ConstantDataSequential {
public:
ConstantDataArray(const ConstantDataArray &) = delete;
- void *operator new(size_t, unsigned) = delete;
-
/// get() constructors - Return a constant with array type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@@ -752,8 +747,6 @@ class ConstantDataVector final : public ConstantDataSequential {
public:
ConstantDataVector(const ConstantDataVector &) = delete;
- void *operator new(size_t, unsigned) = delete;
-
/// get() constructors - Return a constant with vector type with an element
/// count and element type matching the ArrayRef passed in. Note that this
/// can return a ConstantAggregateZero object.
@@ -830,8 +823,6 @@ class BlockAddress final : public Constant {
Value *handleOperandChangeImpl(Value *From, Value *To);
public:
- void *operator new(size_t, unsigned) = delete;
-
/// Return a BlockAddress for the specified function and basic block.
static BlockAddress *get(Function *F, BasicBlock *BB);
diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h
index 2174e1f301ee..9374fe4fae76 100644
--- a/include/llvm/IR/DebugInfoMetadata.h
+++ b/include/llvm/IR/DebugInfoMetadata.h
@@ -2117,9 +2117,6 @@ public:
/// variable, or the location of a single piece of a variable, or (when using
/// DW_OP_stack_value) is the constant variable value.
///
-/// FIXME: Instead of DW_OP_plus taking an argument, this should use DW_OP_const
-/// and have DW_OP_plus consume the topmost elements on the stack.
-///
/// TODO: Co-allocate the expression elements.
/// TODO: Separate from MDNode, or otherwise drop Distinct and Temporary
/// storage types.
diff --git a/include/llvm/IR/GlobalVariable.h b/include/llvm/IR/GlobalVariable.h
index 454492769c8b..8255a4f298c0 100644
--- a/include/llvm/IR/GlobalVariable.h
+++ b/include/llvm/IR/GlobalVariable.h
@@ -78,8 +78,6 @@ public:
return User::operator new(s, 1);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
diff --git a/include/llvm/IR/IRBuilder.h b/include/llvm/IR/IRBuilder.h
index 5ddaf2b1733b..ec33f82f7022 100644
--- a/include/llvm/IR/IRBuilder.h
+++ b/include/llvm/IR/IRBuilder.h
@@ -435,27 +435,25 @@ public:
MDNode *ScopeTag = nullptr,
MDNode *NoAliasTag = nullptr);
- /// \brief Create and insert an atomic memcpy between the specified
- /// pointers.
+ /// \brief Create and insert an element unordered-atomic memcpy between the
+ /// specified pointers.
///
/// If the pointers aren't i8*, they will be converted. If a TBAA tag is
/// specified, it will be added to the instruction. Likewise with alias.scope
/// and noalias tags.
- CallInst *CreateElementAtomicMemCpy(
- Value *Dst, Value *Src, uint64_t NumElements, uint32_t ElementSize,
+ CallInst *CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, Value *Src, uint64_t Size, uint32_t ElementSize,
MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr) {
- return CreateElementAtomicMemCpy(Dst, Src, getInt64(NumElements),
- ElementSize, TBAATag, TBAAStructTag,
- ScopeTag, NoAliasTag);
+ return CreateElementUnorderedAtomicMemCpy(
+ Dst, Src, getInt64(Size), ElementSize, TBAATag, TBAAStructTag, ScopeTag,
+ NoAliasTag);
}
- CallInst *CreateElementAtomicMemCpy(Value *Dst, Value *Src,
- Value *NumElements, uint32_t ElementSize,
- MDNode *TBAATag = nullptr,
- MDNode *TBAAStructTag = nullptr,
- MDNode *ScopeTag = nullptr,
- MDNode *NoAliasTag = nullptr);
+ CallInst *CreateElementUnorderedAtomicMemCpy(
+ Value *Dst, Value *Src, Value *Size, uint32_t ElementSize,
+ MDNode *TBAATag = nullptr, MDNode *TBAAStructTag = nullptr,
+ MDNode *ScopeTag = nullptr, MDNode *NoAliasTag = nullptr);
/// \brief Create and insert a memmove between the specified
/// pointers.
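// Editor's illustrative sketch, not part of the vendor patch: emitting the
// renamed element-wise unordered-atomic memcpy through IRBuilder. Note the
// third argument is now a total byte count rather than an element count.
// emitAtomicCopy and the 4-byte element size are hypothetical choices.
#include "llvm/IR/IRBuilder.h"
using namespace llvm;
static CallInst *emitAtomicCopy(IRBuilder<> &B, Value *Dst, Value *Src,
                                uint64_t Bytes) {
  // ElementSize must evenly divide Bytes; Dst and Src are converted to i8* if
  // they are not already.
  return B.CreateElementUnorderedAtomicMemCpy(Dst, Src, Bytes,
                                              /*ElementSize=*/4);
}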
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index ff63da50afee..b3c6644c7e81 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -294,8 +294,6 @@ public:
return User::operator new(s, 1);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -343,8 +341,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Transparently provide more efficient getOperand methods.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
@@ -907,15 +903,11 @@ protected:
BasicBlock *InsertAtEnd);
public:
- CmpInst() = delete;
-
// allocate space for exactly two operands
void *operator new(size_t s) {
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Construct a compare instruction, given the opcode, the predicate and
/// the two operands. Optionally (if InstBefore is specified) insert the
/// instruction into a BasicBlock right before the specified instruction.
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index 6029b0a7c571..b3032f54aa42 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -337,8 +337,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Return true if this is a store to a volatile memory location.
bool isVolatile() const { return getSubclassDataFromInstruction() & 1; }
@@ -460,8 +458,6 @@ public:
return User::operator new(s, 0);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Returns the ordering effect of this fence.
AtomicOrdering getOrdering() const {
return AtomicOrdering(getSubclassDataFromInstruction() >> 1);
@@ -538,8 +534,6 @@ public:
return User::operator new(s, 3);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Return true if this is a cmpxchg from a volatile memory
/// location.
///
@@ -728,8 +722,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
BinOp getOperation() const {
return static_cast<BinOp>(getSubclassDataFromInstruction() >> 5);
}
@@ -2234,8 +2226,6 @@ public:
return User::operator new(s, 3);
}
- void *operator new(size_t, unsigned) = delete;
-
/// Return true if a shufflevector instruction can be
/// formed with the specified operands.
static bool isValidOperands(const Value *V1, const Value *V2,
@@ -2467,8 +2457,6 @@ public:
return User::operator new(s, 2);
}
- void *operator new(size_t, unsigned) = delete;
-
static InsertValueInst *Create(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const Twine &NameStr = "",
@@ -2596,11 +2584,6 @@ class PHINode : public Instruction {
allocHungoffUses(ReservedSpace);
}
- // allocate space for exactly zero operands
- void *operator new(size_t s) {
- return User::operator new(s);
- }
-
protected:
// Note: Instruction needs to be a friend here to call cloneImpl.
friend class Instruction;
@@ -2615,8 +2598,6 @@ protected:
}
public:
- void *operator new(size_t, unsigned) = delete;
-
/// Constructors - NumReservedValues is a hint for the number of incoming
/// edges that this phi node will have (use 0 if you really have no idea).
static PHINode *Create(Type *Ty, unsigned NumReservedValues,
@@ -2834,8 +2815,6 @@ protected:
LandingPadInst *cloneImpl() const;
public:
- void *operator new(size_t, unsigned) = delete;
-
/// Constructors - NumReservedClauses is a hint for the number of incoming
/// clauses that this landingpad will have (use 0 if you really have no idea).
static LandingPadInst *Create(Type *RetTy, unsigned NumReservedClauses,
@@ -3134,8 +3113,6 @@ protected:
SwitchInst *cloneImpl() const;
public:
- void *operator new(size_t, unsigned) = delete;
-
// -2
static const unsigned DefaultPseudoIndex = static_cast<unsigned>(~0L-1);
@@ -3489,8 +3466,6 @@ protected:
IndirectBrInst *cloneImpl() const;
public:
- void *operator new(size_t, unsigned) = delete;
-
static IndirectBrInst *Create(Value *Address, unsigned NumDests,
Instruction *InsertBefore = nullptr) {
return new IndirectBrInst(Address, NumDests, InsertBefore);
@@ -4173,8 +4148,6 @@ protected:
CatchSwitchInst *cloneImpl() const;
public:
- void *operator new(size_t, unsigned) = delete;
-
static CatchSwitchInst *Create(Value *ParentPad, BasicBlock *UnwindDest,
unsigned NumHandlers,
const Twine &NameStr = "",
@@ -4609,8 +4582,6 @@ public:
return User::operator new(s, 0);
}
- void *operator new(size_t, unsigned) = delete;
-
unsigned getNumSuccessors() const { return 0; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index 2ae98d9e35b0..e0dd3ca7d01e 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -205,25 +205,91 @@ namespace llvm {
};
/// This class represents atomic memcpy intrinsic
- /// TODO: Integrate this class into MemIntrinsic hierarchy.
- class ElementAtomicMemCpyInst : public IntrinsicInst {
+ /// TODO: Integrate this class into MemIntrinsic hierarchy; for now this is
+ /// C&P of all methods from that hierarchy
+ class ElementUnorderedAtomicMemCpyInst : public IntrinsicInst {
+ private:
+ enum { ARG_DEST = 0, ARG_SOURCE = 1, ARG_LENGTH = 2, ARG_ELEMENTSIZE = 3 };
+
public:
- Value *getRawDest() const { return getArgOperand(0); }
- Value *getRawSource() const { return getArgOperand(1); }
+ Value *getRawDest() const {
+ return const_cast<Value *>(getArgOperand(ARG_DEST));
+ }
+ const Use &getRawDestUse() const { return getArgOperandUse(ARG_DEST); }
+ Use &getRawDestUse() { return getArgOperandUse(ARG_DEST); }
+
+ /// Return the arguments to the instruction.
+ Value *getRawSource() const {
+ return const_cast<Value *>(getArgOperand(ARG_SOURCE));
+ }
+ const Use &getRawSourceUse() const { return getArgOperandUse(ARG_SOURCE); }
+ Use &getRawSourceUse() { return getArgOperandUse(ARG_SOURCE); }
+
+ Value *getLength() const {
+ return const_cast<Value *>(getArgOperand(ARG_LENGTH));
+ }
+ const Use &getLengthUse() const { return getArgOperandUse(ARG_LENGTH); }
+ Use &getLengthUse() { return getArgOperandUse(ARG_LENGTH); }
+
+ bool isVolatile() const { return false; }
+
+ Value *getRawElementSizeInBytes() const {
+ return const_cast<Value *>(getArgOperand(ARG_ELEMENTSIZE));
+ }
+
+ ConstantInt *getElementSizeInBytesCst() const {
+ return cast<ConstantInt>(getRawElementSizeInBytes());
+ }
+
+ uint32_t getElementSizeInBytes() const {
+ return getElementSizeInBytesCst()->getZExtValue();
+ }
+
+ /// This is just like getRawDest, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getDest() const { return getRawDest()->stripPointerCasts(); }
+
+ /// This is just like getRawSource, but it strips off any cast
+ /// instructions that feed it, giving the original input. The returned
+ /// value is guaranteed to be a pointer.
+ Value *getSource() const { return getRawSource()->stripPointerCasts(); }
+
+ unsigned getDestAddressSpace() const {
+ return cast<PointerType>(getRawDest()->getType())->getAddressSpace();
+ }
- Value *getNumElements() const { return getArgOperand(2); }
- void setNumElements(Value *V) { setArgOperand(2, V); }
+ unsigned getSourceAddressSpace() const {
+ return cast<PointerType>(getRawSource()->getType())->getAddressSpace();
+ }
- uint64_t getSrcAlignment() const { return getParamAlignment(0); }
- uint64_t getDstAlignment() const { return getParamAlignment(1); }
+ /// Set the specified arguments of the instruction.
+ void setDest(Value *Ptr) {
+ assert(getRawDest()->getType() == Ptr->getType() &&
+ "setDest called with pointer of wrong type!");
+ setArgOperand(ARG_DEST, Ptr);
+ }
+
+ void setSource(Value *Ptr) {
+ assert(getRawSource()->getType() == Ptr->getType() &&
+ "setSource called with pointer of wrong type!");
+ setArgOperand(ARG_SOURCE, Ptr);
+ }
+
+ void setLength(Value *L) {
+ assert(getLength()->getType() == L->getType() &&
+ "setLength called with value of wrong type!");
+ setArgOperand(ARG_LENGTH, L);
+ }
- uint64_t getElementSizeInBytes() const {
- Value *Arg = getArgOperand(3);
- return cast<ConstantInt>(Arg)->getZExtValue();
+ void setElementSizeInBytes(Constant *V) {
+ assert(V->getType() == Type::getInt8Ty(getContext()) &&
+ "setElementSizeInBytes called with value of wrong type!");
+ setArgOperand(ARG_ELEMENTSIZE, V);
}
static inline bool classof(const IntrinsicInst *I) {
- return I->getIntrinsicID() == Intrinsic::memcpy_element_atomic;
+ return I->getIntrinsicID() == Intrinsic::memcpy_element_unordered_atomic;
}
static inline bool classof(const Value *V) {
return isa<IntrinsicInst>(V) && classof(cast<IntrinsicInst>(V));
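// Editor's illustrative sketch, not part of the vendor patch: the classof
// hooks above make the renamed intrinsic class usable with dyn_cast.
// bytesPerElement is a hypothetical helper.
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;
static uint32_t bytesPerElement(const Instruction &I) {
  if (const auto *AMI = dyn_cast<ElementUnorderedAtomicMemCpyInst>(&I))
    return AMI->getElementSizeInBytes();
  return 0; // not an element-wise unordered-atomic memcpy
}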
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 291d16fb0d9b..45936a6e9b66 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -862,11 +862,16 @@ def int_xray_customevent : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
//===------ Memory intrinsics with element-wise atomicity guarantees ------===//
//
-def int_memcpy_element_atomic : Intrinsic<[],
- [llvm_anyptr_ty, llvm_anyptr_ty,
- llvm_i64_ty, llvm_i32_ty],
- [IntrArgMemOnly, NoCapture<0>, NoCapture<1>,
- WriteOnly<0>, ReadOnly<1>]>;
+// @llvm.memcpy.element.unordered.atomic.*(dest, src, length, elementsize)
+def int_memcpy_element_unordered_atomic
+ : Intrinsic<[],
+ [
+ llvm_anyptr_ty, llvm_anyptr_ty, llvm_anyint_ty, llvm_i32_ty
+ ],
+ [
+ IntrArgMemOnly, NoCapture<0>, NoCapture<1>, WriteOnly<0>,
+ ReadOnly<1>
+ ]>;
//===------------------------ Reduction Intrinsics ------------------------===//
//
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index 144e45f18d2c..b43d58865862 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -32,6 +32,7 @@
#include <cstdint>
#include <map>
#include <memory>
+#include <set>
#include <string>
#include <utility>
#include <vector>
@@ -542,6 +543,9 @@ private:
/// considered live.
bool WithGlobalValueDeadStripping = false;
+ std::set<std::string> CfiFunctionDefs;
+ std::set<std::string> CfiFunctionDecls;
+
// YAML I/O support.
friend yaml::MappingTraits<ModuleSummaryIndex>;
@@ -567,6 +571,7 @@ public:
bool isGlobalValueLive(const GlobalValueSummary *GVS) const {
return !WithGlobalValueDeadStripping || GVS->isLive();
}
+ bool isGUIDLive(GlobalValue::GUID GUID) const;
/// Return a ValueInfo for GUID if it exists, otherwise return ValueInfo().
ValueInfo getValueInfo(GlobalValue::GUID GUID) const {
@@ -592,6 +597,12 @@ public:
return I == OidGuidMap.end() ? 0 : I->second;
}
+ std::set<std::string> &cfiFunctionDefs() { return CfiFunctionDefs; }
+ const std::set<std::string> &cfiFunctionDefs() const { return CfiFunctionDefs; }
+
+ std::set<std::string> &cfiFunctionDecls() { return CfiFunctionDecls; }
+ const std::set<std::string> &cfiFunctionDecls() const { return CfiFunctionDecls; }
+
/// Add a global value summary for a value of the given name.
void addGlobalValueSummary(StringRef ValueName,
std::unique_ptr<GlobalValueSummary> Summary) {
@@ -691,14 +702,13 @@ public:
return Pair.first;
}
- /// Add a new module path with the given \p Hash, mapped to the given \p
- /// ModID, and return an iterator to the entry in the index.
- ModulePathStringTableTy::iterator
- addModulePath(StringRef ModPath, uint64_t ModId,
- ModuleHash Hash = ModuleHash{{0}}) {
- return ModulePathStringTable.insert(std::make_pair(
- ModPath,
- std::make_pair(ModId, Hash))).first;
+ typedef ModulePathStringTableTy::value_type ModuleInfo;
+
+ /// Add a new module with the given \p Hash, mapped to the given \p
+ /// ModID, and return a reference to the module.
+ ModuleInfo *addModule(StringRef ModPath, uint64_t ModId,
+ ModuleHash Hash = ModuleHash{{0}}) {
+ return &*ModulePathStringTable.insert({ModPath, {ModId, Hash}}).first;
}
/// Check if the given Module has any functions available for exporting
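// Editor's illustrative sketch, not part of the vendor patch: the renamed
// addModule() entry point and the new CFI function-name sets. recordCfiDef,
// the module id 0 and the names passed in are hypothetical.
#include "llvm/IR/ModuleSummaryIndex.h"
using namespace llvm;
static void recordCfiDef(ModuleSummaryIndex &Index, StringRef ModPath,
                         StringRef FnName) {
  Index.addModule(ModPath, /*ModId=*/0);        // was addModulePath()
  Index.cfiFunctionDefs().insert(FnName.str()); // serialized as FS_CFI_FUNCTION_DEFS
}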
diff --git a/include/llvm/IR/ModuleSummaryIndexYAML.h b/include/llvm/IR/ModuleSummaryIndexYAML.h
index 891d84c2dbca..8950c527cc18 100644
--- a/include/llvm/IR/ModuleSummaryIndexYAML.h
+++ b/include/llvm/IR/ModuleSummaryIndexYAML.h
@@ -188,6 +188,7 @@ template <> struct MappingTraits<FunctionSummaryYaml> {
LLVM_YAML_IS_STRING_MAP(TypeIdSummary)
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)
+LLVM_YAML_IS_SEQUENCE_VECTOR(std::string)
namespace llvm {
namespace yaml {
@@ -240,6 +241,23 @@ template <> struct MappingTraits<ModuleSummaryIndex> {
io.mapOptional("TypeIdMap", index.TypeIdMap);
io.mapOptional("WithGlobalValueDeadStripping",
index.WithGlobalValueDeadStripping);
+
+ if (io.outputting()) {
+ std::vector<std::string> CfiFunctionDefs(index.CfiFunctionDefs.begin(),
+ index.CfiFunctionDefs.end());
+ io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
+ std::vector<std::string> CfiFunctionDecls(index.CfiFunctionDecls.begin(),
+ index.CfiFunctionDecls.end());
+ io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
+ } else {
+ std::vector<std::string> CfiFunctionDefs;
+ io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
+ index.CfiFunctionDefs = {CfiFunctionDefs.begin(), CfiFunctionDefs.end()};
+ std::vector<std::string> CfiFunctionDecls;
+ io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
+ index.CfiFunctionDecls = {CfiFunctionDecls.begin(),
+ CfiFunctionDecls.end()};
+ }
}
};
diff --git a/include/llvm/IR/Operator.h b/include/llvm/IR/Operator.h
index 49fa6a6a877a..6eb5998478c6 100644
--- a/include/llvm/IR/Operator.h
+++ b/include/llvm/IR/Operator.h
@@ -35,7 +35,6 @@ public:
Operator() = delete;
~Operator() = delete;
- void *operator new(size_t, unsigned) = delete;
void *operator new(size_t s) = delete;
/// Return the opcode for this Instruction or ConstantExpr.
diff --git a/include/llvm/IR/PatternMatch.h b/include/llvm/IR/PatternMatch.h
index 542570aaaa24..015a17e8e7ca 100644
--- a/include/llvm/IR/PatternMatch.h
+++ b/include/llvm/IR/PatternMatch.h
@@ -1027,7 +1027,7 @@ struct MaxMin_match {
(TrueVal != RHS || FalseVal != LHS))
return false;
typename CmpInst_t::Predicate Pred =
- LHS == TrueVal ? Cmp->getPredicate() : Cmp->getSwappedPredicate();
+ LHS == TrueVal ? Cmp->getPredicate() : Cmp->getInversePredicate();
// Does "(x pred y) ? x : y" represent the desired max/min operation?
if (!Pred_t::match(Pred))
return false;
@@ -1138,7 +1138,7 @@ inline MaxMin_match<FCmpInst, LHS, RHS, ofmax_pred_ty> m_OrdFMax(const LHS &L,
/// semantics. In the presence of 'NaN' we have to preserve the original
/// select(fcmp(olt/le, L, R), L, R) semantics matched by this predicate.
///
-/// max(L, R) iff L and R are not NaN
+/// min(L, R) iff L and R are not NaN
/// m_OrdFMin(L, R) = R iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
@@ -1154,13 +1154,28 @@ inline MaxMin_match<FCmpInst, LHS, RHS, ofmin_pred_ty> m_OrdFMin(const LHS &L,
/// select(fcmp(ugt/ge, L, R), L, R) semantics matched by this predicate.
///
/// max(L, R) iff L and R are not NaN
-/// m_UnordFMin(L, R) = L iff L or R are NaN
+/// m_UnordFMax(L, R) = L iff L or R are NaN
template <typename LHS, typename RHS>
inline MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>
m_UnordFMax(const LHS &L, const RHS &R) {
return MaxMin_match<FCmpInst, LHS, RHS, ufmax_pred_ty>(L, R);
}
+/// \brief Match an 'unordered' floating point minimum function.
+/// Floating point has one special value 'NaN'. Therefore, there is no total
+/// order. However, if we can ignore the 'NaN' value (for example, because of a
+/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
+/// semantics. In the presence of 'NaN' we have to preserve the original
+/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
+///
+/// min(L, R) iff L and R are not NaN
+/// m_UnordFMin(L, R) = L iff L or R are NaN
+template <typename LHS, typename RHS>
+inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
+m_UnordFMin(const LHS &L, const RHS &R) {
+ return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
+}
+
//===----------------------------------------------------------------------===//
// Matchers for overflow check patterns: e.g. (a + b) u< a
//
@@ -1207,21 +1222,6 @@ m_UAddWithOverflow(const LHS_t &L, const RHS_t &R, const Sum_t &S) {
return UAddWithOverflow_match<LHS_t, RHS_t, Sum_t>(L, R, S);
}
-/// \brief Match an 'unordered' floating point minimum function.
-/// Floating point has one special value 'NaN'. Therefore, there is no total
-/// order. However, if we can ignore the 'NaN' value (for example, because of a
-/// 'no-nans-float-math' flag) a combination of a fcmp and select has 'minimum'
-/// semantics. In the presence of 'NaN' we have to preserve the original
-/// select(fcmp(ult/le, L, R), L, R) semantics matched by this predicate.
-///
-/// max(L, R) iff L and R are not NaN
-/// m_UnordFMin(L, R) = L iff L or R are NaN
-template <typename LHS, typename RHS>
-inline MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>
-m_UnordFMin(const LHS &L, const RHS &R) {
- return MaxMin_match<FCmpInst, LHS, RHS, ufmin_pred_ty>(L, R);
-}
-
template <typename Opnd_t> struct Argument_match {
unsigned OpI;
Opnd_t Val;
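
A minimal sketch (not part of the imported patch) of how the relocated m_UnordFMin matcher and its ordered sibling are typically consumed; `V` stands for an arbitrary llvm::Value* supplied by the caller and `isFMinIdiom` is a hypothetical helper name.

#include "llvm/IR/PatternMatch.h"
using namespace llvm;
using namespace llvm::PatternMatch;

// Returns true if V is a select/fcmp idiom for floating-point minimum.
static bool isFMinIdiom(Value *V) {
  Value *L, *R;
  // Ordered form: select(fcmp(olt/ole, L, R), L, R).
  if (match(V, m_OrdFMin(m_Value(L), m_Value(R))))
    return true;
  // Unordered form: select(fcmp(ult/ule, L, R), L, R).
  return match(V, m_UnordFMin(m_Value(L), m_Value(R)));
}
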
diff --git a/include/llvm/LTO/LTO.h b/include/llvm/LTO/LTO.h
index 774e144b3ef0..d678a68ed860 100644
--- a/include/llvm/LTO/LTO.h
+++ b/include/llvm/LTO/LTO.h
@@ -281,6 +281,16 @@ private:
bool HasModule = false;
std::unique_ptr<Module> CombinedModule;
std::unique_ptr<IRMover> Mover;
+
+ // This stores the information about a regular LTO module that we have added
+ // to the link. It will either be linked immediately (for modules without
+ // summaries) or after summary-based dead stripping (for modules with
+ // summaries).
+ struct AddedModule {
+ std::unique_ptr<Module> M;
+ std::vector<GlobalValue *> Keep;
+ };
+ std::vector<AddedModule> ModsWithSummaries;
} RegularLTO;
struct ThinLTOState {
@@ -303,9 +313,10 @@ private:
/// The unmangled name of the global.
std::string IRName;
- /// Keep track if the symbol is visible outside of ThinLTO (i.e. in
- /// either a regular object or the regular LTO partition).
- bool VisibleOutsideThinLTO = false;
+ /// Keep track if the symbol is visible outside of a module with a summary
+ /// (i.e. in either a regular object or a regular LTO module without a
+ /// summary).
+ bool VisibleOutsideSummary = false;
bool UnnamedAddr = true;
@@ -339,8 +350,9 @@ private:
// Global mapping from mangled symbol names to resolutions.
StringMap<GlobalResolution> GlobalResolutions;
- void addSymbolToGlobalRes(const InputFile::Symbol &Sym, SymbolResolution Res,
- unsigned Partition);
+ void addModuleToGlobalRes(ArrayRef<InputFile::Symbol> Syms,
+ ArrayRef<SymbolResolution> Res, unsigned Partition,
+ bool InSummary);
// These functions take a range of symbol resolutions [ResI, ResE) and consume
// the resolutions used by a single input module by incrementing ResI. After
@@ -348,10 +360,13 @@ private:
// the remaining modules in the InputFile.
Error addModule(InputFile &Input, unsigned ModI,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
- Error addRegularLTO(BitcodeModule BM,
- ArrayRef<InputFile::Symbol> Syms,
- const SymbolResolution *&ResI,
- const SymbolResolution *ResE);
+
+ Expected<RegularLTOState::AddedModule>
+ addRegularLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
+ const SymbolResolution *&ResI, const SymbolResolution *ResE);
+ Error linkRegularLTO(RegularLTOState::AddedModule Mod,
+ bool LivenessFromIndex);
+
Error addThinLTO(BitcodeModule BM, ArrayRef<InputFile::Symbol> Syms,
const SymbolResolution *&ResI, const SymbolResolution *ResE);
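
A hedged sketch of the public driver flow this bookkeeping supports: inputs are added up front, and regular LTO modules that carry summaries are only linked inside run() once summary-based liveness is known. `ResolveSymbols` and `OpenStream` are placeholders for driver-supplied pieces, not part of the patch.

#include "llvm/LTO/LTO.h"

llvm::Error
linkAll(llvm::lto::Config Conf,
        std::vector<std::unique_ptr<llvm::lto::InputFile>> Inputs) {
  llvm::lto::LTO Link(std::move(Conf));
  for (std::unique_ptr<llvm::lto::InputFile> &F : Inputs) {
    // One resolution per symbol of F, in symbol order (driver-provided).
    std::vector<llvm::lto::SymbolResolution> Res = ResolveSymbols(*F);
    if (llvm::Error E = Link.add(std::move(F), Res))
      return E;
  }
  // OpenStream is an AddStreamFn that opens the output object files.
  return Link.run(OpenStream);
}
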
diff --git a/include/llvm/LTO/legacy/LTOModule.h b/include/llvm/LTO/legacy/LTOModule.h
index 2a8758587a11..017e223ed8a6 100644
--- a/include/llvm/LTO/legacy/LTOModule.h
+++ b/include/llvm/LTO/legacy/LTOModule.h
@@ -158,7 +158,7 @@ public:
private:
/// Parse metadata from the module
- // FIXME: it only parses "Linker Options" metadata at the moment
+ // FIXME: it only parses "llvm.linker.options" metadata at the moment
void parseMetadata();
/// Parse the symbols from the module and module-level ASM and add them to
diff --git a/include/llvm/MC/MCSymbolWasm.h b/include/llvm/MC/MCSymbolWasm.h
index 7d661ccc5de7..1b87095552d6 100644
--- a/include/llvm/MC/MCSymbolWasm.h
+++ b/include/llvm/MC/MCSymbolWasm.h
@@ -13,6 +13,7 @@
#include "llvm/MC/MCSymbol.h"
namespace llvm {
+
class MCSymbolWasm : public MCSymbol {
private:
bool IsFunction = false;
@@ -52,6 +53,7 @@ public:
Params = std::move(Pars);
}
};
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_MC_MCSYMBOLWASM_H
diff --git a/include/llvm/MC/MCWasmObjectWriter.h b/include/llvm/MC/MCWasmObjectWriter.h
index c250d3bf03fb..bebc0a825810 100644
--- a/include/llvm/MC/MCWasmObjectWriter.h
+++ b/include/llvm/MC/MCWasmObjectWriter.h
@@ -12,20 +12,12 @@
#include "llvm/ADT/Triple.h"
#include "llvm/BinaryFormat/Wasm.h"
-#include "llvm/MC/MCValue.h"
#include "llvm/Support/DataTypes.h"
-#include "llvm/Support/raw_ostream.h"
-#include <vector>
namespace llvm {
-class MCAssembler;
-class MCContext;
+
class MCFixup;
-class MCFragment;
class MCObjectWriter;
-class MCSectionWasm;
-class MCSymbol;
-class MCSymbolWasm;
class MCValue;
class raw_pwrite_stream;
@@ -38,8 +30,8 @@ protected:
public:
virtual ~MCWasmObjectTargetWriter();
- virtual unsigned getRelocType(MCContext &Ctx, const MCValue &Target,
- const MCFixup &Fixup, bool IsPCRel) const = 0;
+ virtual unsigned getRelocType(const MCValue &Target,
+ const MCFixup &Fixup) const = 0;
/// \name Accessors
/// @{
@@ -54,6 +46,7 @@ public:
/// \returns The constructed object writer.
MCObjectWriter *createWasmObjectWriter(MCWasmObjectTargetWriter *MOTW,
raw_pwrite_stream &OS);
+
} // End llvm namespace
#endif
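
Illustrative only: a hypothetical target writer adapted to the narrowed pure-virtual signature above. The class name and the relocation constant chosen are stand-ins, not taken from the patch.

#include "llvm/BinaryFormat/Wasm.h"
#include "llvm/MC/MCWasmObjectWriter.h"

class MyWasmObjectWriter : public llvm::MCWasmObjectTargetWriter {
public:
  explicit MyWasmObjectWriter(bool Is64Bit)
      : MCWasmObjectTargetWriter(Is64Bit) {}

  // MCContext and the PC-relative flag are no longer passed in.
  unsigned getRelocType(const llvm::MCValue &Target,
                        const llvm::MCFixup &Fixup) const override {
    // Map the fixup kind to a wasm relocation type; constant shown for
    // illustration only.
    return llvm::wasm::R_WEBASSEMBLY_FUNCTION_INDEX_LEB;
  }
};
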
diff --git a/include/llvm/Object/ArchiveWriter.h b/include/llvm/Object/ArchiveWriter.h
index 3e84a5814d79..1ed758d40df2 100644
--- a/include/llvm/Object/ArchiveWriter.h
+++ b/include/llvm/Object/ArchiveWriter.h
@@ -22,6 +22,7 @@ namespace llvm {
struct NewArchiveMember {
std::unique_ptr<MemoryBuffer> Buf;
+ StringRef MemberName;
sys::TimePoint<std::chrono::seconds> ModTime;
unsigned UID = 0, GID = 0, Perms = 0644;
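
A small sketch of the new field in use; `Buffer` is a hypothetical, already-loaded MemoryBuffer. With MemberName stored on the member, the archive writer no longer has to derive the name from the buffer identifier.

llvm::NewArchiveMember M;
M.Buf = std::move(Buffer);
M.MemberName = M.Buf->getBufferIdentifier(); // or any explicit member name
M.Perms = 0644;
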
diff --git a/include/llvm/Object/WindowsResource.h b/include/llvm/Object/WindowsResource.h
index c5189329d3ec..4839013c8228 100644
--- a/include/llvm/Object/WindowsResource.h
+++ b/include/llvm/Object/WindowsResource.h
@@ -118,7 +118,7 @@ public:
class TreeNode;
WindowsResourceParser();
Error parse(WindowsResource *WR);
- void printTree() const;
+ void printTree(raw_ostream &OS) const;
const TreeNode &getTree() const { return Root; }
const ArrayRef<std::vector<uint8_t>> getData() const { return Data; }
const ArrayRef<std::vector<UTF16>> getStringTable() const {
@@ -159,14 +159,16 @@ public:
TreeNode(uint16_t MajorVersion, uint16_t MinorVersion,
uint32_t Characteristics);
- void addEntry(const ResourceEntryRef &Entry);
- TreeNode &addTypeNode(const ResourceEntryRef &Entry);
- TreeNode &addNameNode(const ResourceEntryRef &Entry);
+ void addEntry(const ResourceEntryRef &Entry, bool &IsNewTypeString,
+ bool &IsNewNameString);
+ TreeNode &addTypeNode(const ResourceEntryRef &Entry, bool &IsNewTypeString);
+ TreeNode &addNameNode(const ResourceEntryRef &Entry, bool &IsNewNameString);
TreeNode &addLanguageNode(const ResourceEntryRef &Entry);
TreeNode &addChild(uint32_t ID, bool IsDataNode = false,
uint16_t MajorVersion = 0, uint16_t MinorVersion = 0,
uint32_t Characteristics = 0);
- TreeNode &addChild(ArrayRef<UTF16> NameRef);
+ TreeNode &addChild(ArrayRef<UTF16> NameRef, bool &IsNewString);
+
bool IsDataNode = false;
uint32_t StringIndex;
uint32_t DataIndex;
diff --git a/include/llvm/ObjectYAML/COFFYAML.h b/include/llvm/ObjectYAML/COFFYAML.h
index 1b5f7b00239a..719cb1acf6ef 100644
--- a/include/llvm/ObjectYAML/COFFYAML.h
+++ b/include/llvm/ObjectYAML/COFFYAML.h
@@ -16,6 +16,8 @@
#include "llvm/ADT/Optional.h"
#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/ObjectYAML/CodeViewYAMLDebugSections.h"
+#include "llvm/ObjectYAML/CodeViewYAMLTypes.h"
#include "llvm/ObjectYAML/YAML.h"
namespace llvm {
@@ -56,6 +58,8 @@ namespace COFFYAML {
COFF::section Header;
unsigned Alignment = 0;
yaml::BinaryRef SectionData;
+ std::vector<CodeViewYAML::YAMLDebugSubsection> DebugS;
+ std::vector<CodeViewYAML::LeafRecord> DebugT;
std::vector<Relocation> Relocations;
StringRef Name;
Section();
diff --git a/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h b/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h
index faa3ed8a6c52..8180e0fc83f4 100644
--- a/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h
+++ b/include/llvm/ObjectYAML/CodeViewYAMLDebugSections.h
@@ -28,6 +28,8 @@ class DebugStringTableSubsectionRef;
class DebugChecksumsSubsectionRef;
class DebugStringTableSubsection;
class DebugChecksumsSubsection;
+class StringsAndChecksums;
+class StringsAndChecksumsRef;
}
namespace CodeViewYAML {
@@ -103,25 +105,24 @@ struct InlineeInfo {
struct YAMLDebugSubsection {
static Expected<YAMLDebugSubsection>
- fromCodeViewSubection(const codeview::DebugStringTableSubsectionRef &Strings,
- const codeview::DebugChecksumsSubsectionRef &Checksums,
+ fromCodeViewSubection(const codeview::StringsAndChecksumsRef &SC,
const codeview::DebugSubsectionRecord &SS);
std::shared_ptr<detail::YAMLSubsectionBase> Subsection;
};
-Expected<std::vector<std::unique_ptr<codeview::DebugSubsection>>>
+struct DebugSubsectionState {};
+
+Expected<std::vector<std::shared_ptr<codeview::DebugSubsection>>>
toCodeViewSubsectionList(BumpPtrAllocator &Allocator,
ArrayRef<YAMLDebugSubsection> Subsections,
- codeview::DebugStringTableSubsection &Strings);
-Expected<std::vector<std::unique_ptr<codeview::DebugSubsection>>>
-toCodeViewSubsectionList(
- BumpPtrAllocator &Allocator, ArrayRef<YAMLDebugSubsection> Subsections,
- std::unique_ptr<codeview::DebugStringTableSubsection> &TakeStrings,
- codeview::DebugStringTableSubsection *StringsRef);
-
-std::unique_ptr<codeview::DebugStringTableSubsection>
-findStringTable(ArrayRef<YAMLDebugSubsection> Sections);
+ const codeview::StringsAndChecksums &SC);
+
+std::vector<YAMLDebugSubsection>
+fromDebugS(ArrayRef<uint8_t> Data, const codeview::StringsAndChecksumsRef &SC);
+
+void initializeStringsAndChecksums(ArrayRef<YAMLDebugSubsection> Sections,
+ codeview::StringsAndChecksums &SC);
} // namespace CodeViewYAML
} // namespace llvm
diff --git a/include/llvm/ObjectYAML/CodeViewYAMLTypes.h b/include/llvm/ObjectYAML/CodeViewYAMLTypes.h
index 91b75aabe7a5..e97d5f92bf7f 100644
--- a/include/llvm/ObjectYAML/CodeViewYAMLTypes.h
+++ b/include/llvm/ObjectYAML/CodeViewYAMLTypes.h
@@ -41,6 +41,9 @@ struct LeafRecord {
codeview::CVType toCodeViewRecord(codeview::TypeTableBuilder &TS) const;
static Expected<LeafRecord> fromCodeViewRecord(codeview::CVType Type);
};
+
+std::vector<LeafRecord> fromDebugT(ArrayRef<uint8_t> DebugT);
+ArrayRef<uint8_t> toDebugT(ArrayRef<LeafRecord>, BumpPtrAllocator &Alloc);
} // namespace CodeViewYAML
} // namespace llvm
diff --git a/include/llvm/Option/Arg.h b/include/llvm/Option/Arg.h
index 99d329693de2..c519a4a824c5 100644
--- a/include/llvm/Option/Arg.h
+++ b/include/llvm/Option/Arg.h
@@ -1,4 +1,4 @@
-//===--- Arg.h - Parsed Argument Classes ------------------------*- C++ -*-===//
+//===- Arg.h - Parsed Argument Classes --------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -21,7 +21,11 @@
#include <string>
namespace llvm {
+
+class raw_ostream;
+
namespace opt {
+
class ArgList;
/// \brief A concrete instance of a particular driver option.
@@ -29,9 +33,6 @@ class ArgList;
/// The Arg class encodes just enough information to be able to
/// derive the argument values efficiently.
class Arg {
- Arg(const Arg &) = delete;
- void operator=(const Arg &) = delete;
-
private:
/// \brief The option this argument is an instance of.
const Option Opt;
@@ -65,6 +66,8 @@ public:
const char *Value0, const Arg *BaseArg = nullptr);
Arg(const Option Opt, StringRef Spelling, unsigned Index,
const char *Value0, const char *Value1, const Arg *BaseArg = nullptr);
+ Arg(const Arg &) = delete;
+ Arg &operator=(const Arg &) = delete;
~Arg();
const Option &getOption() const { return Opt; }
@@ -89,6 +92,7 @@ public:
void claim() const { getBaseArg().Claimed = true; }
unsigned getNumValues() const { return Values.size(); }
+
const char *getValue(unsigned N = 0) const {
return Values[N];
}
@@ -122,6 +126,7 @@ public:
};
} // end namespace opt
+
} // end namespace llvm
-#endif
+#endif // LLVM_OPTION_ARG_H
diff --git a/include/llvm/Option/ArgList.h b/include/llvm/Option/ArgList.h
index 6a92dd01e911..aaea68bf8e27 100644
--- a/include/llvm/Option/ArgList.h
+++ b/include/llvm/Option/ArgList.h
@@ -1,4 +1,4 @@
-//===--- ArgList.h - Argument List Management -------------------*- C++ -*-===//
+//===- ArgList.h - Argument List Management ---------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,7 +10,9 @@
#ifndef LLVM_OPTION_ARGLIST_H
#define LLVM_OPTION_ARGLIST_H
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/iterator_range.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -18,15 +20,21 @@
#include "llvm/Option/Arg.h"
#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/Option.h"
+#include <algorithm>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
#include <list>
#include <memory>
#include <string>
+#include <utility>
#include <vector>
namespace llvm {
+
+class raw_ostream;
+
namespace opt {
-class ArgList;
-class Option;
/// arg_iterator - Iterates through arguments stored inside an ArgList.
template<typename BaseIter, unsigned NumOptSpecifiers = 0>
@@ -59,14 +67,14 @@ class arg_iterator {
}
}
- typedef std::iterator_traits<BaseIter> Traits;
+ using Traits = std::iterator_traits<BaseIter>;
public:
- typedef typename Traits::value_type value_type;
- typedef typename Traits::reference reference;
- typedef typename Traits::pointer pointer;
- typedef std::forward_iterator_tag iterator_category;
- typedef std::ptrdiff_t difference_type;
+ using value_type = typename Traits::value_type;
+ using reference = typename Traits::reference;
+ using pointer = typename Traits::pointer;
+ using iterator_category = std::forward_iterator_tag;
+ using difference_type = std::ptrdiff_t;
arg_iterator(
BaseIter Current, BaseIter End,
@@ -111,12 +119,12 @@ public:
/// and to iterate over groups of arguments.
class ArgList {
public:
- typedef SmallVector<Arg*, 16> arglist_type;
- typedef arg_iterator<arglist_type::iterator> iterator;
- typedef arg_iterator<arglist_type::const_iterator> const_iterator;
- typedef arg_iterator<arglist_type::reverse_iterator> reverse_iterator;
- typedef arg_iterator<arglist_type::const_reverse_iterator>
- const_reverse_iterator;
+ using arglist_type = SmallVector<Arg *, 16>;
+ using iterator = arg_iterator<arglist_type::iterator>;
+ using const_iterator = arg_iterator<arglist_type::const_iterator>;
+ using reverse_iterator = arg_iterator<arglist_type::reverse_iterator>;
+ using const_reverse_iterator =
+ arg_iterator<arglist_type::const_reverse_iterator>;
template<unsigned N> using filtered_iterator =
arg_iterator<arglist_type::const_iterator, N>;
@@ -127,7 +135,7 @@ private:
/// The internal list of arguments.
arglist_type Args;
- typedef std::pair<unsigned, unsigned> OptRange;
+ using OptRange = std::pair<unsigned, unsigned>;
static OptRange emptyRange() { return {-1u, 0u}; }
/// The first and last index of each different OptSpecifier ID.
@@ -142,6 +150,7 @@ protected:
// derived objects, but can still be used by derived objects to implement
// their own special members.
ArgList() = default;
+
// Explicit move operations to ensure the container is cleared post-move
// otherwise it could lead to a double-delete in the case of moving of an
// InputArgList which deletes the contents of the container. If we could fix
@@ -152,6 +161,7 @@ protected:
RHS.Args.clear();
RHS.OptRanges.clear();
}
+
ArgList &operator=(ArgList &&RHS) {
Args = std::move(RHS.Args);
RHS.Args.clear();
@@ -159,6 +169,7 @@ protected:
RHS.OptRanges.clear();
return *this;
}
+
// Protect the dtor to ensure this type is never destroyed polymorphically.
~ArgList() = default;
@@ -380,10 +391,12 @@ private:
public:
InputArgList(const char* const *ArgBegin, const char* const *ArgEnd);
+
InputArgList(InputArgList &&RHS)
: ArgList(std::move(RHS)), ArgStrings(std::move(RHS.ArgStrings)),
SynthesizedStrings(std::move(RHS.SynthesizedStrings)),
NumInputArgStrings(RHS.NumInputArgStrings) {}
+
InputArgList &operator=(InputArgList &&RHS) {
releaseMemory();
ArgList::operator=(std::move(RHS));
@@ -392,6 +405,7 @@ public:
NumInputArgStrings = RHS.NumInputArgStrings;
return *this;
}
+
~InputArgList() { releaseMemory(); }
const char *getArgString(unsigned Index) const override {
@@ -464,7 +478,6 @@ public:
append(MakePositionalArg(BaseArg, Opt, Value));
}
-
/// AddSeparateArg - Construct a new Positional arg for the given option
/// \p Id, with the provided \p Value and append it to the argument
/// list.
@@ -473,7 +486,6 @@ public:
append(MakeSeparateArg(BaseArg, Opt, Value));
}
-
/// AddJoinedArg - Construct a new Positional arg for the given option
/// \p Id, with the provided \p Value and append it to the argument list.
void AddJoinedArg(const Arg *BaseArg, const Option Opt,
@@ -481,7 +493,6 @@ public:
append(MakeJoinedArg(BaseArg, Opt, Value));
}
-
/// MakeFlagArg - Construct a new FlagArg for the given option \p Id.
Arg *MakeFlagArg(const Arg *BaseArg, const Option Opt) const;
@@ -504,6 +515,7 @@ public:
};
} // end namespace opt
+
} // end namespace llvm
-#endif
+#endif // LLVM_OPTION_ARGLIST_H
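
The aliases above are exercised the usual way; a hedged sketch, where `MyOptTable`, `OPT_foo` and `Argv` are placeholders from a hypothetical driver rather than names in this patch.

unsigned MissingIndex = 0, MissingCount = 0;
llvm::opt::InputArgList Args =
    MyOptTable.ParseArgs(Argv, MissingIndex, MissingCount);
// filtered() yields only the occurrences of the requested option.
for (const llvm::opt::Arg *A : Args.filtered(OPT_foo))
  llvm::outs() << A->getValue() << "\n";
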
diff --git a/include/llvm/Option/OptSpecifier.h b/include/llvm/Option/OptSpecifier.h
index 0b2aaaec3afc..84c3cf8ad534 100644
--- a/include/llvm/Option/OptSpecifier.h
+++ b/include/llvm/Option/OptSpecifier.h
@@ -1,4 +1,4 @@
-//===--- OptSpecifier.h - Option Specifiers ---------------------*- C++ -*-===//
+//===- OptSpecifier.h - Option Specifiers -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -10,32 +10,30 @@
#ifndef LLVM_OPTION_OPTSPECIFIER_H
#define LLVM_OPTION_OPTSPECIFIER_H
-#include "llvm/Support/Compiler.h"
-
namespace llvm {
namespace opt {
- class Option;
- /// OptSpecifier - Wrapper class for abstracting references to option IDs.
- class OptSpecifier {
- unsigned ID;
+class Option;
+
+/// OptSpecifier - Wrapper class for abstracting references to option IDs.
+class OptSpecifier {
+ unsigned ID = 0;
- private:
- explicit OptSpecifier(bool) = delete;
+public:
+ OptSpecifier() = default;
+ explicit OptSpecifier(bool) = delete;
+ /*implicit*/ OptSpecifier(unsigned ID) : ID(ID) {}
+ /*implicit*/ OptSpecifier(const Option *Opt);
- public:
- OptSpecifier() : ID(0) {}
- /*implicit*/ OptSpecifier(unsigned ID) : ID(ID) {}
- /*implicit*/ OptSpecifier(const Option *Opt);
+ bool isValid() const { return ID != 0; }
- bool isValid() const { return ID != 0; }
+ unsigned getID() const { return ID; }
- unsigned getID() const { return ID; }
+ bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
+ bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
+};
- bool operator==(OptSpecifier Opt) const { return ID == Opt.getID(); }
- bool operator!=(OptSpecifier Opt) const { return !(*this == Opt); }
- };
-}
-}
+} // end namespace opt
+} // end namespace llvm
-#endif
+#endif // LLVM_OPTION_OPTSPECIFIER_H
diff --git a/include/llvm/Option/OptTable.h b/include/llvm/Option/OptTable.h
index 8a323a255ca1..e0169b927319 100644
--- a/include/llvm/Option/OptTable.h
+++ b/include/llvm/Option/OptTable.h
@@ -1,4 +1,4 @@
-//===--- OptTable.h - Option Table ------------------------------*- C++ -*-===//
+//===- OptTable.h - Option Table --------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -11,12 +11,19 @@
#define LLVM_OPTION_OPTTABLE_H
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Option/OptSpecifier.h"
+#include <cassert>
+#include <string>
+#include <vector>
namespace llvm {
+
class raw_ostream;
+
namespace opt {
+
class Arg;
class ArgList;
class InputArgList;
@@ -53,12 +60,12 @@ private:
ArrayRef<Info> OptionInfos;
bool IgnoreCase;
- unsigned TheInputOptionID;
- unsigned TheUnknownOptionID;
+ unsigned TheInputOptionID = 0;
+ unsigned TheUnknownOptionID = 0;
/// The index of the first option which can be parsed (i.e., is not a
/// special option like 'input' or 'unknown', and is not an option group).
- unsigned FirstSearchableIndex;
+ unsigned FirstSearchableIndex = 0;
/// The union of all option prefixes. If an argument does not begin with
/// one of these, it is an input.
@@ -176,7 +183,9 @@ public:
void PrintHelp(raw_ostream &OS, const char *Name,
const char *Title, bool ShowHidden = false) const;
};
+
} // end namespace opt
+
} // end namespace llvm
-#endif
+#endif // LLVM_OPTION_OPTTABLE_H
diff --git a/include/llvm/Option/Option.h b/include/llvm/Option/Option.h
index 139f281b3c4c..c08834f90598 100644
--- a/include/llvm/Option/Option.h
+++ b/include/llvm/Option/Option.h
@@ -1,4 +1,4 @@
-//===--- Option.h - Abstract Driver Options ---------------------*- C++ -*-===//
+//===- Option.h - Abstract Driver Options -----------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -12,15 +12,23 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/OptSpecifier.h"
#include "llvm/Option/OptTable.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
+#include <string>
namespace llvm {
+
+class raw_ostream;
+
namespace opt {
+
class Arg;
class ArgList;
+
/// ArgStringList - Type used for constructing argv lists for subprocesses.
-typedef SmallVector<const char*, 16> ArgStringList;
+using ArgStringList = SmallVector<const char *, 16>;
/// Base flags for all options. Custom flags may be added after.
enum DriverFlag {
@@ -202,6 +210,7 @@ public:
};
} // end namespace opt
+
} // end namespace llvm
-#endif
+#endif // LLVM_OPTION_OPTION_H
diff --git a/include/llvm/Support/BinaryStreamArray.h b/include/llvm/Support/BinaryStreamArray.h
index 65ec15f6d9e0..3f5562ba7519 100644
--- a/include/llvm/Support/BinaryStreamArray.h
+++ b/include/llvm/Support/BinaryStreamArray.h
@@ -290,6 +290,12 @@ public:
return FixedStreamArrayIterator<T>(*this, size());
}
+ const T &front() const { return *begin(); }
+ const T &back() const {
+ FixedStreamArrayIterator<T> I = end();
+ return *(--I);
+ }
+
BinaryStreamRef getUnderlyingStream() const { return Stream; }
private:
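
The new accessors behave like their STL counterparts; a one-line sketch assuming a populated FixedStreamArray<uint32_t> named `Offsets` (the variable is illustrative only).

if (Offsets.size() > 0)
  llvm::errs() << "first=" << Offsets.front()
               << " last=" << Offsets.back() << "\n";
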
diff --git a/include/llvm/Support/DebugCounter.h b/include/llvm/Support/DebugCounter.h
index 9687cb7b9d95..a533feae7fa3 100644
--- a/include/llvm/Support/DebugCounter.h
+++ b/include/llvm/Support/DebugCounter.h
@@ -121,10 +121,10 @@ public:
Us.Counters[ID] = Val;
}
- // Dump or print the current counter set.
- LLVM_DUMP_METHOD void dump() { print(dbgs()); }
+ // Dump or print the current counter set into llvm::dbgs().
+ LLVM_DUMP_METHOD void dump() const;
- void print(raw_ostream &OS);
+ void print(raw_ostream &OS) const;
// Get the counter ID for a given named counter, or return 0 if none is found.
unsigned getCounterId(const std::string &Name) const {
diff --git a/include/llvm/Support/FormatAdapters.h b/include/llvm/Support/FormatAdapters.h
index 698e134b328d..197beb7363df 100644
--- a/include/llvm/Support/FormatAdapters.h
+++ b/include/llvm/Support/FormatAdapters.h
@@ -28,14 +28,16 @@ namespace detail {
template <typename T> class AlignAdapter final : public FormatAdapter<T> {
AlignStyle Where;
size_t Amount;
+ char Fill;
public:
- AlignAdapter(T &&Item, AlignStyle Where, size_t Amount)
- : FormatAdapter<T>(std::forward<T>(Item)), Where(Where), Amount(Amount) {}
+ AlignAdapter(T &&Item, AlignStyle Where, size_t Amount, char Fill)
+ : FormatAdapter<T>(std::forward<T>(Item)), Where(Where), Amount(Amount),
+ Fill(Fill) {}
void format(llvm::raw_ostream &Stream, StringRef Style) {
auto Adapter = detail::build_format_adapter(std::forward<T>(this->Item));
- FmtAlign(Adapter, Where, Amount).format(Stream, Style);
+ FmtAlign(Adapter, Where, Amount, Fill).format(Stream, Style);
}
};
@@ -72,8 +74,9 @@ public:
}
template <typename T>
-detail::AlignAdapter<T> fmt_align(T &&Item, AlignStyle Where, size_t Amount) {
- return detail::AlignAdapter<T>(std::forward<T>(Item), Where, Amount);
+detail::AlignAdapter<T> fmt_align(T &&Item, AlignStyle Where, size_t Amount,
+ char Fill = ' ') {
+ return detail::AlignAdapter<T>(std::forward<T>(Item), Where, Amount, Fill);
}
template <typename T>
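
With the extra parameter the pad character becomes selectable; a short sketch using formatv. Under Center alignment to width 8 the expected output would be "***hi***" (three fill characters on each side).

#include "llvm/Support/FormatAdapters.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/raw_ostream.h"

void demo() {
  // Pads "hi" to 8 columns, centered, filling with '*' instead of spaces.
  llvm::outs() << llvm::formatv(
                      "{0}", llvm::fmt_align("hi", llvm::AlignStyle::Center,
                                             8, '*'))
               << "\n";
}
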
diff --git a/include/llvm/Support/FormatCommon.h b/include/llvm/Support/FormatCommon.h
index a8c5fdeb6bff..36fbad296c3f 100644
--- a/include/llvm/Support/FormatCommon.h
+++ b/include/llvm/Support/FormatCommon.h
@@ -21,9 +21,11 @@ struct FmtAlign {
detail::format_adapter &Adapter;
AlignStyle Where;
size_t Amount;
+ char Fill;
- FmtAlign(detail::format_adapter &Adapter, AlignStyle Where, size_t Amount)
- : Adapter(Adapter), Where(Where), Amount(Amount) {}
+ FmtAlign(detail::format_adapter &Adapter, AlignStyle Where, size_t Amount,
+ char Fill = ' ')
+ : Adapter(Adapter), Where(Where), Amount(Amount), Fill(Fill) {}
void format(raw_ostream &S, StringRef Options) {
// If we don't need to align, we can format straight into the underlying
@@ -48,21 +50,27 @@ struct FmtAlign {
switch (Where) {
case AlignStyle::Left:
S << Item;
- S.indent(PadAmount);
+ fill(S, PadAmount);
break;
case AlignStyle::Center: {
size_t X = PadAmount / 2;
- S.indent(X);
+ fill(S, X);
S << Item;
- S.indent(PadAmount - X);
+ fill(S, PadAmount - X);
break;
}
default:
- S.indent(PadAmount);
+ fill(S, PadAmount);
S << Item;
break;
}
}
+
+private:
+ void fill(llvm::raw_ostream &S, uint32_t Count) {
+ for (uint32_t I = 0; I < Count; ++I)
+ S << Fill;
+ }
};
}
diff --git a/include/llvm/Support/MathExtras.h b/include/llvm/Support/MathExtras.h
index bb840380d4d3..fd29865c8475 100644
--- a/include/llvm/Support/MathExtras.h
+++ b/include/llvm/Support/MathExtras.h
@@ -272,23 +272,22 @@ T reverseBits(T Val) {
// type overloading so that signed and unsigned integers can be used without
// ambiguity.
-/// Hi_32 - This function returns the high 32 bits of a 64 bit value.
+/// Return the high 32 bits of a 64 bit value.
constexpr inline uint32_t Hi_32(uint64_t Value) {
return static_cast<uint32_t>(Value >> 32);
}
-/// Lo_32 - This function returns the low 32 bits of a 64 bit value.
+/// Return the low 32 bits of a 64 bit value.
constexpr inline uint32_t Lo_32(uint64_t Value) {
return static_cast<uint32_t>(Value);
}
-/// Make_64 - This functions makes a 64-bit integer from a high / low pair of
-/// 32-bit integers.
+/// Make a 64-bit integer from a high / low pair of 32-bit integers.
constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) {
return ((uint64_t)High << 32) | (uint64_t)Low;
}
-/// isInt - Checks if an integer fits into the given bit width.
+/// Checks if an integer fits into the given bit width.
template <unsigned N> constexpr inline bool isInt(int64_t x) {
return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1)));
}
@@ -303,8 +302,7 @@ template <> constexpr inline bool isInt<32>(int64_t x) {
return static_cast<int32_t>(x) == x;
}
-/// isShiftedInt<N,S> - Checks if a signed integer is an N bit number shifted
-/// left by S.
+/// Checks if a signed integer is an N bit number shifted left by S.
template <unsigned N, unsigned S>
constexpr inline bool isShiftedInt(int64_t x) {
static_assert(
@@ -313,7 +311,7 @@ constexpr inline bool isShiftedInt(int64_t x) {
return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0);
}
-/// isUInt - Checks if an unsigned integer fits into the given bit width.
+/// Checks if an unsigned integer fits into the given bit width.
///
/// This is written as two functions rather than as simply
///
@@ -383,71 +381,63 @@ inline int64_t maxIntN(int64_t N) {
return (UINT64_C(1) << (N - 1)) - 1;
}
-/// isUIntN - Checks if an unsigned integer fits into the given (dynamic)
-/// bit width.
+/// Checks if an unsigned integer fits into the given (dynamic) bit width.
inline bool isUIntN(unsigned N, uint64_t x) {
return N >= 64 || x <= maxUIntN(N);
}
-/// isIntN - Checks if an signed integer fits into the given (dynamic)
-/// bit width.
+/// Checks if a signed integer fits into the given (dynamic) bit width.
inline bool isIntN(unsigned N, int64_t x) {
return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N));
}
-/// isMask_32 - This function returns true if the argument is a non-empty
-/// sequence of ones starting at the least significant bit with the remainder
-/// zero (32 bit version). Ex. isMask_32(0x0000FFFFU) == true.
+/// Return true if the argument is a non-empty sequence of ones starting at the
+/// least significant bit with the remainder zero (32 bit version).
+/// Ex. isMask_32(0x0000FFFFU) == true.
constexpr inline bool isMask_32(uint32_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
-/// isMask_64 - This function returns true if the argument is a non-empty
-/// sequence of ones starting at the least significant bit with the remainder
-/// zero (64 bit version).
+/// Return true if the argument is a non-empty sequence of ones starting at the
+/// least significant bit with the remainder zero (64 bit version).
constexpr inline bool isMask_64(uint64_t Value) {
return Value && ((Value + 1) & Value) == 0;
}
-/// isShiftedMask_32 - This function returns true if the argument contains a
-/// non-empty sequence of ones with the remainder zero (32 bit version.)
-/// Ex. isShiftedMask_32(0x0000FF00U) == true.
+/// Return true if the argument contains a non-empty sequence of ones with the
+/// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true.
constexpr inline bool isShiftedMask_32(uint32_t Value) {
return Value && isMask_32((Value - 1) | Value);
}
-/// isShiftedMask_64 - This function returns true if the argument contains a
-/// non-empty sequence of ones with the remainder zero (64 bit version.)
+/// Return true if the argument contains a non-empty sequence of ones with the
+/// remainder zero (64 bit version.)
constexpr inline bool isShiftedMask_64(uint64_t Value) {
return Value && isMask_64((Value - 1) | Value);
}
-/// isPowerOf2_32 - This function returns true if the argument is a power of
-/// two > 0. Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
+/// Return true if the argument is a power of two > 0.
+/// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.)
constexpr inline bool isPowerOf2_32(uint32_t Value) {
return Value && !(Value & (Value - 1));
}
-/// isPowerOf2_64 - This function returns true if the argument is a power of two
-/// > 0 (64 bit edition.)
+/// Return true if the argument is a power of two > 0 (64 bit edition.)
constexpr inline bool isPowerOf2_64(uint64_t Value) {
return Value && !(Value & (Value - int64_t(1L)));
}
-/// ByteSwap_16 - This function returns a byte-swapped representation of the
-/// 16-bit argument, Value.
+/// Return a byte-swapped representation of the 16-bit argument.
inline uint16_t ByteSwap_16(uint16_t Value) {
return sys::SwapByteOrder_16(Value);
}
-/// ByteSwap_32 - This function returns a byte-swapped representation of the
-/// 32-bit argument, Value.
+/// Return a byte-swapped representation of the 32-bit argument.
inline uint32_t ByteSwap_32(uint32_t Value) {
return sys::SwapByteOrder_32(Value);
}
-/// ByteSwap_64 - This function returns a byte-swapped representation of the
-/// 64-bit argument, Value.
+/// Return a byte-swapped representation of the 64-bit argument.
inline uint64_t ByteSwap_64(uint64_t Value) {
return sys::SwapByteOrder_64(Value);
}
@@ -455,7 +445,7 @@ inline uint64_t ByteSwap_64(uint64_t Value) {
/// \brief Count the number of ones from the most significant bit to the first
/// zero bit.
///
-/// Ex. CountLeadingOnes(0xFF0FFF00) == 8.
+/// Ex. countLeadingOnes(0xFF0FFF00) == 8.
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of all ones. Only ZB_Width and
@@ -526,7 +516,7 @@ inline unsigned countPopulation(T Value) {
return detail::PopulationCounter<T, sizeof(T)>::count(Value);
}
-/// Log2 - This function returns the log base 2 of the specified value
+/// Return the log base 2 of the specified value.
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
return __builtin_log(Value) / __builtin_log(2.0);
@@ -535,34 +525,33 @@ inline double Log2(double Value) {
#endif
}
-/// Log2_32 - This function returns the floor log base 2 of the specified value,
-/// -1 if the value is zero. (32 bit edition.)
+/// Return the floor log base 2 of the specified value, -1 if the value is zero.
+/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
return 31 - countLeadingZeros(Value);
}
-/// Log2_64 - This function returns the floor log base 2 of the specified value,
-/// -1 if the value is zero. (64 bit edition.)
+/// Return the floor log base 2 of the specified value, -1 if the value is zero.
+/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
return 63 - countLeadingZeros(Value);
}
-/// Log2_32_Ceil - This function returns the ceil log base 2 of the specified
-/// value, 32 if the value is zero. (32 bit edition).
+/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
+/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
return 32 - countLeadingZeros(Value - 1);
}
-/// Log2_64_Ceil - This function returns the ceil log base 2 of the specified
-/// value, 64 if the value is zero. (64 bit edition.)
+/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
+/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
return 64 - countLeadingZeros(Value - 1);
}
-/// GreatestCommonDivisor64 - Return the greatest common divisor of the two
-/// values using Euclid's algorithm.
+/// Return the greatest common divisor of the values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
while (B) {
uint64_t T = B;
@@ -572,8 +561,7 @@ inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
return A;
}
-/// BitsToDouble - This function takes a 64-bit integer and returns the bit
-/// equivalent double.
+/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
double D;
static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
@@ -581,8 +569,7 @@ inline double BitsToDouble(uint64_t Bits) {
return D;
}
-/// BitsToFloat - This function takes a 32-bit integer and returns the bit
-/// equivalent float.
+/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
float F;
static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
@@ -590,10 +577,9 @@ inline float BitsToFloat(uint32_t Bits) {
return F;
}
-/// DoubleToBits - This function takes a double and returns the bit
-/// equivalent 64-bit integer. Note that copying doubles around
-/// changes the bits of NaNs on some hosts, notably x86, so this
-/// routine cannot be used if these bits are needed.
+/// This function takes a double and returns the bit equivalent 64-bit integer.
+/// Note that copying doubles around changes the bits of NaNs on some hosts,
+/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
uint64_t Bits;
static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
@@ -601,10 +587,9 @@ inline uint64_t DoubleToBits(double Double) {
return Bits;
}
-/// FloatToBits - This function takes a float and returns the bit
-/// equivalent 32-bit integer. Note that copying floats around
-/// changes the bits of NaNs on some hosts, notably x86, so this
-/// routine cannot be used if these bits are needed.
+/// This function takes a float and returns the bit equivalent 32-bit integer.
+/// Note that copying floats around changes the bits of NaNs on some hosts,
+/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
uint32_t Bits;
static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
@@ -612,8 +597,8 @@ inline uint32_t FloatToBits(float Float) {
return Bits;
}
-/// MinAlign - A and B are either alignments or offsets. Return the minimum
-/// alignment that may be assumed after adding the two together.
+/// A and B are either alignments or offsets. Return the minimum alignment that
+/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
// The largest power of 2 that divides both A and B.
//
@@ -642,8 +627,8 @@ inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) {
return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr;
}
-/// NextPowerOf2 - Returns the next power of two (in 64-bits)
-/// that is strictly greater than A. Returns zero on overflow.
+/// Returns the next power of two (in 64-bits) that is strictly greater than A.
+/// Returns zero on overflow.
inline uint64_t NextPowerOf2(uint64_t A) {
A |= (A >> 1);
A |= (A >> 2);
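
Spot checks of the helpers whose comments were rewritten above; the values come from the documented examples, and the snippet itself is not part of the patch.

#include <cassert>
#include "llvm/Support/MathExtras.h"

static_assert(llvm::isMask_32(0x0000FFFFu), "contiguous low bits form a mask");
static_assert(llvm::isShiftedMask_32(0x0000FF00u), "shifted run of ones");
static_assert(llvm::isPowerOf2_32(0x00100000u), "single bit set");

void check() {
  assert(llvm::Log2_32(32) == 5 && llvm::Log2_32_Ceil(6) == 3);
  assert(llvm::NextPowerOf2(5) == 8);  // strictly greater power of two
  assert(llvm::MinAlign(8, 12) == 4);  // largest power of two dividing both
}
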
diff --git a/include/llvm/Support/ThreadPool.h b/include/llvm/Support/ThreadPool.h
index f0e3ffa0999c..9ada946c6dae 100644
--- a/include/llvm/Support/ThreadPool.h
+++ b/include/llvm/Support/ThreadPool.h
@@ -35,17 +35,8 @@ namespace llvm {
/// for some work to become available.
class ThreadPool {
public:
-#ifndef _MSC_VER
- using VoidTy = void;
using TaskTy = std::function<void()>;
using PackagedTaskTy = std::packaged_task<void()>;
-#else
- // MSVC 2013 has a bug and can't use std::packaged_task<void()>;
- // We force it to use bool(bool) instead.
- using VoidTy = bool;
- using TaskTy = std::function<bool(bool)>;
- using PackagedTaskTy = std::packaged_task<bool(bool)>;
-#endif
/// Construct a pool with the number of core available on the system (or
/// whatever the value returned by std::thread::hardware_concurrency() is).
@@ -60,30 +51,17 @@ public:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function, typename... Args>
- inline std::shared_future<VoidTy> async(Function &&F, Args &&... ArgList) {
+ inline std::shared_future<void> async(Function &&F, Args &&... ArgList) {
auto Task =
std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
-#ifndef _MSC_VER
return asyncImpl(std::move(Task));
-#else
- // This lambda has to be marked mutable because MSVC 2013's std::bind call
- // operator isn't const qualified.
- return asyncImpl([Task](VoidTy) mutable -> VoidTy {
- Task();
- return VoidTy();
- });
-#endif
}
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function>
- inline std::shared_future<VoidTy> async(Function &&F) {
-#ifndef _MSC_VER
+ inline std::shared_future<void> async(Function &&F) {
return asyncImpl(std::forward<Function>(F));
-#else
- return asyncImpl([F] (VoidTy) -> VoidTy { F(); return VoidTy(); });
-#endif
}
/// Blocking wait for all the threads to complete and the queue to be empty.
@@ -93,7 +71,7 @@ public:
private:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
- std::shared_future<VoidTy> asyncImpl(TaskTy F);
+ std::shared_future<void> asyncImpl(TaskTy F);
/// Threads in flight
std::vector<llvm::thread> Threads;
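
With the MSVC 2013 workaround gone, the interface is uniform across hosts; a minimal usage sketch (not from the patch).

#include "llvm/Support/ThreadPool.h"

void run() {
  llvm::ThreadPool Pool;                        // one thread per hardware core
  std::shared_future<void> F = Pool.async([] { /* task body */ });
  F.wait();                                     // wait for this one task
  Pool.wait();                                  // drain the whole queue
}
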
diff --git a/include/llvm/TableGen/Main.h b/include/llvm/TableGen/Main.h
index 866b9868deb5..ca8c95cb6da2 100644
--- a/include/llvm/TableGen/Main.h
+++ b/include/llvm/TableGen/Main.h
@@ -16,13 +16,15 @@
namespace llvm {
-class RecordKeeper;
class raw_ostream;
+class RecordKeeper;
+
/// \brief Perform the action using Records, and write output to OS.
/// \returns true on error, false otherwise
-typedef bool TableGenMainFn(raw_ostream &OS, RecordKeeper &Records);
+using TableGenMainFn = bool (raw_ostream &OS, RecordKeeper &Records);
int TableGenMain(char *argv0, TableGenMainFn *MainFn);
-}
-#endif
+} // end namespace llvm
+
+#endif // LLVM_TABLEGEN_MAIN_H
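
The new alias preserves the call shape of a backend entry point; a hedged sketch in which `MyBackendMain` is a placeholder backend, written the way a typical tblgen-style tool drives TableGenMain.

#include "llvm/Support/CommandLine.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/TableGen/Main.h"
#include "llvm/TableGen/Record.h"

static bool MyBackendMain(llvm::raw_ostream &OS, llvm::RecordKeeper &Records) {
  OS << "// " << Records.getDefs().size() << " defs parsed\n";
  return false; // false means success
}

int main(int argc, char **argv) {
  llvm::cl::ParseCommandLineOptions(argc, argv);
  return llvm::TableGenMain(argv[0], &MyBackendMain);
}
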
diff --git a/include/llvm/TableGen/Record.h b/include/llvm/TableGen/Record.h
index 5c3bf88fbbfa..fa9ca285bcde 100644
--- a/include/llvm/TableGen/Record.h
+++ b/include/llvm/TableGen/Record.h
@@ -38,11 +38,11 @@
namespace llvm {
class ListRecTy;
+struct MultiClass;
class Record;
class RecordKeeper;
class RecordVal;
class StringInit;
-struct MultiClass;
//===----------------------------------------------------------------------===//
// Type Classes
@@ -90,7 +90,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const RecTy &Ty) {
}
/// 'bit' - Represent a single bit
-///
class BitRecTy : public RecTy {
static BitRecTy Shared;
@@ -109,7 +108,6 @@ public:
};
/// 'bits<n>' - Represent a fixed number of bits
-///
class BitsRecTy : public RecTy {
unsigned Size;
@@ -130,7 +128,6 @@ public:
};
/// 'code' - Represent a code fragment
-///
class CodeRecTy : public RecTy {
static CodeRecTy Shared;
@@ -147,7 +144,6 @@ public:
};
/// 'int' - Represent an integer value of no particular size
-///
class IntRecTy : public RecTy {
static IntRecTy Shared;
@@ -166,7 +162,6 @@ public:
};
/// 'string' - Represent an string value
-///
class StringRecTy : public RecTy {
static StringRecTy Shared;
@@ -185,14 +180,13 @@ public:
/// 'list<Ty>' - Represent a list of values, all of which must be of
/// the specified type.
-///
class ListRecTy : public RecTy {
+ friend ListRecTy *RecTy::getListTy();
+
RecTy *Ty;
explicit ListRecTy(RecTy *T) : RecTy(ListRecTyKind), Ty(T) {}
- friend ListRecTy *RecTy::getListTy();
-
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == ListRecTyKind;
@@ -207,7 +201,6 @@ public:
};
/// 'dag' - Represent a dag fragment
-///
class DagRecTy : public RecTy {
static DagRecTy Shared;
@@ -225,14 +218,13 @@ public:
/// '[classname]' - Represent an instance of a class, such as:
/// (R32 X = EAX).
-///
class RecordRecTy : public RecTy {
+ friend class Record;
+
Record *Rec;
explicit RecordRecTy(Record *R) : RecTy(RecordRecTyKind), Rec(R) {}
- friend class Record;
-
public:
static bool classof(const RecTy *RT) {
return RT->getRecTyKind() == RecordRecTyKind;
@@ -249,7 +241,6 @@ public:
/// Find a common type that T1 and T2 convert to.
/// Return 0 if no such type exists.
-///
RecTy *resolveTypes(RecTy *T1, RecTy *T2);
//===----------------------------------------------------------------------===//
@@ -341,7 +332,6 @@ public:
/// selection operator. Given an initializer, it selects the specified bits
/// out, returning them as a new init of bits type. If it is not legal to use
/// the bit subscript operator on this initializer, return null.
- ///
virtual Init *convertInitializerBitRange(ArrayRef<unsigned> Bits) const {
return nullptr;
}
@@ -350,7 +340,6 @@ public:
/// selection operator. Given an initializer, it selects the specified list
/// elements, returning them as a new init of list type. If it is not legal
/// to take a slice of this, return null.
- ///
virtual Init *convertInitListSlice(ArrayRef<unsigned> Elements) const {
return nullptr;
}
@@ -358,7 +347,6 @@ public:
/// This method is used to implement the FieldInit class.
/// Implementors of this method should return the type of the named field if
/// they are of record type.
- ///
virtual RecTy *getFieldType(StringInit *FieldName) const {
return nullptr;
}
@@ -366,7 +354,6 @@ public:
/// This method complements getFieldType to return the
/// initializer for the specified field. If getFieldType returns non-null
/// this method should return non-null, otherwise it returns null.
- ///
virtual Init *getFieldInit(Record &R, const RecordVal *RV,
StringInit *FieldName) const {
return nullptr;
@@ -376,7 +363,6 @@ public:
/// variables which may not be defined at the time the expression is formed.
/// If a value is set for the variable later, this method will be called on
/// users of the value to allow the value to propagate out.
- ///
virtual Init *resolveReferences(Record &R, const RecordVal *RV) const {
return const_cast<Init *>(this);
}
@@ -400,7 +386,6 @@ inline raw_ostream &operator<<(raw_ostream &OS, const Init &I) {
/// This is the common super-class of types that have a specific,
/// explicit, type.
-///
class TypedInit : public Init {
RecTy *Ty;
@@ -409,8 +394,8 @@ protected:
: Init(K, Opc), Ty(T) {}
public:
- TypedInit(const TypedInit &Other) = delete;
- TypedInit &operator=(const TypedInit &Other) = delete;
+ TypedInit(const TypedInit &) = delete;
+ TypedInit &operator=(const TypedInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() >= IK_FirstTypedInit &&
@@ -438,13 +423,12 @@ public:
};
/// '?' - Represents an uninitialized value
-///
class UnsetInit : public Init {
UnsetInit() : Init(IK_UnsetInit) {}
public:
UnsetInit(const UnsetInit &) = delete;
- UnsetInit &operator=(const UnsetInit &Other) = delete;
+ UnsetInit &operator=(const UnsetInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_UnsetInit;
@@ -463,15 +447,14 @@ public:
};
/// 'true'/'false' - Represent a concrete initializer for a bit.
-///
class BitInit : public Init {
bool Value;
explicit BitInit(bool V) : Init(IK_BitInit), Value(V) {}
public:
- BitInit(const BitInit &Other) = delete;
- BitInit &operator=(BitInit &Other) = delete;
+ BitInit(const BitInit &) = delete;
+ BitInit &operator=(BitInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_BitInit;
@@ -493,7 +476,6 @@ public:
/// '{ a, b, c }' - Represents an initializer for a BitsRecTy value.
/// It contains a vector of bits, whose size is determined by the type.
-///
class BitsInit final : public TypedInit, public FoldingSetNode,
public TrailingObjects<BitsInit, Init *> {
unsigned NumBits;
@@ -502,8 +484,8 @@ class BitsInit final : public TypedInit, public FoldingSetNode,
: TypedInit(IK_BitsInit, BitsRecTy::get(N)), NumBits(N) {}
public:
- BitsInit(const BitsInit &Other) = delete;
- BitsInit &operator=(const BitsInit &Other) = delete;
+ BitsInit(const BitsInit &) = delete;
+ BitsInit &operator=(const BitsInit &) = delete;
// Do not use sized deallocation due to trailing objects.
void operator delete(void *p) { ::operator delete(p); }
@@ -552,7 +534,6 @@ public:
};
/// '7' - Represent an initialization by a literal integer value.
-///
class IntInit : public TypedInit {
int64_t Value;
@@ -560,8 +541,8 @@ class IntInit : public TypedInit {
: TypedInit(IK_IntInit, IntRecTy::get()), Value(V) {}
public:
- IntInit(const IntInit &Other) = delete;
- IntInit &operator=(const IntInit &Other) = delete;
+ IntInit(const IntInit &) = delete;
+ IntInit &operator=(const IntInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_IntInit;
@@ -590,7 +571,6 @@ public:
};
/// "foo" - Represent an initialization by a string value.
-///
class StringInit : public TypedInit {
StringRef Value;
@@ -598,8 +578,8 @@ class StringInit : public TypedInit {
: TypedInit(IK_StringInit, StringRecTy::get()), Value(V) {}
public:
- StringInit(const StringInit &Other) = delete;
- StringInit &operator=(const StringInit &Other) = delete;
+ StringInit(const StringInit &) = delete;
+ StringInit &operator=(const StringInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_StringInit;
@@ -636,8 +616,8 @@ class CodeInit : public TypedInit {
Value(V) {}
public:
- CodeInit(const StringInit &Other) = delete;
- CodeInit &operator=(const StringInit &Other) = delete;
+ CodeInit(const StringInit &) = delete;
+ CodeInit &operator=(const StringInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_CodeInit;
@@ -675,15 +655,15 @@ class ListInit final : public TypedInit, public FoldingSetNode,
unsigned NumValues;
public:
- typedef Init *const *const_iterator;
+ using const_iterator = Init *const *;
private:
explicit ListInit(unsigned N, RecTy *EltTy)
: TypedInit(IK_ListInit, ListRecTy::get(EltTy)), NumValues(N) {}
public:
- ListInit(const ListInit &Other) = delete;
- ListInit &operator=(const ListInit &Other) = delete;
+ ListInit(const ListInit &) = delete;
+ ListInit &operator=(const ListInit &) = delete;
// Do not use sized deallocation due to trailing objects.
void operator delete(void *p) { ::operator delete(p); }
@@ -744,8 +724,8 @@ protected:
: TypedInit(K, Type, Opc) {}
public:
- OpInit(const OpInit &Other) = delete;
- OpInit &operator=(OpInit &Other) = delete;
+ OpInit(const OpInit &) = delete;
+ OpInit &operator=(OpInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() >= IK_FirstOpInit &&
@@ -781,8 +761,8 @@ private:
: OpInit(IK_UnOpInit, Type, opc), LHS(lhs) {}
public:
- UnOpInit(const UnOpInit &Other) = delete;
- UnOpInit &operator=(const UnOpInit &Other) = delete;
+ UnOpInit(const UnOpInit &) = delete;
+ UnOpInit &operator=(const UnOpInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_UnOpInit;
@@ -819,7 +799,6 @@ public:
};
/// !op (X, Y) - Combine two inits.
-///
class BinOpInit : public OpInit, public FoldingSetNode {
public:
enum BinaryOp : uint8_t { ADD, AND, OR, SHL, SRA, SRL, LISTCONCAT,
@@ -832,8 +811,8 @@ private:
OpInit(IK_BinOpInit, Type, opc), LHS(lhs), RHS(rhs) {}
public:
- BinOpInit(const BinOpInit &Other) = delete;
- BinOpInit &operator=(const BinOpInit &Other) = delete;
+ BinOpInit(const BinOpInit &) = delete;
+ BinOpInit &operator=(const BinOpInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_BinOpInit;
@@ -874,7 +853,6 @@ public:
};
/// !op (X, Y, Z) - Combine two inits.
-///
class TernOpInit : public OpInit, public FoldingSetNode {
public:
enum TernaryOp : uint8_t { SUBST, FOREACH, IF };
@@ -887,8 +865,8 @@ private:
OpInit(IK_TernOpInit, Type, opc), LHS(lhs), MHS(mhs), RHS(rhs) {}
public:
- TernOpInit(const TernOpInit &Other) = delete;
- TernOpInit &operator=(const TernOpInit &Other) = delete;
+ TernOpInit(const TernOpInit &) = delete;
+ TernOpInit &operator=(const TernOpInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_TernOpInit;
@@ -935,7 +913,6 @@ public:
};
/// 'Opcode' - Represent a reference to an entire variable object.
-///
class VarInit : public TypedInit {
Init *VarName;
@@ -943,8 +920,8 @@ class VarInit : public TypedInit {
: TypedInit(IK_VarInit, T), VarName(VN) {}
public:
- VarInit(const VarInit &Other) = delete;
- VarInit &operator=(const VarInit &Other) = delete;
+ VarInit(const VarInit &) = delete;
+ VarInit &operator=(const VarInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_VarInit;
@@ -980,7 +957,6 @@ public:
};
/// Opcode{0} - Represent access to one bit of a variable or field.
-///
class VarBitInit : public Init {
TypedInit *TI;
unsigned Bit;
@@ -994,8 +970,8 @@ class VarBitInit : public Init {
}
public:
- VarBitInit(const VarBitInit &Other) = delete;
- VarBitInit &operator=(const VarBitInit &Other) = delete;
+ VarBitInit(const VarBitInit &) = delete;
+ VarBitInit &operator=(const VarBitInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_VarBitInit;
@@ -1032,8 +1008,8 @@ class VarListElementInit : public TypedInit {
}
public:
- VarListElementInit(const VarListElementInit &Other) = delete;
- void operator=(const VarListElementInit &Other) = delete;
+ VarListElementInit(const VarListElementInit &) = delete;
+ VarListElementInit &operator=(const VarListElementInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_VarListElementInit;
@@ -1057,17 +1033,16 @@ public:
};
/// AL - Represent a reference to a 'def' in the description
-///
class DefInit : public TypedInit {
+ friend class Record;
+
Record *Def;
DefInit(Record *D, RecordRecTy *T) : TypedInit(IK_DefInit, T), Def(D) {}
- friend class Record;
-
public:
- DefInit(const DefInit &Other) = delete;
- DefInit &operator=(const DefInit &Other) = delete;
+ DefInit(const DefInit &) = delete;
+ DefInit &operator=(const DefInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_DefInit;
@@ -1101,7 +1076,6 @@ public:
};
/// X.Y - Represent a reference to a subfield of a variable
-///
class FieldInit : public TypedInit {
Init *Rec; // Record we are referring to
StringInit *FieldName; // Field we are accessing
@@ -1112,8 +1086,8 @@ class FieldInit : public TypedInit {
}
public:
- FieldInit(const FieldInit &Other) = delete;
- FieldInit &operator=(const FieldInit &Other) = delete;
+ FieldInit(const FieldInit &) = delete;
+ FieldInit &operator=(const FieldInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_FieldInit;
@@ -1136,9 +1110,10 @@ public:
/// (v a, b) - Represent a DAG tree value. DAG inits are required
/// to have at least one value then a (possibly empty) list of arguments. Each
/// argument can have a name associated with it.
-///
class DagInit final : public TypedInit, public FoldingSetNode,
public TrailingObjects<DagInit, Init *, StringInit *> {
+ friend TrailingObjects;
+
Init *Val;
StringInit *ValName;
unsigned NumArgs;
@@ -1148,12 +1123,11 @@ class DagInit final : public TypedInit, public FoldingSetNode,
: TypedInit(IK_DagInit, DagRecTy::get()), Val(V), ValName(VN),
NumArgs(NumArgs), NumArgNames(NumArgNames) {}
- friend TrailingObjects;
size_t numTrailingObjects(OverloadToken<Init *>) const { return NumArgs; }
public:
- DagInit(const DagInit &Other) = delete;
- DagInit &operator=(const DagInit &Other) = delete;
+ DagInit(const DagInit &) = delete;
+ DagInit &operator=(const DagInit &) = delete;
static bool classof(const Init *I) {
return I->getKind() == IK_DagInit;
@@ -1171,19 +1145,23 @@ public:
Init *getOperator() const { return Val; }
StringInit *getName() const { return ValName; }
+
StringRef getNameStr() const {
return ValName ? ValName->getValue() : StringRef();
}
unsigned getNumArgs() const { return NumArgs; }
+
Init *getArg(unsigned Num) const {
assert(Num < NumArgs && "Arg number out of range!");
return getTrailingObjects<Init *>()[Num];
}
+
StringInit *getArgName(unsigned Num) const {
assert(Num < NumArgNames && "Arg number out of range!");
return getTrailingObjects<StringInit *>()[Num];
}
+
StringRef getArgNameStr(unsigned Num) const {
StringInit *Init = getArgName(Num);
return Init ? Init->getValue() : StringRef();
@@ -1192,6 +1170,7 @@ public:
ArrayRef<Init *> getArgs() const {
return makeArrayRef(getTrailingObjects<Init *>(), NumArgs);
}
+
ArrayRef<StringInit *> getArgNames() const {
return makeArrayRef(getTrailingObjects<StringInit *>(), NumArgNames);
}
@@ -1200,8 +1179,8 @@ public:
std::string getAsString() const override;
- typedef SmallVectorImpl<Init*>::const_iterator const_arg_iterator;
- typedef SmallVectorImpl<StringInit*>::const_iterator const_name_iterator;
+ using const_arg_iterator = SmallVectorImpl<Init*>::const_iterator;
+ using const_name_iterator = SmallVectorImpl<StringInit*>::const_iterator;
inline const_arg_iterator arg_begin() const { return getArgs().begin(); }
inline const_arg_iterator arg_end () const { return getArgs().end(); }
@@ -1231,6 +1210,7 @@ public:
class RecordVal {
friend class Record;
+
Init *Name;
PointerIntPair<RecTy *, 1, bool> TyAndPrefix;
Init *Value;
@@ -1298,7 +1278,7 @@ class Record {
// definitions that use them (e.g. Def). However, inside a multiclass they
// can't be immediately resolved so we mark them ResolveFirst to fully
// resolve them later as soon as the multiclass is instantiated.
- bool ResolveFirst;
+ bool ResolveFirst = false;
void init();
void checkName();
@@ -1308,7 +1288,7 @@ public:
explicit Record(Init *N, ArrayRef<SMLoc> locs, RecordKeeper &records,
bool Anonymous = false) :
Name(N), Locs(locs.begin(), locs.end()), TrackedRecords(records),
- ID(LastID++), IsAnonymous(Anonymous), ResolveFirst(false) {
+ ID(LastID++), IsAnonymous(Anonymous) {
init();
}
@@ -1330,6 +1310,7 @@ public:
unsigned getID() const { return ID; }
StringRef getName() const;
+
Init *getNameInit() const {
return Name;
}
@@ -1435,7 +1416,6 @@ public:
/// If there are any field references that refer to fields
/// that have been filled in, we can propagate the values now.
- ///
void resolveReferences() { resolveReferencesTo(nullptr); }
/// If anything in this record refers to RV, replace the
@@ -1468,7 +1448,6 @@ public:
/// Return the initializer for a value with the specified name,
/// or throw an exception if the field does not exist.
- ///
Init *getValueInit(StringRef FieldName) const;
/// Return true if the named field is unset.
@@ -1479,67 +1458,56 @@ public:
/// This method looks up the specified field and returns
/// its value as a string, throwing an exception if the field does not exist
/// or if the value is not a string.
- ///
StringRef getValueAsString(StringRef FieldName) const;
/// This method looks up the specified field and returns
/// its value as a BitsInit, throwing an exception if the field does not exist
/// or if the value is not the right type.
- ///
BitsInit *getValueAsBitsInit(StringRef FieldName) const;
/// This method looks up the specified field and returns
/// its value as a ListInit, throwing an exception if the field does not exist
/// or if the value is not the right type.
- ///
ListInit *getValueAsListInit(StringRef FieldName) const;
/// This method looks up the specified field and
/// returns its value as a vector of records, throwing an exception if the
/// field does not exist or if the value is not the right type.
- ///
std::vector<Record*> getValueAsListOfDefs(StringRef FieldName) const;
/// This method looks up the specified field and
/// returns its value as a vector of integers, throwing an exception if the
/// field does not exist or if the value is not the right type.
- ///
std::vector<int64_t> getValueAsListOfInts(StringRef FieldName) const;
/// This method looks up the specified field and
/// returns its value as a vector of strings, throwing an exception if the
/// field does not exist or if the value is not the right type.
- ///
std::vector<StringRef> getValueAsListOfStrings(StringRef FieldName) const;
/// This method looks up the specified field and returns its
/// value as a Record, throwing an exception if the field does not exist or if
/// the value is not the right type.
- ///
Record *getValueAsDef(StringRef FieldName) const;
/// This method looks up the specified field and returns its
/// value as a bit, throwing an exception if the field does not exist or if
/// the value is not the right type.
- ///
bool getValueAsBit(StringRef FieldName) const;
/// This method looks up the specified field and
/// returns its value as a bit. If the field is unset, sets Unset to true and
/// returns false.
- ///
bool getValueAsBitOrUnset(StringRef FieldName, bool &Unset) const;
/// This method looks up the specified field and returns its
/// value as an int64_t, throwing an exception if the field does not exist or
/// if the value is not the right type.
- ///
int64_t getValueAsInt(StringRef FieldName) const;
/// This method looks up the specified field and returns its
/// value as a Dag, throwing an exception if the field does not exist or if
/// the value is not the right type.
- ///
DagInit *getValueAsDag(StringRef FieldName) const;
};
@@ -1547,7 +1515,7 @@ raw_ostream &operator<<(raw_ostream &OS, const Record &R);
struct MultiClass {
Record Rec; // Placeholder for template args and Name.
- typedef std::vector<std::unique_ptr<Record>> RecordVector;
+ using RecordVector = std::vector<std::unique_ptr<Record>>;
RecordVector DefPrototypes;
void dump() const;
@@ -1557,7 +1525,7 @@ struct MultiClass {
};
class RecordKeeper {
- typedef std::map<std::string, std::unique_ptr<Record>> RecordMap;
+ using RecordMap = std::map<std::string, std::unique_ptr<Record>>;
RecordMap Classes, Defs;
public:
@@ -1600,7 +1568,6 @@ public:
};
/// Sorting predicate to sort record pointers by name.
-///
struct LessRecord {
bool operator()(const Record *Rec1, const Record *Rec2) const {
return StringRef(Rec1->getName()).compare_numeric(Rec2->getName()) < 0;
@@ -1619,7 +1586,6 @@ struct LessRecordByID {
/// Sorting predicate to sort record pointers by their
/// name field.
-///
struct LessRecordFieldName {
bool operator()(const Record *Rec1, const Record *Rec2) const {
return Rec1->getValueAsString("Name") < Rec2->getValueAsString("Name");
diff --git a/include/llvm/TableGen/SetTheory.h b/include/llvm/TableGen/SetTheory.h
index 818b0549b66a..4b32f9e3da8f 100644
--- a/include/llvm/TableGen/SetTheory.h
+++ b/include/llvm/TableGen/SetTheory.h
@@ -64,8 +64,8 @@ class Record;
class SetTheory {
public:
- typedef std::vector<Record*> RecVec;
- typedef SmallSetVector<Record*, 16> RecSet;
+ using RecVec = std::vector<Record *>;
+ using RecSet = SmallSetVector<Record *, 16>;
/// Operator - A callback representing a DAG operator.
class Operator {
@@ -95,7 +95,7 @@ public:
private:
// Map set defs to their fully expanded contents. This serves as a memoization
// cache and it makes it possible to return const references on queries.
- typedef std::map<Record*, RecVec> ExpandMap;
+ using ExpandMap = std::map<Record *, RecVec>;
ExpandMap Expansions;
// Known DAG operators by name.
diff --git a/include/llvm/TableGen/StringMatcher.h b/include/llvm/TableGen/StringMatcher.h
index 11a8ad8183aa..7c919ffec7b6 100644
--- a/include/llvm/TableGen/StringMatcher.h
+++ b/include/llvm/TableGen/StringMatcher.h
@@ -20,7 +20,8 @@
#include <vector>
namespace llvm {
- class raw_ostream;
+
+class raw_ostream;
/// StringMatcher - Given a list of strings and code to execute when they match,
/// output a simple switch tree to classify the input string.
@@ -30,7 +31,7 @@ namespace llvm {
///
class StringMatcher {
public:
- typedef std::pair<std::string, std::string> StringPair;
+ using StringPair = std::pair<std::string, std::string>;
private:
StringRef StrVariableName;
@@ -49,6 +50,6 @@ private:
unsigned CharNo, unsigned IndentCount) const;
};
-} // end llvm namespace.
+} // end namespace llvm
-#endif
+#endif // LLVM_TABLEGEN_STRINGMATCHER_H
diff --git a/include/llvm/Target/TargetLoweringObjectFile.h b/include/llvm/Target/TargetLoweringObjectFile.h
index 0ffd4b7f8c78..80d4d8e42e51 100644
--- a/include/llvm/Target/TargetLoweringObjectFile.h
+++ b/include/llvm/Target/TargetLoweringObjectFile.h
@@ -70,10 +70,9 @@ public:
virtual void emitPersonalityValue(MCStreamer &Streamer, const DataLayout &TM,
const MCSymbol *Sym) const;
- /// Emit the module flags that the platform cares about.
- virtual void emitModuleFlags(MCStreamer &Streamer,
- ArrayRef<Module::ModuleFlagEntry> Flags,
- const TargetMachine &TM) const {}
+ /// Emit the module-level metadata that the platform cares about.
+ virtual void emitModuleMetadata(MCStreamer &Streamer, Module &M,
+ const TargetMachine &TM) const {}
/// Given a constant with the SectionKind, return a section that it should be
/// placed in.
diff --git a/include/llvm/Target/TargetRegisterInfo.h b/include/llvm/Target/TargetRegisterInfo.h
index 4ce6d2ff5e26..86ad8ad53052 100644
--- a/include/llvm/Target/TargetRegisterInfo.h
+++ b/include/llvm/Target/TargetRegisterInfo.h
@@ -497,6 +497,16 @@ public:
/// function. Used by MachineRegisterInfo::isConstantPhysReg().
virtual bool isConstantPhysReg(unsigned PhysReg) const { return false; }
+ /// Physical registers that may be modified within a function but are
+ /// guaranteed to be restored before any uses. This is useful for targets that
+ /// have call sequences where a GOT register may be updated by the caller
+ /// prior to a call and is guaranteed to be restored (also by the caller)
+ /// after the call.
+ virtual bool isCallerPreservedPhysReg(unsigned PhysReg,
+ const MachineFunction &MF) const {
+ return false;
+ }
+
/// Prior to adding the live-out mask to a stackmap or patchpoint
/// instruction, provide the target the opportunity to adjust it (mainly to
/// remove pseudo-registers that should be ignored).
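
As a rough illustration of how a backend might wire up the new hook above, the check usually reduces to comparing against the ABI's reserved GOT/TOC register (a minimal compiling sketch; MyGOTReg is a placeholder for a real target register enum value and is not part of this patch):

    #include "llvm/CodeGen/MachineFunction.h"

    // Placeholder standing in for a real register number such as <Target>::GOT_REG.
    static const unsigned MyGOTReg = 1;

    // Sketch of the logic a target override of isCallerPreservedPhysReg might use.
    static bool isCallerPreservedPhysRegSketch(unsigned PhysReg,
                                               const llvm::MachineFunction &MF) {
      (void)MF;  // a real target could also consult the calling convention here
      return PhysReg == MyGOTReg;
    }
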
diff --git a/include/llvm/Testing/Support/Error.h b/include/llvm/Testing/Support/Error.h
new file mode 100644
index 000000000000..d52752901593
--- /dev/null
+++ b/include/llvm/Testing/Support/Error.h
@@ -0,0 +1,69 @@
+//===- llvm/Testing/Support/Error.h ---------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_ERROR_H
+#define LLVM_TESTING_SUPPORT_ERROR_H
+
+#include "llvm/ADT/Optional.h"
+#include "llvm/Support/Error.h"
+#include "llvm/Testing/Support/SupportHelpers.h"
+
+#include "gmock/gmock.h"
+#include <ostream>
+
+namespace llvm {
+namespace detail {
+ErrorHolder TakeError(Error Err);
+
+template <typename T> ExpectedHolder<T> TakeExpected(Expected<T> &Exp) {
+ llvm::detail::ExpectedHolder<T> Result;
+ auto &EH = static_cast<llvm::detail::ErrorHolder &>(Result);
+ EH = TakeError(Exp.takeError());
+ if (Result.Success)
+ Result.Value = &(*Exp);
+ return Result;
+}
+
+template <typename T> ExpectedHolder<T> TakeExpected(const Expected<T> &Exp) {
+ return TakeExpected(const_cast<Expected<T> &>(Exp));
+}
+} // namespace detail
+
+#define EXPECT_THAT_ERROR(Err, Matcher) \
+ EXPECT_THAT(llvm::detail::TakeError(Err), Matcher)
+#define ASSERT_THAT_ERROR(Err, Matcher) \
+ ASSERT_THAT(llvm::detail::TakeError(Err), Matcher)
+
+#define EXPECT_THAT_EXPECTED(Err, Matcher) \
+ EXPECT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+#define ASSERT_THAT_EXPECTED(Err, Matcher) \
+ ASSERT_THAT(llvm::detail::TakeExpected(Err), Matcher)
+
+MATCHER(Succeeded, "") { return arg.Success; }
+MATCHER(Failed, "") { return !arg.Success; }
+
+MATCHER_P(HasValue, value,
+ "succeeded with value " + testing::PrintToString(value)) {
+ if (!arg.Success) {
+ *result_listener << "operation failed";
+ return false;
+ }
+
+ assert(arg.Value.hasValue());
+ if (**arg.Value != value) {
+ *result_listener << "but \"" + testing::PrintToString(**arg.Value) +
+ "\" != " + testing::PrintToString(value);
+ return false;
+ }
+
+ return true;
+}
+} // namespace llvm
+
+#endif
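
For context, these macros and matchers are meant to be used together in unit tests, roughly as in the following sketch (parseNumber is a made-up helper, not part of this header):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Testing/Support/Error.h"
    #include "gtest/gtest.h"

    // Hypothetical helper for the sketch: fails on empty input, else returns 42.
    static llvm::Expected<int> parseNumber(llvm::StringRef S) {
      if (S.empty())
        return llvm::make_error<llvm::StringError>("empty input",
                                                   llvm::inconvertibleErrorCode());
      return 42;
    }

    TEST(ErrorMatchersSketch, Basic) {
      EXPECT_THAT_EXPECTED(parseNumber("42"), llvm::Succeeded());
      EXPECT_THAT_EXPECTED(parseNumber("42"), llvm::HasValue(42));
      EXPECT_THAT_EXPECTED(parseNumber(""), llvm::Failed());

      llvm::Expected<int> Bad = parseNumber("");
      EXPECT_THAT_ERROR(Bad.takeError(), llvm::Failed());
    }
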
diff --git a/include/llvm/Testing/Support/SupportHelpers.h b/include/llvm/Testing/Support/SupportHelpers.h
new file mode 100644
index 000000000000..c4dd414b80db
--- /dev/null
+++ b/include/llvm/Testing/Support/SupportHelpers.h
@@ -0,0 +1,47 @@
+//===- Testing/Support/SupportHelpers.h -----------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+#define LLVM_TESTING_SUPPORT_SUPPORTHELPERS_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Error.h"
+#include "gtest/gtest-printers.h"
+
+namespace llvm {
+namespace detail {
+struct ErrorHolder {
+ bool Success;
+ std::string Message;
+};
+
+template <typename T> struct ExpectedHolder : public ErrorHolder {
+ Optional<T *> Value;
+};
+
+inline void PrintTo(const ErrorHolder &Err, std::ostream *Out) {
+ *Out << (Err.Success ? "succeeded" : "failed");
+ if (!Err.Success) {
+ *Out << " (" << StringRef(Err.Message).trim().str() << ")";
+ }
+}
+
+template <typename T>
+void PrintTo(const ExpectedHolder<T> &Item, std::ostream *Out) {
+ if (Item.Success) {
+ *Out << "succeeded with value \"" << ::testing::PrintToString(**Item.Value)
+ << "\"";
+ } else {
+ PrintTo(static_cast<const ErrorHolder &>(Item), Out);
+ }
+}
+} // namespace detail
+} // namespace llvm
+
+#endif
diff --git a/include/llvm/Transforms/Scalar/GVNExpression.h b/include/llvm/Transforms/Scalar/GVNExpression.h
index 008341304995..f603ebcbca7c 100644
--- a/include/llvm/Transforms/Scalar/GVNExpression.h
+++ b/include/llvm/Transforms/Scalar/GVNExpression.h
@@ -121,10 +121,7 @@ public:
OS << "}";
}
- LLVM_DUMP_METHOD void dump() const {
- print(dbgs());
- dbgs() << "\n";
- }
+ LLVM_DUMP_METHOD void dump() const;
};
inline raw_ostream &operator<<(raw_ostream &OS, const Expression &E) {
diff --git a/include/llvm/Transforms/Utils/CodeExtractor.h b/include/llvm/Transforms/Utils/CodeExtractor.h
index 7e23544af1ab..682b353ab5ae 100644
--- a/include/llvm/Transforms/Utils/CodeExtractor.h
+++ b/include/llvm/Transforms/Utils/CodeExtractor.h
@@ -106,15 +106,32 @@ template <typename T> class ArrayRef;
/// significant impact on the cost however.
void findInputsOutputs(ValueSet &Inputs, ValueSet &Outputs,
const ValueSet &Allocas) const;
+
+ /// Check if lifetime marker nodes can be hoisted/sunk into the outline
+ /// region.
+ ///
+ /// Returns true if it is safe to do the code motion.
+ bool isLegalToShrinkwrapLifetimeMarkers(Instruction *AllocaAddr) const;
/// Find the set of allocas whose life ranges are contained within the
/// outlined region.
///
/// Allocas which have lifetime markers contained in the outlined region
/// should be pushed to the outlined function. The address bitcasts that
/// are used by the lifetime markers are also candidates for shrink-
- /// wrapping. The instructions that need to be sinked are collected in
+ /// wrapping. The instructions that need to be sunk are collected in
/// 'Allocas'.
- void findAllocas(ValueSet &Allocas) const;
+ void findAllocas(ValueSet &SinkCands, ValueSet &HoistCands,
+ BasicBlock *&ExitBlock) const;
+
+ /// Find or create a block within the outline region for placing hoisted
+ /// code.
+ ///
+ /// CommonExitBlock is a block outside the outline region. It is the common
+ /// successor of blocks inside the region. If there exists a single block
+ /// inside the region that is the predecessor of CommonExitBlock, that block
+ /// will be returned. Otherwise CommonExitBlock will be split and the
+ /// original block will be added to the outline region.
+ BasicBlock *findOrCreateBlockForHoisting(BasicBlock *CommonExitBlock);
private:
void severSplitPHINodes(BasicBlock *&Header);
diff --git a/include/llvm/Transforms/Utils/Mem2Reg.h b/include/llvm/Transforms/Utils/Mem2Reg.h
index 456876b520b0..1fe186d6c3ad 100644
--- a/include/llvm/Transforms/Utils/Mem2Reg.h
+++ b/include/llvm/Transforms/Utils/Mem2Reg.h
@@ -25,4 +25,4 @@ public:
};
}
-#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H \ No newline at end of file
+#endif // LLVM_TRANSFORMS_UTILS_MEM2REG_H
diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp
index f743cb234c45..dbb1b01b94ac 100644
--- a/lib/Analysis/BasicAliasAnalysis.cpp
+++ b/lib/Analysis/BasicAliasAnalysis.cpp
@@ -1011,10 +1011,24 @@ static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
// equal each other so we can exit early.
if (C1 && C2)
return NoAlias;
- if (isKnownNonEqual(GEP1->getOperand(GEP1->getNumOperands() - 1),
- GEP2->getOperand(GEP2->getNumOperands() - 1),
- DL))
- return NoAlias;
+ {
+ Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
+ Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
+ if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
+ // If one of the indices is a PHI node, be safe and only use
+ // computeKnownBits so we don't make any assumptions about the
+ // relationships between the two indices. This is important if we're
+ // asking about values from different loop iterations. See PR32314.
+ // TODO: We may be able to change the check so we only do this when
+ // we definitely looked through a PHINode.
+ KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
+ KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
+ if (Known1.Zero.intersects(Known2.One) ||
+ Known1.One.intersects(Known2.Zero))
+ return NoAlias;
+ } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
+ return NoAlias;
+ }
return MayAlias;
} else if (!LastIndexedStruct || !C1 || !C2) {
return MayAlias;
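
The known-bits test above is a simple disjointness argument: if any bit position is known zero in one index and known one in the other, the two indices cannot be equal, so the GEPs cannot alias. The same check in isolation, with assumed known-bits values (sketch only):

    #include "llvm/Support/KnownBits.h"

    // Idx1 has bit 0 known to be 0, Idx2 has bit 0 known to be 1, so the two
    // last GEP indices can never be equal; the code above then returns NoAlias.
    static bool knownNonEqualSketch() {
      llvm::KnownBits Known1(8), Known2(8);
      Known1.Zero.setBit(0);
      Known2.One.setBit(0);
      return Known1.Zero.intersects(Known2.One) ||
             Known1.One.intersects(Known2.Zero);  // true
    }
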
diff --git a/lib/Analysis/CallGraphSCCPass.cpp b/lib/Analysis/CallGraphSCCPass.cpp
index 5896e6e0902f..facda246936d 100644
--- a/lib/Analysis/CallGraphSCCPass.cpp
+++ b/lib/Analysis/CallGraphSCCPass.cpp
@@ -608,18 +608,18 @@ namespace {
}
bool runOnSCC(CallGraphSCC &SCC) override {
+ bool BannerPrinted = false;
auto PrintBannerOnce = [&] () {
- static bool BannerPrinted = false;
if (BannerPrinted)
return;
Out << Banner;
BannerPrinted = true;
};
for (CallGraphNode *CGN : SCC) {
- if (CGN->getFunction()) {
- if (isFunctionInPrintList(CGN->getFunction()->getName())) {
+ if (Function *F = CGN->getFunction()) {
+ if (!F->isDeclaration() && isFunctionInPrintList(F->getName())) {
PrintBannerOnce();
- CGN->getFunction()->print(Out);
+ F->print(Out);
}
} else if (llvm::isFunctionInPrintList("*")) {
PrintBannerOnce();
diff --git a/lib/Analysis/DivergenceAnalysis.cpp b/lib/Analysis/DivergenceAnalysis.cpp
index 1b36569f7a07..2d39a0b02150 100644
--- a/lib/Analysis/DivergenceAnalysis.cpp
+++ b/lib/Analysis/DivergenceAnalysis.cpp
@@ -241,7 +241,7 @@ void DivergencePropagator::exploreDataDependency(Value *V) {
// Follow def-use chains of V.
for (User *U : V->users()) {
Instruction *UserInst = cast<Instruction>(U);
- if (DV.insert(UserInst).second)
+ if (!TTI.isAlwaysUniform(U) && DV.insert(UserInst).second)
Worklist.push_back(UserInst);
}
}
diff --git a/lib/Analysis/MemorySSA.cpp b/lib/Analysis/MemorySSA.cpp
index e0e04a91410f..86d0d92799f2 100644
--- a/lib/Analysis/MemorySSA.cpp
+++ b/lib/Analysis/MemorySSA.cpp
@@ -1872,7 +1872,6 @@ MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequired<MemorySSAWrapperPass>();
- AU.addPreserved<MemorySSAWrapperPass>();
}
bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
@@ -1957,6 +1956,7 @@ MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
#ifdef EXPENSIVE_CHECKS
MemoryAccess *NewNoCache = Walker.findClobber(StartingAccess, Q);
assert(NewNoCache == New && "Cache made us hand back a different result?");
+ (void)NewNoCache;
#endif
if (AutoResetWalker)
resetClobberWalker();
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index b9c4716b5528..aebc80a0a885 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -149,9 +149,9 @@ static cl::opt<unsigned> MaxValueCompareDepth(
cl::init(2));
static cl::opt<unsigned>
- MaxAddExprDepth("scalar-evolution-max-addexpr-depth", cl::Hidden,
- cl::desc("Maximum depth of recursive AddExpr"),
- cl::init(32));
+ MaxArithDepth("scalar-evolution-max-arith-depth", cl::Hidden,
+ cl::desc("Maximum depth of recursive arithmetics"),
+ cl::init(32));
static cl::opt<unsigned> MaxConstantEvolvingDepth(
"scalar-evolution-max-constant-evolving-depth", cl::Hidden,
@@ -2276,8 +2276,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (Ops.size() == 1) return Ops[0];
}
- // Limit recursion calls depth
- if (Depth > MaxAddExprDepth)
+ // Limit the depth of recursive calls.
+ if (Depth > MaxArithDepth)
return getOrCreateAddExpr(Ops, Flags);
// Okay, check to see if the same value occurs in the operand list more than
@@ -2293,7 +2293,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
++Count;
// Merge the values into a multiply.
const SCEV *Scale = getConstant(Ty, Count);
- const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+ const SCEV *Mul = getMulExpr(Scale, Ops[i], SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == Count)
return Mul;
Ops[i] = Mul;
@@ -2343,7 +2343,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
}
}
if (Ok)
- LargeOps.push_back(getMulExpr(LargeMulOps));
+ LargeOps.push_back(getMulExpr(LargeMulOps, SCEV::FlagAnyWrap, Depth + 1));
} else {
Ok = false;
break;
@@ -2417,7 +2417,8 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
if (MulOp.first != 0)
Ops.push_back(getMulExpr(
getConstant(MulOp.first),
- getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1)));
+ getAddExpr(MulOp.second, SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1));
if (Ops.empty())
return getZero(Ty);
if (Ops.size() == 1)
@@ -2445,11 +2446,12 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
Mul->op_begin()+MulOp);
MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
- InnerMul = getMulExpr(MulOps);
+ InnerMul = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
SmallVector<const SCEV *, 2> TwoOps = {getOne(Ty), InnerMul};
const SCEV *AddOne = getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
+ const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV,
+ SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
if (AddOp < Idx) {
Ops.erase(Ops.begin()+AddOp);
@@ -2478,19 +2480,20 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
Mul->op_begin()+MulOp);
MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
- InnerMul1 = getMulExpr(MulOps);
+ InnerMul1 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
if (OtherMul->getNumOperands() != 2) {
SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
OtherMul->op_begin()+OMulOp);
MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
- InnerMul2 = getMulExpr(MulOps);
+ InnerMul2 = getMulExpr(MulOps, SCEV::FlagAnyWrap, Depth + 1);
}
SmallVector<const SCEV *, 2> TwoOps = {InnerMul1, InnerMul2};
const SCEV *InnerMulSum =
getAddExpr(TwoOps, SCEV::FlagAnyWrap, Depth + 1);
- const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
+ const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum,
+ SCEV::FlagAnyWrap, Depth + 1);
if (Ops.size() == 2) return OuterMul;
Ops.erase(Ops.begin()+Idx);
Ops.erase(Ops.begin()+OtherMulIdx-1);
@@ -2621,6 +2624,27 @@ ScalarEvolution::getOrCreateAddExpr(SmallVectorImpl<const SCEV *> &Ops,
return S;
}
+const SCEV *
+ScalarEvolution::getOrCreateMulExpr(SmallVectorImpl<const SCEV *> &Ops,
+ SCEV::NoWrapFlags Flags) {
+ FoldingSetNodeID ID;
+ ID.AddInteger(scMulExpr);
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ ID.AddPointer(Ops[i]);
+ void *IP = nullptr;
+ SCEVMulExpr *S =
+ static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+ if (!S) {
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
+ UniqueSCEVs.InsertNode(S, IP);
+ }
+ S->setNoWrapFlags(Flags);
+ return S;
+}
+
static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
uint64_t k = i*j;
if (j > 1 && k / j != i) Overflow = true;
@@ -2673,7 +2697,8 @@ static bool containsConstantSomewhere(const SCEV *StartExpr) {
/// Get a canonical multiply expression, or something simpler if possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- SCEV::NoWrapFlags Flags) {
+ SCEV::NoWrapFlags Flags,
+ unsigned Depth) {
assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
"only nuw or nsw allowed");
assert(!Ops.empty() && "Cannot get empty mul!");
@@ -2690,6 +2715,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
Flags = StrengthenNoWrapFlags(this, scMulExpr, Ops, Flags);
+ // Limit the depth of recursive calls.
+ if (Depth > MaxArithDepth)
+ return getOrCreateMulExpr(Ops, Flags);
+
// If there are any constants, fold them together.
unsigned Idx = 0;
if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
@@ -2701,8 +2730,11 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// apply this transformation as well.
if (Add->getNumOperands() == 2)
if (containsConstantSomewhere(Add))
- return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
- getMulExpr(LHSC, Add->getOperand(1)));
+ return getAddExpr(getMulExpr(LHSC, Add->getOperand(0),
+ SCEV::FlagAnyWrap, Depth + 1),
+ getMulExpr(LHSC, Add->getOperand(1),
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
++Idx;
while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
@@ -2730,17 +2762,19 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
SmallVector<const SCEV *, 4> NewOps;
bool AnyFolded = false;
for (const SCEV *AddOp : Add->operands()) {
- const SCEV *Mul = getMulExpr(Ops[0], AddOp);
+ const SCEV *Mul = getMulExpr(Ops[0], AddOp, SCEV::FlagAnyWrap,
+ Depth + 1);
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
NewOps.push_back(Mul);
}
if (AnyFolded)
- return getAddExpr(NewOps);
+ return getAddExpr(NewOps, SCEV::FlagAnyWrap, Depth + 1);
} else if (const auto *AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
// Negation preserves a recurrence's no self-wrap property.
SmallVector<const SCEV *, 4> Operands;
for (const SCEV *AddRecOp : AddRec->operands())
- Operands.push_back(getMulExpr(Ops[0], AddRecOp));
+ Operands.push_back(getMulExpr(Ops[0], AddRecOp, SCEV::FlagAnyWrap,
+ Depth + 1));
return getAddRecExpr(Operands, AddRec->getLoop(),
AddRec->getNoWrapFlags(SCEV::FlagNW));
@@ -2762,18 +2796,18 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
if (Ops.size() > MulOpsInlineThreshold)
break;
- // If we have an mul, expand the mul operands onto the end of the operands
- // list.
+ // If we have a mul, expand the mul operands onto the end of the
+ // operands list.
Ops.erase(Ops.begin()+Idx);
Ops.append(Mul->op_begin(), Mul->op_end());
DeletedMul = true;
}
- // If we deleted at least one mul, we added operands to the end of the list,
- // and they are not necessarily sorted. Recurse to resort and resimplify
- // any operands we just acquired.
+ // If we deleted at least one mul, we added operands to the end of the
+ // list, and they are not necessarily sorted. Recurse to resort and
+ // resimplify any operands we just acquired.
if (DeletedMul)
- return getMulExpr(Ops);
+ return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
}
// If there are any add recurrences in the operands list, see if any other
@@ -2784,8 +2818,8 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// Scan over all recurrences, trying to fold loop invariants into them.
for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
- // Scan all of the other operands to this mul and add them to the vector if
- // they are loop invariant w.r.t. the recurrence.
+ // Scan all of the other operands to this mul and add them to the vector
+ // if they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
@@ -2801,9 +2835,10 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
SmallVector<const SCEV *, 4> NewOps;
NewOps.reserve(AddRec->getNumOperands());
- const SCEV *Scale = getMulExpr(LIOps);
+ const SCEV *Scale = getMulExpr(LIOps, SCEV::FlagAnyWrap, Depth + 1);
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
- NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
+ NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i),
+ SCEV::FlagAnyWrap, Depth + 1));
// Build the new addrec. Propagate the NUW and NSW flags if both the
// outer mul and the inner addrec are guaranteed to have no overflow.
@@ -2822,12 +2857,12 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
Ops[i] = NewRec;
break;
}
- return getMulExpr(Ops);
+ return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
}
- // Okay, if there weren't any loop invariants to be folded, check to see if
- // there are multiple AddRec's with the same loop induction variable being
- // multiplied together. If so, we can fold them.
+ // Okay, if there weren't any loop invariants to be folded, check to see
+ // if there are multiple AddRec's with the same loop induction variable
+ // being multiplied together. If so, we can fold them.
// {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
// = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
@@ -2869,7 +2904,9 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
const SCEV *CoeffTerm = getConstant(Ty, Coeff);
const SCEV *Term1 = AddRec->getOperand(y-z);
const SCEV *Term2 = OtherAddRec->getOperand(z);
- Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
+ Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1, Term2,
+ SCEV::FlagAnyWrap, Depth + 1),
+ SCEV::FlagAnyWrap, Depth + 1);
}
}
AddRecOps.push_back(Term);
@@ -2887,7 +2924,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
}
}
if (OpsModified)
- return getMulExpr(Ops);
+ return getMulExpr(Ops, SCEV::FlagAnyWrap, Depth + 1);
// Otherwise couldn't fold anything into this recurrence. Move onto the
// next one.
@@ -2895,22 +2932,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
// Okay, it looks like we really DO need an mul expr. Check to see if we
// already have one, otherwise create a new one.
- FoldingSetNodeID ID;
- ID.AddInteger(scMulExpr);
- for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- ID.AddPointer(Ops[i]);
- void *IP = nullptr;
- SCEVMulExpr *S =
- static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
- if (!S) {
- const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
- std::uninitialized_copy(Ops.begin(), Ops.end(), O);
- S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
- O, Ops.size());
- UniqueSCEVs.InsertNode(S, IP);
- }
- S->setNoWrapFlags(Flags);
- return S;
+ return getOrCreateMulExpr(Ops, Flags);
}
/// Get a canonical unsigned division expression, or something simpler if
@@ -3713,7 +3735,8 @@ const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
}
const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
- SCEV::NoWrapFlags Flags) {
+ SCEV::NoWrapFlags Flags,
+ unsigned Depth) {
// Fast path: X - X --> 0.
if (LHS == RHS)
return getZero(LHS->getType());
@@ -3747,7 +3770,7 @@ const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
// larger scope than intended.
auto NegFlags = RHSIsNotMinSigned ? SCEV::FlagNSW : SCEV::FlagAnyWrap;
- return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags);
+ return getAddExpr(LHS, getNegativeSCEV(RHS, NegFlags), AddFlags, Depth);
}
const SCEV *
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index 488cb332a0b0..92328f6e5efd 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -103,6 +103,10 @@ bool TargetTransformInfo::isSourceOfDivergence(const Value *V) const {
return TTIImpl->isSourceOfDivergence(V);
}
+bool llvm::TargetTransformInfo::isAlwaysUniform(const Value *V) const {
+ return TTIImpl->isAlwaysUniform(V);
+}
+
unsigned TargetTransformInfo::getFlatAddressSpace() const {
return TTIImpl->getFlatAddressSpace();
}
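
Targets feed this query from their TTI implementation; the predicate itself is typically a small pattern match on the value. A compiling sketch of the kind of check a target might plug in ("uniform-result" is a hypothetical string attribute used only for illustration):

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"

    // Sketch: treat calls to functions carrying an assumed "uniform-result"
    // attribute as producing the same value on every lane.
    static bool isAlwaysUniformSketch(const llvm::Value *V) {
      if (const auto *CI = llvm::dyn_cast<llvm::CallInst>(V))
        if (const llvm::Function *F = CI->getCalledFunction())
          return F->hasFnAttribute("uniform-result");
      return false;
    }
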
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index c0181662fd9d..b065f427b06c 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -852,7 +852,8 @@ static void computeKnownBitsFromShiftOperator(
Optional<bool> ShifterOperandIsNonZero;
// Early exit if we can't constrain any well-defined shift amount.
- if (!(ShiftAmtKZ & (BitWidth - 1)) && !(ShiftAmtKO & (BitWidth - 1))) {
+ if (!(ShiftAmtKZ & (PowerOf2Ceil(BitWidth) - 1)) &&
+ !(ShiftAmtKO & (PowerOf2Ceil(BitWidth) - 1))) {
ShifterOperandIsNonZero =
isKnownNonZero(I->getOperand(1), Depth + 1, Q);
if (!*ShifterOperandIsNonZero)
@@ -3026,7 +3027,7 @@ bool llvm::getConstantDataArrayInfo(const Value *V,
if (GV->getInitializer()->isNullValue()) {
Type *GVTy = GV->getValueType();
if ( (ArrayTy = dyn_cast<ArrayType>(GVTy)) ) {
- // A zeroinitializer for the array; There is no ConstantDataArray.
+ // A zeroinitializer for the array; there is no ConstantDataArray.
Array = nullptr;
} else {
const DataLayout &DL = GV->getParent()->getDataLayout();
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 95987fac74e1..0629c2d326ae 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -733,13 +733,13 @@ private:
std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
bool IsOldProfileFormat,
bool HasProfile);
- Error parseEntireSummary();
+ Error parseEntireSummary(unsigned ID);
Error parseModuleStringTable();
std::pair<ValueInfo, GlobalValue::GUID>
getValueInfoFromValueId(unsigned ValueId);
- ModulePathStringTableTy::iterator addThisModulePath();
+ ModuleSummaryIndex::ModuleInfo *addThisModule();
};
} // end anonymous namespace
@@ -2608,6 +2608,16 @@ Error BitcodeReader::materializeMetadata() {
if (Error Err = MDLoader->parseModuleMetadata())
return Err;
}
+
+ // Upgrade "Linker Options" module flag to "llvm.linker.options" module-level
+ // metadata.
+ if (Metadata *Val = TheModule->getModuleFlag("Linker Options")) {
+ NamedMDNode *LinkerOpts =
+ TheModule->getOrInsertNamedMetadata("llvm.linker.options");
+ for (const MDOperand &MDOptions : cast<MDNode>(Val)->operands())
+ LinkerOpts->addOperand(cast<MDNode>(MDOptions));
+ }
+
DeferredMetadataInfo.clear();
return Error::success();
}
@@ -4691,9 +4701,9 @@ ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
: BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex),
ModulePath(ModulePath), ModuleId(ModuleId) {}
-ModulePathStringTableTy::iterator
-ModuleSummaryIndexBitcodeReader::addThisModulePath() {
- return TheIndex.addModulePath(ModulePath, ModuleId);
+ModuleSummaryIndex::ModuleInfo *
+ModuleSummaryIndexBitcodeReader::addThisModule() {
+ return TheIndex.addModule(ModulePath, ModuleId);
}
std::pair<ValueInfo, GlobalValue::GUID>
@@ -4844,6 +4854,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModule() {
return error("Invalid record");
break;
case bitc::GLOBALVAL_SUMMARY_BLOCK_ID:
+ case bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID:
assert(!SeenValueSymbolTable &&
"Already read VST when parsing summary block?");
// We might not have a VST if there were no values in the
@@ -4856,7 +4867,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModule() {
SeenValueSymbolTable = true;
}
SeenGlobalValSummary = true;
- if (Error Err = parseEntireSummary())
+ if (Error Err = parseEntireSummary(Entry.ID))
return Err;
break;
case bitc::MODULE_STRTAB_BLOCK_ID:
@@ -4889,7 +4900,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModule() {
case bitc::MODULE_CODE_HASH: {
if (Record.size() != 5)
return error("Invalid hash length " + Twine(Record.size()).str());
- auto &Hash = addThisModulePath()->second.second;
+ auto &Hash = addThisModule()->second.second;
int Pos = 0;
for (auto &Val : Record) {
assert(!(Val >> 32) && "Unexpected high bits set");
@@ -4964,8 +4975,8 @@ std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallLi
// Eagerly parse the entire summary block. This populates the GlobalValueSummary
// objects in the index.
-Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
- if (Stream.EnterSubBlock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID))
+Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(unsigned ID) {
+ if (Stream.EnterSubBlock(ID))
return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -5070,7 +5081,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
auto VIAndOriginalGUID = getValueInfoFromValueId(ValueID);
- FS->setModulePath(addThisModulePath()->first());
+ FS->setModulePath(addThisModule()->first());
FS->setOriginalName(VIAndOriginalGUID.second);
TheIndex.addGlobalValueSummary(VIAndOriginalGUID.first, std::move(FS));
break;
@@ -5090,7 +5101,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
// string table section in the per-module index, we create a single
// module path string table entry with an empty (0) ID to take
// ownership.
- AS->setModulePath(addThisModulePath()->first());
+ AS->setModulePath(addThisModule()->first());
GlobalValue::GUID AliaseeGUID =
getValueInfoFromValueId(AliaseeID).first.getGUID();
@@ -5113,7 +5124,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
std::vector<ValueInfo> Refs =
makeRefList(ArrayRef<uint64_t>(Record).slice(2));
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
- FS->setModulePath(addThisModulePath()->first());
+ FS->setModulePath(addThisModule()->first());
auto GUID = getValueInfoFromValueId(ValueID);
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
@@ -5241,6 +5252,20 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
{{Record[0], Record[1]}, {Record.begin() + 2, Record.end()}});
break;
}
+ case bitc::FS_CFI_FUNCTION_DEFS: {
+ std::set<std::string> &CfiFunctionDefs = TheIndex.cfiFunctionDefs();
+ for (unsigned I = 0; I != Record.size(); I += 2)
+ CfiFunctionDefs.insert(
+ {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])});
+ break;
+ }
+ case bitc::FS_CFI_FUNCTION_DECLS: {
+ std::set<std::string> &CfiFunctionDecls = TheIndex.cfiFunctionDecls();
+ for (unsigned I = 0; I != Record.size(); I += 2)
+ CfiFunctionDecls.insert(
+ {Strtab.data() + Record[I], static_cast<size_t>(Record[I + 1])});
+ break;
+ }
}
}
llvm_unreachable("Exit infinite loop");
@@ -5255,7 +5280,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() {
SmallVector<uint64_t, 64> Record;
SmallString<128> ModulePath;
- ModulePathStringTableTy::iterator LastSeenModulePath;
+ ModuleSummaryIndex::ModuleInfo *LastSeenModule = nullptr;
while (true) {
BitstreamEntry Entry = Stream.advanceSkippingSubblocks();
@@ -5282,8 +5307,8 @@ Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() {
if (convertToString(Record, 1, ModulePath))
return error("Invalid record");
- LastSeenModulePath = TheIndex.addModulePath(ModulePath, ModuleId);
- ModuleIdMap[ModuleId] = LastSeenModulePath->first();
+ LastSeenModule = TheIndex.addModule(ModulePath, ModuleId);
+ ModuleIdMap[ModuleId] = LastSeenModule->first();
ModulePath.clear();
break;
@@ -5292,15 +5317,15 @@ Error ModuleSummaryIndexBitcodeReader::parseModuleStringTable() {
case bitc::MST_CODE_HASH: {
if (Record.size() != 5)
return error("Invalid hash length " + Twine(Record.size()).str());
- if (LastSeenModulePath == TheIndex.modulePaths().end())
+ if (!LastSeenModule)
return error("Invalid hash that does not follow a module path");
int Pos = 0;
for (auto &Val : Record) {
assert(!(Val >> 32) && "Unexpected high bits set");
- LastSeenModulePath->second.second[Pos++] = Val;
+ LastSeenModule->second.second[Pos++] = Val;
}
- // Reset LastSeenModulePath to avoid overriding the hash unexpectedly.
- LastSeenModulePath = TheIndex.modulePaths().end();
+ // Reset LastSeenModule to avoid overriding the hash unexpectedly.
+ LastSeenModule = nullptr;
break;
}
}
@@ -5507,13 +5532,16 @@ BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
}
// Parse the specified bitcode buffer and merge the index into CombinedIndex.
+// We don't use ModuleIdentifier here because the client may need to control the
+// module path used in the combined summary (e.g. when reading summaries for
+// regular LTO modules).
Error BitcodeModule::readSummary(ModuleSummaryIndex &CombinedIndex,
- unsigned ModuleId) {
+ StringRef ModulePath, uint64_t ModuleId) {
BitstreamCursor Stream(Buffer);
Stream.JumpToBit(ModuleBit);
ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, CombinedIndex,
- ModuleIdentifier, ModuleId);
+ ModulePath, ModuleId);
return R.parseModule();
}
@@ -5533,7 +5561,7 @@ Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() {
}
// Check if the given bitcode buffer contains a global value summary block.
-Expected<bool> BitcodeModule::hasSummary() {
+Expected<BitcodeLTOInfo> BitcodeModule::getLTOInfo() {
BitstreamCursor Stream(Buffer);
Stream.JumpToBit(ModuleBit);
@@ -5547,11 +5575,14 @@ Expected<bool> BitcodeModule::hasSummary() {
case BitstreamEntry::Error:
return error("Malformed block");
case BitstreamEntry::EndBlock:
- return false;
+ return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/false};
case BitstreamEntry::SubBlock:
if (Entry.ID == bitc::GLOBALVAL_SUMMARY_BLOCK_ID)
- return true;
+ return BitcodeLTOInfo{/*IsThinLTO=*/true, /*HasSummary=*/true};
+
+ if (Entry.ID == bitc::FULL_LTO_GLOBALVAL_SUMMARY_BLOCK_ID)
+ return BitcodeLTOInfo{/*IsThinLTO=*/false, /*HasSummary=*/true};
// Ignore other sub-blocks.
if (Stream.SkipBlock())
@@ -5638,12 +5669,12 @@ Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) {
Error llvm::readModuleSummaryIndex(MemoryBufferRef Buffer,
ModuleSummaryIndex &CombinedIndex,
- unsigned ModuleId) {
+ uint64_t ModuleId) {
Expected<BitcodeModule> BM = getSingleModule(Buffer);
if (!BM)
return BM.takeError();
- return BM->readSummary(CombinedIndex, ModuleId);
+ return BM->readSummary(CombinedIndex, BM->getModuleIdentifier(), ModuleId);
}
Expected<std::unique_ptr<ModuleSummaryIndex>>
@@ -5655,12 +5686,12 @@ llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) {
return BM->getSummary();
}
-Expected<bool> llvm::hasGlobalValueSummary(MemoryBufferRef Buffer) {
+Expected<BitcodeLTOInfo> llvm::getBitcodeLTOInfo(MemoryBufferRef Buffer) {
Expected<BitcodeModule> BM = getSingleModule(Buffer);
if (!BM)
return BM.takeError();
- return BM->hasSummary();
+ return BM->getLTOInfo();
}
Expected<std::unique_ptr<ModuleSummaryIndex>>
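
A client can use the new query to decide how to treat an input before loading it; roughly (a sketch based only on the API shown above, with errors collapsed to a boolean):

    #include "llvm/Bitcode/BitcodeReader.h"
    #include "llvm/Support/Error.h"
    #include "llvm/Support/MemoryBuffer.h"

    // Sketch: returns true only for buffers carrying a ThinLTO summary.
    static bool isThinLTOInput(llvm::MemoryBufferRef Buffer) {
      llvm::Expected<llvm::BitcodeLTOInfo> Info = llvm::getBitcodeLTOInfo(Buffer);
      if (!Info) {
        llvm::consumeError(Info.takeError());
        return false;  // unreadable input: treat as not ThinLTO
      }
      return Info->HasSummary && Info->IsThinLTO;
    }
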
diff --git a/lib/Bitcode/Reader/MetadataLoader.cpp b/lib/Bitcode/Reader/MetadataLoader.cpp
index ee2fe2a0cc18..b1504a8034e0 100644
--- a/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -407,6 +407,11 @@ void PlaceholderQueue::flush(BitcodeReaderMetadataList &MetadataList) {
} // anonynous namespace
+static Error error(const Twine &Message) {
+ return make_error<StringError>(
+ Message, make_error_code(BitcodeError::CorruptedBitcode));
+}
+
class MetadataLoader::MetadataLoaderImpl {
BitcodeReaderMetadataList MetadataList;
BitcodeReaderValueList &ValueList;
@@ -533,6 +538,88 @@ class MetadataLoader::MetadataLoaderImpl {
}
}
+ /// Upgrade the expression from previous versions.
+ Error upgradeDIExpression(uint64_t FromVersion,
+ MutableArrayRef<uint64_t> &Expr,
+ SmallVectorImpl<uint64_t> &Buffer) {
+ auto N = Expr.size();
+ switch (FromVersion) {
+ default:
+ return error("Invalid record");
+ case 0:
+ if (N >= 3 && Expr[N - 3] == dwarf::DW_OP_bit_piece)
+ Expr[N - 3] = dwarf::DW_OP_LLVM_fragment;
+ LLVM_FALLTHROUGH;
+ case 1:
+ // Move DW_OP_deref to the end.
+ if (N && Expr[0] == dwarf::DW_OP_deref) {
+ auto End = Expr.end();
+ if (Expr.size() >= 3 &&
+ *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment)
+ End = std::prev(End, 3);
+ std::move(std::next(Expr.begin()), End, Expr.begin());
+ *std::prev(End) = dwarf::DW_OP_deref;
+ }
+ NeedDeclareExpressionUpgrade = true;
+ LLVM_FALLTHROUGH;
+ case 2: {
+ // Change DW_OP_plus to DW_OP_plus_uconst.
+ // Change DW_OP_minus to DW_OP_constu, DW_OP_minus.
+ auto SubExpr = ArrayRef<uint64_t>(Expr);
+ while (!SubExpr.empty()) {
+ // Skip past other operators with their operands
+ // for this version of the IR, obtained from
+ // historic DIExpression::ExprOperand::getSize().
+ size_t HistoricSize;
+ switch (SubExpr.front()) {
+ default:
+ HistoricSize = 1;
+ break;
+ case dwarf::DW_OP_constu:
+ case dwarf::DW_OP_minus:
+ case dwarf::DW_OP_plus:
+ HistoricSize = 2;
+ break;
+ case dwarf::DW_OP_LLVM_fragment:
+ HistoricSize = 3;
+ break;
+ }
+
+ // If the expression is malformed, make sure we don't
+ // copy more elements than we should.
+ HistoricSize = std::min(SubExpr.size(), HistoricSize);
+ ArrayRef<uint64_t> Args = SubExpr.slice(1, HistoricSize-1);
+
+ switch (SubExpr.front()) {
+ case dwarf::DW_OP_plus:
+ Buffer.push_back(dwarf::DW_OP_plus_uconst);
+ Buffer.append(Args.begin(), Args.end());
+ break;
+ case dwarf::DW_OP_minus:
+ Buffer.push_back(dwarf::DW_OP_constu);
+ Buffer.append(Args.begin(), Args.end());
+ Buffer.push_back(dwarf::DW_OP_minus);
+ break;
+ default:
+ Buffer.push_back(*SubExpr.begin());
+ Buffer.append(Args.begin(), Args.end());
+ break;
+ }
+
+ // Continue with remaining elements.
+ SubExpr = SubExpr.slice(HistoricSize);
+ }
+ Expr = MutableArrayRef<uint64_t>(Buffer);
+ LLVM_FALLTHROUGH;
+ }
+ case 3:
+ // Up-to-date!
+ break;
+ }
+
+ return Error::success();
+ }
+
void upgradeDebugInfo() {
upgradeCUSubprograms();
upgradeCUVariables();
@@ -590,11 +677,6 @@ public:
void upgradeDebugIntrinsics(Function &F) { upgradeDeclareExpressions(F); }
};
-static Error error(const Twine &Message) {
- return make_error<StringError>(
- Message, make_error_code(BitcodeError::CorruptedBitcode));
-}
-
Expected<bool>
MetadataLoader::MetadataLoaderImpl::lazyLoadModuleMetadataBlock() {
IndexCursor = Stream;
@@ -1551,34 +1633,13 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
IsDistinct = Record[0] & 1;
uint64_t Version = Record[0] >> 1;
auto Elts = MutableArrayRef<uint64_t>(Record).slice(1);
- unsigned N = Elts.size();
- // Perform various upgrades.
- switch (Version) {
- case 0:
- if (N >= 3 && Elts[N - 3] == dwarf::DW_OP_bit_piece)
- Elts[N - 3] = dwarf::DW_OP_LLVM_fragment;
- LLVM_FALLTHROUGH;
- case 1:
- // Move DW_OP_deref to the end.
- if (N && Elts[0] == dwarf::DW_OP_deref) {
- auto End = Elts.end();
- if (Elts.size() >= 3 && *std::prev(End, 3) == dwarf::DW_OP_LLVM_fragment)
- End = std::prev(End, 3);
- std::move(std::next(Elts.begin()), End, Elts.begin());
- *std::prev(End) = dwarf::DW_OP_deref;
- }
- NeedDeclareExpressionUpgrade = true;
- LLVM_FALLTHROUGH;
- case 2:
- // Up-to-date!
- break;
- default:
- return error("Invalid record");
- }
+
+ SmallVector<uint64_t, 6> Buffer;
+ if (Error Err = upgradeDIExpression(Version, Elts, Buffer))
+ return Err;
MetadataList.assignValue(
- GET_OR_DISTINCT(DIExpression, (Context, makeArrayRef(Record).slice(1))),
- NextMetadataNo);
+ GET_OR_DISTINCT(DIExpression, (Context, Elts)), NextMetadataNo);
NextMetadataNo++;
break;
}
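
Concretely, the new version-2 to version-3 step only rewrites the two arithmetic operators and copies everything else through unchanged. A worked example with assumed operand values:

    version 2 elements:  { DW_OP_plus, 16 }          { DW_OP_minus, 8 }
    version 3 elements:  { DW_OP_plus_uconst, 16 }   { DW_OP_constu, 8, DW_OP_minus }
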
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index d5879fec95cb..feeba31908ae 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -77,10 +77,13 @@ protected:
/// The stream created and owned by the client.
BitstreamWriter &Stream;
+ StringTableBuilder &StrtabBuilder;
+
public:
/// Constructs a BitcodeWriterBase object that writes to the provided
/// \p Stream.
- BitcodeWriterBase(BitstreamWriter &Stream) : Stream(Stream) {}
+ BitcodeWriterBase(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder)
+ : Stream(Stream), StrtabBuilder(StrtabBuilder) {}
protected:
void writeBitcodeHeader();
@@ -97,8 +100,6 @@ class ModuleBitcodeWriter : public BitcodeWriterBase {
/// Pointer to the buffer allocated by caller for bitcode writing.
const SmallVectorImpl<char> &Buffer;
- StringTableBuilder &StrtabBuilder;
-
/// The Module to write to bitcode.
const Module &M;
@@ -142,8 +143,8 @@ public:
BitstreamWriter &Stream, bool ShouldPreserveUseListOrder,
const ModuleSummaryIndex *Index, bool GenerateHash,
ModuleHash *ModHash = nullptr)
- : BitcodeWriterBase(Stream), Buffer(Buffer), StrtabBuilder(StrtabBuilder),
- M(*M), VE(*M, ShouldPreserveUseListOrder), Index(Index),
+ : BitcodeWriterBase(Stream, StrtabBuilder), Buffer(Buffer), M(*M),
+ VE(*M, ShouldPreserveUseListOrder), Index(Index),
GenerateHash(GenerateHash), ModHash(ModHash),
BitcodeStartBit(Stream.GetCurrentBitNo()) {
// Assign ValueIds to any callee values in the index that came from
@@ -331,10 +332,11 @@ public:
/// Constructs a IndexBitcodeWriter object for the given combined index,
/// writing to the provided \p Buffer. When writing a subset of the index
/// for a distributed backend, provide a \p ModuleToSummariesForIndex map.
- IndexBitcodeWriter(BitstreamWriter &Stream, const ModuleSummaryIndex &Index,
+ IndexBitcodeWriter(BitstreamWriter &Stream, StringTableBuilder &StrtabBuilder,
+ const ModuleSummaryIndex &Index,
const std::map<std::string, GVSummaryMapTy>
*ModuleToSummariesForIndex = nullptr)
- : BitcodeWriterBase(Stream), Index(Index),
+ : BitcodeWriterBase(Stream, StrtabBuilder), Index(Index),
ModuleToSummariesForIndex(ModuleToSummariesForIndex) {
// Assign unique value ids to all summaries to be written, for use
// in writing out the call graph edges. Save the mapping from GUID
@@ -1663,7 +1665,7 @@ void ModuleBitcodeWriter::writeDIExpression(const DIExpression *N,
SmallVectorImpl<uint64_t> &Record,
unsigned Abbrev) {
Record.reserve(N->getElements().size() + 1);
- const uint64_t Version = 2 << 1;
+ const uint64_t Version = 3 << 1;
Record.push_back((uint64_t)N->isDistinct() | Version);
Record.append(N->elements_begin(), N->elements_end());
@@ -3595,6 +3597,24 @@ void IndexBitcodeWriter::writeCombinedGlobalValueSummary() {
MaybeEmitOriginalName(*AS);
}
+ if (!Index.cfiFunctionDefs().empty()) {
+ for (auto &S : Index.cfiFunctionDefs()) {
+ NameVals.push_back(StrtabBuilder.add(S));
+ NameVals.push_back(S.size());
+ }
+ Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DEFS, NameVals);
+ NameVals.clear();
+ }
+
+ if (!Index.cfiFunctionDecls().empty()) {
+ for (auto &S : Index.cfiFunctionDecls()) {
+ NameVals.push_back(StrtabBuilder.add(S));
+ NameVals.push_back(S.size());
+ }
+ Stream.EmitRecord(bitc::FS_CFI_FUNCTION_DECLS, NameVals);
+ NameVals.clear();
+ }
+
Stream.ExitBlock();
}
@@ -3829,6 +3849,14 @@ void BitcodeWriter::writeModule(const Module *M,
ModuleWriter.write();
}
+void BitcodeWriter::writeIndex(
+ const ModuleSummaryIndex *Index,
+ const std::map<std::string, GVSummaryMapTy> *ModuleToSummariesForIndex) {
+ IndexBitcodeWriter IndexWriter(*Stream, StrtabBuilder, *Index,
+ ModuleToSummariesForIndex);
+ IndexWriter.write();
+}
+
/// WriteBitcodeToFile - Write the specified module to the specified output
/// stream.
void llvm::WriteBitcodeToFile(const Module *M, raw_ostream &Out,
@@ -3880,11 +3908,9 @@ void llvm::WriteIndexToFile(
SmallVector<char, 0> Buffer;
Buffer.reserve(256 * 1024);
- BitstreamWriter Stream(Buffer);
- writeBitcodeHeader(Stream);
-
- IndexBitcodeWriter IndexWriter(Stream, Index, ModuleToSummariesForIndex);
- IndexWriter.write();
+ BitcodeWriter Writer(Buffer);
+ Writer.writeIndex(&Index, ModuleToSummariesForIndex);
+ Writer.writeStrtab();
Out.write((char *)&Buffer.front(), Buffer.size());
}
diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt
index f7c09be15fb7..946067e6358f 100644
--- a/lib/CMakeLists.txt
+++ b/lib/CMakeLists.txt
@@ -24,3 +24,4 @@ add_subdirectory(Fuzzer)
add_subdirectory(Passes)
add_subdirectory(ToolDrivers)
add_subdirectory(XRay)
+add_subdirectory(Testing)
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index 407d5623d670..ad348d723bae 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -1286,11 +1286,7 @@ bool AsmPrinter::doFinalization(Module &M) {
const TargetLoweringObjectFile &TLOF = getObjFileLowering();
- // Emit module flags.
- SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
- M.getModuleFlagsMetadata(ModuleFlags);
- if (!ModuleFlags.empty())
- TLOF.emitModuleFlags(*OutStreamer, ModuleFlags, TM);
+ TLOF.emitModuleMetadata(*OutStreamer, M, TM);
if (TM.getTargetTriple().isOSBinFormatELF()) {
MachineModuleInfoELF &MMIELF = MMI->getObjFileInfo<MachineModuleInfoELF>();
diff --git a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
index 04073b3aed68..dc39d1e6cb52 100644
--- a/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfCompileUnit.cpp
@@ -552,7 +552,7 @@ DIE *DwarfCompileUnit::constructVariableDIEImpl(const DbgVariable &DV,
int Offset = TFI->getFrameIndexReference(*Asm->MF, Fragment.FI, FrameReg);
DwarfExpr.addFragmentOffset(Expr);
SmallVector<uint64_t, 8> Ops;
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(Offset);
Ops.append(Expr->elements_begin(), Expr->elements_end());
DIExpressionCursor Cursor(Ops);
@@ -821,7 +821,7 @@ void DwarfCompileUnit::addAddress(DIE &Die, dwarf::Attribute Attribute,
SmallVector<uint64_t, 8> Ops;
if (Location.isIndirect() && Location.getOffset()) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(Location.getOffset());
}
DIExpressionCursor Cursor(Ops);
@@ -850,7 +850,7 @@ void DwarfCompileUnit::addComplexAddress(const DbgVariable &DV, DIE &Die,
SmallVector<uint64_t, 8> Ops;
if (Location.isIndirect() && Location.getOffset()) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(Location.getOffset());
}
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
index e3fd21a1fd70..75eb355bfb54 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp
@@ -1511,7 +1511,7 @@ static void emitDebugLocValue(const AsmPrinter &AP, const DIBasicType *BT,
DwarfExpr.setMemoryLocationKind();
SmallVector<uint64_t, 8> Ops;
if (Location.isIndirect() && Location.getOffset()) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(Location.getOffset());
}
Ops.append(DIExpr->elements_begin(), DIExpr->elements_end());
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.h b/lib/CodeGen/AsmPrinter/DwarfDebug.h
index ebfba4cfc275..5dfe06c64ec2 100644
--- a/lib/CodeGen/AsmPrinter/DwarfDebug.h
+++ b/lib/CodeGen/AsmPrinter/DwarfDebug.h
@@ -134,6 +134,13 @@ public:
assert(!FrameIndexExprs.empty() && "Expected an MMI entry");
assert(!V.FrameIndexExprs.empty() && "Expected an MMI entry");
+ if (FrameIndexExprs.size()) {
+ auto *Expr = FrameIndexExprs.back().Expr;
+ // Get rid of duplicate non-fragment entries. More than one non-fragment
+ // dbg.declare makes no sense so ignore all but the first.
+ if (!Expr || !Expr->isFragment())
+ return;
+ }
FrameIndexExprs.append(V.FrameIndexExprs.begin(), V.FrameIndexExprs.end());
assert(all_of(FrameIndexExprs,
[](FrameIndexExpr &FIE) {
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
index d96479f43433..fe38ee805682 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.cpp
@@ -248,15 +248,25 @@ bool DwarfExpression::addMachineRegExpression(const TargetRegisterInfo &TRI,
assert(Reg.Size == 0 && "subregister has same size as superregister");
// Pattern-match combinations for which more efficient representations exist.
- // [Reg, Offset, DW_OP_plus] --> [DW_OP_breg, Offset].
- // [Reg, Offset, DW_OP_minus] --> [DW_OP_breg, -Offset].
- // If Reg is a subregister we need to mask it out before subtracting.
- if (Op && ((Op->getOp() == dwarf::DW_OP_plus) ||
- (Op->getOp() == dwarf::DW_OP_minus && !SubRegisterSizeInBits))) {
- int Offset = Op->getArg(0);
- SignedOffset = (Op->getOp() == dwarf::DW_OP_plus) ? Offset : -Offset;
+ // [Reg, DW_OP_plus_uconst, Offset] --> [DW_OP_breg, Offset].
+ if (Op && (Op->getOp() == dwarf::DW_OP_plus_uconst)) {
+ SignedOffset = Op->getArg(0);
ExprCursor.take();
}
+
+ // [Reg, DW_OP_constu, Offset, DW_OP_plus] --> [DW_OP_breg, Offset]
+ // [Reg, DW_OP_constu, Offset, DW_OP_minus] --> [DW_OP_breg,-Offset]
+ // If Reg is a subregister we need to mask it out before subtracting.
+ if (Op && Op->getOp() == dwarf::DW_OP_constu) {
+ auto N = ExprCursor.peekNext();
+ if (N && (N->getOp() == dwarf::DW_OP_plus ||
+ (N->getOp() == dwarf::DW_OP_minus && !SubRegisterSizeInBits))) {
+ int Offset = Op->getArg(0);
+ SignedOffset = (N->getOp() == dwarf::DW_OP_minus) ? -Offset : Offset;
+ ExprCursor.consume(2);
+ }
+ }
+
if (FBReg)
addFBReg(SignedOffset);
else
@@ -320,17 +330,14 @@ void DwarfExpression::addExpression(DIExpressionCursor &&ExprCursor,
LocationKind = Unknown;
return;
}
- case dwarf::DW_OP_plus:
+ case dwarf::DW_OP_plus_uconst:
assert(LocationKind != Register);
emitOp(dwarf::DW_OP_plus_uconst);
emitUnsigned(Op->getArg(0));
break;
+ case dwarf::DW_OP_plus:
case dwarf::DW_OP_minus:
- assert(LocationKind != Register);
- // There is no DW_OP_minus_uconst.
- emitOp(dwarf::DW_OP_constu);
- emitUnsigned(Op->getArg(0));
- emitOp(dwarf::DW_OP_minus);
+ emitOp(Op->getOp());
break;
case dwarf::DW_OP_deref: {
assert(LocationKind != Register);
diff --git a/lib/CodeGen/AsmPrinter/DwarfExpression.h b/lib/CodeGen/AsmPrinter/DwarfExpression.h
index de8613200067..728f8ad9225b 100644
--- a/lib/CodeGen/AsmPrinter/DwarfExpression.h
+++ b/lib/CodeGen/AsmPrinter/DwarfExpression.h
@@ -42,6 +42,9 @@ public:
DIExpressionCursor(ArrayRef<uint64_t> Expr)
: Start(Expr.begin()), End(Expr.end()) {}
+ DIExpressionCursor(const DIExpressionCursor &C)
+ : Start(C.Start), End(C.End) {}
+
/// Consume one operation.
Optional<DIExpression::ExprOperand> take() {
if (Start == End)
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 7f7d3e650e02..708f5f7536ff 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -475,7 +475,7 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
SmallVector<uint64_t, 9> Ops;
if (Location.isIndirect() && Location.getOffset()) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(Location.getOffset());
}
// If we started with a pointer to the __Block_byref... struct, then
@@ -487,7 +487,7 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
// DW_OP_plus_uconst ForwardingFieldOffset. Note there's no point in
// adding the offset if it's 0.
if (forwardingFieldOffset > 0) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(forwardingFieldOffset);
}
@@ -499,7 +499,7 @@ void DwarfUnit::addBlockByrefAddress(const DbgVariable &DV, DIE &Die,
// for the variable's field to get to the location of the actual variable:
// DW_OP_plus_uconst varFieldOffset. Again, don't add if it's 0.
if (varFieldOffset > 0) {
- Ops.push_back(dwarf::DW_OP_plus);
+ Ops.push_back(dwarf::DW_OP_plus_uconst);
Ops.push_back(varFieldOffset);
}
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index c2037cb7f1ae..37e176099ea7 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -134,7 +134,7 @@ static cl::opt<bool> DisablePreheaderProtect(
cl::desc("Disable protection against removing loop preheaders"));
static cl::opt<bool> ProfileGuidedSectionPrefix(
- "profile-guided-section-prefix", cl::Hidden, cl::init(true),
+ "profile-guided-section-prefix", cl::Hidden, cl::init(true), cl::ZeroOrMore,
cl::desc("Use profile info to add section prefix for hot/cold functions"));
static cl::opt<unsigned> FreqRatioToSkipMerge(
diff --git a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index ef5818dabe23..1d0d3dffa4c5 100644
--- a/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -82,6 +82,12 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
case TargetOpcode::G_UDIV:
assert(Size == 32 && "Unsupported size");
return RTLIB::UDIV_I32;
+ case TargetOpcode::G_SREM:
+ assert(Size == 32 && "Unsupported size");
+ return RTLIB::SREM_I32;
+ case TargetOpcode::G_UREM:
+ assert(Size == 32 && "Unsupported size");
+ return RTLIB::UREM_I32;
case TargetOpcode::G_FADD:
assert((Size == 32 || Size == 64) && "Unsupported size");
return Size == 64 ? RTLIB::ADD_F64 : RTLIB::ADD_F32;
@@ -93,43 +99,57 @@ static RTLIB::Libcall getRTLibDesc(unsigned Opcode, unsigned Size) {
llvm_unreachable("Unknown libcall function");
}
-static LegalizerHelper::LegalizeResult
-simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
- Type *OpType) {
+LegalizerHelper::LegalizeResult llvm::replaceWithLibcall(
+ MachineInstr &MI, MachineIRBuilder &MIRBuilder, RTLIB::Libcall Libcall,
+ const CallLowering::ArgInfo &Result, ArrayRef<CallLowering::ArgInfo> Args) {
auto &CLI = *MIRBuilder.getMF().getSubtarget().getCallLowering();
auto &TLI = *MIRBuilder.getMF().getSubtarget().getTargetLowering();
- auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
const char *Name = TLI.getLibcallName(Libcall);
MIRBuilder.getMF().getFrameInfo().setHasCalls(true);
- CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
- MachineOperand::CreateES(Name),
- {MI.getOperand(0).getReg(), OpType},
- {{MI.getOperand(1).getReg(), OpType},
- {MI.getOperand(2).getReg(), OpType}});
+ MIRBuilder.setInstr(MI);
+ if (!CLI.lowerCall(MIRBuilder, TLI.getLibcallCallingConv(Libcall),
+ MachineOperand::CreateES(Name), Result, Args))
+ return LegalizerHelper::UnableToLegalize;
+
+ // We're about to remove MI, so move the insert point after it.
+ MIRBuilder.setInsertPt(MIRBuilder.getMBB(),
+ std::next(MIRBuilder.getInsertPt()));
+
MI.eraseFromParent();
return LegalizerHelper::Legalized;
}
+static LegalizerHelper::LegalizeResult
+simpleLibcall(MachineInstr &MI, MachineIRBuilder &MIRBuilder, unsigned Size,
+ Type *OpType) {
+ auto Libcall = getRTLibDesc(MI.getOpcode(), Size);
+ return replaceWithLibcall(MI, MIRBuilder, Libcall,
+ {MI.getOperand(0).getReg(), OpType},
+ {{MI.getOperand(1).getReg(), OpType},
+ {MI.getOperand(2).getReg(), OpType}});
+}
+
LegalizerHelper::LegalizeResult
LegalizerHelper::libcall(MachineInstr &MI) {
- LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- unsigned Size = Ty.getSizeInBits();
+ LLT LLTy = MRI.getType(MI.getOperand(0).getReg());
+ unsigned Size = LLTy.getSizeInBits();
auto &Ctx = MIRBuilder.getMF().getFunction()->getContext();
- MIRBuilder.setInstr(MI);
switch (MI.getOpcode()) {
default:
return UnableToLegalize;
case TargetOpcode::G_SDIV:
- case TargetOpcode::G_UDIV: {
- Type *Ty = Type::getInt32Ty(Ctx);
- return simpleLibcall(MI, MIRBuilder, Size, Ty);
+ case TargetOpcode::G_UDIV:
+ case TargetOpcode::G_SREM:
+ case TargetOpcode::G_UREM: {
+ Type *HLTy = Type::getInt32Ty(Ctx);
+ return simpleLibcall(MI, MIRBuilder, Size, HLTy);
}
case TargetOpcode::G_FADD:
case TargetOpcode::G_FPOW:
case TargetOpcode::G_FREM: {
- Type *Ty = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
- return simpleLibcall(MI, MIRBuilder, Size, Ty);
+ Type *HLTy = Size == 64 ? Type::getDoubleTy(Ctx) : Type::getFloatTy(Ctx);
+ return simpleLibcall(MI, MIRBuilder, Size, HLTy);
}
}
}
@@ -237,17 +257,18 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts =
MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() / NarrowSize;
- LLT NarrowPtrTy = LLT::pointer(
- MRI.getType(MI.getOperand(1).getReg()).getAddressSpace(), NarrowSize);
+ LLT OffsetTy = LLT::scalar(
+ MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
SmallVector<unsigned, 2> DstRegs;
for (int i = 0; i < NumParts; ++i) {
unsigned DstReg = MRI.createGenericVirtualRegister(NarrowTy);
- unsigned SrcReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
- unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
+ unsigned SrcReg = 0;
+ unsigned Adjustment = i * NarrowSize / 8;
+
+ MIRBuilder.materializeGEP(SrcReg, MI.getOperand(1).getReg(), OffsetTy,
+ Adjustment);
- MIRBuilder.buildConstant(Offset, i * NarrowSize / 8);
- MIRBuilder.buildGEP(SrcReg, MI.getOperand(1).getReg(), Offset);
// TODO: This is conservatively correct, but we probably want to split the
// memory operands in the future.
MIRBuilder.buildLoad(DstReg, SrcReg, **MI.memoperands_begin());
@@ -263,17 +284,19 @@ LegalizerHelper::LegalizeResult LegalizerHelper::narrowScalar(MachineInstr &MI,
unsigned NarrowSize = NarrowTy.getSizeInBits();
int NumParts =
MRI.getType(MI.getOperand(0).getReg()).getSizeInBits() / NarrowSize;
- LLT NarrowPtrTy = LLT::pointer(
- MRI.getType(MI.getOperand(1).getReg()).getAddressSpace(), NarrowSize);
+ LLT OffsetTy = LLT::scalar(
+ MRI.getType(MI.getOperand(1).getReg()).getScalarSizeInBits());
SmallVector<unsigned, 2> SrcRegs;
extractParts(MI.getOperand(0).getReg(), NarrowTy, NumParts, SrcRegs);
for (int i = 0; i < NumParts; ++i) {
- unsigned DstReg = MRI.createGenericVirtualRegister(NarrowPtrTy);
- unsigned Offset = MRI.createGenericVirtualRegister(LLT::scalar(64));
- MIRBuilder.buildConstant(Offset, i * NarrowSize / 8);
- MIRBuilder.buildGEP(DstReg, MI.getOperand(1).getReg(), Offset);
+ unsigned DstReg = 0;
+ unsigned Adjustment = i * NarrowSize / 8;
+
+ MIRBuilder.materializeGEP(DstReg, MI.getOperand(1).getReg(), OffsetTy,
+ Adjustment);
+
// TODO: This is conservatively correct, but we probably want to split the
// memory operands in the future.
MIRBuilder.buildStore(SrcRegs[i], DstReg, **MI.memoperands_begin());
diff --git a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
index 54ef7e5c5a1b..79d312fb52ca 100644
--- a/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
+++ b/lib/CodeGen/GlobalISel/MachineIRBuilder.cpp
@@ -191,6 +191,24 @@ MachineInstrBuilder MachineIRBuilder::buildGEP(unsigned Res, unsigned Op0,
.addUse(Op1);
}
+Optional<MachineInstrBuilder>
+MachineIRBuilder::materializeGEP(unsigned &Res, unsigned Op0,
+ const LLT &ValueTy, uint64_t Value) {
+ assert(Res == 0 && "Res is a result argument");
+ assert(ValueTy.isScalar() && "invalid offset type");
+
+ if (Value == 0) {
+ Res = Op0;
+ return None;
+ }
+
+ Res = MRI->createGenericVirtualRegister(MRI->getType(Op0));
+ unsigned TmpReg = MRI->createGenericVirtualRegister(ValueTy);
+
+ buildConstant(TmpReg, Value);
+ return buildGEP(Res, Op0, TmpReg);
+}
+
MachineInstrBuilder MachineIRBuilder::buildPtrMask(unsigned Res, unsigned Op0,
uint32_t NumBits) {
assert(MRI->getType(Res).isPointer() &&
diff --git a/lib/CodeGen/MachineBlockPlacement.cpp b/lib/CodeGen/MachineBlockPlacement.cpp
index fc52b0da0d61..2d4b95974cc6 100644
--- a/lib/CodeGen/MachineBlockPlacement.cpp
+++ b/lib/CodeGen/MachineBlockPlacement.cpp
@@ -594,8 +594,8 @@ BranchProbability MachineBlockPlacement::collectViableSuccessors(
// Assume A->C is very hot (>90%), and C->D has a 50% probability, then after
// A->C is chosen as a fall-through, D won't be selected as a successor of C
// due to CFG constraint (the probability of C->D is not greater than
- // HotProb to break top-order). If we exclude E that is not in BlockFilter
- // when calculating the probability of C->D, D will be selected and we
+ // HotProb to break topo-order). If we exclude E that is not in BlockFilter
+ // when calculating the probability of C->D, D will be selected and we
// will get A C D B as the layout of this loop.
auto AdjustedSumProb = BranchProbability::getOne();
for (MachineBasicBlock *Succ : BB->successors()) {
@@ -1156,7 +1156,7 @@ void MachineBlockPlacement::precomputeTriangleChains() {
continue;
// Now we have an interesting triangle. Insert it if it's not part of an
- // existing chain
+ // existing chain.
// Note: This cannot be replaced with a call insert() or emplace() because
// the find key is BB, but the insert/emplace key is PDom.
auto Found = TriangleChainMap.find(&BB);
@@ -1298,9 +1298,9 @@ bool MachineBlockPlacement::hasBetterLayoutPredecessor(
// | | | |
// ---BB | | BB
// | | | |
- // | pred-- | Succ--
+ // | Pred-- | Succ--
// | | | |
- // ---succ ---pred--
+ // ---Succ ---Pred--
//
// cost = freq(S->Pred) + freq(BB->Succ) cost = 2 * freq (S->Pred)
// = freq(S->Pred) + freq(S->BB)
diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 52d5819f8dbc..c7113f1fdc47 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -895,8 +895,11 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
// If the physreg has no defs anywhere, it's just an ambient register
// and we can freely move its uses. Alternatively, if it's allocatable,
// it could get allocated to something with a def during allocation.
- if (!MRI->isConstantPhysReg(Reg))
- return false;
+ // However, if the physreg is known to always be caller saved/restored
+ // then this use is safe to hoist.
+ if (!MRI->isConstantPhysReg(Reg) &&
+ !(TRI->isCallerPreservedPhysReg(Reg, *I.getParent()->getParent())))
+ return false;
// Otherwise it's safe to move.
continue;
} else if (!MO.isDead()) {
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index a0967f574006..2d4422d94a17 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -2217,7 +2217,8 @@ SDValue DAGCombiner::visitADDCARRYLike(SDValue N0, SDValue N1, SDValue CarryIn,
SDNode *N) {
// Iff the flag result is dead:
// (addcarry (add|uaddo X, Y), 0, Carry) -> (addcarry X, Y, Carry)
- if ((N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::UADDO) &&
+ if ((N0.getOpcode() == ISD::ADD ||
+ (N0.getOpcode() == ISD::UADDO && N0.getResNo() == 0)) &&
isNullConstant(N1) && !N->hasAnyUseOfValue(1))
return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
N0.getOperand(0), N0.getOperand(1), CarryIn);
@@ -12460,10 +12461,27 @@ bool DAGCombiner::MergeStoresOfConstantsOrVecElts(
LSBaseSDNode *FirstInChain = StoreNodes[0].MemNode;
SDValue NewChain = getMergeStoreChains(StoreNodes, NumStores);
- SDValue NewStore = DAG.getStore(NewChain, DL, StoredVal,
- FirstInChain->getBasePtr(),
- FirstInChain->getPointerInfo(),
- FirstInChain->getAlignment());
+
+  // Make sure we use a trunc store if it's necessary to be legal.
+ SDValue NewStore;
+ if (TLI.isTypeLegal(StoredVal.getValueType())) {
+ NewStore = DAG.getStore(NewChain, DL, StoredVal, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(),
+ FirstInChain->getAlignment());
+ } else { // Must be realized as a trunc store
+ EVT LegalizedStoredValueTy =
+ TLI.getTypeToTransformTo(*DAG.getContext(), StoredVal.getValueType());
+ unsigned LegalizedStoreSize = LegalizedStoredValueTy.getSizeInBits();
+ ConstantSDNode *C = cast<ConstantSDNode>(StoredVal);
+ SDValue ExtendedStoreVal =
+ DAG.getConstant(C->getAPIntValue().zextOrTrunc(LegalizedStoreSize), DL,
+ LegalizedStoredValueTy);
+ NewStore = DAG.getTruncStore(
+ NewChain, DL, ExtendedStoreVal, FirstInChain->getBasePtr(),
+ FirstInChain->getPointerInfo(), StoredVal.getValueType() /*TVT*/,
+ FirstInChain->getAlignment(),
+ FirstInChain->getMemOperand()->getFlags());
+ }
// Replace all merged stores with the new store.
for (unsigned i = 0; i < NumStores; ++i)
@@ -12731,8 +12749,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
IsFast) {
LastLegalType = i + 1;
// Or check whether a truncstore is legal.
- } else if (!LegalTypes &&
- TLI.getTypeAction(Context, StoreTy) ==
+ } else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy =
TLI.getTypeToTransformTo(Context, StoredVal.getValueType());
@@ -12947,8 +12964,7 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
else if (TLI.getTypeAction(Context, StoreTy) ==
TargetLowering::TypePromoteInteger) {
EVT LegalizedStoredValueTy = TLI.getTypeToTransformTo(Context, StoreTy);
- if (!LegalTypes &&
- TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
+ if (TLI.isTruncStoreLegal(LegalizedStoredValueTy, StoreTy) &&
TLI.canMergeStoresTo(FirstStoreAS, LegalizedStoredValueTy) &&
TLI.isLoadExtLegal(ISD::ZEXTLOAD, LegalizedStoredValueTy,
StoreTy) &&
@@ -12958,8 +12974,8 @@ bool DAGCombiner::MergeConsecutiveStores(StoreSDNode *St) {
TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
FirstStoreAS, FirstStoreAlign, &IsFastSt) &&
IsFastSt &&
- TLI.allowsMemoryAccess(Context, DL, LegalizedStoredValueTy,
- FirstLoadAS, FirstLoadAlign, &IsFastLd) &&
+ TLI.allowsMemoryAccess(Context, DL, StoreTy, FirstLoadAS,
+ FirstLoadAlign, &IsFastLd) &&
IsFastLd)
LastLegalIntegerType = i + 1;
}
@@ -13189,10 +13205,6 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
Chain = ST->getChain();
}
- // Try transforming N to an indexed store.
- if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
- return SDValue(N, 0);
-
// FIXME: is there such a thing as a truncating indexed store?
if (ST->isTruncatingStore() && ST->isUnindexed() &&
Value.getValueType().isInteger()) {
@@ -13287,6 +13299,10 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
}
}
+ // Try transforming N to an indexed store.
+ if (CombineToPreIndexedLoadStore(N) || CombineToPostIndexedLoadStore(N))
+ return SDValue(N, 0);
+
// Turn 'store float 1.0, Ptr' -> 'store int 0x12345678, Ptr'
//
// Make sure to do this only after attempting to merge stores in order to
@@ -14692,21 +14708,7 @@ static SDValue narrowExtractedVectorLoad(SDNode *Extract, SelectionDAG &DAG) {
MachineMemOperand *MMO = MF.getMachineMemOperand(Ld->getMemOperand(), Offset,
VT.getStoreSize());
SDValue NewLd = DAG.getLoad(VT, DL, Ld->getChain(), NewAddr, MMO);
-
- // The new load must have the same position as the old load in terms of memory
- // dependency. Create a TokenFactor for Ld and NewLd and update uses of Ld's
- // output chain to use that TokenFactor.
- // TODO: This code is based on a similar sequence in x86 lowering. It should
- // be moved to a helper function, so it can be shared and reused.
- if (Ld->hasAnyUseOfValue(1)) {
- SDValue OldChain = SDValue(Ld, 1);
- SDValue NewChain = SDValue(NewLd.getNode(), 1);
- SDValue TokenFactor = DAG.getNode(ISD::TokenFactor, DL, MVT::Other,
- OldChain, NewChain);
- DAG.ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
- DAG.UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
- }
-
+ DAG.makeEquivalentMemoryOrdering(Ld, NewLd);
return NewLd;
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 606b8952f3c1..b736037d71dd 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -523,3 +523,29 @@ void FunctionLoweringInfo::setCurrentSwiftErrorVReg(
const MachineBasicBlock *MBB, const Value *Val, unsigned VReg) {
SwiftErrorVRegDefMap[std::make_pair(MBB, Val)] = VReg;
}
+
+std::pair<unsigned, bool>
+FunctionLoweringInfo::getOrCreateSwiftErrorVRegDefAt(const Instruction *I) {
+ auto Key = PointerIntPair<const Instruction *, 1, bool>(I, true);
+ auto It = SwiftErrorVRegDefUses.find(Key);
+ if (It == SwiftErrorVRegDefUses.end()) {
+ auto &DL = MF->getDataLayout();
+ const TargetRegisterClass *RC = TLI->getRegClassFor(TLI->getPointerTy(DL));
+ unsigned VReg = MF->getRegInfo().createVirtualRegister(RC);
+ SwiftErrorVRegDefUses[Key] = VReg;
+ return std::make_pair(VReg, true);
+ }
+ return std::make_pair(It->second, false);
+}
+
+std::pair<unsigned, bool>
+FunctionLoweringInfo::getOrCreateSwiftErrorVRegUseAt(const Instruction *I, const MachineBasicBlock *MBB, const Value *Val) {
+ auto Key = PointerIntPair<const Instruction *, 1, bool>(I, false);
+ auto It = SwiftErrorVRegDefUses.find(Key);
+ if (It == SwiftErrorVRegDefUses.end()) {
+ unsigned VReg = getOrCreateSwiftErrorVReg(MBB, Val);
+ SwiftErrorVRegDefUses[Key] = VReg;
+ return std::make_pair(VReg, true);
+ }
+ return std::make_pair(It->second, false);
+}
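getOrCreateSwiftErrorVRegDefAt and getOrCreateSwiftErrorVRegUseAt key their lookups on an (instruction, is-def) pair and report whether a fresh vreg was created. A simplified, self-contained sketch of that bookkeeping, with a std::map standing in for the DenseMap of PointerIntPair keys (all names illustrative):

    #include <map>
    #include <utility>

    using VReg = unsigned;

    struct SwiftErrorVRegDefUseMap {
      std::map<std::pair<const void *, bool>, VReg> Map; // (instruction, is-def)
      VReg NextVReg = 1;

      // Returns the vreg for the given key and whether it had to be created,
      // mirroring getOrCreateSwiftErrorVRegDefAt/UseAt.
      std::pair<VReg, bool> getOrCreate(const void *Inst, bool IsDef) {
        auto Key = std::make_pair(Inst, IsDef);
        auto It = Map.find(Key);
        if (It != Map.end())
          return {It->second, false}; // reuse the previously assigned vreg
        VReg R = NextVReg++;
        Map.emplace(Key, R);
        return {R, true};             // freshly created
      }
    };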
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index e54eaa3b81be..15e87b7af18d 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2192,19 +2192,6 @@ static bool isSinCosLibcallAvailable(SDNode *Node, const TargetLowering &TLI) {
return TLI.getLibcallName(LC) != nullptr;
}
-/// Return true if sincos libcall is available and can be used to combine sin
-/// and cos.
-static bool canCombineSinCosLibcall(SDNode *Node, const TargetLowering &TLI,
- const TargetMachine &TM) {
- if (!isSinCosLibcallAvailable(Node, TLI))
- return false;
- // GNU sin/cos functions set errno while sincos does not. Therefore
- // combining sin and cos is only safe if unsafe-fpmath is enabled.
- if (TM.getTargetTriple().isGNUEnvironment() && !TM.Options.UnsafeFPMath)
- return false;
- return true;
-}
-
/// Only issue sincos libcall if both sin and cos are needed.
static bool useSinCos(SDNode *Node) {
unsigned OtherOpcode = Node->getOpcode() == ISD::FSIN
@@ -3247,7 +3234,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Turn fsin / fcos into ISD::FSINCOS node if there are a pair of fsin /
// fcos which share the same operand and both are used.
if ((TLI.isOperationLegalOrCustom(ISD::FSINCOS, VT) ||
- canCombineSinCosLibcall(Node, TLI, TM))
+ isSinCosLibcallAvailable(Node, TLI))
&& useSinCos(Node)) {
SDVTList VTs = DAG.getVTList(VT, VT);
Tmp1 = DAG.getNode(ISD::FSINCOS, dl, VTs, Node->getOperand(0));
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 0d5e07ded25c..a3ba52a148ee 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1828,10 +1828,11 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
TLI.isOperationLegalOrCustom(N->getOpcode() == ISD::ADD ?
ISD::UADDO : ISD::USUBO,
TLI.getTypeToExpandTo(*DAG.getContext(), NVT));
+ TargetLoweringBase::BooleanContent BoolType = TLI.getBooleanContents(NVT);
+
if (hasOVF) {
EVT OvfVT = getSetCCResultType(NVT);
SDVTList VTList = DAG.getVTList(NVT, OvfVT);
- TargetLoweringBase::BooleanContent BoolType = TLI.getBooleanContents(NVT);
int RevOpc;
if (N->getOpcode() == ISD::ADD) {
RevOpc = ISD::SUB;
@@ -1864,6 +1865,13 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
Hi = DAG.getNode(ISD::ADD, dl, NVT, makeArrayRef(HiOps, 2));
SDValue Cmp1 = DAG.getSetCC(dl, getSetCCResultType(NVT), Lo, LoOps[0],
ISD::SETULT);
+
+ if (BoolType == TargetLoweringBase::ZeroOrOneBooleanContent) {
+ SDValue Carry = DAG.getZExtOrTrunc(Cmp1, dl, NVT);
+ Hi = DAG.getNode(ISD::ADD, dl, NVT, Hi, Carry);
+ return;
+ }
+
SDValue Carry1 = DAG.getSelect(dl, NVT, Cmp1,
DAG.getConstant(1, dl, NVT),
DAG.getConstant(0, dl, NVT));
@@ -1878,9 +1886,14 @@ void DAGTypeLegalizer::ExpandIntRes_ADDSUB(SDNode *N,
SDValue Cmp =
DAG.getSetCC(dl, getSetCCResultType(LoOps[0].getValueType()),
LoOps[0], LoOps[1], ISD::SETULT);
- SDValue Borrow = DAG.getSelect(dl, NVT, Cmp,
- DAG.getConstant(1, dl, NVT),
- DAG.getConstant(0, dl, NVT));
+
+ SDValue Borrow;
+ if (BoolType == TargetLoweringBase::ZeroOrOneBooleanContent)
+ Borrow = DAG.getZExtOrTrunc(Cmp, dl, NVT);
+ else
+ Borrow = DAG.getSelect(dl, NVT, Cmp, DAG.getConstant(1, dl, NVT),
+ DAG.getConstant(0, dl, NVT));
+
Hi = DAG.getNode(ISD::SUB, dl, NVT, Hi, Borrow);
}
}
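The reworked expansion exploits the target's boolean content: when SETULT already yields 0 or 1, the carry or borrow out of the low half can be added or subtracted directly rather than selected from the constants 1 and 0. A stand-alone sketch of the expanded two-half addition (illustrative only; 32-bit halves assumed):

    #include <cstdint>
    #include <utility>

    // Expand a 64-bit add into two 32-bit halves. The carry is the SETULT
    // result, which is already 0 or 1 under ZeroOrOneBooleanContent, so it can
    // be added to the high half without a select.
    static std::pair<uint32_t, uint32_t> addExpanded(uint32_t LoA, uint32_t HiA,
                                                     uint32_t LoB, uint32_t HiB) {
      uint32_t Lo = LoA + LoB;
      uint32_t Carry = Lo < LoA ? 1u : 0u; // unsigned wrap-around <=> carry out
      uint32_t Hi = HiA + HiB + Carry;
      return {Lo, Hi};
    }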
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index dff8bd2ad37d..7abdc76cb004 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -7244,6 +7244,24 @@ void SelectionDAG::TransferDbgValues(SDValue From, SDValue To) {
AddDbgValue(I, ToNode, false);
}
+void SelectionDAG::makeEquivalentMemoryOrdering(LoadSDNode *OldLoad,
+ SDValue NewMemOp) {
+ assert(isa<MemSDNode>(NewMemOp.getNode()) && "Expected a memop node");
+ if (!OldLoad->hasAnyUseOfValue(1))
+ return;
+
+ // The new memory operation must have the same position as the old load in
+ // terms of memory dependency. Create a TokenFactor for the old load and new
+ // memory operation and update uses of the old load's output chain to use that
+ // TokenFactor.
+ SDValue OldChain = SDValue(OldLoad, 1);
+ SDValue NewChain = SDValue(NewMemOp.getNode(), 1);
+ SDValue TokenFactor =
+ getNode(ISD::TokenFactor, SDLoc(OldLoad), MVT::Other, OldChain, NewChain);
+ ReplaceAllUsesOfValueWith(OldChain, TokenFactor);
+ UpdateNodeOperands(TokenFactor.getNode(), OldChain, NewChain);
+}
+
//===----------------------------------------------------------------------===//
// SDNode Class
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index d34ac40b9496..f9f431db55be 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -1496,9 +1496,10 @@ void SelectionDAGBuilder::visitRet(const ReturnInst &I) {
true /*isfixed*/, 1 /*origidx*/,
0 /*partOffs*/));
// Create SDNode for the swifterror virtual register.
- OutVals.push_back(DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(
- FuncInfo.MBB, FuncInfo.SwiftErrorArg),
- EVT(TLI.getPointerTy(DL))));
+ OutVals.push_back(
+ DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVRegUseAt(
+ &I, FuncInfo.MBB, FuncInfo.SwiftErrorArg).first,
+ EVT(TLI.getPointerTy(DL))));
}
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
@@ -3581,8 +3582,7 @@ void SelectionDAGBuilder::visitLoad(const LoadInst &I) {
}
void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- assert(TLI.supportSwiftError() &&
+ assert(DAG.getTargetLoweringInfo().supportSwiftError() &&
"call visitStoreToSwiftError when backend supports swifterror");
SmallVector<EVT, 4> ValueVTs;
@@ -3595,15 +3595,15 @@ void SelectionDAGBuilder::visitStoreToSwiftError(const StoreInst &I) {
SDValue Src = getValue(SrcV);
// Create a virtual register, then update the virtual register.
- auto &DL = DAG.getDataLayout();
- const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
- unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
+ unsigned VReg; bool CreatedVReg;
+ std::tie(VReg, CreatedVReg) = FuncInfo.getOrCreateSwiftErrorVRegDefAt(&I);
// Chain, DL, Reg, N or Chain, DL, Reg, N, Glue
// Chain can be getRoot or getControlRoot.
SDValue CopyNode = DAG.getCopyToReg(getRoot(), getCurSDLoc(), VReg,
SDValue(Src.getNode(), Src.getResNo()));
DAG.setRoot(CopyNode);
- FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
+ if (CreatedVReg)
+ FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, I.getOperand(1), VReg);
}
void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
@@ -3633,7 +3633,8 @@ void SelectionDAGBuilder::visitLoadFromSwiftError(const LoadInst &I) {
// Chain, DL, Reg, VT, Glue or Chain, DL, Reg, VT
SDValue L = DAG.getCopyFromReg(
getRoot(), getCurSDLoc(),
- FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, SV), ValueVTs[0]);
+ FuncInfo.getOrCreateSwiftErrorVRegUseAt(&I, FuncInfo.MBB, SV).first,
+ ValueVTs[0]);
setValue(&I, L);
}
@@ -4942,11 +4943,12 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
updateDAGForMaybeTailCall(MM);
return nullptr;
}
- case Intrinsic::memcpy_element_atomic: {
- SDValue Dst = getValue(I.getArgOperand(0));
- SDValue Src = getValue(I.getArgOperand(1));
- SDValue NumElements = getValue(I.getArgOperand(2));
- SDValue ElementSize = getValue(I.getArgOperand(3));
+ case Intrinsic::memcpy_element_unordered_atomic: {
+ const ElementUnorderedAtomicMemCpyInst &MI =
+ cast<ElementUnorderedAtomicMemCpyInst>(I);
+ SDValue Dst = getValue(MI.getRawDest());
+ SDValue Src = getValue(MI.getRawSource());
+ SDValue Length = getValue(MI.getLength());
// Emit a library call.
TargetLowering::ArgListTy Args;
@@ -4958,18 +4960,13 @@ SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I, unsigned Intrinsic) {
Entry.Node = Src;
Args.push_back(Entry);
- Entry.Ty = I.getArgOperand(2)->getType();
- Entry.Node = NumElements;
- Args.push_back(Entry);
-
- Entry.Ty = Type::getInt32Ty(*DAG.getContext());
- Entry.Node = ElementSize;
+ Entry.Ty = MI.getLength()->getType();
+ Entry.Node = Length;
Args.push_back(Entry);
- uint64_t ElementSizeConstant =
- cast<ConstantInt>(I.getArgOperand(3))->getZExtValue();
+ uint64_t ElementSizeConstant = MI.getElementSizeInBytes();
RTLIB::Libcall LibraryCall =
- RTLIB::getMEMCPY_ELEMENT_ATOMIC(ElementSizeConstant);
+ RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(ElementSizeConstant);
if (LibraryCall == RTLIB::UNKNOWN_LIBCALL)
report_fatal_error("Unsupported element size");
@@ -6030,9 +6027,11 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
SwiftErrorVal = V;
// We find the virtual register for the actual swifterror argument.
// Instead of using the Value, we use the virtual register instead.
- Entry.Node =
- DAG.getRegister(FuncInfo.getOrCreateSwiftErrorVReg(FuncInfo.MBB, V),
- EVT(TLI.getPointerTy(DL)));
+ Entry.Node = DAG.getRegister(FuncInfo
+ .getOrCreateSwiftErrorVRegUseAt(
+ CS.getInstruction(), FuncInfo.MBB, V)
+ .first,
+ EVT(TLI.getPointerTy(DL)));
}
Args.push_back(Entry);
@@ -6073,11 +6072,13 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
if (SwiftErrorVal && TLI.supportSwiftError()) {
// Get the last element of InVals.
SDValue Src = CLI.InVals.back();
- const TargetRegisterClass *RC = TLI.getRegClassFor(TLI.getPointerTy(DL));
- unsigned VReg = FuncInfo.MF->getRegInfo().createVirtualRegister(RC);
+ unsigned VReg; bool CreatedVReg;
+ std::tie(VReg, CreatedVReg) =
+ FuncInfo.getOrCreateSwiftErrorVRegDefAt(CS.getInstruction());
SDValue CopyNode = CLI.DAG.getCopyToReg(Result.second, CLI.DL, VReg, Src);
// We update the virtual register for the actual swifterror argument.
- FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
+ if (CreatedVReg)
+ FuncInfo.setCurrentSwiftErrorVReg(FuncInfo.MBB, SwiftErrorVal, VReg);
DAG.setRoot(CopyNode);
}
}
diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
index b67f11f85b70..dcccd17bb98e 100644
--- a/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
+++ b/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp
@@ -1055,6 +1055,7 @@ static void setupSwiftErrorVals(const Function &Fn, const TargetLowering *TLI,
FuncInfo->SwiftErrorVals.clear();
FuncInfo->SwiftErrorVRegDefMap.clear();
FuncInfo->SwiftErrorVRegUpwardsUse.clear();
+ FuncInfo->SwiftErrorVRegDefUses.clear();
FuncInfo->SwiftErrorArg = nullptr;
// Check if function has a swifterror argument.
@@ -1278,6 +1279,80 @@ static void propagateSwiftErrorVRegs(FunctionLoweringInfo *FuncInfo) {
}
}
+void preassignSwiftErrorRegs(const TargetLowering *TLI,
+ FunctionLoweringInfo *FuncInfo,
+ BasicBlock::const_iterator Begin,
+ BasicBlock::const_iterator End) {
+ if (!TLI->supportSwiftError() || FuncInfo->SwiftErrorVals.empty())
+ return;
+
+  // Iterate over instructions and assign vregs to swifterror defs and uses.
+ for (auto It = Begin; It != End; ++It) {
+ ImmutableCallSite CS(&*It);
+ if (CS) {
+ // A call-site with a swifterror argument is both use and def.
+ const Value *SwiftErrorAddr = nullptr;
+ for (auto &Arg : CS.args()) {
+ if (!Arg->isSwiftError())
+ continue;
+ // Use of swifterror.
+ assert(!SwiftErrorAddr && "Cannot have multiple swifterror arguments");
+ SwiftErrorAddr = &*Arg;
+ assert(SwiftErrorAddr->isSwiftError() &&
+ "Must have a swifterror value argument");
+ unsigned VReg; bool CreatedReg;
+ std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
+ &*It, FuncInfo->MBB, SwiftErrorAddr);
+ assert(CreatedReg);
+ }
+ if (!SwiftErrorAddr)
+ continue;
+
+ // Def of swifterror.
+ unsigned VReg; bool CreatedReg;
+ std::tie(VReg, CreatedReg) =
+ FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
+ assert(CreatedReg);
+ FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
+
+ // A load is a use.
+ } else if (const LoadInst *LI = dyn_cast<const LoadInst>(&*It)) {
+ const Value *V = LI->getOperand(0);
+ if (!V->isSwiftError())
+ continue;
+
+ unsigned VReg; bool CreatedReg;
+ std::tie(VReg, CreatedReg) =
+ FuncInfo->getOrCreateSwiftErrorVRegUseAt(LI, FuncInfo->MBB, V);
+ assert(CreatedReg);
+
+ // A store is a def.
+ } else if (const StoreInst *SI = dyn_cast<const StoreInst>(&*It)) {
+ const Value *SwiftErrorAddr = SI->getOperand(1);
+ if (!SwiftErrorAddr->isSwiftError())
+ continue;
+
+ // Def of swifterror.
+ unsigned VReg; bool CreatedReg;
+ std::tie(VReg, CreatedReg) =
+ FuncInfo->getOrCreateSwiftErrorVRegDefAt(&*It);
+ assert(CreatedReg);
+ FuncInfo->setCurrentSwiftErrorVReg(FuncInfo->MBB, SwiftErrorAddr, VReg);
+
+      // A return in a swifterror returning function is a use.
+ } else if (const ReturnInst *R = dyn_cast<const ReturnInst>(&*It)) {
+ const Function *F = R->getParent()->getParent();
+      if (!F->getAttributes().hasAttrSomewhere(Attribute::SwiftError))
+ continue;
+
+ unsigned VReg; bool CreatedReg;
+ std::tie(VReg, CreatedReg) = FuncInfo->getOrCreateSwiftErrorVRegUseAt(
+ R, FuncInfo->MBB, FuncInfo->SwiftErrorArg);
+ assert(CreatedReg);
+ }
+ }
+}
+
void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastISelFailed = false;
// Initialize the Fast-ISel state, if needed.
@@ -1384,6 +1459,10 @@ void SelectionDAGISel::SelectAllBasicBlocks(const Function &Fn) {
FastIS->startNewBlock();
unsigned NumFastIselRemaining = std::distance(Begin, End);
+
+ // Pre-assign swifterror vregs.
+ preassignSwiftErrorRegs(TLI, FuncInfo, Begin, End);
+
// Do FastISel on as many instructions as possible.
for (; BI != Begin; --BI) {
const Instruction *Inst = &*std::prev(BI);
diff --git a/lib/CodeGen/SplitKit.cpp b/lib/CodeGen/SplitKit.cpp
index 3a50aaa69985..008b984dd961 100644
--- a/lib/CodeGen/SplitKit.cpp
+++ b/lib/CodeGen/SplitKit.cpp
@@ -569,8 +569,7 @@ SlotIndex SplitEditor::buildCopy(unsigned FromReg, unsigned ToReg,
// Greedy heuristic: Keep iterating keeping the best covering subreg index
// each time.
- LaneBitmask LanesLeft =
- LaneMask & ~(TRI.getSubRegIndexLaneMask(BestCover));
+ LaneBitmask LanesLeft = LaneMask & ~(TRI.getSubRegIndexLaneMask(BestIdx));
while (LanesLeft.any()) {
unsigned BestIdx = 0;
int BestCover = INT_MIN;
diff --git a/lib/CodeGen/StackColoring.cpp b/lib/CodeGen/StackColoring.cpp
index acb3676fdd71..6bac39c7ee77 100644
--- a/lib/CodeGen/StackColoring.cpp
+++ b/lib/CodeGen/StackColoring.cpp
@@ -86,10 +86,134 @@ STATISTIC(StackSpaceSaved, "Number of bytes saved due to merging slots.");
STATISTIC(StackSlotMerged, "Number of stack slot merged.");
STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region");
+//===----------------------------------------------------------------------===//
+// StackColoring Pass
+//===----------------------------------------------------------------------===//
+//
+// Stack Coloring reduces stack usage by merging stack slots when they
+// can't be used together. For example, consider the following C program:
+//
+// void bar(char *, int);
+// void foo(bool var) {
+// A: {
+// char z[4096];
+// bar(z, 0);
+// }
+//
+// char *p;
+// char x[4096];
+// char y[4096];
+// if (var) {
+// p = x;
+// } else {
+// bar(y, 1);
+// p = y + 1024;
+// }
+// B:
+// bar(p, 2);
+// }
+//
+// Naively-compiled, this program would use 12k of stack space. However, the
+// stack slot corresponding to `z` is always destroyed before either of the
+// stack slots for `x` or `y` are used, and then `x` is only used if `var`
+// is true, while `y` is only used if `var` is false. So at no time are two
+// of the stack slots used together, and therefore we can merge them,
+// compiling the function using only a single 4k alloca:
+//
+// void foo(bool var) { // equivalent
+// char x[4096];
+// char *p;
+// bar(x, 0);
+// if (var) {
+// p = x;
+// } else {
+// bar(x, 1);
+// p = x + 1024;
+// }
+// bar(p, 2);
+// }
+//
+// This is an important optimization if we want stack space to be under
+// control in large functions, both open-coded ones and ones created by
+// inlining.
//
// Implementation Notes:
// ---------------------
//
+// An important part of the above reasoning is that `z` can't be accessed
+// while the latter 2 calls to `bar` are running. This is justified because
+// `z`'s lifetime is over after we exit from block `A:`, so any further
+// accesses to it would be UB. The way we represent this information
+// in LLVM is by having frontends delimit blocks with `lifetime.start`
+// and `lifetime.end` intrinsics.
+//
+// The effect of these intrinsics seems to be as follows (maybe I should
+// specify this in the reference?):
+//
+// L1) at start, each stack-slot is marked as *out-of-scope*, unless no
+// lifetime intrinsic refers to that stack slot, in which case
+// it is marked as *in-scope*.
+// L2) on a `lifetime.start`, a stack slot is marked as *in-scope* and
+// the stack slot is overwritten with `undef`.
+// L3) on a `lifetime.end`, a stack slot is marked as *out-of-scope*.
+// L4) on function exit, all stack slots are marked as *out-of-scope*.
+// L5) `lifetime.end` is a no-op when called on a slot that is already
+// *out-of-scope*.
+// L6) memory accesses to *out-of-scope* stack slots are UB.
+// L7) when a stack-slot is marked as *out-of-scope*, all pointers to it
+// are invalidated, unless the slot is "degenerate". This is used to
+// justify not marking slots as in-use until the pointer to them is
+// used, but feels a bit hacky in the presence of things like LICM. See
+// the "Degenerate Slots" section for more details.
+//
+// Now, let's ground stack coloring on these rules. We'll define a slot
+// as *in-use* at a (dynamic) point in execution if it either can be
+// written to at that point, or if it has a live and non-undef content
+// at that point.
+//
+// Obviously, slots that are never *in-use* together can be merged, and
+// in our example `foo`, the slots for `x`, `y` and `z` are never
+// in-use together (of course, sometimes slots that *are* in-use together
+// might still be mergeable, but we don't care about that here).
+//
+// In this implementation, we successively merge pairs of slots that are
+// not *in-use* together. We could be smarter - for example, we could merge
+// a single large slot with 2 small slots, or we could construct the
+// interference graph and run a "smart" graph coloring algorithm, but with
+// that aside, how do we find out whether a pair of slots might be *in-use*
+// together?
+//
+// From our rules, we see that *out-of-scope* slots are never *in-use*,
+// and from (L7) we see that "non-degenerate" slots remain non-*in-use*
+// until their address is taken. Therefore, we can approximate slot activity
+// using dataflow.
+//
+// A subtle point: naively, we might try to figure out which pairs of
+// stack-slots interfere by propagating `S in-use` through the CFG for every
+// stack-slot `S`, and having `S` and `T` interfere if there is a CFG point in
+// which they are both *in-use*.
+//
+// That is sound, but overly conservative in some cases: in our (artificial)
+// example `foo`, either `x` or `y` might be in use at the label `B:`, but
+// as `x` is only in use if we came in from the `var` edge and `y` only
+// if we came from the `!var` edge, they still can't be in use together.
+// See PR32488 for an important real-life case.
+//
+// If we wanted to find all points of interference precisely, we could
+// propagate `S in-use` and `S&T in-use` predicates through the CFG. That
+// would be precise, but requires propagating `O(n^2)` dataflow facts.
+//
+// However, we aren't interested in the *set* of points of interference
+// between 2 stack slots, only *whether* there *is* such a point. So we
+// can rely on a little trick: for `S` and `T` to be in-use together,
+// one of them needs to become in-use while the other is in-use (or
+// they might both become in use simultaneously). We can check this
+// by also keeping track of the points at which a stack slot might *start*
+// being in-use.
+//
+// Exact first use:
+// ----------------
+//
// Consider the following motivating example:
//
// int foo() {
@@ -158,6 +282,9 @@ STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region");
// lifetime, we can additionally overlap b1 and b5, giving us a 3*1024
// byte stack (better).
//
+// Degenerate Slots:
+// -----------------
+//
// Relying entirely on first-use of stack slots is problematic,
// however, due to the fact that optimizations can sometimes migrate
// uses of a variable outside of its lifetime start/end region. Here
@@ -237,10 +364,6 @@ STATISTIC(EscapedAllocas, "Number of allocas that escaped the lifetime region");
// for "b" then it will appear that 'b' has a degenerate lifetime.
//
-//===----------------------------------------------------------------------===//
-// StackColoring Pass
-//===----------------------------------------------------------------------===//
-
namespace {
/// StackColoring - A machine pass for merging disjoint stack allocations,
/// marked by the LIFETIME_START and LIFETIME_END pseudo instructions.
@@ -271,8 +394,11 @@ class StackColoring : public MachineFunctionPass {
/// Maps basic blocks to a serial number.
SmallVector<const MachineBasicBlock*, 8> BasicBlockNumbering;
- /// Maps liveness intervals for each slot.
+ /// Maps slots to their use interval. Outside of this interval, slots
+ /// values are either dead or `undef` and they will not be written to.
SmallVector<std::unique_ptr<LiveInterval>, 16> Intervals;
+ /// Maps slots to the points where they can become in-use.
+ SmallVector<SmallVector<SlotIndex, 4>, 16> LiveStarts;
/// VNInfo is used for the construction of LiveIntervals.
VNInfo::Allocator VNInfoAllocator;
/// SlotIndex analysis object.
@@ -672,15 +798,22 @@ void StackColoring::calculateLocalLiveness()
void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
SmallVector<SlotIndex, 16> Starts;
- SmallVector<SlotIndex, 16> Finishes;
+ SmallVector<bool, 16> DefinitelyInUse;
// For each block, find which slots are active within this block
// and update the live intervals.
for (const MachineBasicBlock &MBB : *MF) {
Starts.clear();
Starts.resize(NumSlots);
- Finishes.clear();
- Finishes.resize(NumSlots);
+ DefinitelyInUse.clear();
+ DefinitelyInUse.resize(NumSlots);
+
+ // Start the interval of the slots that we previously found to be 'in-use'.
+ BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
+ for (int pos = MBBLiveness.LiveIn.find_first(); pos != -1;
+ pos = MBBLiveness.LiveIn.find_next(pos)) {
+ Starts[pos] = Indexes->getMBBStartIdx(&MBB);
+ }
// Create the interval for the basic blocks containing lifetime begin/end.
for (const MachineInstr &MI : MBB) {
@@ -692,66 +825,35 @@ void StackColoring::calculateLiveIntervals(unsigned NumSlots) {
SlotIndex ThisIndex = Indexes->getInstructionIndex(MI);
for (auto Slot : slots) {
if (IsStart) {
- if (!Starts[Slot].isValid() || Starts[Slot] > ThisIndex)
+ // If a slot is already definitely in use, we don't have to emit
+ // a new start marker because there is already a pre-existing
+ // one.
+ if (!DefinitelyInUse[Slot]) {
+ LiveStarts[Slot].push_back(ThisIndex);
+ DefinitelyInUse[Slot] = true;
+ }
+ if (!Starts[Slot].isValid())
Starts[Slot] = ThisIndex;
} else {
- if (!Finishes[Slot].isValid() || Finishes[Slot] < ThisIndex)
- Finishes[Slot] = ThisIndex;
+ if (Starts[Slot].isValid()) {
+ VNInfo *VNI = Intervals[Slot]->getValNumInfo(0);
+ Intervals[Slot]->addSegment(
+ LiveInterval::Segment(Starts[Slot], ThisIndex, VNI));
+ Starts[Slot] = SlotIndex(); // Invalidate the start index
+ DefinitelyInUse[Slot] = false;
+ }
}
}
}
- // Create the interval of the blocks that we previously found to be 'alive'.
- BlockLifetimeInfo &MBBLiveness = BlockLiveness[&MBB];
- for (unsigned pos : MBBLiveness.LiveIn.set_bits()) {
- Starts[pos] = Indexes->getMBBStartIdx(&MBB);
- }
- for (unsigned pos : MBBLiveness.LiveOut.set_bits()) {
- Finishes[pos] = Indexes->getMBBEndIdx(&MBB);
- }
-
+ // Finish up started segments
for (unsigned i = 0; i < NumSlots; ++i) {
- //
- // When LifetimeStartOnFirstUse is turned on, data flow analysis
- // is forward (from starts to ends), not bidirectional. A
- // consequence of this is that we can wind up in situations
- // where Starts[i] is invalid but Finishes[i] is valid and vice
- // versa. Example:
- //
- // LIFETIME_START x
- // if (...) {
- // <use of x>
- // throw ...;
- // }
- // LIFETIME_END x
- // return 2;
- //
- //
- // Here the slot for "x" will not be live into the block
- // containing the "return 2" (since lifetimes start with first
- // use, not at the dominating LIFETIME_START marker).
- //
- if (Starts[i].isValid() && !Finishes[i].isValid()) {
- Finishes[i] = Indexes->getMBBEndIdx(&MBB);
- }
if (!Starts[i].isValid())
continue;
- assert(Starts[i] && Finishes[i] && "Invalid interval");
- VNInfo *ValNum = Intervals[i]->getValNumInfo(0);
- SlotIndex S = Starts[i];
- SlotIndex F = Finishes[i];
- if (S < F) {
- // We have a single consecutive region.
- Intervals[i]->addSegment(LiveInterval::Segment(S, F, ValNum));
- } else {
- // We have two non-consecutive regions. This happens when
- // LIFETIME_START appears after the LIFETIME_END marker.
- SlotIndex NewStart = Indexes->getMBBStartIdx(&MBB);
- SlotIndex NewFin = Indexes->getMBBEndIdx(&MBB);
- Intervals[i]->addSegment(LiveInterval::Segment(NewStart, F, ValNum));
- Intervals[i]->addSegment(LiveInterval::Segment(S, NewFin, ValNum));
- }
+ SlotIndex EndIdx = Indexes->getMBBEndIdx(&MBB);
+ VNInfo *VNI = Intervals[i]->getValNumInfo(0);
+ Intervals[i]->addSegment(LiveInterval::Segment(Starts[i], EndIdx, VNI));
}
}
}
@@ -981,6 +1083,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
BasicBlockNumbering.clear();
Markers.clear();
Intervals.clear();
+ LiveStarts.clear();
VNInfoAllocator.Reset();
unsigned NumSlots = MFI->getObjectIndexEnd();
@@ -992,6 +1095,7 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
SmallVector<int, 8> SortedSlots;
SortedSlots.reserve(NumSlots);
Intervals.reserve(NumSlots);
+ LiveStarts.resize(NumSlots);
unsigned NumMarkers = collectMarkers(NumSlots);
@@ -1063,6 +1167,9 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
return MFI->getObjectSize(LHS) > MFI->getObjectSize(RHS);
});
+ for (auto &s : LiveStarts)
+ std::sort(s.begin(), s.end());
+
bool Changed = true;
while (Changed) {
Changed = false;
@@ -1078,12 +1185,22 @@ bool StackColoring::runOnMachineFunction(MachineFunction &Func) {
int SecondSlot = SortedSlots[J];
LiveInterval *First = &*Intervals[FirstSlot];
LiveInterval *Second = &*Intervals[SecondSlot];
+ auto &FirstS = LiveStarts[FirstSlot];
+ auto &SecondS = LiveStarts[SecondSlot];
assert (!First->empty() && !Second->empty() && "Found an empty range");
- // Merge disjoint slots.
- if (!First->overlaps(*Second)) {
+ // Merge disjoint slots. This is a little bit tricky - see the
+ // Implementation Notes section for an explanation.
+ if (!First->isLiveAtIndexes(SecondS) &&
+ !Second->isLiveAtIndexes(FirstS)) {
Changed = true;
First->MergeSegmentsInAsValue(*Second, First->getValNumInfo(0));
+
+ int OldSize = FirstS.size();
+ FirstS.append(SecondS.begin(), SecondS.end());
+ auto Mid = FirstS.begin() + OldSize;
+ std::inplace_merge(FirstS.begin(), Mid, FirstS.end());
+
SlotRemap[SecondSlot] = FirstSlot;
SortedSlots[J] = -1;
DEBUG(dbgs()<<"Merging #"<<FirstSlot<<" and slots #"<<
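The new merge test implements the trick spelled out in the Implementation Notes above: two slots interfere only if one of them starts being in-use at a point where the other is already in-use, so it suffices to probe each slot's live segments at the other slot's start points. A simplified stand-alone version, with sorted vectors and pairs standing in for LiveInterval and SlotIndex (names illustrative):

    #include <algorithm>
    #include <utility>
    #include <vector>

    using Index = unsigned;
    using Segment = std::pair<Index, Index>; // half-open [start, end)

    static bool liveAt(const std::vector<Segment> &Segments, Index P) {
      for (const Segment &S : Segments)
        if (S.first <= P && P < S.second)
          return true;
      return false;
    }

    // Mirrors: !First->isLiveAtIndexes(SecondS) && !Second->isLiveAtIndexes(FirstS)
    static bool canMergeSlots(const std::vector<Segment> &First,
                              const std::vector<Index> &FirstStarts,
                              const std::vector<Segment> &Second,
                              const std::vector<Index> &SecondStarts) {
      auto liveAtAny = [](const std::vector<Segment> &Segs,
                          const std::vector<Index> &Points) {
        return std::any_of(Points.begin(), Points.end(),
                           [&](Index P) { return liveAt(Segs, P); });
      };
      return !liveAtAny(First, SecondStarts) && !liveAtAny(Second, FirstStarts);
    }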
diff --git a/lib/CodeGen/TargetLoweringBase.cpp b/lib/CodeGen/TargetLoweringBase.cpp
index 581cfaf60755..e9d38c10c860 100644
--- a/lib/CodeGen/TargetLoweringBase.cpp
+++ b/lib/CodeGen/TargetLoweringBase.cpp
@@ -374,11 +374,16 @@ static void InitLibcallNames(const char **Names, const Triple &TT) {
Names[RTLIB::MEMCPY] = "memcpy";
Names[RTLIB::MEMMOVE] = "memmove";
Names[RTLIB::MEMSET] = "memset";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_1] = "__llvm_memcpy_element_atomic_1";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_2] = "__llvm_memcpy_element_atomic_2";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_4] = "__llvm_memcpy_element_atomic_4";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_8] = "__llvm_memcpy_element_atomic_8";
- Names[RTLIB::MEMCPY_ELEMENT_ATOMIC_16] = "__llvm_memcpy_element_atomic_16";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_1] =
+ "__llvm_memcpy_element_unordered_atomic_1";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_2] =
+ "__llvm_memcpy_element_unordered_atomic_2";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_4] =
+ "__llvm_memcpy_element_unordered_atomic_4";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_8] =
+ "__llvm_memcpy_element_unordered_atomic_8";
+ Names[RTLIB::MEMCPY_ELEMENT_UNORDERED_ATOMIC_16] =
+ "__llvm_memcpy_element_unordered_atomic_16";
Names[RTLIB::UNWIND_RESUME] = "_Unwind_Resume";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_1] = "__sync_val_compare_and_swap_1";
Names[RTLIB::SYNC_VAL_COMPARE_AND_SWAP_2] = "__sync_val_compare_and_swap_2";
@@ -781,22 +786,21 @@ RTLIB::Libcall RTLIB::getSYNC(unsigned Opc, MVT VT) {
return UNKNOWN_LIBCALL;
}
-RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_ATOMIC(uint64_t ElementSize) {
+RTLIB::Libcall RTLIB::getMEMCPY_ELEMENT_UNORDERED_ATOMIC(uint64_t ElementSize) {
switch (ElementSize) {
case 1:
- return MEMCPY_ELEMENT_ATOMIC_1;
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_1;
case 2:
- return MEMCPY_ELEMENT_ATOMIC_2;
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_2;
case 4:
- return MEMCPY_ELEMENT_ATOMIC_4;
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_4;
case 8:
- return MEMCPY_ELEMENT_ATOMIC_8;
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_8;
case 16:
- return MEMCPY_ELEMENT_ATOMIC_16;
+ return MEMCPY_ELEMENT_UNORDERED_ATOMIC_16;
default:
return UNKNOWN_LIBCALL;
}
-
}
/// InitCmpLibcallCCs - Set default comparison libcall CC.
diff --git a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index a0c68e1dcce8..6922e33c8d6c 100644
--- a/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -61,9 +61,11 @@
using namespace llvm;
using namespace dwarf;
-static void GetObjCImageInfo(ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- unsigned &Version, unsigned &Flags,
+static void GetObjCImageInfo(Module &M, unsigned &Version, unsigned &Flags,
StringRef &Section) {
+ SmallVector<Module::ModuleFlagEntry, 8> ModuleFlags;
+ M.getModuleFlagsMetadata(ModuleFlags);
+
for (const auto &MFE: ModuleFlags) {
// Ignore flags with 'Require' behaviour.
if (MFE.Behavior == Module::Require)
@@ -88,14 +90,13 @@ static void GetObjCImageInfo(ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
// ELF
//===----------------------------------------------------------------------===//
-void TargetLoweringObjectFileELF::emitModuleFlags(
- MCStreamer &Streamer, ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const {
+void TargetLoweringObjectFileELF::emitModuleMetadata(
+ MCStreamer &Streamer, Module &M, const TargetMachine &TM) const {
unsigned Version = 0;
unsigned Flags = 0;
StringRef Section;
- GetObjCImageInfo(ModuleFlags, Version, Flags, Section);
+ GetObjCImageInfo(M, Version, Flags, Section);
if (Section.empty())
return;
@@ -618,20 +619,10 @@ void TargetLoweringObjectFileMachO::Initialize(MCContext &Ctx,
}
}
-/// emitModuleFlags - Perform code emission for module flags.
-void TargetLoweringObjectFileMachO::emitModuleFlags(
- MCStreamer &Streamer, ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const {
- MDNode *LinkerOptions = nullptr;
-
- for (const auto &MFE : ModuleFlags) {
- StringRef Key = MFE.Key->getString();
- if (Key == "Linker Options")
- LinkerOptions = cast<MDNode>(MFE.Val);
- }
-
+void TargetLoweringObjectFileMachO::emitModuleMetadata(
+ MCStreamer &Streamer, Module &M, const TargetMachine &TM) const {
// Emit the linker options if present.
- if (LinkerOptions) {
+ if (auto *LinkerOptions = M.getNamedMetadata("llvm.linker.options")) {
for (const auto &Option : LinkerOptions->operands()) {
SmallVector<std::string, 4> StrOptions;
for (const auto &Piece : cast<MDNode>(Option)->operands())
@@ -643,7 +634,8 @@ void TargetLoweringObjectFileMachO::emitModuleFlags(
unsigned VersionVal = 0;
unsigned ImageInfoFlags = 0;
StringRef SectionVal;
- GetObjCImageInfo(ModuleFlags, VersionVal, ImageInfoFlags, SectionVal);
+
+ GetObjCImageInfo(M, VersionVal, ImageInfoFlags, SectionVal);
// The section is mandatory. If we don't have it, then we don't have GC info.
if (SectionVal.empty())
@@ -1159,18 +1151,9 @@ MCSection *TargetLoweringObjectFileCOFF::getSectionForJumpTable(
COFF::IMAGE_COMDAT_SELECT_ASSOCIATIVE, UniqueID);
}
-void TargetLoweringObjectFileCOFF::emitModuleFlags(
- MCStreamer &Streamer, ArrayRef<Module::ModuleFlagEntry> ModuleFlags,
- const TargetMachine &TM) const {
- MDNode *LinkerOptions = nullptr;
-
- for (const auto &MFE : ModuleFlags) {
- StringRef Key = MFE.Key->getString();
- if (Key == "Linker Options")
- LinkerOptions = cast<MDNode>(MFE.Val);
- }
-
- if (LinkerOptions) {
+void TargetLoweringObjectFileCOFF::emitModuleMetadata(
+ MCStreamer &Streamer, Module &M, const TargetMachine &TM) const {
+ if (NamedMDNode *LinkerOptions = M.getNamedMetadata("llvm.linker.options")) {
// Emit the linker options to the linker .drectve section. According to the
// spec, this section is a space-separated string containing flags for
// linker.
@@ -1190,7 +1173,7 @@ void TargetLoweringObjectFileCOFF::emitModuleFlags(
unsigned Flags = 0;
StringRef Section;
- GetObjCImageInfo(ModuleFlags, Version, Flags, Section);
+ GetObjCImageInfo(M, Version, Flags, Section);
if (Section.empty())
return;
diff --git a/lib/DebugInfo/CodeView/CMakeLists.txt b/lib/DebugInfo/CodeView/CMakeLists.txt
index 2f9e8981b698..f916695a8439 100644
--- a/lib/DebugInfo/CodeView/CMakeLists.txt
+++ b/lib/DebugInfo/CodeView/CMakeLists.txt
@@ -20,6 +20,7 @@ add_llvm_library(LLVMDebugInfoCodeView
LazyRandomTypeCollection.cpp
Line.cpp
RecordSerialization.cpp
+ StringsAndChecksums.cpp
SymbolRecordMapping.cpp
SymbolDumper.cpp
SymbolSerializer.cpp
@@ -32,7 +33,7 @@ add_llvm_library(LLVMDebugInfoCodeView
TypeSerializer.cpp
TypeStreamMerger.cpp
TypeTableCollection.cpp
-
+
ADDITIONAL_HEADER_DIRS
${LLVM_MAIN_INCLUDE_DIR}/llvm/DebugInfo/CodeView
)
diff --git a/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp b/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
index 6e647c4b976b..de02525270c4 100644
--- a/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
+++ b/lib/DebugInfo/CodeView/DebugStringTableSubsection.cpp
@@ -58,6 +58,10 @@ Error DebugStringTableSubsection::commit(BinaryStreamWriter &Writer) const {
uint32_t Begin = Writer.getOffset();
uint32_t End = Begin + StringSize;
+ // Write a null string at the beginning.
+ if (auto EC = Writer.writeCString(StringRef()))
+ return EC;
+
for (auto &Pair : Strings) {
StringRef S = Pair.getKey();
uint32_t Offset = Begin + Pair.getValue();
@@ -68,6 +72,7 @@ Error DebugStringTableSubsection::commit(BinaryStreamWriter &Writer) const {
}
Writer.setOffset(End);
+ assert((End - Begin) == StringSize);
return Error::success();
}
diff --git a/lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp b/lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp
index e9124e68fe82..334c5e002bbc 100644
--- a/lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp
+++ b/lib/DebugInfo/CodeView/DebugSubsectionRecord.cpp
@@ -50,7 +50,7 @@ DebugSubsectionKind DebugSubsectionRecord::kind() const { return Kind; }
BinaryStreamRef DebugSubsectionRecord::getRecordData() const { return Data; }
DebugSubsectionRecordBuilder::DebugSubsectionRecordBuilder(
- std::unique_ptr<DebugSubsection> Subsection, CodeViewContainer Container)
+ std::shared_ptr<DebugSubsection> Subsection, CodeViewContainer Container)
: Subsection(std::move(Subsection)), Container(Container) {}
uint32_t DebugSubsectionRecordBuilder::calculateSerializedLength() {
diff --git a/lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp b/lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp
index 8550107741ce..9b824333369b 100644
--- a/lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp
+++ b/lib/DebugInfo/CodeView/DebugSubsectionVisitor.cpp
@@ -26,40 +26,9 @@
using namespace llvm;
using namespace llvm::codeview;
-DebugSubsectionState::DebugSubsectionState() {}
-
-DebugSubsectionState::DebugSubsectionState(
- const DebugStringTableSubsectionRef &Strings)
- : Strings(&Strings) {}
-
-DebugSubsectionState::DebugSubsectionState(
- const DebugStringTableSubsectionRef &Strings,
- const DebugChecksumsSubsectionRef &Checksums)
- : Strings(&Strings), Checksums(&Checksums) {}
-
-void DebugSubsectionState::initializeStrings(const DebugSubsectionRecord &SR) {
- assert(SR.kind() == DebugSubsectionKind::StringTable);
- assert(!Strings && "Found a string table even though we already have one!");
-
- OwnedStrings = llvm::make_unique<DebugStringTableSubsectionRef>();
- consumeError(OwnedStrings->initialize(SR.getRecordData()));
- Strings = OwnedStrings.get();
-}
-
-void DebugSubsectionState::initializeChecksums(
- const DebugSubsectionRecord &FCR) {
- assert(FCR.kind() == DebugSubsectionKind::FileChecksums);
- if (Checksums)
- return;
-
- OwnedChecksums = llvm::make_unique<DebugChecksumsSubsectionRef>();
- consumeError(OwnedChecksums->initialize(FCR.getRecordData()));
- Checksums = OwnedChecksums.get();
-}
-
-Error llvm::codeview::visitDebugSubsection(const DebugSubsectionRecord &R,
- DebugSubsectionVisitor &V,
- const DebugSubsectionState &State) {
+Error llvm::codeview::visitDebugSubsection(
+ const DebugSubsectionRecord &R, DebugSubsectionVisitor &V,
+ const StringsAndChecksumsRef &State) {
BinaryStreamReader Reader(R.getRecordData());
switch (R.kind()) {
case DebugSubsectionKind::Lines: {
diff --git a/lib/DebugInfo/CodeView/StringsAndChecksums.cpp b/lib/DebugInfo/CodeView/StringsAndChecksums.cpp
new file mode 100644
index 000000000000..928bf8c94f73
--- /dev/null
+++ b/lib/DebugInfo/CodeView/StringsAndChecksums.cpp
@@ -0,0 +1,55 @@
+//===- StringsAndChecksums.cpp ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/DebugInfo/CodeView/StringsAndChecksums.h"
+#include "llvm/DebugInfo/CodeView/DebugChecksumsSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugStringTableSubsection.h"
+#include "llvm/DebugInfo/CodeView/DebugSubsectionRecord.h"
+
+using namespace llvm;
+using namespace llvm::codeview;
+
+StringsAndChecksumsRef::StringsAndChecksumsRef() {}
+
+StringsAndChecksumsRef::StringsAndChecksumsRef(
+ const DebugStringTableSubsectionRef &Strings)
+ : Strings(&Strings) {}
+
+StringsAndChecksumsRef::StringsAndChecksumsRef(
+ const DebugStringTableSubsectionRef &Strings,
+ const DebugChecksumsSubsectionRef &Checksums)
+ : Strings(&Strings), Checksums(&Checksums) {}
+
+void StringsAndChecksumsRef::initializeStrings(
+ const DebugSubsectionRecord &SR) {
+ assert(SR.kind() == DebugSubsectionKind::StringTable);
+ assert(!Strings && "Found a string table even though we already have one!");
+
+ OwnedStrings = llvm::make_unique<DebugStringTableSubsectionRef>();
+ consumeError(OwnedStrings->initialize(SR.getRecordData()));
+ Strings = OwnedStrings.get();
+}
+
+void StringsAndChecksumsRef::setChecksums(
+ const DebugChecksumsSubsectionRef &CS) {
+ OwnedChecksums = llvm::make_unique<DebugChecksumsSubsectionRef>();
+ *OwnedChecksums = CS;
+ Checksums = OwnedChecksums.get();
+}
+
+void StringsAndChecksumsRef::initializeChecksums(
+ const DebugSubsectionRecord &FCR) {
+ assert(FCR.kind() == DebugSubsectionKind::FileChecksums);
+ if (Checksums)
+ return;
+
+ OwnedChecksums = llvm::make_unique<DebugChecksumsSubsectionRef>();
+ consumeError(OwnedChecksums->initialize(FCR.getRecordData()));
+ Checksums = OwnedChecksums.get();
+}
diff --git a/lib/DebugInfo/CodeView/SymbolDumper.cpp b/lib/DebugInfo/CodeView/SymbolDumper.cpp
index 66045933ce9b..36abafc079ed 100644
--- a/lib/DebugInfo/CodeView/SymbolDumper.cpp
+++ b/lib/DebugInfo/CodeView/SymbolDumper.cpp
@@ -212,7 +212,7 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
FileStaticSym &FileStatic) {
DictScope S(W, "FileStatic");
- W.printNumber("Index", FileStatic.Index);
+ printTypeIndex("Index", FileStatic.Index);
W.printNumber("ModFilenameOffset", FileStatic.ModFilenameOffset);
W.printFlags("Flags", uint16_t(FileStatic.Flags), getLocalFlagNames());
W.printString("Name", FileStatic.Name);
@@ -516,7 +516,7 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
RegisterSym &Register) {
DictScope S(W, "RegisterSym");
- W.printNumber("Type", Register.Index);
+ printTypeIndex("Type", Register.Index);
W.printEnum("Seg", uint16_t(Register.Register), getRegisterNames());
W.printString("Name", Register.Name);
return Error::success();
@@ -524,7 +524,7 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR, PublicSym32 &Public) {
DictScope S(W, "PublicSym");
- W.printNumber("Type", Public.Index);
+ printTypeIndex("Type", Public.Index);
W.printNumber("Seg", Public.Segment);
W.printNumber("Off", Public.Offset);
W.printString("Name", Public.Name);
@@ -631,7 +631,7 @@ Error CVSymbolDumperImpl::visitKnownRecord(CVSymbol &CVR,
W.printHex("Offset", RegRel.Offset);
printTypeIndex("Type", RegRel.Type);
- W.printHex("Register", RegRel.Register);
+ W.printEnum("Register", uint16_t(RegRel.Register), getRegisterNames());
W.printString("VarName", RegRel.Name);
return Error::success();
}
diff --git a/lib/DebugInfo/CodeView/SymbolRecordMapping.cpp b/lib/DebugInfo/CodeView/SymbolRecordMapping.cpp
index ea46841a70f6..d731dc1b0a37 100644
--- a/lib/DebugInfo/CodeView/SymbolRecordMapping.cpp
+++ b/lib/DebugInfo/CodeView/SymbolRecordMapping.cpp
@@ -307,7 +307,7 @@ Error SymbolRecordMapping::visitKnownRecord(CVSymbol &CVR,
error(IO.mapInteger(FrameCookie.CodeOffset));
error(IO.mapInteger(FrameCookie.Register));
- error(IO.mapInteger(FrameCookie.CookieKind));
+ error(IO.mapEnum(FrameCookie.CookieKind));
error(IO.mapInteger(FrameCookie.Flags));
return Error::success();
@@ -439,7 +439,7 @@ Error SymbolRecordMapping::visitKnownRecord(CVSymbol &CVR,
error(IO.mapInteger(RegRel.Offset));
error(IO.mapInteger(RegRel.Type));
- error(IO.mapInteger(RegRel.Register));
+ error(IO.mapEnum(RegRel.Register));
error(IO.mapStringZ(RegRel.Name));
return Error::success();
diff --git a/lib/DebugInfo/CodeView/TypeDatabase.cpp b/lib/DebugInfo/CodeView/TypeDatabase.cpp
index af05d2dc294b..08f848b36a9d 100644
--- a/lib/DebugInfo/CodeView/TypeDatabase.cpp
+++ b/lib/DebugInfo/CodeView/TypeDatabase.cpp
@@ -12,59 +12,6 @@
using namespace llvm;
using namespace llvm::codeview;
-namespace {
-struct SimpleTypeEntry {
- StringRef Name;
- SimpleTypeKind Kind;
-};
-}
-
-/// The names here all end in "*". If the simple type is a pointer type, we
-/// return the whole name. Otherwise we lop off the last character in our
-/// StringRef.
-static const SimpleTypeEntry SimpleTypeNames[] = {
- {"void*", SimpleTypeKind::Void},
- {"<not translated>*", SimpleTypeKind::NotTranslated},
- {"HRESULT*", SimpleTypeKind::HResult},
- {"signed char*", SimpleTypeKind::SignedCharacter},
- {"unsigned char*", SimpleTypeKind::UnsignedCharacter},
- {"char*", SimpleTypeKind::NarrowCharacter},
- {"wchar_t*", SimpleTypeKind::WideCharacter},
- {"char16_t*", SimpleTypeKind::Character16},
- {"char32_t*", SimpleTypeKind::Character32},
- {"__int8*", SimpleTypeKind::SByte},
- {"unsigned __int8*", SimpleTypeKind::Byte},
- {"short*", SimpleTypeKind::Int16Short},
- {"unsigned short*", SimpleTypeKind::UInt16Short},
- {"__int16*", SimpleTypeKind::Int16},
- {"unsigned __int16*", SimpleTypeKind::UInt16},
- {"long*", SimpleTypeKind::Int32Long},
- {"unsigned long*", SimpleTypeKind::UInt32Long},
- {"int*", SimpleTypeKind::Int32},
- {"unsigned*", SimpleTypeKind::UInt32},
- {"__int64*", SimpleTypeKind::Int64Quad},
- {"unsigned __int64*", SimpleTypeKind::UInt64Quad},
- {"__int64*", SimpleTypeKind::Int64},
- {"unsigned __int64*", SimpleTypeKind::UInt64},
- {"__int128*", SimpleTypeKind::Int128},
- {"unsigned __int128*", SimpleTypeKind::UInt128},
- {"__half*", SimpleTypeKind::Float16},
- {"float*", SimpleTypeKind::Float32},
- {"float*", SimpleTypeKind::Float32PartialPrecision},
- {"__float48*", SimpleTypeKind::Float48},
- {"double*", SimpleTypeKind::Float64},
- {"long double*", SimpleTypeKind::Float80},
- {"__float128*", SimpleTypeKind::Float128},
- {"_Complex float*", SimpleTypeKind::Complex32},
- {"_Complex double*", SimpleTypeKind::Complex64},
- {"_Complex long double*", SimpleTypeKind::Complex80},
- {"_Complex __float128*", SimpleTypeKind::Complex128},
- {"bool*", SimpleTypeKind::Boolean8},
- {"__bool16*", SimpleTypeKind::Boolean16},
- {"__bool32*", SimpleTypeKind::Boolean32},
- {"__bool64*", SimpleTypeKind::Boolean64},
-};
-
TypeDatabase::TypeDatabase(uint32_t Capacity) : TypeNameStorage(Allocator) {
CVUDTNames.resize(Capacity);
TypeRecords.resize(Capacity);
@@ -103,22 +50,8 @@ StringRef TypeDatabase::saveTypeName(StringRef TypeName) {
}
StringRef TypeDatabase::getTypeName(TypeIndex Index) const {
- if (Index.isNoneType())
- return "<no type>";
-
- if (Index.isSimple()) {
- // This is a simple type.
- for (const auto &SimpleTypeName : SimpleTypeNames) {
- if (SimpleTypeName.Kind == Index.getSimpleKind()) {
- if (Index.getSimpleMode() == SimpleTypeMode::Direct)
- return SimpleTypeName.Name.drop_back(1);
- // Otherwise, this is a pointer type. We gloss over the distinction
- // between near, far, 64, 32, etc, and just give a pointer type.
- return SimpleTypeName.Name;
- }
- }
- return "<unknown simple type>";
- }
+ if (Index.isNoneType() || Index.isSimple())
+ return TypeIndex::simpleTypeName(Index);
if (contains(Index))
return CVUDTNames[Index.toArrayIndex()];
diff --git a/lib/DebugInfo/CodeView/TypeIndex.cpp b/lib/DebugInfo/CodeView/TypeIndex.cpp
index 20ba6470cd5b..24fe5fcb28d4 100644
--- a/lib/DebugInfo/CodeView/TypeIndex.cpp
+++ b/lib/DebugInfo/CodeView/TypeIndex.cpp
@@ -15,11 +15,88 @@
using namespace llvm;
using namespace llvm::codeview;
+namespace {
+struct SimpleTypeEntry {
+ StringRef Name;
+ SimpleTypeKind Kind;
+};
+
+/// The names here all end in "*". If the simple type is a pointer type, we
+/// return the whole name. Otherwise we lop off the last character in our
+/// StringRef.
+static const SimpleTypeEntry SimpleTypeNames[] = {
+ {"void*", SimpleTypeKind::Void},
+ {"<not translated>*", SimpleTypeKind::NotTranslated},
+ {"HRESULT*", SimpleTypeKind::HResult},
+ {"signed char*", SimpleTypeKind::SignedCharacter},
+ {"unsigned char*", SimpleTypeKind::UnsignedCharacter},
+ {"char*", SimpleTypeKind::NarrowCharacter},
+ {"wchar_t*", SimpleTypeKind::WideCharacter},
+ {"char16_t*", SimpleTypeKind::Character16},
+ {"char32_t*", SimpleTypeKind::Character32},
+ {"__int8*", SimpleTypeKind::SByte},
+ {"unsigned __int8*", SimpleTypeKind::Byte},
+ {"short*", SimpleTypeKind::Int16Short},
+ {"unsigned short*", SimpleTypeKind::UInt16Short},
+ {"__int16*", SimpleTypeKind::Int16},
+ {"unsigned __int16*", SimpleTypeKind::UInt16},
+ {"long*", SimpleTypeKind::Int32Long},
+ {"unsigned long*", SimpleTypeKind::UInt32Long},
+ {"int*", SimpleTypeKind::Int32},
+ {"unsigned*", SimpleTypeKind::UInt32},
+ {"__int64*", SimpleTypeKind::Int64Quad},
+ {"unsigned __int64*", SimpleTypeKind::UInt64Quad},
+ {"__int64*", SimpleTypeKind::Int64},
+ {"unsigned __int64*", SimpleTypeKind::UInt64},
+ {"__int128*", SimpleTypeKind::Int128},
+ {"unsigned __int128*", SimpleTypeKind::UInt128},
+ {"__half*", SimpleTypeKind::Float16},
+ {"float*", SimpleTypeKind::Float32},
+ {"float*", SimpleTypeKind::Float32PartialPrecision},
+ {"__float48*", SimpleTypeKind::Float48},
+ {"double*", SimpleTypeKind::Float64},
+ {"long double*", SimpleTypeKind::Float80},
+ {"__float128*", SimpleTypeKind::Float128},
+ {"_Complex float*", SimpleTypeKind::Complex32},
+ {"_Complex double*", SimpleTypeKind::Complex64},
+ {"_Complex long double*", SimpleTypeKind::Complex80},
+ {"_Complex __float128*", SimpleTypeKind::Complex128},
+ {"bool*", SimpleTypeKind::Boolean8},
+ {"__bool16*", SimpleTypeKind::Boolean16},
+ {"__bool32*", SimpleTypeKind::Boolean32},
+ {"__bool64*", SimpleTypeKind::Boolean64},
+};
+} // namespace
+
+StringRef TypeIndex::simpleTypeName(TypeIndex TI) {
+ assert(TI.isNoneType() || TI.isSimple());
+
+ if (TI.isNoneType())
+ return "<no type>";
+
+ // This is a simple type.
+ for (const auto &SimpleTypeName : SimpleTypeNames) {
+ if (SimpleTypeName.Kind == TI.getSimpleKind()) {
+ if (TI.getSimpleMode() == SimpleTypeMode::Direct)
+ return SimpleTypeName.Name.drop_back(1);
+ // Otherwise, this is a pointer type. We gloss over the distinction
+ // between near, far, 64, 32, etc, and just give a pointer type.
+ return SimpleTypeName.Name;
+ }
+ }
+ return "<unknown simple type>";
+}
+
void llvm::codeview::printTypeIndex(ScopedPrinter &Printer, StringRef FieldName,
TypeIndex TI, TypeCollection &Types) {
StringRef TypeName;
- if (!TI.isNoneType())
- TypeName = Types.getTypeName(TI);
+ if (!TI.isNoneType()) {
+ if (TI.isSimple())
+ TypeName = TypeIndex::simpleTypeName(TI);
+ else
+ TypeName = Types.getTypeName(TI);
+ }
+
if (!TypeName.empty())
Printer.printHex(FieldName, TypeName, TI.getIndex());
else
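
The table moved into TypeIndex.cpp pairs each SimpleTypeKind with a name that deliberately ends in '*': pointer modes return the full name, while Direct mode drops the trailing character. A small self-contained sketch of that single-table trick, using toy kind/mode enums rather than the CodeView types:

#include <cstdio>
#include <string>

enum class Kind { Void, Int32, Float64 };
enum class Mode { Direct, Pointer }; // real CodeView has several pointer modes

struct Entry { const char *Name; Kind K; };
// Every name carries a trailing '*' so pointer modes can reuse the same table.
static const Entry Names[] = {
    {"void*", Kind::Void}, {"int*", Kind::Int32}, {"double*", Kind::Float64}};

static std::string simpleName(Kind K, Mode M) {
  for (const Entry &E : Names)
    if (E.K == K) {
      std::string N = E.Name;
      if (M == Mode::Direct)
        N.pop_back(); // drop the '*' for the non-pointer form
      return N;
    }
  return "<unknown simple type>";
}

int main() {
  std::printf("%s\n", simpleName(Kind::Int32, Mode::Direct).c_str());  // int
  std::printf("%s\n", simpleName(Kind::Int32, Mode::Pointer).c_str()); // int*
}
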
diff --git a/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
index 11e2e215303c..8704cea60786 100644
--- a/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
+++ b/lib/DebugInfo/CodeView/TypeIndexDiscovery.cpp
@@ -34,7 +34,7 @@ static inline PointerMode getPointerMode(uint32_t Attrs) {
static inline bool isMemberPointer(uint32_t Attrs) {
PointerMode Mode = getPointerMode(Attrs);
return Mode == PointerMode::PointerToDataMember ||
- Mode == PointerMode::PointerToDataMember;
+ Mode == PointerMode::PointerToMemberFunction;
}
static inline uint32_t getEncodedIntegerLength(ArrayRef<uint8_t> Data) {
diff --git a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
index 97b52f0fbdd6..87009bf1b6a1 100644
--- a/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
+++ b/lib/DebugInfo/DWARF/DWARFAcceleratorTable.cpp
@@ -55,6 +55,13 @@ bool DWARFAcceleratorTable::extract() {
return true;
}
+uint32_t DWARFAcceleratorTable::getNumBuckets() { return Hdr.NumBuckets; }
+uint32_t DWARFAcceleratorTable::getNumHashes() { return Hdr.NumHashes; }
+uint32_t DWARFAcceleratorTable::getSizeHdr() { return sizeof(Hdr); }
+uint32_t DWARFAcceleratorTable::getHeaderDataLength() {
+ return Hdr.HeaderDataLength;
+}
+
LLVM_DUMP_METHOD void DWARFAcceleratorTable::dump(raw_ostream &OS) const {
// Dump the header.
OS << "Magic = " << format("0x%08x", Hdr.Magic) << '\n'
diff --git a/lib/DebugInfo/DWARF/DWARFContext.cpp b/lib/DebugInfo/DWARF/DWARFContext.cpp
index 42ab48808f9a..9bafcde57f0a 100644
--- a/lib/DebugInfo/DWARF/DWARFContext.cpp
+++ b/lib/DebugInfo/DWARF/DWARFContext.cpp
@@ -425,248 +425,6 @@ DWARFDie DWARFContext::getDIEForOffset(uint32_t Offset) {
return DWARFDie();
}
-namespace {
-
-class Verifier {
- raw_ostream &OS;
- DWARFContext &DCtx;
-public:
- Verifier(raw_ostream &S, DWARFContext &D) : OS(S), DCtx(D) {}
-
- bool HandleDebugInfo() {
- bool Success = true;
- // A map that tracks all references (converted absolute references) so we
- // can verify each reference points to a valid DIE and not an offset that
- // lies between to valid DIEs.
- std::map<uint64_t, std::set<uint32_t>> ReferenceToDIEOffsets;
-
- OS << "Verifying .debug_info...\n";
- for (const auto &CU : DCtx.compile_units()) {
- unsigned NumDies = CU->getNumDIEs();
- for (unsigned I = 0; I < NumDies; ++I) {
- auto Die = CU->getDIEAtIndex(I);
- const auto Tag = Die.getTag();
- if (Tag == DW_TAG_null)
- continue;
- for (auto AttrValue : Die.attributes()) {
- const auto Attr = AttrValue.Attr;
- const auto Form = AttrValue.Value.getForm();
- switch (Attr) {
- case DW_AT_ranges:
- // Make sure the offset in the DW_AT_ranges attribute is valid.
- if (auto SectionOffset = AttrValue.Value.getAsSectionOffset()) {
- if (*SectionOffset >= DCtx.getRangeSection().Data.size()) {
- Success = false;
- OS << "error: DW_AT_ranges offset is beyond .debug_ranges "
- "bounds:\n";
- Die.dump(OS, 0);
- OS << "\n";
- }
- } else {
- Success = false;
- OS << "error: DIE has invalid DW_AT_ranges encoding:\n";
- Die.dump(OS, 0);
- OS << "\n";
- }
- break;
- case DW_AT_stmt_list:
- // Make sure the offset in the DW_AT_stmt_list attribute is valid.
- if (auto SectionOffset = AttrValue.Value.getAsSectionOffset()) {
- if (*SectionOffset >= DCtx.getLineSection().Data.size()) {
- Success = false;
- OS << "error: DW_AT_stmt_list offset is beyond .debug_line "
- "bounds: "
- << format("0x%08" PRIx32, *SectionOffset) << "\n";
- CU->getUnitDIE().dump(OS, 0);
- OS << "\n";
- }
- } else {
- Success = false;
- OS << "error: DIE has invalid DW_AT_stmt_list encoding:\n";
- Die.dump(OS, 0);
- OS << "\n";
- }
- break;
-
- default:
- break;
- }
- switch (Form) {
- case DW_FORM_ref1:
- case DW_FORM_ref2:
- case DW_FORM_ref4:
- case DW_FORM_ref8:
- case DW_FORM_ref_udata: {
- // Verify all CU relative references are valid CU offsets.
- Optional<uint64_t> RefVal = AttrValue.Value.getAsReference();
- assert(RefVal);
- if (RefVal) {
- auto DieCU = Die.getDwarfUnit();
- auto CUSize = DieCU->getNextUnitOffset() - DieCU->getOffset();
- auto CUOffset = AttrValue.Value.getRawUValue();
- if (CUOffset >= CUSize) {
- Success = false;
- OS << "error: " << FormEncodingString(Form) << " CU offset "
- << format("0x%08" PRIx32, CUOffset)
- << " is invalid (must be less than CU size of "
- << format("0x%08" PRIx32, CUSize) << "):\n";
- Die.dump(OS, 0);
- OS << "\n";
- } else {
- // Valid reference, but we will verify it points to an actual
- // DIE later.
- ReferenceToDIEOffsets[*RefVal].insert(Die.getOffset());
- }
- }
- break;
- }
- case DW_FORM_ref_addr: {
- // Verify all absolute DIE references have valid offsets in the
- // .debug_info section.
- Optional<uint64_t> RefVal = AttrValue.Value.getAsReference();
- assert(RefVal);
- if (RefVal) {
- if(*RefVal >= DCtx.getInfoSection().Data.size()) {
- Success = false;
- OS << "error: DW_FORM_ref_addr offset beyond .debug_info "
- "bounds:\n";
- Die.dump(OS, 0);
- OS << "\n";
- } else {
- // Valid reference, but we will verify it points to an actual
- // DIE later.
- ReferenceToDIEOffsets[*RefVal].insert(Die.getOffset());
- }
- }
- break;
- }
- case DW_FORM_strp: {
- auto SecOffset = AttrValue.Value.getAsSectionOffset();
- assert(SecOffset); // DW_FORM_strp is a section offset.
- if (SecOffset && *SecOffset >= DCtx.getStringSection().size()) {
- Success = false;
- OS << "error: DW_FORM_strp offset beyond .debug_str bounds:\n";
- Die.dump(OS, 0);
- OS << "\n";
- }
- break;
- }
- default:
- break;
- }
- }
- }
- }
-
- // Take all references and make sure they point to an actual DIE by
- // getting the DIE by offset and emitting an error
- OS << "Verifying .debug_info references...\n";
- for (auto Pair: ReferenceToDIEOffsets) {
- auto Die = DCtx.getDIEForOffset(Pair.first);
- if (Die)
- continue;
- Success = false;
- OS << "error: invalid DIE reference " << format("0x%08" PRIx64, Pair.first)
- << ". Offset is in between DIEs:\n";
- for (auto Offset: Pair.second) {
- auto ReferencingDie = DCtx.getDIEForOffset(Offset);
- ReferencingDie.dump(OS, 0);
- OS << "\n";
- }
- OS << "\n";
- }
- return Success;
- }
-
- bool HandleDebugLine() {
- std::map<uint64_t, DWARFDie> StmtListToDie;
- bool Success = true;
- OS << "Verifying .debug_line...\n";
- for (const auto &CU : DCtx.compile_units()) {
- uint32_t LineTableOffset = 0;
- auto CUDie = CU->getUnitDIE();
- auto StmtFormValue = CUDie.find(DW_AT_stmt_list);
- if (!StmtFormValue) {
- // No line table for this compile unit.
- continue;
- }
- // Get the attribute value as a section offset. No need to produce an
- // error here if the encoding isn't correct because we validate this in
- // the .debug_info verifier.
- if (auto StmtSectionOffset = toSectionOffset(StmtFormValue)) {
- LineTableOffset = *StmtSectionOffset;
- if (LineTableOffset >= DCtx.getLineSection().Data.size()) {
- // Make sure we don't get a valid line table back if the offset
- // is wrong.
- assert(DCtx.getLineTableForUnit(CU.get()) == nullptr);
- // Skip this line table as it isn't valid. No need to create an error
- // here because we validate this in the .debug_info verifier.
- continue;
- } else {
- auto Iter = StmtListToDie.find(LineTableOffset);
- if (Iter != StmtListToDie.end()) {
- Success = false;
- OS << "error: two compile unit DIEs, "
- << format("0x%08" PRIx32, Iter->second.getOffset()) << " and "
- << format("0x%08" PRIx32, CUDie.getOffset())
- << ", have the same DW_AT_stmt_list section offset:\n";
- Iter->second.dump(OS, 0);
- CUDie.dump(OS, 0);
- OS << '\n';
- // Already verified this line table before, no need to do it again.
- continue;
- }
- StmtListToDie[LineTableOffset] = CUDie;
- }
- }
- auto LineTable = DCtx.getLineTableForUnit(CU.get());
- if (!LineTable) {
- Success = false;
- OS << "error: .debug_line[" << format("0x%08" PRIx32, LineTableOffset)
- << "] was not able to be parsed for CU:\n";
- CUDie.dump(OS, 0);
- OS << '\n';
- continue;
- }
- uint32_t MaxFileIndex = LineTable->Prologue.FileNames.size();
- uint64_t PrevAddress = 0;
- uint32_t RowIndex = 0;
- for (const auto &Row : LineTable->Rows) {
- if (Row.Address < PrevAddress) {
- Success = false;
- OS << "error: .debug_line[" << format("0x%08" PRIx32, LineTableOffset)
- << "] row[" << RowIndex
- << "] decreases in address from previous row:\n";
-
- DWARFDebugLine::Row::dumpTableHeader(OS);
- if (RowIndex > 0)
- LineTable->Rows[RowIndex - 1].dump(OS);
- Row.dump(OS);
- OS << '\n';
- }
-
- if (Row.File > MaxFileIndex) {
- Success = false;
- OS << "error: .debug_line[" << format("0x%08" PRIx32, LineTableOffset)
- << "][" << RowIndex << "] has invalid file index " << Row.File
- << " (valid values are [1," << MaxFileIndex << "]):\n";
- DWARFDebugLine::Row::dumpTableHeader(OS);
- Row.dump(OS);
- OS << '\n';
- }
- if (Row.EndSequence)
- PrevAddress = 0;
- else
- PrevAddress = Row.Address;
- ++RowIndex;
- }
- }
- return Success;
- }
-};
-
-} // anonymous namespace
-
bool DWARFContext::verify(raw_ostream &OS, DIDumpType DumpType) {
bool Success = true;
DWARFVerifier verifier(OS, *this);
@@ -678,8 +436,13 @@ bool DWARFContext::verify(raw_ostream &OS, DIDumpType DumpType) {
if (!verifier.handleDebugLine())
Success = false;
}
+ if (DumpType == DIDT_All || DumpType == DIDT_AppleNames) {
+ if (!verifier.handleAppleNames())
+ Success = false;
+ }
return Success;
}
+
const DWARFUnitIndex &DWARFContext::getCUIndex() {
if (CUIndex)
return *CUIndex;
@@ -1250,7 +1013,7 @@ DWARFContextInMemory::DWARFContextInMemory(const object::ObjectFile &Obj,
continue;
RelSecName = RelSecName.substr(
- RelSecName.find_first_not_of("._")); // Skip . and _ prefixes.
+      RelSecName.find_first_not_of("._z")); // Skip ., _ and z prefixes.
// TODO: Add support for relocations in other sections as needed.
// Record relocations for the debug_info and debug_line sections.
diff --git a/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp b/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
index e6e007896cc8..cf9fec2b3254 100644
--- a/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
+++ b/lib/DebugInfo/DWARF/DWARFDebugFrame.cpp
@@ -514,6 +514,20 @@ static uint64_t readPointer(const DataExtractor &Data, uint32_t &Offset,
}
}
+// This is a workaround for old compilers which do not allow the
+// noreturn attribute to be used in lambdas. Once support for those
+// compilers is phased out, we can remove this and go back to
+// a ReportError lambda: [StartOffset](const char *ErrorMsg).
+#define ReportError(ErrorMsg) ReportErrorImpl(StartOffset, ErrorMsg)
+static void LLVM_ATTRIBUTE_NORETURN
+ReportErrorImpl(uint32_t StartOffset, const char *ErrorMsg) {
+ std::string Str;
+ raw_string_ostream OS(Str);
+ OS << format(ErrorMsg, StartOffset);
+ OS.flush();
+ report_fatal_error(Str);
+}
+
void DWARFDebugFrame::parse(DataExtractor Data) {
uint32_t Offset = 0;
DenseMap<uint32_t, CIE *> CIEs;
@@ -521,14 +535,6 @@ void DWARFDebugFrame::parse(DataExtractor Data) {
while (Data.isValidOffset(Offset)) {
uint32_t StartOffset = Offset;
- auto ReportError = [StartOffset](const char *ErrorMsg) {
- std::string Str;
- raw_string_ostream OS(Str);
- OS << format(ErrorMsg, StartOffset);
- OS.flush();
- report_fatal_error(Str);
- };
-
bool IsDWARF64 = false;
uint64_t Length = Data.getU32(&Offset);
uint64_t Id;
@@ -585,7 +591,6 @@ void DWARFDebugFrame::parse(DataExtractor Data) {
switch (AugmentationString[i]) {
default:
ReportError("Unknown augmentation character in entry at %lx");
- llvm_unreachable("ReportError should not return.");
case 'L':
LSDAPointerEncoding = Data.getU8(&Offset);
break;
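
The comment above explains the shape of this change: some older host compilers reject a noreturn attribute on a lambda, so the error reporter becomes a free function and a macro re-binds the local StartOffset at each call site, which also lets the llvm_unreachable hint after the call be dropped. A minimal sketch of the same workaround, with hypothetical names and a plain fprintf/abort body rather than report_fatal_error:

#include <cstdio>
#include <cstdlib>

// A [[noreturn]] free function works on compilers that reject the attribute
// on lambdas; the macro re-binds the local 'StartOffset' at each call site.
[[noreturn]] static void reportErrorImpl(unsigned StartOffset, const char *Msg) {
  std::fprintf(stderr, Msg, StartOffset);
  std::abort();
}
#define REPORT_ERROR(Msg) reportErrorImpl(StartOffset, Msg)

static void parseEntry(unsigned StartOffset, bool Bad) {
  if (Bad)
    REPORT_ERROR("malformed entry at offset %x\n"); // no unreachable hint needed
}

int main() { parseEntry(0x10, false); }
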
diff --git a/lib/DebugInfo/DWARF/DWARFVerifier.cpp b/lib/DebugInfo/DWARF/DWARFVerifier.cpp
index 8a544296f65c..a6240fb60143 100644
--- a/lib/DebugInfo/DWARF/DWARFVerifier.cpp
+++ b/lib/DebugInfo/DWARF/DWARFVerifier.cpp
@@ -14,6 +14,7 @@
#include "llvm/DebugInfo/DWARF/DWARFDie.h"
#include "llvm/DebugInfo/DWARF/DWARFFormValue.h"
#include "llvm/DebugInfo/DWARF/DWARFSection.h"
+#include "llvm/DebugInfo/DWARF/DWARFAcceleratorTable.h"
#include "llvm/Support/raw_ostream.h"
#include <map>
#include <set>
@@ -275,3 +276,36 @@ bool DWARFVerifier::handleDebugLine() {
verifyDebugLineRows();
return NumDebugLineErrors == 0;
}
+
+bool DWARFVerifier::handleAppleNames() {
+ NumAppleNamesErrors = 0;
+ OS << "Verifying .apple_names...\n";
+
+ DataExtractor AppleNamesSection(DCtx.getAppleNamesSection().Data,
+ DCtx.isLittleEndian(), 0);
+ DataExtractor StrData(DCtx.getStringSection(), DCtx.isLittleEndian(), 0);
+ DWARFAcceleratorTable AppleNames(AppleNamesSection, StrData,
+ DCtx.getAppleNamesSection().Relocs);
+
+ if (!AppleNames.extract()) {
+ OS << "error: cannot extract .apple_names accelerator table\n";
+ return false;
+ }
+
+ // Verify that all buckets have a valid hash index or are empty
+ uint32_t NumBuckets = AppleNames.getNumBuckets();
+ uint32_t NumHashes = AppleNames.getNumHashes();
+
+ uint32_t BucketsOffset =
+ AppleNames.getSizeHdr() + AppleNames.getHeaderDataLength();
+
+ for (uint32_t BucketIdx = 0; BucketIdx < NumBuckets; ++BucketIdx) {
+ uint32_t HashIdx = AppleNamesSection.getU32(&BucketsOffset);
+ if (HashIdx >= NumHashes && HashIdx != UINT32_MAX) {
+ OS << format("error: Bucket[%d] has invalid hash index: [%d]\n",
+ BucketIdx, HashIdx);
+ ++NumAppleNamesErrors;
+ }
+ }
+ return NumAppleNamesErrors == 0;
+}
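
handleAppleNames reads one 32-bit bucket entry at a time and accepts either an index below the hash count or the empty-bucket sentinel UINT32_MAX. A self-contained sketch of that validation over a raw little-endian buffer, assuming toy header values instead of the real .apple_names layout:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

// Read a little-endian uint32_t at Offset and advance it.
static uint32_t getU32(const std::vector<uint8_t> &Buf, size_t &Offset) {
  uint32_t V = 0;
  std::memcpy(&V, Buf.data() + Offset, sizeof(V)); // assumes little-endian host
  Offset += sizeof(V);
  return V;
}

int main() {
  const uint32_t NumHashes = 3;
  // Buckets: 0 (valid), UINT32_MAX (empty), 7 (out of range -> error).
  std::vector<uint8_t> Buckets = {0, 0, 0, 0, 0xff, 0xff, 0xff, 0xff, 7, 0, 0, 0};
  unsigned Errors = 0;
  size_t Offset = 0;
  for (uint32_t Idx = 0; Offset < Buckets.size(); ++Idx) {
    uint32_t HashIdx = getU32(Buckets, Offset);
    if (HashIdx >= NumHashes && HashIdx != UINT32_MAX) {
      std::printf("error: Bucket[%u] has invalid hash index: [%u]\n",
                  (unsigned)Idx, (unsigned)HashIdx);
      ++Errors;
    }
  }
  return Errors == 0 ? 0 : 1;
}
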
diff --git a/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp b/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
index 396dffaa68b1..81a9d3eeec61 100644
--- a/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp
@@ -177,7 +177,7 @@ Error DbiModuleDescriptorBuilder::commit(BinaryStreamWriter &ModiWriter,
}
void DbiModuleDescriptorBuilder::addDebugSubsection(
- std::unique_ptr<DebugSubsection> Subsection) {
+ std::shared_ptr<DebugSubsection> Subsection) {
assert(Subsection);
C13Builders.push_back(llvm::make_unique<DebugSubsectionRecordBuilder>(
std::move(Subsection), CodeViewContainer::Pdb));
diff --git a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
index 355c7b57f4d1..e7304b444f23 100644
--- a/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp
@@ -45,10 +45,6 @@ void DbiStreamBuilder::setFlags(uint16_t F) { Flags = F; }
void DbiStreamBuilder::setMachineType(PDB_Machine M) { MachineType = M; }
-void DbiStreamBuilder::setSectionContribs(ArrayRef<SectionContrib> Arr) {
- SectionContribs = Arr;
-}
-
void DbiStreamBuilder::setSectionMap(ArrayRef<SecMapEntry> SecMap) {
SectionMap = SecMap;
}
@@ -293,23 +289,17 @@ static uint16_t toSecMapFlags(uint32_t Flags) {
return Ret;
}
-// A utility function to create Section Contributions
-// for a given input sections.
-std::vector<SectionContrib> DbiStreamBuilder::createSectionContribs(
- ArrayRef<object::coff_section> SecHdrs) {
- std::vector<SectionContrib> Ret;
-
- // Create a SectionContrib for each input section.
- for (auto &Sec : SecHdrs) {
- Ret.emplace_back();
- auto &Entry = Ret.back();
- memset(&Entry, 0, sizeof(Entry));
-
- Entry.Off = Sec.PointerToRawData;
- Entry.Size = Sec.SizeOfRawData;
- Entry.Characteristics = Sec.Characteristics;
- }
- return Ret;
+void DbiStreamBuilder::addSectionContrib(DbiModuleDescriptorBuilder *ModuleDbi,
+ const object::coff_section *SecHdr) {
+ SectionContrib SC;
+ memset(&SC, 0, sizeof(SC));
+ SC.ISect = (uint16_t)~0U; // This represents nil.
+ SC.Off = SecHdr->PointerToRawData;
+ SC.Size = SecHdr->SizeOfRawData;
+ SC.Characteristics = SecHdr->Characteristics;
+ // Use the module index in the module dbi stream or nil (-1).
+ SC.Imod = ModuleDbi ? ModuleDbi->getModuleIndex() : (uint16_t)~0U;
+ SectionContribs.emplace_back(SC);
}
// A utility function to create a Section Map for a given list of COFF sections.
@@ -372,7 +362,7 @@ Error DbiStreamBuilder::commit(const msf::MSFLayout &Layout,
if (!SectionContribs.empty()) {
if (auto EC = Writer.writeEnum(DbiSecContribVer60))
return EC;
- if (auto EC = Writer.writeArray(SectionContribs))
+ if (auto EC = Writer.writeArray(makeArrayRef(SectionContribs)))
return EC;
}
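
addSectionContrib replaces the bulk createSectionContribs helper: each contribution is now filled in one at a time, and Imod records the owning module's index or the 0xFFFF "nil" sentinel when there is none. A simplified sketch of that pattern with stand-in struct definitions (not the PDB on-disk types):

#include <cstdint>
#include <cstring>
#include <vector>

struct CoffSection { uint32_t PointerToRawData, SizeOfRawData, Characteristics; };
struct SectionContrib { uint16_t ISect, Imod; uint32_t Off, Size, Characteristics; };
constexpr uint16_t NilIndex = static_cast<uint16_t>(~0u); // 0xFFFF means "none"

struct ModuleInfo { uint16_t Index; };

static void addSectionContrib(std::vector<SectionContrib> &Contribs,
                              const ModuleInfo *Module, const CoffSection &Sec) {
  SectionContrib SC;
  std::memset(&SC, 0, sizeof(SC));
  SC.ISect = NilIndex; // section index not known yet
  SC.Off = Sec.PointerToRawData;
  SC.Size = Sec.SizeOfRawData;
  SC.Characteristics = Sec.Characteristics;
  SC.Imod = Module ? Module->Index : NilIndex; // owning module or nil
  Contribs.push_back(SC);
}

int main() {
  std::vector<SectionContrib> Contribs;
  ModuleInfo M{0};
  addSectionContrib(Contribs, &M, {0x400, 0x200, 0x60000020});
  addSectionContrib(Contribs, nullptr, {0x600, 0x100, 0x40000040});
  return Contribs.size() == 2 ? 0 : 1;
}
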
diff --git a/lib/DebugInfo/PDB/Native/InfoStream.cpp b/lib/DebugInfo/PDB/Native/InfoStream.cpp
index 7c6069652da6..a3979d480bf4 100644
--- a/lib/DebugInfo/PDB/Native/InfoStream.cpp
+++ b/lib/DebugInfo/PDB/Native/InfoStream.cpp
@@ -102,6 +102,10 @@ InfoStream::named_streams() const {
return NamedStreams.entries();
}
+bool InfoStream::containsIdStream() const {
+ return !!(Features & PdbFeatureContainsIdStream);
+}
+
PdbRaw_ImplVer InfoStream::getVersion() const {
return static_cast<PdbRaw_ImplVer>(Version);
}
diff --git a/lib/DebugInfo/PDB/Native/PDBFile.cpp b/lib/DebugInfo/PDB/Native/PDBFile.cpp
index 1254e23c73eb..a9597cdf4c4d 100644
--- a/lib/DebugInfo/PDB/Native/PDBFile.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBFile.cpp
@@ -363,6 +363,16 @@ Expected<PDBStringTable &> PDBFile::getStringTable() {
return *Strings;
}
+uint32_t PDBFile::getPointerSize() {
+ auto DbiS = getPDBDbiStream();
+ if (!DbiS)
+ return 0;
+ PDB_Machine Machine = DbiS->getMachineType();
+ if (Machine == PDB_Machine::Amd64)
+ return 8;
+ return 4;
+}
+
bool PDBFile::hasPDBDbiStream() const { return StreamDBI < getNumStreams(); }
bool PDBFile::hasPDBGlobalsStream() {
diff --git a/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp b/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
index 2c6465e6fb2a..12b0c3b36c1d 100644
--- a/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBFileBuilder.cpp
@@ -80,6 +80,16 @@ Error PDBFileBuilder::addNamedStream(StringRef Name, uint32_t Size) {
}
Expected<msf::MSFLayout> PDBFileBuilder::finalizeMsfLayout() {
+
+ if (Ipi && Ipi->getRecordCount() > 0) {
+ // In theory newer PDBs always have an ID stream, but by saying that we're
+ // only going to *really* have an ID stream if there is at least one ID
+ // record, we leave open the opportunity to test older PDBs such as those
+ // that don't have an ID stream.
+ auto &Info = getInfoBuilder();
+ Info.addFeature(PdbRaw_FeatureSig::VC140);
+ }
+
uint32_t StringsLen = Strings.calculateSerializedSize();
if (auto EC = addNamedStream("/names", StringsLen))
diff --git a/lib/DebugInfo/PDB/Native/PDBStringTable.cpp b/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
index 6013c342cf02..f9f8ac219d35 100644
--- a/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBStringTable.cpp
@@ -56,7 +56,8 @@ Error PDBStringTable::readStrings(BinaryStreamReader &Reader) {
return Error::success();
}
-codeview::DebugStringTableSubsectionRef PDBStringTable::getStringTable() const {
+const codeview::DebugStringTableSubsectionRef &
+PDBStringTable::getStringTable() const {
return Strings;
}
diff --git a/lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp b/lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp
index a472181a4895..90acfadd311f 100644
--- a/lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp
+++ b/lib/DebugInfo/PDB/Native/PDBStringTableBuilder.cpp
@@ -52,6 +52,11 @@ uint32_t PDBStringTableBuilder::calculateSerializedSize() const {
return Size;
}
+void PDBStringTableBuilder::setStrings(
+ const codeview::DebugStringTableSubsection &Strings) {
+ this->Strings = Strings;
+}
+
Error PDBStringTableBuilder::writeHeader(BinaryStreamWriter &Writer) const {
// Write a header
PDBStringTableHeader H;
diff --git a/lib/DebugInfo/PDB/Native/PublicsStream.cpp b/lib/DebugInfo/PDB/Native/PublicsStream.cpp
index 091ac67035dc..8f3474b9ce19 100644
--- a/lib/DebugInfo/PDB/Native/PublicsStream.cpp
+++ b/lib/DebugInfo/PDB/Native/PublicsStream.cpp
@@ -130,4 +130,13 @@ PublicsStream::getSymbols(bool *HadError) const {
return SS.getSymbols(HadError);
}
+Expected<const codeview::CVSymbolArray &>
+PublicsStream::getSymbolArray() const {
+ auto SymbolS = Pdb.getPDBSymbolStream();
+ if (!SymbolS)
+ return SymbolS.takeError();
+
+ return SymbolS->getSymbolArray();
+}
+
Error PublicsStream::commit() { return Error::success(); }
diff --git a/lib/DebugInfo/PDB/Native/TpiHashing.cpp b/lib/DebugInfo/PDB/Native/TpiHashing.cpp
index 16904a5a27ed..91b8d648fcf9 100644
--- a/lib/DebugInfo/PDB/Native/TpiHashing.cpp
+++ b/lib/DebugInfo/PDB/Native/TpiHashing.cpp
@@ -9,6 +9,7 @@
#include "llvm/DebugInfo/PDB/Native/TpiHashing.h"
+#include "llvm/DebugInfo/CodeView/TypeDeserializer.h"
#include "llvm/DebugInfo/PDB/Native/Hash.h"
#include "llvm/DebugInfo/PDB/Native/RawError.h"
diff --git a/lib/DebugInfo/PDB/UDTLayout.cpp b/lib/DebugInfo/PDB/UDTLayout.cpp
index aacefae80c3a..da353cb6977c 100644
--- a/lib/DebugInfo/PDB/UDTLayout.cpp
+++ b/lib/DebugInfo/PDB/UDTLayout.cpp
@@ -181,13 +181,14 @@ void UDTLayoutBase::initializeChildren(const PDBSymbol &Sym) {
if (Data->getDataKind() == PDB_DataKind::Member)
Members.push_back(std::move(Data));
else
- Other.push_back(std::move(Child));
+ Other.push_back(std::move(Data));
} else if (auto VT = unique_dyn_cast<PDBSymbolTypeVTable>(Child))
VTables.push_back(std::move(VT));
else if (auto Func = unique_dyn_cast<PDBSymbolFunc>(Child))
Funcs.push_back(std::move(Func));
- else
+ else {
Other.push_back(std::move(Child));
+ }
}
// We don't want to have any re-allocations in the list of bases, so make
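
The one-line UDTLayout fix matters because, as I understand unique_dyn_cast's contract, a successful cast moves ownership out of Child, so the old code pushed an already-emptied pointer into Other. A small sketch of that ownership pitfall, using plain dynamic_cast on unique_ptr in place of the LLVM helper:

#include <cassert>
#include <memory>
#include <vector>

struct Symbol { virtual ~Symbol() = default; };
struct DataSymbol : Symbol {};

// Move the pointee into a DataSymbol pointer if the cast succeeds, leaving
// 'In' empty -- the same contract unique_dyn_cast is documented to have.
static std::unique_ptr<DataSymbol> takeAsData(std::unique_ptr<Symbol> &In) {
  if (auto *D = dynamic_cast<DataSymbol *>(In.get())) {
    In.release();
    return std::unique_ptr<DataSymbol>(D);
  }
  return nullptr;
}

int main() {
  std::vector<std::unique_ptr<Symbol>> Other;
  std::unique_ptr<Symbol> Child = std::make_unique<DataSymbol>();
  if (auto Data = takeAsData(Child)) {
    // Correct: keep the object we actually hold now.
    Other.push_back(std::move(Data));
    // The bug being fixed: pushing std::move(Child) here would store nullptr,
    // because Child was emptied by the successful cast.
  }
  assert(Other.size() == 1 && Other.back() != nullptr);
}
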
diff --git a/lib/Fuzzer/FuzzerDriver.cpp b/lib/Fuzzer/FuzzerDriver.cpp
index 9aad3771784d..0453a7f443b5 100644
--- a/lib/Fuzzer/FuzzerDriver.cpp
+++ b/lib/Fuzzer/FuzzerDriver.cpp
@@ -553,12 +553,12 @@ int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
return RunInMultipleProcesses(Args, Flags.workers, Flags.jobs);
const size_t kMaxSaneLen = 1 << 20;
- const size_t kMinDefaultLen = 64;
+ const size_t kMinDefaultLen = 4096;
FuzzingOptions Options;
Options.Verbosity = Flags.verbosity;
Options.MaxLen = Flags.max_len;
Options.ExperimentalLenControl = Flags.experimental_len_control;
- if (Flags.experimental_len_control && Flags.max_len == 64)
+ if (Flags.experimental_len_control && Flags.max_len == kMinDefaultLen)
Options.MaxLen = 1 << 20;
Options.UnitTimeoutSec = Flags.timeout;
Options.ErrorExitCode = Flags.error_exitcode;
diff --git a/lib/Fuzzer/FuzzerLoop.cpp b/lib/Fuzzer/FuzzerLoop.cpp
index f6083282ab61..fbf18357ede6 100644
--- a/lib/Fuzzer/FuzzerLoop.cpp
+++ b/lib/Fuzzer/FuzzerLoop.cpp
@@ -301,7 +301,9 @@ void Fuzzer::SetMaxInputLen(size_t MaxInputLen) {
this->MaxInputLen = MaxInputLen;
this->MaxMutationLen = MaxInputLen;
AllocateCurrentUnitData();
- Printf("INFO: -max_len is not provided, using %zd\n", MaxInputLen);
+ Printf("INFO: -max_len is not provided; "
+ "libFuzzer will not generate inputs larger than %zd bytes\n",
+ MaxInputLen);
}
void Fuzzer::SetMaxMutationLen(size_t MaxMutationLen) {
diff --git a/lib/Fuzzer/FuzzerTracePC.cpp b/lib/Fuzzer/FuzzerTracePC.cpp
index ea93468ea0ed..6f5c7be41062 100644
--- a/lib/Fuzzer/FuzzerTracePC.cpp
+++ b/lib/Fuzzer/FuzzerTracePC.cpp
@@ -53,6 +53,17 @@ size_t TracePC::GetTotalPCCoverage() {
return Res;
}
+
+void TracePC::HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop) {
+ if (Start == Stop) return;
+ if (NumModulesWithInline8bitCounters &&
+ ModuleCounters[NumModulesWithInline8bitCounters-1].Start == Start) return;
+ assert(NumModulesWithInline8bitCounters <
+ sizeof(ModuleCounters) / sizeof(ModuleCounters[0]));
+ ModuleCounters[NumModulesWithInline8bitCounters++] = {Start, Stop};
+ NumInline8bitCounters += Stop - Start;
+}
+
void TracePC::HandleInit(uint32_t *Start, uint32_t *Stop) {
if (Start == Stop || *Start) return;
assert(NumModules < sizeof(Modules) / sizeof(Modules[0]));
@@ -76,6 +87,13 @@ void TracePC::PrintModuleInfo() {
for (size_t i = 0; i < NumModules; i++)
Printf("[%p, %p), ", Modules[i].Start, Modules[i].Stop);
Printf("\n");
+ if (NumModulesWithInline8bitCounters) {
+ Printf("INFO: Loaded %zd modules with %zd inline 8-bit counters\n",
+ NumModulesWithInline8bitCounters, NumInline8bitCounters);
+ for (size_t i = 0; i < NumModulesWithInline8bitCounters; i++)
+ Printf("[%p, %p), ", ModuleCounters[i].Start, ModuleCounters[i].Stop);
+ Printf("\n");
+ }
}
ATTRIBUTE_NO_SANITIZE_ALL
@@ -304,6 +322,11 @@ void __sanitizer_cov_trace_pc_guard_init(uint32_t *Start, uint32_t *Stop) {
}
ATTRIBUTE_INTERFACE
+void __sanitizer_cov_8bit_counters_init(uint8_t *Start, uint8_t *Stop) {
+ fuzzer::TPC.HandleInline8bitCountersInit(Start, Stop);
+}
+
+ATTRIBUTE_INTERFACE
ATTRIBUTE_NO_SANITIZE_ALL
void __sanitizer_cov_trace_pc_indir(uintptr_t Callee) {
uintptr_t PC = reinterpret_cast<uintptr_t>(__builtin_return_address(0));
diff --git a/lib/Fuzzer/FuzzerTracePC.h b/lib/Fuzzer/FuzzerTracePC.h
index 6523fa06005c..5ec8c590b4df 100644
--- a/lib/Fuzzer/FuzzerTracePC.h
+++ b/lib/Fuzzer/FuzzerTracePC.h
@@ -51,7 +51,8 @@ class TracePC {
// How many bits of PC are used from __sanitizer_cov_trace_pc.
static const size_t kTracePcBits = 18;
- void HandleInit(uint32_t *start, uint32_t *stop);
+ void HandleInit(uint32_t *Start, uint32_t *Stop);
+ void HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop);
void HandleCallerCallee(uintptr_t Caller, uintptr_t Callee);
template <class T> void HandleCmp(uintptr_t PC, T Arg1, T Arg2);
size_t GetTotalPCCoverage();
@@ -104,6 +105,10 @@ private:
size_t NumModules; // linker-initialized.
size_t NumGuards; // linker-initialized.
+ struct { uint8_t *Start, *Stop; } ModuleCounters[4096];
+ size_t NumModulesWithInline8bitCounters; // linker-initialized.
+ size_t NumInline8bitCounters;
+
uint8_t *Counters() const;
uintptr_t *PCs() const;
@@ -118,12 +123,24 @@ void ForEachNonZeroByte(const uint8_t *Begin, const uint8_t *End,
size_t FirstFeature, Callback Handle8bitCounter) {
typedef uintptr_t LargeType;
const size_t Step = sizeof(LargeType) / sizeof(uint8_t);
- assert(!(reinterpret_cast<uintptr_t>(Begin) % 64));
- for (auto P = Begin; P < End; P += Step)
+ const size_t StepMask = Step - 1;
+ auto P = Begin;
+ // Iterate by 1 byte until either the alignment boundary or the end.
+ for (; reinterpret_cast<uintptr_t>(P) & StepMask && P < End; P++)
+ if (uint8_t V = *P)
+ Handle8bitCounter(FirstFeature + P - Begin, V);
+
+ // Iterate by Step bytes at a time.
+ for (; P < End; P += Step)
if (LargeType Bundle = *reinterpret_cast<const LargeType *>(P))
for (size_t I = 0; I < Step; I++, Bundle >>= 8)
if (uint8_t V = Bundle & 0xff)
Handle8bitCounter(FirstFeature + P - Begin + I, V);
+
+ // Iterate by 1 byte until the end.
+ for (; P < End; P++)
+ if (uint8_t V = *P)
+ Handle8bitCounter(FirstFeature + P - Begin, V);
}
template <class Callback> // bool Callback(size_t Feature)
@@ -145,8 +162,16 @@ void TracePC::CollectFeatures(Callback HandleFeature) const {
HandleFeature(Idx * 8 + Bit);
};
- ForEachNonZeroByte(Counters, Counters + N, 0, Handle8bitCounter);
- ForEachNonZeroByte(ExtraCountersBegin(), ExtraCountersEnd(), N * 8,
+ size_t FirstFeature = 0;
+ ForEachNonZeroByte(Counters, Counters + N, FirstFeature, Handle8bitCounter);
+ FirstFeature += N * 8;
+ for (size_t i = 0; i < NumModulesWithInline8bitCounters; i++) {
+ ForEachNonZeroByte(ModuleCounters[i].Start, ModuleCounters[i].Stop,
+ FirstFeature, Handle8bitCounter);
+ FirstFeature += 8 * (ModuleCounters[i].Stop - ModuleCounters[i].Start);
+ }
+
+ ForEachNonZeroByte(ExtraCountersBegin(), ExtraCountersEnd(), FirstFeature,
Handle8bitCounter);
if (UseValueProfile)
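
The rewritten ForEachNonZeroByte drops the 64-byte alignment assertion and instead scans in three phases: byte-by-byte up to the first word boundary, word-at-a-time over the aligned middle (skipping all-zero words cheaply), and byte-by-byte over the tail. A stand-alone, bounds-safe sketch of the same scan; libFuzzer's feature numbering and counter semantics are simplified away:

#include <cstdint>
#include <cstdio>
#include <vector>

template <class Callback> // void Callback(size_t Offset, uint8_t Value)
void forEachNonZeroByte(const uint8_t *Begin, const uint8_t *End, Callback CB) {
  typedef uintptr_t Word;
  const size_t Step = sizeof(Word);
  const size_t StepMask = Step - 1;
  const uint8_t *P = Begin;
  // 1) Byte-wise until P is word-aligned (or we hit End).
  for (; (reinterpret_cast<uintptr_t>(P) & StepMask) && P < End; ++P)
    if (uint8_t V = *P)
      CB(P - Begin, V);
  // 2) Word-wise: skip whole words that are all zero.
  //    (Mirrors the original's word-sized load; byte offsets assume
  //     a little-endian host.)
  for (; static_cast<size_t>(End - P) >= Step; P += Step)
    if (Word Bundle = *reinterpret_cast<const Word *>(P))
      for (size_t I = 0; I < Step; ++I, Bundle >>= 8)
        if (uint8_t V = Bundle & 0xff)
          CB(P - Begin + I, V);
  // 3) Byte-wise tail.
  for (; P < End; ++P)
    if (uint8_t V = *P)
      CB(P - Begin, V);
}

int main() {
  std::vector<uint8_t> Counters(100, 0);
  Counters[3] = 7;
  Counters[42] = 1;
  forEachNonZeroByte(Counters.data(), Counters.data() + Counters.size(),
                     [](size_t Off, uint8_t V) {
                       std::printf("%zu -> %u\n", Off, (unsigned)V);
                     });
}
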
diff --git a/lib/Fuzzer/test/AbsNegAndConstant64Test.cpp b/lib/Fuzzer/test/AbsNegAndConstant64Test.cpp
index dfb6007b7970..b5a61ddca715 100644
--- a/lib/Fuzzer/test/AbsNegAndConstant64Test.cpp
+++ b/lib/Fuzzer/test/AbsNegAndConstant64Test.cpp
@@ -9,7 +9,7 @@
#include <cstring>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Size < 16) return 0;
+ if (Size < 16 || Size > 64) return 0;
int64_t x;
uint64_t y;
memcpy(&x, Data, sizeof(x));
diff --git a/lib/Fuzzer/test/CMakeLists.txt b/lib/Fuzzer/test/CMakeLists.txt
index b39938a705f6..1cf6c9502a2b 100644
--- a/lib/Fuzzer/test/CMakeLists.txt
+++ b/lib/Fuzzer/test/CMakeLists.txt
@@ -206,6 +206,9 @@ include_directories(..)
add_subdirectory(no-coverage)
add_subdirectory(trace-pc)
add_subdirectory(ubsan)
+if (NOT MSVC)
+ add_subdirectory(inline-8bit-counters)
+endif()
add_library(LLVMFuzzer-DSO1 SHARED DSO1.cpp)
add_library(LLVMFuzzer-DSO2 SHARED DSO2.cpp)
diff --git a/lib/Fuzzer/test/FourIndependentBranchesTest.cpp b/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
index bbf5ea235c7a..ba963d9b1de8 100644
--- a/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
+++ b/lib/Fuzzer/test/FourIndependentBranchesTest.cpp
@@ -8,6 +8,7 @@
#include <iostream>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ if (Size > 64) return 0;
int bits = 0;
if (Size > 0 && Data[0] == 'F') bits |= 1;
if (Size > 1 && Data[1] == 'U') bits |= 2;
diff --git a/lib/Fuzzer/test/FuzzerUnittest.cpp b/lib/Fuzzer/test/FuzzerUnittest.cpp
index c8beb4331bfa..812894fd947f 100644
--- a/lib/Fuzzer/test/FuzzerUnittest.cpp
+++ b/lib/Fuzzer/test/FuzzerUnittest.cpp
@@ -772,4 +772,16 @@ TEST(Fuzzer, ForEachNonZeroByte) {
Expected = {{108, 1}, {109, 2}, {118, 3}, {120, 4},
{135, 5}, {137, 6}, {146, 7}, {163, 8}};
EXPECT_EQ(Res, Expected);
+
+ Res.clear();
+ ForEachNonZeroByte(Ar + 9, Ar + N, 109, CB);
+ Expected = { {109, 2}, {118, 3}, {120, 4},
+ {135, 5}, {137, 6}, {146, 7}, {163, 8}};
+ EXPECT_EQ(Res, Expected);
+
+ Res.clear();
+ ForEachNonZeroByte(Ar + 9, Ar + N - 9, 109, CB);
+ Expected = { {109, 2}, {118, 3}, {120, 4},
+ {135, 5}, {137, 6}, {146, 7}};
+ EXPECT_EQ(Res, Expected);
}
diff --git a/lib/Fuzzer/test/ShrinkControlFlowTest.cpp b/lib/Fuzzer/test/ShrinkControlFlowTest.cpp
index d09542963626..37eeede7cbff 100644
--- a/lib/Fuzzer/test/ShrinkControlFlowTest.cpp
+++ b/lib/Fuzzer/test/ShrinkControlFlowTest.cpp
@@ -11,6 +11,7 @@
static volatile int Sink;
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ if (Size > 64) return 0;
int8_t Ids[256];
memset(Ids, -1, sizeof(Ids));
for (size_t i = 0; i < Size; i++)
diff --git a/lib/Fuzzer/test/SimpleHashTest.cpp b/lib/Fuzzer/test/SimpleHashTest.cpp
index 99e96cb25dcd..a3f4211ebeef 100644
--- a/lib/Fuzzer/test/SimpleHashTest.cpp
+++ b/lib/Fuzzer/test/SimpleHashTest.cpp
@@ -26,7 +26,7 @@ static uint32_t simple_hash(const uint8_t *Data, size_t Size) {
}
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
- if (Size < 14)
+ if (Size < 14 || Size > 64)
return 0;
uint32_t Hash = simple_hash(&Data[0], Size - 4);
diff --git a/lib/Fuzzer/test/SingleStrncmpTest.cpp b/lib/Fuzzer/test/SingleStrncmpTest.cpp
index b302670fb743..b38c7995d8ff 100644
--- a/lib/Fuzzer/test/SingleStrncmpTest.cpp
+++ b/lib/Fuzzer/test/SingleStrncmpTest.cpp
@@ -8,6 +8,7 @@
#include <cstring>
extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+ if (Size > 64) return 0;
char *S = (char*)Data;
volatile auto Strncmp = &(strncmp); // Make sure strncmp is not inlined.
if (Size >= 6 && !Strncmp(S, "qwerty", 6)) {
diff --git a/lib/Fuzzer/test/TableLookupTest.cpp b/lib/Fuzzer/test/TableLookupTest.cpp
index 8126eeabaf42..4d8ab0611cde 100644
--- a/lib/Fuzzer/test/TableLookupTest.cpp
+++ b/lib/Fuzzer/test/TableLookupTest.cpp