author      Dimitry Andric <dim@FreeBSD.org>    2017-05-02 18:30:13 +0000
committer   Dimitry Andric <dim@FreeBSD.org>    2017-05-02 18:30:13 +0000
commit      a303c417bbdb53703c2c17398b08486bde78f1f6 (patch)
tree        98366d6b93d863cefdc53f16c66c0c5ae7fb2261
parent      12f3ca4cdb95b193af905a00e722a4dcb40b3de3 (diff)
download    src-a303c417bbdb53703c2c17398b08486bde78f1f6.tar.gz
            src-a303c417bbdb53703c2c17398b08486bde78f1f6.zip

Vendor import of llvm trunk r301939 (tag vendor/llvm/llvm-trunk-r301939)

Notes:
    svn path=/vendor/llvm/dist/; revision=317683
    svn path=/vendor/llvm/llvm-trunk-r301939/; revision=317684; tag=vendor/llvm/llvm-trunk-r301939
-rw-r--r--cmake/modules/HandleLLVMOptions.cmake18
-rw-r--r--cmake/modules/VersionFromVCS.cmake6
-rw-r--r--docs/AMDGPUUsage.rst2
-rw-r--r--docs/CMakeLists.txt4
-rw-r--r--docs/LangRef.rst11
-rw-r--r--docs/ProgrammersManual.rst19
-rw-r--r--docs/README.txt15
-rw-r--r--docs/StackMaps.rst7
-rw-r--r--docs/TableGen/LangIntro.rst9
-rw-r--r--docs/doxygen.cfg.in21
-rw-r--r--include/llvm/ADT/APInt.h68
-rw-r--r--include/llvm/Analysis/AssumptionCache.h14
-rw-r--r--include/llvm/Analysis/CGSCCPassManager.h8
-rw-r--r--include/llvm/Analysis/CallGraph.h2
-rw-r--r--include/llvm/Analysis/IVUsers.h2
-rw-r--r--include/llvm/Analysis/InlineCost.h4
-rw-r--r--include/llvm/Analysis/InstructionSimplify.h261
-rw-r--r--include/llvm/Analysis/MemoryBuiltins.h2
-rw-r--r--include/llvm/Analysis/ScalarEvolutionExpander.h2
-rw-r--r--include/llvm/Analysis/TargetTransformInfo.h12
-rw-r--r--include/llvm/Analysis/TargetTransformInfoImpl.h6
-rw-r--r--include/llvm/Analysis/ValueTracking.h4
-rw-r--r--include/llvm/Bitcode/BitcodeReader.h16
-rw-r--r--include/llvm/Bitcode/LLVMBitCodes.h3
-rw-r--r--include/llvm/CodeGen/BasicTTIImpl.h56
-rw-r--r--include/llvm/CodeGen/FunctionLoweringInfo.h14
-rw-r--r--include/llvm/CodeGen/GlobalISel/InstructionSelector.h3
-rw-r--r--include/llvm/CodeGen/ISDOpcodes.h13
-rw-r--r--include/llvm/CodeGen/MIRYamlMapping.h4
-rw-r--r--include/llvm/CodeGen/MachineFrameInfo.h19
-rw-r--r--include/llvm/CodeGen/SelectionDAG.h36
-rw-r--r--include/llvm/CodeGen/SelectionDAGNodes.h136
-rw-r--r--include/llvm/CodeGen/ValueTypes.td2
-rw-r--r--include/llvm/DebugInfo/CodeView/CVRecord.h6
-rw-r--r--include/llvm/DebugInfo/CodeView/CodeView.h5
-rw-r--r--include/llvm/DebugInfo/CodeView/Line.h21
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.h91
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugFragment.h48
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugFragmentRecord.h78
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugFragmentVisitor.h68
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h103
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h137
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleDebugUnknownFragment.h33
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleSubstream.h87
-rw-r--r--include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h132
-rw-r--r--include/llvm/DebugInfo/CodeView/TypeDatabase.h1
-rw-r--r--include/llvm/DebugInfo/DIContext.h4
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFContext.h2
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFDebugLine.h181
-rw-r--r--include/llvm/DebugInfo/DWARF/DWARFFormValue.h1
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h (renamed from include/llvm/DebugInfo/PDB/Native/ModInfo.h)33
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h101
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStream.h4
-rw-r--r--include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h9
-rw-r--r--include/llvm/DebugInfo/PDB/Native/ModInfoBuilder.h74
-rw-r--r--include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h (renamed from include/llvm/DebugInfo/PDB/Native/ModStream.h)30
-rw-r--r--include/llvm/DebugInfo/PDB/Native/ModuleDebugStreamBuilder.h0
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h2
-rw-r--r--include/llvm/DebugInfo/PDB/Native/RawTypes.h6
-rw-r--r--include/llvm/DebugInfo/PDB/Native/StringTableBuilder.h1
-rw-r--r--include/llvm/DebugInfo/Symbolize/Symbolize.h5
-rw-r--r--include/llvm/IR/Argument.h2
-rw-r--r--include/llvm/IR/Attributes.h5
-rw-r--r--include/llvm/IR/Attributes.td3
-rw-r--r--include/llvm/IR/CallSite.h15
-rw-r--r--include/llvm/IR/CallingConv.h4
-rw-r--r--include/llvm/IR/DIBuilder.h29
-rw-r--r--include/llvm/IR/DebugInfoMetadata.h118
-rw-r--r--include/llvm/IR/Function.h20
-rw-r--r--include/llvm/IR/InstrTypes.h12
-rw-r--r--include/llvm/IR/Instructions.h14
-rw-r--r--include/llvm/IR/IntrinsicInst.h4
-rw-r--r--include/llvm/IR/Intrinsics.td12
-rw-r--r--include/llvm/IR/IntrinsicsAMDGPU.td171
-rw-r--r--include/llvm/IR/ModuleSummaryIndex.h14
-rw-r--r--include/llvm/IR/ValueHandle.h161
-rw-r--r--include/llvm/InitializePasses.h1
-rw-r--r--include/llvm/MC/ConstantPools.h4
-rw-r--r--include/llvm/MC/LaneBitmask.h9
-rw-r--r--include/llvm/MC/MCAssembler.h34
-rw-r--r--include/llvm/MC/MCContext.h6
-rw-r--r--include/llvm/MC/MCDwarf.h8
-rw-r--r--include/llvm/MC/MCExpr.h3
-rw-r--r--include/llvm/MC/MCFragment.h4
-rw-r--r--include/llvm/MC/MCInst.h5
-rw-r--r--include/llvm/MC/MCLinkerOptimizationHint.h8
-rw-r--r--include/llvm/MC/MCParser/MCAsmLexer.h9
-rw-r--r--include/llvm/MC/MCParser/MCAsmParser.h6
-rw-r--r--include/llvm/MC/MCParser/MCTargetAsmParser.h2
-rw-r--r--include/llvm/MC/MCRegisterInfo.h8
-rw-r--r--include/llvm/MC/MCSection.h10
-rw-r--r--include/llvm/MC/MCSectionWasm.h6
-rw-r--r--include/llvm/MC/MCStreamer.h3
-rw-r--r--include/llvm/MC/MCSubtargetInfo.h8
-rw-r--r--include/llvm/MC/MCSymbol.h4
-rw-r--r--include/llvm/MC/MCWasmObjectWriter.h4
-rw-r--r--include/llvm/Object/Binary.h3
-rw-r--r--include/llvm/Object/COFF.h9
-rw-r--r--include/llvm/Object/COFFImportFile.h6
-rw-r--r--include/llvm/Object/ELF.h1
-rw-r--r--include/llvm/Object/ModuleSummaryIndexObjectFile.h112
-rw-r--r--include/llvm/Support/AArch64TargetParser.def2
-rw-r--r--include/llvm/Support/BinaryStreamArray.h99
-rw-r--r--include/llvm/Support/BinaryStreamReader.h6
-rw-r--r--include/llvm/Support/BinaryStreamWriter.h2
-rw-r--r--include/llvm/Support/CMakeLists.txt7
-rw-r--r--include/llvm/Support/DynamicLibrary.h7
-rw-r--r--include/llvm/Support/ELFRelocs/AArch64.def37
-rw-r--r--include/llvm/Support/KnownBits.h20
-rw-r--r--include/llvm/Support/LEB128.h25
-rw-r--r--include/llvm/Support/ScopedPrinter.h7
-rw-r--r--include/llvm/Support/StringSaver.h2
-rw-r--r--include/llvm/Support/Wasm.h4
-rw-r--r--include/llvm/Target/Target.td6
-rw-r--r--include/llvm/Target/TargetLowering.h89
-rw-r--r--include/llvm/Transforms/Scalar/NaryReassociate.h2
-rw-r--r--include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h53
-rw-r--r--include/llvm/Transforms/Utils/Cloning.h4
-rw-r--r--include/llvm/Transforms/Utils/Local.h3
-rw-r--r--include/llvm/Transforms/Utils/ModuleUtils.h11
-rw-r--r--include/llvm/Transforms/Utils/SimplifyIndVar.h4
-rw-r--r--include/llvm/Transforms/Utils/ValueMapper.h2
-rw-r--r--include/llvm/Transforms/Vectorize/SLPVectorizer.h6
-rw-r--r--lib/Analysis/AssumptionCache.cpp7
-rw-r--r--lib/Analysis/CFLGraph.h2
-rw-r--r--lib/Analysis/CallGraphSCCPass.cpp5
-rw-r--r--lib/Analysis/DemandedBits.cpp4
-rw-r--r--lib/Analysis/InlineCost.cpp140
-rw-r--r--lib/Analysis/InstructionSimplify.cpp372
-rw-r--r--lib/Analysis/LazyValueInfo.cpp2
-rw-r--r--lib/Analysis/Lint.cpp2
-rw-r--r--lib/Analysis/PHITransAddr.cpp4
-rw-r--r--lib/Analysis/ScalarEvolution.cpp224
-rw-r--r--lib/Analysis/ScalarEvolutionExpander.cpp9
-rw-r--r--lib/Analysis/TargetTransformInfo.cpp6
-rw-r--r--lib/Analysis/ValueTracking.cpp117
-rw-r--r--lib/AsmParser/LLLexer.cpp2
-rw-r--r--lib/AsmParser/LLParser.cpp26
-rw-r--r--lib/AsmParser/LLToken.h2
-rw-r--r--lib/Bitcode/Reader/BitcodeReader.cpp116
-rw-r--r--lib/Bitcode/Reader/MetadataLoader.cpp64
-rw-r--r--lib/Bitcode/Reader/ValueList.cpp2
-rw-r--r--lib/Bitcode/Reader/ValueList.h2
-rw-r--r--lib/Bitcode/Writer/BitcodeWriter.cpp5
-rw-r--r--lib/CodeGen/AsmPrinter/AsmPrinter.cpp2
-rw-r--r--lib/CodeGen/AsmPrinter/CodeViewDebug.cpp24
-rw-r--r--lib/CodeGen/AsmPrinter/CodeViewDebug.h2
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfUnit.cpp15
-rw-r--r--lib/CodeGen/AsmPrinter/DwarfUnit.h4
-rw-r--r--lib/CodeGen/CMakeLists.txt1
-rw-r--r--lib/CodeGen/CodeGenPrepare.cpp11
-rw-r--r--lib/CodeGen/DFAPacketizer.cpp6
-rw-r--r--lib/CodeGen/GlobalISel/CallLowering.cpp4
-rw-r--r--lib/CodeGen/GlobalISel/IRTranslator.cpp5
-rw-r--r--lib/CodeGen/GlobalISel/InstructionSelector.cpp5
-rw-r--r--lib/CodeGen/MIRParser/MIRParser.cpp3
-rw-r--r--lib/CodeGen/MIRPrinter.cpp3
-rw-r--r--lib/CodeGen/MachineFrameInfo.cpp218
-rw-r--r--lib/CodeGen/MachineFunction.cpp208
-rw-r--r--lib/CodeGen/MachineInstr.cpp11
-rw-r--r--lib/CodeGen/SelectionDAG/DAGCombiner.cpp311
-rw-r--r--lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp30
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeDAG.cpp4
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp171
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeTypes.h3
-rw-r--r--lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp27
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAG.cpp745
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp203
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h24
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp2
-rw-r--r--lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp22
-rw-r--r--lib/CodeGen/SelectionDAG/StatepointLowering.cpp21
-rw-r--r--lib/CodeGen/SelectionDAG/TargetLowering.cpp352
-rw-r--r--lib/CodeGen/StackMaps.cpp18
-rw-r--r--lib/CodeGen/TargetLoweringBase.cpp25
-rw-r--r--lib/CodeGen/UnreachableBlockElim.cpp29
-rw-r--r--lib/DebugInfo/CodeView/CMakeLists.txt9
-rw-r--r--lib/DebugInfo/CodeView/EnumTables.cpp28
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.cpp102
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugFragment.cpp16
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugFragmentRecord.cpp84
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugFragmentVisitor.cpp52
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.cpp116
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugLineFragment.cpp155
-rw-r--r--lib/DebugInfo/CodeView/ModuleDebugUnknownFragment.cpp10
-rw-r--r--lib/DebugInfo/CodeView/ModuleSubstream.cpp43
-rw-r--r--lib/DebugInfo/CodeView/ModuleSubstreamVisitor.cpp106
-rw-r--r--lib/DebugInfo/CodeView/TypeDatabase.cpp4
-rw-r--r--lib/DebugInfo/DWARF/DWARFContext.cpp113
-rw-r--r--lib/DebugInfo/DWARF/DWARFDebugLine.cpp357
-rw-r--r--lib/DebugInfo/DWARF/DWARFDebugPubTable.cpp2
-rw-r--r--lib/DebugInfo/DWARF/DWARFFormValue.cpp6
-rw-r--r--lib/DebugInfo/DWARF/DWARFGdbIndex.cpp5
-rw-r--r--lib/DebugInfo/PDB/CMakeLists.txt6
-rw-r--r--lib/DebugInfo/PDB/Native/DbiModuleDescriptor.cpp (renamed from lib/DebugInfo/PDB/Native/ModInfo.cpp)46
-rw-r--r--lib/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.cpp (renamed from lib/DebugInfo/PDB/Native/ModInfoBuilder.cpp)110
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStream.cpp13
-rw-r--r--lib/DebugInfo/PDB/Native/DbiStreamBuilder.cpp15
-rw-r--r--lib/DebugInfo/PDB/Native/ModuleDebugStream.cpp (renamed from lib/DebugInfo/PDB/Native/ModStream.cpp)40
-rw-r--r--lib/DebugInfo/PDB/Native/ModuleDebugStreamBuilder.cpp0
-rw-r--r--lib/DebugInfo/PDB/Native/StringTableBuilder.cpp6
-rw-r--r--lib/DebugInfo/Symbolize/Symbolize.cpp7
-rw-r--r--lib/ExecutionEngine/Interpreter/Execution.cpp2
-rw-r--r--lib/Fuzzer/FuzzerInternal.h2
-rw-r--r--lib/Fuzzer/test/cxxstring.test2
-rw-r--r--lib/IR/AsmWriter.cpp36
-rw-r--r--lib/IR/Attributes.cpp39
-rw-r--r--lib/IR/ConstantRange.cpp105
-rw-r--r--lib/IR/DIBuilder.cpp37
-rw-r--r--lib/IR/DebugInfoMetadata.cpp87
-rw-r--r--lib/IR/Function.cpp12
-rw-r--r--lib/IR/Instructions.cpp3
-rw-r--r--lib/IR/LLVMContextImpl.h29
-rw-r--r--lib/IR/Metadata.cpp4
-rw-r--r--lib/IR/ModuleSummaryIndex.cpp48
-rw-r--r--lib/IR/Value.cpp60
-rw-r--r--lib/IR/Verifier.cpp47
-rw-r--r--lib/LTO/LTO.cpp9
-rw-r--r--lib/LTO/ThinLTOCodeGenerator.cpp20
-rw-r--r--lib/MC/ELFObjectWriter.cpp8
-rw-r--r--lib/MC/MCCodeView.cpp8
-rw-r--r--lib/MC/MCParser/AsmParser.cpp56
-rw-r--r--lib/MC/MCParser/MCAsmLexer.cpp2
-rw-r--r--lib/MC/StringTableBuilder.cpp2
-rw-r--r--lib/MC/WasmObjectWriter.cpp27
-rw-r--r--lib/MC/WinCOFFObjectWriter.cpp15
-rw-r--r--lib/Object/CMakeLists.txt1
-rw-r--r--lib/Object/ELF.cpp66
-rw-r--r--lib/Object/ModuleSummaryIndexObjectFile.cpp129
-rw-r--r--lib/Passes/PassBuilder.cpp8
-rw-r--r--lib/Passes/PassRegistry.def1
-rw-r--r--lib/Support/APInt.cpp34
-rw-r--r--lib/Support/CMakeLists.txt1
-rw-r--r--lib/Support/DynamicLibrary.cpp237
-rw-r--r--lib/Support/PrettyStackTrace.cpp1
-rw-r--r--lib/Support/ScopedPrinter.cpp6
-rw-r--r--lib/Support/SearchForAddressOfSpecialSymbol.cpp58
-rw-r--r--lib/Support/SourceMgr.cpp4
-rw-r--r--lib/Support/Unix/DynamicLibrary.inc131
-rw-r--r--lib/Support/Windows/DynamicLibrary.inc218
-rw-r--r--lib/Target/AArch64/AArch64AsmPrinter.cpp3
-rw-r--r--lib/Target/AArch64/AArch64FastISel.cpp15
-rw-r--r--lib/Target/AArch64/AArch64ISelDAGToDAG.cpp45
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.cpp42
-rw-r--r--lib/Target/AArch64/AArch64ISelLowering.h4
-rw-r--r--lib/Target/AArch64/AArch64InstrInfo.td4
-rw-r--r--lib/Target/AArch64/AArch64InstructionSelector.cpp20
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.cpp68
-rw-r--r--lib/Target/AArch64/AArch64Subtarget.h6
-rw-r--r--lib/Target/AArch64/AArch64TargetMachine.cpp62
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp128
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.cpp1
-rw-r--r--lib/Target/AArch64/MCTargetDesc/AArch64MCExpr.h3
-rw-r--r--lib/Target/AMDGPU/AMDGPU.td2
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.cpp341
-rw-r--r--lib/Target/AMDGPU/AMDGPUAsmPrinter.h33
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp4
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.cpp31
-rw-r--r--lib/Target/AMDGPU/AMDGPUISelLowering.h10
-rw-r--r--lib/Target/AMDGPU/AMDGPUInstrInfo.td9
-rw-r--r--lib/Target/AMDGPU/AMDGPUMachineFunction.cpp1
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetMachine.cpp1
-rw-r--r--lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp1
-rw-r--r--lib/Target/AMDGPU/GCNSchedStrategy.cpp3
-rw-r--r--lib/Target/AMDGPU/R600Intrinsics.td2
-rw-r--r--lib/Target/AMDGPU/SIAnnotateControlFlow.cpp18
-rw-r--r--lib/Target/AMDGPU/SIDefines.h1
-rw-r--r--lib/Target/AMDGPU/SIFixSGPRCopies.cpp13
-rw-r--r--lib/Target/AMDGPU/SIISelLowering.cpp87
-rw-r--r--lib/Target/AMDGPU/SIInstructions.td23
-rw-r--r--lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp1
-rw-r--r--lib/Target/ARM/ARM.h6
-rw-r--r--lib/Target/ARM/ARMCallLowering.cpp15
-rw-r--r--lib/Target/ARM/ARMFastISel.cpp24
-rw-r--r--lib/Target/ARM/ARMISelLowering.cpp46
-rw-r--r--lib/Target/ARM/ARMISelLowering.h3
-rw-r--r--lib/Target/ARM/ARMInstrInfo.td12
-rw-r--r--lib/Target/ARM/ARMInstructionSelector.cpp77
-rw-r--r--lib/Target/ARM/ARMInstructionSelector.h42
-rw-r--r--lib/Target/ARM/ARMTargetMachine.cpp3
-rw-r--r--lib/Target/ARM/CMakeLists.txt5
-rw-r--r--lib/Target/AVR/AVRFrameLowering.cpp28
-rw-r--r--lib/Target/AVR/AVRISelLowering.cpp31
-rw-r--r--lib/Target/AVR/AVRISelLowering.h2
-rw-r--r--lib/Target/AVR/AVRInstrInfo.td41
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.cpp4
-rw-r--r--lib/Target/AVR/MCTargetDesc/AVRMCCodeEmitter.h3
-rw-r--r--lib/Target/BPF/Disassembler/BPFDisassembler.cpp26
-rw-r--r--lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp82
-rw-r--r--lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp3
-rw-r--r--lib/Target/Hexagon/HexagonBitTracker.cpp9
-rw-r--r--lib/Target/Hexagon/HexagonCFGOptimizer.cpp13
-rw-r--r--lib/Target/Hexagon/HexagonDepITypes.h1
-rw-r--r--lib/Target/Hexagon/HexagonDepITypes.td1
-rw-r--r--lib/Target/Hexagon/HexagonDepInstrInfo.td272
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormats.td3
-rw-r--r--lib/Target/Hexagon/HexagonInstrFormatsV60.td6
-rw-r--r--lib/Target/Hexagon/HexagonLoopIdiomRecognition.cpp6
-rw-r--r--lib/Target/Hexagon/HexagonMachineScheduler.h11
-rw-r--r--lib/Target/Hexagon/HexagonRegisterInfo.td2
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp13
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonBaseInfo.h5
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.cpp274
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCChecker.h126
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCCompound.cpp7
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.cpp11
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCInstrInfo.h1
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.cpp100
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonMCShuffler.h35
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.cpp120
-rw-r--r--lib/Target/Hexagon/MCTargetDesc/HexagonShuffler.h32
-rw-r--r--lib/Target/Hexagon/RDFLiveness.cpp9
-rw-r--r--lib/Target/Lanai/LanaiRegisterInfo.cpp6
-rw-r--r--lib/Target/Lanai/LanaiRegisterInfo.h2
-rw-r--r--lib/Target/Mips/CMakeLists.txt1
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsELFObjectWriter.cpp2
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsFixupKinds.h3
-rw-r--r--lib/Target/Mips/MCTargetDesc/MipsMCCodeEmitter.cpp3
-rw-r--r--lib/Target/Mips/MicroMipsSizeReduction.cpp335
-rw-r--r--lib/Target/Mips/Mips.h1
-rw-r--r--lib/Target/Mips/MipsFastISel.cpp30
-rw-r--r--lib/Target/Mips/MipsTargetMachine.cpp1
-rw-r--r--lib/Target/NVPTX/NVPTXAsmPrinter.cpp4
-rw-r--r--lib/Target/NVPTX/NVPTXLowerArgs.cpp2
-rw-r--r--lib/Target/PowerPC/PPCISelDAGToDAG.cpp23
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.cpp69
-rw-r--r--lib/Target/PowerPC/PPCISelLowering.h3
-rw-r--r--lib/Target/PowerPC/PPCInstrVSX.td14
-rw-r--r--lib/Target/Sparc/SparcISelLowering.cpp16
-rw-r--r--lib/Target/Sparc/SparcISelLowering.h3
-rw-r--r--lib/Target/SystemZ/SystemZISelDAGToDAG.cpp19
-rw-r--r--lib/Target/SystemZ/SystemZISelLowering.cpp19
-rw-r--r--lib/Target/SystemZ/SystemZTargetTransformInfo.cpp1
-rw-r--r--lib/Target/TargetLoweringObjectFile.cpp2
-rw-r--r--lib/Target/WebAssembly/WebAssemblyISelDAGToDAG.cpp1
-rw-r--r--lib/Target/WebAssembly/WebAssemblyInstrMemory.td14
-rw-r--r--lib/Target/X86/X86FastISel.cpp15
-rw-r--r--lib/Target/X86/X86ISelDAGToDAG.cpp8
-rw-r--r--lib/Target/X86/X86ISelLowering.cpp412
-rw-r--r--lib/Target/X86/X86ISelLowering.h10
-rw-r--r--lib/Target/X86/X86InstrCompiler.td10
-rw-r--r--lib/Target/X86/X86InstrInfo.td10
-rw-r--r--lib/Target/X86/X86InstructionSelector.cpp98
-rw-r--r--lib/Target/X86/X86LegalizerInfo.cpp20
-rw-r--r--lib/Target/X86/X86OptimizeLEAs.cpp54
-rw-r--r--lib/Target/X86/X86SelectionDAGInfo.cpp108
-rw-r--r--lib/Target/X86/X86Subtarget.cpp11
-rw-r--r--lib/Target/X86/X86Subtarget.h9
-rw-r--r--lib/Target/X86/X86TargetMachine.cpp12
-rw-r--r--lib/Target/XCore/XCoreISelLowering.cpp60
-rw-r--r--lib/Target/XCore/XCoreISelLowering.h3
-rw-r--r--lib/Target/XCore/XCoreLowerThreadLocal.cpp10
-rw-r--r--lib/Transforms/IPO/ArgumentPromotion.cpp29
-rw-r--r--lib/Transforms/IPO/FunctionAttrs.cpp5
-rw-r--r--lib/Transforms/IPO/FunctionImport.cpp2
-rw-r--r--lib/Transforms/IPO/GlobalOpt.cpp6
-rw-r--r--lib/Transforms/IPO/LLVMBuild.txt2
-rw-r--r--lib/Transforms/IPO/MergeFunctions.cpp24
-rw-r--r--lib/Transforms/IPO/PartialInlining.cpp391
-rw-r--r--lib/Transforms/IPO/PassManagerBuilder.cpp15
-rw-r--r--lib/Transforms/IPO/ThinLTOBitcodeWriter.cpp35
-rw-r--r--lib/Transforms/InstCombine/InstCombineAddSub.cpp12
-rw-r--r--lib/Transforms/InstCombine/InstCombineAndOrXor.cpp43
-rw-r--r--lib/Transforms/InstCombine/InstCombineCalls.cpp18
-rw-r--r--lib/Transforms/InstCombine/InstCombineCompares.cpp13
-rw-r--r--lib/Transforms/InstCombine/InstCombineInternal.h19
-rw-r--r--lib/Transforms/InstCombine/InstCombineMulDivRem.cpp19
-rw-r--r--lib/Transforms/InstCombine/InstCombinePHI.cpp2
-rw-r--r--lib/Transforms/InstCombine/InstCombineSelect.cpp3
-rw-r--r--lib/Transforms/InstCombine/InstCombineShifts.cpp6
-rw-r--r--lib/Transforms/InstCombine/InstCombineSimplifyDemanded.cpp10
-rw-r--r--lib/Transforms/InstCombine/InstCombineVectorOps.cpp8
-rw-r--r--lib/Transforms/InstCombine/InstructionCombining.cpp58
-rw-r--r--lib/Transforms/Instrumentation/AddressSanitizer.cpp169
-rw-r--r--lib/Transforms/Instrumentation/IndirectCallPromotion.cpp24
-rw-r--r--lib/Transforms/Instrumentation/MemorySanitizer.cpp4
-rw-r--r--lib/Transforms/ObjCARC/ObjCARC.h13
-rw-r--r--lib/Transforms/ObjCARC/PtrState.cpp36
-rw-r--r--lib/Transforms/Scalar/CMakeLists.txt1
-rw-r--r--lib/Transforms/Scalar/CorrelatedValuePropagation.cpp17
-rw-r--r--lib/Transforms/Scalar/EarlyCSE.cpp49
-rw-r--r--lib/Transforms/Scalar/GVN.cpp2
-rw-r--r--lib/Transforms/Scalar/IndVarSimplify.cpp31
-rw-r--r--lib/Transforms/Scalar/InferAddressSpaces.cpp85
-rw-r--r--lib/Transforms/Scalar/JumpThreading.cpp62
-rw-r--r--lib/Transforms/Scalar/LoopIdiomRecognize.cpp4
-rw-r--r--lib/Transforms/Scalar/LoopInstSimplify.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopRotation.cpp12
-rw-r--r--lib/Transforms/Scalar/LoopSimplifyCFG.cpp2
-rw-r--r--lib/Transforms/Scalar/LoopStrengthReduce.cpp53
-rw-r--r--lib/Transforms/Scalar/LoopUnswitch.cpp12
-rw-r--r--lib/Transforms/Scalar/MemCpyOptimizer.cpp2
-rw-r--r--lib/Transforms/Scalar/NaryReassociate.cpp10
-rw-r--r--lib/Transforms/Scalar/NewGVN.cpp36
-rw-r--r--lib/Transforms/Scalar/Reassociate.cpp4
-rw-r--r--lib/Transforms/Scalar/RewriteStatepointsForGC.cpp72
-rw-r--r--lib/Transforms/Scalar/SROA.cpp26
-rw-r--r--lib/Transforms/Scalar/Scalar.cpp2
-rw-r--r--lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp2
-rw-r--r--lib/Transforms/Scalar/SimpleLoopUnswitch.cpp626
-rw-r--r--lib/Transforms/Utils/BasicBlockUtils.cpp4
-rw-r--r--lib/Transforms/Utils/CloneFunction.cpp4
-rw-r--r--lib/Transforms/Utils/InlineFunction.cpp4
-rw-r--r--lib/Transforms/Utils/LibCallsShrinkWrap.cpp74
-rw-r--r--lib/Transforms/Utils/Local.cpp60
-rw-r--r--lib/Transforms/Utils/LoopSimplify.cpp4
-rw-r--r--lib/Transforms/Utils/LoopUnroll.cpp4
-rw-r--r--lib/Transforms/Utils/ModuleUtils.cpp32
-rw-r--r--lib/Transforms/Utils/PromoteMemoryToRegister.cpp9
-rw-r--r--lib/Transforms/Utils/SimplifyCFG.cpp6
-rw-r--r--lib/Transforms/Utils/SimplifyIndVar.cpp8
-rw-r--r--lib/Transforms/Utils/SimplifyInstructions.cpp3
-rw-r--r--lib/Transforms/Vectorize/SLPVectorizer.cpp27
-rw-r--r--test/Analysis/AliasSet/unknown-inst-tracking.ll25
-rw-r--r--test/Analysis/ScalarEvolution/flags-from-poison.ll2
-rw-r--r--test/Assembler/dinamespace.ll6
-rw-r--r--test/Assembler/disubprogram.ll16
-rw-r--r--test/Bitcode/DINamespace.ll4
-rw-r--r--test/Bitcode/attributes.ll10
-rw-r--r--test/Bitcode/compatibility.ll14
-rw-r--r--test/CodeGen/AArch64/arm64-anyregcc.ll194
-rw-r--r--test/CodeGen/AArch64/arm64-stackmap.ll76
-rw-r--r--test/CodeGen/AArch64/arm64-tls-dynamics.ll32
-rw-r--r--test/CodeGen/AArch64/stackmap-liveness.ll3
-rw-r--r--test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll4
-rw-r--r--test/CodeGen/AMDGPU/inline-asm.ll14
-rw-r--r--test/CodeGen/AMDGPU/llvm.amdgcn.init.exec.ll80
-rw-r--r--test/CodeGen/AMDGPU/zext-lid.ll39
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-irtranslator.ll85
-rw-r--r--test/CodeGen/ARM/GlobalISel/arm-isel.ll11
-rw-r--r--test/CodeGen/ARM/bool-ext-inc.ll23
-rw-r--r--test/CodeGen/AVR/calling-conv/c/stack.ll8
-rw-r--r--test/CodeGen/AVR/return.ll24
-rw-r--r--test/CodeGen/AVR/rot.ll55
-rw-r--r--test/CodeGen/AVR/varargs.ll6
-rw-r--r--test/CodeGen/BPF/mem_offset_be.ll18
-rw-r--r--test/CodeGen/Hexagon/cfgopt-fall-through.ll71
-rw-r--r--test/CodeGen/Hexagon/rdf-def-mask.ll52
-rw-r--r--test/CodeGen/Hexagon/unreachable-mbb-phi-subreg.mir25
-rw-r--r--test/CodeGen/MIR/Generic/frame-info.mir1
-rw-r--r--test/CodeGen/Mips/llvm-ir/add.ll26
-rw-r--r--test/CodeGen/Mips/llvm-ir/sub.ll12
-rw-r--r--test/CodeGen/Mips/micromips-sizereduction/micromips-lwsp-swsp.ll11
-rw-r--r--test/CodeGen/NVPTX/f16-instructions.ll15
-rw-r--r--test/CodeGen/PowerPC/build-vector-tests.ll46
-rw-r--r--test/CodeGen/PowerPC/ppc64-anyregcc.ll194
-rw-r--r--test/CodeGen/PowerPC/ppc64-i128-abi.ll20
-rw-r--r--test/CodeGen/PowerPC/ppc64-stackmap.ll76
-rw-r--r--test/CodeGen/PowerPC/swaps-le-1.ll46
-rw-r--r--test/CodeGen/PowerPC/swaps-le-2.ll31
-rw-r--r--test/CodeGen/PowerPC/vsx-ldst.ll6
-rw-r--r--test/CodeGen/PowerPC/vsx.ll20
-rw-r--r--test/CodeGen/X86/GlobalISel/binop.ll22
-rw-r--r--test/CodeGen/X86/GlobalISel/callingconv.ll38
-rw-r--r--test/CodeGen/X86/GlobalISel/ext-x86-64.ll29
-rw-r--r--test/CodeGen/X86/GlobalISel/ext.ll64
-rw-r--r--test/CodeGen/X86/GlobalISel/irtranslator-call.ll1
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-ext-x86-64.mir172
-rw-r--r--test/CodeGen/X86/GlobalISel/legalize-ext.mir116
-rw-r--r--test/CodeGen/X86/GlobalISel/memop-x32.ll101
-rw-r--r--test/CodeGen/X86/GlobalISel/memop.ll17
-rw-r--r--test/CodeGen/X86/GlobalISel/regbankselect-X86_64.mir (renamed from test/CodeGen/X86/GlobalISel/X86-regbankselect.mir)0
-rw-r--r--test/CodeGen/X86/GlobalISel/select-add.mir74
-rw-r--r--test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir66
-rw-r--r--test/CodeGen/X86/GlobalISel/select-ext.mir129
-rw-r--r--test/CodeGen/X86/GlobalISel/select-inc.mir37
-rw-r--r--test/CodeGen/X86/GlobalISel/select-memop-x32.mir310
-rw-r--r--test/CodeGen/X86/GlobalISel/select-memop.mir55
-rw-r--r--test/CodeGen/X86/addcarry.ll (renamed from test/CodeGen/X86/adde-carry.ll)46
-rw-r--r--test/CodeGen/X86/all-ones-vector.ll334
-rw-r--r--test/CodeGen/X86/anyregcc.ll182
-rw-r--r--test/CodeGen/X86/avx-intrinsics-fast-isel.ll54
-rw-r--r--test/CodeGen/X86/avx512-intrinsics-fast-isel.ll120
-rw-r--r--test/CodeGen/X86/bool-ext-inc.ll42
-rw-r--r--test/CodeGen/X86/bswap_tree.ll53
-rw-r--r--test/CodeGen/X86/cast-vsel.ll611
-rw-r--r--test/CodeGen/X86/clz.ll85
-rw-r--r--test/CodeGen/X86/deopt-bundles.ll88
-rw-r--r--test/CodeGen/X86/deopt-intrinsic-cconv.ll8
-rw-r--r--test/CodeGen/X86/deopt-intrinsic.ll16
-rw-r--r--test/CodeGen/X86/inline-0bh.ll17
-rw-r--r--test/CodeGen/X86/known-bits.ll22
-rw-r--r--test/CodeGen/X86/known-signbits-vector.ll28
-rw-r--r--test/CodeGen/X86/lea-opt-with-debug.mir93
-rw-r--r--test/CodeGen/X86/mul-i1024.ll2187
-rw-r--r--test/CodeGen/X86/mul-i256.ll19
-rw-r--r--test/CodeGen/X86/mul-i512.ll166
-rw-r--r--test/CodeGen/X86/patchpoint-invoke.ll2
-rw-r--r--test/CodeGen/X86/pr14657.ll325
-rw-r--r--test/CodeGen/X86/pr28129.ll87
-rw-r--r--test/CodeGen/X86/pr31088.ll162
-rw-r--r--test/CodeGen/X86/sse2-intrinsics-fast-isel.ll16
-rw-r--r--test/CodeGen/X86/stack-protector-dbginfo.ll2
-rw-r--r--test/CodeGen/X86/stackmap-fast-isel.ll58
-rw-r--r--test/CodeGen/X86/stackmap-large-constants.ll16
-rw-r--r--test/CodeGen/X86/stackmap-large-location-size.ll172
-rw-r--r--test/CodeGen/X86/stackmap-liveness.ll10
-rw-r--r--test/CodeGen/X86/stackmap.ll141
-rw-r--r--test/CodeGen/X86/statepoint-allocas.ll34
-rw-r--r--test/CodeGen/X86/statepoint-live-in.ll20
-rw-r--r--test/CodeGen/X86/statepoint-stackmap-format.ll98
-rw-r--r--test/CodeGen/X86/statepoint-vector.ll48
-rw-r--r--test/CodeGen/X86/vector-shuffle-combining.ll34
-rw-r--r--test/CodeGen/X86/widened-broadcast.ll69
-rw-r--r--test/DebugInfo/AMDGPU/dbg-value-sched-crash.ll95
-rw-r--r--test/DebugInfo/COFF/cpp-mangling.ll2
-rw-r--r--test/DebugInfo/COFF/scopes.ll4
-rw-r--r--test/DebugInfo/Generic/dwarf-public-names.ll2
-rw-r--r--test/DebugInfo/Generic/namespace.ll22
-rw-r--r--test/DebugInfo/Generic/namespace_function_definition.ll2
-rw-r--r--test/DebugInfo/Generic/namespace_inline_function_definition.ll2
-rw-r--r--test/DebugInfo/Generic/thrownTypes.ll38
-rw-r--r--test/DebugInfo/PDB/Inputs/simple-line-info.yaml43
-rw-r--r--test/DebugInfo/PDB/pdbdump-headers.test50
-rw-r--r--test/DebugInfo/PDB/pdbdump-yaml-lineinfo-write.test71
-rw-r--r--test/DebugInfo/PDB/pdbdump-yaml-lineinfo.test116
-rw-r--r--test/DebugInfo/X86/dwarf-linkage-names.ll2
-rw-r--r--test/DebugInfo/X86/dwarf-public-names.ll4
-rw-r--r--test/DebugInfo/X86/generate-odr-hash.ll8
-rw-r--r--test/DebugInfo/X86/gnu-public-names-tu.ll2
-rw-r--r--test/DebugInfo/X86/gnu-public-names.ll10
-rw-r--r--test/DebugInfo/X86/inline-namespace.ll4
-rw-r--r--test/DebugInfo/X86/lexical-block-file-inline.ll2
-rw-r--r--test/DebugInfo/X86/multiple-at-const-val.ll2
-rw-r--r--test/DebugInfo/X86/parameters.ll2
-rw-r--r--test/DebugInfo/X86/pr19307.ll8
-rw-r--r--test/DebugInfo/X86/union-template.ll2
-rw-r--r--test/DebugInfo/dwarfdump-dump-gdbindex.test4
-rw-r--r--test/Feature/optnone-opt.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/global_metadata.ll16
-rw-r--r--test/Instrumentation/AddressSanitizer/global_metadata_darwin.ll8
-rw-r--r--test/Instrumentation/AddressSanitizer/global_metadata_windows.ll2
-rw-r--r--test/Instrumentation/AddressSanitizer/instrument_global.ll8
-rw-r--r--test/Instrumentation/AddressSanitizer/no-globals.ll12
-rw-r--r--test/Linker/2011-08-18-unique-class-type.ll2
-rw-r--r--test/Linker/2011-08-18-unique-class-type2.ll2
-rw-r--r--test/Linker/pr26037.ll2
-rw-r--r--test/MC/AArch64/adrp-relocation.s6
-rw-r--r--test/MC/AArch64/arm32-elf-relocs.s163
-rw-r--r--test/MC/AArch64/arm32-large-relocs.s31
-rw-r--r--test/MC/AArch64/arm32-tls-relocs.s290
-rw-r--r--test/MC/AArch64/arm64-elf-reloc-condbr.s9
-rw-r--r--test/MC/AArch64/arm64-elf-relocs.s138
-rw-r--r--test/MC/AArch64/arm64-tls-relocs.s24
-rw-r--r--test/MC/AArch64/directive-arch-negative.s8
-rw-r--r--test/MC/AArch64/elf-reloc-ldrlit.s12
-rw-r--r--test/MC/AArch64/elf-reloc-pcreladdressing-ilp32.s17
-rw-r--r--test/MC/AArch64/elf-reloc-tstb.s10
-rw-r--r--test/MC/AArch64/elf-reloc-uncondbrimm.s10
-rw-r--r--test/MC/AArch64/error-location.s2
-rw-r--r--test/MC/AArch64/ilp32-diagnostics.s32
-rw-r--r--test/MC/AArch64/inline-asm-modifiers.s2
-rw-r--r--test/MC/AArch64/lp64-diagnostics.s13
-rw-r--r--test/MC/AArch64/tls-relocs.s4
-rw-r--r--test/MC/AMDGPU/vop_dpp.s262
-rw-r--r--test/MC/AMDGPU/vop_dpp_expr.s23
-rw-r--r--test/MC/AVR/inst-lds.s4
-rw-r--r--test/MC/AVR/inst-sts.s4
-rw-r--r--test/MC/AsmParser/altmacro_expression.s65
-rw-r--r--test/MC/AsmParser/negativ_altmacro_expression.s34
-rw-r--r--test/MC/ELF/section-numeric-invalid-type.s2
-rw-r--r--test/MC/Hexagon/PacketRules/registers_readonly.s5
-rw-r--r--test/MC/Hexagon/PacketRules/solo.s5
-rw-r--r--test/MC/Hexagon/multiple_errs.s10
-rw-r--r--test/MC/Hexagon/registers_readonly.s7
-rw-r--r--test/MC/Hexagon/ro-c9.s6
-rw-r--r--test/MC/Hexagon/ro-cc9.s7
-rw-r--r--test/MC/Mips/relocation.s8
-rw-r--r--test/MC/WebAssembly/reloc-code.ll59
-rw-r--r--test/MC/WebAssembly/reloc-data.ll26
-rw-r--r--test/MC/WebAssembly/sections.ll62
-rw-r--r--test/MC/X86/pr27884.s7
-rw-r--r--test/Other/new-pm-defaults.ll10
-rw-r--r--test/TableGen/GlobalISelEmitter.td22
-rw-r--r--test/ThinLTO/X86/debuginfo-cu-import.ll2
-rw-r--r--test/Transforms/CodeExtractor/MultipleExitBranchProb.ll6
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineAnd.ll56
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineAndOr.ll63
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineOptRemark.ll40
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineOr.ll97
-rw-r--r--test/Transforms/CodeExtractor/PartialInlineOrAnd.ll71
-rw-r--r--test/Transforms/CodeExtractor/SingleCondition.ll23
-rw-r--r--test/Transforms/CodeExtractor/unreachable-block.ll6
-rw-r--r--test/Transforms/EarlyCSE/guards.ll346
-rw-r--r--test/Transforms/GVNHoist/hoist-inline.ll2
-rw-r--r--test/Transforms/GlobalOpt/localize-constexpr-debuginfo.ll70
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/infer-addrspacecast.ll43
-rw-r--r--test/Transforms/InferAddressSpaces/AMDGPU/infer-getelementptr.ll39
-rw-r--r--test/Transforms/InferAddressSpaces/NVPTX/bug31948.ll2
-rw-r--r--test/Transforms/Inline/AArch64/switch.ll123
-rw-r--r--test/Transforms/InstCombine/amdgcn-intrinsics.ll6
-rw-r--r--test/Transforms/InstCombine/and-or-not.ll42
-rw-r--r--test/Transforms/InstCombine/and.ll13
-rw-r--r--test/Transforms/InstCombine/apint-and.ll126
-rw-r--r--test/Transforms/InstCombine/apint-and1.ll57
-rw-r--r--test/Transforms/InstCombine/apint-and2.ll82
-rw-r--r--test/Transforms/InstCombine/apint-not.ll25
-rw-r--r--test/Transforms/InstCombine/apint-or.ll23
-rw-r--r--test/Transforms/InstCombine/assume2.ll8
-rw-r--r--test/Transforms/InstCombine/demorgan-zext.ll81
-rw-r--r--test/Transforms/InstCombine/demorgan.ll501
-rw-r--r--test/Transforms/InstCombine/not.ll39
-rw-r--r--test/Transforms/InstSimplify/shufflevector.ll8
-rw-r--r--test/Transforms/JumpThreading/fold-not-thread.ll111
-rw-r--r--test/Transforms/LoopUnswitch/pr32818.ll19
-rw-r--r--test/Transforms/NewGVN/pr32852.ll24
-rw-r--r--test/Transforms/ObjCARC/rv.ll25
-rw-r--r--test/Transforms/PGOProfile/memop_size_opt_zero.ll19
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2006-06-13-SingleEntryPHI.ll35
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2006-06-27-DeadSwitchCase.ll25
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-05-09-Unreachable.ll28
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-05-09-tl.ll95
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-07-12-ExitDomInfo.ll45
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-07-13-DomInfo.ll27
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-07-18-DomInfo.ll66
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-08-01-Dom.ll30
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-08-01-LCSSA.ll55
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2007-10-04-DomFrontier.ll29
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2008-06-02-DomInfo.ll26
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2008-06-17-DomFrontier.ll22
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2010-11-18-LCSSA.ll28
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2011-06-02-CritSwitch.ll28
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2011-09-26-EHCrash.ll63
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2012-04-02-IndirectBr.ll41
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2012-04-30-LoopUnswitch-LPad-Crash.ll97
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2012-05-20-Phi.ll25
-rw-r--r--test/Transforms/SimpleLoopUnswitch/2015-09-18-Addrspace.ll28
-rw-r--r--test/Transforms/SimpleLoopUnswitch/LIV-loop-condtion.ll28
-rw-r--r--test/Transforms/SimpleLoopUnswitch/basictest.ll184
-rw-r--r--test/Transforms/SimpleLoopUnswitch/cleanuppad.ll44
-rw-r--r--test/Transforms/SimpleLoopUnswitch/copy-metadata.ll34
-rw-r--r--test/Transforms/SimpleLoopUnswitch/crash.ll66
-rw-r--r--test/Transforms/SimpleLoopUnswitch/exponential-behavior.ll51
-rw-r--r--test/Transforms/SimpleLoopUnswitch/infinite-loop.ll64
-rw-r--r--test/Transforms/SimpleLoopUnswitch/msan.ll141
-rw-r--r--test/Transforms/SimpleLoopUnswitch/preserve-analyses.ll129
-rw-r--r--test/Transforms/SimpleLoopUnswitch/trivial-unswitch.ll185
-rw-r--r--test/Transforms/SimplifyCFG/speculate-call.ll23
-rw-r--r--test/Verifier/DISubprogram.ll22
-rw-r--r--test/Verifier/speculatable-callsite-invalid.ll24
-rw-r--r--test/Verifier/speculatable-callsite.ll20
-rw-r--r--test/tools/llvm-lto/error.ll2
-rw-r--r--test/tools/llvm-pdbdump/raw-stream-data.test47
-rw-r--r--test/tools/llvm-readobj/Inputs/const-import.libbin0 -> 1200 bytes
-rw-r--r--test/tools/llvm-readobj/coff-const-import.test7
-rw-r--r--test/tools/llvm-readobj/reloc-types.test4
-rw-r--r--test/tools/llvm-readobj/relocations.test5
-rw-r--r--test/tools/llvm-readobj/resources.test19
-rw-r--r--test/tools/llvm-readobj/sections.test5
-rw-r--r--tools/llvm-dwarfdump/llvm-dwarfdump.cpp53
-rw-r--r--tools/llvm-link/CMakeLists.txt1
-rw-r--r--tools/llvm-link/llvm-link.cpp2
-rw-r--r--tools/llvm-lto/llvm-lto.cpp10
-rw-r--r--tools/llvm-pdbdump/C13DebugFragmentVisitor.cpp87
-rw-r--r--tools/llvm-pdbdump/C13DebugFragmentVisitor.h60
-rw-r--r--tools/llvm-pdbdump/CMakeLists.txt3
-rw-r--r--tools/llvm-pdbdump/CompactTypeDumpVisitor.cpp14
-rw-r--r--tools/llvm-pdbdump/CompactTypeDumpVisitor.h2
-rw-r--r--tools/llvm-pdbdump/LLVMOutputStyle.cpp282
-rw-r--r--tools/llvm-pdbdump/PdbYaml.cpp36
-rw-r--r--tools/llvm-pdbdump/PdbYaml.h31
-rw-r--r--tools/llvm-pdbdump/StreamUtil.cpp2
-rw-r--r--tools/llvm-pdbdump/YAMLOutputStyle.cpp177
-rw-r--r--tools/llvm-pdbdump/YAMLOutputStyle.h4
-rw-r--r--tools/llvm-pdbdump/fuzzer/llvm-pdbdump-fuzzer.cpp4
-rw-r--r--tools/llvm-pdbdump/llvm-pdbdump.cpp100
-rw-r--r--tools/llvm-pdbdump/llvm-pdbdump.h2
-rw-r--r--tools/llvm-readobj/COFFDumper.cpp204
-rw-r--r--tools/llvm-readobj/ELFDumper.cpp58
-rw-r--r--tools/llvm-readobj/ObjDumper.h1
-rw-r--r--tools/llvm-readobj/WasmDumper.cpp33
-rw-r--r--tools/llvm-readobj/llvm-readobj.cpp6
-rw-r--r--tools/opt/BreakpointPrinter.cpp2
-rw-r--r--unittests/ADT/APIntTest.cpp30
-rw-r--r--unittests/DebugInfo/DWARF/DWARFDebugInfoTest.cpp241
-rw-r--r--unittests/IR/IRBuilderTest.cpp19
-rw-r--r--unittests/IR/MetadataTest.cpp135
-rw-r--r--unittests/IR/ValueHandleTest.cpp80
-rw-r--r--unittests/Support/BinaryStreamTest.cpp18
-rw-r--r--unittests/Support/CMakeLists.txt2
-rw-r--r--unittests/Support/DynamicLibrary/CMakeLists.txt19
-rw-r--r--unittests/Support/DynamicLibrary/DynamicLibraryTest.cpp133
-rw-r--r--unittests/Support/DynamicLibrary/PipSqueak.cxx46
-rw-r--r--unittests/Support/DynamicLibrary/PipSqueak.h19
-rw-r--r--unittests/Target/AArch64/InstSizes.cpp3
-rw-r--r--utils/TableGen/CodeGenDAGPatterns.cpp3
-rw-r--r--utils/TableGen/CodeGenIntrinsics.h7
-rw-r--r--utils/TableGen/CodeGenTarget.cpp6
-rw-r--r--utils/TableGen/GlobalISelEmitter.cpp91
-rw-r--r--utils/TableGen/IntrinsicEmitter.cpp14
-rw-r--r--utils/TableGen/SubtargetFeatureInfo.cpp22
-rw-r--r--utils/TableGen/SubtargetFeatureInfo.h36
-rw-r--r--utils/TableGen/X86RecognizableInstr.cpp4
695 files changed, 22215 insertions, 10211 deletions
diff --git a/cmake/modules/HandleLLVMOptions.cmake b/cmake/modules/HandleLLVMOptions.cmake
index 882d68e6b608..3dd16d51f0b7 100644
--- a/cmake/modules/HandleLLVMOptions.cmake
+++ b/cmake/modules/HandleLLVMOptions.cmake
@@ -17,6 +17,9 @@ else()
set(LINKER_IS_LLD_LINK FALSE)
endif()
+set(LLVM_ENABLE_LTO OFF CACHE STRING "Build LLVM with LTO. May be specified as Thin or Full to use a particular kind of LTO")
+string(TOUPPER "${LLVM_ENABLE_LTO}" uppercase_LLVM_ENABLE_LTO)
+
# Ninja Job Pool support
# The following only works with the Ninja generator in CMake >= 3.0.
set(LLVM_PARALLEL_COMPILE_JOBS "" CACHE STRING
@@ -32,16 +35,19 @@ endif()
set(LLVM_PARALLEL_LINK_JOBS "" CACHE STRING
"Define the maximum number of concurrent link jobs.")
-if(LLVM_PARALLEL_LINK_JOBS)
- if(NOT CMAKE_MAKE_PROGRAM MATCHES "ninja")
- message(WARNING "Job pooling is only available with Ninja generators.")
- else()
+if(CMAKE_MAKE_PROGRAM MATCHES "ninja")
+ if(NOT LLVM_PARALLEL_LINK_JOBS AND uppercase_LLVM_ENABLE_LTO STREQUAL "THIN")
+ message(STATUS "ThinLTO provides its own parallel linking - limiting parallel link jobs to 2.")
+ set(LLVM_PARALLEL_LINK_JOBS "2")
+ endif()
+ if(LLVM_PARALLEL_LINK_JOBS)
set_property(GLOBAL APPEND PROPERTY JOB_POOLS link_job_pool=${LLVM_PARALLEL_LINK_JOBS})
set(CMAKE_JOB_POOL_LINK link_job_pool)
endif()
+elseif(LLVM_PARALLEL_LINK_JOBS)
+ message(WARNING "Job pooling is only available with Ninja generators.")
endif()
-
if (LINKER_IS_LLD_LINK)
# Pass /MANIFEST:NO so that CMake doesn't run mt.exe on our binaries. Adding
# manifests with mt.exe breaks LLD's symbol tables and takes as much time as
@@ -724,8 +730,6 @@ append_if(LLVM_BUILD_INSTRUMENTED_COVERAGE "-fprofile-instr-generate='${LLVM_PRO
CMAKE_EXE_LINKER_FLAGS
CMAKE_SHARED_LINKER_FLAGS)
-set(LLVM_ENABLE_LTO OFF CACHE STRING "Build LLVM with LTO. May be specified as Thin or Full to use a particular kind of LTO")
-string(TOUPPER "${LLVM_ENABLE_LTO}" uppercase_LLVM_ENABLE_LTO)
if(LLVM_ENABLE_LTO AND LLVM_ON_WIN32 AND NOT LINKER_IS_LLD_LINK)
message(FATAL_ERROR "When compiling for Windows, LLVM_ENABLE_LTO requires using lld as the linker (point CMAKE_LINKER at lld-link.exe)")
endif()
diff --git a/cmake/modules/VersionFromVCS.cmake b/cmake/modules/VersionFromVCS.cmake
index 983b48fefa0e..552fe77cdfb6 100644
--- a/cmake/modules/VersionFromVCS.cmake
+++ b/cmake/modules/VersionFromVCS.cmake
@@ -33,7 +33,8 @@ function(add_version_info_from_vcs VERS)
execute_process(COMMAND ${git_executable} rev-parse --git-dir
WORKING_DIRECTORY ${SOURCE_DIR}/cmake
RESULT_VARIABLE git_result
- OUTPUT_VARIABLE git_dir)
+ OUTPUT_VARIABLE git_dir
+ ERROR_QUIET)
if(git_result EQUAL 0)
# Try to get a ref-id
string(STRIP "${git_dir}" git_dir)
@@ -45,7 +46,8 @@ function(add_version_info_from_vcs VERS)
WORKING_DIRECTORY ${SOURCE_DIR}
TIMEOUT 5
RESULT_VARIABLE git_result
- OUTPUT_VARIABLE git_output)
+ OUTPUT_VARIABLE git_output
+ ERROR_QUIET)
if( git_result EQUAL 0 )
string(REGEX MATCH "URL: ([^ \n]*)" svn_url ${git_output})
if(svn_url)
diff --git a/docs/AMDGPUUsage.rst b/docs/AMDGPUUsage.rst
index 97497057fc96..81c067b317d3 100644
--- a/docs/AMDGPUUsage.rst
+++ b/docs/AMDGPUUsage.rst
@@ -83,7 +83,7 @@ handler as follows:
Usage Code Sequence Description
=============== ============= ===============================================
llvm.trap s_endpgm Causes wavefront to be terminated.
- llvm.debugtrap Nothing. Compiler warning generated that there is no trap handler installed.
+ llvm.debugtrap Nothing Compiler warning generated that there is no trap handler installed.
=============== ============= ===============================================
Assembler
diff --git a/docs/CMakeLists.txt b/docs/CMakeLists.txt
index ad2178dc5875..6dff219ae37f 100644
--- a/docs/CMakeLists.txt
+++ b/docs/CMakeLists.txt
@@ -1,8 +1,8 @@
if (DOXYGEN_FOUND)
if (LLVM_ENABLE_DOXYGEN)
- set(abs_top_srcdir ${LLVM_MAIN_SRC_DIR})
- set(abs_top_builddir ${LLVM_BINARY_DIR})
+ set(abs_top_srcdir ${CMAKE_CURRENT_SOURCE_DIR})
+ set(abs_top_builddir ${CMAKE_CURRENT_BINARY_DIR})
if (HAVE_DOT)
set(DOT ${LLVM_PATH_DOT})
diff --git a/docs/LangRef.rst b/docs/LangRef.rst
index b0a31589cc4f..bf4973ca9aed 100644
--- a/docs/LangRef.rst
+++ b/docs/LangRef.rst
@@ -1535,6 +1535,17 @@ example:
``sanitize_thread``
This attribute indicates that ThreadSanitizer checks
(dynamic thread safety analysis) are enabled for this function.
+``speculatable``
+ This function attribute indicates that the function does not have any
+ effects besides calculating its result and does not have undefined behavior.
+ Note that ``speculatable`` is not enough to conclude that along any
+ particular execution path the number of calls to this function will not be
+ externally observable. This attribute is only valid on functions
+ and declarations, not on individual call sites. If a function is
+ incorrectly marked as speculatable and really does exhibit
+ undefined behavior, the undefined behavior may be observed even
+ if the call site is dead code.
+
``ssp``
This attribute indicates that the function should emit a stack
smashing protector. It is in the form of a "canary" --- a random value
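
As an illustration of the ``speculatable`` attribute documented in the hunk above (a sketch, not part of the imported diff), a front end using LLVM as a library could mark a function with it roughly as follows, assuming the Attribute::Speculatable enumerator that this import adds in include/llvm/IR/Attributes.td:

    #include "llvm/IR/Attributes.h"
    #include "llvm/IR/Function.h"

    // Mark F as having no effects besides computing its result and no
    // undefined behavior, so calls to it may be speculated by the optimizer.
    void markSpeculatable(llvm::Function &F) {
      F.addFnAttr(llvm::Attribute::Speculatable);
    }
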
diff --git a/docs/ProgrammersManual.rst b/docs/ProgrammersManual.rst
index 4fb67e1e6d5f..d115a9cf6de8 100644
--- a/docs/ProgrammersManual.rst
+++ b/docs/ProgrammersManual.rst
@@ -776,22 +776,21 @@ readability.
Using cantFail to simplify safe callsites
"""""""""""""""""""""""""""""""""""""""""
-Some functions may only fail for a subset of their inputs. For such functions
-call-sites using known-safe inputs can assume that the result will be a success
-value.
+Some functions may only fail for a subset of their inputs, so calls using known
+safe inputs can be assumed to succeed.
The cantFail functions encapsulate this by wrapping an assertion that their
argument is a success value and, in the case of Expected<T>, unwrapping the
-T value from the Expected<T> argument:
+T value:
.. code-block:: c++
- Error mayFail(int X);
- Expected<int> mayFail2(int X);
+ Error onlyFailsForSomeXValues(int X);
+ Expected<int> onlyFailsForSomeXValues2(int X);
void foo() {
- cantFail(mayFail(KnownSafeValue));
- int Y = cantFail(mayFail2(KnownSafeValue));
+ cantFail(onlyFailsForSomeXValues(KnownSafeValue));
+ int Y = cantFail(onlyFailsForSomeXValues2(KnownSafeValue));
...
}
@@ -801,8 +800,8 @@ terminate the program on an error input, cantFail simply asserts that the result
is success. In debug builds this will result in an assertion failure if an error
is encountered. In release builds the behavior of cantFail for failure values is
undefined. As such, care must be taken in the use of cantFail: clients must be
-certain that a cantFail wrapped call really can not fail under any
-circumstances.
+certain that a cantFail wrapped call really can not fail with the given
+arguments.
Use of the cantFail functions should be rare in library code, but they are
likely to be of more use in tool and unit-test code where inputs and/or
diff --git a/docs/README.txt b/docs/README.txt
index 6c6e5b90ecf2..f1c74261ce4d 100644
--- a/docs/README.txt
+++ b/docs/README.txt
@@ -51,3 +51,18 @@ running:
cd docs/
make -f Makefile.sphinx linkcheck
+
+Doxygen page Output
+===================
+
+Install doxygen <http://www.stack.nl/~dimitri/doxygen/download.html> and dot2tex <https://dot2tex.readthedocs.io/en/latest>.
+
+ cd <build-dir>
+ cmake -DLLVM_ENABLE_DOXYGEN=On <llvm-top-src-dir>
+ make doxygen-llvm # for LLVM docs
+ make doxygen-clang # for clang docs
+
+It will generate html in
+
+ <build-dir>/docs/doxygen/html # for LLVM docs
+ <build-dir>/tools/clang/docs/doxygen/html # for clang docs
diff --git a/docs/StackMaps.rst b/docs/StackMaps.rst
index a78fde16c2be..99c5e5fbe4de 100644
--- a/docs/StackMaps.rst
+++ b/docs/StackMaps.rst
@@ -319,7 +319,7 @@ format of this section follows:
.. code-block:: none
Header {
- uint8 : Stack Map Version (current version is 2)
+ uint8 : Stack Map Version (current version is 3)
uint8 : Reserved (expected to be 0)
uint16 : Reserved (expected to be 0)
}
@@ -341,10 +341,13 @@ format of this section follows:
uint16 : NumLocations
Location[NumLocations] {
uint8 : Register | Direct | Indirect | Constant | ConstantIndex
- uint8 : Reserved (location flags)
+ uint8 : Reserved (expected to be 0)
+ uint16 : Location Size
uint16 : Dwarf RegNum
+ uint16 : Reserved (expected to be 0)
int32 : Offset or SmallConstant
}
+ uint32 : Padding (only if required to align to 8 bytes)
uint16 : Padding
uint16 : NumLiveOuts
LiveOuts[NumLiveOuts]
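
For clarity (a sketch, not part of the imported diff), the version-3 Location record described above can be mirrored by a plain struct; the field names follow the table, and the layout assumes natural alignment yields the 12-byte record:

    #include <cstdint>

    // One Location entry of the version-3 stack map format shown above:
    // 1-byte kind, 1-byte reserved, 16-bit location size, 16-bit DWARF
    // register number, 16-bit reserved, 32-bit offset or small constant.
    struct StackMapLocationV3 {
      uint8_t  Kind;                   // Register | Direct | Indirect | Constant | ConstantIndex
      uint8_t  Reserved0;              // expected to be 0
      uint16_t LocationSize;
      uint16_t DwarfRegNum;
      uint16_t Reserved1;              // expected to be 0
      int32_t  OffsetOrSmallConstant;
    };

    static_assert(sizeof(StackMapLocationV3) == 12,
                  "matches the 12-byte record described in the text");
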
diff --git a/docs/TableGen/LangIntro.rst b/docs/TableGen/LangIntro.rst
index d8bd17d750b8..460ff9067f20 100644
--- a/docs/TableGen/LangIntro.rst
+++ b/docs/TableGen/LangIntro.rst
@@ -58,6 +58,10 @@ types are:
The 'string' type represents an ordered sequence of characters of arbitrary
length.
+``code``
+ The `code` type represents a code fragment, which can be a single-line or
+ multi-line string literal.
+
``bits<n>``
A 'bits' type is an arbitrary, but fixed, size integer that is broken up
into individual bits. This type is useful because it can handle some bits
@@ -105,7 +109,7 @@ supported include:
hexadecimal integer value
``"foo"``
- string value
+ a single-line string value; it can be assigned to a ``string`` or ``code`` variable.
``[{ ... }]``
usually called a "code fragment", but is just a multiline string literal
@@ -126,7 +130,8 @@ supported include:
access to one bit of a value
``value{15-17}``
- access to multiple bits of a value
+ access to an ordered sequence of bits of a value, in particular ``value{15-17}``
+ produces an order that is the reverse of ``value{17-15}``.
``DEF``
reference to a record definition
diff --git a/docs/doxygen.cfg.in b/docs/doxygen.cfg.in
index 451eaf4d2fcc..e3c7f479ac4e 100644
--- a/docs/doxygen.cfg.in
+++ b/docs/doxygen.cfg.in
@@ -58,7 +58,7 @@ PROJECT_LOGO =
# entered, it will be relative to the location where doxygen was started. If
# left blank the current directory will be used.
-OUTPUT_DIRECTORY = @abs_top_builddir@/docs/doxygen
+OUTPUT_DIRECTORY = @abs_top_builddir@/doxygen
# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub-
# directories (in 2 levels) under the output directory of each output format and
@@ -132,7 +132,7 @@ INLINE_INHERITED_MEMB = NO
# shortest path that makes the file name unique will be used
# The default value is: YES.
-FULL_PATH_NAMES = NO
+FULL_PATH_NAMES = YES
# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
# Stripping is only done if one of the specified strings matches the left-hand
@@ -144,7 +144,7 @@ FULL_PATH_NAMES = NO
# will be relative from the directory where doxygen is started.
# This tag requires that the tag FULL_PATH_NAMES is set to YES.
-STRIP_FROM_PATH = ../..
+STRIP_FROM_PATH = @abs_top_srcdir@/..
# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
# path mentioned in the documentation of a class, which tells the reader which
@@ -153,7 +153,8 @@ STRIP_FROM_PATH = ../..
# specify the list of include paths that are normally passed to the compiler
# using the -I flag.
-STRIP_FROM_INC_PATH =
+STRIP_FROM_INC_PATH = @abs_top_srcdir@/../include
+STRIP_FROM_INC_PATH += @abs_top_srcdir@/../lib
# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
# less readable) file names. This can be useful is your file systems doesn't
@@ -513,7 +514,7 @@ SHOW_GROUPED_MEMB_INC = NO
# files with double quotes in the documentation rather than with sharp brackets.
# The default value is: NO.
-FORCE_LOCAL_INCLUDES = NO
+FORCE_LOCAL_INCLUDES = YES
# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
# documentation for inline members.
@@ -743,9 +744,9 @@ WARN_LOGFILE =
# spaces.
# Note: If this tag is empty the current directory is searched.
-INPUT = @abs_top_srcdir@/include \
- @abs_top_srcdir@/lib \
- @abs_top_srcdir@/docs/doxygen-mainpage.dox
+INPUT = @abs_top_srcdir@/../include \
+ @abs_top_srcdir@/../lib \
+ @abs_top_srcdir@/doxygen-mainpage.dox
# This tag can be used to specify the character encoding of the source files
# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
@@ -813,7 +814,7 @@ EXCLUDE_SYMBOLS =
# that contain example code fragments that are included (see the \include
# command).
-EXAMPLE_PATH = @abs_top_srcdir@/examples
+EXAMPLE_PATH = @abs_top_srcdir@/../examples
# If the value of the EXAMPLE_PATH tag contains directories, you can use the
# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
@@ -833,7 +834,7 @@ EXAMPLE_RECURSIVE = YES
# that contain images that are to be included in the documentation (see the
# \image command).
-IMAGE_PATH = @abs_top_srcdir@/docs/img
+IMAGE_PATH = @abs_top_srcdir@/img
# The INPUT_FILTER tag can be used to specify a program that doxygen should
# invoke to filter for each input file. Doxygen will invoke the filter program
diff --git a/include/llvm/ADT/APInt.h b/include/llvm/ADT/APInt.h
index d0104c3f0fa9..6d74f344aad5 100644
--- a/include/llvm/ADT/APInt.h
+++ b/include/llvm/ADT/APInt.h
@@ -366,7 +366,7 @@ public:
/// that 0 is not a positive value.
///
/// \returns true if this APInt is positive.
- bool isStrictlyPositive() const { return isNonNegative() && !!*this; }
+ bool isStrictlyPositive() const { return isNonNegative() && !isNullValue(); }
/// \brief Determine if all bits are set
///
@@ -377,6 +377,12 @@ public:
return countPopulationSlowCase() == BitWidth;
}
+ /// \brief Determine if all bits are clear
+ ///
+ /// This checks to see if all bits of the APInt are clear or
+ /// not.
+ bool isNullValue() const { return !*this; }
+
/// \brief Determine if this is the largest unsigned value.
///
/// This checks to see if the value of this APInt is the maximum unsigned
@@ -395,7 +401,7 @@ public:
///
/// This checks to see if the value of this APInt is the minimum unsigned
/// value for the APInt's bit width.
- bool isMinValue() const { return !*this; }
+ bool isMinValue() const { return isNullValue(); }
/// \brief Determine if this is the smallest signed value.
///
@@ -611,15 +617,7 @@ public:
}
/// \brief Return a value containing V broadcasted over NewLen bits.
- static APInt getSplat(unsigned NewLen, const APInt &V) {
- assert(NewLen >= V.getBitWidth() && "Can't splat to smaller bit width!");
-
- APInt Val = V.zextOrSelf(NewLen);
- for (unsigned I = V.getBitWidth(); I < NewLen; I <<= 1)
- Val |= Val << I;
-
- return Val;
- }
+ static APInt getSplat(unsigned NewLen, const APInt &V);
/// \brief Determine if two APInts have the same value, after zero-extending
/// one of them (if needed!) to ensure that the bit-widths match.
@@ -687,7 +685,9 @@ public:
///
/// \returns true if *this is zero, false otherwise.
bool operator!() const {
- return *this == 0;
+ if (isSingleWord())
+ return VAL == 0;
+ return countLeadingZerosSlowCase() == BitWidth;
}
/// @}
@@ -874,6 +874,13 @@ public:
return *this;
}
+ /// \brief Left-shift assignment function.
+ ///
+ /// Shifts *this left by shiftAmt and assigns the result to *this.
+ ///
+ /// \returns *this after shifting left by ShiftAmt
+ APInt &operator<<=(const APInt &ShiftAmt);
+
/// @}
/// \name Binary Operators
/// @{
@@ -981,7 +988,11 @@ public:
/// \brief Left-shift function.
///
/// Left-shift this APInt by shiftAmt.
- APInt shl(const APInt &shiftAmt) const;
+ APInt shl(const APInt &ShiftAmt) const {
+ APInt R(*this);
+ R <<= ShiftAmt;
+ return R;
+ }
/// \brief Rotate left by rotateAmt.
APInt rotl(const APInt &rotateAmt) const;
@@ -1333,7 +1344,14 @@ public:
/// \brief Set a given bit to 1.
///
/// Set the given bit to 1 whose position is given as "bitPosition".
- void setBit(unsigned bitPosition);
+ void setBit(unsigned BitPosition) {
+ assert(BitPosition <= BitWidth && "BitPosition out of range");
+ WordType Mask = maskBit(BitPosition);
+ if (isSingleWord())
+ VAL |= Mask;
+ else
+ pVal[whichWord(BitPosition)] |= Mask;
+ }
/// Set the sign bit to 1.
void setSignBit() {
@@ -1344,13 +1362,9 @@ public:
void setBits(unsigned loBit, unsigned hiBit) {
assert(hiBit <= BitWidth && "hiBit out of range");
assert(loBit <= BitWidth && "loBit out of range");
+ assert(loBit <= hiBit && "loBit greater than hiBit");
if (loBit == hiBit)
return;
- if (loBit > hiBit) {
- setLowBits(hiBit);
- setHighBits(BitWidth - loBit);
- return;
- }
if (loBit < APINT_BITS_PER_WORD && hiBit <= APINT_BITS_PER_WORD) {
uint64_t mask = WORD_MAX >> (APINT_BITS_PER_WORD - (hiBit - loBit));
mask <<= loBit;
@@ -1389,7 +1403,19 @@ public:
/// \brief Set a given bit to 0.
///
/// Set the given bit to 0 whose position is given as "bitPosition".
- void clearBit(unsigned bitPosition);
+ void clearBit(unsigned BitPosition) {
+ assert(BitPosition <= BitWidth && "BitPosition out of range");
+ WordType Mask = ~maskBit(BitPosition);
+ if (isSingleWord())
+ VAL &= Mask;
+ else
+ pVal[whichWord(BitPosition)] &= Mask;
+ }
+
+ /// Set the sign bit to 0.
+ void clearSignBit() {
+ clearBit(BitWidth - 1);
+ }
/// \brief Toggle every bit to its opposite value.
void flipAllBits() {
@@ -1695,7 +1721,7 @@ public:
return VAL - 1;
// Handle the zero case.
- if (!getBoolValue())
+ if (isNullValue())
return UINT32_MAX;
// The non-zero case is handled by computing:
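
To make the APInt changes above concrete (a sketch, not part of the imported diff), the new isNullValue(), the APInt-typed shift operators, and the now-inlined setBit/clearBit can be exercised like this:

    #include "llvm/ADT/APInt.h"
    #include <cassert>

    void apintSketch() {
      llvm::APInt V(128, 0);                       // 128-bit value, all bits clear
      assert(V.isNullValue());                     // new name for the "all zero" check
      V.setBit(3);                                 // V == 8
      V <<= llvm::APInt(128, 4);                   // in-place shift by an APInt amount, V == 128
      llvm::APInt W = V.shl(llvm::APInt(128, 1));  // non-destructive shift, W == 256
      V.clearBit(7);                               // bit 7 was the only bit set, so V is zero again
      assert(V.isNullValue() && !W.isNullValue());
    }
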
diff --git a/include/llvm/Analysis/AssumptionCache.h b/include/llvm/Analysis/AssumptionCache.h
index f833f417c7dd..04c6fd70e07f 100644
--- a/include/llvm/Analysis/AssumptionCache.h
+++ b/include/llvm/Analysis/AssumptionCache.h
@@ -43,7 +43,7 @@ class AssumptionCache {
/// \brief Vector of weak value handles to calls of the @llvm.assume
/// intrinsic.
- SmallVector<WeakVH, 4> AssumeHandles;
+ SmallVector<WeakTrackingVH, 4> AssumeHandles;
class AffectedValueCallbackVH final : public CallbackVH {
AssumptionCache *AC;
@@ -62,12 +62,12 @@ class AssumptionCache {
/// \brief A map of values about which an assumption might be providing
/// information to the relevant set of assumptions.
using AffectedValuesMap =
- DenseMap<AffectedValueCallbackVH, SmallVector<WeakVH, 1>,
- AffectedValueCallbackVH::DMI>;
+ DenseMap<AffectedValueCallbackVH, SmallVector<WeakTrackingVH, 1>,
+ AffectedValueCallbackVH::DMI>;
AffectedValuesMap AffectedValues;
/// Get the vector of assumptions which affect a value from the cache.
- SmallVector<WeakVH, 1> &getOrInsertAffectedValues(Value *V);
+ SmallVector<WeakTrackingVH, 1> &getOrInsertAffectedValues(Value *V);
/// Copy affected values in the cache for OV to be affected values for NV.
void copyAffectedValuesInCache(Value *OV, Value *NV);
@@ -120,20 +120,20 @@ public:
/// FIXME: We should replace this with pointee_iterator<filter_iterator<...>>
/// when we can write that to filter out the null values. Then caller code
/// will become simpler.
- MutableArrayRef<WeakVH> assumptions() {
+ MutableArrayRef<WeakTrackingVH> assumptions() {
if (!Scanned)
scanFunction();
return AssumeHandles;
}
/// \brief Access the list of assumptions which affect this value.
- MutableArrayRef<WeakVH> assumptionsFor(const Value *V) {
+ MutableArrayRef<WeakTrackingVH> assumptionsFor(const Value *V) {
if (!Scanned)
scanFunction();
auto AVI = AffectedValues.find_as(const_cast<Value *>(V));
if (AVI == AffectedValues.end())
- return MutableArrayRef<WeakVH>();
+ return MutableArrayRef<WeakTrackingVH>();
return AVI->second;
}
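
As a usage sketch of the WeakTrackingVH-based interface above (illustrative, not part of the imported diff), code holding an AssumptionCache can walk the handles and skip any whose underlying @llvm.assume call has been erased and therefore reads back as null:

    #include "llvm/Analysis/AssumptionCache.h"
    #include "llvm/IR/ValueHandle.h"

    // Count the assume handles in the cache that still point at a live call.
    unsigned countLiveAssumes(llvm::AssumptionCache &AC) {
      unsigned N = 0;
      for (llvm::WeakTrackingVH &VH : AC.assumptions())
        if (VH)   // deleted calls leave a null tracking handle behind
          ++N;
      return N;
    }
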
diff --git a/include/llvm/Analysis/CGSCCPassManager.h b/include/llvm/Analysis/CGSCCPassManager.h
index 398bbfb0c413..a15a9e18c815 100644
--- a/include/llvm/Analysis/CGSCCPassManager.h
+++ b/include/llvm/Analysis/CGSCCPassManager.h
@@ -646,7 +646,7 @@ public:
LazyCallGraph::SCC *C = &InitialC;
// Collect value handles for all of the indirect call sites.
- SmallVector<WeakVH, 8> CallHandles;
+ SmallVector<WeakTrackingVH, 8> CallHandles;
// Struct to track the counts of direct and indirect calls in each function
// of the SCC.
@@ -658,7 +658,7 @@ public:
// Put value handles on all of the indirect calls and return the number of
// direct calls for each function in the SCC.
auto ScanSCC = [](LazyCallGraph::SCC &C,
- SmallVectorImpl<WeakVH> &CallHandles) {
+ SmallVectorImpl<WeakTrackingVH> &CallHandles) {
assert(CallHandles.empty() && "Must start with a clear set of handles.");
SmallVector<CallCount, 4> CallCounts;
@@ -671,7 +671,7 @@ public:
++Count.Direct;
} else {
++Count.Indirect;
- CallHandles.push_back(WeakVH(&I));
+ CallHandles.push_back(WeakTrackingVH(&I));
}
}
}
@@ -699,7 +699,7 @@ public:
"Cannot have changed the size of the SCC!");
// Check whether any of the handles were devirtualized.
- auto IsDevirtualizedHandle = [&](WeakVH &CallH) {
+ auto IsDevirtualizedHandle = [&](WeakTrackingVH &CallH) {
if (!CallH)
return false;
auto CS = CallSite(CallH);
diff --git a/include/llvm/Analysis/CallGraph.h b/include/llvm/Analysis/CallGraph.h
index ea85436ee580..cc4788d3edae 100644
--- a/include/llvm/Analysis/CallGraph.h
+++ b/include/llvm/Analysis/CallGraph.h
@@ -172,7 +172,7 @@ class CallGraphNode {
public:
/// \brief A pair of the calling instruction (a call or invoke)
/// and the call graph node being called.
- typedef std::pair<WeakVH, CallGraphNode *> CallRecord;
+ typedef std::pair<WeakTrackingVH, CallGraphNode *> CallRecord;
public:
typedef std::vector<CallRecord> CalledFunctionsVector;
diff --git a/include/llvm/Analysis/IVUsers.h b/include/llvm/Analysis/IVUsers.h
index bb572dd5603b..035b974c5c1d 100644
--- a/include/llvm/Analysis/IVUsers.h
+++ b/include/llvm/Analysis/IVUsers.h
@@ -80,7 +80,7 @@ private:
/// OperandValToReplace - The Value of the operand in the user instruction
/// that this IVStrideUse is representing.
- WeakVH OperandValToReplace;
+ WeakTrackingVH OperandValToReplace;
/// PostIncLoops - The set of loops for which Expr has been adjusted to
/// use post-inc mode. This corresponds with SCEVExpander's post-inc concept.
diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h
index 17e5cb6db02d..d91d08a524dc 100644
--- a/include/llvm/Analysis/InlineCost.h
+++ b/include/llvm/Analysis/InlineCost.h
@@ -160,6 +160,10 @@ InlineParams getInlineParams(int Threshold);
/// the -Oz flag.
InlineParams getInlineParams(unsigned OptLevel, unsigned SizeOptLevel);
+/// Return the cost associated with a callsite, including parameter passing
+/// and the call/return instruction.
+int getCallsiteCost(CallSite CS, const DataLayout &DL);
+
/// \brief Get an InlineCost object representing the cost of inlining this
/// callsite.
///
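A hedged sketch of how a client might call the new getCallsiteCost entry point; the wrapper function is invented for illustration:

  #include "llvm/Analysis/InlineCost.h"
  #include "llvm/IR/CallSite.h"
  #include "llvm/IR/Module.h"
  using namespace llvm;

  // Sketch: estimate only the call overhead (argument passing + call/return).
  int callOverhead(CallSite CS) {
    const DataLayout &DL = CS.getInstruction()->getModule()->getDataLayout();
    return getCallsiteCost(CS, DL);
  }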
diff --git a/include/llvm/Analysis/InstructionSimplify.h b/include/llvm/Analysis/InstructionSimplify.h
index 25240dae75e7..bf73e099a2bf 100644
--- a/include/llvm/Analysis/InstructionSimplify.h
+++ b/include/llvm/Analysis/InstructionSimplify.h
@@ -35,35 +35,41 @@
#include "llvm/IR/User.h"
namespace llvm {
- template<typename T>
- class ArrayRef;
- class AssumptionCache;
- class DominatorTree;
- class Instruction;
- class DataLayout;
- class FastMathFlags;
- class OptimizationRemarkEmitter;
- class TargetLibraryInfo;
- class Type;
- class Value;
-
- struct SimplifyQuery {
- const DataLayout &DL;
- const TargetLibraryInfo *TLI = nullptr;
- const DominatorTree *DT = nullptr;
- AssumptionCache *AC = nullptr;
- const Instruction *CxtI = nullptr;
- SimplifyQuery(const DataLayout &DL) : DL(DL) {}
-
- SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC = nullptr,
- const Instruction *CXTI = nullptr)
- : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
- SimplifyQuery getWithInstruction(Instruction *I) const {
- SimplifyQuery Copy(*this);
- Copy.CxtI = I;
- return Copy;
- }
+class Function;
+template <typename T, typename... TArgs> class AnalysisManager;
+template <class T> class ArrayRef;
+class AssumptionCache;
+class DominatorTree;
+class Instruction;
+class DataLayout;
+class FastMathFlags;
+struct LoopStandardAnalysisResults;
+class OptimizationRemarkEmitter;
+class Pass;
+class TargetLibraryInfo;
+class Type;
+class Value;
+
+struct SimplifyQuery {
+ const DataLayout &DL;
+ const TargetLibraryInfo *TLI = nullptr;
+ const DominatorTree *DT = nullptr;
+ AssumptionCache *AC = nullptr;
+ const Instruction *CxtI = nullptr;
+
+ SimplifyQuery(const DataLayout &DL, const Instruction *CXTI = nullptr)
+ : DL(DL), CxtI(CXTI) {}
+
+ SimplifyQuery(const DataLayout &DL, const TargetLibraryInfo *TLI,
+ const DominatorTree *DT = nullptr,
+ AssumptionCache *AC = nullptr,
+ const Instruction *CXTI = nullptr)
+ : DL(DL), TLI(TLI), DT(DT), AC(AC), CxtI(CXTI) {}
+ SimplifyQuery getWithInstruction(Instruction *I) const {
+ SimplifyQuery Copy(*this);
+ Copy.CxtI = I;
+ return Copy;
+ }
};
// NOTE: the explicit multiple argument versions of these functions are
@@ -73,257 +79,103 @@ namespace llvm {
/// Given operands for an Add, fold the result or return null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
- Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a Sub, fold the result or return null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
- Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FAdd, fold the result or return null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
- Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FSub, fold the result or return null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
- Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FMul, fold the result or return null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
- Value *SimplifyFMulInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a Mul, fold the result or return null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an SDiv, fold the result or return null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a UDiv, fold the result or return null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FDiv, fold the result or return null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
- Value *SimplifyFDivInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an SRem, fold the result or return null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a URem, fold the result or return null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FRem, fold the result or return null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const SimplifyQuery &Q);
- Value *SimplifyFRemInst(Value *LHS, Value *RHS, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a Shl, fold the result or return null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q);
- Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a LShr, fold the result or return null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
- Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an AShr, fold the result or return null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q);
- Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an And, fold the result or return null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an Or, fold the result or return null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an Xor, fold the result or return null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const SimplifyQuery &Q);
- Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an ICmpInst, fold the result or return null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
- Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FCmpInst, fold the result or return null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
- Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- FastMathFlags FMF, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a SelectInst, fold the result or return null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q);
- Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a GetElementPtrInst, fold the result or return null.
Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const SimplifyQuery &Q);
- Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an InsertValueInst, fold the result or return null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
- Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
- ArrayRef<unsigned> Idxs, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an ExtractValueInst, fold the result or return null.
Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q);
- Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an ExtractElementInst, fold the result or return null.
Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
const SimplifyQuery &Q);
- Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a CastInst, fold the result or return null.
Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const SimplifyQuery &Q);
- Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a ShuffleVectorInst, fold the result or return null.
Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const SimplifyQuery &Q);
- Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
- Type *RetTy, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
//=== Helper functions for higher up the class hierarchy.
@@ -331,63 +183,29 @@ namespace llvm {
/// Given operands for a CmpInst, fold the result or return null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
- Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for a BinaryOperator, fold the result or return null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const SimplifyQuery &Q);
- Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given operands for an FP BinaryOperator, fold the result or return null.
/// In contrast to SimplifyBinOp, try to use FastMathFlags when folding the
/// result. In case we don't need FastMathFlags, simply fall back to SimplifyBinOp.
Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q);
- Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- FastMathFlags FMF, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given a function and iterators over arguments, fold the result or return
/// null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const SimplifyQuery &Q);
- Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// Given a function and set of arguments, fold the result or return null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const SimplifyQuery &Q);
- Value *SimplifyCall(Value *V, ArrayRef<Value *> Args, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- const Instruction *CxtI = nullptr);
/// See if we can compute a simplified version of this instruction. If not,
/// return null.
Value *SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
OptimizationRemarkEmitter *ORE = nullptr);
- Value *SimplifyInstruction(Instruction *I, const DataLayout &DL,
- const TargetLibraryInfo *TLI = nullptr,
- const DominatorTree *DT = nullptr,
- AssumptionCache *AC = nullptr,
- OptimizationRemarkEmitter *ORE = nullptr);
/// Replace all uses of 'I' with 'SimpleV' and simplify the uses recursively.
///
@@ -411,6 +229,15 @@ namespace llvm {
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr,
AssumptionCache *AC = nullptr);
+ // These helper functions return a SimplifyQuery structure that contains as
+ // many of the optional analyses we use as are currently valid. This is the
+ // strongly preferred way of constructing a SimplifyQuery in passes.
+ const SimplifyQuery getBestSimplifyQuery(Pass &, Function &);
+ template <class T, class... TArgs>
+ const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &,
+ Function &);
+ const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &,
+ const DataLayout &);
} // end namespace llvm
#endif
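All of the expanded-argument overloads are gone; callers now bundle the analyses in a SimplifyQuery. A hedged sketch of the new calling convention (the helper name is invented):

  #include "llvm/Analysis/InstructionSimplify.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Sketch: fold an 'add' through the SimplifyQuery-based API.
  Value *tryFoldAdd(BinaryOperator *Add, const DataLayout &DL,
                    const TargetLibraryInfo *TLI, const DominatorTree *DT,
                    AssumptionCache *AC) {
    SimplifyQuery Q(DL, TLI, DT, AC, Add);
    return SimplifyAddInst(Add->getOperand(0), Add->getOperand(1),
                           Add->hasNoSignedWrap(), Add->hasNoUnsignedWrap(), Q);
  }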
diff --git a/include/llvm/Analysis/MemoryBuiltins.h b/include/llvm/Analysis/MemoryBuiltins.h
index 743faf2b67db..60dafccd84bd 100644
--- a/include/llvm/Analysis/MemoryBuiltins.h
+++ b/include/llvm/Analysis/MemoryBuiltins.h
@@ -235,7 +235,7 @@ class ObjectSizeOffsetEvaluator
: public InstVisitor<ObjectSizeOffsetEvaluator, SizeOffsetEvalType> {
typedef IRBuilder<TargetFolder> BuilderTy;
- typedef std::pair<WeakVH, WeakVH> WeakEvalType;
+ typedef std::pair<WeakTrackingVH, WeakTrackingVH> WeakEvalType;
typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
typedef SmallPtrSet<const Value*, 8> PtrSetTy;
diff --git a/include/llvm/Analysis/ScalarEvolutionExpander.h b/include/llvm/Analysis/ScalarEvolutionExpander.h
index 517592a3d049..7d16f34e54cb 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpander.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpander.h
@@ -189,7 +189,7 @@ namespace llvm {
/// replace congruent phis with their most canonical representative. Return
/// the number of phis eliminated.
unsigned replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts,
const TargetTransformInfo *TTI = nullptr);
/// Insert code to directly compute the specified SCEV expression into the
diff --git a/include/llvm/Analysis/TargetTransformInfo.h b/include/llvm/Analysis/TargetTransformInfo.h
index 67196687d556..b9639dba1881 100644
--- a/include/llvm/Analysis/TargetTransformInfo.h
+++ b/include/llvm/Analysis/TargetTransformInfo.h
@@ -197,6 +197,12 @@ public:
int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) const;
+ /// \return The estimated number of case clusters when lowering \p SI.
+ /// \p JTSize is set to the jump table size only when \p SI is suitable for a
+ /// jump table.
+ unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JTSize) const;
+
/// \brief Estimate the cost of a given IR user when lowered.
///
/// This can estimate the cost of either a ConstantExpr or Instruction when
@@ -764,6 +770,8 @@ public:
ArrayRef<Type *> ParamTys) = 0;
virtual int getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
ArrayRef<const Value *> Arguments) = 0;
+ virtual unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JTSize) = 0;
virtual int getUserCost(const User *U) = 0;
virtual bool hasBranchDivergence() = 0;
virtual bool isSourceOfDivergence(const Value *V) = 0;
@@ -1067,6 +1075,10 @@ public:
unsigned getMaxInterleaveFactor(unsigned VF) override {
return Impl.getMaxInterleaveFactor(VF);
}
+ unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JTSize) override {
+ return Impl.getEstimatedNumberOfCaseClusters(SI, JTSize);
+ }
unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
diff --git a/include/llvm/Analysis/TargetTransformInfoImpl.h b/include/llvm/Analysis/TargetTransformInfoImpl.h
index 9ab6b7445ab8..d7fda9e14b05 100644
--- a/include/llvm/Analysis/TargetTransformInfoImpl.h
+++ b/include/llvm/Analysis/TargetTransformInfoImpl.h
@@ -114,6 +114,12 @@ public:
return TTI::TCC_Free;
}
+ unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JTSize) {
+ JTSize = 0;
+ return SI.getNumCases();
+ }
+
unsigned getCallCost(FunctionType *FTy, int NumArgs) {
assert(FTy && "FunctionType must be provided to this routine.");
diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 764308dceed9..a54c39e3ea3a 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -60,7 +60,7 @@ template <typename T> class ArrayRef;
/// \p KnownZero the set of bits that are known to be zero
/// \p KnownOne the set of bits that are known to be one
void computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
- APInt &KnownZero, APInt &KnownOne);
+ KnownBits &Known);
/// Return true if LHS and RHS have no common bits set.
bool haveNoCommonBitsSet(const Value *LHS, const Value *RHS,
const DataLayout &DL,
@@ -417,7 +417,7 @@ template <typename T> class ArrayRef;
///
/// Note that this currently only considers the basic block that is
/// the parent of I.
- bool isKnownNotFullPoison(const Instruction *PoisonI);
+ bool programUndefinedIfFullPoison(const Instruction *PoisonI);
/// \brief Specific patterns of select instructions we can match.
enum SelectPatternFlavor {
diff --git a/include/llvm/Bitcode/BitcodeReader.h b/include/llvm/Bitcode/BitcodeReader.h
index 0701ddbb7f1c..54f990d00233 100644
--- a/include/llvm/Bitcode/BitcodeReader.h
+++ b/include/llvm/Bitcode/BitcodeReader.h
@@ -93,6 +93,10 @@ namespace llvm {
/// Parse the specified bitcode buffer, returning the module summary index.
Expected<std::unique_ptr<ModuleSummaryIndex>> getSummary();
+
+ /// Parse the specified bitcode buffer and merge its module summary index
+ /// into CombinedIndex.
+ Error readSummary(ModuleSummaryIndex &CombinedIndex, unsigned ModuleId);
};
/// Returns a list of modules in the specified bitcode buffer.
@@ -141,6 +145,18 @@ namespace llvm {
Expected<std::unique_ptr<ModuleSummaryIndex>>
getModuleSummaryIndex(MemoryBufferRef Buffer);
+ /// Parse the specified bitcode buffer and merge the index into CombinedIndex.
+ Error readModuleSummaryIndex(MemoryBufferRef Buffer,
+ ModuleSummaryIndex &CombinedIndex,
+ unsigned ModuleId);
+
+ /// Parse the module summary index out of an IR file and return the module
+ /// summary index object if found, or an empty summary if not. If Path refers
+ /// to an empty file and the -ignore-empty-index-file cl::opt flag is passed,
+ /// this function will return nullptr.
+ Expected<std::unique_ptr<ModuleSummaryIndex>>
+ getModuleSummaryIndexForFile(StringRef Path);
+
/// isBitcodeWrapper - Return true if the given bytes are the magic bytes
/// for an LLVM IR bitcode wrapper.
///
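A sketch of driving the new summary-reading entry points together (the function name and control flow are illustrative; Path and Extra are caller-supplied):

  #include "llvm/Bitcode/BitcodeReader.h"
  #include "llvm/IR/ModuleSummaryIndex.h"
  #include "llvm/Support/MemoryBuffer.h"
  using namespace llvm;

  // Sketch: load a per-module summary from Path, then merge a second buffer
  // into it under the given module id.
  Expected<std::unique_ptr<ModuleSummaryIndex>>
  loadAndMerge(StringRef Path, MemoryBufferRef Extra, unsigned ModuleId) {
    Expected<std::unique_ptr<ModuleSummaryIndex>> IndexOrErr =
        getModuleSummaryIndexForFile(Path);
    if (!IndexOrErr)
      return IndexOrErr.takeError();
    if (*IndexOrErr) // may be null for an empty index file (see above)
      if (Error E = readModuleSummaryIndex(Extra, **IndexOrErr, ModuleId))
        return std::move(E);
    return IndexOrErr;
  }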
diff --git a/include/llvm/Bitcode/LLVMBitCodes.h b/include/llvm/Bitcode/LLVMBitCodes.h
index 03eac80bc1e8..8ee1e4b583b6 100644
--- a/include/llvm/Bitcode/LLVMBitCodes.h
+++ b/include/llvm/Bitcode/LLVMBitCodes.h
@@ -545,7 +545,8 @@ enum AttributeKindCodes {
ATTR_KIND_INACCESSIBLEMEM_ONLY = 49,
ATTR_KIND_INACCESSIBLEMEM_OR_ARGMEMONLY = 50,
ATTR_KIND_ALLOC_SIZE = 51,
- ATTR_KIND_WRITEONLY = 52
+ ATTR_KIND_WRITEONLY = 52,
+ ATTR_KIND_SPECULATABLE = 53
};
enum ComdatSelectionKindCodes {
diff --git a/include/llvm/CodeGen/BasicTTIImpl.h b/include/llvm/CodeGen/BasicTTIImpl.h
index e30e947f787f..32542fa87463 100644
--- a/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/include/llvm/CodeGen/BasicTTIImpl.h
@@ -171,6 +171,62 @@ public:
return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
}
+ unsigned getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JumpTableSize) {
+ /// Try to find the estimated number of clusters. Note that the number of
+ /// clusters identified in this function could be different from the actual
+ /// numbers found in lowering. This function ignores switches that are
+ /// lowered with a mix of jump table / bit test / BTree. This function was
+ /// initially intended to be used when estimating the cost of a switch in
+ /// the inline cost heuristic, but it is a generic cost model to be used in
+ /// other places (e.g., in loop unrolling).
+ unsigned N = SI.getNumCases();
+ const TargetLoweringBase *TLI = getTLI();
+ const DataLayout &DL = this->getDataLayout();
+
+ JumpTableSize = 0;
+ bool IsJTAllowed = TLI->areJTsAllowed(SI.getParent()->getParent());
+
+ // Early exit if neither a jump table nor a bit test is viable.
+ if (N < 1 || (!IsJTAllowed && DL.getPointerSizeInBits() < N))
+ return N;
+
+ APInt MaxCaseVal = SI.case_begin()->getCaseValue()->getValue();
+ APInt MinCaseVal = MaxCaseVal;
+ for (auto CI : SI.cases()) {
+ const APInt &CaseVal = CI.getCaseValue()->getValue();
+ if (CaseVal.sgt(MaxCaseVal))
+ MaxCaseVal = CaseVal;
+ if (CaseVal.slt(MinCaseVal))
+ MinCaseVal = CaseVal;
+ }
+
+ // Check if suitable for a bit test
+ if (N <= DL.getPointerSizeInBits()) {
+ SmallPtrSet<const BasicBlock *, 4> Dests;
+ for (auto I : SI.cases())
+ Dests.insert(I.getCaseSuccessor());
+
+ if (TLI->isSuitableForBitTests(Dests.size(), N, MinCaseVal, MaxCaseVal,
+ DL))
+ return 1;
+ }
+
+ // Check if suitable for a jump table.
+ if (IsJTAllowed) {
+ if (N < 2 || N < TLI->getMinimumJumpTableEntries())
+ return N;
+ uint64_t Range =
+ (MaxCaseVal - MinCaseVal).getLimitedValue(UINT64_MAX - 1) + 1;
+ // Check whether a range of clusters is dense enough for a jump table
+ if (TLI->isSuitableForJumpTable(&SI, N, Range)) {
+ JumpTableSize = Range;
+ return 1;
+ }
+ }
+ return N;
+ }
+
unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }
unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
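A sketch of how a client (for example the inline cost analysis) might query the new hook through the TTI interface; the wrapper function is hypothetical:

  #include "llvm/Analysis/TargetTransformInfo.h"
  #include "llvm/IR/Instructions.h"
  using namespace llvm;

  // Sketch: ask the target how a switch is expected to be lowered.
  unsigned estimateSwitchClusters(const TargetTransformInfo &TTI,
                                  const SwitchInst &SI) {
    unsigned JumpTableSize = 0;
    unsigned Clusters = TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);
    // Clusters == 1 with a non-zero JumpTableSize means a single jump table
    // is expected; otherwise roughly one cluster per case.
    return Clusters;
  }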
diff --git a/include/llvm/CodeGen/FunctionLoweringInfo.h b/include/llvm/CodeGen/FunctionLoweringInfo.h
index 75cd7da9d6b9..14ee5019ef2f 100644
--- a/include/llvm/CodeGen/FunctionLoweringInfo.h
+++ b/include/llvm/CodeGen/FunctionLoweringInfo.h
@@ -25,6 +25,7 @@
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include <vector>
@@ -171,9 +172,8 @@ public:
struct LiveOutInfo {
unsigned NumSignBits : 31;
unsigned IsValid : 1;
- APInt KnownOne, KnownZero;
- LiveOutInfo() : NumSignBits(0), IsValid(true), KnownOne(1, 0),
- KnownZero(1, 0) {}
+ KnownBits Known;
+ LiveOutInfo() : NumSignBits(0), IsValid(true), Known(1) {}
};
/// Record the preferred extend type (ISD::SIGN_EXTEND or ISD::ZERO_EXTEND)
@@ -247,16 +247,16 @@ public:
/// AddLiveOutRegInfo - Adds LiveOutInfo for a register.
void AddLiveOutRegInfo(unsigned Reg, unsigned NumSignBits,
- const APInt &KnownZero, const APInt &KnownOne) {
+ const KnownBits &Known) {
// Only install this information if it tells us something.
- if (NumSignBits == 1 && KnownZero == 0 && KnownOne == 0)
+ if (NumSignBits == 1 && Known.Zero == 0 && Known.One == 0)
return;
LiveOutRegInfo.grow(Reg);
LiveOutInfo &LOI = LiveOutRegInfo[Reg];
LOI.NumSignBits = NumSignBits;
- LOI.KnownOne = KnownOne;
- LOI.KnownZero = KnownZero;
+ LOI.Known.One = Known.One;
+ LOI.Known.Zero = Known.Zero;
}
/// ComputePHILiveOutRegInfo - Compute LiveOutInfo for a PHI's destination
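With the migration to KnownBits, callers hand over one bundle instead of two APInts. A hedged sketch (the known-zero pattern and sign-bit count are arbitrary illustration):

  #include "llvm/CodeGen/FunctionLoweringInfo.h"
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  // Sketch: record live-out info for a vreg using the KnownBits bundle.
  void recordLiveOut(FunctionLoweringInfo &FLI, unsigned Reg) {
    KnownBits Known(32);
    Known.Zero.setBit(31);                  // illustrative: top bit known zero
    FLI.AddLiveOutRegInfo(Reg, /*NumSignBits=*/2, Known);
  }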
diff --git a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
index 899563acc330..45f25f96ec1f 100644
--- a/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
+++ b/include/llvm/CodeGen/GlobalISel/InstructionSelector.h
@@ -61,9 +61,6 @@ class InstructionSelector {
public:
virtual ~InstructionSelector() {}
- /// This is executed before selecting a function.
- virtual void beginFunction(const MachineFunction &MF) {}
-
/// Select the (possibly generic) instruction \p I to only use target-specific
/// opcodes. It is OK to insert multiple instructions, but they cannot be
/// generic pre-isel instructions.
diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h
index ee3fd0bdda2a..ca0f3fbad892 100644
--- a/include/llvm/CodeGen/ISDOpcodes.h
+++ b/include/llvm/CodeGen/ISDOpcodes.h
@@ -216,6 +216,9 @@ namespace ISD {
/// These nodes take two operands of the same value type, and produce two
/// results. The first result is the normal add or sub result, the second
/// result is the carry flag result.
+ /// FIXME: These nodes are deprecated in favor of ADDCARRY and SUBCARRY.
+ /// They are kept around for now to provide a smooth transition path
+ /// toward the use of ADDCARRY/SUBCARRY and will eventually be removed.
ADDC, SUBC,
/// Carry-using nodes for multiple precision addition and subtraction. These
@@ -227,6 +230,16 @@ namespace ISD {
/// values.
ADDE, SUBE,
+ /// Carry-using nodes for multiple precision addition and subtraction.
+ /// These nodes take three operands: The first two are the normal lhs and
+ /// rhs to the add or sub, and the third is a boolean indicating if there
+ /// is an incoming carry. These nodes produce two results: the normal
+ /// result of the add or sub, and the output carry so they can be chained
+ /// together. The use of this opcode is preferable to adde/sube if the
+ /// target supports it, as the carry is a regular value rather than a
+ /// glue, which allows further optimisation.
+ ADDCARRY, SUBCARRY,
+
/// RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition.
/// These nodes take two operands: the normal LHS and RHS to the add. They
/// produce two results: the normal result of the add, and a boolean that
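A sketch of building the new carry-using node during lowering; using MVT::i1 as the carry type is an assumption here, real targets pick whatever boolean type their lowering prescribes:

  #include "llvm/CodeGen/SelectionDAG.h"
  using namespace llvm;

  // Sketch: chain word-sized additions through the new carry value.
  // Result 0 of the node is the sum, result 1 is the carry-out.
  SDValue addWithCarry(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                       SDValue LHS, SDValue RHS, SDValue CarryIn) {
    SDVTList VTs = DAG.getVTList(VT, MVT::i1); // carry type is illustrative
    return DAG.getNode(ISD::ADDCARRY, DL, VTs, LHS, RHS, CarryIn);
  }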
diff --git a/include/llvm/CodeGen/MIRYamlMapping.h b/include/llvm/CodeGen/MIRYamlMapping.h
index 38cf8aa165a4..47b40de6fe1f 100644
--- a/include/llvm/CodeGen/MIRYamlMapping.h
+++ b/include/llvm/CodeGen/MIRYamlMapping.h
@@ -345,7 +345,7 @@ struct MachineFrameInfo {
bool HasCalls = false;
StringValue StackProtector;
// TODO: Serialize FunctionContextIdx
- unsigned MaxCallFrameSize = 0;
+ unsigned MaxCallFrameSize = ~0u; ///< ~0u means: not computed yet.
bool HasOpaqueSPAdjustment = false;
bool HasVAStart = false;
bool HasMustTailInVarArgFunc = false;
@@ -366,7 +366,7 @@ template <> struct MappingTraits<MachineFrameInfo> {
YamlIO.mapOptional("hasCalls", MFI.HasCalls);
YamlIO.mapOptional("stackProtector", MFI.StackProtector,
StringValue()); // Don't print it out when it's empty.
- YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize);
+ YamlIO.mapOptional("maxCallFrameSize", MFI.MaxCallFrameSize, ~0u);
YamlIO.mapOptional("hasOpaqueSPAdjustment", MFI.HasOpaqueSPAdjustment);
YamlIO.mapOptional("hasVAStart", MFI.HasVAStart);
YamlIO.mapOptional("hasMustTailInVarArgFunc", MFI.HasMustTailInVarArgFunc);
diff --git a/include/llvm/CodeGen/MachineFrameInfo.h b/include/llvm/CodeGen/MachineFrameInfo.h
index 5c9728b0a51e..61be9f775c97 100644
--- a/include/llvm/CodeGen/MachineFrameInfo.h
+++ b/include/llvm/CodeGen/MachineFrameInfo.h
@@ -21,15 +21,9 @@
namespace llvm {
class raw_ostream;
-class DataLayout;
-class TargetRegisterClass;
-class Type;
class MachineFunction;
class MachineBasicBlock;
-class TargetFrameLowering;
-class TargetMachine;
class BitVector;
-class Value;
class AllocaInst;
/// The CalleeSavedInfo class tracks the information need to locate where a
@@ -226,7 +220,7 @@ class MachineFrameInfo {
/// setup/destroy pseudo instructions (as defined in the TargetFrameInfo
/// class). This information is important for frame pointer elimination.
/// It is only valid during and after prolog/epilog code insertion.
- unsigned MaxCallFrameSize = 0;
+ unsigned MaxCallFrameSize = ~0u;
/// The prolog/epilog code inserter fills in this vector with each
/// callee saved register saved in the frame. Beyond its use by the prolog/
@@ -531,7 +525,16 @@ public:
/// CallFrameSetup/Destroy pseudo instructions are used by the target, and
/// then only during or after prolog/epilog code insertion.
///
- unsigned getMaxCallFrameSize() const { return MaxCallFrameSize; }
+ unsigned getMaxCallFrameSize() const {
+ // TODO: Enable this assert when targets are fixed.
+ //assert(isMaxCallFrameSizeComputed() && "MaxCallFrameSize not computed yet");
+ if (!isMaxCallFrameSizeComputed())
+ return 0;
+ return MaxCallFrameSize;
+ }
+ bool isMaxCallFrameSizeComputed() const {
+ return MaxCallFrameSize != ~0u;
+ }
void setMaxCallFrameSize(unsigned S) { MaxCallFrameSize = S; }
/// Create a new object at a fixed location on the stack.
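Since MaxCallFrameSize now starts out as ~0u ("not computed"), consumers are expected to check before reading. A hedged sketch of the intended pattern:

  #include "llvm/CodeGen/MachineFrameInfo.h"
  using namespace llvm;

  // Sketch: query the call frame size, tolerating the "not yet computed" state.
  unsigned callFrameSizeOrZero(const MachineFrameInfo &MFI) {
    if (!MFI.isMaxCallFrameSizeComputed())
      return 0; // not computed until prolog/epilog insertion runs
    return MFI.getMaxCallFrameSize();
  }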
diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h
index 4bb658898fb5..9e1d148c7ce5 100644
--- a/include/llvm/CodeGen/SelectionDAG.h
+++ b/include/llvm/CodeGen/SelectionDAG.h
@@ -33,6 +33,7 @@
namespace llvm {
+struct KnownBits;
class MachineConstantPoolValue;
class MachineFunction;
class MDNode;
@@ -687,6 +688,10 @@ public:
/// Example: shuffle A, B, <0,5,2,7> -> shuffle B, A, <4,1,6,3>
SDValue getCommutedVectorShuffle(const ShuffleVectorSDNode &SV);
+ /// Convert Op, which must be of float type, to the
+ /// float type VT, by either extending or rounding (by truncation).
+ SDValue getFPExtendOrRound(SDValue Op, const SDLoc &DL, EVT VT);
+
/// Convert Op, which must be of integer type, to the
/// integer type VT, by either any-extending or truncating it.
SDValue getAnyExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT);
@@ -773,7 +778,7 @@ public:
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDUse> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT,
- ArrayRef<SDValue> Ops, const SDNodeFlags *Flags = nullptr);
+ ArrayRef<SDValue> Ops, const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, ArrayRef<EVT> ResultTys,
ArrayRef<SDValue> Ops);
SDValue getNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
@@ -781,9 +786,10 @@ public:
// Specialize based on number of operands.
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT);
- SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N);
+ SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N,
+ const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
- SDValue N2, const SDNodeFlags *Flags = nullptr);
+ SDValue N2, const SDNodeFlags Flags = SDNodeFlags());
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
SDValue N2, SDValue N3);
SDValue getNode(unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N1,
@@ -1103,7 +1109,7 @@ public:
/// Get the specified node if it's already available, or else return NULL.
SDNode *getNodeIfExists(unsigned Opcode, SDVTList VTs, ArrayRef<SDValue> Ops,
- const SDNodeFlags *Flags = nullptr);
+ const SDNodeFlags Flags = SDNodeFlags());
/// Creates a SDDbgValue node.
SDDbgValue *getDbgValue(MDNode *Var, MDNode *Expr, SDNode *N, unsigned R,
@@ -1266,7 +1272,7 @@ public:
SDValue FoldConstantVectorArithmetic(unsigned Opcode, const SDLoc &DL, EVT VT,
ArrayRef<SDValue> Ops,
- const SDNodeFlags *Flags = nullptr);
+ const SDNodeFlags Flags = SDNodeFlags());
/// Constant fold a setcc to true or false.
SDValue FoldSetCC(EVT VT, SDValue N1, SDValue N2, ISD::CondCode Cond,
@@ -1283,21 +1289,19 @@ public:
const;
/// Determine which bits of Op are known to be either zero or one and return
- /// them in the KnownZero/KnownOne bitsets. For vectors, the known bits are
- /// those that are shared by every vector element.
+ /// them in Known. For vectors, the known bits are those that are shared by
+ /// every vector element.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
- void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
- unsigned Depth = 0) const;
+ void computeKnownBits(SDValue Op, KnownBits &Known, unsigned Depth = 0) const;
/// Determine which bits of Op are known to be either zero or one and return
- /// them in the KnownZero/KnownOne bitsets. The DemandedElts argument allows
- /// us to only collect the known bits that are shared by the requested vector
- /// elements.
+ /// them in Known. The DemandedElts argument allows us to only collect the
+ /// known bits that are shared by the requested vector elements.
/// Targets can implement the computeKnownBitsForTargetNode method in the
/// TargetLowering class to allow target nodes to be understood.
- void computeKnownBits(SDValue Op, APInt &KnownZero, APInt &KnownOne,
- const APInt &DemandedElts, unsigned Depth = 0) const;
+ void computeKnownBits(SDValue Op, KnownBits &Known, const APInt &DemandedElts,
+ unsigned Depth = 0) const;
/// Used to represent the possible overflow behavior of an operation.
/// Never: the operation cannot overflow.
@@ -1439,10 +1443,6 @@ private:
void allnodes_clear();
- SDNode *GetBinarySDNode(unsigned Opcode, const SDLoc &DL, SDVTList VTs,
- SDValue N1, SDValue N2,
- const SDNodeFlags *Flags = nullptr);
-
/// Look up the node specified by ID in CSEMap. If it exists, return it. If
/// not, return the insertion token that will make insertion faster. This
/// overload is for nodes other than Constant or ConstantFP, use the other one
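The KnownZero/KnownOne pair is folded into a single KnownBits argument; a hedged sketch of a combiner-style query (the helper is invented for illustration):

  #include "llvm/CodeGen/SelectionDAG.h"
  #include "llvm/Support/KnownBits.h"
  using namespace llvm;

  // Sketch: test whether the low bit of a DAG value is known to be zero.
  bool lowBitKnownZero(SelectionDAG &DAG, SDValue Op) {
    KnownBits Known;
    DAG.computeKnownBits(Op, Known);   // fills Known.Zero / Known.One
    return Known.Zero[0];
  }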
diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h
index 81cc0b39cf87..35ddcf80c91f 100644
--- a/include/llvm/CodeGen/SelectionDAGNodes.h
+++ b/include/llvm/CodeGen/SelectionDAGNodes.h
@@ -341,6 +341,11 @@ template<> struct simplify_type<SDUse> {
/// the backend.
struct SDNodeFlags {
private:
+ // This bit is used to determine if the flags are in a defined state.
+ // Flag bits can only be masked out during intersection if the masking flags
+ // are defined.
+ bool AnyDefined : 1;
+
bool NoUnsignedWrap : 1;
bool NoSignedWrap : 1;
bool Exact : 1;
@@ -355,22 +360,57 @@ private:
public:
/// Default constructor turns off all optimization flags.
SDNodeFlags()
- : NoUnsignedWrap(false), NoSignedWrap(false), Exact(false),
- UnsafeAlgebra(false), NoNaNs(false), NoInfs(false),
+ : AnyDefined(false), NoUnsignedWrap(false), NoSignedWrap(false),
+ Exact(false), UnsafeAlgebra(false), NoNaNs(false), NoInfs(false),
NoSignedZeros(false), AllowReciprocal(false), VectorReduction(false),
AllowContract(false) {}
+ /// Sets the state of the flags to the defined state.
+ void setDefined() { AnyDefined = true; }
+ /// Returns true if the flags are in a defined state.
+ bool isDefined() const { return AnyDefined; }
+
// These are mutators for each flag.
- void setNoUnsignedWrap(bool b) { NoUnsignedWrap = b; }
- void setNoSignedWrap(bool b) { NoSignedWrap = b; }
- void setExact(bool b) { Exact = b; }
- void setUnsafeAlgebra(bool b) { UnsafeAlgebra = b; }
- void setNoNaNs(bool b) { NoNaNs = b; }
- void setNoInfs(bool b) { NoInfs = b; }
- void setNoSignedZeros(bool b) { NoSignedZeros = b; }
- void setAllowReciprocal(bool b) { AllowReciprocal = b; }
- void setVectorReduction(bool b) { VectorReduction = b; }
- void setAllowContract(bool b) { AllowContract = b; }
+ void setNoUnsignedWrap(bool b) {
+ setDefined();
+ NoUnsignedWrap = b;
+ }
+ void setNoSignedWrap(bool b) {
+ setDefined();
+ NoSignedWrap = b;
+ }
+ void setExact(bool b) {
+ setDefined();
+ Exact = b;
+ }
+ void setUnsafeAlgebra(bool b) {
+ setDefined();
+ UnsafeAlgebra = b;
+ }
+ void setNoNaNs(bool b) {
+ setDefined();
+ NoNaNs = b;
+ }
+ void setNoInfs(bool b) {
+ setDefined();
+ NoInfs = b;
+ }
+ void setNoSignedZeros(bool b) {
+ setDefined();
+ NoSignedZeros = b;
+ }
+ void setAllowReciprocal(bool b) {
+ setDefined();
+ AllowReciprocal = b;
+ }
+ void setVectorReduction(bool b) {
+ setDefined();
+ VectorReduction = b;
+ }
+ void setAllowContract(bool b) {
+ setDefined();
+ AllowContract = b;
+ }
// These are accessors for each flag.
bool hasNoUnsignedWrap() const { return NoUnsignedWrap; }
@@ -385,17 +425,20 @@ public:
bool hasAllowContract() const { return AllowContract; }
/// Clear any flags in this flag set that aren't also set in Flags.
- void intersectWith(const SDNodeFlags *Flags) {
- NoUnsignedWrap &= Flags->NoUnsignedWrap;
- NoSignedWrap &= Flags->NoSignedWrap;
- Exact &= Flags->Exact;
- UnsafeAlgebra &= Flags->UnsafeAlgebra;
- NoNaNs &= Flags->NoNaNs;
- NoInfs &= Flags->NoInfs;
- NoSignedZeros &= Flags->NoSignedZeros;
- AllowReciprocal &= Flags->AllowReciprocal;
- VectorReduction &= Flags->VectorReduction;
- AllowContract &= Flags->AllowContract;
+ /// If the given Flags are undefined then don't do anything.
+ void intersectWith(const SDNodeFlags Flags) {
+ if (!Flags.isDefined())
+ return;
+ NoUnsignedWrap &= Flags.NoUnsignedWrap;
+ NoSignedWrap &= Flags.NoSignedWrap;
+ Exact &= Flags.Exact;
+ UnsafeAlgebra &= Flags.UnsafeAlgebra;
+ NoNaNs &= Flags.NoNaNs;
+ NoInfs &= Flags.NoInfs;
+ NoSignedZeros &= Flags.NoSignedZeros;
+ AllowReciprocal &= Flags.AllowReciprocal;
+ VectorReduction &= Flags.VectorReduction;
+ AllowContract &= Flags.AllowContract;
}
};
@@ -527,6 +570,8 @@ private:
/// Return a pointer to the specified value type.
static const EVT *getValueTypeList(EVT VT);
+ SDNodeFlags Flags;
+
public:
/// Unique and persistent id per SDNode in the DAG.
/// Used for debug printing.
@@ -799,12 +844,12 @@ public:
return nullptr;
}
- /// This could be defined as a virtual function and implemented more simply
- /// and directly, but it is not to avoid creating a vtable for this class.
- const SDNodeFlags *getFlags() const;
+ const SDNodeFlags getFlags() const { return Flags; }
+ void setFlags(SDNodeFlags NewFlags) { Flags = NewFlags; }
/// Clear any flags in this node that aren't also set in Flags.
- void intersectFlagsWith(const SDNodeFlags *Flags);
+ /// If Flags is not in a defined state then this has no effect.
+ void intersectFlagsWith(const SDNodeFlags Flags);
/// Return the number of values defined/returned by this operator.
unsigned getNumValues() const { return NumValues; }
@@ -1032,43 +1077,6 @@ inline void SDUse::setNode(SDNode *N) {
if (N) N->addUse(*this);
}
-/// Returns true if the opcode is a binary operation with flags.
-static bool isBinOpWithFlags(unsigned Opcode) {
- switch (Opcode) {
- case ISD::SDIV:
- case ISD::UDIV:
- case ISD::SRA:
- case ISD::SRL:
- case ISD::MUL:
- case ISD::ADD:
- case ISD::SUB:
- case ISD::SHL:
- case ISD::FADD:
- case ISD::FDIV:
- case ISD::FMUL:
- case ISD::FREM:
- case ISD::FSUB:
- return true;
- default:
- return false;
- }
-}
-
-/// This class is an extension of BinarySDNode
-/// used from those opcodes that have associated extra flags.
-class BinaryWithFlagsSDNode : public SDNode {
-public:
- SDNodeFlags Flags;
-
- BinaryWithFlagsSDNode(unsigned Opc, unsigned Order, const DebugLoc &dl,
- SDVTList VTs, const SDNodeFlags &NodeFlags)
- : SDNode(Opc, Order, dl, VTs), Flags(NodeFlags) {}
-
- static bool classof(const SDNode *N) {
- return isBinOpWithFlags(N->getOpcode());
- }
-};
-
/// This class is used to form a handle around another node that
/// is persistent and is updated across invocations of replaceAllUsesWith on its
/// operand. This node should be directly created by end-users and not added to
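Flags now live on every SDNode and are passed by value; setting any flag also marks the set "defined" so intersectFlagsWith can honour it. A hedged sketch of the new by-value style:

  #include "llvm/CodeGen/SelectionDAG.h"
  #include "llvm/CodeGen/SelectionDAGNodes.h"
  using namespace llvm;

  // Sketch: build an add carrying the nuw flag under the by-value flags API.
  SDValue buildNUWAdd(SelectionDAG &DAG, const SDLoc &DL, EVT VT,
                      SDValue A, SDValue B) {
    SDNodeFlags Flags;
    Flags.setNoUnsignedWrap(true);   // also flips AnyDefined to true
    return DAG.getNode(ISD::ADD, DL, VT, A, B, Flags);
  }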
diff --git a/include/llvm/CodeGen/ValueTypes.td b/include/llvm/CodeGen/ValueTypes.td
index cd8434475451..b87a5e56699e 100644
--- a/include/llvm/CodeGen/ValueTypes.td
+++ b/include/llvm/CodeGen/ValueTypes.td
@@ -42,7 +42,7 @@ def v64i1 : ValueType<64 , 19>; // 64 x i1 vector value
def v512i1 : ValueType<512, 20>; // 512 x i1 vector value
def v1024i1: ValueType<1024,21>; //1024 x i1 vector value
-def v1i8 : ValueType<16, 22>; // 1 x i8 vector value
+def v1i8 : ValueType<8, 22>; // 1 x i8 vector value
def v2i8 : ValueType<16 , 23>; // 2 x i8 vector value
def v4i8 : ValueType<32 , 24>; // 4 x i8 vector value
def v8i8 : ValueType<64 , 25>; // 8 x i8 vector value
diff --git a/include/llvm/DebugInfo/CodeView/CVRecord.h b/include/llvm/DebugInfo/CodeView/CVRecord.h
index 487f3b6446fa..086d6dff11c5 100644
--- a/include/llvm/DebugInfo/CodeView/CVRecord.h
+++ b/include/llvm/DebugInfo/CodeView/CVRecord.h
@@ -50,8 +50,10 @@ public:
template <typename Kind>
struct VarStreamArrayExtractor<codeview::CVRecord<Kind>> {
- Error operator()(BinaryStreamRef Stream, uint32_t &Len,
- codeview::CVRecord<Kind> &Item) const {
+ typedef void ContextType;
+
+ static Error extract(BinaryStreamRef Stream, uint32_t &Len,
+ codeview::CVRecord<Kind> &Item, void *Ctx) {
using namespace codeview;
const RecordPrefix *Prefix = nullptr;
BinaryStreamReader Reader(Stream);
diff --git a/include/llvm/DebugInfo/CodeView/CodeView.h b/include/llvm/DebugInfo/CodeView/CodeView.h
index e599f8a19e34..f881ad0c9d80 100644
--- a/include/llvm/DebugInfo/CodeView/CodeView.h
+++ b/include/llvm/DebugInfo/CodeView/CodeView.h
@@ -291,7 +291,7 @@ enum class ModifierOptions : uint16_t {
};
CV_DEFINE_ENUM_CLASS_FLAGS_OPERATORS(ModifierOptions)
-enum class ModuleSubstreamKind : uint32_t {
+enum class ModuleDebugFragmentKind : uint32_t {
None = 0,
Symbols = 0xf1,
Lines = 0xf2,
@@ -547,7 +547,8 @@ enum class TrampolineType : uint16_t { TrampIncremental, BranchIsland };
enum class FileChecksumKind : uint8_t { None, MD5, SHA1, SHA256 };
enum LineFlags : uint16_t {
- HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
+ LF_None = 0,
+ LF_HaveColumns = 1, // CV_LINES_HAVE_COLUMNS
};
}
}
diff --git a/include/llvm/DebugInfo/CodeView/Line.h b/include/llvm/DebugInfo/CodeView/Line.h
index 975b503fe30b..ac229c337513 100644
--- a/include/llvm/DebugInfo/CodeView/Line.h
+++ b/include/llvm/DebugInfo/CodeView/Line.h
@@ -127,27 +127,6 @@ public:
bool isNeverStepInto() const { return LineInf.isNeverStepInto(); }
};
-enum class InlineeLinesSignature : uint32_t {
- Normal, // CV_INLINEE_SOURCE_LINE_SIGNATURE
- ExtraFiles // CV_INLINEE_SOURCE_LINE_SIGNATURE_EX
-};
-
-struct InlineeSourceLine {
- TypeIndex Inlinee; // ID of the function that was inlined.
- ulittle32_t FileID; // Offset into FileChecksums subsection.
- ulittle32_t SourceLineNum; // First line of inlined code.
- // If extra files present:
- // ulittle32_t ExtraFileCount;
- // ulittle32_t Files[];
-};
-
-struct FileChecksum {
- ulittle32_t FileNameOffset; // Byte offset of filename in global string table.
- uint8_t ChecksumSize; // Number of bytes of checksum.
- uint8_t ChecksumKind; // FileChecksumKind
- // Checksum bytes follow.
-};
-
} // namespace codeview
} // namespace llvm
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.h b/include/llvm/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.h
new file mode 100644
index 000000000000..a5a3b851b841
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.h
@@ -0,0 +1,91 @@
+//===- ModuleDebugFileChecksumFragment.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFILECHECKSUMFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFILECHECKSUMFRAGMENT_H
+
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
+#include "llvm/Support/Allocator.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Endian.h"
+
+namespace llvm {
+namespace codeview {
+
+struct FileChecksumEntry {
+ uint32_t FileNameOffset; // Byte offset of filename in global stringtable.
+ FileChecksumKind Kind; // The type of checksum.
+ ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
+};
+}
+}
+
+namespace llvm {
+template <> struct VarStreamArrayExtractor<codeview::FileChecksumEntry> {
+public:
+ typedef void ContextType;
+
+ static Error extract(BinaryStreamRef Stream, uint32_t &Len,
+ codeview::FileChecksumEntry &Item, void *Ctx);
+};
+}
+
+namespace llvm {
+namespace codeview {
+class ModuleDebugFileChecksumFragmentRef final : public ModuleDebugFragmentRef {
+ typedef VarStreamArray<codeview::FileChecksumEntry> FileChecksumArray;
+ typedef FileChecksumArray::Iterator Iterator;
+
+public:
+ ModuleDebugFileChecksumFragmentRef()
+ : ModuleDebugFragmentRef(ModuleDebugFragmentKind::FileChecksums) {}
+
+ static bool classof(const ModuleDebugFragmentRef *S) {
+ return S->kind() == ModuleDebugFragmentKind::FileChecksums;
+ }
+
+ Error initialize(BinaryStreamReader Reader);
+
+ Iterator begin() const { return Checksums.begin(); }
+ Iterator end() const { return Checksums.end(); }
+
+ const FileChecksumArray &getArray() const { return Checksums; }
+
+private:
+ FileChecksumArray Checksums;
+};
+
+class ModuleDebugFileChecksumFragment final : public ModuleDebugFragment {
+public:
+ ModuleDebugFileChecksumFragment();
+
+ static bool classof(const ModuleDebugFragment *S) {
+ return S->kind() == ModuleDebugFragmentKind::FileChecksums;
+ }
+
+ void addChecksum(uint32_t StringTableOffset, FileChecksumKind Kind,
+ ArrayRef<uint8_t> Bytes);
+
+ uint32_t calculateSerializedLength() override;
+ Error commit(BinaryStreamWriter &Writer) override;
+ uint32_t mapChecksumOffset(uint32_t StringTableOffset) const;
+
+private:
+ DenseMap<uint32_t, uint32_t> OffsetMap;
+ uint32_t SerializedSize = 0;
+ llvm::BumpPtrAllocator Storage;
+ std::vector<FileChecksumEntry> Checksums;
+};
+}
+}
+
+#endif
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugFragment.h b/include/llvm/DebugInfo/CodeView/ModuleDebugFragment.h
new file mode 100644
index 000000000000..a5311cae9480
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugFragment.h
@@ -0,0 +1,48 @@
+//===- ModuleDebugFragment.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Casting.h"
+
+namespace llvm {
+namespace codeview {
+
+class ModuleDebugFragmentRef {
+public:
+ explicit ModuleDebugFragmentRef(ModuleDebugFragmentKind Kind) : Kind(Kind) {}
+ virtual ~ModuleDebugFragmentRef();
+
+ ModuleDebugFragmentKind kind() const { return Kind; }
+
+protected:
+ ModuleDebugFragmentKind Kind;
+};
+
+class ModuleDebugFragment {
+public:
+ explicit ModuleDebugFragment(ModuleDebugFragmentKind Kind) : Kind(Kind) {}
+ virtual ~ModuleDebugFragment();
+
+ ModuleDebugFragmentKind kind() const { return Kind; }
+
+ virtual Error commit(BinaryStreamWriter &Writer) = 0;
+ virtual uint32_t calculateSerializedLength() = 0;
+
+protected:
+ ModuleDebugFragmentKind Kind;
+};
+
+} // namespace codeview
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENT_H
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentRecord.h b/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentRecord.h
new file mode 100644
index 000000000000..b98c8605592c
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentRecord.h
@@ -0,0 +1,78 @@
+//===- ModuleDebugFragment.h ------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTRECORD_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTRECORD_H
+
+#include "llvm/DebugInfo/CodeView/CodeView.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamRef.h"
+#include "llvm/Support/BinaryStreamWriter.h"
+#include "llvm/Support/Endian.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class ModuleDebugFragment;
+
+// Corresponds to the `CV_DebugSSubsectionHeader_t` structure.
+struct ModuleDebugFragmentHeader {
+ support::ulittle32_t Kind; // codeview::ModuleDebugFragmentKind enum
+ support::ulittle32_t Length; // number of bytes occupied by this record.
+};
+
+class ModuleDebugFragmentRecord {
+public:
+ ModuleDebugFragmentRecord();
+ ModuleDebugFragmentRecord(ModuleDebugFragmentKind Kind, BinaryStreamRef Data);
+
+ static Error initialize(BinaryStreamRef Stream,
+ ModuleDebugFragmentRecord &Info);
+
+ uint32_t getRecordLength() const;
+ ModuleDebugFragmentKind kind() const;
+ BinaryStreamRef getRecordData() const;
+
+private:
+ ModuleDebugFragmentKind Kind;
+ BinaryStreamRef Data;
+};
+
+class ModuleDebugFragmentRecordBuilder {
+public:
+ ModuleDebugFragmentRecordBuilder(ModuleDebugFragmentKind Kind,
+ ModuleDebugFragment &Frag);
+ uint32_t calculateSerializedLength();
+ Error commit(BinaryStreamWriter &Writer);
+
+private:
+ ModuleDebugFragmentKind Kind;
+ ModuleDebugFragment &Frag;
+};
+
+typedef VarStreamArray<ModuleDebugFragmentRecord> ModuleDebugFragmentArray;
+
+} // namespace codeview
+
+template <>
+struct VarStreamArrayExtractor<codeview::ModuleDebugFragmentRecord> {
+ typedef void ContextType;
+
+ static Error extract(BinaryStreamRef Stream, uint32_t &Length,
+ codeview::ModuleDebugFragmentRecord &Info, void *Ctx) {
+ if (auto EC = codeview::ModuleDebugFragmentRecord::initialize(Stream, Info))
+ return EC;
+ Length = Info.getRecordLength();
+ return Error::success();
+ }
+};
+} // namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTRECORD_H
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentVisitor.h b/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentVisitor.h
new file mode 100644
index 000000000000..1f55d2024203
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugFragmentVisitor.h
@@ -0,0 +1,68 @@
+//===- ModuleDebugFragmentVisitor.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
+
+#include "llvm/Support/Error.h"
+#include <cstdint>
+
+namespace llvm {
+
+namespace codeview {
+
+class ModuleDebugFileChecksumFragmentRef;
+class ModuleDebugFragmentRecord;
+class ModuleDebugInlineeLineFragmentRef;
+class ModuleDebugLineFragmentRef;
+class ModuleDebugUnknownFragmentRef;
+
+class ModuleDebugFragmentVisitor {
+public:
+ virtual ~ModuleDebugFragmentVisitor() = default;
+
+ virtual Error visitUnknown(ModuleDebugUnknownFragmentRef &Unknown) {
+ return Error::success();
+ }
+ virtual Error visitLines(ModuleDebugLineFragmentRef &Lines) {
+ return Error::success();
+ }
+
+ virtual Error
+ visitFileChecksums(ModuleDebugFileChecksumFragmentRef &Checksums) {
+ return Error::success();
+ }
+
+ virtual Error visitInlineeLines(ModuleDebugInlineeLineFragmentRef &Inlinees) {
+ return Error::success();
+ }
+
+ virtual Error finished() { return Error::success(); }
+};
+
+Error visitModuleDebugFragment(const ModuleDebugFragmentRecord &R,
+ ModuleDebugFragmentVisitor &V);
+
+template <typename T>
+Error visitModuleDebugFragments(T &&FragmentRange,
+ ModuleDebugFragmentVisitor &V) {
+ for (const auto &L : FragmentRange) {
+ if (auto EC = visitModuleDebugFragment(L, V))
+ return EC;
+ }
+ if (auto EC = V.finished())
+ return EC;
+ return Error::success();
+}
+
+} // end namespace codeview
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGFRAGMENTVISITOR_H
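
A minimal sketch of the visitor interface declared above, relying only on this header plus ModuleDebugLineFragmentRef from the line fragment header; the class and function names are illustrative.

    #include "llvm/DebugInfo/CodeView/ModuleDebugFragmentVisitor.h"
    #include "llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h"
    #include <utility>

    using namespace llvm;
    using namespace llvm::codeview;

    // Count line fragments in a range of ModuleDebugFragmentRecords
    // (illustrative sketch).
    class LineFragmentCounter : public ModuleDebugFragmentVisitor {
    public:
      Error visitLines(ModuleDebugLineFragmentRef &Lines) override {
        ++NumLineFragments;
        return Error::success();
      }
      unsigned NumLineFragments = 0;
    };

    template <typename RangeT>
    Expected<unsigned> countLineFragments(RangeT &&Fragments) {
      LineFragmentCounter Counter;
      // visitModuleDebugFragments dispatches each record to the matching
      // visitXXX hook and calls finished() once the range is exhausted.
      if (auto EC = visitModuleDebugFragments(Fragments, Counter))
        return std::move(EC);
      return Counter.NumLineFragments;
    }
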
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h b/include/llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h
new file mode 100644
index 000000000000..177367c111c3
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h
@@ -0,0 +1,103 @@
+//===- ModuleDebugInlineeLinesFragment.h ------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGINLINEELINESFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGINLINEELINESFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+class ModuleDebugInlineeLineFragmentRef;
+
+enum class InlineeLinesSignature : uint32_t {
+ Normal, // CV_INLINEE_SOURCE_LINE_SIGNATURE
+ ExtraFiles // CV_INLINEE_SOURCE_LINE_SIGNATURE_EX
+};
+
+struct InlineeSourceLineHeader {
+ TypeIndex Inlinee; // ID of the function that was inlined.
+ support::ulittle32_t FileID; // Offset into FileChecksums subsection.
+ support::ulittle32_t SourceLineNum; // First line of inlined code.
+ // If extra files present:
+ // ulittle32_t ExtraFileCount;
+ // ulittle32_t Files[];
+};
+
+struct InlineeSourceLine {
+ const InlineeSourceLineHeader *Header;
+ FixedStreamArray<support::ulittle32_t> ExtraFiles;
+};
+}
+
+template <> struct VarStreamArrayExtractor<codeview::InlineeSourceLine> {
+ typedef codeview::ModuleDebugInlineeLineFragmentRef ContextType;
+
+ static Error extract(BinaryStreamRef Stream, uint32_t &Len,
+ codeview::InlineeSourceLine &Item,
+ ContextType *Fragment);
+};
+
+namespace codeview {
+class ModuleDebugInlineeLineFragmentRef final : public ModuleDebugFragmentRef {
+ typedef VarStreamArray<InlineeSourceLine> LinesArray;
+ typedef LinesArray::Iterator Iterator;
+
+public:
+ ModuleDebugInlineeLineFragmentRef();
+
+ static bool classof(const ModuleDebugFragmentRef *S) {
+ return S->kind() == ModuleDebugFragmentKind::InlineeLines;
+ }
+
+ Error initialize(BinaryStreamReader Reader);
+ bool hasExtraFiles() const;
+
+ Iterator begin() const { return Lines.begin(); }
+ Iterator end() const { return Lines.end(); }
+
+private:
+ InlineeLinesSignature Signature;
+ VarStreamArray<InlineeSourceLine> Lines;
+};
+
+class ModuleDebugInlineeLineFragment final : public ModuleDebugFragment {
+public:
+ explicit ModuleDebugInlineeLineFragment(bool HasExtraFiles);
+
+ static bool classof(const ModuleDebugFragment *S) {
+ return S->kind() == ModuleDebugFragmentKind::InlineeLines;
+ }
+
+ Error commit(BinaryStreamWriter &Writer) override;
+ uint32_t calculateSerializedLength() override;
+
+ void addInlineSite(TypeIndex FuncId, uint32_t FileOffset,
+ uint32_t SourceLine);
+ void addExtraFile(uint32_t FileOffset);
+
+private:
+ bool HasExtraFiles = false;
+ uint32_t ExtraFileCount = 0;
+
+ struct Entry {
+ std::vector<support::ulittle32_t> ExtraFiles;
+ InlineeSourceLineHeader Header;
+ };
+ std::vector<Entry> Entries;
+};
+}
+}
+
+#endif
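
A minimal sketch of the writer side of this fragment, using only the ModuleDebugInlineeLineFragment interface above; the TypeIndex and offset values are placeholders, not taken from this patch.

    #include "llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h"
    #include "llvm/Support/BinaryStreamWriter.h"

    using namespace llvm;
    using namespace llvm::codeview;

    // Build an inlinee-lines fragment with extra-file records and serialize
    // it (illustrative sketch; values are placeholders).
    static Error writeInlineeLines(BinaryStreamWriter &Writer) {
      ModuleDebugInlineeLineFragment Fragment(/*HasExtraFiles=*/true);
      Fragment.addInlineSite(TypeIndex(0x1001), /*FileOffset=*/0,
                             /*SourceLine=*/42);
      Fragment.addExtraFile(/*FileOffset=*/8);
      // calculateSerializedLength() lets callers reserve space before commit.
      (void)Fragment.calculateSerializedLength();
      return Fragment.commit(Writer);
    }
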
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h b/include/llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h
new file mode 100644
index 000000000000..dcfe86dd8503
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h
@@ -0,0 +1,137 @@
+//===- ModuleDebugLineFragment.h --------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGLINEFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGLINEFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
+#include "llvm/Support/BinaryStreamArray.h"
+#include "llvm/Support/BinaryStreamReader.h"
+#include "llvm/Support/Error.h"
+
+namespace llvm {
+namespace codeview {
+
+// Corresponds to the `CV_DebugSLinesHeader_t` structure.
+struct LineFragmentHeader {
+ support::ulittle32_t RelocOffset; // Code offset of line contribution.
+ support::ulittle16_t RelocSegment; // Code segment of line contribution.
+ support::ulittle16_t Flags; // See LineFlags enumeration.
+ support::ulittle32_t CodeSize; // Code size of this line contribution.
+};
+
+// Corresponds to the `CV_DebugSLinesFileBlockHeader_t` structure.
+struct LineBlockFragmentHeader {
+ support::ulittle32_t NameIndex; // Offset of FileChecksum entry in File
+ // checksums buffer. The checksum entry then
+ // contains another offset into the string
+ // table of the actual name.
+ support::ulittle32_t NumLines; // Number of lines
+ support::ulittle32_t BlockSize; // Code size of block, in bytes.
+ // The following two variable length arrays appear immediately after the
+ // header. The structure definitions follow.
+ // LineNumberEntry Lines[NumLines];
+ // ColumnNumberEntry Columns[NumLines];
+};
+
+// Corresponds to `CV_Line_t` structure
+struct LineNumberEntry {
+ support::ulittle32_t Offset; // Offset to start of code bytes for line number
+ support::ulittle32_t Flags; // Start:24, End:7, IsStatement:1
+};
+
+// Corresponds to `CV_Column_t` structure
+struct ColumnNumberEntry {
+ support::ulittle16_t StartColumn;
+ support::ulittle16_t EndColumn;
+};
+
+struct LineColumnEntry {
+ support::ulittle32_t NameIndex;
+ FixedStreamArray<LineNumberEntry> LineNumbers;
+ FixedStreamArray<ColumnNumberEntry> Columns;
+};
+
+class LineColumnExtractor {
+public:
+ typedef const LineFragmentHeader ContextType;
+
+ static Error extract(BinaryStreamRef Stream, uint32_t &Len,
+ LineColumnEntry &Item, const LineFragmentHeader *Header);
+};
+
+class ModuleDebugLineFragmentRef final : public ModuleDebugFragmentRef {
+ friend class LineColumnExtractor;
+ typedef VarStreamArray<LineColumnEntry, LineColumnExtractor> LineInfoArray;
+ typedef LineInfoArray::Iterator Iterator;
+
+public:
+ ModuleDebugLineFragmentRef();
+
+ static bool classof(const ModuleDebugFragmentRef *S) {
+ return S->kind() == ModuleDebugFragmentKind::Lines;
+ }
+
+ Error initialize(BinaryStreamReader Reader);
+
+ Iterator begin() const { return LinesAndColumns.begin(); }
+ Iterator end() const { return LinesAndColumns.end(); }
+
+ const LineFragmentHeader *header() const { return Header; }
+
+ bool hasColumnInfo() const;
+
+private:
+ const LineFragmentHeader *Header = nullptr;
+ LineInfoArray LinesAndColumns;
+};
+
+class ModuleDebugLineFragment final : public ModuleDebugFragment {
+ struct Block {
+ Block(uint32_t ChecksumBufferOffset)
+ : ChecksumBufferOffset(ChecksumBufferOffset) {}
+
+ uint32_t ChecksumBufferOffset;
+ std::vector<LineNumberEntry> Lines;
+ std::vector<ColumnNumberEntry> Columns;
+ };
+
+public:
+ ModuleDebugLineFragment();
+
+ static bool classof(const ModuleDebugFragment *S) {
+ return S->kind() == ModuleDebugFragmentKind::Lines;
+ }
+
+ void createBlock(uint32_t ChecksumBufferOffset);
+ void addLineInfo(uint32_t Offset, const LineInfo &Line);
+ void addLineAndColumnInfo(uint32_t Offset, const LineInfo &Line,
+ uint32_t ColStart, uint32_t ColEnd);
+
+ uint32_t calculateSerializedLength() override;
+ Error commit(BinaryStreamWriter &Writer) override;
+
+ void setRelocationAddress(uint16_t Segment, uint16_t Offset);
+ void setCodeSize(uint32_t Size);
+ void setFlags(LineFlags Flags);
+
+ bool hasColumnInfo() const;
+
+private:
+ uint16_t RelocOffset = 0;
+ uint16_t RelocSegment = 0;
+ uint32_t CodeSize = 0;
+ LineFlags Flags = LF_None;
+ std::vector<Block> Blocks;
+};
+}
+}
+
+#endif
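
A matching sketch for the line-fragment writer above. The LineInfo constructor taking (StartLine, EndLine, IsStatement) is assumed from Line.h, and the segment/offset/size values are placeholders.

    #include "llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h"
    #include "llvm/Support/BinaryStreamWriter.h"

    using namespace llvm;
    using namespace llvm::codeview;

    // Emit line records for one contribution of code (illustrative sketch).
    static Error writeLines(BinaryStreamWriter &Writer) {
      ModuleDebugLineFragment Fragment;
      Fragment.setRelocationAddress(/*Segment=*/1, /*Offset=*/0);
      Fragment.setCodeSize(/*Size=*/0x20);
      Fragment.setFlags(LF_None);
      // One block per referenced entry in the file checksums subsection.
      Fragment.createBlock(/*ChecksumBufferOffset=*/0);
      Fragment.addLineInfo(/*Offset=*/0x0,
                           LineInfo(/*StartLine=*/10, /*EndLine=*/10,
                                    /*IsStatement=*/true));
      return Fragment.commit(Writer);
    }
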
diff --git a/include/llvm/DebugInfo/CodeView/ModuleDebugUnknownFragment.h b/include/llvm/DebugInfo/CodeView/ModuleDebugUnknownFragment.h
new file mode 100644
index 000000000000..b8c1c02e5cf1
--- /dev/null
+++ b/include/llvm/DebugInfo/CodeView/ModuleDebugUnknownFragment.h
@@ -0,0 +1,33 @@
+//===- ModuleDebugUnknownFragment.h -----------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGUNKNOWNFRAGMENT_H
+#define LLVM_DEBUGINFO_CODEVIEW_MODULEDEBUGUNKNOWNFRAGMENT_H
+
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
+#include "llvm/Support/BinaryStreamRef.h"
+
+namespace llvm {
+namespace codeview {
+
+class ModuleDebugUnknownFragmentRef final : public ModuleDebugFragmentRef {
+public:
+ ModuleDebugUnknownFragmentRef(ModuleDebugFragmentKind Kind,
+ BinaryStreamRef Data)
+ : ModuleDebugFragmentRef(Kind), Data(Data) {}
+
+ BinaryStreamRef getData() const { return Data; }
+
+private:
+ BinaryStreamRef Data;
+};
+}
+}
+
+#endif
diff --git a/include/llvm/DebugInfo/CodeView/ModuleSubstream.h b/include/llvm/DebugInfo/CodeView/ModuleSubstream.h
deleted file mode 100644
index a1c5c93cc3f8..000000000000
--- a/include/llvm/DebugInfo/CodeView/ModuleSubstream.h
+++ /dev/null
@@ -1,87 +0,0 @@
-//===- ModuleSubstream.h ----------------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAM_H
-#define LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAM_H
-
-#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/Support/BinaryStreamArray.h"
-#include "llvm/Support/BinaryStreamRef.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/Error.h"
-
-namespace llvm {
-namespace codeview {
-
-// Corresponds to the `CV_DebugSSubsectionHeader_t` structure.
-struct ModuleSubsectionHeader {
- support::ulittle32_t Kind; // codeview::ModuleSubstreamKind enum
- support::ulittle32_t Length; // number of bytes occupied by this record.
-};
-
-// Corresponds to the `CV_DebugSLinesHeader_t` structure.
-struct LineSubstreamHeader {
- support::ulittle32_t RelocOffset; // Code offset of line contribution.
- support::ulittle16_t RelocSegment; // Code segment of line contribution.
- support::ulittle16_t Flags; // See LineFlags enumeration.
- support::ulittle32_t CodeSize; // Code size of this line contribution.
-};
-
-// Corresponds to the `CV_DebugSLinesFileBlockHeader_t` structure.
-struct LineFileBlockHeader {
- support::ulittle32_t NameIndex; // Index in DBI name buffer of filename.
- support::ulittle32_t NumLines; // Number of lines
- support::ulittle32_t BlockSize; // Code size of block, in bytes.
- // The following two variable length arrays appear immediately after the
- // header. The structure definitions follow.
- // LineNumberEntry Lines[NumLines];
- // ColumnNumberEntry Columns[NumLines];
-};
-
-// Corresponds to `CV_Line_t` structure
-struct LineNumberEntry {
- support::ulittle32_t Offset; // Offset to start of code bytes for line number
- support::ulittle32_t Flags; // Start:24, End:7, IsStatement:1
-};
-
-// Corresponds to `CV_Column_t` structure
-struct ColumnNumberEntry {
- support::ulittle16_t StartColumn;
- support::ulittle16_t EndColumn;
-};
-
-class ModuleSubstream {
-public:
- ModuleSubstream();
- ModuleSubstream(ModuleSubstreamKind Kind, BinaryStreamRef Data);
- static Error initialize(BinaryStreamRef Stream, ModuleSubstream &Info);
- uint32_t getRecordLength() const;
- ModuleSubstreamKind getSubstreamKind() const;
- BinaryStreamRef getRecordData() const;
-
-private:
- ModuleSubstreamKind Kind;
- BinaryStreamRef Data;
-};
-
-typedef VarStreamArray<ModuleSubstream> ModuleSubstreamArray;
-} // namespace codeview
-
-template <> struct VarStreamArrayExtractor<codeview::ModuleSubstream> {
- Error operator()(BinaryStreamRef Stream, uint32_t &Length,
- codeview::ModuleSubstream &Info) const {
- if (auto EC = codeview::ModuleSubstream::initialize(Stream, Info))
- return EC;
- Length = Info.getRecordLength();
- return Error::success();
- }
-};
-} // namespace llvm
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAM_H
diff --git a/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h b/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h
deleted file mode 100644
index 31344a9427db..000000000000
--- a/include/llvm/DebugInfo/CodeView/ModuleSubstreamVisitor.h
+++ /dev/null
@@ -1,132 +0,0 @@
-//===- ModuleSubstreamVisitor.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
-#define LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
-
-#include "llvm/ADT/ArrayRef.h"
-#include "llvm/DebugInfo/CodeView/CodeView.h"
-#include "llvm/DebugInfo/CodeView/CodeViewError.h"
-#include "llvm/DebugInfo/CodeView/Line.h"
-#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
-#include "llvm/Support/BinaryStreamArray.h"
-#include "llvm/Support/BinaryStreamReader.h"
-#include "llvm/Support/BinaryStreamRef.h"
-#include "llvm/Support/Endian.h"
-#include "llvm/Support/Error.h"
-#include <cstdint>
-
-namespace llvm {
-
-namespace codeview {
-
-struct LineColumnEntry {
- support::ulittle32_t NameIndex;
- FixedStreamArray<LineNumberEntry> LineNumbers;
- FixedStreamArray<ColumnNumberEntry> Columns;
-};
-
-struct FileChecksumEntry {
- uint32_t FileNameOffset; // Byte offset of filename in global stringtable.
- FileChecksumKind Kind; // The type of checksum.
- ArrayRef<uint8_t> Checksum; // The bytes of the checksum.
-};
-
-typedef VarStreamArray<LineColumnEntry> LineInfoArray;
-typedef VarStreamArray<FileChecksumEntry> FileChecksumArray;
-
-class IModuleSubstreamVisitor {
-public:
- virtual ~IModuleSubstreamVisitor() = default;
-
- virtual Error visitUnknown(ModuleSubstreamKind Kind,
- BinaryStreamRef Data) = 0;
- virtual Error visitSymbols(BinaryStreamRef Data);
- virtual Error visitLines(BinaryStreamRef Data,
- const LineSubstreamHeader *Header,
- const LineInfoArray &Lines);
- virtual Error visitStringTable(BinaryStreamRef Data);
- virtual Error visitFileChecksums(BinaryStreamRef Data,
- const FileChecksumArray &Checksums);
- virtual Error visitFrameData(BinaryStreamRef Data);
- virtual Error visitInlineeLines(BinaryStreamRef Data);
- virtual Error visitCrossScopeImports(BinaryStreamRef Data);
- virtual Error visitCrossScopeExports(BinaryStreamRef Data);
- virtual Error visitILLines(BinaryStreamRef Data);
- virtual Error visitFuncMDTokenMap(BinaryStreamRef Data);
- virtual Error visitTypeMDTokenMap(BinaryStreamRef Data);
- virtual Error visitMergedAssemblyInput(BinaryStreamRef Data);
- virtual Error visitCoffSymbolRVA(BinaryStreamRef Data);
-};
-
-Error visitModuleSubstream(const ModuleSubstream &R,
- IModuleSubstreamVisitor &V);
-} // end namespace codeview
-
-template <> class VarStreamArrayExtractor<codeview::LineColumnEntry> {
-public:
- VarStreamArrayExtractor(const codeview::LineSubstreamHeader *Header)
- : Header(Header) {}
-
- Error operator()(BinaryStreamRef Stream, uint32_t &Len,
- codeview::LineColumnEntry &Item) const {
- using namespace codeview;
- const LineFileBlockHeader *BlockHeader;
- BinaryStreamReader Reader(Stream);
- if (auto EC = Reader.readObject(BlockHeader))
- return EC;
- bool HasColumn = Header->Flags & uint32_t(LineFlags::HaveColumns);
- uint32_t LineInfoSize =
- BlockHeader->NumLines *
- (sizeof(LineNumberEntry) + (HasColumn ? sizeof(ColumnNumberEntry) : 0));
- if (BlockHeader->BlockSize < sizeof(LineFileBlockHeader))
- return make_error<CodeViewError>(cv_error_code::corrupt_record,
- "Invalid line block record size");
- uint32_t Size = BlockHeader->BlockSize - sizeof(LineFileBlockHeader);
- if (LineInfoSize > Size)
- return make_error<CodeViewError>(cv_error_code::corrupt_record,
- "Invalid line block record size");
- // The value recorded in BlockHeader->BlockSize includes the size of
- // LineFileBlockHeader.
- Len = BlockHeader->BlockSize;
- Item.NameIndex = BlockHeader->NameIndex;
- if (auto EC = Reader.readArray(Item.LineNumbers, BlockHeader->NumLines))
- return EC;
- if (HasColumn) {
- if (auto EC = Reader.readArray(Item.Columns, BlockHeader->NumLines))
- return EC;
- }
- return Error::success();
- }
-
-private:
- const codeview::LineSubstreamHeader *Header;
-};
-
-template <> class VarStreamArrayExtractor<codeview::FileChecksumEntry> {
-public:
- Error operator()(BinaryStreamRef Stream, uint32_t &Len,
- codeview::FileChecksumEntry &Item) const {
- using namespace codeview;
- const FileChecksum *Header;
- BinaryStreamReader Reader(Stream);
- if (auto EC = Reader.readObject(Header))
- return EC;
- Item.FileNameOffset = Header->FileNameOffset;
- Item.Kind = static_cast<FileChecksumKind>(Header->ChecksumKind);
- if (auto EC = Reader.readBytes(Item.Checksum, Header->ChecksumSize))
- return EC;
- Len = sizeof(FileChecksum) + Header->ChecksumSize;
- return Error::success();
- }
-};
-
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_CODEVIEW_MODULESUBSTREAMVISITOR_H
diff --git a/include/llvm/DebugInfo/CodeView/TypeDatabase.h b/include/llvm/DebugInfo/CodeView/TypeDatabase.h
index 54ad862cfa7e..220de4bf0ee4 100644
--- a/include/llvm/DebugInfo/CodeView/TypeDatabase.h
+++ b/include/llvm/DebugInfo/CodeView/TypeDatabase.h
@@ -35,6 +35,7 @@ public:
StringRef getTypeName(TypeIndex Index) const;
const CVType &getTypeRecord(TypeIndex Index) const;
+ CVType &getTypeRecord(TypeIndex Index);
bool containsTypeIndex(TypeIndex Index) const;
diff --git a/include/llvm/DebugInfo/DIContext.h b/include/llvm/DebugInfo/DIContext.h
index e3386a8dcd24..d51408122fc9 100644
--- a/include/llvm/DebugInfo/DIContext.h
+++ b/include/llvm/DebugInfo/DIContext.h
@@ -161,6 +161,10 @@ public:
virtual void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All,
bool DumpEH = false, bool SummarizeTypes = false) = 0;
+ virtual bool verify(raw_ostream &OS, DIDumpType DumpType = DIDT_All) {
+ // No verifier? Just say things went well.
+ return true;
+ }
virtual DILineInfo getLineInfoForAddress(uint64_t Address,
DILineInfoSpecifier Specifier = DILineInfoSpecifier()) = 0;
virtual DILineInfoTable getLineInfoForAddressRange(uint64_t Address,
diff --git a/include/llvm/DebugInfo/DWARF/DWARFContext.h b/include/llvm/DebugInfo/DWARF/DWARFContext.h
index d89e2c684cd3..3c04c6716ea3 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFContext.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFContext.h
@@ -106,6 +106,8 @@ public:
void dump(raw_ostream &OS, DIDumpType DumpType = DIDT_All,
bool DumpEH = false, bool SummarizeTypes = false) override;
+ bool verify(raw_ostream &OS, DIDumpType DumpType = DIDT_All) override;
+
typedef DWARFUnitSection<DWARFCompileUnit>::iterator_range cu_iterator_range;
typedef DWARFUnitSection<DWARFTypeUnit>::iterator_range tu_iterator_range;
typedef iterator_range<decltype(TUs)::iterator> tu_section_iterator_range;
diff --git a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
index e5bb24707b63..dd0e2648bf30 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFDebugLine.h
@@ -24,7 +24,8 @@ class raw_ostream;
class DWARFDebugLine {
public:
- DWARFDebugLine(const RelocAddrMap* LineInfoRelocMap) : RelocMap(LineInfoRelocMap) {}
+ DWARFDebugLine(const RelocAddrMap *LineInfoRelocMap)
+ : RelocMap(LineInfoRelocMap) {}
struct FileNameEntry {
FileNameEntry() = default;
@@ -38,50 +39,46 @@ public:
struct Prologue {
Prologue();
- // The size in bytes of the statement information for this compilation unit
- // (not including the total_length field itself).
+ /// The size in bytes of the statement information for this compilation unit
+ /// (not including the total_length field itself).
uint64_t TotalLength;
- // Version identifier for the statement information format.
+ /// Version identifier for the statement information format.
uint16_t Version;
- // The number of bytes following the prologue_length field to the beginning
- // of the first byte of the statement program itself.
+ /// The number of bytes following the prologue_length field to the beginning
+ /// of the first byte of the statement program itself.
uint64_t PrologueLength;
- // The size in bytes of the smallest target machine instruction. Statement
- // program opcodes that alter the address register first multiply their
- // operands by this value.
+ /// The size in bytes of the smallest target machine instruction. Statement
+ /// program opcodes that alter the address register first multiply their
+ /// operands by this value.
uint8_t MinInstLength;
- // The maximum number of individual operations that may be encoded in an
- // instruction.
+ /// The maximum number of individual operations that may be encoded in an
+ /// instruction.
uint8_t MaxOpsPerInst;
- // The initial value of theis_stmtregister.
+ /// The initial value of the is_stmt register.
uint8_t DefaultIsStmt;
- // This parameter affects the meaning of the special opcodes. See below.
+ /// This parameter affects the meaning of the special opcodes. See below.
int8_t LineBase;
- // This parameter affects the meaning of the special opcodes. See below.
+ /// This parameter affects the meaning of the special opcodes. See below.
uint8_t LineRange;
- // The number assigned to the first special opcode.
+ /// The number assigned to the first special opcode.
uint8_t OpcodeBase;
std::vector<uint8_t> StandardOpcodeLengths;
- std::vector<const char*> IncludeDirectories;
+ std::vector<const char *> IncludeDirectories;
std::vector<FileNameEntry> FileNames;
bool IsDWARF64;
- uint32_t sizeofTotalLength() const {
- return IsDWARF64 ? 12 : 4;
- }
+ uint32_t sizeofTotalLength() const { return IsDWARF64 ? 12 : 4; }
- uint32_t sizeofPrologueLength() const {
- return IsDWARF64 ? 8 : 4;
- }
+ uint32_t sizeofPrologueLength() const { return IsDWARF64 ? 8 : 4; }
- // Length of the prologue in bytes.
+ /// Length of the prologue in bytes.
uint32_t getLength() const {
return PrologueLength + sizeofTotalLength() + sizeof(Version) +
sizeofPrologueLength();
}
- // Length of the line table data in bytes (not including the prologue).
+ /// Length of the line table data in bytes (not including the prologue).
uint32_t getStatementTableLength() const {
return TotalLength + sizeofTotalLength() - getLength();
}
@@ -92,70 +89,70 @@ public:
void clear();
void dump(raw_ostream &OS) const;
- bool parse(DataExtractor debug_line_data, uint32_t *offset_ptr);
+ bool parse(DataExtractor DebugLineData, uint32_t *OffsetPtr);
};
- // Standard .debug_line state machine structure.
+ /// Standard .debug_line state machine structure.
struct Row {
- explicit Row(bool default_is_stmt = false);
+ explicit Row(bool DefaultIsStmt = false);
/// Called after a row is appended to the matrix.
void postAppend();
- void reset(bool default_is_stmt);
+ void reset(bool DefaultIsStmt);
void dump(raw_ostream &OS) const;
- static bool orderByAddress(const Row& LHS, const Row& RHS) {
+ static bool orderByAddress(const Row &LHS, const Row &RHS) {
return LHS.Address < RHS.Address;
}
- // The program-counter value corresponding to a machine instruction
- // generated by the compiler.
+ /// The program-counter value corresponding to a machine instruction
+ /// generated by the compiler.
uint64_t Address;
- // An unsigned integer indicating a source line number. Lines are numbered
- // beginning at 1. The compiler may emit the value 0 in cases where an
- // instruction cannot be attributed to any source line.
+ /// An unsigned integer indicating a source line number. Lines are numbered
+ /// beginning at 1. The compiler may emit the value 0 in cases where an
+ /// instruction cannot be attributed to any source line.
uint32_t Line;
- // An unsigned integer indicating a column number within a source line.
- // Columns are numbered beginning at 1. The value 0 is reserved to indicate
- // that a statement begins at the 'left edge' of the line.
+ /// An unsigned integer indicating a column number within a source line.
+ /// Columns are numbered beginning at 1. The value 0 is reserved to indicate
+ /// that a statement begins at the 'left edge' of the line.
uint16_t Column;
- // An unsigned integer indicating the identity of the source file
- // corresponding to a machine instruction.
+ /// An unsigned integer indicating the identity of the source file
+ /// corresponding to a machine instruction.
uint16_t File;
- // An unsigned integer representing the DWARF path discriminator value
- // for this location.
+ /// An unsigned integer representing the DWARF path discriminator value
+ /// for this location.
uint32_t Discriminator;
- // An unsigned integer whose value encodes the applicable instruction set
- // architecture for the current instruction.
+ /// An unsigned integer whose value encodes the applicable instruction set
+ /// architecture for the current instruction.
uint8_t Isa;
- // A boolean indicating that the current instruction is the beginning of a
- // statement.
- uint8_t IsStmt:1,
- // A boolean indicating that the current instruction is the
- // beginning of a basic block.
- BasicBlock:1,
- // A boolean indicating that the current address is that of the
- // first byte after the end of a sequence of target machine
- // instructions.
- EndSequence:1,
- // A boolean indicating that the current address is one (of possibly
- // many) where execution should be suspended for an entry breakpoint
- // of a function.
- PrologueEnd:1,
- // A boolean indicating that the current address is one (of possibly
- // many) where execution should be suspended for an exit breakpoint
- // of a function.
- EpilogueBegin:1;
+ /// A boolean indicating that the current instruction is the beginning of a
+ /// statement.
+ uint8_t IsStmt : 1,
+ /// A boolean indicating that the current instruction is the
+ /// beginning of a basic block.
+ BasicBlock : 1,
+ /// A boolean indicating that the current address is that of the
+ /// first byte after the end of a sequence of target machine
+ /// instructions.
+ EndSequence : 1,
+ /// A boolean indicating that the current address is one (of possibly
+ /// many) where execution should be suspended for an entry breakpoint
+ /// of a function.
+ PrologueEnd : 1,
+ /// A boolean indicating that the current address is one (of possibly
+ /// many) where execution should be suspended for an exit breakpoint
+ /// of a function.
+ EpilogueBegin : 1;
};
- // Represents a series of contiguous machine instructions. Line table for each
- // compilation unit may consist of multiple sequences, which are not
- // guaranteed to be in the order of ascending instruction address.
+ /// Represents a series of contiguous machine instructions. Line table for
+ /// each compilation unit may consist of multiple sequences, which are not
+ /// guaranteed to be in the order of ascending instruction address.
struct Sequence {
Sequence();
- // Sequence describes instructions at address range [LowPC, HighPC)
- // and is described by line table rows [FirstRowIndex, LastRowIndex).
+ /// Sequence describes instructions at address range [LowPC, HighPC)
+ /// and is described by line table rows [FirstRowIndex, LastRowIndex).
uint64_t LowPC;
uint64_t HighPC;
unsigned FirstRowIndex;
@@ -164,7 +161,7 @@ public:
void reset();
- static bool orderByLowPC(const Sequence& LHS, const Sequence& RHS) {
+ static bool orderByLowPC(const Sequence &LHS, const Sequence &RHS) {
return LHS.LowPC < RHS.LowPC;
}
@@ -172,42 +169,38 @@ public:
return !Empty && (LowPC < HighPC) && (FirstRowIndex < LastRowIndex);
}
- bool containsPC(uint64_t pc) const {
- return (LowPC <= pc && pc < HighPC);
- }
+ bool containsPC(uint64_t PC) const { return (LowPC <= PC && PC < HighPC); }
};
struct LineTable {
LineTable();
- // Represents an invalid row
+ /// Represents an invalid row
const uint32_t UnknownRowIndex = UINT32_MAX;
- void appendRow(const DWARFDebugLine::Row &R) {
- Rows.push_back(R);
- }
+ void appendRow(const DWARFDebugLine::Row &R) { Rows.push_back(R); }
void appendSequence(const DWARFDebugLine::Sequence &S) {
Sequences.push_back(S);
}
- // Returns the index of the row with file/line info for a given address,
- // or UnknownRowIndex if there is no such row.
- uint32_t lookupAddress(uint64_t address) const;
+ /// Returns the index of the row with file/line info for a given address,
+ /// or UnknownRowIndex if there is no such row.
+ uint32_t lookupAddress(uint64_t Address) const;
- bool lookupAddressRange(uint64_t address, uint64_t size,
- std::vector<uint32_t> &result) const;
+ bool lookupAddressRange(uint64_t Address, uint64_t Size,
+ std::vector<uint32_t> &Result) const;
bool hasFileAtIndex(uint64_t FileIndex) const;
- // Extracts filename by its index in filename table in prologue.
- // Returns true on success.
+ /// Extracts filename by its index in filename table in prologue.
+ /// Returns true on success.
bool getFileNameByIndex(uint64_t FileIndex, const char *CompDir,
DILineInfoSpecifier::FileLineInfoKind Kind,
std::string &Result) const;
- // Fills the Result argument with the file and line information
- // corresponding to Address. Returns true on success.
+ /// Fills the Result argument with the file and line information
+ /// corresponding to Address. Returns true on success.
bool getFileLineInfoForAddress(uint64_t Address, const char *CompDir,
DILineInfoSpecifier::FileLineInfoKind Kind,
DILineInfo &Result) const;
@@ -216,8 +209,8 @@ public:
void clear();
/// Parse prologue and all rows.
- bool parse(DataExtractor debug_line_data, const RelocAddrMap *RMap,
- uint32_t *offset_ptr);
+ bool parse(DataExtractor DebugLineData, const RelocAddrMap *RMap,
+ uint32_t *OffsetPtr);
struct Prologue Prologue;
typedef std::vector<Row> RowVector;
@@ -228,25 +221,25 @@ public:
SequenceVector Sequences;
private:
- uint32_t findRowInSeq(const DWARFDebugLine::Sequence &seq,
- uint64_t address) const;
+ uint32_t findRowInSeq(const DWARFDebugLine::Sequence &Seq,
+ uint64_t Address) const;
};
- const LineTable *getLineTable(uint32_t offset) const;
- const LineTable *getOrParseLineTable(DataExtractor debug_line_data,
- uint32_t offset);
+ const LineTable *getLineTable(uint32_t Offset) const;
+ const LineTable *getOrParseLineTable(DataExtractor DebugLineData,
+ uint32_t Offset);
private:
struct ParsingState {
ParsingState(struct LineTable *LT);
void resetRowAndSequence();
- void appendRowToMatrix(uint32_t offset);
+ void appendRowToMatrix(uint32_t Offset);
- // Line table we're currently parsing.
+ /// Line table we're currently parsing.
struct LineTable *LineTable;
- // The row number that starts at zero for the prologue, and increases for
- // each row added to the matrix.
+ /// The row number that starts at zero for the prologue, and increases for
+ /// each row added to the matrix.
unsigned RowNumber;
struct Row Row;
struct Sequence Sequence;
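
Since this hunk is mostly a comment and naming cleanup, a short usage sketch of the LineTable API it documents may help; it assumes a LineTable already parsed elsewhere (for example via getOrParseLineTable) and the FileLineInfoKind enumerators from DIContext.h.

    #include "llvm/DebugInfo/DIContext.h"
    #include "llvm/DebugInfo/DWARF/DWARFDebugLine.h"

    using namespace llvm;

    // Map a PC back to file/line information (illustrative sketch; CompDir is
    // the compilation directory of the owning compile unit).
    static bool describeAddress(const DWARFDebugLine::LineTable &LT,
                                uint64_t Address, const char *CompDir,
                                DILineInfo &Result) {
      // lookupAddress returns UnknownRowIndex when no row covers the address.
      if (LT.lookupAddress(Address) == LT.UnknownRowIndex)
        return false;
      return LT.getFileLineInfoForAddress(
          Address, CompDir,
          DILineInfoSpecifier::FileLineInfoKind::AbsoluteFilePath, Result);
    }
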
diff --git a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
index c8d7a0c1ac7a..36b27228f5c6 100644
--- a/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
+++ b/include/llvm/DebugInfo/DWARF/DWARFFormValue.h
@@ -59,6 +59,7 @@ public:
DWARFFormValue(dwarf::Form F = dwarf::Form(0)) : Form(F) {}
dwarf::Form getForm() const { return Form; }
+ uint64_t getRawUValue() const { return Value.uval; }
void setForm(dwarf::Form F) { Form = F; }
void setUValue(uint64_t V) { Value.uval = V; }
void setSValue(int64_t V) { Value.sval = V; }
diff --git a/include/llvm/DebugInfo/PDB/Native/ModInfo.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
index d26d0d618449..879cb4285cd7 100644
--- a/include/llvm/DebugInfo/PDB/Native/ModInfo.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h
@@ -1,4 +1,4 @@
-//===- ModInfo.h - PDB module information -----------------------*- C++ -*-===//
+//===- DbiModuleDescriptor.h - PDB module information -----------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,8 +7,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MODINFO_H
-#define LLVM_DEBUGINFO_PDB_RAW_MODINFO_H
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
#include "llvm/ADT/StringRef.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
@@ -22,21 +22,21 @@ namespace llvm {
namespace pdb {
-class ModInfo {
+class DbiModuleDescriptor {
friend class DbiStreamBuilder;
public:
- ModInfo();
- ModInfo(const ModInfo &Info);
- ~ModInfo();
+ DbiModuleDescriptor();
+ DbiModuleDescriptor(const DbiModuleDescriptor &Info);
+ ~DbiModuleDescriptor();
- static Error initialize(BinaryStreamRef Stream, ModInfo &Info);
+ static Error initialize(BinaryStreamRef Stream, DbiModuleDescriptor &Info);
bool hasECInfo() const;
uint16_t getTypeServerIndex() const;
uint16_t getModuleStreamIndex() const;
uint32_t getSymbolDebugInfoByteSize() const;
- uint32_t getLineInfoByteSize() const;
+ uint32_t getC11LineInfoByteSize() const;
uint32_t getC13LineInfoByteSize() const;
uint32_t getNumberOfFiles() const;
uint32_t getSourceFileNameIndex() const;
@@ -54,19 +54,20 @@ private:
};
struct ModuleInfoEx {
- ModuleInfoEx(const ModInfo &Info) : Info(Info) {}
+ ModuleInfoEx(const DbiModuleDescriptor &Info) : Info(Info) {}
ModuleInfoEx(const ModuleInfoEx &Ex) = default;
- ModInfo Info;
+ DbiModuleDescriptor Info;
std::vector<StringRef> SourceFiles;
};
} // end namespace pdb
-template <> struct VarStreamArrayExtractor<pdb::ModInfo> {
- Error operator()(BinaryStreamRef Stream, uint32_t &Length,
- pdb::ModInfo &Info) const {
- if (auto EC = pdb::ModInfo::initialize(Stream, Info))
+template <> struct VarStreamArrayExtractor<pdb::DbiModuleDescriptor> {
+ typedef void ContextType;
+ static Error extract(BinaryStreamRef Stream, uint32_t &Length,
+ pdb::DbiModuleDescriptor &Info, void *Ctx) {
+ if (auto EC = pdb::DbiModuleDescriptor::initialize(Stream, Info))
return EC;
Length = Info.getRecordLength();
return Error::success();
@@ -75,4 +76,4 @@ template <> struct VarStreamArrayExtractor<pdb::ModInfo> {
} // end namespace llvm
-#endif // LLVM_DEBUGINFO_PDB_RAW_MODINFO_H
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTOR_H
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
new file mode 100644
index 000000000000..8cc5db981f56
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h
@@ -0,0 +1,101 @@
+//===- DbiModuleDescriptorBuilder.h - PDB module information ----*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
+#define LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
+
+#include "llvm/ADT/StringRef.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFileChecksumFragment.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugLineFragment.h"
+#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
+#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
+#include "llvm/Support/Error.h"
+#include <cstdint>
+#include <string>
+#include <vector>
+
+namespace llvm {
+class BinaryStreamWriter;
+
+namespace codeview {
+class ModuleDebugFragmentRecordBuilder;
+}
+
+namespace msf {
+class MSFBuilder;
+struct MSFLayout;
+}
+namespace pdb {
+
+class DbiModuleDescriptorBuilder {
+ friend class DbiStreamBuilder;
+
+public:
+ DbiModuleDescriptorBuilder(StringRef ModuleName, uint32_t ModIndex,
+ msf::MSFBuilder &Msf);
+ ~DbiModuleDescriptorBuilder();
+
+ DbiModuleDescriptorBuilder(const DbiModuleDescriptorBuilder &) = delete;
+ DbiModuleDescriptorBuilder &
+ operator=(const DbiModuleDescriptorBuilder &) = delete;
+
+ void setObjFileName(StringRef Name);
+ void addSymbol(codeview::CVSymbol Symbol);
+
+ void addC13Fragment(std::unique_ptr<codeview::ModuleDebugLineFragment> Lines);
+ void addC13Fragment(
+ std::unique_ptr<codeview::ModuleDebugInlineeLineFragment> Inlinees);
+ void setC13FileChecksums(
+ std::unique_ptr<codeview::ModuleDebugFileChecksumFragment> Checksums);
+
+ uint16_t getStreamIndex() const;
+ StringRef getModuleName() const { return ModuleName; }
+ StringRef getObjFileName() const { return ObjFileName; }
+
+ ArrayRef<std::string> source_files() const {
+ return makeArrayRef(SourceFiles);
+ }
+
+ uint32_t calculateSerializedLength() const;
+
+ void finalize();
+ Error finalizeMsfLayout();
+
+ Error commit(BinaryStreamWriter &ModiWriter, const msf::MSFLayout &MsfLayout,
+ WritableBinaryStreamRef MsfBuffer);
+
+private:
+ uint32_t calculateC13DebugInfoSize() const;
+
+ void addSourceFile(StringRef Path);
+ msf::MSFBuilder &MSF;
+
+ uint32_t SymbolByteSize = 0;
+ std::string ModuleName;
+ std::string ObjFileName;
+ std::vector<std::string> SourceFiles;
+ std::vector<codeview::CVSymbol> Symbols;
+
+ std::unique_ptr<codeview::ModuleDebugFileChecksumFragment> ChecksumInfo;
+ std::vector<std::unique_ptr<codeview::ModuleDebugLineFragment>> LineInfo;
+ std::vector<std::unique_ptr<codeview::ModuleDebugInlineeLineFragment>>
+ Inlinees;
+
+ std::vector<std::unique_ptr<codeview::ModuleDebugFragmentRecordBuilder>>
+ C13Builders;
+
+ ModuleInfoHeader Layout;
+};
+
+} // end namespace pdb
+
+} // end namespace llvm
+
+#endif // LLVM_DEBUGINFO_PDB_RAW_DBIMODULEDESCRIPTORBUILDER_H
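
A sketch of how the renamed builder might be driven through DbiStreamBuilder, whose addModuleInfo now returns Expected<DbiModuleDescriptorBuilder &> (see the DbiStreamBuilder hunk below). The module name, symbol, and fragment arguments are placeholders.

    #include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptorBuilder.h"
    #include "llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h"
    #include <memory>
    #include <utility>

    using namespace llvm;
    using namespace llvm::pdb;

    // Register a module and attach symbol and C13 line info to it
    // (illustrative sketch).
    static Error addModule(
        DbiStreamBuilder &Dbi, codeview::CVSymbol Sym,
        std::unique_ptr<codeview::ModuleDebugLineFragment> Lines) {
      auto ExpectedModule = Dbi.addModuleInfo("a.obj");
      if (!ExpectedModule)
        return ExpectedModule.takeError();
      DbiModuleDescriptorBuilder &Module = *ExpectedModule;
      Module.setObjFileName("a.obj");
      Module.addSymbol(Sym);
      Module.addC13Fragment(std::move(Lines));
      return Error::success();
    }
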
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStream.h b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
index f49f5aaefaca..84ae57f2e23a 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStream.h
@@ -10,9 +10,9 @@
#ifndef LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H
#define LLVM_DEBUGINFO_PDB_RAW_PDBDBISTREAM_H
-#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragment.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
-#include "llvm/DebugInfo/PDB/Native/ModInfo.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/Native/RawConstants.h"
#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
#include "llvm/DebugInfo/PDB/Native/StringTable.h"
diff --git a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
index 16426bd93847..bcac182e2145 100644
--- a/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/DbiStreamBuilder.h
@@ -31,7 +31,7 @@ struct coff_section;
namespace pdb {
class DbiStream;
struct DbiStreamHeader;
-class ModInfoBuilder;
+class DbiModuleDescriptorBuilder;
class PDBFile;
class DbiStreamBuilder {
@@ -57,8 +57,9 @@ public:
uint32_t calculateSerializedLength() const;
- Expected<ModInfoBuilder &> addModuleInfo(StringRef ModuleName);
+ Expected<DbiModuleDescriptorBuilder &> addModuleInfo(StringRef ModuleName);
Error addModuleSourceFile(StringRef Module, StringRef File);
+ Expected<uint32_t> getSourceFileNameIndex(StringRef FileName);
Error finalizeMsfLayout();
@@ -103,8 +104,8 @@ private:
const DbiStreamHeader *Header;
- StringMap<std::unique_ptr<ModInfoBuilder>> ModiMap;
- std::vector<ModInfoBuilder *> ModiList;
+ StringMap<std::unique_ptr<DbiModuleDescriptorBuilder>> ModiMap;
+ std::vector<DbiModuleDescriptorBuilder *> ModiList;
StringMap<uint32_t> SourceFileNames;
diff --git a/include/llvm/DebugInfo/PDB/Native/ModInfoBuilder.h b/include/llvm/DebugInfo/PDB/Native/ModInfoBuilder.h
deleted file mode 100644
index 605fd2483c3b..000000000000
--- a/include/llvm/DebugInfo/PDB/Native/ModInfoBuilder.h
+++ /dev/null
@@ -1,74 +0,0 @@
-//===- ModInfoBuilder.h - PDB module information ----------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MODINFOBUILDER_H
-#define LLVM_DEBUGINFO_PDB_RAW_MODINFOBUILDER_H
-
-#include "llvm/ADT/StringRef.h"
-#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
-#include "llvm/DebugInfo/PDB/Native/RawTypes.h"
-#include "llvm/Support/Error.h"
-#include <cstdint>
-#include <string>
-#include <vector>
-
-namespace llvm {
-class BinaryStreamWriter;
-
-namespace msf {
-class MSFBuilder;
-struct MSFLayout;
-}
-namespace pdb {
-
-class ModInfoBuilder {
- friend class DbiStreamBuilder;
-
-public:
- ModInfoBuilder(StringRef ModuleName, uint32_t ModIndex, msf::MSFBuilder &Msf);
-
- ModInfoBuilder(const ModInfoBuilder &) = delete;
- ModInfoBuilder &operator=(const ModInfoBuilder &) = delete;
-
- void setObjFileName(StringRef Name);
- void addSymbol(codeview::CVSymbol Symbol);
-
- uint16_t getStreamIndex() const;
- StringRef getModuleName() const { return ModuleName; }
- StringRef getObjFileName() const { return ObjFileName; }
-
- ArrayRef<std::string> source_files() const {
- return makeArrayRef(SourceFiles);
- }
-
- uint32_t calculateSerializedLength() const;
-
- void finalize();
- Error finalizeMsfLayout();
-
- Error commit(BinaryStreamWriter &ModiWriter, const msf::MSFLayout &MsfLayout,
- WritableBinaryStreamRef MsfBuffer);
-
-private:
- void addSourceFile(StringRef Path);
- msf::MSFBuilder &MSF;
-
- uint32_t SymbolByteSize = 0;
- std::string ModuleName;
- std::string ObjFileName;
- std::vector<std::string> SourceFiles;
- std::vector<codeview::CVSymbol> Symbols;
- ModuleInfoHeader Layout;
-};
-
-} // end namespace pdb
-
-} // end namespace llvm
-
-#endif // LLVM_DEBUGINFO_PDB_RAW_MODINFOBUILDER_H
diff --git a/include/llvm/DebugInfo/PDB/Native/ModStream.h b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
index b12d4ff375f3..2c95690ed580 100644
--- a/include/llvm/DebugInfo/PDB/Native/ModStream.h
+++ b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStream.h
@@ -1,4 +1,4 @@
-//===- ModStream.h - PDB Module Info Stream Access ------------------------===//
+//===- ModuleDebugStream.h - PDB Module Info Stream Access ----------------===//
//
// The LLVM Compiler Infrastructure
//
@@ -7,12 +7,12 @@
//
//===----------------------------------------------------------------------===//
-#ifndef LLVM_DEBUGINFO_PDB_RAW_MODSTREAM_H
-#define LLVM_DEBUGINFO_PDB_RAW_MODSTREAM_H
+#ifndef LLVM_DEBUGINFO_PDB_RAW_MODULEDEBUGSTREAM_H
+#define LLVM_DEBUGINFO_PDB_RAW_MODULEDEBUGSTREAM_H
#include "llvm/ADT/iterator_range.h"
#include "llvm/DebugInfo/CodeView/CVRecord.h"
-#include "llvm/DebugInfo/CodeView/ModuleSubstream.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugFragmentRecord.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/MSF/MappedBlockStream.h"
#include "llvm/Support/BinaryStreamArray.h"
@@ -22,13 +22,16 @@
namespace llvm {
namespace pdb {
class PDBFile;
-class ModInfo;
+class DbiModuleDescriptor;
+
+class ModuleDebugStreamRef {
+ typedef codeview::ModuleDebugFragmentArray::Iterator
+ LinesAndChecksumsIterator;
-class ModStream {
public:
- ModStream(const ModInfo &Module,
- std::unique_ptr<msf::MappedBlockStream> Stream);
- ~ModStream();
+ ModuleDebugStreamRef(const DbiModuleDescriptor &Module,
+ std::unique_ptr<msf::MappedBlockStream> Stream);
+ ~ModuleDebugStreamRef();
Error reload();
@@ -37,26 +40,25 @@ public:
iterator_range<codeview::CVSymbolArray::Iterator>
symbols(bool *HadError) const;
- iterator_range<codeview::ModuleSubstreamArray::Iterator>
- lines(bool *HadError) const;
+ llvm::iterator_range<LinesAndChecksumsIterator> linesAndChecksums() const;
bool hasLineInfo() const;
Error commit();
private:
- const ModInfo &Mod;
+ const DbiModuleDescriptor &Mod;
uint32_t Signature;
std::unique_ptr<msf::MappedBlockStream> Stream;
codeview::CVSymbolArray SymbolsSubstream;
- BinaryStreamRef LinesSubstream;
+ BinaryStreamRef C11LinesSubstream;
BinaryStreamRef C13LinesSubstream;
BinaryStreamRef GlobalRefsSubstream;
- codeview::ModuleSubstreamArray LineInfo;
+ codeview::ModuleDebugFragmentArray LinesAndChecksums;
};
}
}
diff --git a/include/llvm/DebugInfo/PDB/Native/ModuleDebugStreamBuilder.h b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStreamBuilder.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/include/llvm/DebugInfo/PDB/Native/ModuleDebugStreamBuilder.h
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
index 8eeaf3e0ea49..b1d980679a45 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeCompilandSymbol.h
@@ -10,7 +10,7 @@
#ifndef LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVECOMPILANDSYMBOL_H
-#include "llvm/DebugInfo/PDB/Native/ModInfo.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/Native/NativeRawSymbol.h"
namespace llvm {
diff --git a/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h b/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
index 60a55ee50cc4..18022f599bba 100644
--- a/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
+++ b/include/llvm/DebugInfo/PDB/Native/NativeEnumModules.h
@@ -11,7 +11,7 @@
#define LLVM_DEBUGINFO_PDB_NATIVE_NATIVEENUMMODULES_H
#include "llvm/DebugInfo/PDB/IPDBEnumChildren.h"
-#include "llvm/DebugInfo/PDB/Native/ModInfo.h"
+#include "llvm/DebugInfo/PDB/Native/DbiModuleDescriptor.h"
#include "llvm/DebugInfo/PDB/PDBSymbol.h"
namespace llvm {
namespace pdb {
diff --git a/include/llvm/DebugInfo/PDB/Native/RawTypes.h b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
index 1b2631efce70..e1c6cf0021d5 100644
--- a/include/llvm/DebugInfo/PDB/Native/RawTypes.h
+++ b/include/llvm/DebugInfo/PDB/Native/RawTypes.h
@@ -200,7 +200,7 @@ struct FileInfoSubstreamHeader {
};
struct ModInfoFlags {
- /// uint16_t fWritten : 1; // True if ModInfo is dirty
+ /// uint16_t fWritten : 1; // True if DbiModuleDescriptor is dirty
/// uint16_t fECEnabled : 1; // Is EC symbolic info present? (What is EC?)
/// uint16_t unused : 6; // Reserved
/// uint16_t iTSM : 8; // Type Server Index for this module
@@ -231,8 +231,8 @@ struct ModuleInfoHeader {
/// Size of local symbol debug info in above stream
support::ulittle32_t SymBytes;
- /// Size of line number debug info in above stream
- support::ulittle32_t LineBytes;
+ /// Size of C11 line number info in above stream
+ support::ulittle32_t C11Bytes;
/// Size of C13 line number info in above stream
support::ulittle32_t C13Bytes;
diff --git a/include/llvm/DebugInfo/PDB/Native/StringTableBuilder.h b/include/llvm/DebugInfo/PDB/Native/StringTableBuilder.h
index dd0f40b1978d..9c4b12e33ba0 100644
--- a/include/llvm/DebugInfo/PDB/Native/StringTableBuilder.h
+++ b/include/llvm/DebugInfo/PDB/Native/StringTableBuilder.h
@@ -29,6 +29,7 @@ public:
// If string S does not exist in the string table, insert it.
// Returns the ID for S.
uint32_t insert(StringRef S);
+ uint32_t getStringIndex(StringRef S);
uint32_t finalize();
Error commit(BinaryStreamWriter &Writer) const;
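
The new getStringIndex accessor complements insert; a minimal sketch, assuming only the two methods shown above (the helper name is illustrative).

    #include "llvm/DebugInfo/PDB/Native/StringTableBuilder.h"

    using namespace llvm;
    using namespace llvm::pdb;

    // Intern a name once and recover its ID later without re-inserting
    // (illustrative sketch).
    static uint32_t internName(StringTableBuilder &Strings, StringRef Name) {
      uint32_t Id = Strings.insert(Name);
      // getStringIndex looks up the ID of an already-inserted string.
      uint32_t Again = Strings.getStringIndex(Name);
      (void)Again;
      return Id;
    }
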
diff --git a/include/llvm/DebugInfo/Symbolize/Symbolize.h b/include/llvm/DebugInfo/Symbolize/Symbolize.h
index 9253adf7eedd..5103cc03a6bd 100644
--- a/include/llvm/DebugInfo/Symbolize/Symbolize.h
+++ b/include/llvm/DebugInfo/Symbolize/Symbolize.h
@@ -56,8 +56,9 @@ public:
Expected<DIGlobal> symbolizeData(const std::string &ModuleName,
uint64_t ModuleOffset);
void flush();
- static std::string DemangleName(const std::string &Name,
- const SymbolizableModule *ModInfo);
+ static std::string
+ DemangleName(const std::string &Name,
+ const SymbolizableModule *DbiModuleDescriptor);
private:
// Bundles together object file with code/data and object file with
diff --git a/include/llvm/IR/Argument.h b/include/llvm/IR/Argument.h
index 5c05f19abc1f..5ed6d030c984 100644
--- a/include/llvm/IR/Argument.h
+++ b/include/llvm/IR/Argument.h
@@ -115,8 +115,6 @@ public:
void addAttr(Attribute Attr);
/// Remove attributes from an argument.
- void removeAttr(AttributeList AS);
-
void removeAttr(Attribute::AttrKind Kind);
/// Check if an argument has a given attribute.
diff --git a/include/llvm/IR/Attributes.h b/include/llvm/IR/Attributes.h
index e2cd4c236fcc..af46034d5a9e 100644
--- a/include/llvm/IR/Attributes.h
+++ b/include/llvm/IR/Attributes.h
@@ -457,8 +457,11 @@ public:
/// \brief Return the attribute object that exists at the given index.
Attribute getAttribute(unsigned Index, StringRef Kind) const;
+ /// \brief Return the alignment of the return value.
+ unsigned getRetAlignment() const;
+
/// \brief Return the alignment for the specified function parameter.
- unsigned getParamAlignment(unsigned Index) const;
+ unsigned getParamAlignment(unsigned ArgNo) const;
/// \brief Get the stack alignment.
unsigned getStackAlignment(unsigned Index) const;
diff --git a/include/llvm/IR/Attributes.td b/include/llvm/IR/Attributes.td
index 7b63638a3f6a..75867a6e5833 100644
--- a/include/llvm/IR/Attributes.td
+++ b/include/llvm/IR/Attributes.td
@@ -137,6 +137,9 @@ def SExt : EnumAttr<"signext">;
/// +1 bias 0 means unaligned (different from alignstack=(1)).
def StackAlignment : EnumAttr<"alignstack">;
+/// Function can be speculated.
+def Speculatable : EnumAttr<"speculatable">;
+
/// Stack protection.
def StackProtect : EnumAttr<"ssp">;
diff --git a/include/llvm/IR/CallSite.h b/include/llvm/IR/CallSite.h
index 79f59557a5d6..bad1d4e383d5 100644
--- a/include/llvm/IR/CallSite.h
+++ b/include/llvm/IR/CallSite.h
@@ -386,20 +386,25 @@ public:
CALLSITE_DELEGATE_GETTER(dataOperandHasImpliedAttr(i, Kind));
}
+ /// Extract the alignment of the return value.
+ unsigned getRetAlignment() const {
+ CALLSITE_DELEGATE_GETTER(getRetAlignment());
+ }
+
/// Extract the alignment for a call or parameter (0=unknown).
- uint16_t getParamAlignment(uint16_t i) const {
- CALLSITE_DELEGATE_GETTER(getParamAlignment(i));
+ unsigned getParamAlignment(unsigned ArgNo) const {
+ CALLSITE_DELEGATE_GETTER(getParamAlignment(ArgNo));
}
/// Extract the number of dereferenceable bytes for a call or parameter
/// (0=unknown).
- uint64_t getDereferenceableBytes(uint16_t i) const {
+ uint64_t getDereferenceableBytes(unsigned i) const {
CALLSITE_DELEGATE_GETTER(getDereferenceableBytes(i));
}
/// Extract the number of dereferenceable_or_null bytes for a call or
/// parameter (0=unknown).
- uint64_t getDereferenceableOrNullBytes(uint16_t i) const {
+ uint64_t getDereferenceableOrNullBytes(unsigned i) const {
CALLSITE_DELEGATE_GETTER(getDereferenceableOrNullBytes(i));
}
@@ -599,7 +604,7 @@ public:
bool isReturnNonNull() const {
if (hasRetAttr(Attribute::NonNull))
return true;
- else if (getDereferenceableBytes(0) > 0 &&
+ else if (getDereferenceableBytes(AttributeList::ReturnIndex) > 0 &&
getType()->getPointerAddressSpace() == 0)
return true;
diff --git a/include/llvm/IR/CallingConv.h b/include/llvm/IR/CallingConv.h
index 9cfbda1f6857..604e99c8b52c 100644
--- a/include/llvm/IR/CallingConv.h
+++ b/include/llvm/IR/CallingConv.h
@@ -196,6 +196,10 @@ namespace CallingConv {
/// Register calling convention used for parameters transfer optimization
X86_RegCall = 92,
+ /// Calling convention used for Mesa hull shaders. (= tessellation control
+ /// shaders)
+ AMDGPU_HS = 93,
+
/// The highest possible calling convention ID. Must be some 2^k - 1.
MaxID = 1023
};
diff --git a/include/llvm/IR/DIBuilder.h b/include/llvm/IR/DIBuilder.h
index a4b2a02d5050..4afb5d9d63b2 100644
--- a/include/llvm/IR/DIBuilder.h
+++ b/include/llvm/IR/DIBuilder.h
@@ -577,15 +577,14 @@ namespace llvm {
/// These flags are used to emit dwarf attributes.
/// \param isOptimized True if optimization is ON.
/// \param TParams Function template parameters.
- DISubprogram *createFunction(DIScope *Scope, StringRef Name,
- StringRef LinkageName, DIFile *File,
- unsigned LineNo, DISubroutineType *Ty,
- bool isLocalToUnit, bool isDefinition,
- unsigned ScopeLine,
- DINode::DIFlags Flags = DINode::FlagZero,
- bool isOptimized = false,
- DITemplateParameterArray TParams = nullptr,
- DISubprogram *Decl = nullptr);
+ /// \param ThrownTypes Exception types this function may throw.
+ DISubprogram *createFunction(
+ DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
+ unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
+ bool isDefinition, unsigned ScopeLine,
+ DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
+ DITemplateParameterArray TParams = nullptr,
+ DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
/// Identical to createFunction,
/// except that the resulting DbgNode is meant to be RAUWed.
@@ -595,7 +594,7 @@ namespace llvm {
bool isDefinition, unsigned ScopeLine,
DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
DITemplateParameterArray TParams = nullptr,
- DISubprogram *Decl = nullptr);
+ DISubprogram *Decl = nullptr, DITypeArray ThrownTypes = nullptr);
/// Create a new descriptor for the specified C++ method.
/// See comments in \a DISubprogram* for descriptions of these fields.
@@ -619,23 +618,23 @@ namespace llvm {
/// This flags are used to emit dwarf attributes.
/// \param isOptimized True if optimization is ON.
/// \param TParams Function template parameters.
+ /// \param ThrownTypes Exception types this function may throw.
DISubprogram *createMethod(
DIScope *Scope, StringRef Name, StringRef LinkageName, DIFile *File,
unsigned LineNo, DISubroutineType *Ty, bool isLocalToUnit,
bool isDefinition, unsigned Virtuality = 0, unsigned VTableIndex = 0,
int ThisAdjustment = 0, DIType *VTableHolder = nullptr,
DINode::DIFlags Flags = DINode::FlagZero, bool isOptimized = false,
- DITemplateParameterArray TParams = nullptr);
+ DITemplateParameterArray TParams = nullptr,
+ DITypeArray ThrownTypes = nullptr);
/// This creates new descriptor for a namespace with the specified
/// parent scope.
/// \param Scope Namespace scope
/// \param Name Name of this namespace
- /// \param File Source file
- /// \param LineNo Line number
/// \param ExportSymbols True for C++ inline namespaces.
- DINamespace *createNameSpace(DIScope *Scope, StringRef Name, DIFile *File,
- unsigned LineNo, bool ExportSymbols);
+ DINamespace *createNameSpace(DIScope *Scope, StringRef Name,
+ bool ExportSymbols);
/// This creates new descriptor for a module with the specified
/// parent scope.
diff --git a/include/llvm/IR/DebugInfoMetadata.h b/include/llvm/IR/DebugInfoMetadata.h
index 8041e35e0e0a..0331d5229e7f 100644
--- a/include/llvm/IR/DebugInfoMetadata.h
+++ b/include/llvm/IR/DebugInfoMetadata.h
@@ -56,6 +56,8 @@
namespace llvm {
+class DIBuilder;
+
template <typename T> class Optional;
/// Holds a subclass of DINode.
@@ -433,7 +435,7 @@ public:
/// Return the raw underlying file.
///
- /// An \a DIFile is an \a DIScope, but it doesn't point at a separate file
+ /// A \a DIFile is a \a DIScope, but it doesn't point at a separate file
/// (it\em is the file). If \c this is an \a DIFile, we need to return \c
/// this. Otherwise, return the first operand, which is where all other
/// subclasses store their file pointer.
@@ -1509,14 +1511,14 @@ class DISubprogram : public DILocalScope {
unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
bool IsOptimized, DICompileUnit *Unit,
DITemplateParameterArray TemplateParams, DISubprogram *Declaration,
- DILocalVariableArray Variables, StorageType Storage,
- bool ShouldCreate = true) {
+ DILocalVariableArray Variables, DITypeArray ThrownTypes,
+ StorageType Storage, bool ShouldCreate = true) {
return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
getCanonicalMDString(Context, LinkageName), File, Line, Type,
IsLocalToUnit, IsDefinition, ScopeLine, ContainingType,
Virtuality, VirtualIndex, ThisAdjustment, Flags, IsOptimized,
Unit, TemplateParams.get(), Declaration, Variables.get(),
- Storage, ShouldCreate);
+ ThrownTypes.get(), Storage, ShouldCreate);
}
static DISubprogram *
getImpl(LLVMContext &Context, Metadata *Scope, MDString *Name,
@@ -1525,15 +1527,16 @@ class DISubprogram : public DILocalScope {
Metadata *ContainingType, unsigned Virtuality, unsigned VirtualIndex,
int ThisAdjustment, DIFlags Flags, bool IsOptimized, Metadata *Unit,
Metadata *TemplateParams, Metadata *Declaration, Metadata *Variables,
- StorageType Storage, bool ShouldCreate = true);
+ Metadata *ThrownTypes, StorageType Storage, bool ShouldCreate = true);
TempDISubprogram cloneImpl() const {
- return getTemporary(
- getContext(), getScope(), getName(), getLinkageName(), getFile(),
- getLine(), getType(), isLocalToUnit(), isDefinition(), getScopeLine(),
- getContainingType(), getVirtuality(), getVirtualIndex(),
- getThisAdjustment(), getFlags(), isOptimized(), getUnit(),
- getTemplateParams(), getDeclaration(), getVariables());
+ return getTemporary(getContext(), getScope(), getName(), getLinkageName(),
+ getFile(), getLine(), getType(), isLocalToUnit(),
+ isDefinition(), getScopeLine(), getContainingType(),
+ getVirtuality(), getVirtualIndex(), getThisAdjustment(),
+ getFlags(), isOptimized(), getUnit(),
+ getTemplateParams(), getDeclaration(), getVariables(),
+ getThrownTypes());
}
public:
@@ -1546,11 +1549,12 @@ public:
bool IsOptimized, DICompileUnit *Unit,
DITemplateParameterArray TemplateParams = nullptr,
DISubprogram *Declaration = nullptr,
- DILocalVariableArray Variables = nullptr),
+ DILocalVariableArray Variables = nullptr,
+ DITypeArray ThrownTypes = nullptr),
(Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit,
IsDefinition, ScopeLine, ContainingType, Virtuality,
VirtualIndex, ThisAdjustment, Flags, IsOptimized, Unit,
- TemplateParams, Declaration, Variables))
+ TemplateParams, Declaration, Variables, ThrownTypes))
DEFINE_MDNODE_GET(
DISubprogram,
(Metadata * Scope, MDString *Name, MDString *LinkageName, Metadata *File,
@@ -1558,10 +1562,12 @@ public:
unsigned ScopeLine, Metadata *ContainingType, unsigned Virtuality,
unsigned VirtualIndex, int ThisAdjustment, DIFlags Flags,
bool IsOptimized, Metadata *Unit, Metadata *TemplateParams = nullptr,
- Metadata *Declaration = nullptr, Metadata *Variables = nullptr),
+ Metadata *Declaration = nullptr, Metadata *Variables = nullptr,
+ Metadata *ThrownTypes = nullptr),
(Scope, Name, LinkageName, File, Line, Type, IsLocalToUnit, IsDefinition,
ScopeLine, ContainingType, Virtuality, VirtualIndex, ThisAdjustment,
- Flags, IsOptimized, Unit, TemplateParams, Declaration, Variables))
+ Flags, IsOptimized, Unit, TemplateParams, Declaration, Variables,
+ ThrownTypes))
TempDISubprogram clone() const { return cloneImpl(); }
@@ -1610,11 +1616,7 @@ public:
DIScopeRef getScope() const { return DIScopeRef(getRawScope()); }
StringRef getName() const { return getStringOperand(2); }
- StringRef getDisplayName() const { return getStringOperand(3); }
- StringRef getLinkageName() const { return getStringOperand(4); }
-
- MDString *getRawName() const { return getOperandAs<MDString>(2); }
- MDString *getRawLinkageName() const { return getOperandAs<MDString>(4); }
+ StringRef getLinkageName() const { return getStringOperand(3); }
DISubroutineType *getType() const {
return cast_or_null<DISubroutineType>(getRawType());
@@ -1626,9 +1628,7 @@ public:
DICompileUnit *getUnit() const {
return cast_or_null<DICompileUnit>(getRawUnit());
}
- void replaceUnit(DICompileUnit *CU) {
- replaceOperandWith(7, CU);
- }
+ void replaceUnit(DICompileUnit *CU) { replaceOperandWith(5, CU); }
DITemplateParameterArray getTemplateParams() const {
return cast_or_null<MDTuple>(getRawTemplateParams());
}
@@ -1638,14 +1638,26 @@ public:
DILocalVariableArray getVariables() const {
return cast_or_null<MDTuple>(getRawVariables());
}
+ DITypeArray getThrownTypes() const {
+ return cast_or_null<MDTuple>(getRawThrownTypes());
+ }
Metadata *getRawScope() const { return getOperand(1); }
- Metadata *getRawType() const { return getOperand(5); }
- Metadata *getRawContainingType() const { return getOperand(6); }
- Metadata *getRawUnit() const { return getOperand(7); }
- Metadata *getRawTemplateParams() const { return getOperand(8); }
- Metadata *getRawDeclaration() const { return getOperand(9); }
- Metadata *getRawVariables() const { return getOperand(10); }
+ MDString *getRawName() const { return getOperandAs<MDString>(2); }
+ MDString *getRawLinkageName() const { return getOperandAs<MDString>(3); }
+ Metadata *getRawType() const { return getOperand(4); }
+ Metadata *getRawUnit() const { return getOperand(5); }
+ Metadata *getRawDeclaration() const { return getOperand(6); }
+ Metadata *getRawVariables() const { return getOperand(7); }
+ Metadata *getRawContainingType() const {
+ return getNumOperands() > 8 ? getOperandAs<Metadata>(8) : nullptr;
+ }
+ Metadata *getRawTemplateParams() const {
+ return getNumOperands() > 9 ? getOperandAs<Metadata>(9) : nullptr;
+ }
+ Metadata *getRawThrownTypes() const {
+ return getNumOperands() > 10 ? getOperandAs<Metadata>(10) : nullptr;
+ }
/// Check if this subprogram describes the given function.
///
@@ -1841,45 +1853,40 @@ class DINamespace : public DIScope {
friend class LLVMContextImpl;
friend class MDNode;
- unsigned Line;
unsigned ExportSymbols : 1;
- DINamespace(LLVMContext &Context, StorageType Storage, unsigned Line,
- bool ExportSymbols, ArrayRef<Metadata *> Ops)
+ DINamespace(LLVMContext &Context, StorageType Storage, bool ExportSymbols,
+ ArrayRef<Metadata *> Ops)
: DIScope(Context, DINamespaceKind, Storage, dwarf::DW_TAG_namespace,
Ops),
- Line(Line), ExportSymbols(ExportSymbols) {}
+ ExportSymbols(ExportSymbols) {}
~DINamespace() = default;
static DINamespace *getImpl(LLVMContext &Context, DIScope *Scope,
- DIFile *File, StringRef Name, unsigned Line,
- bool ExportSymbols, StorageType Storage,
- bool ShouldCreate = true) {
- return getImpl(Context, Scope, File, getCanonicalMDString(Context, Name),
- Line, ExportSymbols, Storage, ShouldCreate);
+ StringRef Name, bool ExportSymbols,
+ StorageType Storage, bool ShouldCreate = true) {
+ return getImpl(Context, Scope, getCanonicalMDString(Context, Name),
+ ExportSymbols, Storage, ShouldCreate);
}
static DINamespace *getImpl(LLVMContext &Context, Metadata *Scope,
- Metadata *File, MDString *Name, unsigned Line,
- bool ExportSymbols, StorageType Storage,
- bool ShouldCreate = true);
+ MDString *Name, bool ExportSymbols,
+ StorageType Storage, bool ShouldCreate = true);
TempDINamespace cloneImpl() const {
- return getTemporary(getContext(), getScope(), getFile(), getName(),
- getLine(), getExportSymbols());
+ return getTemporary(getContext(), getScope(), getName(),
+ getExportSymbols());
}
public:
- DEFINE_MDNODE_GET(DINamespace, (DIScope * Scope, DIFile *File, StringRef Name,
- unsigned Line, bool ExportSymbols),
- (Scope, File, Name, Line, ExportSymbols))
DEFINE_MDNODE_GET(DINamespace,
- (Metadata * Scope, Metadata *File, MDString *Name,
- unsigned Line, bool ExportSymbols),
- (Scope, File, Name, Line, ExportSymbols))
+ (DIScope *Scope, StringRef Name, bool ExportSymbols),
+ (Scope, Name, ExportSymbols))
+ DEFINE_MDNODE_GET(DINamespace,
+ (Metadata *Scope, MDString *Name, bool ExportSymbols),
+ (Scope, Name, ExportSymbols))
TempDINamespace clone() const { return cloneImpl(); }
- unsigned getLine() const { return Line; }
bool getExportSymbols() const { return ExportSymbols; }
DIScope *getScope() const { return cast_or_null<DIScope>(getRawScope()); }
StringRef getName() const { return getStringOperand(2); }
@@ -2265,6 +2272,17 @@ public:
/// Return whether this is a piece of an aggregate variable.
bool isFragment() const { return getFragmentInfo().hasValue(); }
+
+ /// Append \p Ops with operations to apply the \p Offset.
+ static void appendOffset(SmallVectorImpl<uint64_t> &Ops, int64_t Offset);
+
+ /// Constants for DIExpression::prepend.
+ enum { NoDeref = false, WithDeref = true, WithStackValue = true };
+
+ /// Prepend \p DIExpr with a deref and offset operation and optionally turn it
+ /// into a stack value.
+ static DIExpression *prepend(const DIExpression *DIExpr, bool Deref,
+ int64_t Offset = 0, bool StackValue = false);
};
/// Global variables.
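
The appendOffset/prepend helpers above centralize a pattern that debug-info-preserving passes otherwise hand-roll. A minimal C++ sketch, with the spill scenario and helper name as illustrative assumptions:

    #include "llvm/IR/DebugInfoMetadata.h"

    // Minimal sketch: a variable's storage moved behind a pointer at a fixed
    // byte offset, so a deref plus that offset is prepended to its expression.
    static llvm::DIExpression *adjustForSpill(const llvm::DIExpression *Expr,
                                              int64_t ByteOffset) {
      return llvm::DIExpression::prepend(Expr, llvm::DIExpression::WithDeref,
                                         ByteOffset);
    }
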
diff --git a/include/llvm/IR/Function.h b/include/llvm/IR/Function.h
index a3762a44ccb6..9e723f977755 100644
--- a/include/llvm/IR/Function.h
+++ b/include/llvm/IR/Function.h
@@ -316,18 +316,20 @@ public:
void addDereferenceableOrNullAttr(unsigned i, uint64_t Bytes);
/// @brief Extract the alignment for a call or parameter (0=unknown).
- unsigned getParamAlignment(unsigned i) const {
- return AttributeSets.getParamAlignment(i);
+ unsigned getParamAlignment(unsigned ArgNo) const {
+ return AttributeSets.getParamAlignment(ArgNo);
}
/// @brief Extract the number of dereferenceable bytes for a call or
/// parameter (0=unknown).
+ /// @param i AttributeList index, referring to a return value or argument.
uint64_t getDereferenceableBytes(unsigned i) const {
return AttributeSets.getDereferenceableBytes(i);
}
/// @brief Extract the number of dereferenceable_or_null bytes for a call or
/// parameter (0=unknown).
+ /// @param i AttributeList index, referring to a return value or argument.
uint64_t getDereferenceableOrNullBytes(unsigned i) const {
return AttributeSets.getDereferenceableOrNullBytes(i);
}
@@ -416,6 +418,14 @@ public:
removeFnAttr(Attribute::Convergent);
}
+  /// @brief Determine if the function is speculatable (safe to execute speculatively).
+ bool isSpeculatable() const {
+ return hasFnAttribute(Attribute::Speculatable);
+ }
+ void setSpeculatable() {
+ addFnAttr(Attribute::Speculatable);
+ }
+
/// Determine if the function is known not to recurse, directly or
/// indirectly.
bool doesNotRecurse() const {
@@ -440,10 +450,10 @@ public:
}
/// @brief Determine if the function returns a structure through first
- /// pointer argument.
+ /// or second pointer argument.
bool hasStructRetAttr() const {
- return AttributeSets.hasAttribute(1, Attribute::StructRet) ||
- AttributeSets.hasAttribute(2, Attribute::StructRet);
+ return AttributeSets.hasParamAttribute(0, Attribute::StructRet) ||
+ AttributeSets.hasParamAttribute(1, Attribute::StructRet);
}
/// @brief Determine if the parameter or return value is marked with NoAlias
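
A minimal C++ sketch of the new speculatable accessors; the helper names are illustrative, and whether a call may really be hoisted depends on more than this one attribute:

    #include "llvm/IR/Function.h"

    // Minimal sketch: query and set the speculatable function attribute.
    static bool callTargetIsSpeculatable(const llvm::Function &F) {
      return F.isSpeculatable();
    }
    static void markSpeculatable(llvm::Function &F) {
      F.setSpeculatable(); // adds Attribute::Speculatable to the function
    }
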
diff --git a/include/llvm/IR/InstrTypes.h b/include/llvm/IR/InstrTypes.h
index 518094735d72..6795b029cce9 100644
--- a/include/llvm/IR/InstrTypes.h
+++ b/include/llvm/IR/InstrTypes.h
@@ -1059,18 +1059,6 @@ public:
return isFalseWhenEqual(getPredicate());
}
- /// @brief Determine if Pred1 implies Pred2 is true when two compares have
- /// matching operands.
- bool isImpliedTrueByMatchingCmp(Predicate Pred2) const {
- return isImpliedTrueByMatchingCmp(getPredicate(), Pred2);
- }
-
- /// @brief Determine if Pred1 implies Pred2 is false when two compares have
- /// matching operands.
- bool isImpliedFalseByMatchingCmp(Predicate Pred2) const {
- return isImpliedFalseByMatchingCmp(getPredicate(), Pred2);
- }
-
/// @returns true if the predicate is unsigned, false otherwise.
/// @brief Determine if the predicate is an unsigned operation.
static bool isUnsigned(Predicate predicate);
diff --git a/include/llvm/IR/Instructions.h b/include/llvm/IR/Instructions.h
index d23c1ddf9257..4d3f1dc267f2 100644
--- a/include/llvm/IR/Instructions.h
+++ b/include/llvm/IR/Instructions.h
@@ -1714,9 +1714,12 @@ public:
/// (\p i - 1) in the operand list.
bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
+ /// Extract the alignment of the return value.
+ unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
+
/// Extract the alignment for a call or parameter (0=unknown).
- unsigned getParamAlignment(unsigned i) const {
- return Attrs.getParamAlignment(i);
+ unsigned getParamAlignment(unsigned ArgNo) const {
+ return Attrs.getParamAlignment(ArgNo);
}
/// Extract the number of dereferenceable bytes for a call or
@@ -3804,9 +3807,12 @@ public:
/// (\p i - 1) in the operand list.
bool dataOperandHasImpliedAttr(unsigned i, Attribute::AttrKind Kind) const;
+ /// Extract the alignment of the return value.
+ unsigned getRetAlignment() const { return Attrs.getRetAlignment(); }
+
/// Extract the alignment for a call or parameter (0=unknown).
- unsigned getParamAlignment(unsigned i) const {
- return Attrs.getParamAlignment(i);
+ unsigned getParamAlignment(unsigned ArgNo) const {
+ return Attrs.getParamAlignment(ArgNo);
}
/// Extract the number of dereferenceable bytes for a call or
diff --git a/include/llvm/IR/IntrinsicInst.h b/include/llvm/IR/IntrinsicInst.h
index f69b5bfc0be2..05e3315cbab2 100644
--- a/include/llvm/IR/IntrinsicInst.h
+++ b/include/llvm/IR/IntrinsicInst.h
@@ -201,8 +201,8 @@ namespace llvm {
Value *getNumElements() const { return getArgOperand(2); }
void setNumElements(Value *V) { setArgOperand(2, V); }
- uint64_t getSrcAlignment() const { return getParamAlignment(1); }
- uint64_t getDstAlignment() const { return getParamAlignment(2); }
+ uint64_t getSrcAlignment() const { return getParamAlignment(0); }
+ uint64_t getDstAlignment() const { return getParamAlignment(1); }
uint64_t getElementSizeInBytes() const {
Value *Arg = getArgOperand(3);
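
These accessors move from AttributeList indices (where 0 used to mean the return value) to plain 0-based argument numbers, with the return value split out into getRetAlignment(). A minimal C++ sketch of the new convention on a CallInst:

    #include "llvm/IR/Instructions.h"
    #include "llvm/Support/raw_ostream.h"

    // Minimal sketch: argument numbers are 0-based and the return value is
    // queried separately, so no off-by-one index bookkeeping is needed.
    static void dumpAlignments(const llvm::CallInst &CI) {
      llvm::errs() << "ret align: " << CI.getRetAlignment() << "\n";
      for (unsigned ArgNo = 0, E = CI.getNumArgOperands(); ArgNo != E; ++ArgNo)
        llvm::errs() << "arg " << ArgNo << " align: "
                     << CI.getParamAlignment(ArgNo) << "\n";
    }
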
diff --git a/include/llvm/IR/Intrinsics.td b/include/llvm/IR/Intrinsics.td
index 309b21489224..39b992cd06a8 100644
--- a/include/llvm/IR/Intrinsics.td
+++ b/include/llvm/IR/Intrinsics.td
@@ -98,6 +98,18 @@ def IntrNoDuplicate : IntrinsicProperty;
// Parallels the convergent attribute on LLVM IR functions.
def IntrConvergent : IntrinsicProperty;
+// This property indicates that the intrinsic is safe to speculate.
+def IntrSpeculatable : IntrinsicProperty;
+
+// This property can be used to override the 'has no other side effects'
+// language of the IntrNoMem, IntrReadMem, IntrWriteMem, and IntrArgMemOnly
+// intrinsic properties. By default, intrinsics are assumed to have side
+// effects, so this property is only necessary if you have defined one of
+// the memory properties listed above.
+// For this property, 'side effects' has the same meaning as 'side effects'
+// defined by the hasSideEffects property of the TableGen Instruction class.
+def IntrHasSideEffects : IntrinsicProperty;
+
//===----------------------------------------------------------------------===//
// Types used by intrinsics.
//===----------------------------------------------------------------------===//
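
A minimal C++ sketch of what IntrSpeculatable buys on the IR side, assuming (as the AMDGPU changes below rely on) that the property is emitted as the speculatable attribute on the intrinsic's declaration:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/Module.h"

    // Minimal sketch: materialize a (non-overloaded) intrinsic declaration and
    // check whether TableGen marked it speculatable.
    static bool intrinsicIsSpeculatable(llvm::Module &M, llvm::Intrinsic::ID ID) {
      llvm::Function *F = llvm::Intrinsic::getDeclaration(&M, ID);
      return F->hasFnAttribute(llvm::Attribute::Speculatable);
    }
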
diff --git a/include/llvm/IR/IntrinsicsAMDGPU.td b/include/llvm/IR/IntrinsicsAMDGPU.td
index 21d8a15e7e7a..d7413fe9e56f 100644
--- a/include/llvm/IR/IntrinsicsAMDGPU.td
+++ b/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -12,10 +12,10 @@
//===----------------------------------------------------------------------===//
class AMDGPUReadPreloadRegisterIntrinsic
- : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
- : Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>, GCCBuiltin<name>;
+ : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;
let TargetPrefix = "r600" in {
@@ -47,7 +47,8 @@ def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
+ [IntrNoMem, IntrSpeculatable]>;
def int_r600_rat_store_typed :
// 1st parameter: Data
@@ -57,15 +58,15 @@ def int_r600_rat_store_typed :
GCCBuiltin<"__builtin_r600_rat_store_typed">;
def int_r600_recipsqrt_ieee : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_r600_recipsqrt_clamped : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_r600_cube : Intrinsic<
- [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem]
+ [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;
} // End TargetPrefix = "r600"
@@ -82,31 +83,51 @@ defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
def int_amdgcn_dispatch_ptr :
GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
+ [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_queue_ptr :
GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
+ [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_kernarg_segment_ptr :
GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
+ [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_implicitarg_ptr :
GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
+ [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_groupstaticsize :
GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
- Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_dispatch_id :
GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
- Intrinsic<[llvm_i64_ty], [], [IntrNoMem]>;
+ Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_implicit_buffer_ptr :
GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
- Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [], [IntrNoMem]>;
+ Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 2>], [],
+ [IntrNoMem, IntrSpeculatable]>;
+
+// Set EXEC to the 64-bit value given.
+// This is always moved to the beginning of the basic block.
+def int_amdgcn_init_exec : Intrinsic<[],
+ [llvm_i64_ty], // 64-bit literal constant
+ [IntrConvergent]>;
+
+// Set EXEC according to a thread count packed in an SGPR input:
+// thread_count = (input >> bitoffset) & 0x7f;
+// This is always moved to the beginning of the basic block.
+def int_amdgcn_init_exec_from_input : Intrinsic<[],
+ [llvm_i32_ty, // 32-bit SGPR input
+ llvm_i32_ty], // bit offset of the thread count
+ [IntrConvergent]>;
+
//===----------------------------------------------------------------------===//
// Instruction Intrinsics
@@ -135,115 +156,129 @@ def int_amdgcn_div_scale : Intrinsic<
// second. (0 = first, 1 = second).
[llvm_anyfloat_ty, llvm_i1_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
- [IntrNoMem]
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
- [IntrNoMem]
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
- [IntrNoMem]
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_trig_preop : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_sin : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cos : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_log_clamp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
- Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_rcp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
- Intrinsic<[llvm_float_ty], [llvm_float_ty], [IntrNoMem]
+ Intrinsic<[llvm_float_ty], [llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_rsq : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
Intrinsic<
- [llvm_float_ty], [llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_rsq_clamp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;
def int_amdgcn_ldexp : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_frexp_mant : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_frexp_exp : Intrinsic<
- [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem]
+ [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
>;
// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
- [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]
+ [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cvt_pkrtz : Intrinsic<
- [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_class : Intrinsic<
- [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty], [IntrNoMem]
+ [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
Intrinsic<[llvm_anyfloat_ty],
- [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]
+ [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
Intrinsic<[llvm_float_ty],
- [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty, llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
- Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>], [IntrNoMem]>;
+ Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
+ [IntrNoMem, IntrSpeculatable]
+>;
// Fields should mirror atomicrmw
@@ -527,7 +562,9 @@ def int_amdgcn_s_decperflevel :
def int_amdgcn_s_getreg :
GCCBuiltin<"__builtin_amdgcn_s_getreg">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrReadMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
+ [IntrReadMem, IntrSpeculatable]
+>;
// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
@@ -535,23 +572,24 @@ def int_amdgcn_interp_mov :
GCCBuiltin<"__builtin_amdgcn_interp_mov">,
Intrinsic<[llvm_float_ty],
[llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>;
+ [IntrNoMem, IntrSpeculatable]>;
// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
+// This intrinsic reads from lds, but the memory values are constant,
+// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
GCCBuiltin<"__builtin_amdgcn_interp_p1">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>; // This intrinsic reads from lds, but the memory
- // values are constant, so it behaves like IntrNoMem.
+ [IntrNoMem, IntrSpeculatable]>;
// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
GCCBuiltin<"__builtin_amdgcn_interp_p2">,
Intrinsic<[llvm_float_ty],
[llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
- [IntrNoMem]>; // See int_amdgcn_v_interp_p1 for why this is
- // IntrNoMem.
+ [IntrNoMem, IntrSpeculatable]>;
+ // See int_amdgcn_v_interp_p1 for why this is IntrNoMem.
// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation).
@@ -574,48 +612,68 @@ def int_amdgcn_ds_swizzle :
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
- [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]
+ [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
def int_amdgcn_lerp :
GCCBuiltin<"__builtin_amdgcn_lerp">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_sad_u8 :
GCCBuiltin<"__builtin_amdgcn_sad_u8">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_msad_u8 :
GCCBuiltin<"__builtin_amdgcn_msad_u8">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_sad_hi_u8 :
GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_sad_u16 :
GCCBuiltin<"__builtin_amdgcn_sad_u16">,
- Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_qsad_pk_u16_u8 :
GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
- Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_mqsad_pk_u16_u8 :
GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
- Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_mqsad_u32_u8 :
GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
- Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_cvt_pk_u8_f32 :
GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
- Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;
+ Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
+ [IntrNoMem, IntrSpeculatable]
+>;
def int_amdgcn_icmp :
Intrinsic<[llvm_i64_ty], [llvm_anyint_ty, LLVMMatchType<0>, llvm_i32_ty],
@@ -716,6 +774,7 @@ def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
// Emit 2.5 ulp, no denormal division. Should only be inserted by
// a pass based on !fpmath metadata.
def int_amdgcn_fdiv_fast : Intrinsic<
- [llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]
+ [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
+ [IntrNoMem, IntrSpeculatable]
>;
}
diff --git a/include/llvm/IR/ModuleSummaryIndex.h b/include/llvm/IR/ModuleSummaryIndex.h
index 9c0a4159cad2..a7274fbfbced 100644
--- a/include/llvm/IR/ModuleSummaryIndex.h
+++ b/include/llvm/IR/ModuleSummaryIndex.h
@@ -644,13 +644,6 @@ public:
return It->second.second;
}
- /// Add the given per-module index into this module index/summary,
- /// assigning it the given module ID. Each module merged in should have
- /// a unique ID, necessary for consistent renaming of promoted
- /// static (local) variables.
- void mergeFrom(std::unique_ptr<ModuleSummaryIndex> Other,
- uint64_t NextModuleId);
-
/// Convenience method for creating a promoted global name
/// for the given value name of a local, and its original module's ID.
static std::string getGlobalNameForLocal(StringRef Name, ModuleHash ModHash) {
@@ -703,13 +696,6 @@ public:
return &I->second;
}
- /// Remove entries in the GlobalValueMap that have empty summaries due to the
- /// eager nature of map entry creation during VST parsing. These would
- /// also be suppressed during combined index generation in mergeFrom(),
- /// but if there was only one module or this was the first module we might
- /// not invoke mergeFrom.
- void removeEmptySummaryEntries();
-
/// Collect for the given module the list of function it defines
/// (GUID -> Summary).
void collectDefinedFunctionsForModule(StringRef ModulePath,
diff --git a/include/llvm/IR/ValueHandle.h b/include/llvm/IR/ValueHandle.h
index 4838bac9e0f7..393618d5511b 100644
--- a/include/llvm/IR/ValueHandle.h
+++ b/include/llvm/IR/ValueHandle.h
@@ -34,19 +34,14 @@ protected:
///
/// This is to avoid having a vtable for the light-weight handle pointers. The
/// fully general Callback version does have a vtable.
- enum HandleBaseKind {
- Assert,
- Callback,
- Tracking,
- Weak
- };
+ enum HandleBaseKind { Assert, Callback, Weak, WeakTracking };
ValueHandleBase(const ValueHandleBase &RHS)
: ValueHandleBase(RHS.PrevPair.getInt(), RHS) {}
ValueHandleBase(HandleBaseKind Kind, const ValueHandleBase &RHS)
- : PrevPair(nullptr, Kind), Next(nullptr), V(RHS.V) {
- if (isValid(V))
+ : PrevPair(nullptr, Kind), Next(nullptr), Val(RHS.getValPtr()) {
+ if (isValid(getValPtr()))
AddToExistingUseList(RHS.getPrevPtr());
}
@@ -54,43 +49,51 @@ private:
PointerIntPair<ValueHandleBase**, 2, HandleBaseKind> PrevPair;
ValueHandleBase *Next;
- Value* V;
+ Value *Val;
+
+ void setValPtr(Value *V) { Val = V; }
public:
explicit ValueHandleBase(HandleBaseKind Kind)
- : PrevPair(nullptr, Kind), Next(nullptr), V(nullptr) {}
+ : PrevPair(nullptr, Kind), Next(nullptr), Val(nullptr) {}
ValueHandleBase(HandleBaseKind Kind, Value *V)
- : PrevPair(nullptr, Kind), Next(nullptr), V(V) {
- if (isValid(V))
+ : PrevPair(nullptr, Kind), Next(nullptr), Val(V) {
+ if (isValid(getValPtr()))
AddToUseList();
}
~ValueHandleBase() {
- if (isValid(V))
+ if (isValid(getValPtr()))
RemoveFromUseList();
}
Value *operator=(Value *RHS) {
- if (V == RHS) return RHS;
- if (isValid(V)) RemoveFromUseList();
- V = RHS;
- if (isValid(V)) AddToUseList();
+ if (getValPtr() == RHS)
+ return RHS;
+ if (isValid(getValPtr()))
+ RemoveFromUseList();
+ setValPtr(RHS);
+ if (isValid(getValPtr()))
+ AddToUseList();
return RHS;
}
Value *operator=(const ValueHandleBase &RHS) {
- if (V == RHS.V) return RHS.V;
- if (isValid(V)) RemoveFromUseList();
- V = RHS.V;
- if (isValid(V)) AddToExistingUseList(RHS.getPrevPtr());
- return V;
+ if (getValPtr() == RHS.getValPtr())
+ return RHS.getValPtr();
+ if (isValid(getValPtr()))
+ RemoveFromUseList();
+ setValPtr(RHS.getValPtr());
+ if (isValid(getValPtr()))
+ AddToExistingUseList(RHS.getPrevPtr());
+ return getValPtr();
}
- Value *operator->() const { return V; }
- Value &operator*() const { return *V; }
+ Value *operator->() const { return getValPtr(); }
+ Value &operator*() const { return *getValPtr(); }
protected:
- Value *getValPtr() const { return V; }
+ Value *getValPtr() const { return Val; }
static bool isValid(Value *V) {
return V &&
@@ -105,7 +108,7 @@ protected:
///
/// This should only be used if a derived class has manually removed the
/// handle from the use list.
- void clearValPtr() { V = nullptr; }
+ void clearValPtr() { setValPtr(nullptr); }
public:
// Callbacks made from Value.
@@ -131,19 +134,16 @@ private:
void AddToUseList();
};
-/// \brief Value handle that is nullable, but tries to track the Value.
+/// \brief A nullable Value handle.
///
-/// This is a value handle that tries hard to point to a Value, even across
-/// RAUW operations, but will null itself out if the value is destroyed. this
-/// is useful for advisory sorts of information, but should not be used as the
-/// key of a map (since the map would have to rearrange itself when the pointer
-/// changes).
+/// This is a value handle that points to a value, and nulls itself
+/// out if that value is deleted.
class WeakVH : public ValueHandleBase {
public:
WeakVH() : ValueHandleBase(Weak) {}
WeakVH(Value *P) : ValueHandleBase(Weak, P) {}
WeakVH(const WeakVH &RHS)
- : ValueHandleBase(Weak, RHS) {}
+ : ValueHandleBase(Weak, RHS) {}
WeakVH &operator=(const WeakVH &RHS) = default;
@@ -170,6 +170,51 @@ template <> struct simplify_type<const WeakVH> {
static SimpleType getSimplifiedValue(const WeakVH &WVH) { return WVH; }
};
+/// \brief Value handle that is nullable, but tries to track the Value.
+///
+/// This is a value handle that tries hard to point to a Value, even across
+/// RAUW operations, but will null itself out if the value is destroyed. This
+/// is useful for advisory sorts of information, but should not be used as the
+/// key of a map (since the map would have to rearrange itself when the pointer
+/// changes).
+class WeakTrackingVH : public ValueHandleBase {
+public:
+ WeakTrackingVH() : ValueHandleBase(WeakTracking) {}
+ WeakTrackingVH(Value *P) : ValueHandleBase(WeakTracking, P) {}
+ WeakTrackingVH(const WeakTrackingVH &RHS)
+ : ValueHandleBase(WeakTracking, RHS) {}
+
+ WeakTrackingVH &operator=(const WeakTrackingVH &RHS) = default;
+
+ Value *operator=(Value *RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+ Value *operator=(const ValueHandleBase &RHS) {
+ return ValueHandleBase::operator=(RHS);
+ }
+
+ operator Value*() const {
+ return getValPtr();
+ }
+
+ bool pointsToAliveValue() const {
+ return ValueHandleBase::isValid(getValPtr());
+ }
+};
+
+// Specialize simplify_type to allow WeakTrackingVH to participate in
+// dyn_cast, isa, etc.
+template <> struct simplify_type<WeakTrackingVH> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(WeakTrackingVH &WVH) { return WVH; }
+};
+template <> struct simplify_type<const WeakTrackingVH> {
+ typedef Value *SimpleType;
+ static SimpleType getSimplifiedValue(const WeakTrackingVH &WVH) {
+ return WVH;
+ }
+};
+
/// \brief Value handle that asserts if the Value is deleted.
///
/// This is a Value Handle that points to a value and asserts out if the value
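
A minimal C++ sketch of the WeakVH/WeakTrackingVH split introduced in this hunk; it assumes Old and New have the same type so replaceAllUsesWith() is legal, and that Old stays alive:

    #include <cassert>
    #include "llvm/IR/ValueHandle.h"

    // Minimal sketch: WeakVH only nulls out when its value is deleted, while
    // WeakTrackingVH additionally follows replaceAllUsesWith().
    static void handleSemantics(llvm::Value *Old, llvm::Value *New) {
      llvm::WeakVH Plain(Old);
      llvm::WeakTrackingVH Tracking(Old);
      Old->replaceAllUsesWith(New);
      assert(static_cast<llvm::Value *>(Plain) == Old);    // not updated by RAUW
      assert(static_cast<llvm::Value *>(Tracking) == New); // updated by RAUW
    }
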
@@ -272,39 +317,37 @@ struct isPodLike<AssertingVH<T> > {
/// to a Value (or subclass) across some operations which may move that value,
/// but should never destroy it or replace it with some unacceptable type.
///
-/// It is an error to do anything with a TrackingVH whose value has been
-/// destroyed, except to destruct it.
-///
/// It is an error to attempt to replace a value with one of a type which is
/// incompatible with any of its outstanding TrackingVHs.
-template<typename ValueTy>
-class TrackingVH : public ValueHandleBase {
- void CheckValidity() const {
- Value *VP = ValueHandleBase::getValPtr();
-
- // Null is always ok.
- if (!VP) return;
+///
+/// It is an error to read from a TrackingVH that does not point to a valid
+/// value. A TrackingVH is said to not point to a valid value if it has not
+/// yet been assigned a value, or if the value it was tracking has since
+/// been deleted.
+///
+/// Assigning a value to a TrackingVH is always allowed, even if said TrackingVH
+/// no longer points to a valid value.
+template <typename ValueTy> class TrackingVH {
+ WeakTrackingVH InnerHandle;
- // Check that this value is valid (i.e., it hasn't been deleted). We
- // explicitly delay this check until access to avoid requiring clients to be
- // unnecessarily careful w.r.t. destruction.
- assert(ValueHandleBase::isValid(VP) && "Tracked Value was deleted!");
+public:
+ ValueTy *getValPtr() const {
+ assert(InnerHandle.pointsToAliveValue() &&
+ "TrackingVH must be non-null and valid on dereference!");
// Check that the value is a member of the correct subclass. We would like
// to check this property on assignment for better debugging, but we don't
// want to require a virtual interface on this VH. Instead we allow RAUW to
// replace this value with a value of an invalid type, and check it here.
- assert(isa<ValueTy>(VP) &&
+ assert(isa<ValueTy>(InnerHandle) &&
"Tracked Value was replaced by one with an invalid type!");
+ return cast<ValueTy>(InnerHandle);
}
- ValueTy *getValPtr() const {
- CheckValidity();
- return (ValueTy*)ValueHandleBase::getValPtr();
- }
void setValPtr(ValueTy *P) {
- CheckValidity();
- ValueHandleBase::operator=(GetAsValue(P));
+    // Assigning to a non-valid TrackingVH is fine, so we just unconditionally
+    // assign here.
+ InnerHandle = GetAsValue(P);
}
// Convert a ValueTy*, which may be const, to the type the base
@@ -313,8 +356,8 @@ class TrackingVH : public ValueHandleBase {
static Value *GetAsValue(const Value *V) { return const_cast<Value*>(V); }
public:
- TrackingVH() : ValueHandleBase(Tracking) {}
- TrackingVH(ValueTy *P) : ValueHandleBase(Tracking, GetAsValue(P)) {}
+ TrackingVH() {}
+ TrackingVH(ValueTy *P) { setValPtr(P); }
operator ValueTy*() const {
return getValPtr();
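
A minimal C++ sketch of the contract spelled out above; AllocaInst is just an example payload type:

    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/ValueHandle.h"

    // Minimal sketch: assigning to a TrackingVH is always legal, even to a
    // default-constructed one; reading it back asserts (in +Asserts builds)
    // unless it currently points at a live value of the expected type.
    static llvm::AllocaInst *remember(llvm::AllocaInst *AI) {
      llvm::TrackingVH<llvm::AllocaInst> Handle; // not yet pointing at a value
      Handle = AI;                               // assignment is always allowed
      return Handle;                             // read: AI must still be alive
    }
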
@@ -359,7 +402,8 @@ public:
///
/// Called when this->getValPtr() is destroyed, inside ~Value(), so you
/// may call any non-virtual Value method on getValPtr(), but no subclass
- /// methods. If WeakVH were implemented as a CallbackVH, it would use this
+  /// methods. If WeakTrackingVH were implemented as a CallbackVH, it would
+  /// use this
/// method to call setValPtr(NULL). AssertingVH would use this method to
/// cause an assertion failure.
///
@@ -370,7 +414,8 @@ public:
/// \brief Callback for Value RAUW.
///
/// Called when this->getValPtr()->replaceAllUsesWith(new_value) is called,
- /// _before_ any of the uses have actually been replaced. If WeakVH were
+  /// _before_ any of the uses have actually been replaced. If
+  /// WeakTrackingVH were
/// implemented as a CallbackVH, it would use this method to call
/// setValPtr(new_value). AssertingVH would do nothing in this method.
virtual void allUsesReplacedWith(Value *) {}
diff --git a/include/llvm/InitializePasses.h b/include/llvm/InitializePasses.h
index 15c8ff6d04de..44ff4c1a581b 100644
--- a/include/llvm/InitializePasses.h
+++ b/include/llvm/InitializePasses.h
@@ -329,6 +329,7 @@ void initializeSeparateConstOffsetFromGEPPass(PassRegistry&);
void initializeShadowStackGCLoweringPass(PassRegistry&);
void initializeShrinkWrapPass(PassRegistry&);
void initializeSimpleInlinerPass(PassRegistry&);
+void initializeSimpleLoopUnswitchLegacyPassPass(PassRegistry&);
void initializeSingleLoopExtractorPass(PassRegistry&);
void initializeSinkingLegacyPassPass(PassRegistry&);
void initializeSjLjEHPreparePass(PassRegistry&);
diff --git a/include/llvm/MC/ConstantPools.h b/include/llvm/MC/ConstantPools.h
index 643902377dd3..c34211c2bd12 100644
--- a/include/llvm/MC/ConstantPools.h
+++ b/include/llvm/MC/ConstantPools.h
@@ -42,7 +42,7 @@ struct ConstantPoolEntry {
// A class to keep track of assembler-generated constant pools that are used to
// implement the ldr-pseudo.
class ConstantPool {
- typedef SmallVector<ConstantPoolEntry, 4> EntryVecTy;
+ using EntryVecTy = SmallVector<ConstantPoolEntry, 4>;
EntryVecTy Entries;
DenseMap<int64_t, const MCSymbolRefExpr *> CachedEntries;
@@ -80,7 +80,7 @@ class AssemblerConstantPools {
// sections in a stable order to ensure that we print the
// constant pools in a deterministic order when printing an assembly
// file.
- typedef MapVector<MCSection *, ConstantPool> ConstantPoolMapTy;
+ using ConstantPoolMapTy = MapVector<MCSection *, ConstantPool>;
ConstantPoolMapTy ConstantPools;
public:
diff --git a/include/llvm/MC/LaneBitmask.h b/include/llvm/MC/LaneBitmask.h
index 89e60928405d..5ca06d1148e2 100644
--- a/include/llvm/MC/LaneBitmask.h
+++ b/include/llvm/MC/LaneBitmask.h
@@ -1,4 +1,4 @@
-//===-- llvm/MC/LaneBitmask.h -----------------------------------*- C++ -*-===//
+//===- llvm/MC/LaneBitmask.h ------------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
@@ -30,14 +30,16 @@
#ifndef LLVM_MC_LANEBITMASK_H
#define LLVM_MC_LANEBITMASK_H
+#include "llvm/Support/Compiler.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/Printable.h"
#include "llvm/Support/raw_ostream.h"
namespace llvm {
+
struct LaneBitmask {
// When changing the underlying type, change the format string as well.
- typedef unsigned Type;
+ using Type = unsigned;
enum : unsigned { BitWidth = 8*sizeof(Type) };
constexpr static const char *const FormatStr = "%08X";
@@ -84,6 +86,7 @@ namespace llvm {
OS << format(LaneBitmask::FormatStr, LaneMask.getAsInteger());
});
}
-}
+
+} // end namespace llvm
#endif // LLVM_MC_LANEBITMASK_H
diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h
index c29abaa03a6d..185b892d9621 100644
--- a/include/llvm/MC/MCAssembler.h
+++ b/include/llvm/MC/MCAssembler.h
@@ -60,36 +60,36 @@ class MCAssembler {
friend class MCAsmLayout;
public:
- typedef std::vector<MCSection *> SectionListType;
- typedef std::vector<const MCSymbol *> SymbolDataListType;
+ using SectionListType = std::vector<MCSection *>;
+ using SymbolDataListType = std::vector<const MCSymbol *>;
- typedef pointee_iterator<SectionListType::const_iterator> const_iterator;
- typedef pointee_iterator<SectionListType::iterator> iterator;
+ using const_iterator = pointee_iterator<SectionListType::const_iterator>;
+ using iterator = pointee_iterator<SectionListType::iterator>;
- typedef pointee_iterator<SymbolDataListType::const_iterator>
- const_symbol_iterator;
- typedef pointee_iterator<SymbolDataListType::iterator> symbol_iterator;
+ using const_symbol_iterator =
+ pointee_iterator<SymbolDataListType::const_iterator>;
+ using symbol_iterator = pointee_iterator<SymbolDataListType::iterator>;
- typedef iterator_range<symbol_iterator> symbol_range;
- typedef iterator_range<const_symbol_iterator> const_symbol_range;
+ using symbol_range = iterator_range<symbol_iterator>;
+ using const_symbol_range = iterator_range<const_symbol_iterator>;
- typedef std::vector<IndirectSymbolData>::const_iterator
- const_indirect_symbol_iterator;
- typedef std::vector<IndirectSymbolData>::iterator indirect_symbol_iterator;
+ using const_indirect_symbol_iterator =
+ std::vector<IndirectSymbolData>::const_iterator;
+ using indirect_symbol_iterator = std::vector<IndirectSymbolData>::iterator;
- typedef std::vector<DataRegionData>::const_iterator
- const_data_region_iterator;
- typedef std::vector<DataRegionData>::iterator data_region_iterator;
+ using const_data_region_iterator =
+ std::vector<DataRegionData>::const_iterator;
+ using data_region_iterator = std::vector<DataRegionData>::iterator;
/// MachO specific deployment target version info.
// A Major version of 0 indicates that no version information was supplied
// and so the corresponding load command should not be emitted.
- typedef struct {
+ using VersionMinInfoType = struct {
MCVersionMinType Kind;
unsigned Major;
unsigned Minor;
unsigned Update;
- } VersionMinInfoType;
+ };
private:
MCContext &Context;
diff --git a/include/llvm/MC/MCContext.h b/include/llvm/MC/MCContext.h
index b3106936e27f..9bea19631303 100644
--- a/include/llvm/MC/MCContext.h
+++ b/include/llvm/MC/MCContext.h
@@ -46,17 +46,19 @@ namespace llvm {
class MCSectionELF;
class MCSectionMachO;
class MCSectionWasm;
+ class MCStreamer;
class MCSymbol;
class MCSymbolELF;
class MCSymbolWasm;
class SMLoc;
+ class SourceMgr;
/// Context object for machine code objects. This class owns all of the
/// sections that it creates.
///
class MCContext {
public:
- typedef StringMap<MCSymbol *, BumpPtrAllocator &> SymbolTable;
+ using SymbolTable = StringMap<MCSymbol *, BumpPtrAllocator &>;
private:
/// The SourceMgr for this object, if any.
@@ -223,10 +225,12 @@ namespace llvm {
std::string SectionName;
StringRef GroupName;
unsigned UniqueID;
+
WasmSectionKey(StringRef SectionName, StringRef GroupName,
unsigned UniqueID)
: SectionName(SectionName), GroupName(GroupName), UniqueID(UniqueID) {
}
+
bool operator<(const WasmSectionKey &Other) const {
if (SectionName != Other.SectionName)
return SectionName < Other.SectionName;
diff --git a/include/llvm/MC/MCDwarf.h b/include/llvm/MC/MCDwarf.h
index 0d69c2005cb4..79f1b9525019 100644
--- a/include/llvm/MC/MCDwarf.h
+++ b/include/llvm/MC/MCDwarf.h
@@ -168,10 +168,10 @@ public:
MCLineDivisions[Sec].push_back(LineEntry);
}
- typedef std::vector<MCDwarfLineEntry> MCDwarfLineEntryCollection;
- typedef MCDwarfLineEntryCollection::iterator iterator;
- typedef MCDwarfLineEntryCollection::const_iterator const_iterator;
- typedef MapVector<MCSection *, MCDwarfLineEntryCollection> MCLineDivisionMap;
+ using MCDwarfLineEntryCollection = std::vector<MCDwarfLineEntry>;
+ using iterator = MCDwarfLineEntryCollection::iterator;
+ using const_iterator = MCDwarfLineEntryCollection::const_iterator;
+ using MCLineDivisionMap = MapVector<MCSection *, MCDwarfLineEntryCollection>;
private:
// A collection of MCDwarfLineEntry for each section.
diff --git a/include/llvm/MC/MCExpr.h b/include/llvm/MC/MCExpr.h
index c850abf42e2c..a91a31414bdb 100644
--- a/include/llvm/MC/MCExpr.h
+++ b/include/llvm/MC/MCExpr.h
@@ -28,7 +28,8 @@ class MCSymbol;
class MCValue;
class raw_ostream;
class StringRef;
-typedef DenseMap<const MCSection *, uint64_t> SectionAddrMap;
+
+using SectionAddrMap = DenseMap<const MCSection *, uint64_t>;
/// \brief Base class for the full range of assembler expressions which are
/// needed for parsing.
diff --git a/include/llvm/MC/MCFragment.h b/include/llvm/MC/MCFragment.h
index fc8257f90a9f..0ca530c45102 100644
--- a/include/llvm/MC/MCFragment.h
+++ b/include/llvm/MC/MCFragment.h
@@ -200,8 +200,8 @@ protected:
Sec) {}
public:
- typedef SmallVectorImpl<MCFixup>::const_iterator const_fixup_iterator;
- typedef SmallVectorImpl<MCFixup>::iterator fixup_iterator;
+ using const_fixup_iterator = SmallVectorImpl<MCFixup>::const_iterator;
+ using fixup_iterator = SmallVectorImpl<MCFixup>::iterator;
SmallVectorImpl<MCFixup> &getFixups() { return Fixups; }
const SmallVectorImpl<MCFixup> &getFixups() const { return Fixups; }
diff --git a/include/llvm/MC/MCInst.h b/include/llvm/MC/MCInst.h
index 702279659371..9bf440ea96d2 100644
--- a/include/llvm/MC/MCInst.h
+++ b/include/llvm/MC/MCInst.h
@@ -176,8 +176,9 @@ public:
void addOperand(const MCOperand &Op) { Operands.push_back(Op); }
- typedef SmallVectorImpl<MCOperand>::iterator iterator;
- typedef SmallVectorImpl<MCOperand>::const_iterator const_iterator;
+ using iterator = SmallVectorImpl<MCOperand>::iterator;
+ using const_iterator = SmallVectorImpl<MCOperand>::const_iterator;
+
void clear() { Operands.clear(); }
void erase(iterator I) { Operands.erase(I); }
size_t size() const { return Operands.size(); }
diff --git a/include/llvm/MC/MCLinkerOptimizationHint.h b/include/llvm/MC/MCLinkerOptimizationHint.h
index 0c3525bbeda6..f0fd07f43cf3 100644
--- a/include/llvm/MC/MCLinkerOptimizationHint.h
+++ b/include/llvm/MC/MCLinkerOptimizationHint.h
@@ -111,7 +111,7 @@ class MCLOHDirective {
const MCAsmLayout &Layout) const;
public:
- typedef SmallVectorImpl<MCSymbol *> LOHArgs;
+ using LOHArgs = SmallVectorImpl<MCSymbol *>;
MCLOHDirective(MCLOHType Kind, const LOHArgs &Args)
: Kind(Kind), Args(Args.begin(), Args.end()) {
@@ -140,7 +140,7 @@ class MCLOHContainer {
SmallVector<MCLOHDirective, 32> Directives;
public:
- typedef SmallVectorImpl<MCLOHDirective> LOHDirectives;
+ using LOHDirectives = SmallVectorImpl<MCLOHDirective>;
MCLOHContainer() = default;
@@ -179,8 +179,8 @@ public:
};
// Add types for specialized template using MCSymbol.
-typedef MCLOHDirective::LOHArgs MCLOHArgs;
-typedef MCLOHContainer::LOHDirectives MCLOHDirectives;
+using MCLOHArgs = MCLOHDirective::LOHArgs;
+using MCLOHDirectives = MCLOHContainer::LOHDirectives;
} // end namespace llvm
diff --git a/include/llvm/MC/MCParser/MCAsmLexer.h b/include/llvm/MC/MCParser/MCAsmLexer.h
index 7ddc7722e512..7836ece2d688 100644
--- a/include/llvm/MC/MCParser/MCAsmLexer.h
+++ b/include/llvm/MC/MCParser/MCAsmLexer.h
@@ -161,6 +161,7 @@ protected: // Can only create subclasses.
bool IsAtStartOfStatement = true;
AsmCommentConsumer *CommentConsumer = nullptr;
+ bool AltMacroMode;
MCAsmLexer();
virtual AsmToken LexToken() = 0;
@@ -175,6 +176,14 @@ public:
MCAsmLexer &operator=(const MCAsmLexer &) = delete;
virtual ~MCAsmLexer();
+ bool IsaAltMacroMode() {
+ return AltMacroMode;
+ }
+
+ void SetAltMacroMode(bool AltMacroSet) {
+ AltMacroMode = AltMacroSet;
+ }
+
/// Consume the next token from the input stream and return it.
///
/// The lexer will continuously return the end-of-file token once the end of
diff --git a/include/llvm/MC/MCParser/MCAsmParser.h b/include/llvm/MC/MCParser/MCAsmParser.h
index 6763374185ec..75d45f490bde 100644
--- a/include/llvm/MC/MCParser/MCAsmParser.h
+++ b/include/llvm/MC/MCParser/MCAsmParser.h
@@ -67,9 +67,9 @@ public:
/// assembly parsers.
class MCAsmParser {
public:
- typedef bool (*DirectiveHandler)(MCAsmParserExtension*, StringRef, SMLoc);
- typedef std::pair<MCAsmParserExtension*, DirectiveHandler>
- ExtensionDirectiveHandler;
+ using DirectiveHandler = bool (*)(MCAsmParserExtension*, StringRef, SMLoc);
+ using ExtensionDirectiveHandler =
+ std::pair<MCAsmParserExtension*, DirectiveHandler>;
struct MCPendingError {
SMLoc Loc;
diff --git a/include/llvm/MC/MCParser/MCTargetAsmParser.h b/include/llvm/MC/MCParser/MCTargetAsmParser.h
index c81a7624011f..b8d3180cd49c 100644
--- a/include/llvm/MC/MCParser/MCTargetAsmParser.h
+++ b/include/llvm/MC/MCParser/MCTargetAsmParser.h
@@ -27,7 +27,7 @@ class MCStreamer;
class MCSubtargetInfo;
template <typename T> class SmallVectorImpl;
-typedef SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>> OperandVector;
+using OperandVector = SmallVectorImpl<std::unique_ptr<MCParsedAsmOperand>>;
enum AsmRewriteKind {
AOK_Delete = 0, // Rewrite should be ignored.
diff --git a/include/llvm/MC/MCRegisterInfo.h b/include/llvm/MC/MCRegisterInfo.h
index 015d0b96d9f2..de98abe0dc46 100644
--- a/include/llvm/MC/MCRegisterInfo.h
+++ b/include/llvm/MC/MCRegisterInfo.h
@@ -27,13 +27,13 @@ namespace llvm {
/// An unsigned integer type large enough to represent all physical registers,
/// but not necessarily virtual registers.
-typedef uint16_t MCPhysReg;
+using MCPhysReg = uint16_t;
/// MCRegisterClass - Base class of TargetRegisterClass.
class MCRegisterClass {
public:
- typedef const MCPhysReg* iterator;
- typedef const MCPhysReg* const_iterator;
+ using iterator = const MCPhysReg*;
+ using const_iterator = const MCPhysReg*;
const iterator RegsBegin;
const uint8_t *const RegSet;
@@ -134,7 +134,7 @@ struct MCRegisterDesc {
///
class MCRegisterInfo {
public:
- typedef const MCRegisterClass *regclass_iterator;
+ using regclass_iterator = const MCRegisterClass *;
/// DwarfLLVMRegPair - Emitted by tablegen so Dwarf<->LLVM reg mappings can be
/// performed with a binary search.
diff --git a/include/llvm/MC/MCSection.h b/include/llvm/MC/MCSection.h
index 2974d8f1b80b..7bfffbcdb7c2 100644
--- a/include/llvm/MC/MCSection.h
+++ b/include/llvm/MC/MCSection.h
@@ -47,13 +47,13 @@ public:
BundleLockedAlignToEnd
};
- typedef iplist<MCFragment> FragmentListType;
+ using FragmentListType = iplist<MCFragment>;
- typedef FragmentListType::const_iterator const_iterator;
- typedef FragmentListType::iterator iterator;
+ using const_iterator = FragmentListType::const_iterator;
+ using iterator = FragmentListType::iterator;
- typedef FragmentListType::const_reverse_iterator const_reverse_iterator;
- typedef FragmentListType::reverse_iterator reverse_iterator;
+ using const_reverse_iterator = FragmentListType::const_reverse_iterator;
+ using reverse_iterator = FragmentListType::reverse_iterator;
private:
MCSymbol *Begin;
diff --git a/include/llvm/MC/MCSectionWasm.h b/include/llvm/MC/MCSectionWasm.h
index 4e19196175c0..29d62a7a6f82 100644
--- a/include/llvm/MC/MCSectionWasm.h
+++ b/include/llvm/MC/MCSectionWasm.h
@@ -26,6 +26,7 @@ class MCSymbol;
/// This represents a section on wasm.
class MCSectionWasm final : public MCSection {
+private:
/// This is the name of the section. The referenced memory is owned by
/// TargetLoweringObjectFileWasm's WasmUniqueMap.
StringRef SectionName;
@@ -40,10 +41,11 @@ class MCSectionWasm final : public MCSection {
const MCSymbolWasm *Group;
- // The offset of the MC function section in the wasm code section.
+ // The offset of the MC function/data section in the wasm code/data section.
+  // For data relocations the offset is relative to the start of the data payload
+ // itself and does not include the size of the section header.
uint64_t SectionOffset;
-private:
friend class MCContext;
MCSectionWasm(StringRef Section, unsigned type, unsigned flags, SectionKind K,
const MCSymbolWasm *group, unsigned UniqueID, MCSymbol *Begin)
diff --git a/include/llvm/MC/MCStreamer.h b/include/llvm/MC/MCStreamer.h
index eb301031ba3f..5390e7942424 100644
--- a/include/llvm/MC/MCStreamer.h
+++ b/include/llvm/MC/MCStreamer.h
@@ -44,12 +44,11 @@ class MCInstPrinter;
class MCSection;
class MCStreamer;
class MCSymbolRefExpr;
-class MCSymbolWasm;
class MCSubtargetInfo;
class raw_ostream;
class Twine;
-typedef std::pair<MCSection *, const MCExpr *> MCSectionSubPair;
+using MCSectionSubPair = std::pair<MCSection *, const MCExpr *>;
/// Target specific streamer interface. This is used so that targets can
/// implement support for target specific assembly directives.
diff --git a/include/llvm/MC/MCSubtargetInfo.h b/include/llvm/MC/MCSubtargetInfo.h
index bb16463588c3..d1d5d070bf5b 100644
--- a/include/llvm/MC/MCSubtargetInfo.h
+++ b/include/llvm/MC/MCSubtargetInfo.h
@@ -26,6 +26,7 @@
#include <string>
namespace llvm {
+
class MachineInstr;
class MCInst;
@@ -63,8 +64,7 @@ public:
MCSubtargetInfo() = delete;
MCSubtargetInfo &operator=(const MCSubtargetInfo &) = delete;
MCSubtargetInfo &operator=(MCSubtargetInfo &&) = delete;
-
- virtual ~MCSubtargetInfo() {}
+ virtual ~MCSubtargetInfo() = default;
/// getTargetTriple - Return the target triple string.
const Triple &getTargetTriple() const { return TargetTriple; }
@@ -178,11 +178,11 @@ public:
/// Returns string representation of scheduler comment
virtual std::string getSchedInfoStr(const MachineInstr &MI) const {
- return std::string();
+ return {};
}
virtual std::string getSchedInfoStr(MCInst const &MCI) const {
- return std::string();
+ return {};
}
};
diff --git a/include/llvm/MC/MCSymbol.h b/include/llvm/MC/MCSymbol.h
index e8432afd8627..9b1cc6e7d7e8 100644
--- a/include/llvm/MC/MCSymbol.h
+++ b/include/llvm/MC/MCSymbol.h
@@ -145,10 +145,10 @@ protected:
/// MCSymbol contains a uint64_t so is probably aligned to 8. On a 32-bit
/// system, the name is a pointer so isn't going to satisfy the 8 byte
/// alignment of uint64_t. Account for that here.
- typedef union {
+ using NameEntryStorageTy = union {
const StringMapEntry<bool> *NameEntry;
uint64_t AlignmentPadding;
- } NameEntryStorageTy;
+ };
MCSymbol(SymbolKind Kind, const StringMapEntry<bool> *Name, bool isTemporary)
: IsTemporary(isTemporary), IsRedefinable(false), IsUsed(false),
diff --git a/include/llvm/MC/MCWasmObjectWriter.h b/include/llvm/MC/MCWasmObjectWriter.h
index 6e458eaac9c8..a4dd382706d7 100644
--- a/include/llvm/MC/MCWasmObjectWriter.h
+++ b/include/llvm/MC/MCWasmObjectWriter.h
@@ -32,12 +32,12 @@ class raw_pwrite_stream;
struct WasmRelocationEntry {
uint64_t Offset; // Where is the relocation.
const MCSymbolWasm *Symbol; // The symbol to relocate with.
- uint64_t Addend; // A value to add to the symbol.
+ int64_t Addend; // A value to add to the symbol.
unsigned Type; // The type of the relocation.
MCSectionWasm *FixupSection;// The section the relocation is targeting.
WasmRelocationEntry(uint64_t Offset, const MCSymbolWasm *Symbol,
- uint64_t Addend, unsigned Type,
+ int64_t Addend, unsigned Type,
MCSectionWasm *FixupSection)
: Offset(Offset), Symbol(Symbol), Addend(Addend), Type(Type),
FixupSection(FixupSection) {}
diff --git a/include/llvm/Object/Binary.h b/include/llvm/Object/Binary.h
index 06788326ff57..f42048e48ee3 100644
--- a/include/llvm/Object/Binary.h
+++ b/include/llvm/Object/Binary.h
@@ -42,7 +42,6 @@ protected:
ID_MachOUniversalBinary,
ID_COFFImportFile,
ID_IR, // LLVM IR
- ID_ModuleSummaryIndex, // Module summary index
// Object and children.
ID_StartObjects,
@@ -128,8 +127,6 @@ public:
return TypeID == ID_IR;
}
- bool isModuleSummaryIndex() const { return TypeID == ID_ModuleSummaryIndex; }
-
bool isLittleEndian() const {
return !(TypeID == ID_ELF32B || TypeID == ID_ELF64B ||
TypeID == ID_MachO32B || TypeID == ID_MachO64B);
diff --git a/include/llvm/Object/COFF.h b/include/llvm/Object/COFF.h
index e0bb8f1cf3dd..1b6aaf4be666 100644
--- a/include/llvm/Object/COFF.h
+++ b/include/llvm/Object/COFF.h
@@ -623,6 +623,15 @@ struct coff_base_reloc_block_entry {
int getOffset() const { return Data & ((1 << 12) - 1); }
};
+struct coff_resource_dir_table {
+ support::ulittle32_t Characteristics;
+ support::ulittle32_t TimeDateStamp;
+ support::ulittle16_t MajorVersion;
+ support::ulittle16_t MinorVersion;
+ support::ulittle16_t NumberOfNameEntries;
+ support::ulittle16_t NumberOfIDEntries;
+};
+
class COFFObjectFile : public ObjectFile {
private:
friend class ImportDirectoryEntryRef;
diff --git a/include/llvm/Object/COFFImportFile.h b/include/llvm/Object/COFFImportFile.h
index 4192fe7e5c90..78d9d679acd3 100644
--- a/include/llvm/Object/COFFImportFile.h
+++ b/include/llvm/Object/COFFImportFile.h
@@ -53,7 +53,7 @@ public:
basic_symbol_iterator symbol_end() const override {
DataRefImpl Symb;
- Symb.p = isCode() ? 2 : 1;
+ Symb.p = isData() ? 1 : 2;
return BasicSymbolRef(Symb, this);
}
@@ -63,8 +63,8 @@ public:
}
private:
- bool isCode() const {
- return getCOFFImportHeader()->getType() == COFF::IMPORT_CODE;
+ bool isData() const {
+ return getCOFFImportHeader()->getType() == COFF::IMPORT_DATA;
}
};
diff --git a/include/llvm/Object/ELF.h b/include/llvm/Object/ELF.h
index 9c72bd4023d8..42fdfe3e5a74 100644
--- a/include/llvm/Object/ELF.h
+++ b/include/llvm/Object/ELF.h
@@ -32,6 +32,7 @@ namespace llvm {
namespace object {
StringRef getELFRelocationTypeName(uint32_t Machine, uint32_t Type);
+StringRef getELFSectionTypeName(uint32_t Machine, uint32_t Type);
// Subclasses of ELFFile may need this for template instantiation
inline std::pair<unsigned char, unsigned char>
diff --git a/include/llvm/Object/ModuleSummaryIndexObjectFile.h b/include/llvm/Object/ModuleSummaryIndexObjectFile.h
deleted file mode 100644
index f733f861e2c0..000000000000
--- a/include/llvm/Object/ModuleSummaryIndexObjectFile.h
+++ /dev/null
@@ -1,112 +0,0 @@
-//===- ModuleSummaryIndexObjectFile.h - Summary index file implementation -===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-//
-// This file declares the ModuleSummaryIndexObjectFile template class.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H
-#define LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H
-
-#include "llvm/ADT/StringRef.h"
-#include "llvm/Object/Binary.h"
-#include "llvm/Object/SymbolicFile.h"
-#include "llvm/Support/Error.h"
-#include "llvm/Support/ErrorHandling.h"
-#include "llvm/Support/ErrorOr.h"
-#include "llvm/Support/MemoryBuffer.h"
-#include <memory>
-#include <system_error>
-
-namespace llvm {
-
-class ModuleSummaryIndex;
-
-namespace object {
-
-class ObjectFile;
-
-/// This class is used to read just the module summary index related
-/// sections out of the given object (which may contain a single module's
-/// bitcode or be a combined index bitcode file). It builds a ModuleSummaryIndex
-/// object.
-class ModuleSummaryIndexObjectFile : public SymbolicFile {
- std::unique_ptr<ModuleSummaryIndex> Index;
-
-public:
- ModuleSummaryIndexObjectFile(MemoryBufferRef Object,
- std::unique_ptr<ModuleSummaryIndex> I);
- ~ModuleSummaryIndexObjectFile() override;
-
- // TODO: Walk through GlobalValueMap entries for symbols.
- // However, currently these interfaces are not used by any consumers.
- void moveSymbolNext(DataRefImpl &Symb) const override {
- llvm_unreachable("not implemented");
- }
-
- std::error_code printSymbolName(raw_ostream &OS,
- DataRefImpl Symb) const override {
- llvm_unreachable("not implemented");
- return std::error_code();
- }
-
- uint32_t getSymbolFlags(DataRefImpl Symb) const override {
- llvm_unreachable("not implemented");
- return 0;
- }
-
- basic_symbol_iterator symbol_begin() const override {
- llvm_unreachable("not implemented");
- return basic_symbol_iterator(BasicSymbolRef());
- }
- basic_symbol_iterator symbol_end() const override {
- llvm_unreachable("not implemented");
- return basic_symbol_iterator(BasicSymbolRef());
- }
-
- const ModuleSummaryIndex &getIndex() const {
- return const_cast<ModuleSummaryIndexObjectFile *>(this)->getIndex();
- }
- ModuleSummaryIndex &getIndex() { return *Index; }
- std::unique_ptr<ModuleSummaryIndex> takeIndex();
-
- static inline bool classof(const Binary *v) {
- return v->isModuleSummaryIndex();
- }
-
- /// \brief Finds and returns bitcode embedded in the given object file, or an
- /// error code if not found.
- static ErrorOr<MemoryBufferRef> findBitcodeInObject(const ObjectFile &Obj);
-
- /// \brief Finds and returns bitcode in the given memory buffer (which may
- /// be either a bitcode file or a native object file with embedded bitcode),
- /// or an error code if not found.
- static ErrorOr<MemoryBufferRef>
- findBitcodeInMemBuffer(MemoryBufferRef Object);
-
- /// \brief Parse module summary index in the given memory buffer.
- /// Return new ModuleSummaryIndexObjectFile instance containing parsed module
- /// summary/index.
- static Expected<std::unique_ptr<ModuleSummaryIndexObjectFile>>
- create(MemoryBufferRef Object);
-};
-
-} // end namespace object
-
-/// Parse the module summary index out of an IR file and return the module
-/// summary index object if found, or nullptr if not. If Identifier is
-/// non-empty, it is used as the module ID (module path) in the resulting
-/// index. This can be used when the index is being read from a file
-/// containing minimized bitcode just for the thin link.
-Expected<std::unique_ptr<ModuleSummaryIndex>>
-getModuleSummaryIndexForFile(StringRef Path, StringRef Identifier = "");
-
-} // end namespace llvm
-
-#endif // LLVM_OBJECT_MODULESUMMARYINDEXOBJECTFILE_H
diff --git a/include/llvm/Support/AArch64TargetParser.def b/include/llvm/Support/AArch64TargetParser.def
index 46d253bf0ec7..1700deadeaef 100644
--- a/include/llvm/Support/AArch64TargetParser.def
+++ b/include/llvm/Support/AArch64TargetParser.def
@@ -21,7 +21,7 @@ AARCH64_ARCH("invalid", AK_INVALID, nullptr, nullptr,
AARCH64_ARCH("armv8-a", AK_ARMV8A, "8-A", "v8", ARMBuildAttrs::CPUArch::v8_A,
FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
- AArch64::AEK_SIMD | AArch64::AEK_LSE))
+ AArch64::AEK_SIMD))
AARCH64_ARCH("armv8.1-a", AK_ARMV8_1A, "8.1-A", "v8.1a",
ARMBuildAttrs::CPUArch::v8_A, FK_CRYPTO_NEON_FP_ARMV8,
(AArch64::AEK_CRC | AArch64::AEK_CRYPTO | AArch64::AEK_FP |
diff --git a/include/llvm/Support/BinaryStreamArray.h b/include/llvm/Support/BinaryStreamArray.h
index 21b2474660f2..748a62be231e 100644
--- a/include/llvm/Support/BinaryStreamArray.h
+++ b/include/llvm/Support/BinaryStreamArray.h
@@ -42,10 +42,12 @@ namespace llvm {
/// having to specify a second template argument to VarStreamArray (documented
/// below).
template <typename T> struct VarStreamArrayExtractor {
+ typedef void Context;
+
// Method intentionally deleted. You must provide an explicit specialization
// with the following method implemented.
- Error operator()(BinaryStreamRef Stream, uint32_t &Len,
- T &Item) const = delete;
+ static Error extract(BinaryStreamRef Stream, uint32_t &Len, T &Item,
+ Context *Ctx) = delete;
};
/// VarStreamArray represents an array of variable length records backed by a
@@ -64,82 +66,87 @@ template <typename T> struct VarStreamArrayExtractor {
/// If you do not specify an Extractor type, you are expected to specialize
/// VarStreamArrayExtractor<T> for your ValueType.
///
-/// By default an Extractor is default constructed in the class, but in some
-/// cases you might find it useful for an Extractor to maintain state across
-/// extractions. In this case you can provide your own Extractor through a
-/// secondary constructor. The following examples show various ways of
-/// creating a VarStreamArray.
-///
-/// // Will use VarStreamArrayExtractor<MyType> as the extractor.
-/// VarStreamArray<MyType> MyTypeArray;
-///
-/// // Will use a default-constructed MyExtractor as the extractor.
-/// VarStreamArray<MyType, MyExtractor> MyTypeArray2;
-///
-/// // Will use the specific instance of MyExtractor provided.
-/// // MyExtractor need not be default-constructible in this case.
-/// MyExtractor E(SomeContext);
-/// VarStreamArray<MyType, MyExtractor> MyTypeArray3(E);
+/// The default extractor type is stateless, but by specializing
+/// VarStreamArrayExtractor or defining your own custom extractor type and
+/// adding the appropriate ContextType typedef to the class, you can pass a
+/// context field during construction of the VarStreamArray that will be
+/// passed to each call to extract.
///
-template <typename ValueType, typename Extractor> class VarStreamArrayIterator;
+template <typename ValueType, typename ExtractorType>
+class VarStreamArrayIterator;
template <typename ValueType,
- typename Extractor = VarStreamArrayExtractor<ValueType>>
-
+ typename ExtractorType = VarStreamArrayExtractor<ValueType>>
class VarStreamArray {
- friend class VarStreamArrayIterator<ValueType, Extractor>;
-
public:
- typedef VarStreamArrayIterator<ValueType, Extractor> Iterator;
+ typedef typename ExtractorType::ContextType ContextType;
+ typedef VarStreamArrayIterator<ValueType, ExtractorType> Iterator;
+ friend Iterator;
VarStreamArray() = default;
- explicit VarStreamArray(const Extractor &E) : E(E) {}
- explicit VarStreamArray(BinaryStreamRef Stream) : Stream(Stream) {}
- VarStreamArray(BinaryStreamRef Stream, const Extractor &E)
- : Stream(Stream), E(E) {}
+ explicit VarStreamArray(BinaryStreamRef Stream,
+ ContextType *Context = nullptr)
+ : Stream(Stream), Context(Context) {}
- VarStreamArray(const VarStreamArray<ValueType, Extractor> &Other)
- : Stream(Other.Stream), E(Other.E) {}
+ VarStreamArray(const VarStreamArray<ValueType, ExtractorType> &Other)
+ : Stream(Other.Stream), Context(Other.Context) {}
Iterator begin(bool *HadError = nullptr) const {
- return Iterator(*this, E, HadError);
+ if (empty())
+ return end();
+
+ return Iterator(*this, Context, HadError);
}
- Iterator end() const { return Iterator(E); }
+ Iterator end() const { return Iterator(); }
- const Extractor &getExtractor() const { return E; }
+ bool empty() const { return Stream.getLength() == 0; }
+
+ /// \brief Given an offset into the array's underlying stream, return an
+ /// iterator to the record at that offset. This is considered unsafe
+ /// since the behavior is undefined if \p Offset does not refer to the
+ /// beginning of a valid record.
+ Iterator at(uint32_t Offset) const {
+ return Iterator(*this, Context, Stream.drop_front(Offset), nullptr);
+ }
BinaryStreamRef getUnderlyingStream() const { return Stream; }
private:
BinaryStreamRef Stream;
- Extractor E;
+ ContextType *Context = nullptr;
};
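To illustrate the stateless, context-passing extractor interface documented above, here is a minimal sketch; the record, context, and field names are hypothetical, and the ContextType typedef and extract() signature simply mirror what VarStreamArray and VarStreamArrayIterator expect in this revision.

// Hypothetical record: a 16-bit length prefix followed by that many payload
// bytes. Assumes llvm/Support/BinaryStreamArray.h and BinaryStreamReader.h.
struct MyRecord {
  uint16_t Length;
  llvm::ArrayRef<uint8_t> Payload;
};

// Hypothetical shared state, supplied once at array construction and
// forwarded to every extraction.
struct MyContext {
  uint32_t RecordsSeen = 0;
};

namespace llvm {
template <> struct VarStreamArrayExtractor<MyRecord> {
  typedef MyContext ContextType;

  // Stateless: all mutable state lives behind Ctx.
  static Error extract(BinaryStreamRef Stream, uint32_t &Len, MyRecord &Item,
                       ContextType *Ctx) {
    BinaryStreamReader Reader(Stream);
    if (auto EC = Reader.readInteger(Item.Length))
      return EC;
    if (auto EC = Reader.readBytes(Item.Payload, Item.Length))
      return EC;
    Len = Reader.getOffset(); // bytes consumed by this record
    if (Ctx)
      ++Ctx->RecordsSeen;
    return Error::success();
  }
};
} // namespace llvm

// Usage:
//   MyContext Ctx;
//   llvm::VarStreamArray<MyRecord> Records(Ref, &Ctx);
//   for (const MyRecord &R : Records) { /* ... */ }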
-template <typename ValueType, typename Extractor>
+template <typename ValueType, typename ExtractorType>
class VarStreamArrayIterator
- : public iterator_facade_base<VarStreamArrayIterator<ValueType, Extractor>,
- std::forward_iterator_tag, ValueType> {
- typedef VarStreamArrayIterator<ValueType, Extractor> IterType;
- typedef VarStreamArray<ValueType, Extractor> ArrayType;
+ : public iterator_facade_base<
+ VarStreamArrayIterator<ValueType, ExtractorType>,
+ std::forward_iterator_tag, ValueType> {
+ typedef typename ExtractorType::ContextType ContextType;
+ typedef VarStreamArrayIterator<ValueType, ExtractorType> IterType;
+ typedef VarStreamArray<ValueType, ExtractorType> ArrayType;
public:
- VarStreamArrayIterator(const ArrayType &Array, const Extractor &E,
- bool *HadError = nullptr)
- : IterRef(Array.Stream), Array(&Array), HadError(HadError), Extract(E) {
+ VarStreamArrayIterator(const ArrayType &Array, ContextType *Context,
+ BinaryStreamRef Stream, bool *HadError = nullptr)
+ : IterRef(Stream), Context(Context), Array(&Array), HadError(HadError) {
if (IterRef.getLength() == 0)
moveToEnd();
else {
- auto EC = Extract(IterRef, ThisLen, ThisValue);
+ auto EC = ExtractorType::extract(IterRef, ThisLen, ThisValue, Context);
if (EC) {
consumeError(std::move(EC));
markError();
}
}
}
+
+ VarStreamArrayIterator(const ArrayType &Array, ContextType *Context,
+ bool *HadError = nullptr)
+ : VarStreamArrayIterator(Array, Context, Array.Stream, HadError) {}
+
VarStreamArrayIterator() = default;
- explicit VarStreamArrayIterator(const Extractor &E) : Extract(E) {}
~VarStreamArrayIterator() = default;
bool operator==(const IterType &R) const {
@@ -178,7 +185,7 @@ public:
moveToEnd();
} else {
// There is some data after the current record.
- auto EC = Extract(IterRef, ThisLen, ThisValue);
+ auto EC = ExtractorType::extract(IterRef, ThisLen, ThisValue, Context);
if (EC) {
consumeError(std::move(EC));
markError();
@@ -205,11 +212,11 @@ private:
ValueType ThisValue;
BinaryStreamRef IterRef;
+ ContextType *Context{nullptr};
const ArrayType *Array{nullptr};
uint32_t ThisLen{0};
bool HasError{false};
bool *HadError{nullptr};
- Extractor Extract;
};
template <typename T> class FixedStreamArrayIterator;
diff --git a/include/llvm/Support/BinaryStreamReader.h b/include/llvm/Support/BinaryStreamReader.h
index d994fa0f49d0..f30d82d81b25 100644
--- a/include/llvm/Support/BinaryStreamReader.h
+++ b/include/llvm/Support/BinaryStreamReader.h
@@ -172,11 +172,13 @@ public:
/// \returns a success error code if the data was successfully read, otherwise
/// returns an appropriate error code.
template <typename T, typename U>
- Error readArray(VarStreamArray<T, U> &Array, uint32_t Size) {
+ Error
+ readArray(VarStreamArray<T, U> &Array, uint32_t Size,
+ typename VarStreamArray<T, U>::ContextType *Context = nullptr) {
BinaryStreamRef S;
if (auto EC = readStreamRef(S, Size))
return EC;
- Array = VarStreamArray<T, U>(S, Array.getExtractor());
+ Array = VarStreamArray<T, U>(S, Context);
return Error::success();
}
diff --git a/include/llvm/Support/BinaryStreamWriter.h b/include/llvm/Support/BinaryStreamWriter.h
index 64f26b24543d..6734a797ccc4 100644
--- a/include/llvm/Support/BinaryStreamWriter.h
+++ b/include/llvm/Support/BinaryStreamWriter.h
@@ -30,6 +30,8 @@ namespace llvm {
/// although no methods are overridable.
class BinaryStreamWriter {
public:
+ // FIXME: We should be able to slice and drop_front etc on Writers / Readers.
+
BinaryStreamWriter() = default;
explicit BinaryStreamWriter(WritableBinaryStreamRef Stream);
virtual ~BinaryStreamWriter() {}
diff --git a/include/llvm/Support/CMakeLists.txt b/include/llvm/Support/CMakeLists.txt
index b4b993705745..c58ccf216303 100644
--- a/include/llvm/Support/CMakeLists.txt
+++ b/include/llvm/Support/CMakeLists.txt
@@ -14,10 +14,15 @@ macro(find_first_existing_vc_file out_var path)
execute_process(COMMAND ${git_executable} rev-parse --git-dir
WORKING_DIRECTORY ${path}/cmake
RESULT_VARIABLE git_result
- OUTPUT_VARIABLE git_dir)
+ OUTPUT_VARIABLE git_dir
+ ERROR_QUIET)
if(git_result EQUAL 0)
string(STRIP "${git_dir}" git_dir)
set(${out_var} "${git_dir}/logs/HEAD")
+ # some branchless cases (e.g. 'repo') may not yet have .git/logs/HEAD
+ if (NOT EXISTS "${git_dir}/logs/HEAD")
+ file(WRITE "${git_dir}/logs/HEAD" "")
+ endif()
else()
find_first_existing_file(${out_var}
"${path}/.svn/wc.db" # SVN 1.7
diff --git a/include/llvm/Support/DynamicLibrary.h b/include/llvm/Support/DynamicLibrary.h
index aa9bb8938ad3..a8874a10d461 100644
--- a/include/llvm/Support/DynamicLibrary.h
+++ b/include/llvm/Support/DynamicLibrary.h
@@ -58,7 +58,7 @@ namespace sys {
void *getAddressOfSymbol(const char *symbolName);
/// This function permanently loads the dynamic library at the given path.
- /// The library will only be unloaded when the program terminates.
+ /// The library will only be unloaded when llvm_shutdown() is called.
/// This returns a valid DynamicLibrary instance on success and an invalid
/// instance on failure (see isValid()). \p *errMsg will only be modified
/// if the library fails to load.
@@ -71,7 +71,8 @@ namespace sys {
/// Registers an externally loaded library. The library will be unloaded
/// when the program terminates.
///
- /// It is safe to call this function multiple times for the same library.
+ /// It is safe to call this function multiple times for the same library,
+ /// though ownership is only taken if there was no error.
///
/// \returns An empty \p DynamicLibrary if the library was already loaded.
static DynamicLibrary addPermanentLibrary(void *handle,
@@ -106,6 +107,8 @@ namespace sys {
/// libraries.
/// @brief Add searchable symbol/value pair.
static void AddSymbol(StringRef symbolName, void *symbolValue);
+
+ class HandleSet;
};
} // End sys namespace
diff --git a/include/llvm/Support/ELFRelocs/AArch64.def b/include/llvm/Support/ELFRelocs/AArch64.def
index c21df07d2dbc..4afcd7d1f093 100644
--- a/include/llvm/Support/ELFRelocs/AArch64.def
+++ b/include/llvm/Support/ELFRelocs/AArch64.def
@@ -109,8 +109,8 @@ ELF_RELOC(R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC, 0x22f)
ELF_RELOC(R_AARCH64_TLSDESC_LD_PREL19, 0x230)
ELF_RELOC(R_AARCH64_TLSDESC_ADR_PREL21, 0x231)
ELF_RELOC(R_AARCH64_TLSDESC_ADR_PAGE21, 0x232)
-ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12_NC, 0x233)
-ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12_NC, 0x234)
+ELF_RELOC(R_AARCH64_TLSDESC_LD64_LO12, 0x233)
+ELF_RELOC(R_AARCH64_TLSDESC_ADD_LO12, 0x234)
ELF_RELOC(R_AARCH64_TLSDESC_OFF_G1, 0x235)
ELF_RELOC(R_AARCH64_TLSDESC_OFF_G0_NC, 0x236)
ELF_RELOC(R_AARCH64_TLSDESC_LDR, 0x237)
@@ -144,21 +144,28 @@ ELF_RELOC(R_AARCH64_P32_ADR_PREL_LO21, 0x00a)
ELF_RELOC(R_AARCH64_P32_ADR_PREL_PG_HI21, 0x00b)
ELF_RELOC(R_AARCH64_P32_ADD_ABS_LO12_NC, 0x00c)
ELF_RELOC(R_AARCH64_P32_LDST8_ABS_LO12_NC, 0x00d)
+ELF_RELOC(R_AARCH64_P32_LDST16_ABS_LO12_NC, 0x00e)
+ELF_RELOC(R_AARCH64_P32_LDST32_ABS_LO12_NC, 0x00f)
+ELF_RELOC(R_AARCH64_P32_LDST64_ABS_LO12_NC, 0x010)
+ELF_RELOC(R_AARCH64_P32_LDST128_ABS_LO12_NC, 0x011)
ELF_RELOC(R_AARCH64_P32_TSTBR14, 0x012)
ELF_RELOC(R_AARCH64_P32_CONDBR19, 0x013)
ELF_RELOC(R_AARCH64_P32_JUMP26, 0x014)
ELF_RELOC(R_AARCH64_P32_CALL26, 0x015)
-ELF_RELOC(R_AARCH64_P32_LDST16_ABS_LO12_NC, 0x00e)
-ELF_RELOC(R_AARCH64_P32_LDST32_ABS_LO12_NC, 0x00f)
-ELF_RELOC(R_AARCH64_P32_LDST64_ABS_LO12_NC, 0x010)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0, 0x016)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G0_NC, 0x017)
ELF_RELOC(R_AARCH64_P32_MOVW_PREL_G1, 0x018)
-ELF_RELOC(R_AARCH64_P32_LDST128_ABS_LO12_NC, 0x011)
ELF_RELOC(R_AARCH64_P32_GOT_LD_PREL19, 0x019)
ELF_RELOC(R_AARCH64_P32_ADR_GOT_PAGE, 0x01a)
-ELF_RELOC(R_AARCH64_P32_LD64_GOT_LO12_NC, 0x01b)
+ELF_RELOC(R_AARCH64_P32_LD32_GOT_LO12_NC, 0x01b)
ELF_RELOC(R_AARCH64_P32_LD32_GOTPAGE_LO14, 0x01c)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PREL21, 0x050)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADR_PAGE21, 0x051)
+ELF_RELOC(R_AARCH64_P32_TLSGD_ADD_LO12_NC, 0x052)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PREL21, 0x053)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADR_PAGE21, 0x054)
+ELF_RELOC(R_AARCH64_P32_TLSLD_ADD_LO12_NC, 0x055)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LD_PREL19, 0x056)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G1, 0x057)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0, 0x058)
ELF_RELOC(R_AARCH64_P32_TLSLD_MOVW_DTPREL_G0_NC, 0x059)
@@ -173,6 +180,8 @@ ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12, 0x061)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST32_DTPREL_LO12_NC, 0x062)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12, 0x063)
ELF_RELOC(R_AARCH64_P32_TLSLD_LDST64_DTPREL_LO12_NC, 0x064)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12, 0x065)
+ELF_RELOC(R_AARCH64_P32_TLSLD_LDST128_DTPREL_LO12_NC,0x066)
ELF_RELOC(R_AARCH64_P32_TLSIE_ADR_GOTTPREL_PAGE21, 0x067)
ELF_RELOC(R_AARCH64_P32_TLSIE_LD32_GOTTPREL_LO12_NC, 0x068)
ELF_RELOC(R_AARCH64_P32_TLSIE_LD_GOTTPREL_PREL19, 0x069)
@@ -190,12 +199,20 @@ ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12, 0x074)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST32_TPREL_LO12_NC, 0x075)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12, 0x076)
ELF_RELOC(R_AARCH64_P32_TLSLE_LDST64_TPREL_LO12_NC, 0x077)
-ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PAGE21, 0x051)
-ELF_RELOC(R_AARCH64_P32_TLSDESC_LD32_LO12_NC, 0x07d)
-ELF_RELOC(R_AARCH64_P32_TLSDESC_ADD_LO12_NC, 0x034)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12, 0x078)
+ELF_RELOC(R_AARCH64_P32_TLSLE_LDST128_TPREL_LO12_NC, 0x079)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_LD_PREL19, 0x07a)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PREL21, 0x07b)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADR_PAGE21, 0x07c)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_LD32_LO12, 0x07d)
+ELF_RELOC(R_AARCH64_P32_TLSDESC_ADD_LO12, 0x07e)
ELF_RELOC(R_AARCH64_P32_TLSDESC_CALL, 0x07f)
ELF_RELOC(R_AARCH64_P32_COPY, 0x0b4)
ELF_RELOC(R_AARCH64_P32_GLOB_DAT, 0x0b5)
ELF_RELOC(R_AARCH64_P32_JUMP_SLOT, 0x0b6)
ELF_RELOC(R_AARCH64_P32_RELATIVE, 0x0b7)
+ELF_RELOC(R_AARCH64_P32_TLS_DTPREL, 0x0b8)
+ELF_RELOC(R_AARCH64_P32_TLS_DTPMOD, 0x0b9)
+ELF_RELOC(R_AARCH64_P32_TLS_TPREL, 0x0ba)
+ELF_RELOC(R_AARCH64_P32_TLSDESC, 0x0bb)
ELF_RELOC(R_AARCH64_P32_IRELATIVE, 0x0bc)
diff --git a/include/llvm/Support/KnownBits.h b/include/llvm/Support/KnownBits.h
index 08d4dedd0ac8..292ea9e4b717 100644
--- a/include/llvm/Support/KnownBits.h
+++ b/include/llvm/Support/KnownBits.h
@@ -19,7 +19,7 @@
namespace llvm {
-// For now this is a simple wrapper around two APInts.
+// Struct for tracking the known zeros and ones of a value.
struct KnownBits {
APInt Zero;
APInt One;
@@ -36,6 +36,24 @@ struct KnownBits {
"Zero and One should have the same width!");
return Zero.getBitWidth();
}
+
+ /// Returns true if this value is known to be negative.
+ bool isNegative() const { return One.isSignBitSet(); }
+
+ /// Returns true if this value is known to be non-negative.
+ bool isNonNegative() const { return Zero.isSignBitSet(); }
+
+ /// Make this value negative.
+ void makeNegative() {
+ assert(!isNonNegative() && "Can't make a non-negative value negative");
+ One.setSignBit();
+ }
+
+ /// Make this value non-negative.
+ void makeNonNegative() {
+ assert(!isNegative() && "Can't make a negative value non-negative");
+ Zero.setSignBit();
+ }
};
} // end namespace llvm
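A quick usage sketch, not taken from the patch, showing how the new sign helpers relate to the Zero/One masks; it assumes the KnownBits(unsigned BitWidth) constructor that zero-initializes both APInts.

#include "llvm/Support/KnownBits.h"
#include <cassert>

void knownBitsSignExample() {
  llvm::KnownBits Known(8);       // nothing known yet
  Known.Zero.setSignBit();        // record that bit 7 is known to be zero
  assert(Known.isNonNegative());  // reads Zero.isSignBitSet()
  assert(!Known.isNegative());    // One's sign bit is still clear

  llvm::KnownBits Neg(8);
  Neg.makeNegative();             // sets the sign bit in One
  assert(Neg.isNegative());
}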
diff --git a/include/llvm/Support/LEB128.h b/include/llvm/Support/LEB128.h
index ff775f3b7b36..29640db69218 100644
--- a/include/llvm/Support/LEB128.h
+++ b/include/llvm/Support/LEB128.h
@@ -45,8 +45,7 @@ inline void encodeSLEB128(int64_t Value, raw_ostream &OS,
/// Utility function to encode a SLEB128 value to a buffer. Returns
/// the length in bytes of the encoded value.
-inline unsigned encodeSLEB128(int64_t Value, uint8_t *p,
- unsigned Padding = 0) {
+inline unsigned encodeSLEB128(int64_t Value, uint8_t *p, unsigned Padding = 0) {
uint8_t *orig_p = p;
bool More;
do {
@@ -111,7 +110,6 @@ inline unsigned encodeULEB128(uint64_t Value, uint8_t *p,
return (unsigned)(p - orig_p);
}
-
/// Utility function to decode a ULEB128 value.
inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr,
const uint8_t *end = nullptr,
@@ -119,19 +117,19 @@ inline uint64_t decodeULEB128(const uint8_t *p, unsigned *n = nullptr,
const uint8_t *orig_p = p;
uint64_t Value = 0;
unsigned Shift = 0;
- if(error)
+ if (error)
*error = nullptr;
do {
- if(end && p == end){
- if(error)
+ if (end && p == end) {
+ if (error)
*error = "malformed uleb128, extends past end";
if (n)
*n = (unsigned)(p - orig_p);
return 0;
}
uint64_t Slice = *p & 0x7f;
- if(Shift >= 64 || Slice << Shift >> Shift != Slice){
- if(error)
+ if (Shift >= 64 || Slice << Shift >> Shift != Slice) {
+ if (error)
*error = "uleb128 too big for uint64";
if (n)
*n = (unsigned)(p - orig_p);
@@ -154,15 +152,15 @@ inline int64_t decodeSLEB128(const uint8_t *p, unsigned *n = nullptr,
unsigned Shift = 0;
uint8_t Byte;
do {
- if(end && p == end){
- if(error)
+ if (end && p == end) {
+ if (error)
*error = "malformed sleb128, extends past end";
if (n)
*n = (unsigned)(p - orig_p);
return 0;
}
Byte = *p++;
- Value |= ((Byte & 0x7f) << Shift);
+ Value |= (int64_t(Byte & 0x7f) << Shift);
Shift += 7;
} while (Byte >= 128);
// Sign extend negative numbers.
@@ -173,13 +171,12 @@ inline int64_t decodeSLEB128(const uint8_t *p, unsigned *n = nullptr,
return Value;
}
-
/// Utility function to get the size of the ULEB128-encoded value.
extern unsigned getULEB128Size(uint64_t Value);
/// Utility function to get the size of the SLEB128-encoded value.
extern unsigned getSLEB128Size(int64_t Value);
-} // namespace llvm
+} // namespace llvm
-#endif // LLVM_SYSTEM_LEB128_H
+#endif // LLVM_SYSTEM_LEB128_H
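A small illustration, not part of the patch, of the decodeSLEB128 behavior the widened shift above protects: once Shift reaches 28, a 7-bit slice shifted left no longer fits in 32-bit int arithmetic, so the cast to int64_t before shifting is what keeps large values intact.

#include "llvm/Support/LEB128.h"
#include <cassert>
#include <cstdint>

void sleb128Examples() {
  // 0x7f decodes to -1: payload 0x7f, sign bit (0x40) set, sign-extended.
  const uint8_t NegOne[] = {0x7f};
  assert(llvm::decodeSLEB128(NegOne) == -1);

  // 0x80 0x7f decodes to -128.
  const uint8_t NegOneTwentyEight[] = {0x80, 0x7f};
  assert(llvm::decodeSLEB128(NegOneTwentyEight) == -128);

  // Five bytes encode 1 << 31; the final slice is shifted by 28 bits, which
  // relies on the widening cast to avoid 32-bit overflow.
  const uint8_t TwoToThe31[] = {0x80, 0x80, 0x80, 0x80, 0x08};
  assert(llvm::decodeSLEB128(TwoToThe31) == (INT64_C(1) << 31));
}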
diff --git a/include/llvm/Support/ScopedPrinter.h b/include/llvm/Support/ScopedPrinter.h
index a2f2e0985431..1b6651932212 100644
--- a/include/llvm/Support/ScopedPrinter.h
+++ b/include/llvm/Support/ScopedPrinter.h
@@ -295,6 +295,11 @@ public:
printBinaryImpl(Label, StringRef(), V, false);
}
+ void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value,
+ uint32_t StartOffset) {
+ printBinaryImpl(Label, StringRef(), Value, true, StartOffset);
+ }
+
void printBinaryBlock(StringRef Label, ArrayRef<uint8_t> Value) {
printBinaryImpl(Label, StringRef(), Value, true);
}
@@ -333,7 +338,7 @@ private:
}
void printBinaryImpl(StringRef Label, StringRef Str, ArrayRef<uint8_t> Value,
- bool Block);
+ bool Block, uint32_t StartOffset = 0);
raw_ostream &OS;
int IndentLevel;
diff --git a/include/llvm/Support/StringSaver.h b/include/llvm/Support/StringSaver.h
index fcddd4cde5b6..e85b2895ce51 100644
--- a/include/llvm/Support/StringSaver.h
+++ b/include/llvm/Support/StringSaver.h
@@ -26,7 +26,7 @@ public:
StringRef save(const char *S) { return save(StringRef(S)); }
StringRef save(StringRef S);
StringRef save(const Twine &S) { return save(StringRef(S.str())); }
- StringRef save(std::string &S) { return save(StringRef(S)); }
+ StringRef save(const std::string &S) { return save(StringRef(S)); }
};
}
#endif
diff --git a/include/llvm/Support/Wasm.h b/include/llvm/Support/Wasm.h
index 8e6c418c8189..a48dfe10b3bb 100644
--- a/include/llvm/Support/Wasm.h
+++ b/include/llvm/Support/Wasm.h
@@ -24,6 +24,8 @@ namespace wasm {
const char WasmMagic[] = {'\0', 'a', 's', 'm'};
// Wasm binary format version
const uint32_t WasmVersion = 0x1;
+// Wasm uses a 64k page size
+const uint32_t WasmPageSize = 65536;
struct WasmObjectHeader {
StringRef Magic;
@@ -106,7 +108,7 @@ struct WasmRelocation {
uint32_t Type; // The type of the relocation.
int32_t Index; // Index into function to global index space.
uint64_t Offset; // Offset from the start of the section.
- uint64_t Addend; // A value to add to the symbol.
+ int64_t Addend; // A value to add to the symbol.
};
enum : unsigned {
diff --git a/include/llvm/Target/Target.td b/include/llvm/Target/Target.td
index b21689e0e134..d7fbca93f59b 100644
--- a/include/llvm/Target/Target.td
+++ b/include/llvm/Target/Target.td
@@ -530,6 +530,12 @@ class Predicate<string cond> {
/// PredicateName - User-level name to use for the predicate. Mainly for use
/// in diagnostics such as missing feature errors in the asm matcher.
string PredicateName = "";
+
+ /// Setting this to '1' indicates that the predicate must be recomputed on
+ /// every function change. Most predicates can leave this at '0'.
+ ///
+ /// Ignored by SelectionDAG; it always recomputes the predicate on every use.
+ bit RecomputePerFunction = 0;
}
/// NoHonorSignDependentRounding - This predicate is true if support for
diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h
index 51f11e1a9a25..aa9230044b1f 100644
--- a/include/llvm/Target/TargetLowering.h
+++ b/include/llvm/Target/TargetLowering.h
@@ -69,6 +69,7 @@ class CCValAssign;
class FastISel;
class FunctionLoweringInfo;
class IntrinsicInst;
+struct KnownBits;
class MachineBasicBlock;
class MachineFunction;
class MachineInstr;
@@ -774,6 +775,74 @@ public:
return (!isTypeLegal(VT) && getOperationAction(Op, VT) == Custom);
}
+ /// Return true if lowering to a jump table is allowed.
+ bool areJTsAllowed(const Function *Fn) const {
+ if (Fn->getFnAttribute("no-jump-tables").getValueAsString() == "true")
+ return false;
+
+ return isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
+ isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
+ }
+
+ /// Check whether the range [Low,High] fits in a machine word.
+ bool rangeFitsInWord(const APInt &Low, const APInt &High,
+ const DataLayout &DL) const {
+ // FIXME: Using the pointer type doesn't seem ideal.
+ uint64_t BW = DL.getPointerSizeInBits();
+ uint64_t Range = (High - Low).getLimitedValue(UINT64_MAX - 1) + 1;
+ return Range <= BW;
+ }
+
+ /// Return true if lowering to a jump table is suitable for a set of case
+ /// clusters which may contain \p NumCases cases covering a range of values
+ /// given by \p Range.
+ /// FIXME: This function checks the maximum table size and density, but not
+ /// the minimum size. It would be nice if the minimum size check were also
+ /// combined into this function. Currently, the minimum size check is
+ /// performed in findJumpTable() in SelectionDAGBuilder and
+ /// getEstimatedNumberOfCaseClusters() in BasicTTIImpl.
+ bool isSuitableForJumpTable(const SwitchInst *SI, uint64_t NumCases,
+ uint64_t Range) const {
+ const bool OptForSize = SI->getParent()->getParent()->optForSize();
+ const unsigned MinDensity = getMinimumJumpTableDensity(OptForSize);
+ const unsigned MaxJumpTableSize =
+ OptForSize || getMaximumJumpTableSize() == 0
+ ? UINT_MAX
+ : getMaximumJumpTableSize();
+ // Check whether a range of clusters is dense enough for a jump table.
+ if (Range <= MaxJumpTableSize &&
+ (NumCases * 100 >= Range * MinDensity)) {
+ return true;
+ }
+ return false;
+ }
+
+ /// Return true if lowering to a bit test is suitable for a set of case
+ /// clusters which contains \p NumDests unique destinations, \p Low and
+ /// \p High as its lowest and highest case values, and expects \p NumCmps
+ /// case value comparisons. Check if the number of destinations, comparison
+ /// metric, and range are all suitable.
+ bool isSuitableForBitTests(unsigned NumDests, unsigned NumCmps,
+ const APInt &Low, const APInt &High,
+ const DataLayout &DL) const {
+ // FIXME: I don't think NumCmps is the correct metric: a single case and a
+ // range of cases both require only one branch to lower. Just looking at the
+ // number of clusters and destinations should be enough to decide whether to
+ // build bit tests.
+
+ // To lower a range with bit tests, the range must fit the bitwidth of a
+ // machine word.
+ if (!rangeFitsInWord(Low, High, DL))
+ return false;
+
+ // Decide whether it's profitable to lower this range with bit tests. Each
+ // destination requires a bit test and branch, and there is an overall range
+ // check branch. For a small number of clusters, separate comparisons might
+ // be cheaper, and for many destinations, splitting the range might be
+ // better.
+ return (NumDests == 1 && NumCmps >= 3) || (NumDests == 2 && NumCmps >= 5) ||
+ (NumDests == 3 && NumCmps >= 6);
+ }
+
/// Return true if the specified operation is illegal on this target or
/// unlikely to be made legal with custom lowering. This is used to help guide
/// high-level lowering decisions.
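As a worked instance of the checks added above (the concrete thresholds are target- and option-dependent, so these numbers are illustrative): assuming MinDensity = 40 and no maximum table size, a switch with NumCases = 60 spread over Range = 100 values satisfies 60 * 100 >= 100 * 40 and is considered suitable for a jump table, while NumCases = 30 over the same range is not (3000 < 4000). For bit tests on a target with 64-bit pointers, rangeFitsInWord() accepts Low = 10, High = 73 (a range of 64 values) and rejects High = 74; with two destinations, isSuitableForBitTests() then additionally requires at least five case-value comparisons.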
@@ -1148,6 +1217,9 @@ public:
/// Return lower limit for number of blocks in a jump table.
unsigned getMinimumJumpTableEntries() const;
+ /// Return lower limit of the density in a jump table.
+ unsigned getMinimumJumpTableDensity(bool OptForSize) const;
+
/// Return upper limit for number of entries in a jump table.
/// Zero if no limit.
unsigned getMaximumJumpTableSize() const;
@@ -2025,6 +2097,12 @@ public:
return LibcallCallingConvs[Call];
}
+ /// Execute target specific actions to finalize target lowering.
+ /// This is used to set extra flags in MachineFrameInfo and to freeze the
+ /// set of reserved registers.
+ /// The default implementation just freezes the set of reserved registers.
+ virtual void finalizeLowering(MachineFunction &MF) const;
+
private:
const TargetMachine &TM;
@@ -2442,7 +2520,7 @@ public:
/// with TLO.New will be incorrect when this parameter is true and TLO.Old
/// has multiple uses.
bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
- APInt &KnownZero, APInt &KnownOne,
+ KnownBits &Known,
TargetLoweringOpt &TLO,
unsigned Depth = 0,
bool AssumeSingleUse = false) const;
@@ -2456,8 +2534,7 @@ public:
/// argument allows us to only collect the known bits that are shared by the
/// requested vector elements.
virtual void computeKnownBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
+ KnownBits &Known,
const APInt &DemandedElts,
const SelectionDAG &DAG,
unsigned Depth = 0) const;
@@ -2584,12 +2661,6 @@ public:
return false;
}
- /// Return true if the MachineFunction contains a COPY which would imply
- /// HasCopyImplyingStackAdjustment.
- virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const {
- return false;
- }
-
/// Perform necessary initialization to handle a subset of CSRs explicitly
/// via copies. This function is called at the beginning of instruction
/// selection.
diff --git a/include/llvm/Transforms/Scalar/NaryReassociate.h b/include/llvm/Transforms/Scalar/NaryReassociate.h
index a74bb6cc4194..f35707eeb3f0 100644
--- a/include/llvm/Transforms/Scalar/NaryReassociate.h
+++ b/include/llvm/Transforms/Scalar/NaryReassociate.h
@@ -167,7 +167,7 @@ private:
// foo(a + b);
// if (p2)
// bar(a + b);
- DenseMap<const SCEV *, SmallVector<WeakVH, 2>> SeenExprs;
+ DenseMap<const SCEV *, SmallVector<WeakTrackingVH, 2>> SeenExprs;
};
} // namespace llvm
diff --git a/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h b/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
new file mode 100644
index 000000000000..d7282ac6a781
--- /dev/null
+++ b/include/llvm/Transforms/Scalar/SimpleLoopUnswitch.h
@@ -0,0 +1,53 @@
+//===- SimpleLoopUnswitch.h - Hoist loop-invariant control flow -*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+#define LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
+
+#include "llvm/Analysis/LoopAnalysisManager.h"
+#include "llvm/Analysis/LoopInfo.h"
+#include "llvm/IR/PassManager.h"
+#include "llvm/Transforms/Scalar/LoopPassManager.h"
+
+namespace llvm {
+
+/// This pass transforms loops that contain branches on loop-invariant
+/// conditions to have multiple loops. For example, it turns the left into the
+/// right code:
+///
+/// for (...) if (lic)
+/// A for (...)
+/// if (lic) A; B; C
+/// B else
+/// C for (...)
+/// A; C
+///
+/// This can increase the size of the code exponentially (doubling it every time
+/// a loop is unswitched) so we only unswitch if the resultant code will be
+/// smaller than a threshold.
+///
+/// This pass expects LICM to be run before it to hoist invariant conditions out
+/// of the loop, to make the unswitching opportunity obvious.
+///
+class SimpleLoopUnswitchPass : public PassInfoMixin<SimpleLoopUnswitchPass> {
+public:
+ SimpleLoopUnswitchPass() = default;
+
+ PreservedAnalyses run(Loop &L, LoopAnalysisManager &AM,
+ LoopStandardAnalysisResults &AR, LPMUpdater &U);
+};
+
+/// Create the legacy pass object for the simple loop unswitcher.
+///
+/// See the documentation for `SimpleLoopUnswitchPass` for details.
+Pass *createSimpleLoopUnswitchLegacyPass();
+
+} // end namespace llvm
+
+#endif // LLVM_TRANSFORMS_SCALAR_SIMPLELOOPUNSWITCH_H
diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h
index 337305a0a82c..0a8903a6ed7b 100644
--- a/include/llvm/Transforms/Utils/Cloning.h
+++ b/include/llvm/Transforms/Utils/Cloning.h
@@ -74,7 +74,7 @@ struct ClonedCodeInfo {
/// All cloned call sites that have operand bundles attached are appended to
/// this vector. This vector may contain nulls or undefs if some of the
/// originally inserted callsites were DCE'ed after they were cloned.
- std::vector<WeakVH> OperandBundleCallSites;
+ std::vector<WeakTrackingVH> OperandBundleCallSites;
ClonedCodeInfo() = default;
};
@@ -192,7 +192,7 @@ public:
/// InlinedCalls - InlineFunction fills this in with callsites that were
/// inlined from the callee. This is only filled in if CG is non-null.
- SmallVector<WeakVH, 8> InlinedCalls;
+ SmallVector<WeakTrackingVH, 8> InlinedCalls;
/// All of the new call sites inlined into the caller.
///
diff --git a/include/llvm/Transforms/Utils/Local.h b/include/llvm/Transforms/Utils/Local.h
index 4933712fb8ad..b5a5f4c2704c 100644
--- a/include/llvm/Transforms/Utils/Local.h
+++ b/include/llvm/Transforms/Utils/Local.h
@@ -286,9 +286,6 @@ DbgDeclareInst *FindAllocaDbgDeclare(Value *V);
/// Finds the llvm.dbg.value intrinsics describing a value.
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
-/// Constants for \p replaceDbgDeclare and friends.
-enum { NoDeref = false, WithDeref = true };
-
/// Replaces llvm.dbg.declare instruction when the address it describes
/// is replaced with a new value. If Deref is true, an additional DW_OP_deref is
/// prepended to the expression. If Offset is non-zero, a constant displacement
diff --git a/include/llvm/Transforms/Utils/ModuleUtils.h b/include/llvm/Transforms/Utils/ModuleUtils.h
index f5e843e2e8b5..e9793fe4b666 100644
--- a/include/llvm/Transforms/Utils/ModuleUtils.h
+++ b/include/llvm/Transforms/Utils/ModuleUtils.h
@@ -84,6 +84,17 @@ void appendToCompilerUsed(Module &M, ArrayRef<GlobalValue *> Values);
void filterDeadComdatFunctions(
Module &M, SmallVectorImpl<Function *> &DeadComdatFunctions);
+/// \brief Produce a unique identifier for this module by taking the MD5 sum of
+/// the names of the module's strong external symbols.
+///
+/// This identifier is normally guaranteed to be unique, or the program would
+/// fail to link due to multiply defined symbols.
+///
+/// If the module has no strong external symbols (such a module may still have a
+/// semantic effect if it performs global initialization), we cannot produce a
+/// unique identifier for this module, so we return the empty string.
+std::string getUniqueModuleId(Module *M);
+
} // End llvm namespace
#endif // LLVM_TRANSFORMS_UTILS_MODULEUTILS_H
diff --git a/include/llvm/Transforms/Utils/SimplifyIndVar.h b/include/llvm/Transforms/Utils/SimplifyIndVar.h
index 6cdeeeb60a65..8d50aeb10d6e 100644
--- a/include/llvm/Transforms/Utils/SimplifyIndVar.h
+++ b/include/llvm/Transforms/Utils/SimplifyIndVar.h
@@ -46,13 +46,13 @@ public:
/// simplifyUsersOfIV - Simplify instructions that use this induction variable
/// by using ScalarEvolution to analyze the IV's recurrence.
bool simplifyUsersOfIV(PHINode *CurrIV, ScalarEvolution *SE, DominatorTree *DT,
- LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead,
+ LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead,
IVVisitor *V = nullptr);
/// SimplifyLoopIVs - Simplify users of induction variables within this
/// loop. This does not actually change or add IVs.
bool simplifyLoopIVs(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
- LoopInfo *LI, SmallVectorImpl<WeakVH> &Dead);
+ LoopInfo *LI, SmallVectorImpl<WeakTrackingVH> &Dead);
} // end namespace llvm
diff --git a/include/llvm/Transforms/Utils/ValueMapper.h b/include/llvm/Transforms/Utils/ValueMapper.h
index 950ad92afcd7..e44dc437342d 100644
--- a/include/llvm/Transforms/Utils/ValueMapper.h
+++ b/include/llvm/Transforms/Utils/ValueMapper.h
@@ -23,7 +23,7 @@ namespace llvm {
class Value;
class Instruction;
-typedef ValueMap<const Value *, WeakVH> ValueToValueMapTy;
+typedef ValueMap<const Value *, WeakTrackingVH> ValueToValueMapTy;
/// This is a class that can be implemented by clients to remap types when
/// cloning constants and instructions.
diff --git a/include/llvm/Transforms/Vectorize/SLPVectorizer.h b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
index d669a8e5b615..10338f7937e8 100644
--- a/include/llvm/Transforms/Vectorize/SLPVectorizer.h
+++ b/include/llvm/Transforms/Vectorize/SLPVectorizer.h
@@ -40,8 +40,8 @@ class BoUpSLP;
struct SLPVectorizerPass : public PassInfoMixin<SLPVectorizerPass> {
typedef SmallVector<StoreInst *, 8> StoreList;
typedef MapVector<Value *, StoreList> StoreListMap;
- typedef SmallVector<WeakVH, 8> WeakVHList;
- typedef MapVector<Value *, WeakVHList> WeakVHListMap;
+ typedef SmallVector<WeakTrackingVH, 8> WeakTrackingVHList;
+ typedef MapVector<Value *, WeakTrackingVHList> WeakTrackingVHListMap;
ScalarEvolution *SE = nullptr;
TargetTransformInfo *TTI = nullptr;
@@ -111,7 +111,7 @@ private:
StoreListMap Stores;
/// The getelementptr instructions in a basic block organized by base pointer.
- WeakVHListMap GEPs;
+ WeakTrackingVHListMap GEPs;
};
}
diff --git a/lib/Analysis/AssumptionCache.cpp b/lib/Analysis/AssumptionCache.cpp
index 1fae94724487..0468c794e81d 100644
--- a/lib/Analysis/AssumptionCache.cpp
+++ b/lib/Analysis/AssumptionCache.cpp
@@ -29,15 +29,16 @@ static cl::opt<bool>
cl::desc("Enable verification of assumption cache"),
cl::init(false));
-SmallVector<WeakVH, 1> &AssumptionCache::getOrInsertAffectedValues(Value *V) {
+SmallVector<WeakTrackingVH, 1> &
+AssumptionCache::getOrInsertAffectedValues(Value *V) {
// Try using find_as first to avoid creating extra value handles just for the
// purpose of doing the lookup.
auto AVI = AffectedValues.find_as(V);
if (AVI != AffectedValues.end())
return AVI->second;
- auto AVIP = AffectedValues.insert({
- AffectedValueCallbackVH(V, this), SmallVector<WeakVH, 1>()});
+ auto AVIP = AffectedValues.insert(
+ {AffectedValueCallbackVH(V, this), SmallVector<WeakTrackingVH, 1>()});
return AVIP.first->second;
}
diff --git a/lib/Analysis/CFLGraph.h b/lib/Analysis/CFLGraph.h
index 75726e84569b..06410bf01dd6 100644
--- a/lib/Analysis/CFLGraph.h
+++ b/lib/Analysis/CFLGraph.h
@@ -429,7 +429,7 @@ template <typename CFLAA> class CFLGraphBuilder {
if (Inst->getType()->isPointerTy()) {
auto *Fn = CS.getCalledFunction();
- if (Fn == nullptr || !Fn->doesNotAlias(0))
+ if (Fn == nullptr || !Fn->doesNotAlias(AttributeList::ReturnIndex))
// No need to call addNode() since we've added Inst at the
// beginning of this function and we know it is not a global.
Graph.addAttr(InstantiatedValue{Inst, 0}, getAttrUnknown());
diff --git a/lib/Analysis/CallGraphSCCPass.cpp b/lib/Analysis/CallGraphSCCPass.cpp
index ea70f5752c61..8058e5b1935c 100644
--- a/lib/Analysis/CallGraphSCCPass.cpp
+++ b/lib/Analysis/CallGraphSCCPass.cpp
@@ -204,7 +204,7 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
// Get the set of call sites currently in the function.
for (CallGraphNode::iterator I = CGN->begin(), E = CGN->end(); I != E; ) {
// If this call site is null, then the function pass deleted the call
- // entirely and the WeakVH nulled it out.
+ // entirely and the WeakTrackingVH nulled it out.
if (!I->first ||
// If we've already seen this call site, then the FunctionPass RAUW'd
// one call with another, which resulted in two "uses" in the edge
@@ -347,7 +347,8 @@ bool CGPassManager::RefreshCallGraph(const CallGraphSCC &CurSCC, CallGraph &CG,
DevirtualizedCall = true;
// After scanning this function, if we still have entries in callsites, then
- // they are dangling pointers. WeakVH should save us for this, so abort if
+ // they are dangling pointers. WeakTrackingVH should save us for this, so
+ // abort if
// this happens.
assert(CallSites.empty() && "Dangling pointers found in call sites map");
diff --git a/lib/Analysis/DemandedBits.cpp b/lib/Analysis/DemandedBits.cpp
index 285339deaaf5..9f5dc5318239 100644
--- a/lib/Analysis/DemandedBits.cpp
+++ b/lib/Analysis/DemandedBits.cpp
@@ -181,7 +181,7 @@ void DemandedBits::determineLiveOperandBits(
// bits, then we must keep the highest input bit.
if ((AOut & APInt::getHighBitsSet(BitWidth, ShiftAmt))
.getBoolValue())
- AB.setBit(BitWidth-1);
+ AB.setSignBit();
// If the shift is exact, then the low bits are not dead
// (they must be zero).
@@ -239,7 +239,7 @@ void DemandedBits::determineLiveOperandBits(
if ((AOut & APInt::getHighBitsSet(AOut.getBitWidth(),
AOut.getBitWidth() - BitWidth))
.getBoolValue())
- AB.setBit(BitWidth-1);
+ AB.setSignBit();
break;
case Instruction::Select:
if (OperandNo != 0)
diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp
index 788f908bafca..100a591e452c 100644
--- a/lib/Analysis/InlineCost.cpp
+++ b/lib/Analysis/InlineCost.cpp
@@ -54,6 +54,11 @@ static cl::opt<int>
cl::init(45),
cl::desc("Threshold for inlining cold callsites"));
+static cl::opt<bool>
+ EnableGenericSwitchCost("inline-generic-switch-cost", cl::Hidden,
+ cl::init(false),
+ cl::desc("Enable generic switch cost model"));
+
// We introduce this threshold to help performance of instrumentation based
// PGO before we actually hook up inliner with analysis passes such as BPI and
// BFI.
@@ -998,11 +1003,72 @@ bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
if (isa<ConstantInt>(V))
return true;
- // Otherwise, we need to accumulate a cost proportional to the number of
- // distinct successor blocks. This fan-out in the CFG cannot be represented
- // for free even if we can represent the core switch as a jumptable that
- // takes a single instruction.
- //
+ if (EnableGenericSwitchCost) {
+ // Assume the most general case where the switch is lowered into
+ // either a jump table, bit test, or a balanced binary tree consisting of
+ // case clusters without merging adjacent clusters with the same
+ // destination. We do not consider the switches that are lowered with a mix
+ // of jump table/bit test/binary search tree. The cost of the switch is
+ // proportional to the size of the tree or the size of jump table range.
+
+ // Exit early for a large switch, assuming one case needs at least one
+ // instruction.
+ // FIXME: This is not true for a bit test, but ignore that case for now to
+ // save compile time.
+ int64_t CostLowerBound =
+ std::min((int64_t)INT_MAX,
+ (int64_t)SI.getNumCases() * InlineConstants::InstrCost + Cost);
+
+ if (CostLowerBound > Threshold) {
+ Cost = CostLowerBound;
+ return false;
+ }
+
+ unsigned JumpTableSize = 0;
+ unsigned NumCaseCluster =
+ TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);
+
+ // If suitable for a jump table, consider the cost for the table size and
+ // branch to destination.
+ if (JumpTableSize) {
+ int64_t JTCost = (int64_t)JumpTableSize * InlineConstants::InstrCost +
+ 4 * InlineConstants::InstrCost;
+ Cost = std::min((int64_t)INT_MAX, JTCost + Cost);
+ return false;
+ }
+
+ // When forming a binary search tree, we should find the number of nodes,
+ // which is the same as the number of comparisons when lowered. For a given
+ // number of clusters, n, we can define a recursive function, f(n), to find
+ // the number of nodes in the tree. The recursion is:
+ //   f(n) = 1 + f(n/2) + f(n - n/2), when n > 3,
+ //   and f(n) = n, when n <= 3.
+ // This leads to a binary tree where each leaf is either f(2) or f(3) when
+ // n > 3. So the number of comparisons from the leaves is n, while the
+ // number from the non-leaf nodes is:
+ //   2^(log2(n) - 1) - 1
+ //   = 2^log2(n) * 2^-1 - 1
+ //   = n / 2 - 1.
+ // Counting comparisons from both leaf and non-leaf nodes gives the simple
+ // closed form (see the worked example after this block):
+ //   n + n / 2 - 1 = n * 3 / 2 - 1
+ if (NumCaseCluster <= 3) {
+ // Suppose a comparison includes one compare and one conditional branch.
+ Cost += NumCaseCluster * 2 * InlineConstants::InstrCost;
+ return false;
+ }
+ int64_t ExpectedNumberOfCompare = 3 * (uint64_t)NumCaseCluster / 2 - 1;
+ uint64_t SwitchCost =
+ ExpectedNumberOfCompare * 2 * InlineConstants::InstrCost;
+ Cost = std::min((uint64_t)INT_MAX, SwitchCost + Cost);
+ return false;
+ }
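Worked example for the binary-tree estimate in the comment above: for NumCaseCluster = 8, the recursion gives f(8) = 1 + f(4) + f(4) with f(4) = 1 + f(2) + f(2) = 5, so f(8) = 11, matching the closed form 8 * 3 / 2 - 1 = 11; the switch then adds 11 * 2 * InlineConstants::InstrCost to Cost. With three or fewer clusters the code instead charges NumCaseCluster * 2 * InlineConstants::InstrCost directly.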
+
+ // Use a simple switch cost model where we accumulate a cost proportional to
+ // the number of distinct successor blocks. This fan-out in the CFG cannot
+ // be represented for free even if we can represent the core switch as a
+ // jumptable that takes a single instruction.
+ //
// NB: We convert large switches which are just used to initialize large phi
// nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
// inlining those. It will prevent inlining in cases where the optimization
@@ -1217,36 +1283,10 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// the rest of the function body.
Threshold += (SingleBBBonus + FiftyPercentVectorBonus);
- // Give out bonuses per argument, as the instructions setting them up will
- // be gone after inlining.
- for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
- if (CS.isByValArgument(I)) {
- // We approximate the number of loads and stores needed by dividing the
- // size of the byval type by the target's pointer size.
- PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
- unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
- unsigned PointerSize = DL.getPointerSizeInBits();
- // Ceiling division.
- unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
-
- // If it generates more than 8 stores it is likely to be expanded as an
- // inline memcpy so we take that as an upper bound. Otherwise we assume
- // one load and one store per word copied.
- // FIXME: The maxStoresPerMemcpy setting from the target should be used
- // here instead of a magic number of 8, but it's not available via
- // DataLayout.
- NumStores = std::min(NumStores, 8U);
+ // Give out bonuses for the callsite, as the instructions setting them up
+ // will be gone after inlining.
+ Cost -= getCallsiteCost(CS, DL);
- Cost -= 2 * NumStores * InlineConstants::InstrCost;
- } else {
- // For non-byval arguments subtract off one instruction per call
- // argument.
- Cost -= InlineConstants::InstrCost;
- }
- }
- // The call instruction also disappears after inlining.
- Cost -= InlineConstants::InstrCost + InlineConstants::CallPenalty;
-
// If there is only one call of the function, and it has internal linkage,
// the cost of inlining it drops dramatically.
bool OnlyOneCallAndLocalLinkage =
@@ -1431,6 +1471,38 @@ static bool functionsHaveCompatibleAttributes(Function *Caller,
AttributeFuncs::areInlineCompatible(*Caller, *Callee);
}
+int llvm::getCallsiteCost(CallSite CS, const DataLayout &DL) {
+ int Cost = 0;
+ for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
+ if (CS.isByValArgument(I)) {
+ // We approximate the number of loads and stores needed by dividing the
+ // size of the byval type by the target's pointer size.
+ PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
+ unsigned TypeSize = DL.getTypeSizeInBits(PTy->getElementType());
+ unsigned PointerSize = DL.getPointerSizeInBits();
+ // Ceiling division.
+ unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;
+
+ // If it generates more than 8 stores it is likely to be expanded as an
+ // inline memcpy so we take that as an upper bound. Otherwise we assume
+ // one load and one store per word copied.
+ // FIXME: The maxStoresPerMemcpy setting from the target should be used
+ // here instead of a magic number of 8, but it's not available via
+ // DataLayout.
+ NumStores = std::min(NumStores, 8U);
+
+ Cost += 2 * NumStores * InlineConstants::InstrCost;
+ } else {
+ // For non-byval arguments subtract off one instruction per call
+ // argument.
+ Cost += InlineConstants::InstrCost;
+ }
+ }
+ // The call instruction also disappears after inlining.
+ Cost += InlineConstants::InstrCost + InlineConstants::CallPenalty;
+ return Cost;
+}
+
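For example (applying the formula above, not a measured value): a call passing one byval argument whose type is 100 bytes, on a target with 64-bit pointers, needs ceil(800 / 64) = 13 word copies, capped at 8, contributing 2 * 8 * InlineConstants::InstrCost; a second, non-byval argument adds one InstrCost, and the call instruction itself adds InstrCost + CallPenalty.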
InlineCost llvm::getInlineCost(
CallSite CS, const InlineParams &Params, TargetTransformInfo &CalleeTTI,
std::function<AssumptionCache &(Function &)> &GetAssumptionCache,
diff --git a/lib/Analysis/InstructionSimplify.cpp b/lib/Analysis/InstructionSimplify.cpp
index e720e3ebecdb..2f25a1183668 100644
--- a/lib/Analysis/InstructionSimplify.cpp
+++ b/lib/Analysis/InstructionSimplify.cpp
@@ -21,8 +21,10 @@
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
+#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OptimizationDiagnosticInfo.h"
#include "llvm/Analysis/ValueTracking.h"
@@ -584,14 +586,6 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Query) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query, RecursionLimit);
}
@@ -800,14 +794,6 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
@@ -954,27 +940,10 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
}
Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFAddInst(Op0, Op1, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const SimplifyQuery &Q) {
return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
}
-Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFSubInst(Op0, Op1, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const SimplifyQuery &Q) {
@@ -982,26 +951,10 @@ Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFMulInst(Op0, Op1, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const SimplifyQuery &Q) {
return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
}
-Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyMulInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}
@@ -1124,13 +1077,6 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifySDivInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}
@@ -1155,13 +1101,6 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyUDivInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}
@@ -1208,15 +1147,6 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFDivInst(Op0, Op1, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const SimplifyQuery &Q) {
return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
}
@@ -1263,13 +1193,6 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifySRemInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}
@@ -1294,13 +1217,6 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyURemInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}
@@ -1328,15 +1244,6 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
}
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFRemInst(Op0, Op1, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
const SimplifyQuery &Q) {
return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
}
@@ -1465,14 +1372,6 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
}
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const SimplifyQuery &Q) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}
@@ -1494,15 +1393,6 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyLShrInst(Op0, Op1, isExact, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
@@ -1533,15 +1423,6 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
}
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyAShrInst(Op0, Op1, isExact, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const SimplifyQuery &Q) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
}
@@ -1793,13 +1674,6 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyAndInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
}
@@ -2023,13 +1897,6 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyOrInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
}
@@ -2075,13 +1942,6 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
return nullptr;
}
-Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyXorInst(Op0, Op1, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
}
@@ -3449,15 +3309,6 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyICmpInst(Predicate, LHS, RHS, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}
@@ -3587,15 +3438,6 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- FastMathFlags FMF, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
}
@@ -3845,9 +3687,9 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
return TrueVal;
if (isa<UndefValue>(CondVal)) { // select undef, X, Y -> X or Y
- if (isa<Constant>(TrueVal))
- return TrueVal;
- return FalseVal;
+ if (isa<Constant>(FalseVal))
+ return FalseVal;
+ return TrueVal;
}
if (isa<UndefValue>(TrueVal)) // select C, undef, X -> X
return FalseVal;
@@ -3862,15 +3704,6 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
}
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifySelectInst(Cond, TrueVal, FalseVal, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const SimplifyQuery &Q) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
}
@@ -3988,14 +3821,6 @@ static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
}
Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyGEPInst(SrcTy, Ops, {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
-Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
const SimplifyQuery &Q) {
return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
}
@@ -4029,14 +3854,6 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
return nullptr;
}
-Value *llvm::SimplifyInsertValueInst(
- Value *Agg, Value *Val, ArrayRef<unsigned> Idxs, const DataLayout &DL,
- const TargetLibraryInfo *TLI, const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyInsertValueInst(Agg, Val, Idxs, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q) {
@@ -4069,16 +3886,6 @@ static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
}
Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT,
- AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyExtractValueInst(Agg, Idxs, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
const SimplifyQuery &Q) {
return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
}
@@ -4108,13 +3915,6 @@ static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx, const SimplifyQ
return nullptr;
}
-Value *llvm::SimplifyExtractElementInst(
- Value *Vec, Value *Idx, const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC, const Instruction *CxtI) {
- return ::SimplifyExtractElementInst(Vec, Idx, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
const SimplifyQuery &Q) {
return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
@@ -4188,15 +3988,6 @@ static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
}
Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
- const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyCastInst(CastOpc, Op, Ty, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
const SimplifyQuery &Q) {
return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
}
@@ -4258,53 +4049,68 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const SimplifyQuery &Q,
unsigned MaxRecurse) {
+ if (isa<UndefValue>(Mask))
+ return UndefValue::get(RetTy);
+
Type *InVecTy = Op0->getType();
unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
unsigned InVecNumElts = InVecTy->getVectorNumElements();
- auto *Op0Const = dyn_cast<Constant>(Op0);
- auto *Op1Const = dyn_cast<Constant>(Op1);
-
- // If all operands are constant, constant fold the shuffle.
- if (Op0Const && Op1Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
+ SmallVector<int, 32> Indices;
+ ShuffleVectorInst::getShuffleMask(Mask, Indices);
+ assert(MaskNumElts == Indices.size() &&
+ "Size of Indices not same as number of mask elements?");
- // If only one of the operands is constant, constant fold the shuffle if the
- // mask does not select elements from the variable operand.
+ // Canonicalization: If mask does not select elements from an input vector,
+ // replace that input vector with undef.
bool MaskSelects0 = false, MaskSelects1 = false;
for (unsigned i = 0; i != MaskNumElts; ++i) {
- int Idx = ShuffleVectorInst::getMaskValue(Mask, i);
- if (Idx == -1)
+ if (Indices[i] == -1)
continue;
- if ((unsigned)Idx < InVecNumElts)
+ if ((unsigned)Indices[i] < InVecNumElts)
MaskSelects0 = true;
else
MaskSelects1 = true;
}
- if (!MaskSelects0 && Op1Const)
- return ConstantFoldShuffleVectorInstruction(UndefValue::get(InVecTy),
- Op1Const, Mask);
- if (!MaskSelects1 && Op0Const)
- return ConstantFoldShuffleVectorInstruction(Op0Const,
- UndefValue::get(InVecTy), Mask);
+ if (!MaskSelects0)
+ Op0 = UndefValue::get(InVecTy);
+ if (!MaskSelects1)
+ Op1 = UndefValue::get(InVecTy);
+
+ auto *Op0Const = dyn_cast<Constant>(Op0);
+ auto *Op1Const = dyn_cast<Constant>(Op1);
+
+ // If all operands are constant, constant fold the shuffle.
+ if (Op0Const && Op1Const)
+ return ConstantFoldShuffleVectorInstruction(Op0Const, Op1Const, Mask);
+
+ // Canonicalization: if only one input vector is constant, it shall be the
+ // second one.
+ if (Op0Const && !Op1Const) {
+ std::swap(Op0, Op1);
+ for (auto &Idx : Indices) {
+ if (Idx == -1)
+ continue;
+ Idx = Idx < (int)MaskNumElts ? Idx + MaskNumElts : Idx - MaskNumElts;
+ }
+ Mask = ConstantDataVector::get(
+ Mask->getContext(),
+ makeArrayRef(reinterpret_cast<uint32_t *>(Indices.data()),
+ MaskNumElts));
+ }
// A shuffle of a splat is always the splat itself. Legal if the shuffle's
// value type is same as the input vectors' type.
if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
- if (!MaskSelects1 && RetTy == InVecTy &&
+ if (isa<UndefValue>(Op1) && RetTy == InVecTy &&
OpShuf->getMask()->getSplatValue())
return Op0;
- if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op1))
- if (!MaskSelects0 && RetTy == InVecTy &&
- OpShuf->getMask()->getSplatValue())
- return Op1;
// Don't fold a shuffle with undef mask elements. This may get folded in a
// better way using demanded bits or other analysis.
// TODO: Should we allow this?
- for (unsigned i = 0; i != MaskNumElts; ++i)
- if (ShuffleVectorInst::getMaskValue(Mask, i) == -1)
- return nullptr;
+ if (find(Indices, -1) != Indices.end())
+ return nullptr;
// Check if every element of this shuffle can be mapped back to the
// corresponding element of a single root vector. If so, we don't need this
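The rewritten logic above also canonicalizes a shuffle whose only constant operand is the first one by swapping the operands and remapping the mask. A worked example of that remap with a 4-element mask (purely illustrative):

    //   shufflevector <4 x i32> <i32 0, i32 1, i32 2, i32 3>, <4 x i32> %v,
    //                 <4 x i32> <i32 0, i32 5, i32 2, i32 7>
    // becomes, after moving the constant to the second position,
    //   shufflevector <4 x i32> %v, <4 x i32> <i32 0, i32 1, i32 2, i32 3>,
    //                 <4 x i32> <i32 4, i32 1, i32 6, i32 3>
    // i.e. every defined index i is rewritten to i < 4 ? i + 4 : i - 4.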
@@ -4324,14 +4130,6 @@ static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
}
/// Given operands for a ShuffleVectorInst, fold the result or return null.
-Value *llvm::SimplifyShuffleVectorInst(
- Value *Op0, Value *Op1, Constant *Mask, Type *RetTy,
- const DataLayout &DL, const TargetLibraryInfo *TLI, const DominatorTree *DT,
- AssumptionCache *AC, const Instruction *CxtI) {
- return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy,
- {DL, TLI, DT, AC, CxtI}, RecursionLimit);
-}
-
Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1, Constant *Mask,
Type *RetTy, const SimplifyQuery &Q) {
return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
@@ -4407,28 +4205,11 @@ static Value *SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyBinOp(Opcode, LHS, RHS, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const SimplifyQuery &Q) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
}
Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
- FastMathFlags FMF, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyFPBinOp(unsigned Opcode, Value *LHS, Value *RHS,
FastMathFlags FMF, const SimplifyQuery &Q) {
return ::SimplifyFPBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
}
@@ -4442,14 +4223,6 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
}
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyCmpInst(Predicate, LHS, RHS, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const SimplifyQuery &Q) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}
@@ -4673,42 +4446,21 @@ static Value *SimplifyCall(Value *V, IterTy ArgBegin, IterTy ArgEnd,
}
Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
- User::op_iterator ArgEnd, const DataLayout &DL,
- const TargetLibraryInfo *TLI, const DominatorTree *DT,
- AssumptionCache *AC, const Instruction *CxtI) {
- return ::SimplifyCall(V, ArgBegin, ArgEnd, {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const SimplifyQuery &Q) {
return ::SimplifyCall(V, ArgBegin, ArgEnd, Q, RecursionLimit);
}
Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
- const DataLayout &DL, const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- const Instruction *CxtI) {
- return ::SimplifyCall(V, Args.begin(), Args.end(), {DL, TLI, DT, AC, CxtI},
- RecursionLimit);
-}
-
-Value *llvm::SimplifyCall(Value *V, ArrayRef<Value *> Args,
const SimplifyQuery &Q) {
return ::SimplifyCall(V, Args.begin(), Args.end(), Q, RecursionLimit);
}
/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
-Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout &DL,
- const TargetLibraryInfo *TLI,
- const DominatorTree *DT, AssumptionCache *AC,
- OptimizationRemarkEmitter *ORE) {
- return SimplifyInstruction(I, {DL, TLI, DT, AC, I}, ORE);
-}
-Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &Q,
+Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
OptimizationRemarkEmitter *ORE) {
+ const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
Value *Result;
switch (I->getOpcode()) {
@@ -4905,7 +4657,7 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
I = Worklist[Idx];
// See if this instruction simplifies.
- SimpleV = SimplifyInstruction(I, DL, TLI, DT, AC);
+ SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
if (!SimpleV)
continue;
@@ -4944,3 +4696,31 @@ bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
assert(SimpleV && "Must provide a simplified value.");
return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC);
}
+
+namespace llvm {
+const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
+ auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
+ auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
+ auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
+ auto *TLI = TLIWP ? &TLIWP->getTLI() : nullptr;
+ auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
+ auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
+ return {F.getParent()->getDataLayout(), TLI, DT, AC};
+}
+
+const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
+ const DataLayout &DL) {
+ return {DL, &AR.TLI, &AR.DT, &AR.AC};
+}
+
+template <class T, class... TArgs>
+const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
+ Function &F) {
+ auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
+ auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
+ auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
+ return {F.getParent()->getDataLayout(), TLI, DT, AC};
+}
+template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
+ Function &);
+}
diff --git a/lib/Analysis/LazyValueInfo.cpp b/lib/Analysis/LazyValueInfo.cpp
index ad01f7f2f215..a98383eaf4aa 100644
--- a/lib/Analysis/LazyValueInfo.cpp
+++ b/lib/Analysis/LazyValueInfo.cpp
@@ -920,7 +920,7 @@ bool LazyValueInfoImpl::solveBlockValueNonLocal(LVILatticeVal &BBLV,
// value is overdefined.
if (BB == &BB->getParent()->getEntryBlock()) {
assert(isa<Argument>(Val) && "Unknown live-in to the entry block");
- // Bofore giving up, see if we can prove the pointer non-null local to
+ // Before giving up, see if we can prove the pointer non-null local to
// this particular block.
if (Val->getType()->isPointerTy() &&
(isKnownNonNull(Val) || isObjectDereferencedInBlock(Val, BB))) {
diff --git a/lib/Analysis/Lint.cpp b/lib/Analysis/Lint.cpp
index 0f04af54cdc7..598138246445 100644
--- a/lib/Analysis/Lint.cpp
+++ b/lib/Analysis/Lint.cpp
@@ -699,7 +699,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
// As a last resort, try SimplifyInstruction or constant folding.
if (Instruction *Inst = dyn_cast<Instruction>(V)) {
- if (Value *W = SimplifyInstruction(Inst, *DL, TLI, DT, AC))
+ if (Value *W = SimplifyInstruction(Inst, {*DL, TLI, DT, AC}))
return findValueImpl(W, OffsetOk, Visited);
} else if (auto *C = dyn_cast<Constant>(V)) {
if (Value *W = ConstantFoldConstant(C, *DL, TLI))
diff --git a/lib/Analysis/PHITransAddr.cpp b/lib/Analysis/PHITransAddr.cpp
index 84ecd4ab9809..682af4dc708e 100644
--- a/lib/Analysis/PHITransAddr.cpp
+++ b/lib/Analysis/PHITransAddr.cpp
@@ -227,7 +227,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
// Simplify the GEP to handle 'gep x, 0' -> x etc.
if (Value *V = SimplifyGEPInst(GEP->getSourceElementType(),
- GEPOps, DL, TLI, DT, AC)) {
+ GEPOps, {DL, TLI, DT, AC})) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);
@@ -276,7 +276,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}
// See if the add simplifies away.
- if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT, AC)) {
+ if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, {DL, TLI, DT, AC})) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 3ac4bf1276eb..bd747f7c0b7a 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -4108,127 +4108,128 @@ const SCEV *ScalarEvolution::createAddRecFromPHI(PHINode *PN) {
break;
}
}
- if (BEValueV && StartValueV) {
- // While we are analyzing this PHI node, handle its value symbolically.
- const SCEV *SymbolicName = getUnknown(PN);
- assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
- "PHI node already processed?");
- ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
-
- // Using this symbolic name for the PHI, analyze the value coming around
- // the back-edge.
- const SCEV *BEValue = getSCEV(BEValueV);
-
- // NOTE: If BEValue is loop invariant, we know that the PHI node just
- // has a special value for the first iteration of the loop.
-
- // If the value coming around the backedge is an add with the symbolic
- // value we just inserted, then we found a simple induction variable!
- if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
- // If there is a single occurrence of the symbolic value, replace it
- // with a recurrence.
- unsigned FoundIndex = Add->getNumOperands();
- for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
- if (Add->getOperand(i) == SymbolicName)
- if (FoundIndex == e) {
- FoundIndex = i;
- break;
- }
+ if (!BEValueV || !StartValueV)
+ return nullptr;
- if (FoundIndex != Add->getNumOperands()) {
- // Create an add with everything but the specified operand.
- SmallVector<const SCEV *, 8> Ops;
- for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
- if (i != FoundIndex)
- Ops.push_back(Add->getOperand(i));
- const SCEV *Accum = getAddExpr(Ops);
-
- // This is not a valid addrec if the step amount is varying each
- // loop iteration, but is not itself an addrec in this loop.
- if (isLoopInvariant(Accum, L) ||
- (isa<SCEVAddRecExpr>(Accum) &&
- cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
- SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
-
- if (auto BO = MatchBinaryOp(BEValueV, DT)) {
- if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
- if (BO->IsNUW)
- Flags = setFlags(Flags, SCEV::FlagNUW);
- if (BO->IsNSW)
- Flags = setFlags(Flags, SCEV::FlagNSW);
- }
- } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
- // If the increment is an inbounds GEP, then we know the address
- // space cannot be wrapped around. We cannot make any guarantee
- // about signed or unsigned overflow because pointers are
- // unsigned but we may have a negative index from the base
- // pointer. We can guarantee that no unsigned wrap occurs if the
- // indices form a positive value.
- if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
- Flags = setFlags(Flags, SCEV::FlagNW);
-
- const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
- if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
- Flags = setFlags(Flags, SCEV::FlagNUW);
- }
+ // While we are analyzing this PHI node, handle its value symbolically.
+ const SCEV *SymbolicName = getUnknown(PN);
+ assert(ValueExprMap.find_as(PN) == ValueExprMap.end() &&
+ "PHI node already processed?");
+ ValueExprMap.insert({SCEVCallbackVH(PN, this), SymbolicName});
+
+ // Using this symbolic name for the PHI, analyze the value coming around
+ // the back-edge.
+ const SCEV *BEValue = getSCEV(BEValueV);
+
+ // NOTE: If BEValue is loop invariant, we know that the PHI node just
+ // has a special value for the first iteration of the loop.
+
+ // If the value coming around the backedge is an add with the symbolic
+ // value we just inserted, then we found a simple induction variable!
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
+ // If there is a single occurrence of the symbolic value, replace it
+ // with a recurrence.
+ unsigned FoundIndex = Add->getNumOperands();
+ for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
+ if (Add->getOperand(i) == SymbolicName)
+ if (FoundIndex == e) {
+ FoundIndex = i;
+ break;
+ }
- // We cannot transfer nuw and nsw flags from subtraction
- // operations -- sub nuw X, Y is not the same as add nuw X, -Y
- // for instance.
+ if (FoundIndex != Add->getNumOperands()) {
+ // Create an add with everything but the specified operand.
+ SmallVector<const SCEV *, 8> Ops;
+ for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
+ if (i != FoundIndex)
+ Ops.push_back(Add->getOperand(i));
+ const SCEV *Accum = getAddExpr(Ops);
+
+ // This is not a valid addrec if the step amount is varying each
+ // loop iteration, but is not itself an addrec in this loop.
+ if (isLoopInvariant(Accum, L) ||
+ (isa<SCEVAddRecExpr>(Accum) &&
+ cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
+
+ if (auto BO = MatchBinaryOp(BEValueV, DT)) {
+ if (BO->Opcode == Instruction::Add && BO->LHS == PN) {
+ if (BO->IsNUW)
+ Flags = setFlags(Flags, SCEV::FlagNUW);
+ if (BO->IsNSW)
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+ }
+ } else if (GEPOperator *GEP = dyn_cast<GEPOperator>(BEValueV)) {
+ // If the increment is an inbounds GEP, then we know the address
+ // space cannot be wrapped around. We cannot make any guarantee
+ // about signed or unsigned overflow because pointers are
+ // unsigned but we may have a negative index from the base
+ // pointer. We can guarantee that no unsigned wrap occurs if the
+ // indices form a positive value.
+ if (GEP->isInBounds() && GEP->getOperand(0) == PN) {
+ Flags = setFlags(Flags, SCEV::FlagNW);
+
+ const SCEV *Ptr = getSCEV(GEP->getPointerOperand());
+ if (isKnownPositive(getMinusSCEV(getSCEV(GEP), Ptr)))
+ Flags = setFlags(Flags, SCEV::FlagNUW);
}
- const SCEV *StartVal = getSCEV(StartValueV);
- const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
+ // We cannot transfer nuw and nsw flags from subtraction
+ // operations -- sub nuw X, Y is not the same as add nuw X, -Y
+ // for instance.
+ }
- // Okay, for the entire analysis of this edge we assumed the PHI
- // to be symbolic. We now need to go back and purge all of the
- // entries for the scalars that use the symbolic expression.
- forgetSymbolicName(PN, SymbolicName);
- ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
+ const SCEV *StartVal = getSCEV(StartValueV);
+ const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
- // We can add Flags to the post-inc expression only if we
- // know that it us *undefined behavior* for BEValueV to
- // overflow.
- if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
- if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
- (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
+ // Okay, for the entire analysis of this edge we assumed the PHI
+ // to be symbolic. We now need to go back and purge all of the
+ // entries for the scalars that use the symbolic expression.
+ forgetSymbolicName(PN, SymbolicName);
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
- return PHISCEV;
- }
+ // We can add Flags to the post-inc expression only if we
+ // know that it is *undefined behavior* for BEValueV to
+ // overflow.
+ if (auto *BEInst = dyn_cast<Instruction>(BEValueV))
+ if (isLoopInvariant(Accum, L) && isAddRecNeverPoison(BEInst, L))
+ (void)getAddRecExpr(getAddExpr(StartVal, Accum), Accum, L, Flags);
+
+ return PHISCEV;
}
- } else {
- // Otherwise, this could be a loop like this:
- // i = 0; for (j = 1; ..; ++j) { .... i = j; }
- // In this case, j = {1,+,1} and BEValue is j.
- // Because the other in-value of i (0) fits the evolution of BEValue
- // i really is an addrec evolution.
- //
- // We can generalize this saying that i is the shifted value of BEValue
- // by one iteration:
- // PHI(f(0), f({1,+,1})) --> f({0,+,1})
- const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
- const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
- if (Shifted != getCouldNotCompute() &&
- Start != getCouldNotCompute()) {
- const SCEV *StartVal = getSCEV(StartValueV);
- if (Start == StartVal) {
- // Okay, for the entire analysis of this edge we assumed the PHI
- // to be symbolic. We now need to go back and purge all of the
- // entries for the scalars that use the symbolic expression.
- forgetSymbolicName(PN, SymbolicName);
- ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
- return Shifted;
- }
+ }
+ } else {
+ // Otherwise, this could be a loop like this:
+ // i = 0; for (j = 1; ..; ++j) { .... i = j; }
+ // In this case, j = {1,+,1} and BEValue is j.
+ // Because the other in-value of i (0) fits the evolution of BEValue
+ // i really is an addrec evolution.
+ //
+ // We can generalize this saying that i is the shifted value of BEValue
+ // by one iteration:
+ // PHI(f(0), f({1,+,1})) --> f({0,+,1})
+ const SCEV *Shifted = SCEVShiftRewriter::rewrite(BEValue, L, *this);
+ const SCEV *Start = SCEVInitRewriter::rewrite(Shifted, L, *this);
+ if (Shifted != getCouldNotCompute() &&
+ Start != getCouldNotCompute()) {
+ const SCEV *StartVal = getSCEV(StartValueV);
+ if (Start == StartVal) {
+ // Okay, for the entire analysis of this edge we assumed the PHI
+ // to be symbolic. We now need to go back and purge all of the
+ // entries for the scalars that use the symbolic expression.
+ forgetSymbolicName(PN, SymbolicName);
+ ValueExprMap[SCEVCallbackVH(PN, this)] = Shifted;
+ return Shifted;
}
}
-
- // Remove the temporary PHI node SCEV that has been inserted while intending
- // to create an AddRecExpr for this PHI node. We can not keep this temporary
- // as it will prevent later (possibly simpler) SCEV expressions to be added
- // to the ValueExprMap.
- eraseValueFromMap(PN);
}
+ // Remove the temporary PHI node SCEV that has been inserted while intending
+ // to create an AddRecExpr for this PHI node. We cannot keep this temporary
+ // as it will prevent later (possibly simpler) SCEV expressions from being
+ // added to the ValueExprMap.
+ eraseValueFromMap(PN);
+
return nullptr;
}
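Apart from flipping the nesting into early returns, the recognition logic is unchanged: the PHI is analyzed symbolically and, if the back-edge value is an add of the PHI and a loop-invariant step, it becomes an add recurrence. An illustrative input (not from the patch):

    //   loop:
    //     %iv      = phi i64 [ 0, %preheader ], [ %iv.next, %loop ]
    //     %iv.next = add nuw nsw i64 %iv, 1
    // Here BEValue is (%iv + 1); with %iv treated symbolically, the remaining
    // operand 1 is the loop-invariant Accum, so the PHI is recorded as the
    // add recurrence {0,+,1}<nuw><nsw> for this loop.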
@@ -4388,7 +4389,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = SimplifyInstruction(PN, getDataLayout(), &TLI, &DT, &AC))
+ if (Value *V = SimplifyInstruction(PN, {getDataLayout(), &TLI, &DT, &AC}))
if (LI.replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
@@ -5028,7 +5029,8 @@ bool ScalarEvolution::isSCEVExprNeverPoison(const Instruction *I) {
return false;
// Only proceed if we can prove that I does not yield poison.
- if (!isKnownNotFullPoison(I)) return false;
+ if (!programUndefinedIfFullPoison(I))
+ return false;
// At this point we know that if I is executed, then it does not wrap
// according to at least one of NSW or NUW. If I is not executed, then we do
diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index 6dd10441c4cb..86cbd79aa84e 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -1772,9 +1772,10 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
-unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
- SmallVectorImpl<WeakVH> &DeadInsts,
- const TargetTransformInfo *TTI) {
+unsigned
+SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
+ SmallVectorImpl<WeakTrackingVH> &DeadInsts,
+ const TargetTransformInfo *TTI) {
// Find integer phis in order of increasing width.
SmallVector<PHINode*, 8> Phis;
for (auto &I : *L->getHeader()) {
@@ -1799,7 +1800,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
// so narrow phis can reuse them.
for (PHINode *Phi : Phis) {
auto SimplifyPHINode = [&](PHINode *PN) -> Value * {
- if (Value *V = SimplifyInstruction(PN, DL, &SE.TLI, &SE.DT, &SE.AC))
+ if (Value *V = SimplifyInstruction(PN, {DL, &SE.TLI, &SE.DT, &SE.AC}))
return V;
if (!SE.isSCEVable(PN->getType()))
return nullptr;
diff --git a/lib/Analysis/TargetTransformInfo.cpp b/lib/Analysis/TargetTransformInfo.cpp
index d73b1a128031..26d606cce9bb 100644
--- a/lib/Analysis/TargetTransformInfo.cpp
+++ b/lib/Analysis/TargetTransformInfo.cpp
@@ -83,6 +83,12 @@ int TargetTransformInfo::getIntrinsicCost(
return Cost;
}
+unsigned
+TargetTransformInfo::getEstimatedNumberOfCaseClusters(const SwitchInst &SI,
+ unsigned &JTSize) const {
+ return TTIImpl->getEstimatedNumberOfCaseClusters(SI, JTSize);
+}
+
int TargetTransformInfo::getUserCost(const User *U) const {
int Cost = TTIImpl->getUserCost(U);
assert(Cost >= 0 && "TTI should not produce negative costs!");
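getEstimatedNumberOfCaseClusters is a new TTI hook that also reports how large a jump table for the switch would be. A hedged sketch of a cost-model caller, assuming a SwitchInst SI and a TargetTransformInfo TTI are in scope (the threshold is made up for illustration):

    unsigned JumpTableSize = 0;
    unsigned NumClusters =
        TTI.getEstimatedNumberOfCaseClusters(SI, JumpTableSize);
    // Treat a switch that neither forms a jump table nor collapses into a few
    // case clusters as expensive, e.g. when estimating inlining cost.
    bool ExpensiveSwitch = JumpTableSize == 0 && NumClusters > 4;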
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index af964b6259bb..dc151f232670 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -296,12 +296,12 @@ static void computeKnownBitsAddSub(bool Add, const Value *Op0, const Value *Op1,
if (NSW) {
// Adding two non-negative numbers, or subtracting a negative number from
// a non-negative one, can't wrap into negative.
- if (LHSKnown.Zero.isSignBitSet() && Known2.Zero.isSignBitSet())
- KnownOut.Zero.setSignBit();
+ if (LHSKnown.isNonNegative() && Known2.isNonNegative())
+ KnownOut.makeNonNegative();
// Adding two negative numbers, or subtracting a non-negative number from
// a negative one, can't wrap into non-negative.
- else if (LHSKnown.One.isSignBitSet() && Known2.One.isSignBitSet())
- KnownOut.One.setSignBit();
+ else if (LHSKnown.isNegative() && Known2.isNegative())
+ KnownOut.makeNegative();
}
}
}
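These rewrites are mechanical: the KnownBits sign-bit helpers replace direct pokes at the Zero/One APInts. For reference, the correspondence used throughout this file (KB is any KnownBits value):

    KB.isNonNegative();   // sign bit known zero: KB.Zero has the sign bit set
    KB.isNegative();      // sign bit known one:  KB.One has the sign bit set
    KB.makeNonNegative(); // record that the sign bit is zero
    KB.makeNegative();    // record that the sign bit is one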
@@ -321,10 +321,10 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// The product of a number with itself is non-negative.
isKnownNonNegative = true;
} else {
- bool isKnownNonNegativeOp1 = Known.Zero.isSignBitSet();
- bool isKnownNonNegativeOp0 = Known2.Zero.isSignBitSet();
- bool isKnownNegativeOp1 = Known.One.isSignBitSet();
- bool isKnownNegativeOp0 = Known2.One.isSignBitSet();
+ bool isKnownNonNegativeOp1 = Known.isNonNegative();
+ bool isKnownNonNegativeOp0 = Known2.isNonNegative();
+ bool isKnownNegativeOp1 = Known.isNegative();
+ bool isKnownNegativeOp0 = Known2.isNegative();
// The product of two numbers with the same sign is non-negative.
isKnownNonNegative = (isKnownNegativeOp1 && isKnownNegativeOp0) ||
(isKnownNonNegativeOp1 && isKnownNonNegativeOp0);
@@ -360,21 +360,20 @@ static void computeKnownBitsMul(const Value *Op0, const Value *Op1, bool NSW,
// which case we prefer to follow the result of the direct computation,
// though as the program is invoking undefined behaviour we can choose
// whatever we like here.
- if (isKnownNonNegative && !Known.One.isSignBitSet())
- Known.Zero.setSignBit();
- else if (isKnownNegative && !Known.Zero.isSignBitSet())
- Known.One.setSignBit();
+ if (isKnownNonNegative && !Known.isNegative())
+ Known.makeNonNegative();
+ else if (isKnownNegative && !Known.isNonNegative())
+ Known.makeNegative();
}
void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
- APInt &KnownZero,
- APInt &KnownOne) {
- unsigned BitWidth = KnownZero.getBitWidth();
+ KnownBits &Known) {
+ unsigned BitWidth = Known.getBitWidth();
unsigned NumRanges = Ranges.getNumOperands() / 2;
assert(NumRanges >= 1);
- KnownZero.setAllBits();
- KnownOne.setAllBits();
+ Known.Zero.setAllBits();
+ Known.One.setAllBits();
for (unsigned i = 0; i < NumRanges; ++i) {
ConstantInt *Lower =
@@ -388,8 +387,8 @@ void llvm::computeKnownBitsFromRangeMetadata(const MDNode &Ranges,
(Range.getUnsignedMax() ^ Range.getUnsignedMin()).countLeadingZeros();
APInt Mask = APInt::getHighBitsSet(BitWidth, CommonPrefixBits);
- KnownOne &= Range.getUnsignedMax() & Mask;
- KnownZero &= ~Range.getUnsignedMax() & Mask;
+ Known.One &= Range.getUnsignedMax() & Mask;
+ Known.Zero &= ~Range.getUnsignedMax() & Mask;
}
}
@@ -709,9 +708,9 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isSignBitSet()) {
+ if (RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
- Known.Zero.setSignBit();
+ Known.makeNonNegative();
}
// assume(v >_s c) where c is at least -1.
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
@@ -720,9 +719,9 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isAllOnesValue() || RHSKnown.Zero.isSignBitSet()) {
+ if (RHSKnown.One.isAllOnesValue() || RHSKnown.isNonNegative()) {
// We know that the sign bit is zero.
- Known.Zero.setSignBit();
+ Known.makeNonNegative();
}
// assume(v <=_s c) where c is negative
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
@@ -731,9 +730,9 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.One.isSignBitSet()) {
+ if (RHSKnown.isNegative()) {
// We know that the sign bit is one.
- Known.One.setSignBit();
+ Known.makeNegative();
}
// assume(v <_s c) where c is non-positive
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
@@ -742,9 +741,9 @@ static void computeKnownBitsFromAssume(const Value *V, KnownBits &Known,
KnownBits RHSKnown(BitWidth);
computeKnownBits(A, RHSKnown, Depth+1, Query(Q, I));
- if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.One.isSignBitSet()) {
+ if (RHSKnown.Zero.isAllOnesValue() || RHSKnown.isNegative()) {
// We know that the sign bit is one.
- Known.One.setSignBit();
+ Known.makeNegative();
}
// assume(v <=_u c)
} else if (match(Arg, m_ICmp(Pred, m_V, m_Value(A))) &&
@@ -902,7 +901,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
default: break;
case Instruction::Load:
if (MDNode *MD = cast<LoadInst>(I)->getMetadata(LLVMContext::MD_range))
- computeKnownBitsFromRangeMetadata(*MD, Known.Zero, Known.One);
+ computeKnownBitsFromRangeMetadata(*MD, Known);
break;
case Instruction::And: {
// If either the LHS or the RHS are Zero, the result is zero.
@@ -992,23 +991,23 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
unsigned MaxHighZeros = 0;
if (SPF == SPF_SMAX) {
// If both sides are negative, the result is negative.
- if (Known.One.isSignBitSet() && Known2.One.isSignBitSet())
+ if (Known.isNegative() && Known2.isNegative())
// We can derive a lower bound on the result by taking the max of the
// leading one bits.
MaxHighOnes = std::max(Known.One.countLeadingOnes(),
Known2.One.countLeadingOnes());
// If either side is non-negative, the result is non-negative.
- else if (Known.Zero.isSignBitSet() || Known2.Zero.isSignBitSet())
+ else if (Known.isNonNegative() || Known2.isNonNegative())
MaxHighZeros = 1;
} else if (SPF == SPF_SMIN) {
// If both sides are non-negative, the result is non-negative.
- if (Known.Zero.isSignBitSet() && Known2.Zero.isSignBitSet())
+ if (Known.isNonNegative() && Known2.isNonNegative())
// We can derive an upper bound on the result by taking the max of the
// leading zero bits.
MaxHighZeros = std::max(Known.Zero.countLeadingOnes(),
Known2.Zero.countLeadingOnes());
// If either side is negative, the result is negative.
- else if (Known.One.isSignBitSet() || Known2.One.isSignBitSet())
+ else if (Known.isNegative() || Known2.isNegative())
MaxHighOnes = 1;
} else if (SPF == SPF_UMAX) {
// We can derive a lower bound on the result by taking the max of the
@@ -1163,12 +1162,12 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// If the first operand is non-negative or has all low bits zero, then
// the upper bits are all zero.
- if (Known2.Zero.isSignBitSet() || ((Known2.Zero & LowBits) == LowBits))
+ if (Known2.isNonNegative() || LowBits.isSubsetOf(Known2.Zero))
Known.Zero |= ~LowBits;
// If the first operand is negative and not all low bits are zero, then
// the upper bits are all one.
- if (Known2.One.isSignBitSet() && ((Known2.One & LowBits) != 0))
+ if (Known2.isNegative() && LowBits.intersects(Known2.One))
Known.One |= ~LowBits;
assert((Known.Zero & Known.One) == 0 && "Bits known to be one AND zero?");
@@ -1180,8 +1179,8 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// remainder is zero.
computeKnownBits(I->getOperand(0), Known2, Depth + 1, Q);
// If it's known zero, our sign bit is also zero.
- if (Known2.Zero.isSignBitSet())
- Known.Zero.setSignBit();
+ if (Known2.isNonNegative())
+ Known.makeNonNegative();
break;
case Instruction::URem: {
@@ -1321,25 +1320,25 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// (add non-negative, non-negative) --> non-negative
// (add negative, negative) --> negative
if (Opcode == Instruction::Add) {
- if (Known2.Zero.isSignBitSet() && Known3.Zero.isSignBitSet())
- Known.Zero.setSignBit();
- else if (Known2.One.isSignBitSet() && Known3.One.isSignBitSet())
- Known.One.setSignBit();
+ if (Known2.isNonNegative() && Known3.isNonNegative())
+ Known.makeNonNegative();
+ else if (Known2.isNegative() && Known3.isNegative())
+ Known.makeNegative();
}
// (sub nsw non-negative, negative) --> non-negative
// (sub nsw negative, non-negative) --> negative
else if (Opcode == Instruction::Sub && LL == I) {
- if (Known2.Zero.isSignBitSet() && Known3.One.isSignBitSet())
- Known.Zero.setSignBit();
- else if (Known2.One.isSignBitSet() && Known3.Zero.isSignBitSet())
- Known.One.setSignBit();
+ if (Known2.isNonNegative() && Known3.isNegative())
+ Known.makeNonNegative();
+ else if (Known2.isNegative() && Known3.isNonNegative())
+ Known.makeNegative();
}
// (mul nsw non-negative, non-negative) --> non-negative
- else if (Opcode == Instruction::Mul && Known2.Zero.isSignBitSet() &&
- Known3.Zero.isSignBitSet())
- Known.Zero.setSignBit();
+ else if (Opcode == Instruction::Mul && Known2.isNonNegative() &&
+ Known3.isNonNegative())
+ Known.makeNonNegative();
}
break;
@@ -1384,7 +1383,7 @@ static void computeKnownBitsFromOperator(const Operator *I, KnownBits &Known,
// and then intersect with known bits based on other properties of the
// function.
if (MDNode *MD = cast<Instruction>(I)->getMetadata(LLVMContext::MD_range))
- computeKnownBitsFromRangeMetadata(*MD, Known.Zero, Known.One);
+ computeKnownBitsFromRangeMetadata(*MD, Known);
if (const Value *RV = ImmutableCallSite(I).getReturnedArgOperand()) {
computeKnownBits(RV, Known2, Depth + 1, Q);
Known.Zero |= Known2.Zero;
@@ -1599,8 +1598,8 @@ void ComputeSignBit(const Value *V, bool &KnownZero, bool &KnownOne,
}
KnownBits Bits(BitWidth);
computeKnownBits(V, Bits, Depth, Q);
- KnownOne = Bits.One.isSignBitSet();
- KnownZero = Bits.Zero.isSignBitSet();
+ KnownOne = Bits.isNegative();
+ KnownZero = Bits.isNonNegative();
}
/// Return true if the given value is known to have exactly one
@@ -2221,7 +2220,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If we are subtracting one from a positive number, there is no carry
// out of the result.
- if (Known.Zero.isSignBitSet())
+ if (Known.isNonNegative())
return Tmp;
}
@@ -2245,7 +2244,7 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If the input is known to be positive (the sign bit is known clear),
// the output of the NEG has the same number of sign bits as the input.
- if (Known.Zero.isSignBitSet())
+ if (Known.isNonNegative())
return Tmp2;
// Otherwise, we treat this like a SUB.
@@ -2302,10 +2301,10 @@ static unsigned ComputeNumSignBitsImpl(const Value *V, unsigned Depth,
// If we know that the sign bit is either zero or one, determine the number of
// identical bits in the top of the input value.
- if (Known.Zero.isSignBitSet())
+ if (Known.isNonNegative())
return std::max(FirstAnswer, Known.Zero.countLeadingOnes());
- if (Known.One.isSignBitSet())
+ if (Known.isNegative())
return std::max(FirstAnswer, Known.One.countLeadingOnes());
// computeKnownBits gave us no extra information about the top bits.
@@ -3198,7 +3197,7 @@ Value *llvm::GetUnderlyingObject(Value *V, const DataLayout &DL,
// See if InstructionSimplify knows any relevant tricks.
if (Instruction *I = dyn_cast<Instruction>(V))
// TODO: Acquire a DominatorTree and AssumptionCache and use them.
- if (Value *Simplified = SimplifyInstruction(I, DL, nullptr)) {
+ if (Value *Simplified = SimplifyInstruction(I, {DL, I})) {
V = Simplified;
continue;
}
@@ -3319,12 +3318,18 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
LI->getAlignment(), DL, CtxI, DT);
}
case Instruction::Call: {
+ auto *CI = cast<const CallInst>(Inst);
+ const Function *Callee = CI->getCalledFunction();
+ if (Callee && Callee->isSpeculatable())
+ return true;
if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
switch (II->getIntrinsicID()) {
// These synthetic intrinsics have no side-effects and just mark
// information about their operands.
// FIXME: There are other no-op synthetic instructions that potentially
// should be considered at least *safe* to speculate...
+ // FIXME: The speculatable attribute should be added to all these
+ // intrinsics and this case statement should be removed.
case Intrinsic::dbg_declare:
case Intrinsic::dbg_value:
return true;
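The new speculatable function attribute lets a frontend mark arbitrary calls as safe to execute speculatively, instead of relying on the intrinsic whitelist above. A short sketch of producing and querying it; the function name is illustrative and M is an in-scope Module:

    Function *F = M.getFunction("pure_helper"); // hypothetical function
    F->addFnAttr(Attribute::Speculatable);
    assert(F->isSpeculatable());
    // Direct calls to F now pass the Callee->isSpeculatable() check above and
    // are considered safe to speculatively execute.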
@@ -3836,7 +3841,7 @@ const Value *llvm::getGuaranteedNonFullPoisonOp(const Instruction *I) {
}
}
-bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
+bool llvm::programUndefinedIfFullPoison(const Instruction *PoisonI) {
// We currently only look for uses of poison values within the same basic
// block, as that makes it easier to guarantee that the uses will be
// executed given that PoisonI is executed.
diff --git a/lib/AsmParser/LLLexer.cpp b/lib/AsmParser/LLLexer.cpp
index 49a8ce4bed0b..a49276099f19 100644
--- a/lib/AsmParser/LLLexer.cpp
+++ b/lib/AsmParser/LLLexer.cpp
@@ -601,6 +601,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(hhvm_ccc);
KEYWORD(cxx_fast_tlscc);
KEYWORD(amdgpu_vs);
+ KEYWORD(amdgpu_hs);
KEYWORD(amdgpu_gs);
KEYWORD(amdgpu_ps);
KEYWORD(amdgpu_cs);
@@ -648,6 +649,7 @@ lltok::Kind LLLexer::LexIdentifier() {
KEYWORD(returned);
KEYWORD(returns_twice);
KEYWORD(signext);
+ KEYWORD(speculatable);
KEYWORD(sret);
KEYWORD(ssp);
KEYWORD(sspreq);
diff --git a/lib/AsmParser/LLParser.cpp b/lib/AsmParser/LLParser.cpp
index c7076ed0dd81..97a567565b47 100644
--- a/lib/AsmParser/LLParser.cpp
+++ b/lib/AsmParser/LLParser.cpp
@@ -1095,6 +1095,7 @@ bool LLParser::ParseFnAttributeValuePairs(AttrBuilder &B,
case lltok::kw_readonly: B.addAttribute(Attribute::ReadOnly); break;
case lltok::kw_returns_twice:
B.addAttribute(Attribute::ReturnsTwice); break;
+ case lltok::kw_speculatable: B.addAttribute(Attribute::Speculatable); break;
case lltok::kw_ssp: B.addAttribute(Attribute::StackProtect); break;
case lltok::kw_sspreq: B.addAttribute(Attribute::StackProtectReq); break;
case lltok::kw_sspstrong:
@@ -1667,8 +1668,7 @@ void LLParser::ParseOptionalDLLStorageClass(unsigned &Res) {
/// ::= 'hhvm_ccc'
/// ::= 'cxx_fast_tlscc'
/// ::= 'amdgpu_vs'
-/// ::= 'amdgpu_tcs'
-/// ::= 'amdgpu_tes'
+/// ::= 'amdgpu_hs'
/// ::= 'amdgpu_gs'
/// ::= 'amdgpu_ps'
/// ::= 'amdgpu_cs'
@@ -1710,6 +1710,7 @@ bool LLParser::ParseOptionalCallingConv(unsigned &CC) {
case lltok::kw_hhvm_ccc: CC = CallingConv::HHVM_C; break;
case lltok::kw_cxx_fast_tlscc: CC = CallingConv::CXX_FAST_TLS; break;
case lltok::kw_amdgpu_vs: CC = CallingConv::AMDGPU_VS; break;
+ case lltok::kw_amdgpu_hs: CC = CallingConv::AMDGPU_HS; break;
case lltok::kw_amdgpu_gs: CC = CallingConv::AMDGPU_GS; break;
case lltok::kw_amdgpu_ps: CC = CallingConv::AMDGPU_PS; break;
case lltok::kw_amdgpu_cs: CC = CallingConv::AMDGPU_CS; break;
@@ -4071,7 +4072,7 @@ bool LLParser::ParseDICompileUnit(MDNode *&Result, bool IsDistinct) {
/// virtuality: DW_VIRTUALTIY_pure_virtual,
/// virtualIndex: 10, thisAdjustment: 4, flags: 11,
/// isOptimized: false, templateParams: !4, declaration: !5,
-/// variables: !6)
+/// variables: !6, thrownTypes: !7)
bool LLParser::ParseDISubprogram(MDNode *&Result, bool IsDistinct) {
auto Loc = Lex.getLoc();
#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED) \
@@ -4093,7 +4094,8 @@ bool LLParser::ParseDISubprogram(MDNode *&Result, bool IsDistinct) {
OPTIONAL(unit, MDField, ); \
OPTIONAL(templateParams, MDField, ); \
OPTIONAL(declaration, MDField, ); \
- OPTIONAL(variables, MDField, );
+ OPTIONAL(variables, MDField, ); \
+ OPTIONAL(thrownTypes, MDField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS
@@ -4103,12 +4105,12 @@ bool LLParser::ParseDISubprogram(MDNode *&Result, bool IsDistinct) {
"missing 'distinct', required for !DISubprogram when 'isDefinition'");
Result = GET_OR_DISTINCT(
- DISubprogram, (Context, scope.Val, name.Val, linkageName.Val, file.Val,
- line.Val, type.Val, isLocal.Val, isDefinition.Val,
- scopeLine.Val, containingType.Val, virtuality.Val,
- virtualIndex.Val, thisAdjustment.Val, flags.Val,
- isOptimized.Val, unit.Val, templateParams.Val,
- declaration.Val, variables.Val));
+ DISubprogram,
+ (Context, scope.Val, name.Val, linkageName.Val, file.Val, line.Val,
+ type.Val, isLocal.Val, isDefinition.Val, scopeLine.Val,
+ containingType.Val, virtuality.Val, virtualIndex.Val, thisAdjustment.Val,
+ flags.Val, isOptimized.Val, unit.Val, templateParams.Val,
+ declaration.Val, variables.Val, thrownTypes.Val));
return false;
}
@@ -4148,15 +4150,13 @@ bool LLParser::ParseDILexicalBlockFile(MDNode *&Result, bool IsDistinct) {
bool LLParser::ParseDINamespace(MDNode *&Result, bool IsDistinct) {
#define VISIT_MD_FIELDS(OPTIONAL, REQUIRED) \
REQUIRED(scope, MDField, ); \
- OPTIONAL(file, MDField, ); \
OPTIONAL(name, MDStringField, ); \
- OPTIONAL(line, LineField, ); \
OPTIONAL(exportSymbols, MDBoolField, );
PARSE_MD_FIELDS();
#undef VISIT_MD_FIELDS
Result = GET_OR_DISTINCT(DINamespace,
- (Context, scope.Val, file.Val, name.Val, line.Val, exportSymbols.Val));
+ (Context, scope.Val, name.Val, exportSymbols.Val));
return false;
}
diff --git a/lib/AsmParser/LLToken.h b/lib/AsmParser/LLToken.h
index 33f8e63daa05..6c8ed7da495d 100644
--- a/lib/AsmParser/LLToken.h
+++ b/lib/AsmParser/LLToken.h
@@ -153,6 +153,7 @@ enum Kind {
kw_hhvm_ccc,
kw_cxx_fast_tlscc,
kw_amdgpu_vs,
+ kw_amdgpu_hs,
kw_amdgpu_gs,
kw_amdgpu_ps,
kw_amdgpu_cs,
@@ -198,6 +199,7 @@ enum Kind {
kw_returned,
kw_returns_twice,
kw_signext,
+ kw_speculatable,
kw_ssp,
kw_sspreq,
kw_sspstrong,
diff --git a/lib/Bitcode/Reader/BitcodeReader.cpp b/lib/Bitcode/Reader/BitcodeReader.cpp
index 6d727ce83346..8b6f79a81b93 100644
--- a/lib/Bitcode/Reader/BitcodeReader.cpp
+++ b/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -93,6 +93,13 @@ static cl::opt<bool> PrintSummaryGUIDs(
cl::desc(
"Print the global id for each value when reading the module summary"));
+// FIXME: This flag should either be removed or moved to clang as a driver flag.
+static llvm::cl::opt<bool> IgnoreEmptyThinLTOIndexFile(
+ "ignore-empty-index-file", llvm::cl::ZeroOrMore,
+ llvm::cl::desc(
+ "Ignore an empty index file and perform non-ThinLTO compilation"),
+ llvm::cl::init(false));
+
namespace {
enum {
@@ -706,11 +713,20 @@ class ModuleSummaryIndexBitcodeReader : public BitcodeReaderBase {
/// Original source file name recorded in a bitcode record.
std::string SourceFileName;
+ /// The string identifier given to this module by the client, normally the
+ /// path to the bitcode file.
+ StringRef ModulePath;
+
+ /// For per-module summary indexes, the unique numerical identifier given to
+ /// this module by the client.
+ unsigned ModuleId;
+
public:
ModuleSummaryIndexBitcodeReader(BitstreamCursor Stream, StringRef Strtab,
- ModuleSummaryIndex &TheIndex);
+ ModuleSummaryIndex &TheIndex,
+ StringRef ModulePath, unsigned ModuleId);
- Error parseModule(StringRef ModulePath);
+ Error parseModule();
private:
void setValueGUID(uint64_t ValueID, StringRef ValueName,
@@ -723,11 +739,13 @@ private:
std::vector<FunctionSummary::EdgeTy> makeCallList(ArrayRef<uint64_t> Record,
bool IsOldProfileFormat,
bool HasProfile);
- Error parseEntireSummary(StringRef ModulePath);
+ Error parseEntireSummary();
Error parseModuleStringTable();
std::pair<GlobalValue::GUID, GlobalValue::GUID>
getGUIDFromValueId(unsigned ValueId);
+
+ ModulePathStringTableTy::iterator addThisModulePath();
};
} // end anonymous namespace
@@ -1119,6 +1137,7 @@ static uint64_t getRawAttributeMask(Attribute::AttrKind Val) {
case Attribute::SwiftSelf: return 1ULL << 51;
case Attribute::SwiftError: return 1ULL << 52;
case Attribute::WriteOnly: return 1ULL << 53;
+ case Attribute::Speculatable: return 1ULL << 54;
case Attribute::Dereferenceable:
llvm_unreachable("dereferenceable attribute not supported in raw format");
break;
@@ -1315,6 +1334,8 @@ static Attribute::AttrKind getAttrFromCode(uint64_t Code) {
return Attribute::ReturnsTwice;
case bitc::ATTR_KIND_S_EXT:
return Attribute::SExt;
+ case bitc::ATTR_KIND_SPECULATABLE:
+ return Attribute::Speculatable;
case bitc::ATTR_KIND_STACK_ALIGNMENT:
return Attribute::StackAlignment;
case bitc::ATTR_KIND_STACK_PROTECT:
@@ -4666,8 +4687,15 @@ std::vector<StructType *> BitcodeReader::getIdentifiedStructTypes() const {
}
ModuleSummaryIndexBitcodeReader::ModuleSummaryIndexBitcodeReader(
- BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex)
- : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex) {}
+ BitstreamCursor Cursor, StringRef Strtab, ModuleSummaryIndex &TheIndex,
+ StringRef ModulePath, unsigned ModuleId)
+ : BitcodeReaderBase(std::move(Cursor), Strtab), TheIndex(TheIndex),
+ ModulePath(ModulePath), ModuleId(ModuleId) {}
+
+ModulePathStringTableTy::iterator
+ModuleSummaryIndexBitcodeReader::addThisModulePath() {
+ return TheIndex.addModulePath(ModulePath, ModuleId);
+}
std::pair<GlobalValue::GUID, GlobalValue::GUID>
ModuleSummaryIndexBitcodeReader::getGUIDFromValueId(unsigned ValueId) {
@@ -4777,7 +4805,7 @@ Error ModuleSummaryIndexBitcodeReader::parseValueSymbolTable(
// Parse just the blocks needed for building the index out of the module.
// At the end of this routine the module Index is populated with a map
// from global value id to GlobalValueSummary objects.
-Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
+Error ModuleSummaryIndexBitcodeReader::parseModule() {
if (Stream.EnterSubBlock(bitc::MODULE_BLOCK_ID))
return error("Invalid record");
@@ -4828,7 +4856,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
SeenValueSymbolTable = true;
}
SeenGlobalValSummary = true;
- if (Error Err = parseEntireSummary(ModulePath))
+ if (Error Err = parseEntireSummary())
return Err;
break;
case bitc::MODULE_STRTAB_BLOCK_ID:
@@ -4861,12 +4889,7 @@ Error ModuleSummaryIndexBitcodeReader::parseModule(StringRef ModulePath) {
case bitc::MODULE_CODE_HASH: {
if (Record.size() != 5)
return error("Invalid hash length " + Twine(Record.size()).str());
- if (TheIndex.modulePaths().empty())
- // We always seed the index with the module.
- TheIndex.addModulePath(ModulePath, 0);
- if (TheIndex.modulePaths().size() != 1)
- return error("Don't expect multiple modules defined?");
- auto &Hash = TheIndex.modulePaths().begin()->second.second;
+ auto &Hash = addThisModulePath()->second.second;
int Pos = 0;
for (auto &Val : Record) {
assert(!(Val >> 32) && "Unexpected high bits set");
@@ -4941,8 +4964,7 @@ std::vector<FunctionSummary::EdgeTy> ModuleSummaryIndexBitcodeReader::makeCallLi
// Eagerly parse the entire summary block. This populates the GlobalValueSummary
// objects in the index.
-Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
- StringRef ModulePath) {
+Error ModuleSummaryIndexBitcodeReader::parseEntireSummary() {
if (Stream.EnterSubBlock(bitc::GLOBALVAL_SUMMARY_BLOCK_ID))
return error("Invalid record");
SmallVector<uint64_t, 64> Record;
@@ -4966,7 +4988,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
// "OriginalName" attachement.
GlobalValueSummary *LastSeenSummary = nullptr;
GlobalValue::GUID LastSeenGUID = 0;
- bool Combined = false;
// We can expect to see any number of type ID information records before
// each function summary record; these variables store the information
@@ -4985,16 +5006,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
case BitstreamEntry::Error:
return error("Malformed block");
case BitstreamEntry::EndBlock:
- // For a per-module index, remove any entries that still have empty
- // summaries. The VST parsing creates entries eagerly for all symbols,
- // but not all have associated summaries (e.g. it doesn't know how to
- // distinguish between VST_CODE_ENTRY for function declarations vs global
- // variables with initializers that end up with a summary). Remove those
- // entries now so that we don't need to rely on the combined index merger
- // to clean them up (especially since that may not run for the first
- // module's index if we merge into that).
- if (!Combined)
- TheIndex.removeEmptySummaryEntries();
return Error::success();
case BitstreamEntry::Record:
// The interesting case.
@@ -5058,7 +5069,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
PendingTypeTestAssumeConstVCalls.clear();
PendingTypeCheckedLoadConstVCalls.clear();
auto GUID = getGUIDFromValueId(ValueID);
- FS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+ FS->setModulePath(addThisModulePath()->first());
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
break;
@@ -5078,13 +5089,14 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
// string table section in the per-module index, we create a single
// module path string table entry with an empty (0) ID to take
// ownership.
- AS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+ AS->setModulePath(addThisModulePath()->first());
GlobalValue::GUID AliaseeGUID = getGUIDFromValueId(AliaseeID).first;
- auto *AliaseeSummary = TheIndex.getGlobalValueSummary(AliaseeGUID);
- if (!AliaseeSummary)
+ auto AliaseeInModule =
+ TheIndex.findSummaryInModule(AliaseeGUID, ModulePath);
+ if (!AliaseeInModule)
return error("Alias expects aliasee summary to be parsed");
- AS->setAliasee(AliaseeSummary);
+ AS->setAliasee(AliaseeInModule);
auto GUID = getGUIDFromValueId(ValueID);
AS->setOriginalName(GUID.second);
@@ -5099,7 +5111,7 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
std::vector<ValueInfo> Refs =
makeRefList(ArrayRef<uint64_t>(Record).slice(2));
auto FS = llvm::make_unique<GlobalVarSummary>(Flags, std::move(Refs));
- FS->setModulePath(TheIndex.addModulePath(ModulePath, 0)->first());
+ FS->setModulePath(addThisModulePath()->first());
auto GUID = getGUIDFromValueId(ValueID);
FS->setOriginalName(GUID.second);
TheIndex.addGlobalValueSummary(GUID.first, std::move(FS));
@@ -5143,7 +5155,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
LastSeenGUID = GUID;
FS->setModulePath(ModuleIdMap[ModuleId]);
TheIndex.addGlobalValueSummary(GUID, std::move(FS));
- Combined = true;
break;
}
// FS_COMBINED_ALIAS: [valueid, modid, flags, valueid]
@@ -5169,7 +5180,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
LastSeenGUID = GUID;
TheIndex.addGlobalValueSummary(GUID, std::move(AS));
- Combined = true;
break;
}
// FS_COMBINED_GLOBALVAR_INIT_REFS: [valueid, modid, flags, n x valueid]
@@ -5186,7 +5196,6 @@ Error ModuleSummaryIndexBitcodeReader::parseEntireSummary(
GlobalValue::GUID GUID = getGUIDFromValueId(ValueID).first;
LastSeenGUID = GUID;
TheIndex.addGlobalValueSummary(GUID, std::move(FS));
- Combined = true;
break;
}
// FS_COMBINED_ORIGINAL_NAME: [original_name]
@@ -5486,15 +5495,27 @@ BitcodeModule::getLazyModule(LLVMContext &Context, bool ShouldLazyLoadMetadata,
return getModuleImpl(Context, false, ShouldLazyLoadMetadata, IsImporting);
}
+// Parse the specified bitcode buffer and merge the index into CombinedIndex.
+Error BitcodeModule::readSummary(ModuleSummaryIndex &CombinedIndex,
+ unsigned ModuleId) {
+ BitstreamCursor Stream(Buffer);
+ Stream.JumpToBit(ModuleBit);
+
+ ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, CombinedIndex,
+ ModuleIdentifier, ModuleId);
+ return R.parseModule();
+}
+
// Parse the specified bitcode buffer, returning the function info index.
Expected<std::unique_ptr<ModuleSummaryIndex>> BitcodeModule::getSummary() {
BitstreamCursor Stream(Buffer);
Stream.JumpToBit(ModuleBit);
auto Index = llvm::make_unique<ModuleSummaryIndex>();
- ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index);
+ ModuleSummaryIndexBitcodeReader R(std::move(Stream), Strtab, *Index,
+ ModuleIdentifier, 0);
- if (Error Err = R.parseModule(ModuleIdentifier))
+ if (Error Err = R.parseModule())
return std::move(Err);
return std::move(Index);
@@ -5604,6 +5625,16 @@ Expected<std::string> llvm::getBitcodeProducerString(MemoryBufferRef Buffer) {
return readIdentificationCode(*StreamOrErr);
}
+Error llvm::readModuleSummaryIndex(MemoryBufferRef Buffer,
+ ModuleSummaryIndex &CombinedIndex,
+ unsigned ModuleId) {
+ Expected<BitcodeModule> BM = getSingleModule(Buffer);
+ if (!BM)
+ return BM.takeError();
+
+ return BM->readSummary(CombinedIndex, ModuleId);
+}
+
Expected<std::unique_ptr<ModuleSummaryIndex>>
llvm::getModuleSummaryIndex(MemoryBufferRef Buffer) {
Expected<BitcodeModule> BM = getSingleModule(Buffer);
@@ -5620,3 +5651,14 @@ Expected<bool> llvm::hasGlobalValueSummary(MemoryBufferRef Buffer) {
return BM->hasSummary();
}
+
+Expected<std::unique_ptr<ModuleSummaryIndex>>
+llvm::getModuleSummaryIndexForFile(StringRef Path) {
+ ErrorOr<std::unique_ptr<MemoryBuffer>> FileOrErr =
+ MemoryBuffer::getFileOrSTDIN(Path);
+ if (!FileOrErr)
+ return errorCodeToError(FileOrErr.getError());
+ if (IgnoreEmptyThinLTOIndexFile && !(*FileOrErr)->getBufferSize())
+ return nullptr;
+ return getModuleSummaryIndex(**FileOrErr);
+}
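Editorial note on the two new entry points above: BitcodeModule::readSummary and llvm::readModuleSummaryIndex let a caller accumulate summaries from several bitcode buffers into one combined ModuleSummaryIndex, with ModuleId keeping the contributing modules apart. A minimal usage sketch under that reading (the helper name buildCombinedIndex and the Buffers parameter are illustrative only, not part of this import):

#include "llvm/ADT/ArrayRef.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/MemoryBuffer.h"

// Merge the summary block of every buffer into Combined, assigning each
// buffer a distinct module id, much as a ThinLTO link seeds its combined
// index from the per-module summaries.
static llvm::Error
buildCombinedIndex(llvm::ArrayRef<llvm::MemoryBufferRef> Buffers,
                   llvm::ModuleSummaryIndex &Combined) {
  unsigned ModuleId = 0;
  for (llvm::MemoryBufferRef Buf : Buffers)
    if (llvm::Error Err =
            llvm::readModuleSummaryIndex(Buf, Combined, ModuleId++))
      return Err;
  return llvm::Error::success();
}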
diff --git a/lib/Bitcode/Reader/MetadataLoader.cpp b/lib/Bitcode/Reader/MetadataLoader.cpp
index d089684a052f..42135e5949ce 100644
--- a/lib/Bitcode/Reader/MetadataLoader.cpp
+++ b/lib/Bitcode/Reader/MetadataLoader.cpp
@@ -474,8 +474,8 @@ class MetadataLoader::MetadataLoaderImpl {
for (auto CU_SP : CUSubprograms)
if (auto *SPs = dyn_cast_or_null<MDTuple>(CU_SP.second))
for (auto &Op : SPs->operands())
- if (auto *SP = dyn_cast_or_null<MDNode>(Op))
- SP->replaceOperandWith(7, CU_SP.first);
+ if (auto *SP = dyn_cast_or_null<DISubprogram>(Op))
+ SP->replaceUnit(CU_SP.first);
CUSubprograms.clear();
}
@@ -1298,7 +1298,7 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
break;
}
case bitc::METADATA_SUBPROGRAM: {
- if (Record.size() < 18 || Record.size() > 20)
+ if (Record.size() < 18 || Record.size() > 21)
return error("Invalid record");
IsDistinct =
@@ -1314,29 +1314,31 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
unsigned Offset = Record.size() >= 19 ? 1 : 0;
bool HasFn = Offset && !HasUnit;
bool HasThisAdj = Record.size() >= 20;
+ bool HasThrownTypes = Record.size() >= 21;
DISubprogram *SP = GET_OR_DISTINCT(
- DISubprogram, (Context,
- getDITypeRefOrNull(Record[1]), // scope
- getMDString(Record[2]), // name
- getMDString(Record[3]), // linkageName
- getMDOrNull(Record[4]), // file
- Record[5], // line
- getMDOrNull(Record[6]), // type
- Record[7], // isLocal
- Record[8], // isDefinition
- Record[9], // scopeLine
- getDITypeRefOrNull(Record[10]), // containingType
- Record[11], // virtuality
- Record[12], // virtualIndex
- HasThisAdj ? Record[19] : 0, // thisAdjustment
- static_cast<DINode::DIFlags>(Record[13] // flags
- ),
- Record[14], // isOptimized
- HasUnit ? CUorFn : nullptr, // unit
- getMDOrNull(Record[15 + Offset]), // templateParams
- getMDOrNull(Record[16 + Offset]), // declaration
- getMDOrNull(Record[17 + Offset]) // variables
- ));
+ DISubprogram,
+ (Context,
+ getDITypeRefOrNull(Record[1]), // scope
+ getMDString(Record[2]), // name
+ getMDString(Record[3]), // linkageName
+ getMDOrNull(Record[4]), // file
+ Record[5], // line
+ getMDOrNull(Record[6]), // type
+ Record[7], // isLocal
+ Record[8], // isDefinition
+ Record[9], // scopeLine
+ getDITypeRefOrNull(Record[10]), // containingType
+ Record[11], // virtuality
+ Record[12], // virtualIndex
+ HasThisAdj ? Record[19] : 0, // thisAdjustment
+ static_cast<DINode::DIFlags>(Record[13]), // flags
+ Record[14], // isOptimized
+ HasUnit ? CUorFn : nullptr, // unit
+ getMDOrNull(Record[15 + Offset]), // templateParams
+ getMDOrNull(Record[16 + Offset]), // declaration
+ getMDOrNull(Record[17 + Offset]), // variables
+ HasThrownTypes ? getMDOrNull(Record[20]) : nullptr // thrownTypes
+ ));
MetadataList.assignValue(SP, NextMetadataNo);
NextMetadataNo++;
@@ -1381,16 +1383,20 @@ Error MetadataLoader::MetadataLoaderImpl::parseOneMetadata(
break;
}
case bitc::METADATA_NAMESPACE: {
- if (Record.size() != 5)
+ // Newer versions of DINamespace dropped file and line.
+ MDString *Name;
+ if (Record.size() == 3)
+ Name = getMDString(Record[2]);
+ else if (Record.size() == 5)
+ Name = getMDString(Record[3]);
+ else
return error("Invalid record");
IsDistinct = Record[0] & 1;
bool ExportSymbols = Record[0] & 2;
MetadataList.assignValue(
GET_OR_DISTINCT(DINamespace,
- (Context, getMDOrNull(Record[1]),
- getMDOrNull(Record[2]), getMDString(Record[3]),
- Record[4], ExportSymbols)),
+ (Context, getMDOrNull(Record[1]), Name, ExportSymbols)),
NextMetadataNo);
NextMetadataNo++;
break;
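For reference, the two METADATA_NAMESPACE layouts the reader now accepts, with field order read off the code above (shown only as an aid, not taken verbatim from the bitcode format docs):

  old record, size 5: [distinct | exportSymbols << 1, scope, file, name, line]
  new record, size 3: [distinct | exportSymbols << 1, scope, name]

The matching writer change later in this patch emits only the three-field form; the five-field branch stays so older bitcode keeps loading.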
diff --git a/lib/Bitcode/Reader/ValueList.cpp b/lib/Bitcode/Reader/ValueList.cpp
index 7152a51cea6e..d1a2a11bbfad 100644
--- a/lib/Bitcode/Reader/ValueList.cpp
+++ b/lib/Bitcode/Reader/ValueList.cpp
@@ -58,7 +58,7 @@ void BitcodeReaderValueList::assignValue(Value *V, unsigned Idx) {
if (Idx >= size())
resize(Idx + 1);
- WeakVH &OldV = ValuePtrs[Idx];
+ WeakTrackingVH &OldV = ValuePtrs[Idx];
if (!OldV) {
OldV = V;
return;
diff --git a/lib/Bitcode/Reader/ValueList.h b/lib/Bitcode/Reader/ValueList.h
index 3119d7735e22..72775a3cf3bc 100644
--- a/lib/Bitcode/Reader/ValueList.h
+++ b/lib/Bitcode/Reader/ValueList.h
@@ -20,7 +20,7 @@ namespace llvm {
class Constant;
class BitcodeReaderValueList {
- std::vector<WeakVH> ValuePtrs;
+ std::vector<WeakTrackingVH> ValuePtrs;
/// As we resolve forward-referenced constants, we add information about them
/// to this vector. This allows us to resolve them in bulk instead of
diff --git a/lib/Bitcode/Writer/BitcodeWriter.cpp b/lib/Bitcode/Writer/BitcodeWriter.cpp
index e5aba03c8dc1..8aa7d0daf070 100644
--- a/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ b/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -688,6 +688,8 @@ static uint64_t getAttrKindEncoding(Attribute::AttrKind Kind) {
return bitc::ATTR_KIND_RETURNS_TWICE;
case Attribute::SExt:
return bitc::ATTR_KIND_S_EXT;
+ case Attribute::Speculatable:
+ return bitc::ATTR_KIND_SPECULATABLE;
case Attribute::StackAlignment:
return bitc::ATTR_KIND_STACK_ALIGNMENT;
case Attribute::StackProtect:
@@ -1608,6 +1610,7 @@ void ModuleBitcodeWriter::writeDISubprogram(const DISubprogram *N,
Record.push_back(VE.getMetadataOrNullID(N->getDeclaration()));
Record.push_back(VE.getMetadataOrNullID(N->getVariables().get()));
Record.push_back(N->getThisAdjustment());
+ Record.push_back(VE.getMetadataOrNullID(N->getThrownTypes().get()));
Stream.EmitRecord(bitc::METADATA_SUBPROGRAM, Record, Abbrev);
Record.clear();
@@ -1643,9 +1646,7 @@ void ModuleBitcodeWriter::writeDINamespace(const DINamespace *N,
unsigned Abbrev) {
Record.push_back(N->isDistinct() | N->getExportSymbols() << 1);
Record.push_back(VE.getMetadataOrNullID(N->getScope()));
- Record.push_back(VE.getMetadataOrNullID(N->getFile()));
Record.push_back(VE.getMetadataOrNullID(N->getRawName()));
- Record.push_back(N->getLine());
Stream.EmitRecord(bitc::METADATA_NAMESPACE, Record, Abbrev);
Record.clear();
diff --git a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d99065b1b67a..b11e30c359b3 100644
--- a/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -820,7 +820,7 @@ static bool emitDebugValueComment(const MachineInstr *MI, AsmPrinter &AP) {
const DILocalVariable *V = MI->getDebugVariable();
if (auto *SP = dyn_cast<DISubprogram>(V->getScope())) {
- StringRef Name = SP->getDisplayName();
+ StringRef Name = SP->getName();
if (!Name.empty())
OS << Name << ":";
}
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
index 2571f6869651..786b11618d75 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.cpp
@@ -17,6 +17,7 @@
#include "llvm/DebugInfo/CodeView/CVTypeVisitor.h"
#include "llvm/DebugInfo/CodeView/CodeView.h"
#include "llvm/DebugInfo/CodeView/Line.h"
+#include "llvm/DebugInfo/CodeView/ModuleDebugInlineeLinesFragment.h"
#include "llvm/DebugInfo/CodeView/SymbolRecord.h"
#include "llvm/DebugInfo/CodeView/TypeDatabase.h"
#include "llvm/DebugInfo/CodeView/TypeDumpVisitor.h"
@@ -237,7 +238,7 @@ TypeIndex CodeViewDebug::getFuncIdForSubprogram(const DISubprogram *SP) {
// The display name includes function template arguments. Drop them to match
// MSVC.
- StringRef DisplayName = SP->getDisplayName().split('<').first;
+ StringRef DisplayName = SP->getName().split('<').first;
const DIScope *Scope = SP->getScope().resolve();
TypeIndex TI;
@@ -392,7 +393,7 @@ void CodeViewDebug::endModule() {
// subprograms.
switchToDebugSectionForSymbol(nullptr);
- MCSymbol *CompilerInfo = beginCVSubsection(ModuleSubstreamKind::Symbols);
+ MCSymbol *CompilerInfo = beginCVSubsection(ModuleDebugFragmentKind::Symbols);
emitCompilerInformation();
endCVSubsection(CompilerInfo);
@@ -416,7 +417,7 @@ void CodeViewDebug::endModule() {
// Emit UDT records for any types used by global variables.
if (!GlobalUDTs.empty()) {
- MCSymbol *SymbolsEnd = beginCVSubsection(ModuleSubstreamKind::Symbols);
+ MCSymbol *SymbolsEnd = beginCVSubsection(ModuleDebugFragmentKind::Symbols);
emitDebugInfoForUDTs(GlobalUDTs);
endCVSubsection(SymbolsEnd);
}
@@ -644,7 +645,8 @@ void CodeViewDebug::emitInlineeLinesSubsection() {
return;
OS.AddComment("Inlinee lines subsection");
- MCSymbol *InlineEnd = beginCVSubsection(ModuleSubstreamKind::InlineeLines);
+ MCSymbol *InlineEnd =
+ beginCVSubsection(ModuleDebugFragmentKind::InlineeLines);
// We don't provide any extra file info.
// FIXME: Find out if debuggers use this info.
@@ -657,7 +659,7 @@ void CodeViewDebug::emitInlineeLinesSubsection() {
OS.AddBlankLine();
unsigned FileId = maybeRecordFile(SP->getFile());
- OS.AddComment("Inlined function " + SP->getDisplayName() + " starts at " +
+ OS.AddComment("Inlined function " + SP->getName() + " starts at " +
SP->getFilename() + Twine(':') + Twine(SP->getLine()));
OS.AddBlankLine();
// The filechecksum table uses 8 byte entries for now, and file ids start at
@@ -759,9 +761,9 @@ void CodeViewDebug::emitDebugInfoForFunction(const Function *GV,
// If we have a display name, build the fully qualified name by walking the
// chain of scopes.
- if (!SP->getDisplayName().empty())
+ if (!SP->getName().empty())
FuncName =
- getFullyQualifiedName(SP->getScope().resolve(), SP->getDisplayName());
+ getFullyQualifiedName(SP->getScope().resolve(), SP->getName());
// If our DISubprogram name is empty, use the mangled name.
if (FuncName.empty())
@@ -769,7 +771,7 @@ void CodeViewDebug::emitDebugInfoForFunction(const Function *GV,
// Emit a symbol subsection, required by VS2012+ to find function boundaries.
OS.AddComment("Symbol subsection for " + Twine(FuncName));
- MCSymbol *SymbolsEnd = beginCVSubsection(ModuleSubstreamKind::Symbols);
+ MCSymbol *SymbolsEnd = beginCVSubsection(ModuleDebugFragmentKind::Symbols);
{
MCSymbol *ProcRecordBegin = MMI->getContext().createTempSymbol(),
*ProcRecordEnd = MMI->getContext().createTempSymbol();
@@ -2114,7 +2116,7 @@ void CodeViewDebug::beginInstruction(const MachineInstr *MI) {
maybeRecordLocation(DL, Asm->MF);
}
-MCSymbol *CodeViewDebug::beginCVSubsection(ModuleSubstreamKind Kind) {
+MCSymbol *CodeViewDebug::beginCVSubsection(ModuleDebugFragmentKind Kind) {
MCSymbol *BeginLabel = MMI->getContext().createTempSymbol(),
*EndLabel = MMI->getContext().createTempSymbol();
OS.EmitIntValue(unsigned(Kind), 4);
@@ -2174,7 +2176,7 @@ void CodeViewDebug::emitDebugInfoForGlobals() {
if (!GV->hasComdat() && !GV->isDeclarationForLinker()) {
if (!EndLabel) {
OS.AddComment("Symbol subsection for globals");
- EndLabel = beginCVSubsection(ModuleSubstreamKind::Symbols);
+ EndLabel = beginCVSubsection(ModuleDebugFragmentKind::Symbols);
}
// FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
emitDebugInfoForGlobal(GVE->getVariable(), GV, Asm->getSymbol(GV));
@@ -2192,7 +2194,7 @@ void CodeViewDebug::emitDebugInfoForGlobals() {
OS.AddComment("Symbol subsection for " +
Twine(GlobalValue::getRealLinkageName(GV->getName())));
switchToDebugSectionForSymbol(GVSym);
- EndLabel = beginCVSubsection(ModuleSubstreamKind::Symbols);
+ EndLabel = beginCVSubsection(ModuleDebugFragmentKind::Symbols);
// FIXME: emitDebugInfoForGlobal() doesn't handle DIExpressions.
emitDebugInfoForGlobal(GVE->getVariable(), GV, GVSym);
endCVSubsection(EndLabel);
diff --git a/lib/CodeGen/AsmPrinter/CodeViewDebug.h b/lib/CodeGen/AsmPrinter/CodeViewDebug.h
index 343384c51772..46b2daa1e007 100644
--- a/lib/CodeGen/AsmPrinter/CodeViewDebug.h
+++ b/lib/CodeGen/AsmPrinter/CodeViewDebug.h
@@ -216,7 +216,7 @@ class LLVM_LIBRARY_VISIBILITY CodeViewDebug : public DebugHandlerBase {
/// Opens a subsection of the given kind in a .debug$S codeview section.
/// Returns an end label for use with endCVSubsection when the subsection is
/// finished.
- MCSymbol *beginCVSubsection(codeview::ModuleSubstreamKind Kind);
+ MCSymbol *beginCVSubsection(codeview::ModuleDebugFragmentKind Kind);
void endCVSubsection(MCSymbol *EndLabel);
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
index 16fb20dd7e20..8d25def7772c 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.cpp
@@ -375,10 +375,6 @@ void DwarfUnit::addSourceLine(DIE &Die, const DIObjCProperty *Ty) {
addSourceLine(Die, Ty->getLine(), Ty->getFilename(), Ty->getDirectory());
}
-void DwarfUnit::addSourceLine(DIE &Die, const DINamespace *NS) {
- addSourceLine(Die, NS->getLine(), NS->getFilename(), NS->getDirectory());
-}
-
/* Byref variables, in Blocks, are declared by the programmer as "SomeType
VarName;", but the compiler creates a __Block_byref_x_VarName struct, and
gives the variable VarName either the struct, or a pointer to the struct, as
@@ -662,6 +658,14 @@ void DwarfUnit::addTemplateParams(DIE &Buffer, DINodeArray TParams) {
}
}
+/// Add thrown types.
+void DwarfUnit::addThrownTypes(DIE &Die, DINodeArray ThrownTypes) {
+ for (const auto *Ty : ThrownTypes) {
+ DIE &TT = createAndAddDIE(dwarf::DW_TAG_thrown_type, Die);
+ addType(TT, cast<DIType>(Ty));
+ }
+}
+
DIE *DwarfUnit::getOrCreateContextDIE(const DIScope *Context) {
if (!Context || isa<DIFile>(Context))
return &getUnitDie();
@@ -1077,7 +1081,6 @@ DIE *DwarfUnit::getOrCreateNameSpace(const DINamespace *NS) {
Name = "(anonymous namespace)";
DD->addAccelNamespace(Name, NDie);
addGlobalName(Name, NDie, NS->getScope());
- addSourceLine(NDie, NS);
if (NS->getExportSymbols())
addFlag(NDie, dwarf::DW_AT_export_symbols);
return &NDie;
@@ -1249,6 +1252,8 @@ void DwarfUnit::applySubprogramAttributes(const DISubprogram *SP, DIE &SPDie,
constructSubprogramArguments(SPDie, Args);
}
+ addThrownTypes(SPDie, SP->getThrownTypes());
+
if (SP->isArtificial())
addFlag(SPDie, dwarf::DW_AT_artificial);
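Rough illustration of the effect of addThrownTypes (not output copied from a debugger, and the exact attributes depend on how addType resolves each entry): a subprogram whose thrownTypes array holds two types would gain two children in its DIE tree, along the lines of

  DW_TAG_subprogram
    DW_TAG_thrown_type  with DW_AT_type referring to the first thrown type
    DW_TAG_thrown_type  with DW_AT_type referring to the second thrown type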
diff --git a/lib/CodeGen/AsmPrinter/DwarfUnit.h b/lib/CodeGen/AsmPrinter/DwarfUnit.h
index e84df4650882..8fc841703e23 100644
--- a/lib/CodeGen/AsmPrinter/DwarfUnit.h
+++ b/lib/CodeGen/AsmPrinter/DwarfUnit.h
@@ -210,7 +210,6 @@ public:
void addSourceLine(DIE &Die, const DIGlobalVariable *G);
void addSourceLine(DIE &Die, const DISubprogram *SP);
void addSourceLine(DIE &Die, const DIType *Ty);
- void addSourceLine(DIE &Die, const DINamespace *NS);
void addSourceLine(DIE &Die, const DIObjCProperty *Ty);
/// Add constant value entry in variable DIE.
@@ -230,6 +229,9 @@ public:
/// Add template parameters in buffer.
void addTemplateParams(DIE &Buffer, DINodeArray TParams);
+ /// Add thrown types.
+ void addThrownTypes(DIE &Die, DINodeArray ThrownTypes);
+
// FIXME: Should be reformulated in terms of addComplexAddress.
/// Start with the address based on the location provided, and generate the
/// DWARF information necessary to find the actual Block variable (navigating
diff --git a/lib/CodeGen/CMakeLists.txt b/lib/CodeGen/CMakeLists.txt
index 0912d9f68aff..26da748fa244 100644
--- a/lib/CodeGen/CMakeLists.txt
+++ b/lib/CodeGen/CMakeLists.txt
@@ -65,6 +65,7 @@ add_llvm_library(LLVMCodeGen
MachineCSE.cpp
MachineDominanceFrontier.cpp
MachineDominators.cpp
+ MachineFrameInfo.cpp
MachineFunction.cpp
MachineFunctionPass.cpp
MachineFunctionPrinterPass.cpp
diff --git a/lib/CodeGen/CodeGenPrepare.cpp b/lib/CodeGen/CodeGenPrepare.cpp
index c862cfd28add..c6c93811a0f9 100644
--- a/lib/CodeGen/CodeGenPrepare.cpp
+++ b/lib/CodeGen/CodeGenPrepare.cpp
@@ -2226,10 +2226,11 @@ bool CodeGenPrepare::optimizeCallInst(CallInst *CI, bool& ModifiedDT) {
ConstantInt *RetVal =
lowerObjectSizeCall(II, *DL, TLInfo, /*MustSucceed=*/true);
// Substituting this can cause recursive simplifications, which can
- // invalidate our iterator. Use a WeakVH to hold onto it in case this
+ // invalidate our iterator. Use a WeakTrackingVH to hold onto it in case
+ // this happens.
Value *CurValue = &*CurInstIterator;
- WeakVH IterHandle(CurValue);
+ WeakTrackingVH IterHandle(CurValue);
replaceAndRecursivelySimplify(CI, RetVal, TLInfo, nullptr);
@@ -4442,9 +4443,9 @@ bool CodeGenPrepare::optimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// using it.
if (Repl->use_empty()) {
// This can cause recursive deletion, which can invalidate our iterator.
- // Use a WeakVH to hold onto it in case this happens.
+ // Use a WeakTrackingVH to hold onto it in case this happens.
Value *CurValue = &*CurInstIterator;
- WeakVH IterHandle(CurValue);
+ WeakTrackingVH IterHandle(CurValue);
BasicBlock *BB = CurInstIterator->getParent();
RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
@@ -5959,7 +5960,7 @@ bool CodeGenPrepare::optimizeInst(Instruction *I, bool& ModifiedDT) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
- if (Value *V = SimplifyInstruction(P, *DL, TLInfo, nullptr)) {
+ if (Value *V = SimplifyInstruction(P, {*DL, TLInfo})) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
++NumPHIsElim;
diff --git a/lib/CodeGen/DFAPacketizer.cpp b/lib/CodeGen/DFAPacketizer.cpp
index 7b1b2d64fccc..65f58e5686e0 100644
--- a/lib/CodeGen/DFAPacketizer.cpp
+++ b/lib/CodeGen/DFAPacketizer.cpp
@@ -213,10 +213,8 @@ VLIWPacketizerList::VLIWPacketizerList(MachineFunction &mf,
VLIWPacketizerList::~VLIWPacketizerList() {
- if (VLIWScheduler)
- delete VLIWScheduler;
- if (ResourceTracker)
- delete ResourceTracker;
+ delete VLIWScheduler;
+ delete ResourceTracker;
}
diff --git a/lib/CodeGen/GlobalISel/CallLowering.cpp b/lib/CodeGen/GlobalISel/CallLowering.cpp
index 035a2ac78ed9..ebfe6cb3b733 100644
--- a/lib/CodeGen/GlobalISel/CallLowering.cpp
+++ b/lib/CodeGen/GlobalISel/CallLowering.cpp
@@ -83,8 +83,8 @@ void CallLowering::setArgFlags(CallLowering::ArgInfo &Arg, unsigned OpIdx,
// For ByVal, alignment should be passed from FE. BE will guess if
// this info is not there but there are cases it cannot get right.
unsigned FrameAlign;
- if (FuncInfo.getParamAlignment(OpIdx))
- FrameAlign = FuncInfo.getParamAlignment(OpIdx);
+ if (FuncInfo.getParamAlignment(OpIdx - 1))
+ FrameAlign = FuncInfo.getParamAlignment(OpIdx - 1);
else
FrameAlign = getTLI()->getByValTypeAlignment(ElementTy, DL);
Arg.Flags.setByValAlign(FrameAlign);
diff --git a/lib/CodeGen/GlobalISel/IRTranslator.cpp b/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 5fb8dfc95d3f..75be7a55bd2a 100644
--- a/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -1199,9 +1199,8 @@ bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
finishPendingPhis();
- // Now that the MachineFrameInfo has been configured, no further changes to
- // the reserved registers are possible.
- MRI->freezeReservedRegs(*MF);
+ auto &TLI = *MF->getSubtarget().getTargetLowering();
+ TLI.finalizeLowering(*MF);
// Merge the argument lowering and constants block with its single
// successor, the LLVM-IR entry block. We want the basic block to
diff --git a/lib/CodeGen/GlobalISel/InstructionSelector.cpp b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
index 942680b6fff3..c67da8629a3b 100644
--- a/lib/CodeGen/GlobalISel/InstructionSelector.cpp
+++ b/lib/CodeGen/GlobalISel/InstructionSelector.cpp
@@ -58,10 +58,11 @@ bool InstructionSelector::constrainSelectedInstRegOperands(
MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
Reg, OpI));
- // Tie uses to defs as indicated in MCInstrDesc.
+ // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
+ // done.
if (MO.isUse()) {
int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
- if (DefIdx != -1)
+ if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
I.tieOperands(DefIdx, OpI);
}
}
diff --git a/lib/CodeGen/MIRParser/MIRParser.cpp b/lib/CodeGen/MIRParser/MIRParser.cpp
index a2773cccc5db..bd04acd049db 100644
--- a/lib/CodeGen/MIRParser/MIRParser.cpp
+++ b/lib/CodeGen/MIRParser/MIRParser.cpp
@@ -541,7 +541,8 @@ bool MIRParserImpl::initializeFrameInfo(PerFunctionMIParsingState &PFS,
MFI.ensureMaxAlignment(YamlMFI.MaxAlignment);
MFI.setAdjustsStack(YamlMFI.AdjustsStack);
MFI.setHasCalls(YamlMFI.HasCalls);
- MFI.setMaxCallFrameSize(YamlMFI.MaxCallFrameSize);
+ if (YamlMFI.MaxCallFrameSize != ~0u)
+ MFI.setMaxCallFrameSize(YamlMFI.MaxCallFrameSize);
MFI.setHasOpaqueSPAdjustment(YamlMFI.HasOpaqueSPAdjustment);
MFI.setHasVAStart(YamlMFI.HasVAStart);
MFI.setHasMustTailInVarArgFunc(YamlMFI.HasMustTailInVarArgFunc);
diff --git a/lib/CodeGen/MIRPrinter.cpp b/lib/CodeGen/MIRPrinter.cpp
index b6624b88fe23..d017b21f0a59 100644
--- a/lib/CodeGen/MIRPrinter.cpp
+++ b/lib/CodeGen/MIRPrinter.cpp
@@ -286,7 +286,8 @@ void MIRPrinter::convert(ModuleSlotTracker &MST,
YamlMFI.MaxAlignment = MFI.getMaxAlignment();
YamlMFI.AdjustsStack = MFI.adjustsStack();
YamlMFI.HasCalls = MFI.hasCalls();
- YamlMFI.MaxCallFrameSize = MFI.getMaxCallFrameSize();
+ YamlMFI.MaxCallFrameSize = MFI.isMaxCallFrameSizeComputed()
+ ? MFI.getMaxCallFrameSize() : ~0u;
YamlMFI.HasOpaqueSPAdjustment = MFI.hasOpaqueSPAdjustment();
YamlMFI.HasVAStart = MFI.hasVAStart();
YamlMFI.HasMustTailInVarArgFunc = MFI.hasMustTailInVarArgFunc();
diff --git a/lib/CodeGen/MachineFrameInfo.cpp b/lib/CodeGen/MachineFrameInfo.cpp
new file mode 100644
index 000000000000..7de8434df806
--- /dev/null
+++ b/lib/CodeGen/MachineFrameInfo.cpp
@@ -0,0 +1,218 @@
+//===-- MachineFrameInfo.cpp ---------------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+/// \file Implements MachineFrameInfo that manages the stack frame.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/CodeGen/MachineFrameInfo.h"
+
+#include "llvm/ADT/BitVector.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/Support/Debug.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/Target/TargetFrameLowering.h"
+#include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
+#include <cassert>
+
+#define DEBUG_TYPE "codegen"
+
+using namespace llvm;
+
+void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
+ if (!StackRealignable)
+ assert(Align <= StackAlignment &&
+ "For targets without stack realignment, Align is out of limit!");
+ if (MaxAlignment < Align) MaxAlignment = Align;
+}
+
+/// Clamp the alignment if requested and emit a warning.
+static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
+ unsigned StackAlign) {
+ if (!ShouldClamp || Align <= StackAlign)
+ return Align;
+ DEBUG(dbgs() << "Warning: requested alignment " << Align
+ << " exceeds the stack alignment " << StackAlign
+ << " when stack realignment is off" << '\n');
+ return StackAlign;
+}
+
+int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
+ bool isSS, const AllocaInst *Alloca) {
+ assert(Size != 0 && "Cannot allocate zero size stack objects!");
+ Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
+ Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
+ !isSS));
+ int Index = (int)Objects.size() - NumFixedObjects - 1;
+ assert(Index >= 0 && "Bad frame index!");
+ ensureMaxAlignment(Alignment);
+ return Index;
+}
+
+int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
+ unsigned Alignment) {
+ Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
+ CreateStackObject(Size, Alignment, true);
+ int Index = (int)Objects.size() - NumFixedObjects - 1;
+ ensureMaxAlignment(Alignment);
+ return Index;
+}
+
+int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
+ const AllocaInst *Alloca) {
+ HasVarSizedObjects = true;
+ Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
+ Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
+ ensureMaxAlignment(Alignment);
+ return (int)Objects.size()-NumFixedObjects-1;
+}
+
+int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
+ bool Immutable, bool isAliased) {
+ assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
+ // The alignment of the frame index can be determined from its offset from
+ // the incoming frame position. If the frame object is at offset 32 and
+ // the stack is guaranteed to be 16-byte aligned, then we know that the
+ // object is 16-byte aligned. Note that unlike the non-fixed case, if the
+ // stack needs realignment, we can't assume that the stack will in fact be
+ // aligned.
+ unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
+ Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
+ /*isSS*/ false,
+ /*Alloca*/ nullptr, isAliased));
+ return -++NumFixedObjects;
+}
+
+int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
+ int64_t SPOffset,
+ bool Immutable) {
+ unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
+ Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
+ Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
+ /*isSS*/ true,
+ /*Alloca*/ nullptr,
+ /*isAliased*/ false));
+ return -++NumFixedObjects;
+}
+
+BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
+ const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
+ BitVector BV(TRI->getNumRegs());
+
+ // Before CSI is calculated, no registers are considered pristine. They can be
+ // freely used and PEI will make sure they are saved.
+ if (!isCalleeSavedInfoValid())
+ return BV;
+
+ const MachineRegisterInfo &MRI = MF.getRegInfo();
+ for (const MCPhysReg *CSR = MRI.getCalleeSavedRegs(); CSR && *CSR;
+ ++CSR)
+ BV.set(*CSR);
+
+ // Saved CSRs are not pristine.
+ for (auto &I : getCalleeSavedInfo())
+ for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
+ BV.reset(*S);
+
+ return BV;
+}
+
+unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
+ const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
+ const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
+ unsigned MaxAlign = getMaxAlignment();
+ int Offset = 0;
+
+ // This code is very, very similar to PEI::calculateFrameObjectOffsets().
+ // It really should be refactored to share code. Until then, changes
+ // should keep in mind that there's tight coupling between the two.
+
+ for (int i = getObjectIndexBegin(); i != 0; ++i) {
+ int FixedOff = -getObjectOffset(i);
+ if (FixedOff > Offset) Offset = FixedOff;
+ }
+ for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
+ if (isDeadObjectIndex(i))
+ continue;
+ Offset += getObjectSize(i);
+ unsigned Align = getObjectAlignment(i);
+ // Adjust to alignment boundary
+ Offset = (Offset+Align-1)/Align*Align;
+
+ MaxAlign = std::max(Align, MaxAlign);
+ }
+
+ if (adjustsStack() && TFI->hasReservedCallFrame(MF))
+ Offset += getMaxCallFrameSize();
+
+ // Round up the size to a multiple of the alignment. If the function has
+ // any calls or alloca's, align to the target's StackAlignment value to
+ // ensure that the callee's frame or the alloca data is suitably aligned;
+ // otherwise, for leaf functions, align to the TransientStackAlignment
+ // value.
+ unsigned StackAlign;
+ if (adjustsStack() || hasVarSizedObjects() ||
+ (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
+ StackAlign = TFI->getStackAlignment();
+ else
+ StackAlign = TFI->getTransientStackAlignment();
+
+ // If the frame pointer is eliminated, all frame offsets will be relative to
+ // SP not FP. Align to MaxAlign so this works.
+ StackAlign = std::max(StackAlign, MaxAlign);
+ unsigned AlignMask = StackAlign - 1;
+ Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
+
+ return (unsigned)Offset;
+}
+
+void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
+ if (Objects.empty()) return;
+
+ const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
+ int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
+
+ OS << "Frame Objects:\n";
+
+ for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
+ const StackObject &SO = Objects[i];
+ OS << " fi#" << (int)(i-NumFixedObjects) << ": ";
+ if (SO.Size == ~0ULL) {
+ OS << "dead\n";
+ continue;
+ }
+ if (SO.Size == 0)
+ OS << "variable sized";
+ else
+ OS << "size=" << SO.Size;
+ OS << ", align=" << SO.Alignment;
+
+ if (i < NumFixedObjects)
+ OS << ", fixed";
+ if (i < NumFixedObjects || SO.SPOffset != -1) {
+ int64_t Off = SO.SPOffset - ValOffset;
+ OS << ", at location [SP";
+ if (Off > 0)
+ OS << "+" << Off;
+ else if (Off < 0)
+ OS << Off;
+ OS << "]";
+ }
+ OS << "\n";
+ }
+}
+
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+LLVM_DUMP_METHOD void MachineFrameInfo::dump(const MachineFunction &MF) const {
+ print(MF, dbgs());
+}
+#endif
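One detail worth calling out in the moved code: estimateStackSize rounds the running offset up to the chosen stack alignment with the usual power-of-two mask trick. A standalone sketch of just that step, for illustration only (the helper name is made up):

// With StackAlign = 16 the mask is 15, so an Offset of 52 becomes
// (52 + 15) & ~15 = 64, the next multiple of 16. StackAlign is assumed
// to be a power of two, as stack alignments are.
static unsigned roundUpToStackAlign(unsigned Offset, unsigned StackAlign) {
  unsigned AlignMask = StackAlign - 1;
  return (Offset + AlignMask) & ~AlignMask;
}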
diff --git a/lib/CodeGen/MachineFunction.cpp b/lib/CodeGen/MachineFunction.cpp
index c9767a25e908..ac4ccb81b884 100644
--- a/lib/CodeGen/MachineFunction.cpp
+++ b/lib/CodeGen/MachineFunction.cpp
@@ -757,214 +757,6 @@ void llvm::addLandingPadInfo(const LandingPadInst &I, MachineBasicBlock &MBB) {
/// \}
//===----------------------------------------------------------------------===//
-// MachineFrameInfo implementation
-//===----------------------------------------------------------------------===//
-
-/// Make sure the function is at least Align bytes aligned.
-void MachineFrameInfo::ensureMaxAlignment(unsigned Align) {
- if (!StackRealignable)
- assert(Align <= StackAlignment &&
- "For targets without stack realignment, Align is out of limit!");
- if (MaxAlignment < Align) MaxAlignment = Align;
-}
-
-/// Clamp the alignment if requested and emit a warning.
-static inline unsigned clampStackAlignment(bool ShouldClamp, unsigned Align,
- unsigned StackAlign) {
- if (!ShouldClamp || Align <= StackAlign)
- return Align;
- DEBUG(dbgs() << "Warning: requested alignment " << Align
- << " exceeds the stack alignment " << StackAlign
- << " when stack realignment is off" << '\n');
- return StackAlign;
-}
-
-/// Create a new statically sized stack object, returning a nonnegative
-/// identifier to represent it.
-int MachineFrameInfo::CreateStackObject(uint64_t Size, unsigned Alignment,
- bool isSS, const AllocaInst *Alloca) {
- assert(Size != 0 && "Cannot allocate zero size stack objects!");
- Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
- Objects.push_back(StackObject(Size, Alignment, 0, false, isSS, Alloca,
- !isSS));
- int Index = (int)Objects.size() - NumFixedObjects - 1;
- assert(Index >= 0 && "Bad frame index!");
- ensureMaxAlignment(Alignment);
- return Index;
-}
-
-/// Create a new statically sized stack object that represents a spill slot,
-/// returning a nonnegative identifier to represent it.
-int MachineFrameInfo::CreateSpillStackObject(uint64_t Size,
- unsigned Alignment) {
- Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
- CreateStackObject(Size, Alignment, true);
- int Index = (int)Objects.size() - NumFixedObjects - 1;
- ensureMaxAlignment(Alignment);
- return Index;
-}
-
-/// Notify the MachineFrameInfo object that a variable sized object has been
-/// created. This must be created whenever a variable sized object is created,
-/// whether or not the index returned is actually used.
-int MachineFrameInfo::CreateVariableSizedObject(unsigned Alignment,
- const AllocaInst *Alloca) {
- HasVarSizedObjects = true;
- Alignment = clampStackAlignment(!StackRealignable, Alignment, StackAlignment);
- Objects.push_back(StackObject(0, Alignment, 0, false, false, Alloca, true));
- ensureMaxAlignment(Alignment);
- return (int)Objects.size()-NumFixedObjects-1;
-}
-
-/// Create a new object at a fixed location on the stack.
-/// All fixed objects should be created before other objects are created for
-/// efficiency. By default, fixed objects are immutable. This returns an
-/// index with a negative value.
-int MachineFrameInfo::CreateFixedObject(uint64_t Size, int64_t SPOffset,
- bool Immutable, bool isAliased) {
- assert(Size != 0 && "Cannot allocate zero size fixed stack objects!");
- // The alignment of the frame index can be determined from its offset from
- // the incoming frame position. If the frame object is at offset 32 and
- // the stack is guaranteed to be 16-byte aligned, then we know that the
- // object is 16-byte aligned. Note that unlike the non-fixed case, if the
- // stack needs realignment, we can't assume that the stack will in fact be
- // aligned.
- unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
- Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
- Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
- /*isSS*/ false,
- /*Alloca*/ nullptr, isAliased));
- return -++NumFixedObjects;
-}
-
-/// Create a spill slot at a fixed location on the stack.
-/// Returns an index with a negative value.
-int MachineFrameInfo::CreateFixedSpillStackObject(uint64_t Size,
- int64_t SPOffset,
- bool Immutable) {
- unsigned Align = MinAlign(SPOffset, ForcedRealign ? 1 : StackAlignment);
- Align = clampStackAlignment(!StackRealignable, Align, StackAlignment);
- Objects.insert(Objects.begin(), StackObject(Size, Align, SPOffset, Immutable,
- /*isSS*/ true,
- /*Alloca*/ nullptr,
- /*isAliased*/ false));
- return -++NumFixedObjects;
-}
-
-BitVector MachineFrameInfo::getPristineRegs(const MachineFunction &MF) const {
- const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
- BitVector BV(TRI->getNumRegs());
-
- // Before CSI is calculated, no registers are considered pristine. They can be
- // freely used and PEI will make sure they are saved.
- if (!isCalleeSavedInfoValid())
- return BV;
-
- const MachineRegisterInfo &MRI = MF.getRegInfo();
- for (const MCPhysReg *CSR = MRI.getCalleeSavedRegs(); CSR && *CSR;
- ++CSR)
- BV.set(*CSR);
-
- // Saved CSRs are not pristine.
- for (auto &I : getCalleeSavedInfo())
- for (MCSubRegIterator S(I.getReg(), TRI, true); S.isValid(); ++S)
- BV.reset(*S);
-
- return BV;
-}
-
-unsigned MachineFrameInfo::estimateStackSize(const MachineFunction &MF) const {
- const TargetFrameLowering *TFI = MF.getSubtarget().getFrameLowering();
- const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
- unsigned MaxAlign = getMaxAlignment();
- int Offset = 0;
-
- // This code is very, very similar to PEI::calculateFrameObjectOffsets().
- // It really should be refactored to share code. Until then, changes
- // should keep in mind that there's tight coupling between the two.
-
- for (int i = getObjectIndexBegin(); i != 0; ++i) {
- int FixedOff = -getObjectOffset(i);
- if (FixedOff > Offset) Offset = FixedOff;
- }
- for (unsigned i = 0, e = getObjectIndexEnd(); i != e; ++i) {
- if (isDeadObjectIndex(i))
- continue;
- Offset += getObjectSize(i);
- unsigned Align = getObjectAlignment(i);
- // Adjust to alignment boundary
- Offset = (Offset+Align-1)/Align*Align;
-
- MaxAlign = std::max(Align, MaxAlign);
- }
-
- if (adjustsStack() && TFI->hasReservedCallFrame(MF))
- Offset += getMaxCallFrameSize();
-
- // Round up the size to a multiple of the alignment. If the function has
- // any calls or alloca's, align to the target's StackAlignment value to
- // ensure that the callee's frame or the alloca data is suitably aligned;
- // otherwise, for leaf functions, align to the TransientStackAlignment
- // value.
- unsigned StackAlign;
- if (adjustsStack() || hasVarSizedObjects() ||
- (RegInfo->needsStackRealignment(MF) && getObjectIndexEnd() != 0))
- StackAlign = TFI->getStackAlignment();
- else
- StackAlign = TFI->getTransientStackAlignment();
-
- // If the frame pointer is eliminated, all frame offsets will be relative to
- // SP not FP. Align to MaxAlign so this works.
- StackAlign = std::max(StackAlign, MaxAlign);
- unsigned AlignMask = StackAlign - 1;
- Offset = (Offset + AlignMask) & ~uint64_t(AlignMask);
-
- return (unsigned)Offset;
-}
-
-void MachineFrameInfo::print(const MachineFunction &MF, raw_ostream &OS) const{
- if (Objects.empty()) return;
-
- const TargetFrameLowering *FI = MF.getSubtarget().getFrameLowering();
- int ValOffset = (FI ? FI->getOffsetOfLocalArea() : 0);
-
- OS << "Frame Objects:\n";
-
- for (unsigned i = 0, e = Objects.size(); i != e; ++i) {
- const StackObject &SO = Objects[i];
- OS << " fi#" << (int)(i-NumFixedObjects) << ": ";
- if (SO.Size == ~0ULL) {
- OS << "dead\n";
- continue;
- }
- if (SO.Size == 0)
- OS << "variable sized";
- else
- OS << "size=" << SO.Size;
- OS << ", align=" << SO.Alignment;
-
- if (i < NumFixedObjects)
- OS << ", fixed";
- if (i < NumFixedObjects || SO.SPOffset != -1) {
- int64_t Off = SO.SPOffset - ValOffset;
- OS << ", at location [SP";
- if (Off > 0)
- OS << "+" << Off;
- else if (Off < 0)
- OS << Off;
- OS << "]";
- }
- OS << "\n";
- }
-}
-
-#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
-LLVM_DUMP_METHOD void MachineFrameInfo::dump(const MachineFunction &MF) const {
- print(MF, dbgs());
-}
-#endif
-
-//===----------------------------------------------------------------------===//
// MachineJumpTableInfo implementation
//===----------------------------------------------------------------------===//
diff --git a/lib/CodeGen/MachineInstr.cpp b/lib/CodeGen/MachineInstr.cpp
index 1faf6292a9c1..d665201a5d17 100644
--- a/lib/CodeGen/MachineInstr.cpp
+++ b/lib/CodeGen/MachineInstr.cpp
@@ -2350,7 +2350,7 @@ MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
const MachineInstr &Orig,
int FrameIndex) {
const MDNode *Var = Orig.getDebugVariable();
- auto *Expr = cast_or_null<DIExpression>(Orig.getDebugExpression());
+ const auto *Expr = cast_or_null<DIExpression>(Orig.getDebugExpression());
bool IsIndirect = Orig.isIndirectDebugValue();
uint64_t Offset = IsIndirect ? Orig.getOperand(1).getImm() : 0;
DebugLoc DL = Orig.getDebugLoc();
@@ -2359,13 +2359,8 @@ MachineInstr *llvm::buildDbgValueForSpill(MachineBasicBlock &BB,
// If the DBG_VALUE already was a memory location, add an extra
// DW_OP_deref. Otherwise just turning this from a register into a
// memory/indirect location is sufficient.
- if (IsIndirect) {
- SmallVector<uint64_t, 8> Ops;
- Ops.push_back(dwarf::DW_OP_deref);
- if (Expr)
- Ops.append(Expr->elements_begin(), Expr->elements_end());
- Expr = DIExpression::get(Expr->getContext(), Ops);
- }
+ if (IsIndirect)
+ Expr = DIExpression::prepend(Expr, DIExpression::WithDeref);
return BuildMI(BB, I, DL, Orig.getDesc())
.addFrameIndex(FrameIndex)
.addImm(Offset)
diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 1251ae6262b8..dc0276d57667 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -33,6 +33,7 @@
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
@@ -236,10 +237,13 @@ namespace {
SDValue visitSUB(SDNode *N);
SDValue visitADDC(SDNode *N);
SDValue visitUADDO(SDNode *N);
+ SDValue visitUADDOLike(SDValue N0, SDValue N1, SDNode *N);
SDValue visitSUBC(SDNode *N);
SDValue visitUSUBO(SDNode *N);
SDValue visitADDE(SDNode *N);
+ SDValue visitADDCARRY(SDNode *N);
SDValue visitSUBE(SDNode *N);
+ SDValue visitSUBCARRY(SDNode *N);
SDValue visitMUL(SDNode *N);
SDValue useDivRem(SDNode *N);
SDValue visitSDIV(SDNode *N);
@@ -369,14 +373,14 @@ namespace {
SDValue BuildSDIVPow2(SDNode *N);
SDValue BuildUDIV(SDNode *N);
SDValue BuildLogBase2(SDValue Op, const SDLoc &DL);
- SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags);
- SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags);
- SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags);
- SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags, bool Recip);
+ SDValue BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags);
+ SDValue buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags);
+ SDValue buildSqrtEstimate(SDValue Op, SDNodeFlags Flags);
+ SDValue buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags, bool Recip);
SDValue buildSqrtNROneConst(SDValue Op, SDValue Est, unsigned Iterations,
- SDNodeFlags *Flags, bool Reciprocal);
+ SDNodeFlags Flags, bool Reciprocal);
SDValue buildSqrtNRTwoConst(SDValue Op, SDValue Est, unsigned Iterations,
- SDNodeFlags *Flags, bool Reciprocal);
+ SDNodeFlags Flags, bool Reciprocal);
SDValue MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
bool DemandHighBits = true);
SDValue MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1);
@@ -396,6 +400,7 @@ namespace {
SDValue createBuildVecShuffle(const SDLoc &DL, SDNode *N,
ArrayRef<int> VectorMask, SDValue VecIn1,
SDValue VecIn2, unsigned LeftIdx);
+ SDValue matchVSelectOpSizesWithSetCC(SDNode *N);
SDValue GetDemandedBits(SDValue V, const APInt &Mask);
@@ -644,7 +649,7 @@ static char isNegatibleForFree(SDValue Op, bool LegalOperations,
case ISD::FSUB:
// We can't turn -(A-B) into B-A when we honor signed zeros.
if (!Options->NoSignedZerosFPMath &&
- !Op.getNode()->getFlags()->hasNoSignedZeros())
+ !Op.getNode()->getFlags().hasNoSignedZeros())
return 0;
// fold (fneg (fsub A, B)) -> (fsub B, A)
@@ -682,7 +687,7 @@ static SDValue GetNegatedExpression(SDValue Op, SelectionDAG &DAG,
assert(Depth <= 6 && "GetNegatedExpression doesn't match isNegatibleForFree");
- const SDNodeFlags *Flags = Op.getNode()->getFlags();
+ const SDNodeFlags Flags = Op.getNode()->getFlags();
switch (Op.getOpcode()) {
default: llvm_unreachable("Unknown code");
@@ -965,8 +970,8 @@ CommitTargetLoweringOpt(const TargetLowering::TargetLoweringOpt &TLO) {
/// things it uses can be simplified by bit propagation. If so, return true.
bool DAGCombiner::SimplifyDemandedBits(SDValue Op, const APInt &Demanded) {
TargetLowering::TargetLoweringOpt TLO(DAG, LegalTypes, LegalOperations);
- APInt KnownZero, KnownOne;
- if (!TLI.SimplifyDemandedBits(Op, Demanded, KnownZero, KnownOne, TLO))
+ KnownBits Known;
+ if (!TLI.SimplifyDemandedBits(Op, Demanded, Known, TLO))
return false;
// Revisit the node.
@@ -1412,7 +1417,9 @@ SDValue DAGCombiner::visit(SDNode *N) {
case ISD::SUBC: return visitSUBC(N);
case ISD::USUBO: return visitUSUBO(N);
case ISD::ADDE: return visitADDE(N);
+ case ISD::ADDCARRY: return visitADDCARRY(N);
case ISD::SUBE: return visitSUBE(N);
+ case ISD::SUBCARRY: return visitSUBCARRY(N);
case ISD::MUL: return visitMUL(N);
case ISD::SDIV: return visitSDIV(N);
case ISD::UDIV: return visitUDIV(N);
@@ -1866,14 +1873,31 @@ SDValue DAGCombiner::visitADD(SDNode *N) {
if (isNullConstant(N1))
return N0;
- // fold ((c1-A)+c2) -> (c1+c2)-A
if (isConstantOrConstantVector(N1, /* NoOpaque */ true)) {
- if (N0.getOpcode() == ISD::SUB)
- if (isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
- return DAG.getNode(ISD::SUB, DL, VT,
- DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)),
- N0.getOperand(1));
+ // fold ((c1-A)+c2) -> (c1+c2)-A
+ if (N0.getOpcode() == ISD::SUB &&
+ isConstantOrConstantVector(N0.getOperand(0), /* NoOpaque */ true)) {
+ // FIXME: Adding 2 constants should be handled by FoldConstantArithmetic.
+ return DAG.getNode(ISD::SUB, DL, VT,
+ DAG.getNode(ISD::ADD, DL, VT, N1, N0.getOperand(0)),
+ N0.getOperand(1));
+ }
+
+ // add (sext i1 X), 1 -> zext (not i1 X)
+ // We don't transform this pattern:
+ // add (zext i1 X), -1 -> sext (not i1 X)
+ // because most (?) targets generate better code for the zext form.
+ if (N0.getOpcode() == ISD::SIGN_EXTEND && N0.hasOneUse() &&
+ isOneConstantOrOneSplatConstant(N1)) {
+ SDValue X = N0.getOperand(0);
+ if ((!LegalOperations ||
+ (TLI.isOperationLegal(ISD::XOR, X.getValueType()) &&
+ TLI.isOperationLegal(ISD::ZERO_EXTEND, VT))) &&
+ X.getScalarValueSizeInBits() == 1) {
+ SDValue Not = DAG.getNOT(DL, X, X.getValueType());
+ return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, Not);
}
+ }
}
if (SDValue NewSel = foldBinOpIntoSelect(N))
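A quick sanity check of the new (add (sext i1 X), 1) -> (zext (not i1 X)) fold, working through the two i1 values (editorial note, not part of the patch): for X = 0, sext X = 0 and 0 + 1 = 1, matching zext (not 0) = 1; for X = 1, sext X = -1 (all ones) and -1 + 1 = 0, matching zext (not 1) = 0. The caveat about not rewriting add (zext i1 X), -1 is a code-quality preference, not a correctness concern.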
@@ -1992,6 +2016,11 @@ SDValue DAGCombiner::visitADDLike(SDValue N0, SDValue N1, SDNode *LocReference)
}
}
+ // (add X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
+ if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1)))
+ return DAG.getNode(ISD::ADDCARRY, DL, N1->getVTList(),
+ N0, N1.getOperand(0), N1.getOperand(2));
+
return SDValue();
}
@@ -2055,6 +2084,26 @@ SDValue DAGCombiner::visitUADDO(SDNode *N) {
return CombineTo(N, DAG.getNode(ISD::ADD, DL, VT, N0, N1),
DAG.getConstant(0, DL, CarryVT));
+ if (SDValue Combined = visitUADDOLike(N0, N1, N))
+ return Combined;
+
+ if (SDValue Combined = visitUADDOLike(N1, N0, N))
+ return Combined;
+
+ return SDValue();
+}
+
+SDValue DAGCombiner::visitUADDOLike(SDValue N0, SDValue N1, SDNode *N) {
+ // (uaddo X, (addcarry Y, 0, Carry)) -> (addcarry X, Y, Carry)
+ // If Y + 1 cannot overflow.
+ if (N1.getOpcode() == ISD::ADDCARRY && isNullConstant(N1.getOperand(1))) {
+ SDValue Y = N1.getOperand(0);
+ SDValue One = DAG.getConstant(1, SDLoc(N), Y.getValueType());
+ if (DAG.computeOverflowKind(Y, One) == SelectionDAG::OFK_Never)
+ return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(), N0, Y,
+ N1.getOperand(2));
+ }
+
return SDValue();
}
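Why the computeOverflowKind guard above suffices (editorial note): the inner (addcarry Y, 0, Carry) adds at most 1 to Y, so if Y + 1 can never overflow, the inner addition never produces a carry of its own. The only carry that can occur then comes from adding X, which is exactly what the rewritten (addcarry X, Y, Carry) reports; without the guard, a wraparound inside Y + Carry would be lost by the uaddo form but counted by the addcarry form.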
@@ -2077,6 +2126,25 @@ SDValue DAGCombiner::visitADDE(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitADDCARRY(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue CarryIn = N->getOperand(2);
+
+ // canonicalize constant to RHS
+ ConstantSDNode *N0C = dyn_cast<ConstantSDNode>(N0);
+ ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1);
+ if (N0C && !N1C)
+ return DAG.getNode(ISD::ADDCARRY, SDLoc(N), N->getVTList(),
+ N1, N0, CarryIn);
+
+ // fold (addcarry x, y, false) -> (uaddo x, y)
+ if (isNullConstant(CarryIn))
+ return DAG.getNode(ISD::UADDO, SDLoc(N), N->getVTList(), N0, N1);
+
+ return SDValue();
+}
+
// Since it may not be valid to emit a fold to zero for vector initializers
// check if we can before folding.
static SDValue tryFoldToZero(const SDLoc &DL, const TargetLowering &TLI, EVT VT,
@@ -2143,13 +2211,13 @@ SDValue DAGCombiner::visitSUB(SDNode *N) {
}
// 0 - X --> 0 if the sub is NUW.
- if (N->getFlags()->hasNoUnsignedWrap())
+ if (N->getFlags().hasNoUnsignedWrap())
return N0;
if (DAG.MaskedValueIsZero(N1, ~APInt::getSignMask(BitWidth))) {
// N1 is either 0 or the minimum signed value. If the sub is NSW, then
// N1 must be 0 because negating the minimum signed value is undefined.
- if (N->getFlags()->hasNoSignedWrap())
+ if (N->getFlags().hasNoSignedWrap())
return N0;
// 0 - X --> X if X is 0 or the minimum signed value.
@@ -2309,6 +2377,18 @@ SDValue DAGCombiner::visitSUBE(SDNode *N) {
return SDValue();
}
+SDValue DAGCombiner::visitSUBCARRY(SDNode *N) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
+ SDValue CarryIn = N->getOperand(2);
+
+ // fold (subcarry x, y, false) -> (usubo x, y)
+ if (isNullConstant(CarryIn))
+ return DAG.getNode(ISD::USUBO, SDLoc(N), N->getVTList(), N0, N1);
+
+ return SDValue();
+}
+
SDValue DAGCombiner::visitMUL(SDNode *N) {
SDValue N0 = N->getOperand(0);
SDValue N1 = N->getOperand(1);
@@ -2589,9 +2669,8 @@ SDValue DAGCombiner::visitSDIV(SDNode *N) {
// better results in that case. The target-specific lowering should learn how
// to handle exact sdivs efficiently.
if (N1C && !N1C->isNullValue() && !N1C->isOpaque() &&
- !cast<BinaryWithFlagsSDNode>(N)->Flags.hasExact() &&
- (N1C->getAPIntValue().isPowerOf2() ||
- (-N1C->getAPIntValue()).isPowerOf2())) {
+ !N->getFlags().hasExact() && (N1C->getAPIntValue().isPowerOf2() ||
+ (-N1C->getAPIntValue()).isPowerOf2())) {
// Target-specific implementation of sdiv x, pow2.
if (SDValue Res = BuildSDIVPow2(N))
return Res;
@@ -3766,7 +3845,7 @@ SDValue DAGCombiner::MatchBSwapHWordLow(SDNode *N, SDValue N0, SDValue N1,
EVT VT = N->getValueType(0);
if (VT != MVT::i64 && VT != MVT::i32 && VT != MVT::i16)
return SDValue();
- if (!TLI.isOperationLegal(ISD::BSWAP, VT))
+ if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
return SDValue();
// Recognize (and (shl a, 8), 0xff), (and (srl a, 8), 0xff00)
@@ -3880,8 +3959,15 @@ static bool isBSwapHWordElement(SDValue N, MutableArrayRef<SDNode *> Parts) {
SDValue N0 = N.getOperand(0);
unsigned Opc0 = N0.getOpcode();
+ if (Opc0 != ISD::AND && Opc0 != ISD::SHL && Opc0 != ISD::SRL)
+ return false;
- ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ ConstantSDNode *N1C = nullptr;
+ // SHL or SRL: look upstream for AND mask operand
+ if (Opc == ISD::AND)
+ N1C = dyn_cast<ConstantSDNode>(N.getOperand(1));
+ else if (Opc0 == ISD::AND)
+ N1C = dyn_cast<ConstantSDNode>(N0.getOperand(1));
if (!N1C)
return false;
@@ -3952,7 +4038,7 @@ SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
EVT VT = N->getValueType(0);
if (VT != MVT::i32)
return SDValue();
- if (!TLI.isOperationLegal(ISD::BSWAP, VT))
+ if (!TLI.isOperationLegalOrCustom(ISD::BSWAP, VT))
return SDValue();
// Look for either
@@ -3967,18 +4053,16 @@ SDValue DAGCombiner::MatchBSwapHWord(SDNode *N, SDValue N0, SDValue N1) {
if (N1.getOpcode() == ISD::OR &&
N00.getNumOperands() == 2 && N01.getNumOperands() == 2) {
// (or (or (and), (and)), (or (and), (and)))
- SDValue N000 = N00.getOperand(0);
- if (!isBSwapHWordElement(N000, Parts))
+ if (!isBSwapHWordElement(N00, Parts))
return SDValue();
- SDValue N001 = N00.getOperand(1);
- if (!isBSwapHWordElement(N001, Parts))
+ if (!isBSwapHWordElement(N01, Parts))
return SDValue();
- SDValue N010 = N01.getOperand(0);
- if (!isBSwapHWordElement(N010, Parts))
+ SDValue N10 = N1.getOperand(0);
+ if (!isBSwapHWordElement(N10, Parts))
return SDValue();
- SDValue N011 = N01.getOperand(1);
- if (!isBSwapHWordElement(N011, Parts))
+ SDValue N11 = N1.getOperand(1);
+ if (!isBSwapHWordElement(N11, Parts))
return SDValue();
} else {
// (or (or (or (and), (and)), (and)), (and))
@@ -5322,7 +5406,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
// fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
// fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
if (N1C && (N0.getOpcode() == ISD::SRL || N0.getOpcode() == ISD::SRA) &&
- cast<BinaryWithFlagsSDNode>(N0)->Flags.hasExact()) {
+ N0->getFlags().hasExact()) {
if (ConstantSDNode *N0C1 = isConstOrConstSplat(N0.getOperand(1))) {
uint64_t C1 = N0C1->getZExtValue();
uint64_t C2 = N1C->getZExtValue();
@@ -5347,7 +5431,7 @@ SDValue DAGCombiner::visitSHL(SDNode *N) {
APInt Mask = APInt::getHighBitsSet(OpSizeInBits, OpSizeInBits - c1);
SDValue Shift;
if (c2 > c1) {
- Mask = Mask.shl(c2 - c1);
+ Mask <<= c2 - c1;
SDLoc DL(N);
Shift = DAG.getNode(ISD::SHL, DL, VT, N0.getOperand(0),
DAG.getConstant(c2 - c1, DL, N1.getValueType()));
@@ -5680,20 +5764,20 @@ SDValue DAGCombiner::visitSRL(SDNode *N) {
// fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
if (N1C && N0.getOpcode() == ISD::CTLZ &&
N1C->getAPIntValue() == Log2_32(OpSizeInBits)) {
- APInt KnownZero, KnownOne;
- DAG.computeKnownBits(N0.getOperand(0), KnownZero, KnownOne);
+ KnownBits Known;
+ DAG.computeKnownBits(N0.getOperand(0), Known);
// If any of the input bits are KnownOne, then the input couldn't be all
// zeros, thus the result of the srl will always be zero.
- if (KnownOne.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
+ if (Known.One.getBoolValue()) return DAG.getConstant(0, SDLoc(N0), VT);
// If all of the bits input to the ctlz node are known to be zero, then
// the result of the ctlz is "32" and the result of the shift is one.
- APInt UnknownBits = ~KnownZero;
+ APInt UnknownBits = ~Known.Zero;
if (UnknownBits == 0) return DAG.getConstant(1, SDLoc(N0), VT);
// Otherwise, check to see if there is exactly one bit input to the ctlz.
- if ((UnknownBits & (UnknownBits - 1)) == 0) {
+ if (UnknownBits.isPowerOf2()) {
// Okay, we know that only the single bit specified by UnknownBits
// could be set on input to the CTLZ node. If this bit is set, the SRL
// will return 0, if it is clear, it returns 1. Change the CTLZ/SRL pair
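One subtlety in the hunk above: the old test (UnknownBits & (UnknownBits - 1)) == 0 also accepts zero, while APInt::isPowerOf2() does not, so the rewrite is only safe because the UnknownBits == 0 case has already returned a few lines earlier. A small check of that relationship in a plain 32-bit model (assumption: ordinary unsigned arithmetic stands in for APInt):

#include <cassert>
#include <cstdint>

bool oldCheck(uint32_t X) { return (X & (X - 1)) == 0; }             // also true for 0
bool powerOfTwo(uint32_t X) { return X != 0 && (X & (X - 1)) == 0; } // isPowerOf2 analogue

int main() {
  for (uint32_t X : {0u, 1u, 2u, 3u, 4u, 6u, 8u, 0x80000000u})
    assert(oldCheck(X) == (powerOfTwo(X) || X == 0));
}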
@@ -6889,6 +6973,51 @@ SDValue DAGCombiner::CombineExtLoad(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
+/// If we're narrowing or widening the result of a vector select and the final
+/// size is the same size as a setcc (compare) feeding the select, then try to
+/// apply the cast operation to the select's operands because matching vector
+/// sizes for a select condition and other operands should be more efficient.
+SDValue DAGCombiner::matchVSelectOpSizesWithSetCC(SDNode *Cast) {
+ unsigned CastOpcode = Cast->getOpcode();
+ assert((CastOpcode == ISD::SIGN_EXTEND || CastOpcode == ISD::ZERO_EXTEND ||
+ CastOpcode == ISD::TRUNCATE || CastOpcode == ISD::FP_EXTEND ||
+ CastOpcode == ISD::FP_ROUND) &&
+ "Unexpected opcode for vector select narrowing/widening");
+
+ // We only do this transform before legal ops because the pattern may be
+ // obfuscated by target-specific operations after legalization. Do not create
+ // an illegal select op, however, because that may be difficult to lower.
+ EVT VT = Cast->getValueType(0);
+ if (LegalOperations || !TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
+ return SDValue();
+
+ SDValue VSel = Cast->getOperand(0);
+ if (VSel.getOpcode() != ISD::VSELECT || !VSel.hasOneUse() ||
+ VSel.getOperand(0).getOpcode() != ISD::SETCC)
+ return SDValue();
+
+ // Does the setcc have the same vector size as the casted select?
+ SDValue SetCC = VSel.getOperand(0);
+ EVT SetCCVT = getSetCCResultType(SetCC.getOperand(0).getValueType());
+ if (SetCCVT.getSizeInBits() != VT.getSizeInBits())
+ return SDValue();
+
+ // cast (vsel (setcc X), A, B) --> vsel (setcc X), (cast A), (cast B)
+ SDValue A = VSel.getOperand(1);
+ SDValue B = VSel.getOperand(2);
+ SDValue CastA, CastB;
+ SDLoc DL(Cast);
+ if (CastOpcode == ISD::FP_ROUND) {
+ // FP_ROUND (fptrunc) has an extra flag operand to pass along.
+ CastA = DAG.getNode(CastOpcode, DL, VT, A, Cast->getOperand(1));
+ CastB = DAG.getNode(CastOpcode, DL, VT, B, Cast->getOperand(1));
+ } else {
+ CastA = DAG.getNode(CastOpcode, DL, VT, A);
+ CastB = DAG.getNode(CastOpcode, DL, VT, B);
+ }
+ return DAG.getNode(ISD::VSELECT, DL, VT, SetCC, CastA, CastB);
+}
+
SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
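The rationale behind matchVSelectOpSizesWithSetCC is that casting the result of a select and selecting between casted operands compute the same values, so the cast can be pushed into the vselect arms to keep the setcc and the selected data the same width. A scalar analogue of that identity, with element types chosen arbitrarily for illustration:

#include <cassert>
#include <cstdint>

// sext (select c, a, b) == select c, (sext a), (sext b)
int64_t castAfterSelect(bool Cond, int32_t A, int32_t B) {
  return (int64_t)(Cond ? A : B);
}
int64_t selectAfterCast(bool Cond, int32_t A, int32_t B) {
  return Cond ? (int64_t)A : (int64_t)B;
}

int main() {
  for (bool C : {false, true})
    assert(castAfterSelect(C, -5, 7) == selectAfterCast(C, -5, 7));
}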
@@ -7112,19 +7241,21 @@ SDValue DAGCombiner::visitSIGN_EXTEND(SDNode *N) {
DAG.SignBitIsZero(N0))
return DAG.getNode(ISD::ZERO_EXTEND, DL, VT, N0);
+ if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
+ return NewVSel;
+
return SDValue();
}
// isTruncateOf - If N is a truncate of some other value, return true, record
-// the value being truncated in Op and which of Op's bits are zero in KnownZero.
-// This function computes KnownZero to avoid a duplicated call to
+// the value being truncated in Op and which of Op's bits are zero/one in Known.
+// This function computes KnownBits to avoid a duplicated call to
// computeKnownBits in the caller.
static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
- APInt &KnownZero) {
- APInt KnownOne;
+ KnownBits &Known) {
if (N->getOpcode() == ISD::TRUNCATE) {
Op = N->getOperand(0);
- DAG.computeKnownBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, Known);
return true;
}
@@ -7143,9 +7274,9 @@ static bool isTruncateOf(SelectionDAG &DAG, SDValue N, SDValue &Op,
else
return false;
- DAG.computeKnownBits(Op, KnownZero, KnownOne);
+ DAG.computeKnownBits(Op, Known);
- if (!(KnownZero | APInt(Op.getValueSizeInBits(), 1)).isAllOnesValue())
+ if (!(Known.Zero | 1).isAllOnesValue())
return false;
return true;
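The condensed check (Known.Zero | 1).isAllOnesValue() above asks whether every bit except possibly bit 0 is known to be zero, meaning the operand can only hold 0 or 1, so zero-extending its truncation is a no-op. In a fixed 32-bit model the same predicate would read:

#include <cassert>
#include <cstdint>

// True when all bits other than bit 0 are known zero (the value is 0 or 1).
bool onlyLowBitMaybeSet(uint32_t KnownZeroMask) {
  return (KnownZeroMask | 1u) == ~0u;
}

int main() {
  assert(onlyLowBitMaybeSet(~0u));   // everything known zero: value is 0
  assert(onlyLowBitMaybeSet(~1u));   // all but bit 0 known zero: value is 0 or 1
  assert(!onlyLowBitMaybeSet(~2u));  // bit 1 unknown: not safe
}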
@@ -7170,8 +7301,8 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
// This is valid when the truncated bits of x are already zero.
// FIXME: We should extend this to work for vectors too.
SDValue Op;
- APInt KnownZero;
- if (!VT.isVector() && isTruncateOf(DAG, N0, Op, KnownZero)) {
+ KnownBits Known;
+ if (!VT.isVector() && isTruncateOf(DAG, N0, Op, Known)) {
APInt TruncatedBits =
(Op.getValueSizeInBits() == N0.getValueSizeInBits()) ?
APInt(Op.getValueSizeInBits(), 0) :
@@ -7179,7 +7310,7 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
N0.getValueSizeInBits(),
std::min(Op.getValueSizeInBits(),
VT.getSizeInBits()));
- if (TruncatedBits == (KnownZero & TruncatedBits)) {
+ if (TruncatedBits.isSubsetOf(Known.Zero)) {
if (VT.bitsGT(Op.getValueType()))
return DAG.getNode(ISD::ZERO_EXTEND, SDLoc(N), VT, Op);
if (VT.bitsLT(Op.getValueType()))
@@ -7446,6 +7577,9 @@ SDValue DAGCombiner::visitZERO_EXTEND(SDNode *N) {
ShAmt);
}
+ if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
+ return NewVSel;
+
return SDValue();
}
@@ -7802,7 +7936,7 @@ SDValue DAGCombiner::ReduceLoadWidth(SDNode *N) {
SDValue NewPtr = DAG.getNode(ISD::ADD, DL,
PtrType, LN0->getBasePtr(),
DAG.getConstant(PtrOff, DL, PtrType),
- &Flags);
+ Flags);
AddToWorklist(NewPtr.getNode());
SDValue Load;
@@ -8228,17 +8362,21 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
return SDValue(N, 0);
// (trunc adde(X, Y, Carry)) -> (adde trunc(X), trunc(Y), Carry)
+ // (trunc addcarry(X, Y, Carry)) -> (addcarry trunc(X), trunc(Y), Carry)
// When the adde's carry is not used.
- if (N0.getOpcode() == ISD::ADDE && N0.hasOneUse() &&
- !N0.getNode()->hasAnyUseOfValue(1) &&
- (!LegalOperations || TLI.isOperationLegal(ISD::ADDE, VT))) {
+ if ((N0.getOpcode() == ISD::ADDE || N0.getOpcode() == ISD::ADDCARRY) &&
+ N0.hasOneUse() && !N0.getNode()->hasAnyUseOfValue(1) &&
+ (!LegalOperations || TLI.isOperationLegal(N0.getOpcode(), VT))) {
SDLoc SL(N);
auto X = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(0));
auto Y = DAG.getNode(ISD::TRUNCATE, SL, VT, N0.getOperand(1));
- return DAG.getNode(ISD::ADDE, SL, DAG.getVTList(VT, MVT::Glue),
- X, Y, N0.getOperand(2));
+ auto VTs = DAG.getVTList(VT, N0->getValueType(1));
+ return DAG.getNode(N0.getOpcode(), SL, VTs, X, Y, N0.getOperand(2));
}
+ if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
+ return NewVSel;
+
return SDValue();
}
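The widened fold is sound because integer addition is modular: truncating after a wide add-with-carry yields the same low bits as adding the truncated operands, provided the carry-out of the original node is unused. A quick numeric illustration with arbitrary values:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t X = 0x1234567890ABCDEFull, Y = 0x0FEDCBA098765432ull;
  bool CarryIn = true;
  uint32_t WideThenTrunc = (uint32_t)(X + Y + CarryIn);
  uint32_t TruncThenAdd  = (uint32_t)X + (uint32_t)Y + CarryIn;
  assert(WideThenTrunc == TruncThenAdd);  // same low 32 bits either way
}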
@@ -8701,7 +8839,7 @@ ConstantFoldBITCASTofBUILD_VECTOR(SDNode *BV, EVT DstEltVT) {
}
static bool isContractable(SDNode *N) {
- SDNodeFlags F = cast<BinaryWithFlagsSDNode>(N)->Flags;
+ SDNodeFlags F = N->getFlags();
return F.hasAllowContract() || F.hasUnsafeAlgebra();
}
@@ -9287,7 +9425,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
- const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
@@ -9318,7 +9456,7 @@ SDValue DAGCombiner::visitFADD(SDNode *N) {
GetNegatedExpression(N0, DAG, LegalOperations), Flags);
// FIXME: Auto-upgrade the target/function-level option.
- if (Options.NoSignedZerosFPMath || N->getFlags()->hasNoSignedZeros()) {
+ if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros()) {
// fold (fadd A, 0) -> A
if (ConstantFPSDNode *N1C = isConstOrConstSplatFP(N1))
if (N1C->isZero())
@@ -9441,7 +9579,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
- const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
@@ -9461,7 +9599,7 @@ SDValue DAGCombiner::visitFSUB(SDNode *N) {
GetNegatedExpression(N1, DAG, LegalOperations), Flags);
// FIXME: Auto-upgrade the target/function-level option.
- if (Options.NoSignedZerosFPMath || N->getFlags()->hasNoSignedZeros()) {
+ if (Options.NoSignedZerosFPMath || N->getFlags().hasNoSignedZeros()) {
// (fsub 0, B) -> -B
if (N0CFP && N0CFP->isZero()) {
if (isNegatibleForFree(N1, LegalOperations, TLI, &Options))
@@ -9512,7 +9650,7 @@ SDValue DAGCombiner::visitFMUL(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
- const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ const SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector()) {
@@ -9656,7 +9794,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
isConstantFPBuildVectorOrConstantFP(N2.getOperand(1))) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1, N2.getOperand(1),
- &Flags), &Flags);
+ Flags), Flags);
}
// (fma (fmul x, c1), c2, y) -> (fma x, c1*c2, y)
@@ -9666,7 +9804,7 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
return DAG.getNode(ISD::FMA, DL, VT,
N0.getOperand(0),
DAG.getNode(ISD::FMUL, DL, VT, N1, N0.getOperand(1),
- &Flags),
+ Flags),
N2);
}
}
@@ -9692,16 +9830,16 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
if (N1CFP && N0 == N2) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1,
- DAG.getConstantFP(1.0, DL, VT), &Flags),
- &Flags);
+ DAG.getConstantFP(1.0, DL, VT), Flags),
+ Flags);
}
// (fma x, c, (fneg x)) -> (fmul x, (c-1))
if (N1CFP && N2.getOpcode() == ISD::FNEG && N2.getOperand(0) == N0) {
return DAG.getNode(ISD::FMUL, DL, VT, N0,
DAG.getNode(ISD::FADD, DL, VT, N1,
- DAG.getConstantFP(-1.0, DL, VT), &Flags),
- &Flags);
+ DAG.getConstantFP(-1.0, DL, VT), Flags),
+ Flags);
}
}
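The two FMA folds in this hunk rely on simple reassociation, which is only valid under relaxed FP semantics: x*c + x == x*(c + 1) and x*c + (-x) == x*(c - 1). A toy check with exactly representable values (chosen so the comparison is exact):

#include <cassert>

int main() {
  double X = 2.5, C = 4.0;
  assert(X * C + X == X * (C + 1.0));
  assert(X * C - X == X * (C - 1.0));
}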
@@ -9717,8 +9855,8 @@ SDValue DAGCombiner::visitFMA(SDNode *N) {
// is the critical path is increased from "one FDIV" to "one FDIV + one FMUL".
SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
bool UnsafeMath = DAG.getTarget().Options.UnsafeFPMath;
- const SDNodeFlags *Flags = N->getFlags();
- if (!UnsafeMath && !Flags->hasAllowReciprocal())
+ const SDNodeFlags Flags = N->getFlags();
+ if (!UnsafeMath && !Flags.hasAllowReciprocal())
return SDValue();
// Skip if current node is a reciprocal.
@@ -9741,7 +9879,7 @@ SDValue DAGCombiner::combineRepeatedFPDivisors(SDNode *N) {
if (U->getOpcode() == ISD::FDIV && U->getOperand(1) == N1) {
// This division is eligible for optimization only if global unsafe math
// is enabled or if this division allows reciprocal formation.
- if (UnsafeMath || U->getFlags()->hasAllowReciprocal())
+ if (UnsafeMath || U->getFlags().hasAllowReciprocal())
Users.insert(U);
}
}
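combineRepeatedFPDivisors trades N divisions by a common divisor for one division plus N multiplications, shortening the critical path at some cost in accuracy, which is why it requires global unsafe math or the per-node allow-reciprocal flag now read through getFlags(). A scalar sketch of the same rewrite (function name and shapes are illustrative only):

#include <cstdio>

void divideEach(const float *In, float *Out, int N, float D) {
  float Recip = 1.0f / D;       // single FDIV
  for (int I = 0; I < N; ++I)
    Out[I] = In[I] * Recip;     // FMULs replace the per-element FDIVs
}

int main() {
  float In[4] = {1, 2, 3, 4}, Out[4];
  divideEach(In, Out, 4, 8.0f);
  for (float F : Out)
    std::printf("%g ", F);
}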
@@ -9780,7 +9918,7 @@ SDValue DAGCombiner::visitFDIV(SDNode *N) {
EVT VT = N->getValueType(0);
SDLoc DL(N);
const TargetOptions &Options = DAG.getTarget().Options;
- SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(N)->Flags;
+ SDNodeFlags Flags = N->getFlags();
// fold vector ops
if (VT.isVector())
@@ -9894,8 +10032,7 @@ SDValue DAGCombiner::visitFREM(SDNode *N) {
// fold (frem c1, c2) -> fmod(c1,c2)
if (N0CFP && N1CFP)
- return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1,
- &cast<BinaryWithFlagsSDNode>(N)->Flags);
+ return DAG.getNode(ISD::FREM, SDLoc(N), VT, N0, N1, N->getFlags());
if (SDValue NewSel = foldBinOpIntoSelect(N))
return NewSel;
@@ -9915,7 +10052,7 @@ SDValue DAGCombiner::visitFSQRT(SDNode *N) {
// For now, create a Flags object for use with all unsafe math transforms.
SDNodeFlags Flags;
Flags.setUnsafeAlgebra(true);
- return buildSqrtEstimate(N0, &Flags);
+ return buildSqrtEstimate(N0, Flags);
}
/// copysign(x, fp_extend(y)) -> copysign(x, y)
@@ -10190,6 +10327,9 @@ SDValue DAGCombiner::visitFP_ROUND(SDNode *N) {
Tmp, N0.getOperand(1));
}
+ if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
+ return NewVSel;
+
return SDValue();
}
@@ -10256,6 +10396,9 @@ SDValue DAGCombiner::visitFP_EXTEND(SDNode *N) {
return SDValue(N, 0); // Return N so it doesn't get rechecked!
}
+ if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
+ return NewVSel;
+
return SDValue();
}
@@ -10341,10 +10484,10 @@ SDValue DAGCombiner::visitFNEG(SDNode *N) {
if (Level >= AfterLegalizeDAG &&
(TLI.isFPImmLegal(CVal, VT) ||
TLI.isOperationLegal(ISD::ConstantFP, VT)))
- return DAG.getNode(ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
- DAG.getNode(ISD::FNEG, SDLoc(N), VT,
- N0.getOperand(1)),
- &cast<BinaryWithFlagsSDNode>(N0)->Flags);
+ return DAG.getNode(
+ ISD::FMUL, SDLoc(N), VT, N0.getOperand(0),
+ DAG.getNode(ISD::FNEG, SDLoc(N), VT, N0.getOperand(1)),
+ N0->getFlags());
}
}
@@ -15832,7 +15975,7 @@ SDValue DAGCombiner::BuildLogBase2(SDValue V, const SDLoc &DL) {
/// =>
/// X_{i+1} = X_i (2 - A X_i) = X_i + X_i (1 - A X_i) [this second form
/// does not require additional intermediate precision]
-SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) {
+SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags Flags) {
if (Level >= AfterLegalizeDAG)
return SDValue();
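The recurrence quoted in the comment above, X_{i+1} = X_i (2 - A X_i), is the standard Newton-Raphson iteration for 1/A and converges quadratically from a reasonable starting estimate. A short numeric demonstration, with the starting estimate picked arbitrarily:

#include <cstdio>

int main() {
  const float A = 3.0f;
  float X = 0.3f;                        // crude initial estimate of 1/3
  for (int I = 0; I < 4; ++I) {
    X = X * (2.0f - A * X);              // one Newton-Raphson refinement step
    std::printf("iter %d: %.9f\n", I, X);
  }
}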
@@ -15887,7 +16030,7 @@ SDValue DAGCombiner::BuildReciprocalEstimate(SDValue Op, SDNodeFlags *Flags) {
/// As a result, we precompute A/2 prior to the iteration loop.
SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
unsigned Iterations,
- SDNodeFlags *Flags, bool Reciprocal) {
+ SDNodeFlags Flags, bool Reciprocal) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue ThreeHalves = DAG.getConstantFP(1.5, DL, VT);
@@ -15931,7 +16074,7 @@ SDValue DAGCombiner::buildSqrtNROneConst(SDValue Arg, SDValue Est,
/// X_{i+1} = (-0.5 * X_i) * (A * X_i * X_i + (-3.0))
SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
unsigned Iterations,
- SDNodeFlags *Flags, bool Reciprocal) {
+ SDNodeFlags Flags, bool Reciprocal) {
EVT VT = Arg.getValueType();
SDLoc DL(Arg);
SDValue MinusThree = DAG.getConstantFP(-3.0, DL, VT);
@@ -15976,7 +16119,7 @@ SDValue DAGCombiner::buildSqrtNRTwoConst(SDValue Arg, SDValue Est,
/// Build code to calculate either rsqrt(Op) or sqrt(Op). In the latter case
/// Op*rsqrt(Op) is actually computed, so additional postprocessing is needed if
/// Op can be zero.
-SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags,
+SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags Flags,
bool Reciprocal) {
if (Level >= AfterLegalizeDAG)
return SDValue();
@@ -16029,11 +16172,11 @@ SDValue DAGCombiner::buildSqrtEstimateImpl(SDValue Op, SDNodeFlags *Flags,
return SDValue();
}
-SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
+SDValue DAGCombiner::buildRsqrtEstimate(SDValue Op, SDNodeFlags Flags) {
return buildSqrtEstimateImpl(Op, Flags, true);
}
-SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags *Flags) {
+SDValue DAGCombiner::buildSqrtEstimate(SDValue Op, SDNodeFlags Flags) {
return buildSqrtEstimateImpl(Op, Flags, false);
}
diff --git a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
index 377a5237f15a..a0135dc40b87 100644
--- a/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
+++ b/lib/CodeGen/SelectionDAG/FunctionLoweringInfo.cpp
@@ -400,10 +400,10 @@ FunctionLoweringInfo::GetLiveOutRegInfo(unsigned Reg, unsigned BitWidth) {
if (!LOI->IsValid)
return nullptr;
- if (BitWidth > LOI->KnownZero.getBitWidth()) {
+ if (BitWidth > LOI->Known.getBitWidth()) {
LOI->NumSignBits = 1;
- LOI->KnownZero = LOI->KnownZero.zextOrTrunc(BitWidth);
- LOI->KnownOne = LOI->KnownOne.zextOrTrunc(BitWidth);
+ LOI->Known.Zero = LOI->Known.Zero.zextOrTrunc(BitWidth);
+ LOI->Known.One = LOI->Known.One.zextOrTrunc(BitWidth);
}
return LOI;
@@ -436,17 +436,15 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
Value *V = PN->getIncomingValue(0);
if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
DestLOI.NumSignBits = 1;
- APInt Zero(BitWidth, 0);
- DestLOI.KnownZero = Zero;
- DestLOI.KnownOne = Zero;
+ DestLOI.Known = KnownBits(BitWidth);
return;
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val = CI->getValue().zextOrTrunc(BitWidth);
DestLOI.NumSignBits = Val.getNumSignBits();
- DestLOI.KnownZero = ~Val;
- DestLOI.KnownOne = Val;
+ DestLOI.Known.Zero = ~Val;
+ DestLOI.Known.One = Val;
} else {
assert(ValueMap.count(V) && "V should have been placed in ValueMap when its"
"CopyToReg node was created.");
@@ -463,25 +461,23 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
DestLOI = *SrcLOI;
}
- assert(DestLOI.KnownZero.getBitWidth() == BitWidth &&
- DestLOI.KnownOne.getBitWidth() == BitWidth &&
+ assert(DestLOI.Known.Zero.getBitWidth() == BitWidth &&
+ DestLOI.Known.One.getBitWidth() == BitWidth &&
"Masks should have the same bit width as the type.");
for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i) {
Value *V = PN->getIncomingValue(i);
if (isa<UndefValue>(V) || isa<ConstantExpr>(V)) {
DestLOI.NumSignBits = 1;
- APInt Zero(BitWidth, 0);
- DestLOI.KnownZero = Zero;
- DestLOI.KnownOne = Zero;
+ DestLOI.Known = KnownBits(BitWidth);
return;
}
if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
APInt Val = CI->getValue().zextOrTrunc(BitWidth);
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, Val.getNumSignBits());
- DestLOI.KnownZero &= ~Val;
- DestLOI.KnownOne &= Val;
+ DestLOI.Known.Zero &= ~Val;
+ DestLOI.Known.One &= Val;
continue;
}
@@ -498,8 +494,8 @@ void FunctionLoweringInfo::ComputePHILiveOutRegInfo(const PHINode *PN) {
return;
}
DestLOI.NumSignBits = std::min(DestLOI.NumSignBits, SrcLOI->NumSignBits);
- DestLOI.KnownZero &= SrcLOI->KnownZero;
- DestLOI.KnownOne &= SrcLOI->KnownOne;
+ DestLOI.Known.Zero &= SrcLOI->Known.Zero;
+ DestLOI.Known.One &= SrcLOI->Known.One;
}
}
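The KnownBits merging for PHI live-outs above keeps a bit as known-one only if it is one in every incoming value, and as known-zero only if it is zero in every incoming value, which is exactly what the &= updates implement. A hand-rolled 32-bit model of that intersection (struct and function names are illustrative, not LLVM's):

#include <cassert>
#include <cstdint>

struct Known { uint32_t Zero; uint32_t One; };

Known mergeConstants(const uint32_t *Vals, int N) {
  Known K{~Vals[0], Vals[0]};            // first incoming value is fully known
  for (int I = 1; I < N; ++I) {
    K.Zero &= ~Vals[I];                  // drop zeros that are not zero everywhere
    K.One  &=  Vals[I];                  // drop ones that are not one everywhere
  }
  return K;
}

int main() {
  uint32_t Vals[] = {0b1010, 0b1110};
  Known K = mergeConstants(Vals, 2);
  assert(K.One  == 0b1010);              // bits 1 and 3 are one in both values
  assert(K.Zero == ((~0u << 4) | 0b0001));// bit 0 and the high bits are zero in both
}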
diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index fdebb8bd00db..2654b3ad7a62 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2589,7 +2589,7 @@ SDValue SelectionDAGLegalize::ExpandBITREVERSE(SDValue Op, const SDLoc &dl) {
DAG.getNode(ISD::SRL, dl, VT, Op, DAG.getConstant(I - J, dl, SHVT));
APInt Shift(Sz, 1);
- Shift = Shift.shl(J);
+ Shift <<= J;
Tmp2 = DAG.getNode(ISD::AND, dl, VT, Tmp2, DAG.getConstant(Shift, dl, VT));
Tmp = DAG.getNode(ISD::OR, dl, VT, Tmp, Tmp2);
}
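ExpandBITREVERSE moves every source bit to its mirrored position with a shift and a one-bit mask, and the APInt change above simply shifts that mask in place instead of copying it. An 8-bit scalar model of the expansion's effect (a sketch of the idea, not the DAG code):

#include <cassert>
#include <cstdint>

// Move bit I of V to position (Width - 1 - I) and OR the results together.
uint8_t reverseBits8(uint8_t V) {
  uint8_t R = 0;
  for (int I = 0; I < 8; ++I)
    if (V & (1u << I))
      R |= 1u << (7 - I);
  return R;
}

int main() {
  assert(reverseBits8(0b00000001) == 0b10000000);
  assert(reverseBits8(0b10110000) == 0b00001101);
}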
@@ -3253,7 +3253,7 @@ bool SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT VT = Node->getValueType(0);
if (TLI.isOperationLegalOrCustom(ISD::FADD, VT) &&
TLI.isOperationLegalOrCustom(ISD::FNEG, VT)) {
- const SDNodeFlags *Flags = &cast<BinaryWithFlagsSDNode>(Node)->Flags;
+ const SDNodeFlags Flags = Node->getFlags();
Tmp1 = DAG.getNode(ISD::FNEG, dl, VT, Node->getOperand(1));
Tmp1 = DAG.getNode(ISD::FADD, dl, VT, Node->getOperand(0), Tmp1, Flags);
Results.push_back(Tmp1);
diff --git a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 9ed70c9b4db9..92b0d2ae4015 100644
--- a/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -21,6 +21,7 @@
#include "LegalizeTypes.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/KnownBits.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;
@@ -134,6 +135,9 @@ void DAGTypeLegalizer::PromoteIntegerResult(SDNode *N, unsigned ResNo) {
case ISD::SMULO:
case ISD::UMULO: Res = PromoteIntRes_XMULO(N, ResNo); break;
+ case ISD::ADDCARRY:
+ case ISD::SUBCARRY: Res = PromoteIntRes_ADDSUBCARRY(N, ResNo); break;
+
case ISD::ATOMIC_LOAD:
Res = PromoteIntRes_Atomic0(cast<AtomicSDNode>(N)); break;
@@ -510,9 +514,14 @@ SDValue DAGTypeLegalizer::PromoteIntRes_Overflow(SDNode *N) {
// Simply change the return type of the boolean result.
EVT NVT = TLI.getTypeToTransformTo(*DAG.getContext(), N->getValueType(1));
EVT ValueVTs[] = { N->getValueType(0), NVT };
- SDValue Ops[] = { N->getOperand(0), N->getOperand(1) };
+ SDValue Ops[3] = { N->getOperand(0), N->getOperand(1) };
+ unsigned NumOps = N->getNumOperands();
+ assert(NumOps <= 3 && "Too many operands");
+ if (NumOps == 3)
+ Ops[2] = N->getOperand(2);
+
SDValue Res = DAG.getNode(N->getOpcode(), SDLoc(N),
- DAG.getVTList(ValueVTs), Ops);
+ DAG.getVTList(ValueVTs), makeArrayRef(Ops, NumOps));
// Modified the sum result - switch anything that used the old sum to use
// the new one.
@@ -762,6 +771,12 @@ SDValue DAGTypeLegalizer::PromoteIntRes_UADDSUBO(SDNode *N, unsigned ResNo) {
return Res;
}
+SDValue DAGTypeLegalizer::PromoteIntRes_ADDSUBCARRY(SDNode *N, unsigned ResNo) {
+ if (ResNo == 1)
+ return PromoteIntRes_Overflow(N);
+ llvm_unreachable("Not implemented");
+}
+
SDValue DAGTypeLegalizer::PromoteIntRes_XMULO(SDNode *N, unsigned ResNo) {
// Promote the overflow bit trivially.
if (ResNo == 1)
@@ -924,6 +939,9 @@ bool DAGTypeLegalizer::PromoteIntegerOperand(SDNode *N, unsigned OpNo) {
case ISD::SRL:
case ISD::ROTL:
case ISD::ROTR: Res = PromoteIntOp_Shift(N); break;
+
+ case ISD::ADDCARRY:
+ case ISD::SUBCARRY: Res = PromoteIntOp_ADDSUBCARRY(N, OpNo); break;
}
// If the result is null, the sub-method took care of registering results etc.
@@ -1276,6 +1294,30 @@ SDValue DAGTypeLegalizer::PromoteIntOp_ZERO_EXTEND(SDNode *N) {
N->getOperand(0).getValueType().getScalarType());
}
+S