path: root/lib/Basic/Targets/AArch64.cpp
author     Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:49 +0000
committer  Dimitry Andric <dim@FreeBSD.org>    2019-08-20 20:50:49 +0000
commit     2298981669bf3bd63335a4be179bc0f96823a8f4 (patch)
tree       1cbe2eb27f030d2d70b80ee5ca3c86bee7326a9f /lib/Basic/Targets/AArch64.cpp
parent     9a83721404652cea39e9f02ae3e3b5c964602a5c (diff)
Vendor import of stripped clang trunk r366426 (just before the …)    tag: vendor/clang/clang-trunk-r366426
Notes:
    svn path=/vendor/clang/dist/; revision=351280
    svn path=/vendor/clang/clang-trunk-r366426/; revision=351281; tag=vendor/clang/clang-trunk-r366426
Diffstat (limited to 'lib/Basic/Targets/AArch64.cpp')
-rw-r--r--    lib/Basic/Targets/AArch64.cpp    108
1 file changed, 81 insertions(+), 27 deletions(-)
diff --git a/lib/Basic/Targets/AArch64.cpp b/lib/Basic/Targets/AArch64.cpp
index 62919a02dcb9..74ac69ab8946 100644
--- a/lib/Basic/Targets/AArch64.cpp
+++ b/lib/Basic/Targets/AArch64.cpp
@@ -1,9 +1,8 @@
//===--- AArch64.cpp - Implement AArch64 target feature support -----------===//
//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
@@ -50,6 +49,7 @@ AArch64TargetInfo::AArch64TargetInfo(const llvm::Triple &Triple,
// All AArch64 implementations support ARMv8 FP, which makes half a legal type.
HasLegalHalfType = true;
+ HasFloat16 = true;
LongWidth = LongAlign = PointerWidth = PointerAlign = 64;
MaxVectorAlign = 128;
@@ -118,6 +118,28 @@ void AArch64TargetInfo::getTargetDefinesARMV82A(const LangOptions &Opts,
getTargetDefinesARMV81A(Opts, Builder);
}
+void AArch64TargetInfo::getTargetDefinesARMV83A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ARM_FEATURE_JCVT", "1");
+ // Also include the Armv8.2 defines
+ getTargetDefinesARMV82A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV84A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.3 defines
+ // FIXME: Armv8.4 makes some extensions mandatory. Handle them here.
+ getTargetDefinesARMV83A(Opts, Builder);
+}
+
+void AArch64TargetInfo::getTargetDefinesARMV85A(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ // Also include the Armv8.4 defines
+ // FIXME: Armv8.5 makes some extensions mandatory. Handle them here.
+ getTargetDefinesARMV84A(Opts, Builder);
+}
+
+
void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
// Target identification.
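
Note: the three hooks added above chain downward (the Armv8.5-A hook calls the Armv8.4-A one, and so on), so lower-level feature macros stay visible at higher -march levels. A minimal, hypothetical consumer of the new __ARM_FEATURE_JCVT macro might look like the sketch below; the __jcvt() intrinsic name is assumed from <arm_acle.h> and is not part of this change:

#include <arm_acle.h>

static inline int to_int_js(double d) {
#ifdef __ARM_FEATURE_JCVT
  return __jcvt(d);   // single FJCVTZS instruction on Armv8.3-A and later
#else
  return (int)d;      // approximate fallback; NaN/out-of-range behave differently
#endif
}
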
@@ -177,13 +199,13 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (FPU & SveMode)
Builder.defineMacro("__ARM_FEATURE_SVE", "1");
- if (CRC)
+ if (HasCRC)
Builder.defineMacro("__ARM_FEATURE_CRC32", "1");
- if (Crypto)
+ if (HasCrypto)
Builder.defineMacro("__ARM_FEATURE_CRYPTO", "1");
- if (Unaligned)
+ if (HasUnaligned)
Builder.defineMacro("__ARM_FEATURE_UNALIGNED", "1");
if ((FPU & NeonMode) && HasFullFP16)
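
The HasCRC/HasCrypto/HasUnaligned renames above are mechanical; the macros they gate are unchanged. As a sketch of the usual consumer pattern (the __crc32w() intrinsic from <arm_acle.h> is an assumption here, not something introduced by this diff):

#include <arm_acle.h>
#include <stdint.h>

static inline uint32_t step_crc(uint32_t crc, uint32_t word) {
#ifdef __ARM_FEATURE_CRC32
  return __crc32w(crc, word);   // hardware CRC-32 instruction
#else
  return crc ^ word;            // placeholder only, not a real CRC
#endif
}
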
@@ -194,6 +216,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasDotProd)
Builder.defineMacro("__ARM_FEATURE_DOTPROD", "1");
+ if (HasMTE)
+ Builder.defineMacro("__ARM_FEATURE_MEMORY_TAGGING", "1");
+
if ((FPU & NeonMode) && HasFP16FML)
Builder.defineMacro("__ARM_FEATURE_FP16FML", "1");
@@ -206,6 +231,15 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::AArch64::ArchKind::ARMV8_2A:
getTargetDefinesARMV82A(Opts, Builder);
break;
+ case llvm::AArch64::ArchKind::ARMV8_3A:
+ getTargetDefinesARMV83A(Opts, Builder);
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_4A:
+ getTargetDefinesARMV84A(Opts, Builder);
+ break;
+ case llvm::AArch64::ArchKind::ARMV8_5A:
+ getTargetDefinesARMV85A(Opts, Builder);
+ break;
}
// All of the __sync_(bool|val)_compare_and_swap_(1|2|4|8) builtins work.
@@ -229,12 +263,13 @@ bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
FPU = FPUMode;
- CRC = 0;
- Crypto = 0;
- Unaligned = 1;
- HasFullFP16 = 0;
- HasDotProd = 0;
- HasFP16FML = 0;
+ HasCRC = false;
+ HasCrypto = false;
+ HasUnaligned = true;
+ HasFullFP16 = false;
+ HasDotProd = false;
+ HasFP16FML = false;
+ HasMTE = false;
ArchKind = llvm::AArch64::ArchKind::ARMV8A;
for (const auto &Feature : Features) {
@@ -243,21 +278,29 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
if (Feature == "+sve")
FPU |= SveMode;
if (Feature == "+crc")
- CRC = 1;
+ HasCRC = true;
if (Feature == "+crypto")
- Crypto = 1;
+ HasCrypto = true;
if (Feature == "+strict-align")
- Unaligned = 0;
+ HasUnaligned = false;
if (Feature == "+v8.1a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_1A;
if (Feature == "+v8.2a")
ArchKind = llvm::AArch64::ArchKind::ARMV8_2A;
+ if (Feature == "+v8.3a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_3A;
+ if (Feature == "+v8.4a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_4A;
+ if (Feature == "+v8.5a")
+ ArchKind = llvm::AArch64::ArchKind::ARMV8_5A;
if (Feature == "+fullfp16")
- HasFullFP16 = 1;
+ HasFullFP16 = true;
if (Feature == "+dotprod")
- HasDotProd = 1;
+ HasDotProd = true;
if (Feature == "+fp16fml")
- HasFP16FML = 1;
+ HasFP16FML = true;
+ if (Feature == "+mte")
+ HasMTE = true;
}
setDataLayout();
@@ -528,16 +571,10 @@ MicrosoftARM64TargetInfo::MicrosoftARM64TargetInfo(const llvm::Triple &Triple,
TheCXXABI.set(TargetCXXABI::Microsoft);
}
-void MicrosoftARM64TargetInfo::getVisualStudioDefines(
- const LangOptions &Opts, MacroBuilder &Builder) const {
- WindowsTargetInfo<AArch64leTargetInfo>::getVisualStudioDefines(Opts, Builder);
- Builder.defineMacro("_M_ARM64", "1");
-}
-
void MicrosoftARM64TargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
- WindowsTargetInfo::getTargetDefines(Opts, Builder);
- getVisualStudioDefines(Opts, Builder);
+ WindowsARM64TargetInfo::getTargetDefines(Opts, Builder);
+ Builder.defineMacro("_M_ARM64", "1");
}
TargetInfo::CallingConvKind
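
Folding the _M_ARM64 define directly into getTargetDefines above keeps parity with the macro MSVC itself predefines for ARM64 targets. A typical, hypothetical consumer-side check (not from this diff):

#if defined(_M_ARM64)
  /* MSVC-style detection of Windows on ARM64 */
#elif defined(__aarch64__)
  /* GCC/Clang-style detection on other AArch64 targets */
#endif
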
@@ -545,6 +582,23 @@ MicrosoftARM64TargetInfo::getCallingConvKind(bool ClangABICompat4) const {
return CCK_MicrosoftWin64;
}
+unsigned MicrosoftARM64TargetInfo::getMinGlobalAlign(uint64_t TypeSize) const {
+ unsigned Align = WindowsARM64TargetInfo::getMinGlobalAlign(TypeSize);
+
+ // MSVC does size based alignment for arm64 based on alignment section in
+ // below document, replicate that to keep alignment consistent with object
+ // files compiled by MSVC.
+ // https://docs.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions
+ if (TypeSize >= 512) { // TypeSize >= 64 bytes
+ Align = std::max(Align, 128u); // align type at least 16 bytes
+ } else if (TypeSize >= 64) { // TypeSize >= 8 bytes
+ Align = std::max(Align, 64u); // align type at least 8 bytes
+ } else if (TypeSize >= 16) { // TypeSize >= 2 bytes
+ Align = std::max(Align, 32u); // align type at least 4 bytes
+ }
+ return Align;
+}
+
MinGWARM64TargetInfo::MinGWARM64TargetInfo(const llvm::Triple &Triple,
const TargetOptions &Opts)
: WindowsARM64TargetInfo(Triple, Opts) {
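
For reference, the TypeSize thresholds in getMinGlobalAlign above are in bits. A standalone restatement of the rule, as a sketch (the helper name minGlobalAlignBits is hypothetical; the thresholds mirror the hunk):

#include <algorithm>
#include <cstdint>

// Size-based minimum alignment for MSVC-compatible arm64 globals, in bits.
static unsigned minGlobalAlignBits(uint64_t TypeSizeBits, unsigned BaseAlign) {
  unsigned Align = BaseAlign;
  if (TypeSizeBits >= 512)          // 64 bytes or larger
    Align = std::max(Align, 128u);  // at least 16-byte alignment
  else if (TypeSizeBits >= 64)      // 8 bytes or larger
    Align = std::max(Align, 64u);   // at least 8-byte alignment
  else if (TypeSizeBits >= 16)      // 2 bytes or larger
    Align = std::max(Align, 32u);   // at least 4-byte alignment
  return Align;
}

So, for example, a 100-byte global (800 bits) lands in the first bucket and is aligned to at least 16 bytes.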