author    | Jung-uk Kim <jkim@FreeBSD.org> | 2015-12-03 17:22:58 +0000
committer | Jung-uk Kim <jkim@FreeBSD.org> | 2015-12-03 17:22:58 +0000
commit    | 737d7e8d3945c206c037e139055821aa0c64bb8e (patch)
tree      | b0284af4e4144e27eb9f39e88c53868060774b16 /crypto/sha
parent    | e9fcefce9bb70f20c272a996443928c5f6ab8cd8 (diff)
download  | src-737d7e8d3945c206c037e139055821aa0c64bb8e.tar.gz, src-737d7e8d3945c206c037e139055821aa0c64bb8e.zip
Import OpenSSL 1.0.2e. (vendor/openssl/1.0.2e)
Notes:
svn path=/vendor-crypto/openssl/dist/; revision=291707
svn path=/vendor-crypto/openssl/1.0.2e/; revision=291708; tag=vendor/openssl/1.0.2e
Diffstat (limited to 'crypto/sha')
-rw-r--r-- | crypto/sha/asm/sha1-586.pl         | 4
-rwxr-xr-x | crypto/sha/asm/sha1-mb-x86_64.pl   | 2
-rwxr-xr-x | crypto/sha/asm/sha1-x86_64.pl      | 2
-rwxr-xr-x | crypto/sha/asm/sha256-586.pl       | 2
-rwxr-xr-x | crypto/sha/asm/sha256-mb-x86_64.pl | 2
-rwxr-xr-x | crypto/sha/asm/sha512-586.pl       | 2
-rwxr-xr-x | crypto/sha/asm/sha512-parisc.pl    | 2
-rwxr-xr-x | crypto/sha/asm/sha512-x86_64.pl    | 2
8 files changed, 9 insertions(+), 9 deletions(-)
diff --git a/crypto/sha/asm/sha1-586.pl b/crypto/sha/asm/sha1-586.pl
index 4895eb3ddf85..e0b5d83b6201 100644
--- a/crypto/sha/asm/sha1-586.pl
+++ b/crypto/sha/asm/sha1-586.pl
@@ -66,9 +66,9 @@
 # switch to AVX alone improves performance by as little as 4% in
 # comparison to SSSE3 code path. But below result doesn't look like
 # 4% improvement... Trouble is that Sandy Bridge decodes 'ro[rl]' as
-# pair of µ-ops, and it's the additional µ-ops, two per round, that
+# pair of µ-ops, and it's the additional µ-ops, two per round, that
 # make it run slower than Core2 and Westmere. But 'sh[rl]d' is decoded
-# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
+# as single µ-op by Sandy Bridge and it's replacing 'ro[rl]' with
 # equivalent 'sh[rl]d' that is responsible for the impressive 5.1
 # cycles per processed byte. But 'sh[rl]d' is not something that used
 # to be fast, nor does it appear to be fast in upcoming Bulldozer
diff --git a/crypto/sha/asm/sha1-mb-x86_64.pl b/crypto/sha/asm/sha1-mb-x86_64.pl
index a8ee075eaaa0..f856bb888b0e 100755
--- a/crypto/sha/asm/sha1-mb-x86_64.pl
+++ b/crypto/sha/asm/sha1-mb-x86_64.pl
@@ -58,7 +58,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
 	$avx = ($1>=10) + ($1>=11);
 }
 
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
 	$avx = ($2>=3.0) + ($2>3.0);
 }
 
diff --git a/crypto/sha/asm/sha1-x86_64.pl b/crypto/sha/asm/sha1-x86_64.pl
index 9bb6b498190f..9a6acc347d33 100755
--- a/crypto/sha/asm/sha1-x86_64.pl
+++ b/crypto/sha/asm/sha1-x86_64.pl
@@ -107,7 +107,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
 	$avx = ($1>=10) + ($1>=11);
 }
 
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([2-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([2-9]\.[0-9]+)/) {
 	$avx = ($2>=3.0) + ($2>3.0);
 }
 
diff --git a/crypto/sha/asm/sha256-586.pl b/crypto/sha/asm/sha256-586.pl
index 6462e45ba75b..e9077143817c 100755
--- a/crypto/sha/asm/sha256-586.pl
+++ b/crypto/sha/asm/sha256-586.pl
@@ -10,7 +10,7 @@
 # SHA256 block transform for x86. September 2007.
 #
 # Performance improvement over compiler generated code varies from
-# 10% to 40% [see below]. Not very impressive on some µ-archs, but
+# 10% to 40% [see below]. Not very impressive on some µ-archs, but
 # it's 5 times smaller and optimizies amount of writes.
 #
 # May 2012.
diff --git a/crypto/sha/asm/sha256-mb-x86_64.pl b/crypto/sha/asm/sha256-mb-x86_64.pl
index adf2ddccd18b..3d37ae31ad3e 100755
--- a/crypto/sha/asm/sha256-mb-x86_64.pl
+++ b/crypto/sha/asm/sha256-mb-x86_64.pl
@@ -59,7 +59,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
 	$avx = ($1>=10) + ($1>=11);
 }
 
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
 	$avx = ($2>=3.0) + ($2>3.0);
 }
 
diff --git a/crypto/sha/asm/sha512-586.pl b/crypto/sha/asm/sha512-586.pl
index e96ec00314a4..2f6a202c3765 100755
--- a/crypto/sha/asm/sha512-586.pl
+++ b/crypto/sha/asm/sha512-586.pl
@@ -37,7 +37,7 @@
 #
 # IALU code-path is optimized for elder Pentiums. On vanilla Pentium
 # performance improvement over compiler generated code reaches ~60%,
-# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
+# while on PIII - ~35%. On newer µ-archs improvement varies from 15%
 # to 50%, but it's less important as they are expected to execute SSE2
 # code-path, which is commonly ~2-3x faster [than compiler generated
 # code]. SSE2 code-path is as fast as original sha512-sse2.pl, even
diff --git a/crypto/sha/asm/sha512-parisc.pl b/crypto/sha/asm/sha512-parisc.pl
index fc0e15b3c059..6cad72e25573 100755
--- a/crypto/sha/asm/sha512-parisc.pl
+++ b/crypto/sha/asm/sha512-parisc.pl
@@ -19,7 +19,7 @@
 # SHA512 performance is >2.9x better than gcc 3.2 generated code on
 # PA-7100LC, PA-RISC 1.1 processor. Then implementation detects if the
 # code is executed on PA-RISC 2.0 processor and switches to 64-bit
-# code path delivering adequate peformance even in "blended" 32-bit
+# code path delivering adequate performance even in "blended" 32-bit
 # build. Though 64-bit code is not any faster than code generated by
 # vendor compiler on PA-8600...
 #
diff --git a/crypto/sha/asm/sha512-x86_64.pl b/crypto/sha/asm/sha512-x86_64.pl
index b7b44b441136..58665667f149 100755
--- a/crypto/sha/asm/sha512-x86_64.pl
+++ b/crypto/sha/asm/sha512-x86_64.pl
@@ -124,7 +124,7 @@ if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) &&
 	$avx = ($1>=10) + ($1>=11);
 }
 
-if (!$avx && `$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/) {
+if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/) {
 	$avx = ($2>=3.0) + ($2>3.0);
 }
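Aside from comment clean-ups, the recurring functional change in the x86_64 SHA scripts is the widened compiler-detection regex: banners that announce themselves as "LLVM version ..." are now accepted in addition to a banner starting with "clang version ...", so such toolchains also get the AVX code paths. A minimal sketch of the effect, using the [3-9] variant of the pattern from the diff above and made-up `cc -v` banner strings (not output captured from any real compiler):

#!/usr/bin/env perl
# Hypothetical demonstration only: compare the old and new detection
# patterns against two illustrative compiler banners.
use strict;
use warnings;

my $old = qr/(^clang version|based on LLVM) ([3-9]\.[0-9]+)/;
my $new = qr/((?:^clang|LLVM) version|based on LLVM) ([3-9]\.[0-9]+)/;

# Banner strings are assumptions for illustration.
for my $banner ("clang version 3.7.0 (tags/RELEASE_370/final)",
                "Apple LLVM version 7.0.0 (clang-700.1.76)") {
    # Mirror the scripts' $avx calculation; a higher value unlocks newer code paths.
    my $avx_old = ($banner =~ $old) ? ($2>=3.0) + ($2>3.0) : 0;
    my $avx_new = ($banner =~ $new) ? ($2>=3.0) + ($2>3.0) : 0;
    print "$banner\n  old pattern: avx=$avx_old, new pattern: avx=$avx_new\n";
}

With the old alternation the second banner never matches, so $avx stays 0 and only the non-AVX paths are emitted; the new "(?:^clang|LLVM) version" alternation recognizes it and sets $avx the same way as for a plain clang banner.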