Diffstat (limited to 'lib/Headers/avx512vlvbmi2intrin.h')
-rw-r--r--  lib/Headers/avx512vlvbmi2intrin.h  171
1 file changed, 80 insertions(+), 91 deletions(-)
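Note: the patch below renames the non-standard _mm128_* 128-bit intrinsics to the conventional _mm_* prefix and drops the private _mm128_setzero_hi/_mm256_setzero_hi helpers in favor of the public _mm_setzero_si128/_mm256_setzero_si256. A minimal usage sketch follows; it is not part of the patch, the function name and compile flags are illustrative (assuming a compiler targeting avx512vl + avx512vbmi2), and it only exercises intrinsic spellings that appear in the diff.

/* Illustrative only -- not part of the diff.  Build with something like
   clang -mavx512vl -mavx512vbmi2. */
#include <immintrin.h>

__m128i demo(__m128i data, __mmask8 keep)  /* hypothetical helper name */
{
  /* Old spelling: _mm128_maskz_compress_epi16(keep, data); the zeroed
     lanes previously came from the removed _mm128_setzero_hi(), now
     from _mm_setzero_si128() inside the header. */
  __m128i packed = _mm_maskz_compress_epi16(keep, data);

  /* Old spelling: _mm128_mask_expand_epi16(packed, keep, data). */
  return _mm_mask_expand_epi16(packed, keep, data);
}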
diff --git a/lib/Headers/avx512vlvbmi2intrin.h b/lib/Headers/avx512vlvbmi2intrin.h
index d1ec4976f274..5b05376fc4b2 100644
--- a/lib/Headers/avx512vlvbmi2intrin.h
+++ b/lib/Headers/avx512vlvbmi2intrin.h
@@ -31,13 +31,8 @@
/* Define the default attributes for the functions in this file. */
#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("avx512vl,avx512vbmi2")))
-static __inline __m128i __DEFAULT_FN_ATTRS
-_mm128_setzero_hi(void) {
- return (__m128i)(__v8hi){ 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+_mm_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
(__v8hi) __S,
@@ -45,15 +40,15 @@ _mm128_mask_compress_epi16(__m128i __S, __mmask8 __U, __m128i __D)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_compress_epi16(__mmask8 __U, __m128i __D)
+_mm_maskz_compress_epi16(__mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compresshi128_mask ((__v8hi) __D,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+_mm_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
(__v16qi) __S,
@@ -61,29 +56,29 @@ _mm128_mask_compress_epi8(__m128i __S, __mmask16 __U, __m128i __D)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_compress_epi8(__mmask16 __U, __m128i __D)
+_mm_maskz_compress_epi8(__mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_compressqi128_mask ((__v16qi) __D,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
static __inline__ void __DEFAULT_FN_ATTRS
-_mm128_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
+_mm_mask_compressstoreu_epi16(void *__P, __mmask8 __U, __m128i __D)
{
__builtin_ia32_compressstorehi128_mask ((__v8hi *) __P, (__v8hi) __D,
__U);
}
static __inline__ void __DEFAULT_FN_ATTRS
-_mm128_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
+_mm_mask_compressstoreu_epi8(void *__P, __mmask16 __U, __m128i __D)
{
__builtin_ia32_compressstoreqi128_mask ((__v16qi *) __P, (__v16qi) __D,
__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
+_mm_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
(__v8hi) __S,
@@ -91,15 +86,15 @@ _mm128_mask_expand_epi16(__m128i __S, __mmask8 __U, __m128i __D)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expand_epi16(__mmask8 __U, __m128i __D)
+_mm_maskz_expand_epi16(__mmask8 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandhi128_mask ((__v8hi) __D,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
+_mm_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
(__v16qi) __S,
@@ -107,15 +102,15 @@ _mm128_mask_expand_epi8(__m128i __S, __mmask16 __U, __m128i __D)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expand_epi8(__mmask16 __U, __m128i __D)
+_mm_maskz_expand_epi8(__mmask16 __U, __m128i __D)
{
return (__m128i) __builtin_ia32_expandqi128_mask ((__v16qi) __D,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
+_mm_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
(__v8hi) __S,
@@ -123,15 +118,15 @@ _mm128_mask_expandloadu_epi16(__m128i __S, __mmask8 __U, void const *__P)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
+_mm_maskz_expandloadu_epi16(__mmask8 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadhi128_mask ((const __v8hi *)__P,
- (__v8hi) _mm128_setzero_hi(),
+ (__v8hi) _mm_setzero_si128(),
__U);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
+_mm_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
(__v16qi) __S,
@@ -139,19 +134,13 @@ _mm128_mask_expandloadu_epi8(__m128i __S, __mmask16 __U, void const *__P)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
+_mm_maskz_expandloadu_epi8(__mmask16 __U, void const *__P)
{
return (__m128i) __builtin_ia32_expandloadqi128_mask ((const __v16qi *)__P,
- (__v16qi) _mm128_setzero_hi(),
+ (__v16qi) _mm_setzero_si128(),
__U);
}
-static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_setzero_hi(void) {
- return (__m256i)(__v16hi){ 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0 };
-}
-
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_compress_epi16(__m256i __S, __mmask16 __U, __m256i __D)
{
@@ -164,7 +153,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_compress_epi16(__mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compresshi256_mask ((__v16hi) __D,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
@@ -180,7 +169,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_compress_epi8(__mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_compressqi256_mask ((__v32qi) __D,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
@@ -210,7 +199,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_expand_epi16(__mmask16 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandhi256_mask ((__v16hi) __D,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
@@ -226,7 +215,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_expand_epi8(__mmask32 __U, __m256i __D)
{
return (__m256i) __builtin_ia32_expandqi256_mask ((__v32qi) __D,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
@@ -242,7 +231,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_expandloadu_epi16(__mmask16 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadhi256_mask ((const __v16hi *)__P,
- (__v16hi) _mm256_setzero_hi(),
+ (__v16hi) _mm256_setzero_si256(),
__U);
}
@@ -258,7 +247,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
{
return (__m256i) __builtin_ia32_expandloadqi256_mask ((const __v32qi *)__P,
- (__v32qi) _mm256_setzero_hi(),
+ (__v32qi) _mm256_setzero_si256(),
__U);
}
@@ -270,23 +259,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask8)(U)); })
#define _mm256_maskz_shldi_epi64(U, A, B, I) \
- _mm256_mask_shldi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shldi_epi64(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shldi_epi64(A, B, I) \
_mm256_mask_shldi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shldi_epi64(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshldq128_mask((__v2di)(A), \
(__v2di)(B), \
(int)(I), \
(__v2di)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shldi_epi64(U, A, B, I) \
- _mm128_mask_shldi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi64(U, A, B, I) \
+ _mm_mask_shldi_epi64(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shldi_epi64(A, B, I) \
- _mm128_mask_shldi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi64(A, B, I) \
+ _mm_mask_shldi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
#define _mm256_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
(__m256i)__builtin_ia32_vpshldd256_mask((__v8si)(A), \
@@ -296,23 +285,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask8)(U)); })
#define _mm256_maskz_shldi_epi32(U, A, B, I) \
- _mm256_mask_shldi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shldi_epi32(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shldi_epi32(A, B, I) \
_mm256_mask_shldi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shldi_epi32(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshldd128_mask((__v4si)(A), \
(__v4si)(B), \
(int)(I), \
(__v4si)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shldi_epi32(U, A, B, I) \
- _mm128_mask_shldi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi32(U, A, B, I) \
+ _mm_mask_shldi_epi32(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shldi_epi32(A, B, I) \
- _mm128_mask_shldi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi32(A, B, I) \
+ _mm_mask_shldi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
#define _mm256_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
(__m256i)__builtin_ia32_vpshldw256_mask((__v16hi)(A), \
@@ -322,23 +311,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask16)(U)); })
#define _mm256_maskz_shldi_epi16(U, A, B, I) \
- _mm256_mask_shldi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shldi_epi16(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shldi_epi16(A, B, I) \
_mm256_mask_shldi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shldi_epi16(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshldw128_mask((__v8hi)(A), \
(__v8hi)(B), \
(int)(I), \
(__v8hi)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shldi_epi16(U, A, B, I) \
- _mm128_mask_shldi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shldi_epi16(U, A, B, I) \
+ _mm_mask_shldi_epi16(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shldi_epi16(A, B, I) \
- _mm128_mask_shldi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shldi_epi16(A, B, I) \
+ _mm_mask_shldi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
#define _mm256_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
(__m256i)__builtin_ia32_vpshrdq256_mask((__v4di)(A), \
@@ -348,23 +337,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask8)(U)); })
#define _mm256_maskz_shrdi_epi64(U, A, B, I) \
- _mm256_mask_shrdi_epi64(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shrdi_epi64(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shrdi_epi64(A, B, I) \
_mm256_mask_shrdi_epi64(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shrdi_epi64(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshrdq128_mask((__v2di)(A), \
(__v2di)(B), \
(int)(I), \
(__v2di)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shrdi_epi64(U, A, B, I) \
- _mm128_mask_shrdi_epi64(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shrdi_epi64(U, A, B, I) \
+ _mm_mask_shrdi_epi64(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shrdi_epi64(A, B, I) \
- _mm128_mask_shrdi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shrdi_epi64(A, B, I) \
+ _mm_mask_shrdi_epi64(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
#define _mm256_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
(__m256i)__builtin_ia32_vpshrdd256_mask((__v8si)(A), \
@@ -374,23 +363,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask8)(U)); })
#define _mm256_maskz_shrdi_epi32(U, A, B, I) \
- _mm256_mask_shrdi_epi32(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shrdi_epi32(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shrdi_epi32(A, B, I) \
_mm256_mask_shrdi_epi32(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shrdi_epi32(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshrdd128_mask((__v4si)(A), \
(__v4si)(B), \
(int)(I), \
(__v4si)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shrdi_epi32(U, A, B, I) \
- _mm128_mask_shrdi_epi32(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shrdi_epi32(U, A, B, I) \
+ _mm_mask_shrdi_epi32(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shrdi_epi32(A, B, I) \
- _mm128_mask_shrdi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shrdi_epi32(A, B, I) \
+ _mm_mask_shrdi_epi32(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
#define _mm256_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
(__m256i)__builtin_ia32_vpshrdw256_mask((__v16hi)(A), \
@@ -400,23 +389,23 @@ _mm256_maskz_expandloadu_epi8(__mmask32 __U, void const *__P)
(__mmask16)(U)); })
#define _mm256_maskz_shrdi_epi16(U, A, B, I) \
- _mm256_mask_shrdi_epi16(_mm256_setzero_hi(), (U), (A), (B), (I))
+ _mm256_mask_shrdi_epi16(_mm256_setzero_si256(), (U), (A), (B), (I))
#define _mm256_shrdi_epi16(A, B, I) \
_mm256_mask_shrdi_epi16(_mm256_undefined_si256(), (__mmask8)(-1), (A), (B), (I))
-#define _mm128_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
+#define _mm_mask_shrdi_epi16(S, U, A, B, I) __extension__ ({ \
(__m128i)__builtin_ia32_vpshrdw128_mask((__v8hi)(A), \
(__v8hi)(B), \
(int)(I), \
(__v8hi)(S), \
(__mmask8)(U)); })
-#define _mm128_maskz_shrdi_epi16(U, A, B, I) \
- _mm128_mask_shrdi_epi16(_mm128_setzero_hi(), (U), (A), (B), (I))
+#define _mm_maskz_shrdi_epi16(U, A, B, I) \
+ _mm_mask_shrdi_epi16(_mm_setzero_si128(), (U), (A), (B), (I))
-#define _mm128_shrdi_epi16(A, B, I) \
- _mm128_mask_shrdi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
+#define _mm_shrdi_epi16(A, B, I) \
+ _mm_mask_shrdi_epi16(_mm_undefined_si128(), (__mmask8)(-1), (A), (B), (I))
static __inline__ __m256i __DEFAULT_FN_ATTRS
_mm256_mask_shldv_epi64(__m256i __S, __mmask8 __U, __m256i __A, __m256i __B)
@@ -446,7 +435,7 @@ _mm256_shldv_epi64(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -455,7 +444,7 @@ _mm128_mask_shldv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_maskz ((__v2di) __S,
(__v2di) __A,
@@ -464,7 +453,7 @@ _mm128_maskz_shldv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
+_mm_shldv_epi64(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -500,7 +489,7 @@ _mm256_shldv_epi32(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -509,7 +498,7 @@ _mm128_mask_shldv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_maskz ((__v4si) __S,
(__v4si) __A,
@@ -518,7 +507,7 @@ _mm128_maskz_shldv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
+_mm_shldv_epi32(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -554,7 +543,7 @@ _mm256_shldv_epi16(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -563,7 +552,7 @@ _mm128_mask_shldv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_maskz ((__v8hi) __S,
(__v8hi) __A,
@@ -572,7 +561,7 @@ _mm128_maskz_shldv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
+_mm_shldv_epi16(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshldvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -608,7 +597,7 @@ _mm256_shrdv_epi64(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -617,7 +606,7 @@ _mm128_mask_shrdv_epi64(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_maskz ((__v2di) __S,
(__v2di) __A,
@@ -626,7 +615,7 @@ _mm128_maskz_shrdv_epi64(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
+_mm_shrdv_epi64(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvq128_mask ((__v2di) __S,
(__v2di) __A,
@@ -662,7 +651,7 @@ _mm256_shrdv_epi32(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -671,7 +660,7 @@ _mm128_mask_shrdv_epi32(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_maskz ((__v4si) __S,
(__v4si) __A,
@@ -680,7 +669,7 @@ _mm128_maskz_shrdv_epi32(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
+_mm_shrdv_epi32(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvd128_mask ((__v4si) __S,
(__v4si) __A,
@@ -716,7 +705,7 @@ _mm256_shrdv_epi16(__m256i __S, __m256i __A, __m256i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
+_mm_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
(__v8hi) __A,
@@ -725,7 +714,7 @@ _mm128_mask_shrdv_epi16(__m128i __S, __mmask8 __U, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
+_mm_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_maskz ((__v8hi) __S,
(__v8hi) __A,
@@ -734,7 +723,7 @@ _mm128_maskz_shrdv_epi16(__mmask8 __U, __m128i __S, __m128i __A, __m128i __B)
}
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm128_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
+_mm_shrdv_epi16(__m128i __S, __m128i __A, __m128i __B)
{
return (__m128i) __builtin_ia32_vpshrdvw128_mask ((__v8hi) __S,
(__v8hi) __A,