clang API Documentation
Defines

#define _mm256_mpsadbw_epu8(X, Y, M)   __builtin_ia32_mpsadbw256((X), (Y), (M))
#define _mm256_alignr_epi8(a, b, n)
#define _mm256_blend_epi16(V1, V2, M)
#define _mm256_shuffle_epi32(a, imm)
#define _mm256_shufflehi_epi16(a, imm)
#define _mm256_shufflelo_epi16(a, imm)
#define _mm256_slli_si256(a, count)
#define _mm256_srli_si256(a, count)
#define _mm_blend_epi32(V1, V2, M)
#define _mm256_blend_epi32(V1, V2, M)
#define _mm256_permute4x64_pd(V, M)
#define _mm256_permute4x64_epi64(V, M)
#define _mm256_permute2x128_si256(V1, V2, M)
#define _mm256_extracti128_si256(A, O)
#define _mm256_inserti128_si256(V1, V2, O)
#define _mm_mask_i32gather_pd(a, m, i, mask, s)
#define _mm256_mask_i32gather_pd(a, m, i, mask, s)
#define _mm_mask_i64gather_pd(a, m, i, mask, s)
#define _mm256_mask_i64gather_pd(a, m, i, mask, s)
#define _mm_mask_i32gather_ps(a, m, i, mask, s)
#define _mm256_mask_i32gather_ps(a, m, i, mask, s)
#define _mm_mask_i64gather_ps(a, m, i, mask, s)
#define _mm256_mask_i64gather_ps(a, m, i, mask, s)
#define _mm_mask_i32gather_epi32(a, m, i, mask, s)
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s)
#define _mm_mask_i64gather_epi32(a, m, i, mask, s)
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s)
#define _mm_mask_i32gather_epi64(a, m, i, mask, s)
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s)
#define _mm_mask_i64gather_epi64(a, m, i, mask, s)
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s)
#define _mm_i32gather_pd(m, i, s)
#define _mm256_i32gather_pd(m, i, s)
#define _mm_i64gather_pd(m, i, s)
#define _mm256_i64gather_pd(m, i, s)
#define _mm_i32gather_ps(m, i, s)
#define _mm256_i32gather_ps(m, i, s)
#define _mm_i64gather_ps(m, i, s)
#define _mm256_i64gather_ps(m, i, s)
#define _mm_i32gather_epi32(m, i, s)
#define _mm256_i32gather_epi32(m, i, s)
#define _mm_i64gather_epi32(m, i, s)
#define _mm256_i64gather_epi32(m, i, s)
#define _mm_i32gather_epi64(m, i, s)
#define _mm256_i32gather_epi64(m, i, s)
#define _mm_i64gather_epi64(m, i, s)
#define _mm256_i64gather_epi64(m, i, s)
Functions

static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_abs_epi8(__m256i __a)
#define _mm256_alignr_epi8(a, b, n) __extension__ ({ \
    __m256i __a = (a); \
    __m256i __b = (b); \
    (__m256i)__builtin_ia32_palignr256((__v32qi)__a, (__v32qi)__b, (n)); })
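For illustration, a minimal usage sketch (the helper name and the shift count are hypothetical; assumes an AVX2 target, e.g. built with -mavx2). Note that the alignment is performed independently within each 128-bit lane and that n must be a compile-time constant:

    #include <immintrin.h>

    /* Within each 128-bit lane, concatenate the lane of hi above the
       lane of lo and shift the 32-byte pair right by 4 bytes. */
    static __m256i align_by_4(__m256i hi, __m256i lo)
    {
        return _mm256_alignr_epi8(hi, lo, 4);
    }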
#define _mm256_blend_epi16(V1, V2, M) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m256i __V2 = (V2); \
    (__m256i)__builtin_shufflevector((__v16hi)__V1, (__v16hi)__V2, \
                                     (((M) & 0x01) ? 16 : 0), \
                                     (((M) & 0x02) ? 17 : 1), \
                                     (((M) & 0x04) ? 18 : 2), \
                                     (((M) & 0x08) ? 19 : 3), \
                                     (((M) & 0x10) ? 20 : 4), \
                                     (((M) & 0x20) ? 21 : 5), \
                                     (((M) & 0x40) ? 22 : 6), \
                                     (((M) & 0x80) ? 23 : 7), \
                                     (((M) & 0x01) ? 24 : 8), \
                                     (((M) & 0x02) ? 25 : 9), \
                                     (((M) & 0x04) ? 26 : 10), \
                                     (((M) & 0x08) ? 27 : 11), \
                                     (((M) & 0x10) ? 28 : 12), \
                                     (((M) & 0x20) ? 29 : 13), \
                                     (((M) & 0x40) ? 30 : 14), \
                                     (((M) & 0x80) ? 31 : 15)); })
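As the index pattern above shows, only eight mask bits exist, so the same bit selects element k in both 128-bit lanes. A usage sketch (hypothetical helper; assumes AVX2):

    #include <immintrin.h>

    /* Mask 0xAA (binary 10101010) takes odd-indexed 16-bit elements
       from v2 and even-indexed elements from v1, in both lanes. */
    static __m256i interleave_words(__m256i v1, __m256i v2)
    {
        return _mm256_blend_epi16(v1, v2, 0xAA);
    }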
#define _mm256_blend_epi32(V1, V2, M) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m256i __V2 = (V2); \
    (__m256i)__builtin_shufflevector((__v8si)__V1, (__v8si)__V2, \
                                     (((M) & 0x01) ? 8 : 0), \
                                     (((M) & 0x02) ? 9 : 1), \
                                     (((M) & 0x04) ? 10 : 2), \
                                     (((M) & 0x08) ? 11 : 3), \
                                     (((M) & 0x10) ? 12 : 4), \
                                     (((M) & 0x20) ? 13 : 5), \
                                     (((M) & 0x40) ? 14 : 6), \
                                     (((M) & 0x80) ? 15 : 7)); })
#define _mm256_extracti128_si256(A, O) __extension__ ({ \
    __m256i __A = (A); \
    (__m128i)__builtin_ia32_extract128i256(__A, (O)); })
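A minimal sketch (hypothetical helper; assumes AVX2). O must be an immediate: 0 selects the low 128 bits, 1 the high:

    #include <immintrin.h>

    /* Extract the upper 128-bit half of a 256-bit integer vector. */
    static __m128i high_half(__m256i v)
    {
        return _mm256_extracti128_si256(v, 1);
    }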
#define _mm256_i32gather_epi32(m, i, s) __extension__ ({ \
    int const *__m = (m); \
    __m256i __i = (i); \
    (__m256i)__builtin_ia32_gatherd_d256((__v8si)_mm256_setzero_si256(), \
                                         (const __v8si *)__m, (__v8si)__i, \
                                         (__v8si)_mm256_set1_epi32(-1), (s)); })
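As the expansion shows, the unmasked form passes a zero source and an all-ones mask to the builtin, so every element is loaded. A usage sketch (hypothetical helper; the scale argument is in bytes and must be a compile-time constant of 1, 2, 4, or 8):

    #include <immintrin.h>

    /* Loads base[idx[k]] for k = 0..7; scale 4 makes each 32-bit
       index count in int-sized steps. */
    static __m256i gather_ints(const int *base, __m256i idx)
    {
        return _mm256_i32gather_epi32(base, idx, 4);
    }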
#define _mm256_i32gather_epi64(m, i, s) __extension__ ({ \
    long long const *__m = (m); \
    __m128i __i = (i); \
    (__m256i)__builtin_ia32_gatherd_q256((__v4di)_mm256_setzero_si256(), \
                                         (const __v4di *)__m, (__v4si)__i, \
                                         (__v4di)_mm256_set1_epi64x(-1), (s)); })
#define _mm256_i32gather_pd(m, i, s) __extension__ ({ \
    double const *__m = (m); \
    __m128i __i = (i); \
    (__m256d)__builtin_ia32_gatherd_pd256((__v4df)_mm256_setzero_pd(), \
                                          (const __v4df *)__m, (__v4si)__i, \
                                          (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
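The same pattern at double precision: four 32-bit indices (an __m128i) address four doubles. A sketch (hypothetical helper; assumes AVX2):

    #include <immintrin.h>

    /* Loads base[idx[k]] for k = 0..3; scale 8 steps in doubles. */
    static __m256d gather_doubles(const double *base, __m128i idx)
    {
        return _mm256_i32gather_pd(base, idx, 8);
    }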
#define _mm256_i32gather_ps(m, i, s) __extension__ ({ \
    float const *__m = (m); \
    __m256i __i = (i); \
    (__m256)__builtin_ia32_gatherd_ps256((__v8sf)_mm256_setzero_ps(), \
                                         (const __v8sf *)__m, (__v8si)__i, \
                                         (__v8sf)_mm256_set1_ps((float)(int)-1), (s)); })
#define _mm256_i64gather_epi32(m, i, s) __extension__ ({ \
    int const *__m = (m); \
    __m256i __i = (i); \
    (__m128i)__builtin_ia32_gatherq_d256((__v4si)_mm_setzero_si128(), \
                                         (const __v4si *)__m, (__v4di)__i, \
                                         (__v4si)_mm_set1_epi32(-1), (s)); })
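With 64-bit indices and 32-bit data, only four elements fit, so the result narrows to __m128i even though the index vector is 256 bits wide. A sketch (hypothetical helper):

    #include <immintrin.h>

    /* Four 64-bit indices select four ints; the result is 128 bits. */
    static __m128i gather_ints_q(const int *base, __m256i idx)
    {
        return _mm256_i64gather_epi32(base, idx, 4);
    }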
#define _mm256_i64gather_epi64(m, i, s) __extension__ ({ \
    long long const *__m = (m); \
    __m256i __i = (i); \
    (__m256i)__builtin_ia32_gatherq_q256((__v4di)_mm256_setzero_si256(), \
                                         (const __v4di *)__m, (__v4di)__i, \
                                         (__v4di)_mm256_set1_epi64x(-1), (s)); })
#define _mm256_i64gather_pd(m, i, s) __extension__ ({ \
    double const *__m = (m); \
    __m256i __i = (i); \
    (__m256d)__builtin_ia32_gatherq_pd256((__v4df)_mm256_setzero_pd(), \
                                          (const __v4df *)__m, (__v4di)__i, \
                                          (__v4df)_mm256_set1_pd((double)(long long int)-1), (s)); })
#define _mm256_i64gather_ps(m, i, s) __extension__ ({ \
    float const *__m = (m); \
    __m256i __i = (i); \
    (__m128)__builtin_ia32_gatherq_ps256((__v4sf)_mm_setzero_ps(), \
                                         (const __v4sf *)__m, (__v4di)__i, \
                                         (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
#define _mm256_inserti128_si256(V1, V2, O) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m128i __V2 = (V2); \
    (__m256i)__builtin_ia32_insert128i256(__V1, __V2, (O)); })
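The counterpart to the extract macro above. A sketch (hypothetical helper; O is an immediate, 0 = low half, 1 = high half):

    #include <immintrin.h>

    /* Replace the high 128 bits of dst with part. */
    static __m256i with_high(__m256i dst, __m128i part)
    {
        return _mm256_inserti128_si256(dst, part, 1);
    }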
#define _mm256_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
    __m256i __a = (a); \
    int const *__m = (m); \
    __m256i __i = (i); \
    __m256i __mask = (mask); \
    (__m256i)__builtin_ia32_gatherd_d256((__v8si)__a, (const __v8si *)__m, \
                                         (__v8si)__i, (__v8si)__mask, (s)); })
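In the masked forms, an element is loaded only when the sign bit of the corresponding mask element is set; otherwise the element of a passes through unchanged. A sketch (hypothetical helper), applicable in spirit to all the mask_*gather macros below:

    #include <immintrin.h>

    /* Loads base[idx[k]] where mask[k] has its top bit set; other
       lanes keep their value from fallback. */
    static __m256i gather_where(__m256i fallback, const int *base,
                                __m256i idx, __m256i mask)
    {
        return _mm256_mask_i32gather_epi32(fallback, base, idx, mask, 4);
    }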
#define _mm256_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
    __m256i __a = (a); \
    long long const *__m = (m); \
    __m128i __i = (i); \
    __m256i __mask = (mask); \
    (__m256i)__builtin_ia32_gatherd_q256((__v4di)__a, (const __v4di *)__m, \
                                         (__v4si)__i, (__v4di)__mask, (s)); })
#define _mm256_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
    __m256d __a = (a); \
    double const *__m = (m); \
    __m128i __i = (i); \
    __m256d __mask = (mask); \
    (__m256d)__builtin_ia32_gatherd_pd256((__v4df)__a, (const __v4df *)__m, \
                                          (__v4si)__i, (__v4df)__mask, (s)); })
#define _mm256_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
    __m256 __a = (a); \
    float const *__m = (m); \
    __m256i __i = (i); \
    __m256 __mask = (mask); \
    (__m256)__builtin_ia32_gatherd_ps256((__v8sf)__a, (const __v8sf *)__m, \
                                         (__v8si)__i, (__v8sf)__mask, (s)); })
#define _mm256_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
    __m128i __a = (a); \
    int const *__m = (m); \
    __m256i __i = (i); \
    __m128i __mask = (mask); \
    (__m128i)__builtin_ia32_gatherq_d256((__v4si)__a, (const __v4si *)__m, \
                                         (__v4di)__i, (__v4si)__mask, (s)); })
#define _mm256_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
    __m256i __a = (a); \
    long long const *__m = (m); \
    __m256i __i = (i); \
    __m256i __mask = (mask); \
    (__m256i)__builtin_ia32_gatherq_q256((__v4di)__a, (const __v4di *)__m, \
                                         (__v4di)__i, (__v4di)__mask, (s)); })
#define _mm256_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
    __m256d __a = (a); \
    double const *__m = (m); \
    __m256i __i = (i); \
    __m256d __mask = (mask); \
    (__m256d)__builtin_ia32_gatherq_pd256((__v4df)__a, (const __v4df *)__m, \
                                          (__v4di)__i, (__v4df)__mask, (s)); })
#define _mm256_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
    __m128 __a = (a); \
    float const *__m = (m); \
    __m256i __i = (i); \
    __m128 __mask = (mask); \
    (__m128)__builtin_ia32_gatherq_ps256((__v4sf)__a, (const __v4sf *)__m, \
                                         (__v4di)__i, (__v4sf)__mask, (s)); })
#define _mm256_mpsadbw_epu8(X, Y, M)   __builtin_ia32_mpsadbw256((X), (Y), (M))
Definition at line 32 of file avx2intrin.h.
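A usage sketch (hypothetical helper; assumes AVX2). Per 128-bit lane, the instruction produces eight 16-bit sums of absolute differences over sliding 4-byte windows; the immediate selects the starting offsets in both operands (low three bits for the low lane, next three for the high lane):

    #include <immintrin.h>

    /* With immediate 0, each lane compares the 4 bytes of y at
       offset 0 against eight overlapping 4-byte windows of x. */
    static __m256i sad_windows(__m256i x, __m256i y)
    {
        return _mm256_mpsadbw_epu8(x, y, 0);
    }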
#define _mm256_permute2x128_si256(V1, V2, M) __extension__ ({ \
    __m256i __V1 = (V1); \
    __m256i __V2 = (V2); \
    (__m256i)__builtin_ia32_permti256(__V1, __V2, (M)); })
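A sketch (hypothetical helper). Bits [1:0] of M pick the 128-bit source half for the low result half and bits [5:4] for the high result half, where selectors 0/1 name the halves of V1 and 2/3 the halves of V2:

    #include <immintrin.h>

    /* 0x01 writes v's high half low and v's low half high,
       i.e. it swaps the two 128-bit halves. */
    static __m256i swap_halves(__m256i v)
    {
        return _mm256_permute2x128_si256(v, v, 0x01);
    }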
#define _mm256_permute4x64_epi64(V, M) __extension__ ({ \
    __m256i __V = (V); \
    (__m256i)__builtin_shufflevector((__v4di)__V, (__v4di)_mm256_setzero_si256(), \
                                     (M) & 0x3, ((M) & 0xc) >> 2, \
                                     ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
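Each 2-bit field of M picks the source element for one destination position, and unlike the lane-local shuffles this permute crosses the 128-bit lane boundary. A sketch (hypothetical helper):

    #include <immintrin.h>

    /* 0x1B encodes the fields 3,2,1,0 (lowest field first),
       reversing the four 64-bit elements. */
    static __m256i reverse_qwords(__m256i v)
    {
        return _mm256_permute4x64_epi64(v, 0x1B);
    }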
#define _mm256_permute4x64_pd(V, M) __extension__ ({ \
    __m256d __V = (V); \
    (__m256d)__builtin_shufflevector((__v4df)__V, (__v4df)_mm256_setzero_pd(), \
                                     (M) & 0x3, ((M) & 0xc) >> 2, \
                                     ((M) & 0x30) >> 4, ((M) & 0xc0) >> 6); })
#define _mm256_shuffle_epi32(a, imm) __extension__ ({ \
    __m256i __a = (a); \
    (__m256i)__builtin_shufflevector((__v8si)__a, (__v8si)_mm256_set1_epi32(0), \
                                     (imm) & 0x3, ((imm) & 0xc) >> 2, \
                                     ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
                                     4 + (((imm) & 0x03) >> 0), \
                                     4 + (((imm) & 0x0c) >> 2), \
                                     4 + (((imm) & 0x30) >> 4), \
                                     4 + (((imm) & 0xc0) >> 6)); })
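The same 8-bit control is applied to each 128-bit lane, as the repeated index expressions above show. A sketch (hypothetical helper):

    #include <immintrin.h>

    /* Control 0x00 replicates element 0 of each lane into all
       four positions of that lane. */
    static __m256i splat_low_dwords(__m256i v)
    {
        return _mm256_shuffle_epi32(v, 0x00);
    }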
#define _mm256_shufflehi_epi16(a, imm) __extension__ ({ \
    __m256i __a = (a); \
    (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
                                     0, 1, 2, 3, \
                                     4 + (((imm) & 0x03) >> 0), \
                                     4 + (((imm) & 0x0c) >> 2), \
                                     4 + (((imm) & 0x30) >> 4), \
                                     4 + (((imm) & 0xc0) >> 6), \
                                     8, 9, 10, 11, \
                                     12 + (((imm) & 0x03) >> 0), \
                                     12 + (((imm) & 0x0c) >> 2), \
                                     12 + (((imm) & 0x30) >> 4), \
                                     12 + (((imm) & 0xc0) >> 6)); })
#define _mm256_shufflelo_epi16(a, imm) __extension__ ({ \
    __m256i __a = (a); \
    (__m256i)__builtin_shufflevector((__v16hi)__a, (__v16hi)_mm256_set1_epi16(0), \
                                     (imm) & 0x3, ((imm) & 0xc) >> 2, \
                                     ((imm) & 0x30) >> 4, ((imm) & 0xc0) >> 6, \
                                     4, 5, 6, 7, \
                                     8 + (((imm) & 0x03) >> 0), \
                                     8 + (((imm) & 0x0c) >> 2), \
                                     8 + (((imm) & 0x30) >> 4), \
                                     8 + (((imm) & 0xc0) >> 6), \
                                     12, 13, 14, 15); })
#define _mm256_slli_si256(a, count) __extension__ ({ \
    __m256i __a = (a); \
    (__m256i)__builtin_ia32_pslldqi256(__a, (count)*8); })
#define _mm256_srli_si256(a, count) __extension__ ({ \
    __m256i __a = (a); \
    (__m256i)__builtin_ia32_psrldqi256(__a, (count)*8); })
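For both byte-shift macros the count is in bytes (the expansion multiplies by 8 because the underlying builtin takes a bit count), and the shift is performed independently within each 128-bit lane rather than across the whole 256-bit register. A sketch (hypothetical helper):

    #include <immintrin.h>

    /* Shifts each 128-bit lane right by 2 bytes; bytes do not
       cross the lane boundary. */
    static __m256i lane_shift_right_2(__m256i v)
    {
        return _mm256_srli_si256(v, 2);
    }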
#define _mm_blend_epi32(V1, V2, M) __extension__ ({ \
    __m128i __V1 = (V1); \
    __m128i __V2 = (V2); \
    (__m128i)__builtin_shufflevector((__v4si)__V1, (__v4si)__V2, \
                                     (((M) & 0x01) ? 4 : 0), \
                                     (((M) & 0x02) ? 5 : 1), \
                                     (((M) & 0x04) ? 6 : 2), \
                                     (((M) & 0x08) ? 7 : 3)); })
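The 128-bit variant uses only the low four mask bits, with bit k selecting element k from V2. A sketch (hypothetical helper):

    #include <immintrin.h>

    /* Mask 0x1 takes element 0 from repl and elements 1..3 from v. */
    static __m128i replace_lane0(__m128i v, __m128i repl)
    {
        return _mm_blend_epi32(v, repl, 0x1);
    }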
#define _mm_i32gather_epi32(m, i, s) __extension__ ({ \
    int const *__m = (m); \
    __m128i __i = (i); \
    (__m128i)__builtin_ia32_gatherd_d((__v4si)_mm_setzero_si128(), \
                                      (const __v4si *)__m, (__v4si)__i, \
                                      (__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm_i32gather_epi64(m, i, s) __extension__ ({ \
    long long const *__m = (m); \
    __m128i __i = (i); \
    (__m128i)__builtin_ia32_gatherd_q((__v2di)_mm_setzero_si128(), \
                                      (const __v2di *)__m, (__v4si)__i, \
                                      (__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm_i32gather_pd(m, i, s) __extension__ ({ \
    double const *__m = (m); \
    __m128i __i = (i); \
    (__m128d)__builtin_ia32_gatherd_pd((__v2df)_mm_setzero_pd(), \
                                       (const __v2df *)__m, (__v4si)__i, \
                                       (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
#define _mm_i32gather_ps(m, i, s) __extension__ ({ \
    float const *__m = (m); \
    __m128i __i = (i); \
    (__m128)__builtin_ia32_gatherd_ps((__v4sf)_mm_setzero_ps(), \
                                      (const __v4sf *)__m, (__v4si)__i, \
                                      (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
#define _mm_i64gather_epi32(m, i, s) __extension__ ({ \
    int const *__m = (m); \
    __m128i __i = (i); \
    (__m128i)__builtin_ia32_gatherq_d((__v4si)_mm_setzero_si128(), \
                                      (const __v4si *)__m, (__v2di)__i, \
                                      (__v4si)_mm_set1_epi32(-1), (s)); })
#define _mm_i64gather_epi64(m, i, s) __extension__ ({ \
    long long const *__m = (m); \
    __m128i __i = (i); \
    (__m128i)__builtin_ia32_gatherq_q((__v2di)_mm_setzero_si128(), \
                                      (const __v2di *)__m, (__v2di)__i, \
                                      (__v2di)_mm_set1_epi64x(-1), (s)); })
#define _mm_i64gather_pd(m, i, s) __extension__ ({ \
    double const *__m = (m); \
    __m128i __i = (i); \
    (__m128d)__builtin_ia32_gatherq_pd((__v2df)_mm_setzero_pd(), \
                                       (const __v2df *)__m, (__v2di)__i, \
                                       (__v2df)_mm_set1_pd((double)(long long int)-1), (s)); })
#define _mm_i64gather_ps(m, i, s) __extension__ ({ \
    float const *__m = (m); \
    __m128i __i = (i); \
    (__m128)__builtin_ia32_gatherq_ps((__v4sf)_mm_setzero_ps(), \
                                      (const __v4sf *)__m, (__v2di)__i, \
                                      (__v4sf)_mm_set1_ps((float)(int)-1), (s)); })
#define _mm_mask_i32gather_epi32(a, m, i, mask, s) __extension__ ({ \
    __m128i __a = (a); \
    int const *__m = (m); \
    __m128i __i = (i); \
    __m128i __mask = (mask); \
    (__m128i)__builtin_ia32_gatherd_d((__v4si)__a, (const __v4si *)__m, \
                                      (__v4si)__i, (__v4si)__mask, (s)); })
#define _mm_mask_i32gather_epi64(a, m, i, mask, s) __extension__ ({ \
    __m128i __a = (a); \
    long long const *__m = (m); \
    __m128i __i = (i); \
    __m128i __mask = (mask); \
    (__m128i)__builtin_ia32_gatherd_q((__v2di)__a, (const __v2di *)__m, \
                                      (__v4si)__i, (__v2di)__mask, (s)); })
#define _mm_mask_i32gather_pd(a, m, i, mask, s) __extension__ ({ \
    __m128d __a = (a); \
    double const *__m = (m); \
    __m128i __i = (i); \
    __m128d __mask = (mask); \
    (__m128d)__builtin_ia32_gatherd_pd((__v2df)__a, (const __v2df *)__m, \
                                       (__v4si)__i, (__v2df)__mask, (s)); })
#define _mm_mask_i32gather_ps(a, m, i, mask, s) __extension__ ({ \
    __m128 __a = (a); \
    float const *__m = (m); \
    __m128i __i = (i); \
    __m128 __mask = (mask); \
    (__m128)__builtin_ia32_gatherd_ps((__v4sf)__a, (const __v4sf *)__m, \
                                      (__v4si)__i, (__v4sf)__mask, (s)); })
#define _mm_mask_i64gather_epi32(a, m, i, mask, s) __extension__ ({ \
    __m128i __a = (a); \
    int const *__m = (m); \
    __m128i __i = (i); \
    __m128i __mask = (mask); \
    (__m128i)__builtin_ia32_gatherq_d((__v4si)__a, (const __v4si *)__m, \
                                      (__v2di)__i, (__v4si)__mask, (s)); })
#define _mm_mask_i64gather_epi64(a, m, i, mask, s) __extension__ ({ \
    __m128i __a = (a); \
    long long const *__m = (m); \
    __m128i __i = (i); \
    __m128i __mask = (mask); \
    (__m128i)__builtin_ia32_gatherq_q((__v2di)__a, (const __v2di *)__m, \
                                      (__v2di)__i, (__v2di)__mask, (s)); })
#define _mm_mask_i64gather_pd(a, m, i, mask, s) __extension__ ({ \
    __m128d __a = (a); \
    double const *__m = (m); \
    __m128i __i = (i); \
    __m128d __mask = (mask); \
    (__m128d)__builtin_ia32_gatherq_pd((__v2df)__a, (const __v2df *)__m, \
                                       (__v2di)__i, (__v2df)__mask, (s)); })
#define _mm_mask_i64gather_ps(a, m, i, mask, s) __extension__ ({ \
    __m128 __a = (a); \
    float const *__m = (m); \
    __m128i __i = (i); \
    __m128 __mask = (mask); \
    (__m128)__builtin_ia32_gatherq_ps((__v4sf)__a, (const __v4sf *)__m, \
                                      (__v2di)__i, (__v4sf)__mask, (s)); })
static __inline__ __m256i __attribute__((__always_inline__, __nodebug__)) _mm256_abs_epi8(__m256i __a)   [static]
Definition at line 34 of file avx2intrin.h.