// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 32
$assert BATCH_TILE % 32 == 0
$SIMD_TILE = BATCH_TILE // 32
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__avx2_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

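  // Load the 256-entry lookup table as 16 sub-tables of 16 entries each, and
  // broadcast each sub-table into both 128-bit lanes so that VPSHUFB can index
  // it independently per lane. Note that _mm_load_si128 requires the table
  // pointer t to be 16-byte aligned.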
  const __m256i vt0 = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) t));
  $for T in range(1, 16):
    const __m256i vt${ABC[T]} = _mm256_broadcastsi128_si256(_mm_load_si128((const __m128i*) (t + ${T * 16})));

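  // Precompute XOR-differential tables: the lookup loop below XOR-accumulates
  // one VPSHUFB result per table, and the tables are constructed so that for
  // an input byte x the contributions telescope to exactly the entry t[x].
  // Tables 8..15 additionally fold in vtable[T-8] to cancel the extra terms
  // picked up by inputs x >= 128, whose shifted indices wrap around the
  // signed-byte range during the first eight steps.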
  const __m256i vtable0 = vt0;
  $for T in range(1, 8):
    const __m256i vtable${ABC[T]} = _mm256_xor_si256(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m256i vtable${ABC[T]} = _mm256_xor_si256(_mm256_xor_si256(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});

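  // voffset holds the 16-entry stride between consecutive sub-tables; every
  // lookup step shifts the indices down by this stride.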
  const __m256i voffset = _mm256_set1_epi8(16);
  for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
    __m256i vx0 = _mm256_loadu_si256((const __m256i*) x);
    $for N in range(1, SIMD_TILE):
      __m256i vx${N} = _mm256_loadu_si256((const __m256i*) (x + ${N * 32}));
    x += ${BATCH_TILE};

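    // Initial lookup in sub-table 0: VPSHUFB yields vtable0[x & 15] for bytes
    // with the sign bit clear (x < 128) and 0 for bytes with the sign bit set
    // (x >= 128).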
    $for N in range(SIMD_TILE):
      __m256i vy${N} = _mm256_shuffle_epi8(vtable0, vx${N});

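    // Steps 1-8 shift the indices down with wrapping byte subtraction: bytes
    // that have already passed their own sub-table go negative (sign bit set)
    // and contribute 0, so they no longer disturb the XOR accumulation.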
    $for T in range(1, 9):
      $for N in range(SIMD_TILE):
        vx${N} = _mm256_sub_epi8(vx${N}, voffset);
      $for N in range(SIMD_TILE):
        vy${N} = _mm256_xor_si256(vy${N}, _mm256_shuffle_epi8(vtable${ABC[T]}, vx${N}));

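    // From step 9 on, one more wrapping subtraction could take an
    // already-negative index below -128 and wrap it back to positive, so the
    // remaining steps use saturating subtraction, which clamps at -128 and
    // keeps the sign bit set.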
    $for T in range(9, 16):
      $for N in range(SIMD_TILE):
        vx${N} = _mm256_subs_epi8(vx${N}, voffset);
      $for N in range(SIMD_TILE):
        vy${N} = _mm256_xor_si256(vy${N}, _mm256_shuffle_epi8(vtable${ABC[T]}, vx${N}));

    _mm256_storeu_si256((__m256i*) y, vy0);
    $for N in range(1, SIMD_TILE):
      _mm256_storeu_si256((__m256i*) (y + ${N * 32}), vy${N});
    y += ${BATCH_TILE};
  }
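  // Process the remaining blocks of 16 bytes with the same algorithm, using
  // the lower 128-bit halves of the broadcast tables.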
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
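  // Handle the final 1-15 bytes: a full 16-byte block is loaded (reading up
  // to 15 bytes past the last valid input byte, which the caller must make
  // readable), translated, and then stored piecewise.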
  if XNN_UNLIKELY(n != 0) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(_mm256_castsi256_si128(vtable0), vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, _mm256_castsi256_si128(voffset));
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(_mm256_castsi256_si128(vtable${ABC[T]}), vx));

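    // Store 8, 4, 2, and finally 1 byte according to the set bits of n,
    // shifting the remaining result bytes down after each partial store.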
    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      _mm_storeu_si32(y, vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
    if (n & (2 * sizeof(uint8_t))) {
      _mm_storeu_si16(y, vy);
      vy = _mm_srli_epi32(vy, 16);
      y += 2;
    }
    if (n & (1 * sizeof(uint8_t))) {
      *y = (uint8_t) _mm_extract_epi8(vy, 0);
    }
  }
}