// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
$assert BATCH_TILE >= 64
$assert BATCH_TILE % 64 == 0
$SIMD_TILE = BATCH_TILE // 64
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>

void xnn_x8_lut_ukernel__avx512skx_vpshufb_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);
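
  // Split the 256-byte lookup table into sixteen 16-byte rows and broadcast each
  // row to all four 128-bit lanes, so VPSHUFB can index it independently per lane.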
  const __m512i vt0 = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) t));
  $for T in range(1, 16):
    const __m512i vt${ABC[T]} = _mm512_broadcast_i32x4(_mm_load_si128((const __m128i*) (t + ${T * 16})));
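
  // Pre-XOR adjacent rows (and, for rows 8..15, also the table 8 rows back). Each
  // byte below XOR-accumulates one VPSHUFB lookup per row; out-of-range lookups
  // return 0, and the in-range ones telescope to the entry of the row that
  // actually contains the byte's index.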
  const __m512i vtable0 = vt0;
  $for T in range(1, 8):
    const __m512i vtable${ABC[T]} = _mm512_xor_si512(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m512i vtable${ABC[T]} = _mm512_xor_si512(_mm512_xor_si512(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});
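
  // Subtracted from the indices before every lookup round to step to the next
  // 16-entry row.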
  const __m512i voffset = _mm512_set1_epi8(16);
  $if BATCH_TILE > 64:
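    // Unrolled main loop: ${BATCH_TILE} bytes (${SIMD_TILE} vectors) per iteration.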
    for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
      __m512i vx0 = _mm512_loadu_si512(x);
      $for N in range(1, SIMD_TILE):
        __m512i vx${N} = _mm512_loadu_si512(x + ${N * 64});
      x += ${BATCH_TILE};
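
      // Round 0: look up row 0. VPSHUFB uses only the low 4 index bits (plus the
      // sign bit as a zero flag), so later rounds XOR in corrections for indices
      // that belong to higher rows.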
      $for N in range(SIMD_TILE):
        __m512i vy${N} = _mm512_shuffle_epi8(vtable0, vx${N});

      $for T in range(1, 9):
        $for N in range(SIMD_TILE):
          vx${N} = _mm512_sub_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm512_xor_si512(vy${N}, _mm512_shuffle_epi8(vtable${ABC[T]}, vx${N}));
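
      // Rounds 9..15: switch to saturating subtraction so indices that have
      // already gone negative stay pinned at -128 (sign bit set) instead of
      // wrapping back into the 0..127 range and corrupting the lookup.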
      $for T in range(9, 16):
        $for N in range(SIMD_TILE):
          vx${N} = _mm512_subs_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm512_xor_si512(vy${N}, _mm512_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      _mm512_storeu_si512(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm512_storeu_si512(y + ${N * 64}, vy${N});
      y += ${BATCH_TILE};
    }
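
  // Process full 64-byte blocks, one vector at a time.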
  for (; n >= 64 * sizeof(uint8_t); n -= 64 * sizeof(uint8_t)) {
    __m512i vx = _mm512_loadu_si512(x);
    x += 64;

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm512_sub_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm512_subs_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm512_storeu_si512(y, vy);
    y += 64;
  }
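
  // Handle the final partial block of 1..63 bytes with masked loads and stores.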
  if XNN_UNLIKELY(n != 0) {
    assert(n < 64);
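
    // Build a mask with the low n bits set so only the remaining bytes are read
    // and written.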
    const __mmask64 vmask = _cvtu64_mask64((uint64_t) ((UINT64_C(1) << n) - UINT64_C(1)));
    __m512i vx = _mm512_maskz_loadu_epi8(vmask, x);

    __m512i vy = _mm512_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm512_sub_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm512_subs_epi8(vx, voffset);
      vy = _mm512_xor_si512(vy, _mm512_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm512_mask_storeu_epi8(y, vmask, vy);
  }
}