// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
$_MM512_CVTEPX8_EPI32 = {"QS8": "_mm512_cvtepi8_epi32", "QU8": "_mm512_cvtepu8_epi32"}[DATATYPE]
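// Convert quantized 8-bit inputs to fp32: y[i] = (x[i] - zero_point) * scale.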
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__avx512skx_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const __m512i vminus_zero_point = _mm512_load_si512(params->avx512.minus_zero_point);
  const __m512 vscale = _mm512_load_ps(params->avx512.scale);
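  // The zero point is stored pre-negated in params, so the _mm512_add_epi32 calls below subtract it.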
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
      __m512i vx${ABC[0:16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) x));
      $for N in range(16, BATCH_TILE, 16):
        __m512i vx${ABC[N:N+16]} = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) (x + ${N})));
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 16):
        vx${ABC[N:N+16]} = _mm512_add_epi32(vx${ABC[N:N+16]}, vminus_zero_point);

      $for N in range(0, BATCH_TILE, 16):
        __m512 vy${ABC[N:N+16]} = _mm512_cvtepi32_ps(vx${ABC[N:N+16]});

      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vscale);

      _mm512_storeu_ps(y, vy${ABC[0:16]});
      $for N in range(16, BATCH_TILE, 16):
        _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
      y += ${BATCH_TILE};
    }
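  // Convert any remaining elements in full 16-element vectors.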
  for (; n >= 16 * sizeof(${XINT8_T}); n -= 16 * sizeof(${XINT8_T})) {
    __m512i vx = ${_MM512_CVTEPX8_EPI32}(_mm_loadu_si128((const __m128i*) x));
    vx = _mm512_add_epi32(vx, vminus_zero_point);
    x += 16;

    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);

    _mm512_storeu_ps(y, vy);
    y += 16;
  }
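  // Tail: convert the final 1..15 elements using a masked load and masked store.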
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 15 * sizeof(${XINT8_T}));

    // Prepare mask for valid elements (depends on n).
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    __m512i vx = ${_MM512_CVTEPX8_EPI32}(_mm_maskz_loadu_epi8(vmask, x));
    vx = _mm512_add_epi32(vx, vminus_zero_point);

    __m512 vy = _mm512_cvtepi32_ps(vx);
    vy = _mm512_mul_ps(vy, vscale);

    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}