// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
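$assert DATATYPE in ["QS8", "QU8"]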
| $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" |
| #include <assert.h> |
| |
| #include <emmintrin.h> |
| |
| #include <xnnpack/common.h> |
#include <xnnpack/vcvt.h>


$XINT8_T = {"QS8": "int8_t", "QU8": "uint8_t"}[DATATYPE]
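// Converts ${DATATYPE} inputs to F32 using SSE2 only: bytes are zero-extended
// to 16 bits, interleaved with a "magic" exponent to form float bit patterns,
// then the magic bias is subtracted and the scale is applied.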
void xnn_${DATATYPE.lower()}_f32_vcvt_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const ${XINT8_T}* x,
    float* y,
    const union xnn_${DATATYPE.lower()}_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(${XINT8_T}) == 0);
  assert(x != NULL);
  assert(y != NULL);

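  // The params hold precomputed constants: a sign mask (QS8 only) that flips
  // the sign bit so signed bytes take the same unsigned path as QU8, a magic
  // exponent/bias pair implementing the integer-to-float trick, and the
  // dequantization scale (the zero point is folded into the magic bias).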
  $if DATATYPE == "QS8":
    const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
  const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
  const __m128 vscale = _mm_load_ps(params->sse2.scale);
  const __m128i vzero = _mm_setzero_si128();
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(${XINT8_T}); n -= ${BATCH_TILE} * sizeof(${XINT8_T})) {
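      // Load ${BATCH_TILE} inputs, 8 bytes at a time.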
      __m128i vx${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) x);
      $for N in range(8, BATCH_TILE, 8):
        __m128i vx${ABC[N:N+8]} = _mm_loadl_epi64((const __m128i*) (x + ${N}));
      x += ${BATCH_TILE};

      $if DATATYPE == "QS8":
        $for N in range(0, BATCH_TILE, 8):
          vx${ABC[N:N+8]} = _mm_xor_si128(vx${ABC[N:N+8]}, vsign_mask);

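      // Zero-extend the 8-bit values to 16 bits.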
      $for N in range(0, BATCH_TILE, 8):
        vx${ABC[N:N+8]} = _mm_unpacklo_epi8(vx${ABC[N:N+8]}, vzero);

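      // Interleave each 16-bit value with the magic exponent, forming the bit
      // pattern of the float (magic number + value).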
      $for N in range(0, BATCH_TILE, 8):
        __m128 vy${ABC[N:N+4]} = _mm_castsi128_ps(_mm_unpacklo_epi16(vx${ABC[N:N+8]}, vmagic_exp));
        __m128 vy${ABC[N+4:N+8]} = _mm_castsi128_ps(_mm_unpackhi_epi16(vx${ABC[N:N+8]}, vmagic_exp));

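      // Subtract the magic bias to recover the integer values as floats.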
      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = _mm_sub_ps(vy${ABC[N:N+4]}, vmagic_bias);

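      // Multiply by the scale to complete the dequantization.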
      $for N in range(0, BATCH_TILE, 4):
        vy${ABC[N:N+4]} = _mm_mul_ps(vy${ABC[N:N+4]}, vscale);

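      // Store the ${BATCH_TILE} results.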
      _mm_storeu_ps(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        _mm_storeu_ps(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
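  // Process full groups of 8 elements.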
  for (; n >= 8 * sizeof(${XINT8_T}); n -= 8 * sizeof(${XINT8_T})) {
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    $if DATATYPE == "QS8":
      vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);
    x += 8;

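    // Widen to float bit patterns via the magic exponent, then rebias and scale.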
    __m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    __m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));

    vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
    vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);

    vy_lo = _mm_mul_ps(vy_lo, vscale);
    vy_hi = _mm_mul_ps(vy_hi, vscale);

    _mm_storeu_ps(y, vy_lo);
    _mm_storeu_ps(y + 4, vy_hi);
    y += 8;
  }
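  // Handle the final 1..7 elements, if any.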
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(${XINT8_T}));
    assert(n <= 7 * sizeof(${XINT8_T}));

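    // The 8-byte load may read past the end of x; this is safe because the
    // kernel is declared XNN_OOB_READS.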
    __m128i vx = _mm_loadl_epi64((const __m128i*) x);
    $if DATATYPE == "QS8":
      vx = _mm_xor_si128(vx, vsign_mask);
    vx = _mm_unpacklo_epi8(vx, vzero);

    __m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
    vy = _mm_sub_ps(vy, vmagic_bias);
    vy = _mm_mul_ps(vy, vscale);

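    // Store 4, 2, and 1 elements based on the bits of n, converting the upper
    // half of vx only if the low 4 results were stored.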
    if (n & (4 * sizeof(${XINT8_T}))) {
      _mm_storeu_ps(y, vy);
      vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
      vy = _mm_sub_ps(vy, vmagic_bias);
      vy = _mm_mul_ps(vy, vscale);
      y += 4;
    }
    if (n & (2 * sizeof(${XINT8_T}))) {
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(${XINT8_T}))) {
      _mm_store_ss(y, vy);
    }
  }
}