| // Copyright 2022 Google LLC |
| // |
| // This source code is licensed under the BSD-style license found in the |
| // LICENSE file in the root directory of this source tree. |
| |
| $assert BATCH_TILE % 8 == 0 |
| $assert BATCH_TILE >= 8 |
| $assert DIV_ALGO in ["div", "rcp"] |
| $ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ" |
| $SIMD_TILE = BATCH_TILE // 8 |
| #include <assert.h> |
| |
| #include <immintrin.h> |
| |
| #include <xnnpack/common.h> |
| #include <xnnpack/intrinsics-polyfill.h> |
| #include <xnnpack/vunary.h> |
| |
| |
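// Computes sigmoid(x) = 1 / (1 + exp(-x)) over a batch of half-precision elements.
// Inputs are widened to single precision, z := -|x| is formed, exp(z) is approximated with a
// single-step range reduction ("rr1") and a degree-2 polynomial ("p2"), and the quotient
// exp(z) / (1 + exp(z)) is evaluated either with a full division ("div") or the AVX reciprocal
// estimate ("rcp"). Results for x >= 0 are reflected as 1 - f(-x) and converted back to fp16.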
| void xnn_f16_vsigmoid_ukernel__avx2_rr1_p2_${DIV_ALGO}_x${BATCH_TILE}( |
| size_t batch, |
| const void* input, |
| void* output, |
    const union xnn_f16_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
| { |
  assert(batch != 0);
  assert(batch % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);
| |
| const __m256 vsign_mask = _mm256_load_ps(params->avx2_rr1_p2.sign_mask); |
| const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p2.magic_bias); |
| const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p2.log2e); |
| const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p2.minus_ln2); |
| const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p2.c2); |
| const __m256 vc1 = _mm256_load_ps(params->avx2_rr1_p2.c1); |
| const __m256 vone = _mm256_load_ps(params->avx2_rr1_p2.one); |
| const __m256 vdenorm_cutoff = _mm256_load_ps(params->avx2_rr1_p2.denorm_cutoff); |
| |
| const uint16_t* i = (const uint16_t*) input; |
| uint16_t* o = (uint16_t*) output; |
| $if BATCH_TILE > 8: |
| for (; batch >= ${BATCH_TILE} * sizeof(uint16_t); batch -= ${BATCH_TILE} * sizeof(uint16_t)) { |
| const __m256 vx${ABC[0]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); |
| $for N in range(1, SIMD_TILE): |
| const __m256 vx${ABC[N]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i + ${N * 8}))); |
| i += ${BATCH_TILE}; |
| |
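      // z := -|x|: force the sign bit so the exp() approximation only sees non-positive arguments.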
| $for N in range(SIMD_TILE): |
| const __m256 vz${ABC[N]} = _mm256_or_ps(vx${ABC[N]}, vsign_mask); |
| |
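      // n := round(z / ln(2)), computed by adding a large "magic bias" to z * log2(e) so the
      // rounded integer lands in the low mantissa bits of vn.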
| $for N in range(SIMD_TILE): |
| __m256 vn${ABC[N]} = _mm256_fmadd_ps(vz${ABC[N]}, vlog2e, vmagic_bias); |
| |
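      // s := 2^n, built by shifting those low bits of vn into the floating-point exponent field.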
| $for N in range(SIMD_TILE): |
| const __m256 vs${ABC[N]} = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn${ABC[N]}), 23)); |
| |
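      // Subtract the magic bias back out to recover n as a regular float.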
| $for N in range(SIMD_TILE): |
| vn${ABC[N]} = _mm256_sub_ps(vn${ABC[N]}, vmagic_bias); |
| |
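      // t := z - n * ln(2), the reduced argument ("rr1": ln(2) is applied as a single constant).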
| $for N in range(SIMD_TILE): |
| __m256 vt${ABC[N]} = _mm256_fmadd_ps(vn${ABC[N]}, vminus_ln2, vz${ABC[N]}); |
| |
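      // Degree-2 polynomial ("p2"): p := c1 + c2 * t, so that exp(t) ~ 1 + t * p.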
| $for N in range(SIMD_TILE): |
| const __m256 vp${ABC[N]} = _mm256_fmadd_ps(vc2, vt${ABC[N]}, vc1); |
| |
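      // Pre-scale t by s so the reconstruction below is a single FMA per vector.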
| $for N in range(SIMD_TILE): |
| vt${ABC[N]} = _mm256_mul_ps(vt${ABC[N]}, vs${ABC[N]}); |
| |
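      // e := s * (1 + t * p) ~ exp(z).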
| $for N in range(SIMD_TILE): |
| const __m256 ve${ABC[N]} = _mm256_fmadd_ps(vt${ABC[N]}, vp${ABC[N]}, vs${ABC[N]}); |
| |
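      // d := exp(z) + 1, the sigmoid denominator.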
| $for N in range(SIMD_TILE): |
| const __m256 vd${ABC[N]} = _mm256_add_ps(ve${ABC[N]}, vone); |
| |
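      // f := e / d: the "div" variant divides exactly; the "rcp" variant uses the hardware
      // reciprocal estimate without a refinement step, which targets half-precision accuracy.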
| $if DIV_ALGO == "div": |
| $for N in range(SIMD_TILE): |
| __m256 vf${ABC[N]} = _mm256_div_ps(ve${ABC[N]}, vd${ABC[N]}); |
| $else: |
| $for N in range(SIMD_TILE): |
| const __m256 vr${ABC[N]} = _mm256_rcp_ps(vd${ABC[N]}); |
| |
| $for N in range(SIMD_TILE): |
| __m256 vf${ABC[N]} = _mm256_mul_ps(ve${ABC[N]}, vr${ABC[N]}); |
| |
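      // For z below denorm_cutoff the exponential underflows: force the output to +0.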
| $for N in range(SIMD_TILE): |
| vf${ABC[N]} = _mm256_andnot_ps(_mm256_cmp_ps(vz${ABC[N]}, vdenorm_cutoff, _CMP_LT_OS), vf${ABC[N]}); |
| |
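      // sigmoid(x) = 1 - f(-x) for x >= 0: blend on the sign bit of the original x.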
| $for N in range(SIMD_TILE): |
| vf${ABC[N]} = _mm256_blendv_ps(_mm256_sub_ps(vone, vf${ABC[N]}), vf${ABC[N]}, vx${ABC[N]}); |
| |
| _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf${ABC[0]}, _MM_FROUND_NO_EXC)); |
| $for N in range(1, SIMD_TILE): |
| _mm_storeu_si128((__m128i*) (o + ${N * 8}), _mm256_cvtps_ph(vf${ABC[N]}, _MM_FROUND_NO_EXC)); |
| o += ${BATCH_TILE}; |
| } |
| for (; batch >= 8 * sizeof(uint16_t); batch -= 8 * sizeof(uint16_t)) { |
| const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); |
| i += 8; |
| |
| const __m256 vz = _mm256_or_ps(vx, vsign_mask); |
| |
| __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); |
| const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23)); |
| vn = _mm256_sub_ps(vn, vmagic_bias); |
| |
| __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); |
| |
| const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1); |
| vt = _mm256_mul_ps(vt, vs); |
| const __m256 ve = _mm256_fmadd_ps(vt, vp, vs); |
| |
| const __m256 vd = _mm256_add_ps(ve, vone); |
| $if DIV_ALGO == "div": |
| __m256 vf = _mm256_div_ps(ve, vd); |
| $else: |
| const __m256 vr = _mm256_rcp_ps(vd); |
| __m256 vf = _mm256_mul_ps(ve, vr); |
| |
| vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf); |
| vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx); |
| |
| _mm_storeu_si128((__m128i*) o, _mm256_cvtps_ph(vf, _MM_FROUND_NO_EXC)); |
| o += 8; |
| } |
| if XNN_UNLIKELY(batch != 0) { |
| assert(batch >= 1 * sizeof(uint16_t)); |
| assert(batch <= 7 * sizeof(uint16_t)); |
| const __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i)); |
| |
| const __m256 vz = _mm256_or_ps(vx, vsign_mask); |
| |
| __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias); |
| const __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23)); |
| vn = _mm256_sub_ps(vn, vmagic_bias); |
| |
| __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz); |
| |
| const __m256 vp = _mm256_fmadd_ps(vc2, vt, vc1); |
| vt = _mm256_mul_ps(vt, vs); |
| const __m256 ve = _mm256_fmadd_ps(vt, vp, vs); |
| |
| const __m256 vd = _mm256_add_ps(ve, vone); |
| $if DIV_ALGO == "div": |
| __m256 vf = _mm256_div_ps(ve, vd); |
| $else: |
| const __m256 vr = _mm256_rcp_ps(vd); |
| __m256 vf = _mm256_mul_ps(ve, vr); |
| |
| vf = _mm256_andnot_ps(_mm256_cmp_ps(vz, vdenorm_cutoff, _CMP_LT_OS), vf); |
| vf = _mm256_blendv_ps(_mm256_sub_ps(vone, vf), vf, vx); |
| |
| __m128i vh = _mm256_cvtps_ph(vf, _MM_FROUND_NO_EXC); |
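    // Store 4, 2, and/or 1 halfwords according to the remaining length, shifting consumed lanes out of vh.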
| if (batch & (4 * sizeof(uint16_t))) { |
| _mm_storel_epi64((__m128i*) o, vh); |
| vh = _mm_unpackhi_epi64(vh, vh); |
| o += 4; |
| } |
| if (batch & (2 * sizeof(uint16_t))) { |
| _mm_storeu_si32(o, vh); |
| vh = _mm_srli_epi64(vh, 32); |
| o += 2; |
| } |
| if (batch & (1 * sizeof(uint16_t))) { |
| *o = (uint16_t) _mm_extract_epi16(vh, 0); |
| } |
| } |
| } |