// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$SIMD_TILE = BATCH_TILE // 16
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

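// ELU vector (batch) micro-kernel: y := beta * x for x >= 0, alpha * (exp(prescale * x) - 1) for x < 0.
// exp() is approximated with single-step range reduction ("rr1") and a degree-6 polynomial ("p6").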
void xnn_f32_velu_ukernel__avx512f_rr1_p6_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

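  // Broadcast the micro-kernel parameters to all 16 lanes of 512-bit registers.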
  const __m512 vprescale = _mm512_set1_ps(params->avx512_rr1_p6.prescale);
  const __m512 valpha = _mm512_set1_ps(params->avx512_rr1_p6.alpha);
  const __m512 vbeta = _mm512_set1_ps(params->avx512_rr1_p6.beta);
  const __m512 vsat_cutoff = _mm512_set1_ps(params->avx512_rr1_p6.sat_cutoff);
  const __m512 vmagic_bias = _mm512_set1_ps(params->avx512_rr1_p6.magic_bias);
  const __m512 vlog2e = _mm512_set1_ps(params->avx512_rr1_p6.log2e);
  const __m512 vminus_ln2 = _mm512_set1_ps(params->avx512_rr1_p6.minus_ln2);
  const __m512 vc6 = _mm512_set1_ps(params->avx512_rr1_p6.c6);
  const __m512 vc5 = _mm512_set1_ps(params->avx512_rr1_p6.c5);
  const __m512 vc4 = _mm512_set1_ps(params->avx512_rr1_p6.c4);
  const __m512 vc3 = _mm512_set1_ps(params->avx512_rr1_p6.c3);
  const __m512 vc2 = _mm512_set1_ps(params->avx512_rr1_p6.c2);

  $if BATCH_TILE > 16:
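    // Unrolled main loop: process ${BATCH_TILE} elements (${SIMD_TILE} vectors of 16 floats) per iteration.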
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m512 vx0 = _mm512_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        __m512 vx${N} = _mm512_loadu_ps(x + ${N * 16});
      x += ${BATCH_TILE};

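      // Prescale the input and clamp at the saturation cutoff: z := max(sat_cutoff, prescale * x).
      // For z below the cutoff, ELU(x) == -alpha within single precision, so the clamp does not change
      // the result but protects the rest of the computation from underflow.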
      $for N in range(SIMD_TILE):
        const __m512 vz${N} = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx${N}, vprescale));

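      // Compute reduced argument n := round(z / log(2)): adding the magic bias rounds z * log2(e) to an
      // integer stored in the low bits of the mantissa.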
      $for N in range(SIMD_TILE):
        __m512 vn${N} = _mm512_fmadd_ps(vz${N}, vlog2e, vmagic_bias);

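      // Reconstruct s := 2**n by shifting n from the mantissa into the exponent field, then subtract
      // the magic bias to recover n as a float.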
      $for N in range(SIMD_TILE):
        __m512 vs${N} = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn${N}), 23));
        vn${N} = _mm512_sub_ps(vn${N}, vmagic_bias);

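      // Compute reduced argument t := z - n * log(2) ("rr1": single-step range reduction).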
      $for N in range(SIMD_TILE):
        __m512 vt${N} = _mm512_fmadd_ps(vn${N}, vminus_ln2, vz${N});

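      // Evaluate the degree-6 polynomial approximation of exp(t) - 1 on [-log(2)/2, log(2)/2]:
      //   exp(t) - 1 ~ t + t * (c2 * t + c3 * t**2 + c4 * t**3 + c5 * t**4 + c6 * t**5) = t + t * p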
      $for N in range(SIMD_TILE):
        __m512 vp${N} = _mm512_fmadd_ps(vc6, vt${N}, vc5);

      $for N in range(SIMD_TILE):
        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc4);

      $for N in range(SIMD_TILE):
        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc3);

      $for N in range(SIMD_TILE):
        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vc2);

      $for N in range(SIMD_TILE):
        vp${N} = _mm512_mul_ps(vp${N}, vt${N});
        vt${N} = _mm512_mul_ps(vt${N}, vs${N});

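      // Compute alpha * (s - 1) for the final reconstruction below.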
      $for N in range(SIMD_TILE):
        vs${N} = _mm512_fmsub_ps(vs${N}, valpha, valpha);

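      // Reconstruct exp(z) - s = s * (exp(t) - 1) ~ t * s + (t * s) * p.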
      $for N in range(SIMD_TILE):
        vp${N} = _mm512_fmadd_ps(vp${N}, vt${N}, vt${N});

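      // Negative branch: y := alpha * (exp(z) - 1) = alpha * (exp(z) - s) + alpha * (s - 1).
      // Also compute a mask of lanes with non-negative inputs.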
      const __m512 vzero = _mm512_setzero_ps();
      $for N in range(SIMD_TILE):
        __m512 vy${N} = _mm512_fmadd_ps(vp${N}, valpha, vs${N});
        const __mmask16 vsign${N} = _mm512_cmp_ps_mask(vx${N}, vzero, _CMP_NLT_US);

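      // Positive branch: overwrite lanes with x >= 0 with y := beta * x.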
      $for N in range(SIMD_TILE):
        vy${N} = _mm512_mask_mul_ps(vy${N}, vsign${N}, vx${N}, vbeta);

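      // Store the ${BATCH_TILE} results.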
      _mm512_storeu_ps(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm512_storeu_ps(y + ${N * 16}, vy${N});
      y += ${BATCH_TILE};
    }
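  // Process full vectors of 16 elements at a time.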
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    __m512 vx = _mm512_loadu_ps(x);
    x += 16;

    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
    vn = _mm512_sub_ps(vn, vmagic_bias);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
    vp = _mm512_fmadd_ps(vp, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    vt = _mm512_mul_ps(vt, vs);
    vs = _mm512_fmsub_ps(vs, valpha, valpha);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);

    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);

    _mm512_storeu_ps(y, vy);
    y += 16;
  }
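  // Handle the remainder of 1 to 15 elements using masked operations.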
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    __m512 vx = _mm512_maskz_loadu_ps(vmask, x);

    const __m512 vz = _mm512_max_ps(vsat_cutoff, _mm512_mul_ps(vx, vprescale));
    const __mmask16 vsign = _mm512_cmp_ps_mask(vx, _mm512_setzero_ps(), _CMP_NLT_US);

    __m512 vn = _mm512_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m512 vs = _mm512_castsi512_ps(_mm512_slli_epi32(_mm512_castps_si512(vn), 23));
    vn = _mm512_sub_ps(vn, vmagic_bias);

    __m512 vt = _mm512_fmadd_ps(vn, vminus_ln2, vz);

    __m512 vp = _mm512_fmadd_ps(vc6, vt, vc5);
    vp = _mm512_fmadd_ps(vp, vt, vc4);
    vp = _mm512_fmadd_ps(vp, vt, vc3);
    vp = _mm512_fmadd_ps(vp, vt, vc2);
    vp = _mm512_mul_ps(vp, vt);

    vt = _mm512_mul_ps(vt, vs);
    vs = _mm512_fmsub_ps(vs, valpha, valpha);
    vp = _mm512_fmadd_ps(vp, vt, vt);
    __m512 vy = _mm512_fmadd_ps(vp, valpha, vs);

    vy = _mm512_mask_mul_ps(vy, vsign, vx, vbeta);

    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}