// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$assert SSE in [1, 2, 4]
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$SSE_HEADER = {1: "xmmintrin.h", 2: "emmintrin.h", 4: "smmintrin.h"}[SSE]
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


$ISA = {1: "sse", 2: "sse2", 4: "sse41"}[SSE]
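// Vectorized LeakyReLU: y[i] = x[i] if x[i] >= 0, and y[i] = slope * x[i] otherwise.
// SSE1 computes max(x, 0) + slope * min(x, 0); SSE2 builds a sign mask with an integer
// compare and blends x with slope * x; SSE4.1 uses _mm_blendv_ps, which selects by the
// sign bit of x directly.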
void xnn_f32_vlrelu_ukernel__${ISA}_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const __m128 vslope = _mm_load_ps(params->sse.slope);
  $if SSE == 1:
    const __m128 vzero = _mm_setzero_ps();
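  // Main loop: process ${BATCH_TILE} elements per iteration.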
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    $if SSE == 1:
      __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
      $for N in range(4, BATCH_TILE, 4):
        __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    $else:
      const __m128 vx${ABC[0:4]} = _mm_loadu_ps(x);
      $for N in range(4, BATCH_TILE, 4):
        const __m128 vx${ABC[N:N+4]} = _mm_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

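    // First pass: for each group of 4 elements, compute the intermediate values for LeakyReLU.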
    $for N in range(0, BATCH_TILE, 4):
      $if SSE == 1:
        __m128 vacc${ABC[N:N+4]} = _mm_max_ps(_mm_setzero_ps(), vx${ABC[N:N+4]});
        vx${ABC[N:N+4]} = _mm_min_ps(vx${ABC[N:N+4]}, vzero);
      $else:
        __m128 vacc${ABC[N:N+4]} = _mm_mul_ps(vx${ABC[N:N+4]}, vslope);
        $if SSE == 2:
          const __m128 vmask${ABC[N:N+4]} = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx${ABC[N:N+4]})));

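    // Second pass: merge so that non-negative lanes keep x and negative lanes get slope * x.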
    $for N in range(0, BATCH_TILE, 4):
      $if SSE == 1:
        vacc${ABC[N:N+4]} = _mm_add_ps(vacc${ABC[N:N+4]}, _mm_mul_ps(vx${ABC[N:N+4]}, vslope));
      $elif SSE == 2:
        vacc${ABC[N:N+4]} = _mm_or_ps(_mm_and_ps(vacc${ABC[N:N+4]}, vmask${ABC[N:N+4]}), _mm_andnot_ps(vmask${ABC[N:N+4]}, vx${ABC[N:N+4]}));
      $elif SSE == 4:
        vacc${ABC[N:N+4]} = _mm_blendv_ps(vx${ABC[N:N+4]}, vacc${ABC[N:N+4]}, vx${ABC[N:N+4]});

    _mm_storeu_ps(y, vacc${ABC[0:4]});
    $for N in range(4, BATCH_TILE, 4):
      _mm_storeu_ps(y + ${N}, vacc${ABC[N:N+4]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 4:
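    // Process trailing full groups of 4 elements.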
    for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
      $if SSE == 1:
        __m128 vx = _mm_loadu_ps(x);
      $else:
        const __m128 vx = _mm_loadu_ps(x);
      x += 4;

      $if SSE == 1:
        __m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
        vx = _mm_min_ps(vx, vzero);
        vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
      $else:
        __m128 vacc = _mm_mul_ps(vx, vslope);
        $if SSE == 2:
          const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
          vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));
        $elif SSE == 4:
          vacc = _mm_blendv_ps(vx, vacc, vx);

      _mm_storeu_ps(y, vacc);
      y += 4;
    }
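  // Handle the final 1-3 elements. The full 4-element load may read up to 3 floats past the
  // end of the input buffer, which is why the kernel is declared XNN_OOB_READS.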
  if XNN_UNLIKELY(n != 0) {
    $if SSE == 1:
      __m128 vx = _mm_loadu_ps(x);

      __m128 vacc = _mm_max_ps(_mm_setzero_ps(), vx);
      vx = _mm_min_ps(vx, vzero);
      vacc = _mm_add_ps(vacc, _mm_mul_ps(vx, vslope));
    $else:
      const __m128 vx = _mm_loadu_ps(x);

      __m128 vacc = _mm_mul_ps(vx, vslope);
      $if SSE == 2:
        const __m128 vmask = _mm_castsi128_ps(_mm_cmpgt_epi32(_mm_setzero_si128(), _mm_castps_si128(vx)));
        vacc = _mm_or_ps(_mm_and_ps(vacc, vmask), _mm_andnot_ps(vmask, vx));
      $elif SSE == 4:
        vacc = _mm_blendv_ps(vx, vacc, vx);

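    // Store the remaining results: the low 2 lanes first (then shift the upper pair down), then a single lane.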
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vacc);
      vacc = _mm_movehl_ps(vacc, vacc);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vacc);
    }
  }
}