// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vunary.h>

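// Applies the HardSwish activation, y = x * min(max(x + 3, 0), 6) / 6, to a
// vector of IEEE FP16 values, using F16C conversions and AVX single-precision
// arithmetic.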
void xnn_f16_vhswish_ukernel__f16c_x${BATCH_TILE}(
    size_t n,
    const void* restrict x_ptr,
    void* restrict y_ptr,
    const union xnn_f16_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);

  const uint16_t* x = (const uint16_t*) x_ptr;
  uint16_t* y = (uint16_t*) y_ptr;

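  // Kernel constants: 1/6 and 3.0 as packed single-precision values; 6.0 and
  // 0.0 as raw FP16 bit patterns, so the [0, 6] clamp can be done with integer
  // min/max directly on the FP16 representation (non-negative FP16 values
  // order the same as their bit patterns, and any negative sum is clamped to +0).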
  const __m256 vsixth = _mm256_load_ps(params->avx.sixth);
  const __m256 vthree = _mm256_load_ps(params->avx.three);
  const __m128i vsix = _mm_load_si128((const __m128i*) params->avx.six);
  const __m128i vzero = _mm_setzero_si128();

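  // Main loop: process tiles of ${BATCH_TILE} elements (8 FP16 values per F16C conversion).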
  $if BATCH_TILE > 8:
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      __m256 vx${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
      $for N in range(8, BATCH_TILE, 8):
        __m256 vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (x + ${N})));
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 8):
        __m128i vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_add_ps(vx${ABC[N:N+8]}, vthree), _MM_FROUND_NO_EXC);
        vx${ABC[N:N+8]} = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx${ABC[N:N+8]}, vsixth), _MM_FROUND_NO_EXC));

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_max_epi16(vacc${ABC[N:N+8]}, vzero);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm_min_epi16(vacc${ABC[N:N+8]}, vsix);

      $for N in range(0, BATCH_TILE, 8):
        vacc${ABC[N:N+8]} = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc${ABC[N:N+8]}), vx${ABC[N:N+8]}), _MM_FROUND_NO_EXC);

      _mm_storeu_si128((__m128i*) y, vacc${ABC[0:8]});
      $for N in range(8, BATCH_TILE, 8):
        _mm_storeu_si128((__m128i*) (y + ${N}), vacc${ABC[N:N+8]});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    x += 8;
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);
    _mm_storeu_si128((__m128i*) y, vacc);
    y += 8;
  }
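  // Handle the final 1-7 elements: compute a full vector of 8 (the kernel may
  // read past the end of the input, which the XNN_OOB_READS annotation allows)
  // and store only the valid lanes.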
  if XNN_UNLIKELY(n != 0) {
    __m256 vx = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) x));
    __m128i vacc = _mm256_cvtps_ph(_mm256_add_ps(vx, vthree), _MM_FROUND_NO_EXC);
    vx = _mm256_cvtph_ps(_mm256_cvtps_ph(_mm256_mul_ps(vx, vsixth), _MM_FROUND_NO_EXC));
    vacc = _mm_max_epi16(vacc, vzero);
    vacc = _mm_min_epi16(vacc, vsix);
    vacc = _mm256_cvtps_ph(_mm256_mul_ps(_mm256_cvtph_ps(vacc), vx), _MM_FROUND_NO_EXC);

    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) y, vacc);
      vacc = _mm_unpackhi_epi64(vacc, vacc);
      y += 4;
    }
    if (n & (2 * sizeof(uint16_t))) {
      _mm_storeu_si32(y, vacc);
      vacc = _mm_srli_epi64(vacc, 32);
      y += 2;
    }
    if (n & (1 * sizeof(uint16_t))) {
      *y = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}