// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>

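// vmulcaddc microkernel: for each row, computes o[c] = clamp(i[c] * scale[c] + bias[c], min, max)
// over FP16 data, processing ${ROW_TILE} rows and up to ${CHANNEL_TILE} channels per loop
// iteration using F16C conversions and FMA3 arithmetic.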
void xnn_f16_vmulcaddc_minmax_ukernel_c${CHANNEL_TILE}__fma3_${ROW_TILE}x(
    size_t rows,
    size_t channels,
    const void*restrict input,
    size_t input_stride,
    const void*restrict weights,
    void*restrict output,
    size_t output_stride,
    const union xnn_f16_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(uint16_t) == 0);

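  // One input and one output pointer per row in the ${ROW_TILE}-row tile.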
  const uint16_t* i0 = (const uint16_t*) input;
  uint16_t* o0 = (uint16_t*) output;
  $for M in range(1, ROW_TILE):
    const uint16_t* i${M} = (const uint16_t*) ((uintptr_t) i${M-1} + input_stride);
    uint16_t* o${M} = (uint16_t*) ((uintptr_t) o${M-1} + output_stride);

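  // Byte increments that advance the row pointers past the ${ROW_TILE} rows handled per
  // outer iteration, minus the `channels` bytes already consumed inside it.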
  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  const __m256 vmin = _mm256_load_ps(params->avx.min);
  const __m256 vmax = _mm256_load_ps(params->avx.max);
  do {
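    // When fewer than ${ROW_TILE} rows remain, alias the excess row pointers to the
    // previous row so their loads and stores become harmless duplicates.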
    $for M in range(1, ROW_TILE):
      $if M % 2 == 0:
        if XNN_UNPREDICTABLE(rows <= ${M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $else:
        if XNN_UNPREDICTABLE(rows < ${M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    const uint16_t* w = (const uint16_t*) weights;
    size_t c = channels;
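    // Weights are packed as ${CHANNEL_TILE} FP16 scales followed by ${CHANNEL_TILE} FP16
    // biases per group of ${CHANNEL_TILE} channels.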
    $if CHANNEL_TILE > 8:
      for (; c >= ${CHANNEL_TILE} * sizeof(uint16_t); c -= ${CHANNEL_TILE} * sizeof(uint16_t)) {
        const __m256 vscale${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));
        $for C in range(8, CHANNEL_TILE, 8):
          const __m256 vscale${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + ${C})));

        $for M in range(ROW_TILE):
          __m256 vacc${M}x${ABC[0:8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
          $for C in range(8, CHANNEL_TILE, 8):
            __m256 vacc${M}x${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (i${M} + ${C})));
          i${M} += ${CHANNEL_TILE};

        $for C in range(0, CHANNEL_TILE, 8):
          const __m256 vbias${ABC[C:C+8]} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + ${CHANNEL_TILE + C})));
        w += ${2 * CHANNEL_TILE};

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 8):
            vacc${M}x${ABC[C:C+8]} = _mm256_fmadd_ps(vacc${M}x${ABC[C:C+8]}, vscale${ABC[C:C+8]}, vbias${ABC[C:C+8]});

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 8):
            vacc${M}x${ABC[C:C+8]} = _mm256_max_ps(vacc${M}x${ABC[C:C+8]}, vmin);

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 8):
            vacc${M}x${ABC[C:C+8]} = _mm256_min_ps(vacc${M}x${ABC[C:C+8]}, vmax);

        $for M in range(ROW_TILE):
          _mm_storeu_si128((__m128i*) o${M}, _mm256_cvtps_ph(vacc${M}x${ABC[0:8]}, _MM_FROUND_NO_EXC));
          $for C in range(8, CHANNEL_TILE, 8):
            _mm_storeu_si128((__m128i*) (o${M} + ${C}), _mm256_cvtps_ph(vacc${M}x${ABC[C:C+8]}, _MM_FROUND_NO_EXC));
          o${M} += ${CHANNEL_TILE};
      }
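    // Process the remaining channels 8 at a time: the scale is read at w and the matching
    // bias ${CHANNEL_TILE} elements further, within the current packed group.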
    for (; c >= 8 * sizeof(uint16_t); c -= 8 * sizeof(uint16_t)) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      $for M in range(ROW_TILE):
        __m256 vacc${M} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
        i${M} += 8;

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + ${CHANNEL_TILE})));
      w += ${8 if CHANNEL_TILE > 8 else CHANNEL_TILE * 2};

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_fmadd_ps(vacc${M}, vscale, vbias);

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_max_ps(vacc${M}, vmin);

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_min_ps(vacc${M}, vmax);

      $for M in range(ROW_TILE):
        _mm_storeu_si128((__m128i*) o${M}, _mm256_cvtps_ph(vacc${M}, _MM_FROUND_NO_EXC));
        o${M} += 8;
    }
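    // Tail of 1-7 channels: compute a full 8-lane result (out-of-bounds reads are
    // permitted per XNN_OOB_READS) and store it piecewise below.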
    if XNN_UNLIKELY(c != 0) {
      const __m256 vscale = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) w));

      $for M in range(ROW_TILE):
        __m256 vacc${M} = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) i${M}));
        i${M} = (const uint16_t*) ((uintptr_t) i${M} + c);

      const __m256 vbias = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i*) (w + ${CHANNEL_TILE})));

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_fmadd_ps(vacc${M}, vscale, vbias);

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_max_ps(vacc${M}, vmin);

      $for M in range(ROW_TILE):
        vacc${M} = _mm256_min_ps(vacc${M}, vmax);

      $for M in range(ROW_TILE):
        __m128i vh${M} = _mm256_cvtps_ph(vacc${M}, _MM_FROUND_NO_EXC);

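      // Store 4, 2, and then 1 element according to the bits of c, moving the
      // already-stored lanes out of the vh vectors after each step.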
      if (c & (4 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          _mm_storel_epi64((__m128i*) o${M}, vh${M});

        $for M in range(ROW_TILE):
          vh${M} = _mm_unpackhi_epi64(vh${M}, vh${M});

        $for M in range(ROW_TILE):
          o${M} += 4;
      }
      if (c & (2 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          _mm_storeu_si32(o${M}, vh${M});

        $for M in range(ROW_TILE):
          vh${M} = _mm_srli_epi64(vh${M}, 32);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(uint16_t))) {
        $for M in range(ROW_TILE):
          *o${M} = (uint16_t) _mm_extract_epi16(vh${M}, 0);

        $for M in range(ROW_TILE):
          o${M} += 1;
      }
    }
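    // Advance all row pointers to the next group of ${ROW_TILE} rows.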
    $for M in range(ROW_TILE):
      i${M} = (const uint16_t*) ((uintptr_t) i${M} + input_increment);
      o${M} = (uint16_t*) ((uintptr_t) o${M} + output_increment);
    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}