// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/avx512skx-mul32-ld128.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vadd.h>


void xnn_qu8_vadd_minmax_ukernel__avx512skx_mul32_ld128_x16(
    size_t n,
    const uint8_t* input_a,
    const uint8_t* input_b,
    uint8_t* output,
    const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
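  // Load the pre-packed quantization parameters: bias, per-input multipliers,
  // right-shift amount, output zero point, and output clamping bounds.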
  const __m512i vbias = _mm512_load_si512(params->avx512.bias);
  const __m512i va_multiplier = _mm512_load_si512(params->avx512.a_multiplier);
  const __m512i vb_multiplier = _mm512_load_si512(params->avx512.b_multiplier);
  const __m128i vshift = _mm_load_si128((const __m128i*) params->avx512.shift);
  const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx512.output_zero_point);
  const __m128i voutput_min = _mm_load_si128((const __m128i*) params->avx512.output_min);
  const __m128i voutput_max = _mm_load_si128((const __m128i*) params->avx512.output_max);

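  // Main loop: process 16 uint8_t elements from each input per iteration.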
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
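    // Load 16 bytes from each input and zero-extend them to 32-bit lanes.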
    const __m512i va0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input_a));
    const __m512i vb0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_loadu_si128((const __m128i*) input_b));
    input_a += 16;
    input_b += 16;

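    // acc = bias + a * a_multiplier (low 32 bits of each 32-bit product).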
    __m512i vacc0123456789ABCDEF = _mm512_add_epi32(vbias, _mm512_mullo_epi32(va0123456789ABCDEF, va_multiplier));

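    // acc += b * b_multiplier.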
    vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vb0123456789ABCDEF, vb_multiplier));

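    // Requantize: arithmetic right shift of every 32-bit lane by the shift count
    // loaded from the parameters.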
    vacc0123456789ABCDEF = _mm512_sra_epi32(vacc0123456789ABCDEF, vshift);

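    // Pack the 32-bit lanes to 16-bit with signed saturation and add the output
    // zero point. _mm256_packs_epi32 interleaves per 128-bit lane, hence the
    // 0123 89AB 4567 CDEF element order reflected in the variable name.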
    __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);

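    // Pack to 8-bit with unsigned saturation, then shuffle 32-bit groups to
    // restore the original 0..F element order.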
    __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));

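    // Clamp the output to the [output_min, output_max] range.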
    vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);

    vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);

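    // Store 16 output bytes.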
    _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
    output += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    {
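      // Remainder: handle the final 1-15 elements with masked byte loads and a
      // masked store so that only the active bytes are accessed.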
      const __mmask16 vmask = _cvtu32_mask16((uint32_t) ((UINT32_C(1) << n) - UINT32_C(1)));
      const __m512i va0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input_a));
      const __m512i vb0123456789ABCDEF = _mm512_cvtepu8_epi32(_mm_maskz_loadu_epi8(vmask, input_b));

      __m512i vacc0123456789ABCDEF = _mm512_add_epi32(vbias, _mm512_mullo_epi32(va0123456789ABCDEF, va_multiplier));

      vacc0123456789ABCDEF = _mm512_add_epi32(vacc0123456789ABCDEF, _mm512_mullo_epi32(vb0123456789ABCDEF, vb_multiplier));

      vacc0123456789ABCDEF = _mm512_sra_epi32(vacc0123456789ABCDEF, vshift);

      __m256i vout012389AB4567CDEF = _mm256_adds_epi16(_mm256_packs_epi32(_mm512_castsi512_si256(vacc0123456789ABCDEF), _mm512_extracti32x8_epi32(vacc0123456789ABCDEF, 1)), voutput_zero_point);
      __m128i vout0123456789ABCDEF = _mm_shuffle_epi32(_mm_packus_epi16(_mm256_castsi256_si128(vout012389AB4567CDEF), _mm256_extracti128_si256(vout012389AB4567CDEF, 1)), _MM_SHUFFLE(3, 1, 2, 0));
      vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
      vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);

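      // Write only the final n output bytes via the masked store.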
      _mm_mask_storeu_epi8(output, vmask, vout0123456789ABCDEF);
    }
  }
}