SSE2/SSSE3/SSE4.1/XOP implementation of QS8 DWCONV microkernels
- Use 16x16-bit multiplication instructions
PiperOrigin-RevId: 324884503
diff --git a/BUILD.bazel b/BUILD.bazel
index 723e8f0..a72d9b0 100644
--- a/BUILD.bazel
+++ b/BUILD.bazel
@@ -1647,9 +1647,9 @@
"src/f32-vrnd/gen/vrndu-sse2-x8.c",
"src/f32-vrnd/gen/vrndd-sse2-x4.c",
"src/f32-vrnd/gen/vrndd-sse2-x8.c",
- "src/qs8-requantization/precise-sse2.c",
- "src/qs8-requantization/fp32-sse2.c",
- "src/qs8-requantization/q31-sse2.c",
+ "src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c",
+ "src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c",
+ "src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c",
"src/qs8-gemm/gen/1x4c2-minmax-sse2-ld64.c",
"src/qs8-gemm/gen/4x4c2-minmax-sse2-ld64.c",
"src/qs8-gemm/gen/1x4c2-minmax-sse2-ld128.c",
@@ -1675,6 +1675,9 @@
"src/qs8-igemm/gen/1x4c8-minmax-sse2-ld128.c",
"src/qs8-igemm/gen/2x4c8-minmax-sse2-ld128.c",
"src/qs8-igemm/gen/3x4c8-minmax-sse2-ld128.c",
+ "src/qs8-requantization/precise-sse2.c",
+ "src/qs8-requantization/fp32-sse2.c",
+ "src/qs8-requantization/q31-sse2.c",
"src/qu8-avgpool/9p8x-minmax-sse2-c8.c",
"src/qu8-avgpool/9x-minmax-sse2-c8.c",
"src/qu8-igemm/4x4c2-minmax-sse2.c",
@@ -1709,6 +1712,9 @@
]
SSSE3_UKERNELS = [
+ "src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c",
+ "src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c",
+ "src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c",
"src/qs8-gemm/gen/1x4c2-minmax-ssse3-ld64.c",
"src/qs8-gemm/gen/4x4c2-minmax-ssse3-ld64.c",
"src/qs8-gemm/gen/1x4c2-minmax-ssse3-ld128.c",
@@ -1759,6 +1765,9 @@
"src/f32-vrnd/gen/vrndu-sse41-x8.c",
"src/f32-vrnd/gen/vrndd-sse41-x4.c",
"src/f32-vrnd/gen/vrndd-sse41-x8.c",
+ "src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c",
+ "src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c",
+ "src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c",
"src/qs8-gemm/gen/1x4c2-minmax-sse41-ld64.c",
"src/qs8-gemm/gen/4x4c2-minmax-sse41-ld64.c",
"src/qs8-gemm/gen/1x4c2-minmax-sse41-ld128.c",
@@ -1898,6 +1907,9 @@
]
XOP_UKERNELS = [
+ "src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c",
+ "src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c",
+ "src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c",
"src/qs8-gemm/gen/1x4c2-minmax-xop-ld64.c",
"src/qs8-gemm/gen/4x4c2-minmax-xop-ld64.c",
"src/qs8-gemm/gen/1x4c2-minmax-xop-ld128.c",
@@ -5172,6 +5184,16 @@
)
xnnpack_unit_test(
+ name = "qs8_dwconv_minmax_test",
+ srcs = [
+ "test/qs8-dwconv-minmax.cc",
+ "test/dwconv-microkernel-tester.h",
+ "src/xnnpack/AlignedAllocator.h",
+ ] + WEIGHTS_PACK_HDRS + MICROKERNEL_TEST_HDRS,
+ deps = MICROKERNEL_TEST_DEPS + [":packing"],
+)
+
+xnnpack_unit_test(
name = "qs8_gemm_minmax_test",
srcs = [
"test/qs8-gemm-minmax.cc",
@@ -5212,20 +5234,20 @@
)
xnnpack_unit_test(
- name = "qu8_igemm_minmax_test",
+ name = "qu8_dwconv_minmax_test",
srcs = [
- "test/qu8-igemm-minmax.cc",
- "test/gemm-microkernel-tester.h",
+ "test/qu8-dwconv-minmax.cc",
+ "test/dwconv-microkernel-tester.h",
"src/xnnpack/AlignedAllocator.h",
] + WEIGHTS_PACK_HDRS + MICROKERNEL_TEST_HDRS,
deps = MICROKERNEL_TEST_DEPS + [":packing"],
)
xnnpack_unit_test(
- name = "qu8_dwconv_minmax_test",
+ name = "qu8_igemm_minmax_test",
srcs = [
- "test/qu8-dwconv-minmax.cc",
- "test/dwconv-microkernel-tester.h",
+ "test/qu8-igemm-minmax.cc",
+ "test/gemm-microkernel-tester.h",
"src/xnnpack/AlignedAllocator.h",
] + WEIGHTS_PACK_HDRS + MICROKERNEL_TEST_HDRS,
deps = MICROKERNEL_TEST_DEPS + [":packing"],
diff --git a/CMakeLists.txt b/CMakeLists.txt
index a601047..99e2730 100755
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1290,6 +1290,9 @@
src/f32-vrnd/gen/vrndu-sse2-x8.c
src/f32-vrnd/gen/vrndd-sse2-x4.c
src/f32-vrnd/gen/vrndd-sse2-x8.c
+ src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c
+ src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c
+ src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c
src/qs8-gemm/gen/1x4c2-minmax-sse2-ld64.c
src/qs8-gemm/gen/4x4c2-minmax-sse2-ld64.c
src/qs8-gemm/gen/1x4c2-minmax-sse2-ld128.c
@@ -1356,6 +1359,9 @@
src/math/sigmoid-sse2-p5-div.c)
SET(XNNPACK_SSSE3_MICROKERNEL_SRCS
+ src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c
+ src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c
+ src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c
src/qs8-gemm/gen/1x4c2-minmax-ssse3-ld64.c
src/qs8-gemm/gen/4x4c2-minmax-ssse3-ld64.c
src/qs8-gemm/gen/1x4c2-minmax-ssse3-ld128.c
@@ -1405,6 +1411,9 @@
src/f32-vrnd/gen/vrndu-sse41-x8.c
src/f32-vrnd/gen/vrndd-sse41-x4.c
src/f32-vrnd/gen/vrndd-sse41-x8.c
+ src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c
+ src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c
+ src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c
src/qs8-gemm/gen/1x4c2-minmax-sse41-ld64.c
src/qs8-gemm/gen/4x4c2-minmax-sse41-ld64.c
src/qs8-gemm/gen/1x4c2-minmax-sse41-ld128.c
@@ -1540,6 +1549,9 @@
src/f32-vunary/gen/vsqr-avx-x16.c)
SET(XNNPACK_XOP_MICROKERNEL_SRCS
+ src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c
+ src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c
+ src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c
src/qs8-gemm/gen/1x4c2-minmax-xop-ld64.c
src/qs8-gemm/gen/4x4c2-minmax-xop-ld64.c
src/qs8-gemm/gen/1x4c2-minmax-xop-ld128.c
@@ -3415,6 +3427,15 @@
TARGET_LINK_LIBRARIES(f32-vrsubc-minmax-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
ADD_TEST(f32-vrsubc-minmax-test f32-vrsubc-minmax-test)
+ ADD_EXECUTABLE(qs8-dwconv-minmax-test test/qs8-dwconv-minmax.cc)
+ SET_TARGET_PROPERTIES(qs8-dwconv-minmax-test PROPERTIES
+ CXX_STANDARD 11
+ CXX_STANDARD_REQUIRED YES
+ CXX_EXTENSIONS YES)
+ TARGET_INCLUDE_DIRECTORIES(qs8-dwconv-minmax-test PRIVATE src test)
+ TARGET_LINK_LIBRARIES(qs8-dwconv-minmax-test PRIVATE XNNPACK cpuinfo fp16 gtest gtest_main)
+ ADD_TEST(qs8-dwconv-minmax-test qs8-dwconv-minmax-test)
+
ADD_EXECUTABLE(qs8-gemm-minmax-test test/qs8-gemm-minmax.cc)
SET_TARGET_PROPERTIES(qs8-gemm-minmax-test PROPERTIES
CXX_STANDARD 11
diff --git a/scripts/generate-qs8-dwconv.sh b/scripts/generate-qs8-dwconv.sh
new file mode 100755
index 0000000..2474518
--- /dev/null
+++ b/scripts/generate-qs8-dwconv.sh
@@ -0,0 +1,25 @@
+#!/bin/sh
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+
+################################### x86 SSE ###################################
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D SSE=2 -o src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D SSE=2 -o src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D SSE=2 -o src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c
+
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D SSE=3 -o src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D SSE=3 -o src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D SSE=3 -o src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c
+
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D SSE=4 -o src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D SSE=4 -o src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D SSE=4 -o src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c
+
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=8 -D KERNEL_TILE=9 -D SSE=5 -o src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=16 -D KERNEL_TILE=9 -D SSE=5 -o src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c
+tools/xngen src/qs8-dwconv/up-sse-mul16.c.in -D CHANNEL_TILE=24 -D KERNEL_TILE=9 -D SSE=5 -o src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c
+
+################################## Unit tests #################################
+tools/generate-dwconv-test.py --spec test/qs8-dwconv-minmax.yaml --output test/qs8-dwconv-minmax.cc
diff --git a/src/packing.c b/src/packing.c
index 49a3c2a..5736624 100644
--- a/src/packing.c
+++ b/src/packing.c
@@ -1100,6 +1100,47 @@
}
}
+void xnn_pack_qs8_dwconv_ghw_w(
+ size_t h,
+ size_t w,
+ size_t c,
+ size_t cr,
+ const int8_t* k,
+ const int32_t* b,
+ void* packed_w,
+ const struct xnn_qs8_packing_params* params)
+{
+ const int32_t izp = (int32_t) params->input_zero_point;
+ for (size_t cr_block_start = 0; cr_block_start < c; cr_block_start += cr) {
+ const size_t cr_block_size = min(c - cr_block_start, cr);
+ int32_t* packed_b = (int32_t*) packed_w;
+ if XNN_LIKELY(b != NULL) {
+ for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) {
+ *((int32_t*) packed_w) = b[cr_block_start + cr_block_offset];
+ packed_w = (void*) ((uintptr_t) packed_w + sizeof(int32_t));
+ }
+ } else {
+ size_t n = cr_block_size;
+ do {
+ *((int32_t*) packed_w) = 0;
+ packed_w = (void*) ((uintptr_t) packed_w + sizeof(int32_t));
+ } while (--n != 0);
+ }
+ packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int32_t));
+ for (size_t x = 0; x < w; x++) {
+ for (size_t y = 0; y < h; y++) {
+ for (size_t cr_block_offset = 0; cr_block_offset < cr_block_size; cr_block_offset++) {
+ const int8_t kv = k[((cr_block_start + cr_block_offset) * h + y) * w + x];
+ packed_b[cr_block_offset] -= (int32_t) kv * izp;
+ *((int8_t*) packed_w) = kv;
+ packed_w = (void*) ((uintptr_t) packed_w + sizeof(int8_t));
+ }
+ packed_w = (void*) ((uintptr_t) packed_w + (cr - cr_block_size) * sizeof(int8_t));
+ }
+ }
+ }
+}
+
void xnn_pack_f32_dwconv_hwg_w(
size_t h,
size_t w,
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c
new file mode 100644
index 0000000..5f936b9
--- /dev/null
+++ b/src/qs8-dwconv/gen/up16x9-minmax-sse2-mul16.c
@@ -0,0 +1,598 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i0 += 16;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+ const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x89ABCDEF));
+ const __m128i vxk0x89ABCDEF = _mm_unpacklo_epi8(vk0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x89ABCDEF));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i1 += 16;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+ const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x89ABCDEF));
+ const __m128i vxk1x89ABCDEF = _mm_unpacklo_epi8(vk1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x89ABCDEF));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i2 += 16;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+ const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x89ABCDEF));
+ const __m128i vxk2x89ABCDEF = _mm_unpacklo_epi8(vk2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x89ABCDEF));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i3 += 16;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+ const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x89ABCDEF));
+ const __m128i vxk3x89ABCDEF = _mm_unpacklo_epi8(vk3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x89ABCDEF));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ i4 += 16;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+ const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x89ABCDEF));
+ const __m128i vxk4x89ABCDEF = _mm_unpacklo_epi8(vk4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x89ABCDEF));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ i5 += 16;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+ const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x89ABCDEF));
+ const __m128i vxk5x89ABCDEF = _mm_unpacklo_epi8(vk5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x89ABCDEF));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ i6 += 16;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+ const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x89ABCDEF));
+ const __m128i vxk6x89ABCDEF = _mm_unpacklo_epi8(vk6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x89ABCDEF));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ i7 += 16;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+ const __m128i vxi7x89ABCDEF = _mm_unpacklo_epi8(vi7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x89ABCDEF));
+ const __m128i vxk7x89ABCDEF = _mm_unpacklo_epi8(vk7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x89ABCDEF));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ i8 += 16;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+ const __m128i vxi8x89ABCDEF = _mm_unpacklo_epi8(vi8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x89ABCDEF));
+ const __m128i vxk8x89ABCDEF = _mm_unpacklo_epi8(vk8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x89ABCDEF));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+ const __m128i vnmask89AB = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc89AB);
+ const __m128i vnmaskCDEF = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccCDEF);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+ const __m128i vabsacc89AB = _mm_sub_epi32(_mm_xor_si128(vacc89AB, vnmask89AB), vnmask89AB);
+ const __m128i vabsaccCDEF = _mm_sub_epi32(_mm_xor_si128(vaccCDEF, vnmaskCDEF), vnmaskCDEF);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+ const __m128i vabsacc9B = _mm_srli_epi64(vabsacc89AB, 32);
+ const __m128i vabsaccDF = _mm_srli_epi64(vabsaccCDEF, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+ const __m128i vabsprod8A = _mm_mul_epu32(vabsacc89AB, vmultiplier);
+ const __m128i vabsprod9B = _mm_mul_epu32(vabsacc9B, vmultiplier);
+ const __m128i vabsprodCE = _mm_mul_epu32(vabsaccCDEF, vmultiplier);
+ const __m128i vabsprodDF = _mm_mul_epu32(vabsaccDF, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask8A = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask9B = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskCE = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskDF = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+ const __m128i vprod8A = _mm_sub_epi64(_mm_xor_si128(vabsprod8A, vnmask8A), vnmask8A);
+ const __m128i vprod9B = _mm_sub_epi64(_mm_xor_si128(vabsprod9B, vnmask9B), vnmask9B);
+ const __m128i vprodCE = _mm_sub_epi64(_mm_xor_si128(vabsprodCE, vnmaskCE), vnmaskCE);
+ const __m128i vprodDF = _mm_sub_epi64(_mm_xor_si128(vabsprodDF, vnmaskDF), vnmaskDF);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+ const __m128i vq31prod8A = _mm_srli_epi64(_mm_add_epi64(vprod8A, vrounding), 31);
+ const __m128i vq31prod9B = _mm_srli_epi64(_mm_add_epi64(vprod9B, vrounding), 31);
+ const __m128i vq31prodCE = _mm_srli_epi64(_mm_add_epi64(vprodCE, vrounding), 31);
+ const __m128i vq31prodDF = _mm_srli_epi64(_mm_add_epi64(vprodDF, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod8A9B = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod8A), _mm_castsi128_ps(vq31prod9B), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodCEDF = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodCE), _mm_castsi128_ps(vq31prodDF), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod89AB = _mm_shuffle_epi32(vq31prod8A9B, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodCDEF = _mm_shuffle_epi32(vq31prodCEDF, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c
new file mode 100644
index 0000000..82b8af7
--- /dev/null
+++ b/src/qs8-dwconv/gen/up16x9-minmax-sse41-mul16.c
@@ -0,0 +1,542 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
+ i0 += 16;
+
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
+ i1 += 16;
+
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
+ i2 += 16;
+
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
+ i3 += 16;
+
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
+ i4 += 16;
+
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
+ i5 += 16;
+
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
+ i6 += 16;
+
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
+ i7 += 16;
+
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
+ i8 += 16;
+
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+ const __m128i vacc9B = _mm_srli_epi64(vacc89AB, 32);
+ const __m128i vaccDF = _mm_srli_epi64(vaccCDEF, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+ const __m128i vprod8A = _mm_add_epi64(_mm_mul_epi32(vacc89AB, vmultiplier), vrounding);
+ const __m128i vprodCE = _mm_add_epi64(_mm_mul_epi32(vaccCDEF, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+ const __m128i vprod9B = _mm_add_epi64(_mm_mul_epi32(vacc9B, vmultiplier), vrounding);
+ const __m128i vprodDF = _mm_add_epi64(_mm_mul_epi32(vaccDF, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+ const __m128i vq31prod8A = _mm_srli_epi64(vprod8A, 31);
+ const __m128i vq31prod9B = _mm_add_epi64(vprod9B, vprod9B);
+ const __m128i vq31prodCE = _mm_srli_epi64(vprodCE, 31);
+ const __m128i vq31prodDF = _mm_add_epi64(vprodDF, vprodDF);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+ const __m128i vq31prod89AB = _mm_blend_epi16(vq31prod8A, vq31prod9B, 0xCC);
+ const __m128i vq31prodCDEF = _mm_blend_epi16(vq31prodCE, vq31prodDF, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ i0 += 8;
+
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
+ const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ i1 += 8;
+
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
+ const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ i2 += 8;
+
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ i3 += 8;
+
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
+ const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ i4 += 8;
+
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
+ const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ i5 += 8;
+
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ i6 += 8;
+
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
+ const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ i7 += 8;
+
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
+ const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ i8 += 8;
+
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c
new file mode 100644
index 0000000..680db5a
--- /dev/null
+++ b/src/qs8-dwconv/gen/up16x9-minmax-ssse3-mul16.c
@@ -0,0 +1,598 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <tmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i0 += 16;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+ const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x89ABCDEF));
+ const __m128i vxk0x89ABCDEF = _mm_unpacklo_epi8(vk0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x89ABCDEF));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i1 += 16;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+ const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x89ABCDEF));
+ const __m128i vxk1x89ABCDEF = _mm_unpacklo_epi8(vk1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x89ABCDEF));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i2 += 16;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+ const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x89ABCDEF));
+ const __m128i vxk2x89ABCDEF = _mm_unpacklo_epi8(vk2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x89ABCDEF));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i3 += 16;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+ const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x89ABCDEF));
+ const __m128i vxk3x89ABCDEF = _mm_unpacklo_epi8(vk3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x89ABCDEF));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ i4 += 16;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+ const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x89ABCDEF));
+ const __m128i vxk4x89ABCDEF = _mm_unpacklo_epi8(vk4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x89ABCDEF));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ i5 += 16;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+ const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x89ABCDEF));
+ const __m128i vxk5x89ABCDEF = _mm_unpacklo_epi8(vk5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x89ABCDEF));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ i6 += 16;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+ const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x89ABCDEF));
+ const __m128i vxk6x89ABCDEF = _mm_unpacklo_epi8(vk6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x89ABCDEF));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ i7 += 16;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+ const __m128i vxi7x89ABCDEF = _mm_unpacklo_epi8(vi7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x89ABCDEF));
+ const __m128i vxk7x89ABCDEF = _mm_unpacklo_epi8(vk7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x89ABCDEF));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ i8 += 16;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+ const __m128i vxi8x89ABCDEF = _mm_unpacklo_epi8(vi8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x89ABCDEF));
+ const __m128i vxk8x89ABCDEF = _mm_unpacklo_epi8(vk8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x89ABCDEF));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+ const __m128i vnmask89AB = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc89AB);
+ const __m128i vnmaskCDEF = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccCDEF);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+ const __m128i vabsacc89AB = _mm_abs_epi32(vacc89AB);
+ const __m128i vabsaccCDEF = _mm_abs_epi32(vaccCDEF);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+ const __m128i vabsacc9B = _mm_srli_epi64(vabsacc89AB, 32);
+ const __m128i vabsaccDF = _mm_srli_epi64(vabsaccCDEF, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+ const __m128i vabsprod8A = _mm_mul_epu32(vabsacc89AB, vmultiplier);
+ const __m128i vabsprod9B = _mm_mul_epu32(vabsacc9B, vmultiplier);
+ const __m128i vabsprodCE = _mm_mul_epu32(vabsaccCDEF, vmultiplier);
+ const __m128i vabsprodDF = _mm_mul_epu32(vabsaccDF, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask8A = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask9B = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskCE = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskDF = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+ const __m128i vprod8A = _mm_sub_epi64(_mm_xor_si128(vabsprod8A, vnmask8A), vnmask8A);
+ const __m128i vprod9B = _mm_sub_epi64(_mm_xor_si128(vabsprod9B, vnmask9B), vnmask9B);
+ const __m128i vprodCE = _mm_sub_epi64(_mm_xor_si128(vabsprodCE, vnmaskCE), vnmaskCE);
+ const __m128i vprodDF = _mm_sub_epi64(_mm_xor_si128(vabsprodDF, vnmaskDF), vnmaskDF);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+ const __m128i vq31prod8A = _mm_srli_epi64(_mm_add_epi64(vprod8A, vrounding), 31);
+ const __m128i vq31prod9B = _mm_srli_epi64(_mm_add_epi64(vprod9B, vrounding), 31);
+ const __m128i vq31prodCE = _mm_srli_epi64(_mm_add_epi64(vprodCE, vrounding), 31);
+ const __m128i vq31prodDF = _mm_srli_epi64(_mm_add_epi64(vprodDF, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod8A9B = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod8A), _mm_castsi128_ps(vq31prod9B), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodCEDF = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodCE), _mm_castsi128_ps(vq31prodDF), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod89AB = _mm_shuffle_epi32(vq31prod8A9B, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodCDEF = _mm_shuffle_epi32(vq31prodCEDF, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c b/src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c
new file mode 100644
index 0000000..2396878
--- /dev/null
+++ b/src/qs8-dwconv/gen/up16x9-minmax-xop-mul16.c
@@ -0,0 +1,538 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#ifdef __GNUC__
+ #include <x86intrin.h>
+#else
+ #include <immintrin.h>
+ #include <ammintrin.h>
+#endif
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 16; c -= 16) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
+ i0 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);
+ vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);
+ vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF, vacc89AB);
+ vxi0x89ABCDEF = _mm_unpackhi_epi64(vxi0x89ABCDEF, vxi0x89ABCDEF);
+ vxk0x89ABCDEF = _mm_unpackhi_epi64(vxk0x89ABCDEF, vxk0x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF, vaccCDEF);
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
+ i1 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+ vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+ vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vacc89AB);
+ vxi1x89ABCDEF = _mm_unpackhi_epi64(vxi1x89ABCDEF, vxi1x89ABCDEF);
+ vxk1x89ABCDEF = _mm_unpackhi_epi64(vxk1x89ABCDEF, vxk1x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vaccCDEF);
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
+ i2 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+ vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+ vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF, vacc89AB);
+ vxi2x89ABCDEF = _mm_unpackhi_epi64(vxi2x89ABCDEF, vxi2x89ABCDEF);
+ vxk2x89ABCDEF = _mm_unpackhi_epi64(vxk2x89ABCDEF, vxk2x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF, vaccCDEF);
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
+ i3 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+ vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+ vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vacc89AB);
+ vxi3x89ABCDEF = _mm_unpackhi_epi64(vxi3x89ABCDEF, vxi3x89ABCDEF);
+ vxk3x89ABCDEF = _mm_unpackhi_epi64(vxk3x89ABCDEF, vxk3x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vaccCDEF);
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
+ i4 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+ vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+ vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF, vacc89AB);
+ vxi4x89ABCDEF = _mm_unpackhi_epi64(vxi4x89ABCDEF, vxi4x89ABCDEF);
+ vxk4x89ABCDEF = _mm_unpackhi_epi64(vxk4x89ABCDEF, vxk4x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF, vaccCDEF);
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
+ i5 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+ vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+ vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vacc89AB);
+ vxi5x89ABCDEF = _mm_unpackhi_epi64(vxi5x89ABCDEF, vxi5x89ABCDEF);
+ vxk5x89ABCDEF = _mm_unpackhi_epi64(vxk5x89ABCDEF, vxk5x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vaccCDEF);
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
+ i6 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+ vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+ vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF, vacc89AB);
+ vxi6x89ABCDEF = _mm_unpackhi_epi64(vxi6x89ABCDEF, vxi6x89ABCDEF);
+ vxk6x89ABCDEF = _mm_unpackhi_epi64(vxk6x89ABCDEF, vxk6x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF, vaccCDEF);
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
+ i7 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+ vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+ vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vacc89AB);
+ vxi7x89ABCDEF = _mm_unpackhi_epi64(vxi7x89ABCDEF, vxi7x89ABCDEF);
+ vxk7x89ABCDEF = _mm_unpackhi_epi64(vxk7x89ABCDEF, vxk7x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vaccCDEF);
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
+ i8 += 16;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+ vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+ vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF, vacc89AB);
+ vxi8x89ABCDEF = _mm_unpackhi_epi64(vxi8x89ABCDEF, vxi8x89ABCDEF);
+ vxk8x89ABCDEF = _mm_unpackhi_epi64(vxk8x89ABCDEF, vxk8x89ABCDEF);
+
+ vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF, vaccCDEF);
+
+ w = (const void*) ((uintptr_t) w + 16 * sizeof(int32_t) + 144 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+ const __m128i vacc9B = _mm_srli_epi64(vacc89AB, 32);
+ const __m128i vaccDF = _mm_srli_epi64(vaccCDEF, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+ const __m128i vprod8A = _mm_add_epi64(_mm_mul_epi32(vacc89AB, vmultiplier), vrounding);
+ const __m128i vprodCE = _mm_add_epi64(_mm_mul_epi32(vaccCDEF, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+ const __m128i vprod9B = _mm_add_epi64(_mm_mul_epi32(vacc9B, vmultiplier), vrounding);
+ const __m128i vprodDF = _mm_add_epi64(_mm_mul_epi32(vaccDF, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+ const __m128i vq31prod8A = _mm_srli_epi64(vprod8A, 31);
+ const __m128i vq31prod9B = _mm_add_epi64(vprod9B, vprod9B);
+ const __m128i vq31prodCE = _mm_srli_epi64(vprodCE, 31);
+ const __m128i vq31prodDF = _mm_add_epi64(vprodDF, vprodDF);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+ const __m128i vq31prod89AB = _mm_blend_epi16(vq31prod8A, vq31prod9B, 0xCC);
+ const __m128i vq31prodCDEF = _mm_blend_epi16(vq31prodCE, vq31prodDF, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ output += 16;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 16 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ i0 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);
+ vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);
+ vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 16));
+ __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ i1 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+ vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+ vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 32));
+ __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ i2 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+ vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+ vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ i3 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+ vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+ vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 64));
+ __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ i4 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+ vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+ vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 80));
+ __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ i5 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+ vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+ vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ i6 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+ vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+ vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 112));
+ __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ i7 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+ vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+ vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 128));
+ __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ i8 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+ vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+ vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c b/src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c
new file mode 100644
index 0000000..f9b1ec8
--- /dev/null
+++ b/src/qs8-dwconv/gen/up24x9-minmax-sse2-mul16.c
@@ -0,0 +1,712 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+ __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 20 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
+ const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i0 += 24;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+ const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x89ABCDEF));
+ const __m128i vxk0x89ABCDEF = _mm_unpacklo_epi8(vk0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x89ABCDEF));
+ const __m128i vxi0xGHIJKLMN = _mm_unpacklo_epi8(vi0xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0xGHIJKLMN));
+ const __m128i vxk0xGHIJKLMN = _mm_unpacklo_epi8(vk0xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0xGHIJKLMN));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0xGHIJKLMNlo = _mm_mullo_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+ const __m128i vp0xGHIJKLMNhi = _mm_mulhi_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
+ const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i1 += 24;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+ const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x89ABCDEF));
+ const __m128i vxk1x89ABCDEF = _mm_unpacklo_epi8(vk1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x89ABCDEF));
+ const __m128i vxi1xGHIJKLMN = _mm_unpacklo_epi8(vi1xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1xGHIJKLMN));
+ const __m128i vxk1xGHIJKLMN = _mm_unpacklo_epi8(vk1xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1xGHIJKLMN));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1xGHIJKLMNlo = _mm_mullo_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+ const __m128i vp1xGHIJKLMNhi = _mm_mulhi_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
+ const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i2 += 24;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+ const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x89ABCDEF));
+ const __m128i vxk2x89ABCDEF = _mm_unpacklo_epi8(vk2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x89ABCDEF));
+ const __m128i vxi2xGHIJKLMN = _mm_unpacklo_epi8(vi2xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2xGHIJKLMN));
+ const __m128i vxk2xGHIJKLMN = _mm_unpacklo_epi8(vk2xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2xGHIJKLMN));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2xGHIJKLMNlo = _mm_mullo_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+ const __m128i vp2xGHIJKLMNhi = _mm_mulhi_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
+ const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ i3 += 24;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+ const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x89ABCDEF));
+ const __m128i vxk3x89ABCDEF = _mm_unpacklo_epi8(vk3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x89ABCDEF));
+ const __m128i vxi3xGHIJKLMN = _mm_unpacklo_epi8(vi3xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3xGHIJKLMN));
+ const __m128i vxk3xGHIJKLMN = _mm_unpacklo_epi8(vk3xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3xGHIJKLMN));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3xGHIJKLMNlo = _mm_mullo_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+ const __m128i vp3xGHIJKLMNhi = _mm_mulhi_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
+ const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ i4 += 24;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+ const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x89ABCDEF));
+ const __m128i vxk4x89ABCDEF = _mm_unpacklo_epi8(vk4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x89ABCDEF));
+ const __m128i vxi4xGHIJKLMN = _mm_unpacklo_epi8(vi4xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4xGHIJKLMN));
+ const __m128i vxk4xGHIJKLMN = _mm_unpacklo_epi8(vk4xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4xGHIJKLMN));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4xGHIJKLMNlo = _mm_mullo_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+ const __m128i vp4xGHIJKLMNhi = _mm_mulhi_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
+ const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ i5 += 24;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+ const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x89ABCDEF));
+ const __m128i vxk5x89ABCDEF = _mm_unpacklo_epi8(vk5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x89ABCDEF));
+ const __m128i vxi5xGHIJKLMN = _mm_unpacklo_epi8(vi5xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5xGHIJKLMN));
+ const __m128i vxk5xGHIJKLMN = _mm_unpacklo_epi8(vk5xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5xGHIJKLMN));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5xGHIJKLMNlo = _mm_mullo_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+ const __m128i vp5xGHIJKLMNhi = _mm_mulhi_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
+ const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
+ const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
+ i6 += 24;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+ const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x89ABCDEF));
+ const __m128i vxk6x89ABCDEF = _mm_unpacklo_epi8(vk6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x89ABCDEF));
+ const __m128i vxi6xGHIJKLMN = _mm_unpacklo_epi8(vi6xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6xGHIJKLMN));
+ const __m128i vxk6xGHIJKLMN = _mm_unpacklo_epi8(vk6xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6xGHIJKLMN));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6xGHIJKLMNlo = _mm_mullo_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+ const __m128i vp6xGHIJKLMNhi = _mm_mulhi_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
+ const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
+ const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
+ i7 += 24;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+ const __m128i vxi7x89ABCDEF = _mm_unpacklo_epi8(vi7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x89ABCDEF));
+ const __m128i vxk7x89ABCDEF = _mm_unpacklo_epi8(vk7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x89ABCDEF));
+ const __m128i vxi7xGHIJKLMN = _mm_unpacklo_epi8(vi7xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7xGHIJKLMN));
+ const __m128i vxk7xGHIJKLMN = _mm_unpacklo_epi8(vk7xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7xGHIJKLMN));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7xGHIJKLMNlo = _mm_mullo_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+ const __m128i vp7xGHIJKLMNhi = _mm_mulhi_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
+ const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
+ const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
+ i8 += 24;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+ const __m128i vxi8x89ABCDEF = _mm_unpacklo_epi8(vi8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x89ABCDEF));
+ const __m128i vxk8x89ABCDEF = _mm_unpacklo_epi8(vk8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x89ABCDEF));
+ const __m128i vxi8xGHIJKLMN = _mm_unpacklo_epi8(vi8xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8xGHIJKLMN));
+ const __m128i vxk8xGHIJKLMN = _mm_unpacklo_epi8(vk8xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8xGHIJKLMN));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8xGHIJKLMNlo = _mm_mullo_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+ const __m128i vp8xGHIJKLMNhi = _mm_mulhi_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+ const __m128i vnmask89AB = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc89AB);
+ const __m128i vnmaskCDEF = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccCDEF);
+ const __m128i vnmaskGHIJ = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccGHIJ);
+ const __m128i vnmaskKLMN = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccKLMN);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+ const __m128i vabsacc89AB = _mm_sub_epi32(_mm_xor_si128(vacc89AB, vnmask89AB), vnmask89AB);
+ const __m128i vabsaccCDEF = _mm_sub_epi32(_mm_xor_si128(vaccCDEF, vnmaskCDEF), vnmaskCDEF);
+ const __m128i vabsaccGHIJ = _mm_sub_epi32(_mm_xor_si128(vaccGHIJ, vnmaskGHIJ), vnmaskGHIJ);
+ const __m128i vabsaccKLMN = _mm_sub_epi32(_mm_xor_si128(vaccKLMN, vnmaskKLMN), vnmaskKLMN);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+ const __m128i vabsacc9B = _mm_srli_epi64(vabsacc89AB, 32);
+ const __m128i vabsaccDF = _mm_srli_epi64(vabsaccCDEF, 32);
+ const __m128i vabsaccHJ = _mm_srli_epi64(vabsaccGHIJ, 32);
+ const __m128i vabsaccLN = _mm_srli_epi64(vabsaccKLMN, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+ const __m128i vabsprod8A = _mm_mul_epu32(vabsacc89AB, vmultiplier);
+ const __m128i vabsprod9B = _mm_mul_epu32(vabsacc9B, vmultiplier);
+ const __m128i vabsprodCE = _mm_mul_epu32(vabsaccCDEF, vmultiplier);
+ const __m128i vabsprodDF = _mm_mul_epu32(vabsaccDF, vmultiplier);
+ const __m128i vabsprodGI = _mm_mul_epu32(vabsaccGHIJ, vmultiplier);
+ const __m128i vabsprodHJ = _mm_mul_epu32(vabsaccHJ, vmultiplier);
+ const __m128i vabsprodKM = _mm_mul_epu32(vabsaccKLMN, vmultiplier);
+ const __m128i vabsprodLN = _mm_mul_epu32(vabsaccLN, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask8A = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask9B = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskCE = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskDF = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskGI = _mm_shuffle_epi32(vnmaskGHIJ, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskHJ = _mm_shuffle_epi32(vnmaskGHIJ, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskKM = _mm_shuffle_epi32(vnmaskKLMN, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskLN = _mm_shuffle_epi32(vnmaskKLMN, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+ const __m128i vprod8A = _mm_sub_epi64(_mm_xor_si128(vabsprod8A, vnmask8A), vnmask8A);
+ const __m128i vprod9B = _mm_sub_epi64(_mm_xor_si128(vabsprod9B, vnmask9B), vnmask9B);
+ const __m128i vprodCE = _mm_sub_epi64(_mm_xor_si128(vabsprodCE, vnmaskCE), vnmaskCE);
+ const __m128i vprodDF = _mm_sub_epi64(_mm_xor_si128(vabsprodDF, vnmaskDF), vnmaskDF);
+ const __m128i vprodGI = _mm_sub_epi64(_mm_xor_si128(vabsprodGI, vnmaskGI), vnmaskGI);
+ const __m128i vprodHJ = _mm_sub_epi64(_mm_xor_si128(vabsprodHJ, vnmaskHJ), vnmaskHJ);
+ const __m128i vprodKM = _mm_sub_epi64(_mm_xor_si128(vabsprodKM, vnmaskKM), vnmaskKM);
+ const __m128i vprodLN = _mm_sub_epi64(_mm_xor_si128(vabsprodLN, vnmaskLN), vnmaskLN);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+ const __m128i vq31prod8A = _mm_srli_epi64(_mm_add_epi64(vprod8A, vrounding), 31);
+ const __m128i vq31prod9B = _mm_srli_epi64(_mm_add_epi64(vprod9B, vrounding), 31);
+ const __m128i vq31prodCE = _mm_srli_epi64(_mm_add_epi64(vprodCE, vrounding), 31);
+ const __m128i vq31prodDF = _mm_srli_epi64(_mm_add_epi64(vprodDF, vrounding), 31);
+ const __m128i vq31prodGI = _mm_srli_epi64(_mm_add_epi64(vprodGI, vrounding), 31);
+ const __m128i vq31prodHJ = _mm_srli_epi64(_mm_add_epi64(vprodHJ, vrounding), 31);
+ const __m128i vq31prodKM = _mm_srli_epi64(_mm_add_epi64(vprodKM, vrounding), 31);
+ const __m128i vq31prodLN = _mm_srli_epi64(_mm_add_epi64(vprodLN, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod8A9B = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod8A), _mm_castsi128_ps(vq31prod9B), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodCEDF = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodCE), _mm_castsi128_ps(vq31prodDF), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodGIHJ = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodGI), _mm_castsi128_ps(vq31prodHJ), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodKMLN = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodKM), _mm_castsi128_ps(vq31prodLN), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod89AB = _mm_shuffle_epi32(vq31prod8A9B, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodCDEF = _mm_shuffle_epi32(vq31prodCEDF, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodGHIJ = _mm_shuffle_epi32(vq31prodGIHJ, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodKLMN = _mm_shuffle_epi32(vq31prodKMLN, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+ const __m128i vremGHIJ =
+ _mm_add_epi32(_mm_and_si128(vq31prodGHIJ, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodGHIJ));
+ const __m128i vremKLMN =
+ _mm_add_epi32(_mm_and_si128(vq31prodKLMN, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodKLMN));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+ vaccGHIJ =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodGHIJ, vshift), _mm_cmpgt_epi32(vremGHIJ, vremainder_threshold));
+ vaccKLMN =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodKLMN, vshift), _mm_cmpgt_epi32(vremKLMN, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c b/src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c
new file mode 100644
index 0000000..f50262e
--- /dev/null
+++ b/src/qs8-dwconv/gen/up24x9-minmax-sse41-mul16.c
@@ -0,0 +1,640 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+ __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 20 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ const __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
+ const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
+ const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(vi0xGHIJKLMN);
+ const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ const __m128i vxk0xGHIJKLMN = _mm_cvtepi8_epi16(vk0xGHIJKLMN);
+ i0 += 24;
+
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0xGHIJKLMNlo = _mm_mullo_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+ const __m128i vp0xGHIJKLMNhi = _mm_mulhi_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
+ const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
+ const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(vi1xGHIJKLMN);
+ const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ const __m128i vxk1xGHIJKLMN = _mm_cvtepi8_epi16(vk1xGHIJKLMN);
+ i1 += 24;
+
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1xGHIJKLMNlo = _mm_mullo_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+ const __m128i vp1xGHIJKLMNhi = _mm_mulhi_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ const __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
+ const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
+ const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(vi2xGHIJKLMN);
+ const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ const __m128i vxk2xGHIJKLMN = _mm_cvtepi8_epi16(vk2xGHIJKLMN);
+ i2 += 24;
+
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2xGHIJKLMNlo = _mm_mullo_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+ const __m128i vp2xGHIJKLMNhi = _mm_mulhi_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
+ const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
+ const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(vi3xGHIJKLMN);
+ const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ const __m128i vxk3xGHIJKLMN = _mm_cvtepi8_epi16(vk3xGHIJKLMN);
+ i3 += 24;
+
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3xGHIJKLMNlo = _mm_mullo_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+ const __m128i vp3xGHIJKLMNhi = _mm_mulhi_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ const __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
+ const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
+ const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(vi4xGHIJKLMN);
+ const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ const __m128i vxk4xGHIJKLMN = _mm_cvtepi8_epi16(vk4xGHIJKLMN);
+ i4 += 24;
+
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4xGHIJKLMNlo = _mm_mullo_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+ const __m128i vp4xGHIJKLMNhi = _mm_mulhi_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
+ const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
+ const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(vi5xGHIJKLMN);
+ const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ const __m128i vxk5xGHIJKLMN = _mm_cvtepi8_epi16(vk5xGHIJKLMN);
+ i5 += 24;
+
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5xGHIJKLMNlo = _mm_mullo_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+ const __m128i vp5xGHIJKLMNhi = _mm_mulhi_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
+ const __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
+ const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
+ const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(vi6xGHIJKLMN);
+ const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
+ const __m128i vxk6xGHIJKLMN = _mm_cvtepi8_epi16(vk6xGHIJKLMN);
+ i6 += 24;
+
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6xGHIJKLMNlo = _mm_mullo_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+ const __m128i vp6xGHIJKLMNhi = _mm_mulhi_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
+ const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
+ const __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
+ const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
+ const __m128i vxi7xGHIJKLMN = _mm_cvtepi8_epi16(vi7xGHIJKLMN);
+ const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
+ const __m128i vxk7xGHIJKLMN = _mm_cvtepi8_epi16(vk7xGHIJKLMN);
+ i7 += 24;
+
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7xGHIJKLMNlo = _mm_mullo_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+ const __m128i vp7xGHIJKLMNhi = _mm_mulhi_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
+ const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
+ const __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
+ const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
+ const __m128i vxi8xGHIJKLMN = _mm_cvtepi8_epi16(vi8xGHIJKLMN);
+ const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
+ const __m128i vxk8xGHIJKLMN = _mm_cvtepi8_epi16(vk8xGHIJKLMN);
+ i8 += 24;
+
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8xGHIJKLMNlo = _mm_mullo_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+ const __m128i vp8xGHIJKLMNhi = _mm_mulhi_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+ const __m128i vacc9B = _mm_srli_epi64(vacc89AB, 32);
+ const __m128i vaccDF = _mm_srli_epi64(vaccCDEF, 32);
+ const __m128i vaccHJ = _mm_srli_epi64(vaccGHIJ, 32);
+ const __m128i vaccLN = _mm_srli_epi64(vaccKLMN, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+ const __m128i vprod8A = _mm_add_epi64(_mm_mul_epi32(vacc89AB, vmultiplier), vrounding);
+ const __m128i vprodCE = _mm_add_epi64(_mm_mul_epi32(vaccCDEF, vmultiplier), vrounding);
+ const __m128i vprodGI = _mm_add_epi64(_mm_mul_epi32(vaccGHIJ, vmultiplier), vrounding);
+ const __m128i vprodKM = _mm_add_epi64(_mm_mul_epi32(vaccKLMN, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+ const __m128i vprod9B = _mm_add_epi64(_mm_mul_epi32(vacc9B, vmultiplier), vrounding);
+ const __m128i vprodDF = _mm_add_epi64(_mm_mul_epi32(vaccDF, vmultiplier), vrounding);
+ const __m128i vprodHJ = _mm_add_epi64(_mm_mul_epi32(vaccHJ, vmultiplier), vrounding);
+ const __m128i vprodLN = _mm_add_epi64(_mm_mul_epi32(vaccLN, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+ const __m128i vq31prod8A = _mm_srli_epi64(vprod8A, 31);
+ const __m128i vq31prod9B = _mm_add_epi64(vprod9B, vprod9B);
+ const __m128i vq31prodCE = _mm_srli_epi64(vprodCE, 31);
+ const __m128i vq31prodDF = _mm_add_epi64(vprodDF, vprodDF);
+ const __m128i vq31prodGI = _mm_srli_epi64(vprodGI, 31);
+ const __m128i vq31prodHJ = _mm_add_epi64(vprodHJ, vprodHJ);
+ const __m128i vq31prodKM = _mm_srli_epi64(vprodKM, 31);
+ const __m128i vq31prodLN = _mm_add_epi64(vprodLN, vprodLN);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+ const __m128i vq31prod89AB = _mm_blend_epi16(vq31prod8A, vq31prod9B, 0xCC);
+ const __m128i vq31prodCDEF = _mm_blend_epi16(vq31prodCE, vq31prodDF, 0xCC);
+ const __m128i vq31prodGHIJ = _mm_blend_epi16(vq31prodGI, vq31prodHJ, 0xCC);
+ const __m128i vq31prodKLMN = _mm_blend_epi16(vq31prodKM, vq31prodLN, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+ const __m128i vremGHIJ =
+ _mm_add_epi32(_mm_and_si128(vq31prodGHIJ, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodGHIJ));
+ const __m128i vremKLMN =
+ _mm_add_epi32(_mm_and_si128(vq31prodKLMN, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodKLMN));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+ vaccGHIJ =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodGHIJ, vshift), _mm_cmpgt_epi32(vremGHIJ, vremainder_threshold));
+ vaccKLMN =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodKLMN, vshift), _mm_cmpgt_epi32(vremKLMN, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ i0 += 8;
+
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
+ const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ i1 += 8;
+
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ i2 += 8;
+
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
+ const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ i3 += 8;
+
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ i4 += 8;
+
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
+ const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ i5 += 8;
+
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
+ const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ i6 += 8;
+
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
+ const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ i7 += 8;
+
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
+ const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ i8 += 8;
+
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c b/src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c
new file mode 100644
index 0000000..c13f218
--- /dev/null
+++ b/src/qs8-dwconv/gen/up24x9-minmax-ssse3-mul16.c
@@ -0,0 +1,712 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <tmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+ __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 20 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
+ const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i0 += 24;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+ const __m128i vxi0x89ABCDEF = _mm_unpacklo_epi8(vi0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x89ABCDEF));
+ const __m128i vxk0x89ABCDEF = _mm_unpacklo_epi8(vk0x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x89ABCDEF));
+ const __m128i vxi0xGHIJKLMN = _mm_unpacklo_epi8(vi0xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0xGHIJKLMN));
+ const __m128i vxk0xGHIJKLMN = _mm_unpacklo_epi8(vk0xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0xGHIJKLMN));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x89ABCDEFlo = _mm_mullo_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0x89ABCDEFhi = _mm_mulhi_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF);
+ const __m128i vp0xGHIJKLMNlo = _mm_mullo_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+ const __m128i vp0xGHIJKLMNhi = _mm_mulhi_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp0x89ABCDEFlo, vp0x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp0xGHIJKLMNlo, vp0xGHIJKLMNhi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
+ const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i1 += 24;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+ const __m128i vxi1x89ABCDEF = _mm_unpacklo_epi8(vi1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x89ABCDEF));
+ const __m128i vxk1x89ABCDEF = _mm_unpacklo_epi8(vk1x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x89ABCDEF));
+ const __m128i vxi1xGHIJKLMN = _mm_unpacklo_epi8(vi1xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1xGHIJKLMN));
+ const __m128i vxk1xGHIJKLMN = _mm_unpacklo_epi8(vk1xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1xGHIJKLMN));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x89ABCDEFlo = _mm_mullo_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1x89ABCDEFhi = _mm_mulhi_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF);
+ const __m128i vp1xGHIJKLMNlo = _mm_mullo_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+ const __m128i vp1xGHIJKLMNhi = _mm_mulhi_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp1x89ABCDEFlo, vp1x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp1xGHIJKLMNlo, vp1xGHIJKLMNhi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
+ const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i2 += 24;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+ const __m128i vxi2x89ABCDEF = _mm_unpacklo_epi8(vi2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x89ABCDEF));
+ const __m128i vxk2x89ABCDEF = _mm_unpacklo_epi8(vk2x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x89ABCDEF));
+ const __m128i vxi2xGHIJKLMN = _mm_unpacklo_epi8(vi2xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2xGHIJKLMN));
+ const __m128i vxk2xGHIJKLMN = _mm_unpacklo_epi8(vk2xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2xGHIJKLMN));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x89ABCDEFlo = _mm_mullo_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2x89ABCDEFhi = _mm_mulhi_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF);
+ const __m128i vp2xGHIJKLMNlo = _mm_mullo_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+ const __m128i vp2xGHIJKLMNhi = _mm_mulhi_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp2x89ABCDEFlo, vp2x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp2xGHIJKLMNlo, vp2xGHIJKLMNhi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
+ const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ i3 += 24;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+ const __m128i vxi3x89ABCDEF = _mm_unpacklo_epi8(vi3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x89ABCDEF));
+ const __m128i vxk3x89ABCDEF = _mm_unpacklo_epi8(vk3x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x89ABCDEF));
+ const __m128i vxi3xGHIJKLMN = _mm_unpacklo_epi8(vi3xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3xGHIJKLMN));
+ const __m128i vxk3xGHIJKLMN = _mm_unpacklo_epi8(vk3xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3xGHIJKLMN));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x89ABCDEFlo = _mm_mullo_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3x89ABCDEFhi = _mm_mulhi_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF);
+ const __m128i vp3xGHIJKLMNlo = _mm_mullo_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+ const __m128i vp3xGHIJKLMNhi = _mm_mulhi_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp3x89ABCDEFlo, vp3x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp3xGHIJKLMNlo, vp3xGHIJKLMNhi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
+ const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ i4 += 24;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+ const __m128i vxi4x89ABCDEF = _mm_unpacklo_epi8(vi4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x89ABCDEF));
+ const __m128i vxk4x89ABCDEF = _mm_unpacklo_epi8(vk4x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x89ABCDEF));
+ const __m128i vxi4xGHIJKLMN = _mm_unpacklo_epi8(vi4xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4xGHIJKLMN));
+ const __m128i vxk4xGHIJKLMN = _mm_unpacklo_epi8(vk4xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4xGHIJKLMN));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x89ABCDEFlo = _mm_mullo_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4x89ABCDEFhi = _mm_mulhi_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF);
+ const __m128i vp4xGHIJKLMNlo = _mm_mullo_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+ const __m128i vp4xGHIJKLMNhi = _mm_mulhi_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp4x89ABCDEFlo, vp4x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp4xGHIJKLMNlo, vp4xGHIJKLMNhi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
+ const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ i5 += 24;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+ const __m128i vxi5x89ABCDEF = _mm_unpacklo_epi8(vi5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x89ABCDEF));
+ const __m128i vxk5x89ABCDEF = _mm_unpacklo_epi8(vk5x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x89ABCDEF));
+ const __m128i vxi5xGHIJKLMN = _mm_unpacklo_epi8(vi5xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5xGHIJKLMN));
+ const __m128i vxk5xGHIJKLMN = _mm_unpacklo_epi8(vk5xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5xGHIJKLMN));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x89ABCDEFlo = _mm_mullo_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5x89ABCDEFhi = _mm_mulhi_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF);
+ const __m128i vp5xGHIJKLMNlo = _mm_mullo_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+ const __m128i vp5xGHIJKLMNhi = _mm_mulhi_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp5x89ABCDEFlo, vp5x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp5xGHIJKLMNlo, vp5xGHIJKLMNhi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
+ const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
+ const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
+ i6 += 24;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+ const __m128i vxi6x89ABCDEF = _mm_unpacklo_epi8(vi6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x89ABCDEF));
+ const __m128i vxk6x89ABCDEF = _mm_unpacklo_epi8(vk6x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x89ABCDEF));
+ const __m128i vxi6xGHIJKLMN = _mm_unpacklo_epi8(vi6xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6xGHIJKLMN));
+ const __m128i vxk6xGHIJKLMN = _mm_unpacklo_epi8(vk6xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6xGHIJKLMN));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x89ABCDEFlo = _mm_mullo_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6x89ABCDEFhi = _mm_mulhi_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF);
+ const __m128i vp6xGHIJKLMNlo = _mm_mullo_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+ const __m128i vp6xGHIJKLMNhi = _mm_mulhi_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp6x89ABCDEFlo, vp6x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp6xGHIJKLMNlo, vp6xGHIJKLMNhi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
+ const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
+ const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
+ i7 += 24;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+ const __m128i vxi7x89ABCDEF = _mm_unpacklo_epi8(vi7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x89ABCDEF));
+ const __m128i vxk7x89ABCDEF = _mm_unpacklo_epi8(vk7x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x89ABCDEF));
+ const __m128i vxi7xGHIJKLMN = _mm_unpacklo_epi8(vi7xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7xGHIJKLMN));
+ const __m128i vxk7xGHIJKLMN = _mm_unpacklo_epi8(vk7xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7xGHIJKLMN));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x89ABCDEFlo = _mm_mullo_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7x89ABCDEFhi = _mm_mulhi_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF);
+ const __m128i vp7xGHIJKLMNlo = _mm_mullo_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+ const __m128i vp7xGHIJKLMNhi = _mm_mulhi_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp7x89ABCDEFlo, vp7x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp7xGHIJKLMNlo, vp7xGHIJKLMNhi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
+ const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
+ const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
+ i8 += 24;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+ const __m128i vxi8x89ABCDEF = _mm_unpacklo_epi8(vi8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x89ABCDEF));
+ const __m128i vxk8x89ABCDEF = _mm_unpacklo_epi8(vk8x89ABCDEF, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x89ABCDEF));
+ const __m128i vxi8xGHIJKLMN = _mm_unpacklo_epi8(vi8xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8xGHIJKLMN));
+ const __m128i vxk8xGHIJKLMN = _mm_unpacklo_epi8(vk8xGHIJKLMN, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8xGHIJKLMN));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x89ABCDEFlo = _mm_mullo_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8x89ABCDEFhi = _mm_mulhi_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF);
+ const __m128i vp8xGHIJKLMNlo = _mm_mullo_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+ const __m128i vp8xGHIJKLMNhi = _mm_mulhi_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vp8x89ABCDEFlo, vp8x89ABCDEFhi));
+ vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_unpacklo_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+ vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_unpackhi_epi16(vp8xGHIJKLMNlo, vp8xGHIJKLMNhi));
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+ const __m128i vnmask89AB = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc89AB);
+ const __m128i vnmaskCDEF = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccCDEF);
+ const __m128i vnmaskGHIJ = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccGHIJ);
+ const __m128i vnmaskKLMN = _mm_cmpgt_epi32(_mm_setzero_si128(), vaccKLMN);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+ const __m128i vabsacc89AB = _mm_abs_epi32(vacc89AB);
+ const __m128i vabsaccCDEF = _mm_abs_epi32(vaccCDEF);
+ const __m128i vabsaccGHIJ = _mm_abs_epi32(vaccGHIJ);
+ const __m128i vabsaccKLMN = _mm_abs_epi32(vaccKLMN);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+ const __m128i vabsacc9B = _mm_srli_epi64(vabsacc89AB, 32);
+ const __m128i vabsaccDF = _mm_srli_epi64(vabsaccCDEF, 32);
+ const __m128i vabsaccHJ = _mm_srli_epi64(vabsaccGHIJ, 32);
+ const __m128i vabsaccLN = _mm_srli_epi64(vabsaccKLMN, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+ const __m128i vabsprod8A = _mm_mul_epu32(vabsacc89AB, vmultiplier);
+ const __m128i vabsprod9B = _mm_mul_epu32(vabsacc9B, vmultiplier);
+ const __m128i vabsprodCE = _mm_mul_epu32(vabsaccCDEF, vmultiplier);
+ const __m128i vabsprodDF = _mm_mul_epu32(vabsaccDF, vmultiplier);
+ const __m128i vabsprodGI = _mm_mul_epu32(vabsaccGHIJ, vmultiplier);
+ const __m128i vabsprodHJ = _mm_mul_epu32(vabsaccHJ, vmultiplier);
+ const __m128i vabsprodKM = _mm_mul_epu32(vabsaccKLMN, vmultiplier);
+ const __m128i vabsprodLN = _mm_mul_epu32(vabsaccLN, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask8A = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask9B = _mm_shuffle_epi32(vnmask89AB, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskCE = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskDF = _mm_shuffle_epi32(vnmaskCDEF, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskGI = _mm_shuffle_epi32(vnmaskGHIJ, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskHJ = _mm_shuffle_epi32(vnmaskGHIJ, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmaskKM = _mm_shuffle_epi32(vnmaskKLMN, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmaskLN = _mm_shuffle_epi32(vnmaskKLMN, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+ const __m128i vprod8A = _mm_sub_epi64(_mm_xor_si128(vabsprod8A, vnmask8A), vnmask8A);
+ const __m128i vprod9B = _mm_sub_epi64(_mm_xor_si128(vabsprod9B, vnmask9B), vnmask9B);
+ const __m128i vprodCE = _mm_sub_epi64(_mm_xor_si128(vabsprodCE, vnmaskCE), vnmaskCE);
+ const __m128i vprodDF = _mm_sub_epi64(_mm_xor_si128(vabsprodDF, vnmaskDF), vnmaskDF);
+ const __m128i vprodGI = _mm_sub_epi64(_mm_xor_si128(vabsprodGI, vnmaskGI), vnmaskGI);
+ const __m128i vprodHJ = _mm_sub_epi64(_mm_xor_si128(vabsprodHJ, vnmaskHJ), vnmaskHJ);
+ const __m128i vprodKM = _mm_sub_epi64(_mm_xor_si128(vabsprodKM, vnmaskKM), vnmaskKM);
+ const __m128i vprodLN = _mm_sub_epi64(_mm_xor_si128(vabsprodLN, vnmaskLN), vnmaskLN);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+ const __m128i vq31prod8A = _mm_srli_epi64(_mm_add_epi64(vprod8A, vrounding), 31);
+ const __m128i vq31prod9B = _mm_srli_epi64(_mm_add_epi64(vprod9B, vrounding), 31);
+ const __m128i vq31prodCE = _mm_srli_epi64(_mm_add_epi64(vprodCE, vrounding), 31);
+ const __m128i vq31prodDF = _mm_srli_epi64(_mm_add_epi64(vprodDF, vrounding), 31);
+ const __m128i vq31prodGI = _mm_srli_epi64(_mm_add_epi64(vprodGI, vrounding), 31);
+ const __m128i vq31prodHJ = _mm_srli_epi64(_mm_add_epi64(vprodHJ, vrounding), 31);
+ const __m128i vq31prodKM = _mm_srli_epi64(_mm_add_epi64(vprodKM, vrounding), 31);
+ const __m128i vq31prodLN = _mm_srli_epi64(_mm_add_epi64(vprodLN, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod8A9B = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod8A), _mm_castsi128_ps(vq31prod9B), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodCEDF = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodCE), _mm_castsi128_ps(vq31prodDF), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodGIHJ = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodGI), _mm_castsi128_ps(vq31prodHJ), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prodKMLN = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prodKM), _mm_castsi128_ps(vq31prodLN), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod89AB = _mm_shuffle_epi32(vq31prod8A9B, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodCDEF = _mm_shuffle_epi32(vq31prodCEDF, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodGHIJ = _mm_shuffle_epi32(vq31prodGIHJ, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prodKLMN = _mm_shuffle_epi32(vq31prodKMLN, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+ const __m128i vremGHIJ =
+ _mm_add_epi32(_mm_and_si128(vq31prodGHIJ, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodGHIJ));
+ const __m128i vremKLMN =
+ _mm_add_epi32(_mm_and_si128(vq31prodKLMN, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodKLMN));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+ vaccGHIJ =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodGHIJ, vshift), _mm_cmpgt_epi32(vremGHIJ, vremainder_threshold));
+ vaccKLMN =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodKLMN, vshift), _mm_cmpgt_epi32(vremKLMN, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c b/src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c
new file mode 100644
index 0000000..9d1c1d7
--- /dev/null
+++ b/src/qs8-dwconv/gen/up24x9-minmax-xop-mul16.c
@@ -0,0 +1,636 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#ifdef __GNUC__
+ #include <x86intrin.h>
+#else
+ #include <immintrin.h>
+ #include <ammintrin.h>
+#endif
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 24; c -= 24) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+ __m128i vacc89AB = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t)));
+ __m128i vaccCDEF = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 12 * sizeof(int32_t)));
+ __m128i vaccGHIJ = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 16 * sizeof(int32_t)));
+ __m128i vaccKLMN = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 20 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
+ __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(vi0x89ABCDEF);
+ const __m128i vk0x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ __m128i vxk0x89ABCDEF = _mm_cvtepi8_epi16(vk0x89ABCDEF);
+ const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
+ __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(vi0xGHIJKLMN);
+ const __m128i vk0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ __m128i vxk0xGHIJKLMN = _mm_cvtepi8_epi16(vk0xGHIJKLMN);
+ i0 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);
+ vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);
+ vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF, vacc89AB);
+ vxi0x89ABCDEF = _mm_unpackhi_epi64(vxi0x89ABCDEF, vxi0x89ABCDEF);
+ vxk0x89ABCDEF = _mm_unpackhi_epi64(vxk0x89ABCDEF, vxk0x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN, vaccGHIJ);
+ vxi0xGHIJKLMN = _mm_unpackhi_epi64(vxi0xGHIJKLMN, vxi0xGHIJKLMN);
+ vxk0xGHIJKLMN = _mm_unpackhi_epi64(vxk0xGHIJKLMN, vxk0xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi0x89ABCDEF, vxk0x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi0xGHIJKLMN, vxk0xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
+ __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(vi1x89ABCDEF);
+ const __m128i vk1x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ __m128i vxk1x89ABCDEF = _mm_cvtepi8_epi16(vk1x89ABCDEF);
+ const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
+ __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(vi1xGHIJKLMN);
+ const __m128i vk1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ __m128i vxk1xGHIJKLMN = _mm_cvtepi8_epi16(vk1xGHIJKLMN);
+ i1 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+ vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+ vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vacc89AB);
+ vxi1x89ABCDEF = _mm_unpackhi_epi64(vxi1x89ABCDEF, vxi1x89ABCDEF);
+ vxk1x89ABCDEF = _mm_unpackhi_epi64(vxk1x89ABCDEF, vxk1x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN, vaccGHIJ);
+ vxi1xGHIJKLMN = _mm_unpackhi_epi64(vxi1xGHIJKLMN, vxi1xGHIJKLMN);
+ vxk1xGHIJKLMN = _mm_unpackhi_epi64(vxk1xGHIJKLMN, vxk1xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi1x89ABCDEF, vxk1x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi1xGHIJKLMN, vxk1xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
+ __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(vi2x89ABCDEF);
+ const __m128i vk2x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ __m128i vxk2x89ABCDEF = _mm_cvtepi8_epi16(vk2x89ABCDEF);
+ const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
+ __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(vi2xGHIJKLMN);
+ const __m128i vk2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ __m128i vxk2xGHIJKLMN = _mm_cvtepi8_epi16(vk2xGHIJKLMN);
+ i2 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+ vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+ vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF, vacc89AB);
+ vxi2x89ABCDEF = _mm_unpackhi_epi64(vxi2x89ABCDEF, vxi2x89ABCDEF);
+ vxk2x89ABCDEF = _mm_unpackhi_epi64(vxk2x89ABCDEF, vxk2x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN, vaccGHIJ);
+ vxi2xGHIJKLMN = _mm_unpackhi_epi64(vxi2xGHIJKLMN, vxi2xGHIJKLMN);
+ vxk2xGHIJKLMN = _mm_unpackhi_epi64(vxk2xGHIJKLMN, vxk2xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi2x89ABCDEF, vxk2x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi2xGHIJKLMN, vxk2xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 72 * sizeof(int8_t)));
+ __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
+ __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(vi3x89ABCDEF);
+ const __m128i vk3x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 80 * sizeof(int8_t)));
+ __m128i vxk3x89ABCDEF = _mm_cvtepi8_epi16(vk3x89ABCDEF);
+ const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
+ __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(vi3xGHIJKLMN);
+ const __m128i vk3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 88 * sizeof(int8_t)));
+ __m128i vxk3xGHIJKLMN = _mm_cvtepi8_epi16(vk3xGHIJKLMN);
+ i3 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+ vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+ vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vacc89AB);
+ vxi3x89ABCDEF = _mm_unpackhi_epi64(vxi3x89ABCDEF, vxi3x89ABCDEF);
+ vxk3x89ABCDEF = _mm_unpackhi_epi64(vxk3x89ABCDEF, vxk3x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN, vaccGHIJ);
+ vxi3xGHIJKLMN = _mm_unpackhi_epi64(vxi3xGHIJKLMN, vxi3xGHIJKLMN);
+ vxk3xGHIJKLMN = _mm_unpackhi_epi64(vxk3xGHIJKLMN, vxk3xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi3x89ABCDEF, vxk3x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi3xGHIJKLMN, vxk3xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 96 * sizeof(int8_t)));
+ __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
+ __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(vi4x89ABCDEF);
+ const __m128i vk4x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 104 * sizeof(int8_t)));
+ __m128i vxk4x89ABCDEF = _mm_cvtepi8_epi16(vk4x89ABCDEF);
+ const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
+ __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(vi4xGHIJKLMN);
+ const __m128i vk4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 112 * sizeof(int8_t)));
+ __m128i vxk4xGHIJKLMN = _mm_cvtepi8_epi16(vk4xGHIJKLMN);
+ i4 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+ vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+ vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF, vacc89AB);
+ vxi4x89ABCDEF = _mm_unpackhi_epi64(vxi4x89ABCDEF, vxi4x89ABCDEF);
+ vxk4x89ABCDEF = _mm_unpackhi_epi64(vxk4x89ABCDEF, vxk4x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN, vaccGHIJ);
+ vxi4xGHIJKLMN = _mm_unpackhi_epi64(vxi4xGHIJKLMN, vxi4xGHIJKLMN);
+ vxk4xGHIJKLMN = _mm_unpackhi_epi64(vxk4xGHIJKLMN, vxk4xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi4x89ABCDEF, vxk4x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi4xGHIJKLMN, vxk4xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 120 * sizeof(int8_t)));
+ __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
+ __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(vi5x89ABCDEF);
+ const __m128i vk5x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 128 * sizeof(int8_t)));
+ __m128i vxk5x89ABCDEF = _mm_cvtepi8_epi16(vk5x89ABCDEF);
+ const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
+ __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(vi5xGHIJKLMN);
+ const __m128i vk5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 136 * sizeof(int8_t)));
+ __m128i vxk5xGHIJKLMN = _mm_cvtepi8_epi16(vk5xGHIJKLMN);
+ i5 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+ vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+ vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vacc89AB);
+ vxi5x89ABCDEF = _mm_unpackhi_epi64(vxi5x89ABCDEF, vxi5x89ABCDEF);
+ vxk5x89ABCDEF = _mm_unpackhi_epi64(vxk5x89ABCDEF, vxk5x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN, vaccGHIJ);
+ vxi5xGHIJKLMN = _mm_unpackhi_epi64(vxi5xGHIJKLMN, vxi5xGHIJKLMN);
+ vxk5xGHIJKLMN = _mm_unpackhi_epi64(vxk5xGHIJKLMN, vxk5xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi5x89ABCDEF, vxk5x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi5xGHIJKLMN, vxk5xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 144 * sizeof(int8_t)));
+ __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
+ __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(vi6x89ABCDEF);
+ const __m128i vk6x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 152 * sizeof(int8_t)));
+ __m128i vxk6x89ABCDEF = _mm_cvtepi8_epi16(vk6x89ABCDEF);
+ const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
+ __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(vi6xGHIJKLMN);
+ const __m128i vk6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 160 * sizeof(int8_t)));
+ __m128i vxk6xGHIJKLMN = _mm_cvtepi8_epi16(vk6xGHIJKLMN);
+ i6 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+ vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+ vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF, vacc89AB);
+ vxi6x89ABCDEF = _mm_unpackhi_epi64(vxi6x89ABCDEF, vxi6x89ABCDEF);
+ vxk6x89ABCDEF = _mm_unpackhi_epi64(vxk6x89ABCDEF, vxk6x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN, vaccGHIJ);
+ vxi6xGHIJKLMN = _mm_unpackhi_epi64(vxi6xGHIJKLMN, vxi6xGHIJKLMN);
+ vxk6xGHIJKLMN = _mm_unpackhi_epi64(vxk6xGHIJKLMN, vxk6xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi6x89ABCDEF, vxk6x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi6xGHIJKLMN, vxk6xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 168 * sizeof(int8_t)));
+ __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ const __m128i vi7x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i7 + 8));
+ __m128i vxi7x89ABCDEF = _mm_cvtepi8_epi16(vi7x89ABCDEF);
+ const __m128i vk7x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 176 * sizeof(int8_t)));
+ __m128i vxk7x89ABCDEF = _mm_cvtepi8_epi16(vk7x89ABCDEF);
+ const __m128i vi7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i7 + 16));
+ __m128i vxi7xGHIJKLMN = _mm_cvtepi8_epi16(vi7xGHIJKLMN);
+ const __m128i vk7xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 184 * sizeof(int8_t)));
+ __m128i vxk7xGHIJKLMN = _mm_cvtepi8_epi16(vk7xGHIJKLMN);
+ i7 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+ vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+ vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vacc89AB);
+ vxi7x89ABCDEF = _mm_unpackhi_epi64(vxi7x89ABCDEF, vxi7x89ABCDEF);
+ vxk7x89ABCDEF = _mm_unpackhi_epi64(vxk7x89ABCDEF, vxk7x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN, vaccGHIJ);
+ vxi7xGHIJKLMN = _mm_unpackhi_epi64(vxi7xGHIJKLMN, vxi7xGHIJKLMN);
+ vxk7xGHIJKLMN = _mm_unpackhi_epi64(vxk7xGHIJKLMN, vxk7xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi7x89ABCDEF, vxk7x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi7xGHIJKLMN, vxk7xGHIJKLMN, vaccKLMN);
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 192 * sizeof(int8_t)));
+ __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ const __m128i vi8x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i8 + 8));
+ __m128i vxi8x89ABCDEF = _mm_cvtepi8_epi16(vi8x89ABCDEF);
+ const __m128i vk8x89ABCDEF = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 200 * sizeof(int8_t)));
+ __m128i vxk8x89ABCDEF = _mm_cvtepi8_epi16(vk8x89ABCDEF);
+ const __m128i vi8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i8 + 16));
+ __m128i vxi8xGHIJKLMN = _mm_cvtepi8_epi16(vi8xGHIJKLMN);
+ const __m128i vk8xGHIJKLMN = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24 * sizeof(int32_t) + 208 * sizeof(int8_t)));
+ __m128i vxk8xGHIJKLMN = _mm_cvtepi8_epi16(vk8xGHIJKLMN);
+ i8 += 24;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+ vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+ vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+ vacc89AB = _mm_maccd_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF, vacc89AB);
+ vxi8x89ABCDEF = _mm_unpackhi_epi64(vxi8x89ABCDEF, vxi8x89ABCDEF);
+ vxk8x89ABCDEF = _mm_unpackhi_epi64(vxk8x89ABCDEF, vxk8x89ABCDEF);
+ vaccGHIJ = _mm_maccd_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN, vaccGHIJ);
+ vxi8xGHIJKLMN = _mm_unpackhi_epi64(vxi8xGHIJKLMN, vxi8xGHIJKLMN);
+ vxk8xGHIJKLMN = _mm_unpackhi_epi64(vxk8xGHIJKLMN, vxk8xGHIJKLMN);
+
+ vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+ vaccCDEF = _mm_maccd_epi16(vxi8x89ABCDEF, vxk8x89ABCDEF, vaccCDEF);
+ vaccKLMN = _mm_maccd_epi16(vxi8xGHIJKLMN, vxk8xGHIJKLMN, vaccKLMN);
+
+ w = (const void*) ((uintptr_t) w + 24 * sizeof(int32_t) + 216 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+ const __m128i vacc9B = _mm_srli_epi64(vacc89AB, 32);
+ const __m128i vaccDF = _mm_srli_epi64(vaccCDEF, 32);
+ const __m128i vaccHJ = _mm_srli_epi64(vaccGHIJ, 32);
+ const __m128i vaccLN = _mm_srli_epi64(vaccKLMN, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+ const __m128i vprod8A = _mm_add_epi64(_mm_mul_epi32(vacc89AB, vmultiplier), vrounding);
+ const __m128i vprodCE = _mm_add_epi64(_mm_mul_epi32(vaccCDEF, vmultiplier), vrounding);
+ const __m128i vprodGI = _mm_add_epi64(_mm_mul_epi32(vaccGHIJ, vmultiplier), vrounding);
+ const __m128i vprodKM = _mm_add_epi64(_mm_mul_epi32(vaccKLMN, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+ const __m128i vprod9B = _mm_add_epi64(_mm_mul_epi32(vacc9B, vmultiplier), vrounding);
+ const __m128i vprodDF = _mm_add_epi64(_mm_mul_epi32(vaccDF, vmultiplier), vrounding);
+ const __m128i vprodHJ = _mm_add_epi64(_mm_mul_epi32(vaccHJ, vmultiplier), vrounding);
+ const __m128i vprodLN = _mm_add_epi64(_mm_mul_epi32(vaccLN, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+ const __m128i vq31prod8A = _mm_srli_epi64(vprod8A, 31);
+ const __m128i vq31prod9B = _mm_add_epi64(vprod9B, vprod9B);
+ const __m128i vq31prodCE = _mm_srli_epi64(vprodCE, 31);
+ const __m128i vq31prodDF = _mm_add_epi64(vprodDF, vprodDF);
+ const __m128i vq31prodGI = _mm_srli_epi64(vprodGI, 31);
+ const __m128i vq31prodHJ = _mm_add_epi64(vprodHJ, vprodHJ);
+ const __m128i vq31prodKM = _mm_srli_epi64(vprodKM, 31);
+ const __m128i vq31prodLN = _mm_add_epi64(vprodLN, vprodLN);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+ const __m128i vq31prod89AB = _mm_blend_epi16(vq31prod8A, vq31prod9B, 0xCC);
+ const __m128i vq31prodCDEF = _mm_blend_epi16(vq31prodCE, vq31prodDF, 0xCC);
+ const __m128i vq31prodGHIJ = _mm_blend_epi16(vq31prodGI, vq31prodHJ, 0xCC);
+ const __m128i vq31prodKLMN = _mm_blend_epi16(vq31prodKM, vq31prodLN, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+ const __m128i vrem89AB =
+ _mm_add_epi32(_mm_and_si128(vq31prod89AB, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod89AB));
+ const __m128i vremCDEF =
+ _mm_add_epi32(_mm_and_si128(vq31prodCDEF, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodCDEF));
+ const __m128i vremGHIJ =
+ _mm_add_epi32(_mm_and_si128(vq31prodGHIJ, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodGHIJ));
+ const __m128i vremKLMN =
+ _mm_add_epi32(_mm_and_si128(vq31prodKLMN, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prodKLMN));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+ vacc89AB =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod89AB, vshift), _mm_cmpgt_epi32(vrem89AB, vremainder_threshold));
+ vaccCDEF =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodCDEF, vshift), _mm_cmpgt_epi32(vremCDEF, vremainder_threshold));
+ vaccGHIJ =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodGHIJ, vshift), _mm_cmpgt_epi32(vremGHIJ, vremainder_threshold));
+ vaccKLMN =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prodKLMN, vshift), _mm_cmpgt_epi32(vremKLMN, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+ __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
+ __m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+ vout89ABCDEF = _mm_min_epi16(_mm_max_epi16(vout89ABCDEF, voutput_min), voutput_max);
+ voutGHIJKLMN = _mm_min_epi16(_mm_max_epi16(voutGHIJKLMN, voutput_min), voutput_max);
+
+ __m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
+ __m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
+
+ _mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
+ _mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
+ output += 24;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + 24 * sizeof(int32_t));
+ do {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) k);
+ __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+ i0 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);
+ vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);
+ vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) (k + 24));
+ __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+ i1 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+ vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+ vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) (k + 48));
+ __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+ i2 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+ vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+ vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) (k + 72));
+ __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+ i3 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+ vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+ vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) (k + 96));
+ __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+ i4 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+ vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+ vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) (k + 120));
+ __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+ i5 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+ vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+ vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) (k + 144));
+ __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+ i6 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+ vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+ vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) (k + 168));
+ __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+ i7 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+ vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+ vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) (k + 192));
+ __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+ i8 += 8;
+
+
+ vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+ vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+ vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+ vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+ const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+ const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+ const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+ const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+ const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+ const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+ const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+ const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+ const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+ const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if XNN_LIKELY(c >= 8) {
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ c -= 8;
+ } else {
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+ output += 1;
+ }
+ c = 0;
+ }
+ } while (c != 0);
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c b/src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c
new file mode 100644
index 0000000..1200a16
--- /dev/null
+++ b/src/qs8-dwconv/gen/up8x9-minmax-sse2-mul16.c
@@ -0,0 +1,477 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <emmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+// Depthwise convolution microkernel for signed 8-bit (QS8) tensors with
+// 9 kernel taps (e.g. a 3x3 kernel), processing channels in groups of 8
+// ("up8x9"), built on SSE2 16x16-bit multiplications (the "mul16" variant).
+void xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ // Set up the 9 input row pointers for this output pixel. Pointers that
+ // equal the shared `zero` buffer are left untouched (no input_offset).
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ // Main loop: 8 channels per iteration.
+ for (; c >= 8; c -= 8) {
+ // Start from the per-channel int32 bias stored at the head of each
+ // 8-channel weight group.
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ // Tap 0: load 8 int8 inputs and 8 int8 weights, sign-extend both to
+ // 16 bits (unpack against a sign mask computed with _mm_cmpgt_epi8),
+ // multiply in 16 bits, then widen the 16x16 products to 32 bits by
+ // interleaving the low/high product halves. Taps 1-8 below repeat
+ // the same pattern at successive 8-byte weight offsets.
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ // Advance past this group's bias (8 x int32) and weights (9 taps x 8 int8).
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
+
+ // Requantize the int32 accumulators with a Q31 fixed-point multiplier.
+ // SSE2 has no signed 32x32->64-bit multiply, so the absolute values are
+ // multiplied with _mm_mul_epu32 and the sign is restored afterwards
+ // (xor/sub with the negative-lane mask).
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ // Round and keep the high Q31 part of each 64-bit product.
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ // Merge the even (0,2 / 4,6) and odd (1,3 / 5,7) results and shuffle
+ // them back into channel order.
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ // Arithmetic right shift with rounding correction: compute the shifted-out
+ // remainder (biased by -1 for negative inputs) and add one to the shifted
+ // result wherever the remainder exceeds the threshold.
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ // Add the output zero point while packing to 16 bits with saturation,
+ // clamp to [output_min, output_max], then pack to 8 bits and store all
+ // 8 output channels.
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ // Tail: final 1-7 channels. Executes at most once since the group size
+ // is 8. The same 9-tap accumulation and requantization sequence is used;
+ // only the final store is broken into 4/2/1-element pieces.
+ // NOTE(review): the loads below still read full 8-byte chunks even when
+ // c < 8 — presumably the caller guarantees readable padding past the
+ // last channel, as is typical for these microkernels; confirm against
+ // the template/buffer layout.
+ if XNN_UNLIKELY(c != 0) {
+ {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ // NOTE(review): only the bias portion of `w` is skipped here (unlike
+ // the main loop, which also skips the 72 weight bytes). This appears
+ // harmless because the tail runs at most once and `w` is not read
+ // again afterwards — confirm against the generator template.
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ // Same requantization sequence as the main loop (see comments above).
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_sub_epi32(_mm_xor_si128(vacc0123, vnmask0123), vnmask0123);
+ const __m128i vabsacc4567 = _mm_sub_epi32(_mm_xor_si128(vacc4567, vnmask4567), vnmask4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ // Store the remaining channels in 4-, 2-, then 1-element pieces,
+ // shifting the consumed lanes out of the vector between stores.
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ // NOTE(review): the (int32_t) value is implicitly truncated to int8_t
+ // by the assignment; the SSE4.1 variant uses _mm_extract_epi8 with an
+ // explicit (int8_t) cast for the same store.
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c b/src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c
new file mode 100644
index 0000000..5bd8d9f
--- /dev/null
+++ b/src/qs8-dwconv/gen/up8x9-minmax-sse41-mul16.c
@@ -0,0 +1,437 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+// Depthwise convolution microkernel: QS8 (signed 8-bit) inputs/outputs,
+// 9 taps per output pixel, 8 channels per pass ("up8x9"), using SSE4.1
+// 16x16-bit multiplies ("mul16") with Q31 fixed-point requantization.
+void xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16(
+    size_t channels,
+    size_t output_width,
+    const int8_t** input,
+    const void* weights,
+    int8_t* output,
+    size_t input_stride,
+    size_t output_increment,
+    size_t input_offset,
+    const int8_t* zero,
+    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  do {
+    // Load the 9 input-row pointers for this output pixel. Rows equal to
+    // `zero` point at the shared zero buffer and are not offset by
+    // input_offset.
+    const int8_t* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {
+      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+    }
+    const int8_t* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+    }
+    const int8_t* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+    }
+    const int8_t* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+    }
+    const int8_t* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+    }
+    const int8_t* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+    }
+    const int8_t* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+    }
+    const int8_t* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+    }
+    const int8_t* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+    // Main loop: 8 channels per iteration. Weight layout per group:
+    // 8 int32 biases followed by 9 groups of 8 int8 kernel taps.
+    size_t c = channels;
+    const int8_t* w = weights;
+    for (; c >= 8; c -= 8) {
+      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+      // Tap 0: sign-extend 8 input bytes and 8 kernel bytes to 16 bits,
+      // multiply (lo/hi halves), and widen the 16-bit products to 32 bits
+      // into the two accumulators. Taps 1-8 repeat the same pattern.
+      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+      const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+      const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+      i0 += 8;
+
+
+      const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+      const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+      const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+      const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+      i1 += 8;
+
+
+      const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+      const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+      const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+      const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+      i2 += 8;
+
+
+      const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+      const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+      const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+      const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+      i3 += 8;
+
+
+      const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+      const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+      const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+      const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+      i4 += 8;
+
+
+      const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+      const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+      const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+      const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+      i5 += 8;
+
+
+      const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+      const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+      const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+      const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+      i6 += 8;
+
+
+      const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+      const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+      const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+      const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+      i7 += 8;
+
+
+      const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+      const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+      const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+      const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+      i8 += 8;
+
+
+      const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+      const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+      vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+      vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
+
+      // Requantize: 64-bit products of accumulators with the Q31 multiplier
+      // (even lanes via _mm_mul_epi32, odd lanes after a 32-bit shift),
+      // rounding added, then >>31; odd lanes are doubled instead of shifted
+      // since their product already sits 32 bits higher. Results are
+      // re-interleaved with _mm_blend_epi16.
+      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+      const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+      const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+      const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+      const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+      const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+      const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+      const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+      const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+      const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+      const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+      const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+      const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+      // Arithmetic right shift with round-to-nearest, ties away from zero:
+      // compare the masked remainder against the threshold (biased for
+      // negative values) and subtract the all-ones compare mask (i.e. +1).
+      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+      const __m128i vrem0123 =
+        _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+      const __m128i vrem4567 =
+        _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+      vacc0123 =
+        _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+      vacc4567 =
+        _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+      // Pack to 16 bits with saturation, add the output zero point, clamp to
+      // [output_min, output_max], then pack to 8 bits and store 8 bytes.
+      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+      _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+      output += 8;
+    }
+    // Remainder: 1-7 trailing channels, computed the same way as a full
+    // group; `w` is advanced past the biases only, since this is the last
+    // group for this output pixel.
+    if XNN_UNLIKELY(c != 0) {
+      {
+        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+        const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+        const __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+        i0 += 8;
+
+
+        const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+        const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+        const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+        const __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+        i1 += 8;
+
+
+        const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+        const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+        const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+        const __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+        i2 += 8;
+
+
+        const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+        const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+        const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+        const __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+        i3 += 8;
+
+
+        const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+        const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+        const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+        const __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+        i4 += 8;
+
+
+        const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+        const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+        const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+        const __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+        i5 += 8;
+
+
+        const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+        const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+        const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+        const __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+        i6 += 8;
+
+
+        const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+        const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+        const __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+        const __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+        i7 += 8;
+
+
+        const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+        const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+        const __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+        const __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+        i8 += 8;
+
+
+        const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+        const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+        vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+        vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+        w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+        // Same Q31 requantization sequence as the main loop above.
+        const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+        const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+        const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+        const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+        const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+        const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+        const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+        const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+        const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+        const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+        const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+        const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+        const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+        const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+        const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+        const __m128i vrem0123 =
+          _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+        const __m128i vrem4567 =
+          _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+        const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+        const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+        vacc0123 =
+          _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+        vacc4567 =
+          _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+        const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+        vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+        // Store 4/2/1 trailing bytes according to the low bits of c,
+        // shifting consumed lanes out of the vector between stores.
+        if (c & 4) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (c & 2) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (c & 1) {
+          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
+          output += 1;
+        }
+      }
+    }
+
+    output = (int8_t*) ((uintptr_t) output + output_increment);
+  } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c b/src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c
new file mode 100644
index 0000000..0b752eb
--- /dev/null
+++ b/src/qs8-dwconv/gen/up8x9-minmax-ssse3-mul16.c
@@ -0,0 +1,477 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#include <tmmintrin.h>
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ const int8_t* i0 = input[0];
+ assert(i0 != NULL);
+ if XNN_UNPREDICTABLE(i0 != zero) {
+ i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+ }
+ const int8_t* i1 = input[1];
+ assert(i1 != NULL);
+ if XNN_UNPREDICTABLE(i1 != zero) {
+ i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+ }
+ const int8_t* i2 = input[2];
+ assert(i2 != NULL);
+ if XNN_UNPREDICTABLE(i2 != zero) {
+ i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+ }
+ const int8_t* i3 = input[3];
+ assert(i3 != NULL);
+ if XNN_UNPREDICTABLE(i3 != zero) {
+ i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+ }
+ const int8_t* i4 = input[4];
+ assert(i4 != NULL);
+ if XNN_UNPREDICTABLE(i4 != zero) {
+ i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+ }
+ const int8_t* i5 = input[5];
+ assert(i5 != NULL);
+ if XNN_UNPREDICTABLE(i5 != zero) {
+ i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+ }
+ const int8_t* i6 = input[6];
+ assert(i6 != NULL);
+ if XNN_UNPREDICTABLE(i6 != zero) {
+ i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+ }
+ const int8_t* i7 = input[7];
+ assert(i7 != NULL);
+ if XNN_UNPREDICTABLE(i7 != zero) {
+ i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+ }
+ const int8_t* i8 = input[8];
+ assert(i8 != NULL);
+ if XNN_UNPREDICTABLE(i8 != zero) {
+ i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= 8; c -= 8) {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ _mm_storel_epi64((__m128i*) output, vout0123456701234567);
+ output += 8;
+ }
+ if XNN_UNLIKELY(c != 0) {
+ {
+ __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+ const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+ const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+ i0 += 8;
+
+ const __m128i vxi0x01234567 = _mm_unpacklo_epi8(vi0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi0x01234567));
+ const __m128i vxk0x01234567 = _mm_unpacklo_epi8(vk0x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk0x01234567));
+
+ const __m128i vp0x01234567lo = _mm_mullo_epi16(vxi0x01234567, vxk0x01234567);
+ const __m128i vp0x01234567hi = _mm_mulhi_epi16(vxi0x01234567, vxk0x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp0x01234567lo, vp0x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp0x01234567lo, vp0x01234567hi));
+
+ const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+ const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+ i1 += 8;
+
+ const __m128i vxi1x01234567 = _mm_unpacklo_epi8(vi1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi1x01234567));
+ const __m128i vxk1x01234567 = _mm_unpacklo_epi8(vk1x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk1x01234567));
+
+ const __m128i vp1x01234567lo = _mm_mullo_epi16(vxi1x01234567, vxk1x01234567);
+ const __m128i vp1x01234567hi = _mm_mulhi_epi16(vxi1x01234567, vxk1x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp1x01234567lo, vp1x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp1x01234567lo, vp1x01234567hi));
+
+ const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+ const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+ i2 += 8;
+
+ const __m128i vxi2x01234567 = _mm_unpacklo_epi8(vi2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi2x01234567));
+ const __m128i vxk2x01234567 = _mm_unpacklo_epi8(vk2x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk2x01234567));
+
+ const __m128i vp2x01234567lo = _mm_mullo_epi16(vxi2x01234567, vxk2x01234567);
+ const __m128i vp2x01234567hi = _mm_mulhi_epi16(vxi2x01234567, vxk2x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp2x01234567lo, vp2x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp2x01234567lo, vp2x01234567hi));
+
+ const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+ const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+ i3 += 8;
+
+ const __m128i vxi3x01234567 = _mm_unpacklo_epi8(vi3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi3x01234567));
+ const __m128i vxk3x01234567 = _mm_unpacklo_epi8(vk3x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk3x01234567));
+
+ const __m128i vp3x01234567lo = _mm_mullo_epi16(vxi3x01234567, vxk3x01234567);
+ const __m128i vp3x01234567hi = _mm_mulhi_epi16(vxi3x01234567, vxk3x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp3x01234567lo, vp3x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp3x01234567lo, vp3x01234567hi));
+
+ const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+ const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+ i4 += 8;
+
+ const __m128i vxi4x01234567 = _mm_unpacklo_epi8(vi4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi4x01234567));
+ const __m128i vxk4x01234567 = _mm_unpacklo_epi8(vk4x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk4x01234567));
+
+ const __m128i vp4x01234567lo = _mm_mullo_epi16(vxi4x01234567, vxk4x01234567);
+ const __m128i vp4x01234567hi = _mm_mulhi_epi16(vxi4x01234567, vxk4x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp4x01234567lo, vp4x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp4x01234567lo, vp4x01234567hi));
+
+ const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+ const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+ i5 += 8;
+
+ const __m128i vxi5x01234567 = _mm_unpacklo_epi8(vi5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi5x01234567));
+ const __m128i vxk5x01234567 = _mm_unpacklo_epi8(vk5x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk5x01234567));
+
+ const __m128i vp5x01234567lo = _mm_mullo_epi16(vxi5x01234567, vxk5x01234567);
+ const __m128i vp5x01234567hi = _mm_mulhi_epi16(vxi5x01234567, vxk5x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp5x01234567lo, vp5x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp5x01234567lo, vp5x01234567hi));
+
+ const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+ const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+ i6 += 8;
+
+ const __m128i vxi6x01234567 = _mm_unpacklo_epi8(vi6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi6x01234567));
+ const __m128i vxk6x01234567 = _mm_unpacklo_epi8(vk6x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk6x01234567));
+
+ const __m128i vp6x01234567lo = _mm_mullo_epi16(vxi6x01234567, vxk6x01234567);
+ const __m128i vp6x01234567hi = _mm_mulhi_epi16(vxi6x01234567, vxk6x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp6x01234567lo, vp6x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp6x01234567lo, vp6x01234567hi));
+
+ const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+ const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+ i7 += 8;
+
+ const __m128i vxi7x01234567 = _mm_unpacklo_epi8(vi7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi7x01234567));
+ const __m128i vxk7x01234567 = _mm_unpacklo_epi8(vk7x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk7x01234567));
+
+ const __m128i vp7x01234567lo = _mm_mullo_epi16(vxi7x01234567, vxk7x01234567);
+ const __m128i vp7x01234567hi = _mm_mulhi_epi16(vxi7x01234567, vxk7x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp7x01234567lo, vp7x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp7x01234567lo, vp7x01234567hi));
+
+ const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+ const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+ i8 += 8;
+
+ const __m128i vxi8x01234567 = _mm_unpacklo_epi8(vi8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vi8x01234567));
+ const __m128i vxk8x01234567 = _mm_unpacklo_epi8(vk8x01234567, _mm_cmpgt_epi8(_mm_setzero_si128(), vk8x01234567));
+
+ const __m128i vp8x01234567lo = _mm_mullo_epi16(vxi8x01234567, vxk8x01234567);
+ const __m128i vp8x01234567hi = _mm_mulhi_epi16(vxi8x01234567, vxk8x01234567);
+
+ vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vp8x01234567lo, vp8x01234567hi));
+ vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vp8x01234567lo, vp8x01234567hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ const __m128i vnmask0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0123);
+ const __m128i vnmask4567 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc4567);
+
+ const __m128i vabsacc0123 = _mm_abs_epi32(vacc0123);
+ const __m128i vabsacc4567 = _mm_abs_epi32(vacc4567);
+
+ const __m128i vabsacc13 = _mm_srli_epi64(vabsacc0123, 32);
+ const __m128i vabsacc57 = _mm_srli_epi64(vabsacc4567, 32);
+
+ const __m128i vabsprod02 = _mm_mul_epu32(vabsacc0123, vmultiplier);
+ const __m128i vabsprod13 = _mm_mul_epu32(vabsacc13, vmultiplier);
+ const __m128i vabsprod46 = _mm_mul_epu32(vabsacc4567, vmultiplier);
+ const __m128i vabsprod57 = _mm_mul_epu32(vabsacc57, vmultiplier);
+
+ const __m128i vnmask02 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask13 = _mm_shuffle_epi32(vnmask0123, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask46 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask57 = _mm_shuffle_epi32(vnmask4567, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod02 = _mm_sub_epi64(_mm_xor_si128(vabsprod02, vnmask02), vnmask02);
+ const __m128i vprod13 = _mm_sub_epi64(_mm_xor_si128(vabsprod13, vnmask13), vnmask13);
+ const __m128i vprod46 = _mm_sub_epi64(_mm_xor_si128(vabsprod46, vnmask46), vnmask46);
+ const __m128i vprod57 = _mm_sub_epi64(_mm_xor_si128(vabsprod57, vnmask57), vnmask57);
+
+ const __m128i vq31prod02 = _mm_srli_epi64(_mm_add_epi64(vprod02, vrounding), 31);
+ const __m128i vq31prod13 = _mm_srli_epi64(_mm_add_epi64(vprod13, vrounding), 31);
+ const __m128i vq31prod46 = _mm_srli_epi64(_mm_add_epi64(vprod46, vrounding), 31);
+ const __m128i vq31prod57 = _mm_srli_epi64(_mm_add_epi64(vprod57, vrounding), 31);
+
+ const __m128i vq31prod0213 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod02), _mm_castsi128_ps(vq31prod13), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod4657 = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod46), _mm_castsi128_ps(vq31prod57), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod0123 = _mm_shuffle_epi32(vq31prod0213, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod4567 = _mm_shuffle_epi32(vq31prod4657, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem0123 =
+ _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+ const __m128i vrem4567 =
+ _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc0123 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+ vacc4567 =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+ __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+ if (c & 4) {
+ *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+ output += 4;
+ }
+ if (c & 2) {
+ *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+ vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+ output += 2;
+ }
+ if (c & 1) {
+ *output = (int32_t) _mm_cvtsi128_si32(vout0123456701234567);
+ output += 1;
+ }
+ }
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c b/src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c
new file mode 100644
index 0000000..b7866b0
--- /dev/null
+++ b/src/qs8-dwconv/gen/up8x9-minmax-xop-mul16.c
@@ -0,0 +1,433 @@
+// Auto-generated file. Do not edit!
+// Template: src/qs8-dwconv/up-sse-mul16.c.in
+// Generator: tools/xngen
+//
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+#include <assert.h>
+
+#ifdef __GNUC__
+ #include <x86intrin.h>
+#else
+ #include <immintrin.h>
+ #include <ammintrin.h>
+#endif
+
+#include <xnnpack/dwconv.h>
+
+
+void xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16(  // QS8 depthwise conv, 9-tap kernel, up to 8 channels per main-loop iteration, XOP ISA
+    size_t channels,
+    size_t output_width,
+    const int8_t** input,  // array of 9 row pointers per output element
+    const void* weights,  // packed: 8 x int32 (per-channel accumulator init), then 9 x 8 kernel bytes
+    int8_t* output,
+    size_t input_stride,  // bytes to advance the input pointer array per output element
+    size_t output_increment,  // extra bytes added to output after each output element
+    size_t input_offset,
+    const int8_t* zero,  // sentinel row; rows equal to this pointer are NOT offset below
+    const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN  // quantization params; the sse2-layout fields are read below
+{
+  assert(channels != 0);
+  assert(output_width != 0);
+
+  do {  // one iteration per output element (output_width total)
+    const int8_t* i0 = input[0];
+    assert(i0 != NULL);
+    if XNN_UNPREDICTABLE(i0 != zero) {  // the shared zero row is never offset
+      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
+    }
+    const int8_t* i1 = input[1];
+    assert(i1 != NULL);
+    if XNN_UNPREDICTABLE(i1 != zero) {
+      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
+    }
+    const int8_t* i2 = input[2];
+    assert(i2 != NULL);
+    if XNN_UNPREDICTABLE(i2 != zero) {
+      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
+    }
+    const int8_t* i3 = input[3];
+    assert(i3 != NULL);
+    if XNN_UNPREDICTABLE(i3 != zero) {
+      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
+    }
+    const int8_t* i4 = input[4];
+    assert(i4 != NULL);
+    if XNN_UNPREDICTABLE(i4 != zero) {
+      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
+    }
+    const int8_t* i5 = input[5];
+    assert(i5 != NULL);
+    if XNN_UNPREDICTABLE(i5 != zero) {
+      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
+    }
+    const int8_t* i6 = input[6];
+    assert(i6 != NULL);
+    if XNN_UNPREDICTABLE(i6 != zero) {
+      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
+    }
+    const int8_t* i7 = input[7];
+    assert(i7 != NULL);
+    if XNN_UNPREDICTABLE(i7 != zero) {
+      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
+    }
+    const int8_t* i8 = input[8];
+    assert(i8 != NULL);
+    if XNN_UNPREDICTABLE(i8 != zero) {
+      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
+    }
+    input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+    size_t c = channels;
+    const int8_t* w = weights;
+    for (; c >= 8; c -= 8) {  // main loop: 8 channels per iteration
+      __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);  // load 8 int32 accumulator seeds (bias, presumably) from the packed weights
+      __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+      const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);  // 8 int8 inputs for tap 0
+      __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);  // sign-extend to 16 bits
+      const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));  // 8 kernel bytes for tap 0
+      __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+      i0 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);  // XOP 16x16->32 multiply-accumulate into 32-bit lanes (VPMACSWD lane selection — confirm against AMD XOP spec)
+      vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);  // bring upper four 16-bit elements into the low half
+      vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+
+      const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+      __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+      const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+      __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+      i1 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+      vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+      vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+
+      const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+      __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+      const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+      __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+      i2 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+      vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+      vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+
+      const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+      __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+      const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+      __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+      i3 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+      vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+      vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+
+      const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+      __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+      const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+      __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+      i4 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+      vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+      vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+
+      const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+      __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+      const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+      __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+      i5 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+      vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+      vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+
+      const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+      __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+      const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+      __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+      i6 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+      vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+      vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+
+      const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+      __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+      const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+      __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+      i7 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+      vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+      vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+
+      const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+      __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+      const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+      __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+      i8 += 8;
+
+
+      vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+      vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+      vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+
+      vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+
+      w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t) + 72 * sizeof(int8_t));  // advance past 8 int32 seeds + 9 taps x 8 kernel bytes
+
+      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);  // requantization: fixed-point multiply then rounding shift
+      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+      const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);  // move odd 32-bit lanes down for the 32x32->64 multiplies
+      const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+      const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);  // signed 32x32->64 products of even lanes, plus rounding term
+      const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+      const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+      const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+      const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);  // even lanes: shift product right by 31
+      const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);  // odd lanes were pre-shifted by 32; doubling nets a 31-bit right shift into the high dword
+      const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+      const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+      const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);  // re-interleave even/odd results back into channel order
+      const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+      const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+      const __m128i vrem0123 =
+        _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));  // remainder, biased by -1 for negative products
+      const __m128i vrem4567 =
+        _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+      const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+      const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+      vacc0123 =
+        _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));  // arithmetic shift; +1 where remainder exceeds threshold (rounding correction)
+      vacc4567 =
+        _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+      __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);  // saturating pack to 16 bits + output zero point
+
+      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+      vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);  // clamp to the output activation range
+
+      __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);  // saturating pack to 8 bits (both halves identical)
+
+      _mm_storel_epi64((__m128i*) output, vout0123456701234567);  // store the low 8 bytes
+      output += 8;
+    }
+    if XNN_UNLIKELY(c != 0) {  // tail: 1-7 remaining channels; vector loads still read 8 bytes, stores are narrowed below
+      {
+        __m128i vacc0123 = _mm_loadu_si128((const __m128i*) w);
+        __m128i vacc4567 = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+
+        const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
+        __m128i vxi0x01234567 = _mm_cvtepi8_epi16(vi0x01234567);
+        const __m128i vk0x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 0 * sizeof(int8_t)));
+        __m128i vxk0x01234567 = _mm_cvtepi8_epi16(vk0x01234567);
+        i0 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc0123);
+        vxi0x01234567 = _mm_unpackhi_epi64(vxi0x01234567, vxi0x01234567);
+        vxk0x01234567 = _mm_unpackhi_epi64(vxk0x01234567, vxk0x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi0x01234567, vxk0x01234567, vacc4567);
+
+        const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
+        __m128i vxi1x01234567 = _mm_cvtepi8_epi16(vi1x01234567);
+        const __m128i vk1x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 8 * sizeof(int8_t)));
+        __m128i vxk1x01234567 = _mm_cvtepi8_epi16(vk1x01234567);
+        i1 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc0123);
+        vxi1x01234567 = _mm_unpackhi_epi64(vxi1x01234567, vxi1x01234567);
+        vxk1x01234567 = _mm_unpackhi_epi64(vxk1x01234567, vxk1x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi1x01234567, vxk1x01234567, vacc4567);
+
+        const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
+        __m128i vxi2x01234567 = _mm_cvtepi8_epi16(vi2x01234567);
+        const __m128i vk2x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 16 * sizeof(int8_t)));
+        __m128i vxk2x01234567 = _mm_cvtepi8_epi16(vk2x01234567);
+        i2 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc0123);
+        vxi2x01234567 = _mm_unpackhi_epi64(vxi2x01234567, vxi2x01234567);
+        vxk2x01234567 = _mm_unpackhi_epi64(vxk2x01234567, vxk2x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi2x01234567, vxk2x01234567, vacc4567);
+
+        const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
+        __m128i vxi3x01234567 = _mm_cvtepi8_epi16(vi3x01234567);
+        const __m128i vk3x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 24 * sizeof(int8_t)));
+        __m128i vxk3x01234567 = _mm_cvtepi8_epi16(vk3x01234567);
+        i3 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc0123);
+        vxi3x01234567 = _mm_unpackhi_epi64(vxi3x01234567, vxi3x01234567);
+        vxk3x01234567 = _mm_unpackhi_epi64(vxk3x01234567, vxk3x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi3x01234567, vxk3x01234567, vacc4567);
+
+        const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
+        __m128i vxi4x01234567 = _mm_cvtepi8_epi16(vi4x01234567);
+        const __m128i vk4x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 32 * sizeof(int8_t)));
+        __m128i vxk4x01234567 = _mm_cvtepi8_epi16(vk4x01234567);
+        i4 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc0123);
+        vxi4x01234567 = _mm_unpackhi_epi64(vxi4x01234567, vxi4x01234567);
+        vxk4x01234567 = _mm_unpackhi_epi64(vxk4x01234567, vxk4x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi4x01234567, vxk4x01234567, vacc4567);
+
+        const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
+        __m128i vxi5x01234567 = _mm_cvtepi8_epi16(vi5x01234567);
+        const __m128i vk5x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 40 * sizeof(int8_t)));
+        __m128i vxk5x01234567 = _mm_cvtepi8_epi16(vk5x01234567);
+        i5 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc0123);
+        vxi5x01234567 = _mm_unpackhi_epi64(vxi5x01234567, vxi5x01234567);
+        vxk5x01234567 = _mm_unpackhi_epi64(vxk5x01234567, vxk5x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi5x01234567, vxk5x01234567, vacc4567);
+
+        const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
+        __m128i vxi6x01234567 = _mm_cvtepi8_epi16(vi6x01234567);
+        const __m128i vk6x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 48 * sizeof(int8_t)));
+        __m128i vxk6x01234567 = _mm_cvtepi8_epi16(vk6x01234567);
+        i6 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc0123);
+        vxi6x01234567 = _mm_unpackhi_epi64(vxi6x01234567, vxi6x01234567);
+        vxk6x01234567 = _mm_unpackhi_epi64(vxk6x01234567, vxk6x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi6x01234567, vxk6x01234567, vacc4567);
+
+        const __m128i vi7x01234567 = _mm_loadl_epi64((const __m128i*) i7);
+        __m128i vxi7x01234567 = _mm_cvtepi8_epi16(vi7x01234567);
+        const __m128i vk7x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 56 * sizeof(int8_t)));
+        __m128i vxk7x01234567 = _mm_cvtepi8_epi16(vk7x01234567);
+        i7 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc0123);
+        vxi7x01234567 = _mm_unpackhi_epi64(vxi7x01234567, vxi7x01234567);
+        vxk7x01234567 = _mm_unpackhi_epi64(vxk7x01234567, vxk7x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi7x01234567, vxk7x01234567, vacc4567);
+
+        const __m128i vi8x01234567 = _mm_loadl_epi64((const __m128i*) i8);
+        __m128i vxi8x01234567 = _mm_cvtepi8_epi16(vi8x01234567);
+        const __m128i vk8x01234567 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8 * sizeof(int32_t) + 64 * sizeof(int8_t)));
+        __m128i vxk8x01234567 = _mm_cvtepi8_epi16(vk8x01234567);
+        i8 += 8;
+
+
+        vacc0123 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc0123);
+        vxi8x01234567 = _mm_unpackhi_epi64(vxi8x01234567, vxi8x01234567);
+        vxk8x01234567 = _mm_unpackhi_epi64(vxk8x01234567, vxk8x01234567);
+        vacc4567 = _mm_maccd_epi16(vxi8x01234567, vxk8x01234567, vacc4567);
+
+        w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));  // advance past the int32 seeds only; this tail block executes at most once, so the kernel bytes need no skip
+
+        const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);  // same requantization sequence as the main loop
+        const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+        const __m128i vacc13 = _mm_srli_epi64(vacc0123, 32);
+        const __m128i vacc57 = _mm_srli_epi64(vacc4567, 32);
+
+        const __m128i vprod02 = _mm_add_epi64(_mm_mul_epi32(vacc0123, vmultiplier), vrounding);
+        const __m128i vprod46 = _mm_add_epi64(_mm_mul_epi32(vacc4567, vmultiplier), vrounding);
+
+        const __m128i vprod13 = _mm_add_epi64(_mm_mul_epi32(vacc13, vmultiplier), vrounding);
+        const __m128i vprod57 = _mm_add_epi64(_mm_mul_epi32(vacc57, vmultiplier), vrounding);
+
+        const __m128i vq31prod02 = _mm_srli_epi64(vprod02, 31);
+        const __m128i vq31prod13 = _mm_add_epi64(vprod13, vprod13);
+        const __m128i vq31prod46 = _mm_srli_epi64(vprod46, 31);
+        const __m128i vq31prod57 = _mm_add_epi64(vprod57, vprod57);
+
+        const __m128i vq31prod0123 = _mm_blend_epi16(vq31prod02, vq31prod13, 0xCC);
+        const __m128i vq31prod4567 = _mm_blend_epi16(vq31prod46, vq31prod57, 0xCC);
+
+        const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+        const __m128i vrem0123 =
+          _mm_add_epi32(_mm_and_si128(vq31prod0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0123));
+        const __m128i vrem4567 =
+          _mm_add_epi32(_mm_and_si128(vq31prod4567, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod4567));
+
+        const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+        const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+        vacc0123 =
+          _mm_sub_epi32(_mm_sra_epi32(vq31prod0123, vshift), _mm_cmpgt_epi32(vrem0123, vremainder_threshold));
+        vacc4567 =
+          _mm_sub_epi32(_mm_sra_epi32(vq31prod4567, vshift), _mm_cmpgt_epi32(vrem4567, vremainder_threshold));
+
+        const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+        __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
+
+        const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+        const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+        vout01234567 = _mm_min_epi16(_mm_max_epi16(vout01234567, voutput_min), voutput_max);
+
+        __m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
+
+        if (c & 4) {  // write the 1-7 leftover bytes as 4 + 2 + 1, shifting consumed lanes out after each store
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
+          vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
+          output += 4;
+        }
+        if (c & 2) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout0123456701234567, 0);
+          vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
+          output += 2;
+        }
+        if (c & 1) {
+          *output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);  // SSE4.1 byte extract; available since XOP implies SSE4.1 support — confirm for the build's target CPUs
+          output += 1;
+        }
+      }
+    }
+
+    output = (int8_t*) ((uintptr_t) output + output_increment);  // caller-provided stride between consecutive output elements
+  } while (--output_width != 0);
+}
diff --git a/src/qs8-dwconv/up-sse-mul16.c.in b/src/qs8-dwconv/up-sse-mul16.c.in
new file mode 100644
index 0000000..9f891fb
--- /dev/null
+++ b/src/qs8-dwconv/up-sse-mul16.c.in
@@ -0,0 +1,363 @@
+// Copyright 2020 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+
+$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+$SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h", 5: "ammintrin.h"}[SSE]
+$assert CHANNEL_TILE % 8 == 0
+$assert CHANNEL_TILE >= 8
+$assert KERNEL_TILE >= 2
+#include <assert.h>
+
+$if SSE == 5:
+ #ifdef __GNUC__
+ #include <x86intrin.h>
+ #else
+ #include <immintrin.h>
+ #include <${SSE_HEADER}>
+ #endif
+$else:
+ #include <${SSE_HEADER}>
+
+#include <xnnpack/dwconv.h>
+
+
+$ISA = {2: "sse2", 3: "ssse3", 4: "sse41", 5: "xop"}[SSE]
+void xnn_qs8_dwconv_minmax_ukernel_up${CHANNEL_TILE}x${KERNEL_TILE}__${ISA}_mul16(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
+{
+ assert(channels != 0);
+ assert(output_width != 0);
+
+ do {
+ $for K in range(KERNEL_TILE):
+ const int8_t* i${K} = input[${K}];
+ assert(i${K} != NULL);
+ if XNN_UNPREDICTABLE(i${K} != zero) {
+ i${K} = (const int8_t*) ((uintptr_t) i${K} + input_offset);
+ }
+ input = (const int8_t**) ((uintptr_t) input + input_stride);
+
+ size_t c = channels;
+ const int8_t* w = weights;
+ for (; c >= ${CHANNEL_TILE}; c -= ${CHANNEL_TILE}) {
+ __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
+ $for C in range(4, CHANNEL_TILE, 4):
+ __m128i vacc${ABC[C:C+4]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + ${C} * sizeof(int32_t)));
+
+ $for K in range(KERNEL_TILE):
+
+ $for C in range(0, CHANNEL_TILE, 8):
+ $if C == 0:
+ const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
+ $else:
+ const __m128i vi${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${K} + ${C}));
+ $if SSE >= 4:
+ ${"__m128i" if SSE == 5 else "const __m128i"} vxi${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[C:C+8]});
+ const __m128i vk${K}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE + C} * sizeof(int8_t)));
+ $if SSE >= 4:
+ ${"__m128i" if SSE == 5 else "const __m128i"} vxk${K}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[C:C+8]});
+ i${K} += ${CHANNEL_TILE};
+
+ $if SSE < 4:
+ $for C in range(0, CHANNEL_TILE, 8):
+ const __m128i vxi${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[C:C+8]}));
+ const __m128i vxk${K}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vk${K}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[C:C+8]}));
+
+ $if SSE == 5:
+ $for C in range(0, CHANNEL_TILE, 8):
+ vacc${ABC[C:C+4]} = _mm_maccd_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]}, vacc${ABC[C:C+4]});
+ vxi${K}x${ABC[C:C+8]} = _mm_unpackhi_epi64(vxi${K}x${ABC[C:C+8]}, vxi${K}x${ABC[C:C+8]});
+ vxk${K}x${ABC[C:C+8]} = _mm_unpackhi_epi64(vxk${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});
+
+ $for C in range(0, CHANNEL_TILE, 8):
+ vacc${ABC[C+4:C+8]} = _mm_maccd_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]}, vacc${ABC[C+4:C+8]});
+ $else:
+ $for C in range(0, CHANNEL_TILE, 8):
+ const __m128i vp${K}x${ABC[C:C+8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});
+ const __m128i vp${K}x${ABC[C:C+8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[C:C+8]}, vxk${K}x${ABC[C:C+8]});
+
+ $for C in range(0, CHANNEL_TILE, 8):
+ vacc${ABC[C:C+4]} = _mm_add_epi32(vacc${ABC[C:C+4]}, _mm_unpacklo_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));
+ vacc${ABC[C+4:C+8]} = _mm_add_epi32(vacc${ABC[C+4:C+8]}, _mm_unpackhi_epi16(vp${K}x${ABC[C:C+8]}lo, vp${K}x${ABC[C:C+8]}hi));
+
+ w = (const void*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${KERNEL_TILE * CHANNEL_TILE} * sizeof(int8_t));
+
+      // Requantization: multiply each 32-bit accumulator by the Q31 multiplier,
+      // add the rounding term from params, and keep bits 31.. of the 64-bit product.
+      const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+      const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+      $if SSE >= 4:
+        // SSE4.1+: signed 32x32->64-bit multiply (_mm_mul_epi32) operates on the even
+        // lanes; odd lanes are shifted down into even position first.
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vacc${ABC[C+1:C+4:2]} = _mm_srli_epi64(vacc${ABC[C:C+4]}, 32);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vprod${ABC[C:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C:C+4]}, vmultiplier), vrounding);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[C+1:C+4:2]}, vmultiplier), vrounding);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(vprod${ABC[C:C+4:2]}, 31);
+          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vprod${ABC[C+1:C+4:2]});
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vq31prod${ABC[C:C+4]} = _mm_blend_epi16(vq31prod${ABC[C:C+4:2]}, vq31prod${ABC[C+1:C+4:2]}, 0xCC);
+      $else:
+        // SSE2/SSSE3: no signed 32x32 multiply, so multiply |acc| with the unsigned
+        // _mm_mul_epu32 and restore the sign afterwards via xor/sub with the sign mask.
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vnmask${ABC[C:C+4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[C:C+4]});
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          $if SSE >= 3:
+            const __m128i vabsacc${ABC[C:C+4]} = _mm_abs_epi32(vacc${ABC[C:C+4]});
+          $else:
+            const __m128i vabsacc${ABC[C:C+4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[C:C+4]}, vnmask${ABC[C:C+4]}), vnmask${ABC[C:C+4]});
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vabsacc${ABC[C+1:C+4:2]} = _mm_srli_epi64(vabsacc${ABC[C:C+4]}, 32);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vabsprod${ABC[C:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C:C+4]}, vmultiplier);
+          const __m128i vabsprod${ABC[C+1:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C+1:C+4:2]}, vmultiplier);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vnmask${ABC[C:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(2, 2, 0, 0));
+          const __m128i vnmask${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vnmask${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vprod${ABC[C:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C:C+4:2]}, vnmask${ABC[C:C+4:2]}), vnmask${ABC[C:C+4:2]});
+          const __m128i vprod${ABC[C+1:C+4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[C+1:C+4:2]}, vnmask${ABC[C+1:C+4:2]}), vnmask${ABC[C+1:C+4:2]});
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vq31prod${ABC[C:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C:C+4:2]}, vrounding), 31);
+          const __m128i vq31prod${ABC[C+1:C+4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[C+1:C+4:2]}, vrounding), 31);
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]} = _mm_castps_si128(_mm_shuffle_ps(
+            _mm_castsi128_ps(vq31prod${ABC[C:C+4:2]}), _mm_castsi128_ps(vq31prod${ABC[C+1:C+4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
+
+        $for C in range(0, CHANNEL_TILE, 4):
+          const __m128i vq31prod${ABC[C:C+4]} = _mm_shuffle_epi32(vq31prod${ABC[C:C+4:2]}${ABC[C+1:C+4:2]}, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ $for C in range(0, CHANNEL_TILE, 4):
+ const __m128i vrem${ABC[C:C+4]} =
+ _mm_add_epi32(_mm_and_si128(vq31prod${ABC[C:C+4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[C:C+4]}));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ $for C in range(0, CHANNEL_TILE, 4):
+ vacc${ABC[C:C+4]} =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[C:C+4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[C:C+4]}, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ $for C in range(0, CHANNEL_TILE, 8):
+ __m128i vout${ABC[C:C+8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[C:C+4]}, vacc${ABC[C+4:C+8]}), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ $for C in range(0, CHANNEL_TILE, 8):
+ vout${ABC[C:C+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[C:C+8]}, voutput_min), voutput_max);
+
+ $for C in range(0, CHANNEL_TILE, 16):
+ $if C + 8 < CHANNEL_TILE:
+ __m128i vout${ABC[C:C+16]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
+ $else:
+ __m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});
+
+ $if CHANNEL_TILE > 8:
+ _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
+ $else:
+ _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+ $for C in range(16, CHANNEL_TILE, 16):
+ $if C + 8 < CHANNEL_TILE:
+ _mm_storeu_si128((__m128i*) (output + ${C}), vout${ABC[C:C+16]});
+ $else:
+ _mm_storel_epi64((__m128i*) (output + ${C}), vout${ABC[C:C+8]}${ABC[C:C+8]});
+ output += ${CHANNEL_TILE};
+ }
+ if XNN_UNLIKELY(c != 0) {
+ $if CHANNEL_TILE > 8:
+ const int8_t* k = (const int8_t*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t));
+ ${"do " if CHANNEL_TILE > 8 else ""}{
+ __m128i vacc${ABC[0:4]} = _mm_loadu_si128((const __m128i*) w);
+ __m128i vacc${ABC[4:8]} = _mm_loadu_si128((const __m128i*) ((uintptr_t) w + 4 * sizeof(int32_t)));
+
+ $for K in range(KERNEL_TILE):
+
+ const __m128i vi${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${K});
+ $if SSE >= 4:
+ ${"__m128i" if SSE == 5 else "const __m128i"} vxi${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vi${K}x${ABC[0:8]});
+ $if CHANNEL_TILE > 8:
+ $if K == 0:
+ const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) k);
+ $else:
+ const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) (k + ${K * CHANNEL_TILE}));
+ $else:
+ const __m128i vk${K}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + ${CHANNEL_TILE} * sizeof(int32_t) + ${K * CHANNEL_TILE} * sizeof(int8_t)));
+ $if SSE >= 4:
+ ${"__m128i" if SSE == 5 else "const __m128i"} vxk${K}x${ABC[0:8]} = _mm_cvtepi8_epi16(vk${K}x${ABC[0:8]});
+ i${K} += 8;
+
+ $if SSE < 4:
+ const __m128i vxi${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${K}x${ABC[0:8]}));
+ const __m128i vxk${K}x${ABC[0:8]} = _mm_unpacklo_epi8(vk${K}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vk${K}x${ABC[0:8]}));
+
+ $if SSE == 5:
+ vacc${ABC[0:4]} = _mm_maccd_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]}, vacc${ABC[0:4]});
+ vxi${K}x${ABC[0:8]} = _mm_unpackhi_epi64(vxi${K}x${ABC[0:8]}, vxi${K}x${ABC[0:8]});
+ vxk${K}x${ABC[0:8]} = _mm_unpackhi_epi64(vxk${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});
+ vacc${ABC[4:8]} = _mm_maccd_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]}, vacc${ABC[4:8]});
+ $else:
+ const __m128i vp${K}x${ABC[0:8]}lo = _mm_mullo_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});
+ const __m128i vp${K}x${ABC[0:8]}hi = _mm_mulhi_epi16(vxi${K}x${ABC[0:8]}, vxk${K}x${ABC[0:8]});
+
+ vacc${ABC[0:4]} = _mm_add_epi32(vacc${ABC[0:4]}, _mm_unpacklo_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));
+ vacc${ABC[4:8]} = _mm_add_epi32(vacc${ABC[4:8]}, _mm_unpackhi_epi16(vp${K}x${ABC[0:8]}lo, vp${K}x${ABC[0:8]}hi));
+
+ w = (const void*) ((uintptr_t) w + 8 * sizeof(int32_t));
+ $if CHANNEL_TILE > 8:
+ k += 8;
+
+ const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
+ const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
+
+ $if SSE >= 4:
+ const __m128i vacc${ABC[1:4:2]} = _mm_srli_epi64(vacc${ABC[0:4]}, 32);
+ const __m128i vacc${ABC[5:8:2]} = _mm_srli_epi64(vacc${ABC[4:8]}, 32);
+
+ const __m128i vprod${ABC[0:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[0:4]}, vmultiplier), vrounding);
+ const __m128i vprod${ABC[4:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[4:8]}, vmultiplier), vrounding);
+
+ const __m128i vprod${ABC[1:4:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[1:4:2]}, vmultiplier), vrounding);
+ const __m128i vprod${ABC[5:8:2]} = _mm_add_epi64(_mm_mul_epi32(vacc${ABC[5:8:2]}, vmultiplier), vrounding);
+
+ const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(vprod${ABC[0:4:2]}, 31);
+ const __m128i vq31prod${ABC[1:4:2]} = _mm_add_epi64(vprod${ABC[1:4:2]}, vprod${ABC[1:4:2]});
+ const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(vprod${ABC[4:8:2]}, 31);
+ const __m128i vq31prod${ABC[5:8:2]} = _mm_add_epi64(vprod${ABC[5:8:2]}, vprod${ABC[5:8:2]});
+
+ const __m128i vq31prod${ABC[0:4]} = _mm_blend_epi16(vq31prod${ABC[0:4:2]}, vq31prod${ABC[1:4:2]}, 0xCC);
+ const __m128i vq31prod${ABC[4:8]} = _mm_blend_epi16(vq31prod${ABC[4:8:2]}, vq31prod${ABC[5:8:2]}, 0xCC);
+ $else:
+ const __m128i vnmask${ABC[0:4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[0:4]});
+ const __m128i vnmask${ABC[4:8]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[4:8]});
+
+ $if SSE >= 3:
+ const __m128i vabsacc${ABC[0:4]} = _mm_abs_epi32(vacc${ABC[0:4]});
+ const __m128i vabsacc${ABC[4:8]} = _mm_abs_epi32(vacc${ABC[4:8]});
+ $else:
+ const __m128i vabsacc${ABC[0:4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[0:4]}, vnmask${ABC[0:4]}), vnmask${ABC[0:4]});
+ const __m128i vabsacc${ABC[4:8]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[4:8]}, vnmask${ABC[4:8]}), vnmask${ABC[4:8]});
+
+ const __m128i vabsacc${ABC[1:4:2]} = _mm_srli_epi64(vabsacc${ABC[0:4]}, 32);
+ const __m128i vabsacc${ABC[5:8:2]} = _mm_srli_epi64(vabsacc${ABC[4:8]}, 32);
+
+ const __m128i vabsprod${ABC[0:4:2]} = _mm_mul_epu32(vabsacc${ABC[0:4]}, vmultiplier);
+ const __m128i vabsprod${ABC[1:4:2]} = _mm_mul_epu32(vabsacc${ABC[1:4:2]}, vmultiplier);
+ const __m128i vabsprod${ABC[4:8:2]} = _mm_mul_epu32(vabsacc${ABC[4:8]}, vmultiplier);
+ const __m128i vabsprod${ABC[5:8:2]} = _mm_mul_epu32(vabsacc${ABC[5:8:2]}, vmultiplier);
+
+ const __m128i vnmask${ABC[0:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask${ABC[1:4:2]} = _mm_shuffle_epi32(vnmask${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
+ const __m128i vnmask${ABC[4:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(2, 2, 0, 0));
+ const __m128i vnmask${ABC[5:8:2]} = _mm_shuffle_epi32(vnmask${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));
+
+ const __m128i vprod${ABC[0:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[0:4:2]}, vnmask${ABC[0:4:2]}), vnmask${ABC[0:4:2]});
+ const __m128i vprod${ABC[1:4:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[1:4:2]}, vnmask${ABC[1:4:2]}), vnmask${ABC[1:4:2]});
+ const __m128i vprod${ABC[4:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[4:8:2]}, vnmask${ABC[4:8:2]}), vnmask${ABC[4:8:2]});
+ const __m128i vprod${ABC[5:8:2]} = _mm_sub_epi64(_mm_xor_si128(vabsprod${ABC[5:8:2]}, vnmask${ABC[5:8:2]}), vnmask${ABC[5:8:2]});
+
+ const __m128i vq31prod${ABC[0:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[0:4:2]}, vrounding), 31);
+ const __m128i vq31prod${ABC[1:4:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[1:4:2]}, vrounding), 31);
+ const __m128i vq31prod${ABC[4:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[4:8:2]}, vrounding), 31);
+ const __m128i vq31prod${ABC[5:8:2]} = _mm_srli_epi64(_mm_add_epi64(vprod${ABC[5:8:2]}, vrounding), 31);
+
+ const __m128i vq31prod${ABC[0:4:2]}${ABC[1:4:2]} = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod${ABC[0:4:2]}), _mm_castsi128_ps(vq31prod${ABC[1:4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
+ const __m128i vq31prod${ABC[4:8:2]}${ABC[5:8:2]} = _mm_castps_si128(_mm_shuffle_ps(
+ _mm_castsi128_ps(vq31prod${ABC[4:8:2]}), _mm_castsi128_ps(vq31prod${ABC[5:8:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
+
+ const __m128i vq31prod${ABC[0:4]} = _mm_shuffle_epi32(vq31prod${ABC[0:4:2]}${ABC[1:4:2]}, _MM_SHUFFLE(3, 1, 2, 0));
+ const __m128i vq31prod${ABC[4:8]} = _mm_shuffle_epi32(vq31prod${ABC[4:8:2]}${ABC[5:8:2]}, _MM_SHUFFLE(3, 1, 2, 0));
+
+ const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);
+ const __m128i vrem${ABC[0:4]} =
+ _mm_add_epi32(_mm_and_si128(vq31prod${ABC[0:4]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[0:4]}));
+ const __m128i vrem${ABC[4:8]} =
+ _mm_add_epi32(_mm_and_si128(vq31prod${ABC[4:8]}, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod${ABC[4:8]}));
+
+ const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
+ const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);
+ vacc${ABC[0:4]} =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[0:4]}, vshift), _mm_cmpgt_epi32(vrem${ABC[0:4]}, vremainder_threshold));
+ vacc${ABC[4:8]} =
+ _mm_sub_epi32(_mm_sra_epi32(vq31prod${ABC[4:8]}, vshift), _mm_cmpgt_epi32(vrem${ABC[4:8]}, vremainder_threshold));
+
+ const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
+ __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vacc${ABC[0:4]}, vacc${ABC[4:8]}), voutput_zero_point);
+
+ const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
+ const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
+ vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);
+
+ __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});
+
+      // Store the remainder channels: full groups of 8 first (when CHANNEL_TILE > 8),
+      // then the final 1-7 bytes in 4/2/1-byte chunks, shifting the packed result
+      // down after each partial store.
+      $if CHANNEL_TILE > 8:
+        if XNN_LIKELY(c >= 8) {
+          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
+          output += 8;
+          c -= 8;
+        } else {
+          if (c & 4) {
+            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+            output += 4;
+          }
+          if (c & 2) {
+            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+            output += 2;
+          }
+          if (c & 1) {
+            $if SSE >= 4:
+              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+            $else:
+              // SSE2 has no byte extract: take the low 32 bits and store the low byte.
+              *output = (int8_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+            output += 1;
+          }
+          c = 0;
+        }
+      $else:
+        if (c & 4) {
+          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
+          output += 4;
+        }
+        if (c & 2) {
+          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
+          output += 2;
+        }
+        if (c & 1) {
+          $if SSE >= 4:
+            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
+          $else:
+            // SSE2 has no byte extract: take the low 32 bits and store the low byte.
+            *output = (int8_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
+          output += 1;
+        }
+      }${" while (c != 0);" if CHANNEL_TILE > 8 else ""}
+ }
+
+ output = (int8_t*) ((uintptr_t) output + output_increment);
+ } while (--output_width != 0);
+}
diff --git a/src/xnnpack/dwconv.h b/src/xnnpack/dwconv.h
index 46d8609..b513fd3 100644
--- a/src/xnnpack/dwconv.h
+++ b/src/xnnpack/dwconv.h
@@ -18,6 +18,7 @@
extern "C" {
#endif
+
#define DECLARE_F32_DWCONV_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t channels, \
@@ -217,6 +218,7 @@
DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up2x25__scalar)
DECLARE_F32_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f32_dwconv_minmax_ukernel_up2x25__scalar_acc2)
+
#define DECLARE_F16_DWCONV_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t channels, \
@@ -256,6 +258,7 @@
DECLARE_F16_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith)
DECLARE_F16_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_f16_dwconv_minmax_ukernel_up16x25__neonfp16arith_acc2)
+
#define DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t channels, \
@@ -274,6 +277,36 @@
DECLARE_QU8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qu8_dwconv_minmax_ukernel_up8x9__sse2)
+#define DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(fn_name) \
+ XNN_INTERNAL void fn_name( \
+ size_t channels, \
+ size_t output_width, \
+ const int8_t** input, \
+ const void* weights, \
+ int8_t* output, \
+ size_t input_stride, \
+ size_t output_increment, \
+ size_t input_offset, \
+ const int8_t* zero, \
+ const union xnn_qs8_gemm_params* params);
+
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16)
+
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16)
+
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16)
+
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16)
+DECLARE_QS8_DWCONV_MINMAX_UNIPASS_UKERNEL_FUNCTION(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16)
+
+
#define DECLARE_F32_DWCONV_CHW_UKERNEL_FUNCTION(fn_name) \
XNN_INTERNAL void fn_name( \
size_t input_height, \
diff --git a/src/xnnpack/pack.h b/src/xnnpack/pack.h
index 02185ad..c94b9d8 100644
--- a/src/xnnpack/pack.h
+++ b/src/xnnpack/pack.h
@@ -368,6 +368,16 @@
void* packed_w,
const struct xnn_qu8_packing_params* params);
+XNN_INTERNAL void xnn_pack_qs8_dwconv_ghw_w(
+ size_t h,
+ size_t w,
+ size_t c,
+ size_t cr,
+ const int8_t* k,
+ const int32_t* b,
+ void* packed_w,
+ const struct xnn_qs8_packing_params* params);
+
typedef void (*xnn_pack_dwconv_hwg_w_function)(
size_t h,
diff --git a/src/xnnpack/params.h b/src/xnnpack/params.h
index 5f0c806..b7b4674 100644
--- a/src/xnnpack/params.h
+++ b/src/xnnpack/params.h
@@ -951,6 +951,18 @@
const uint8_t* zero,
const union xnn_qu8_gemm_params* params);
+typedef void (*xnn_qs8_dwconv_minmax_unipass_ukernel_function)(
+ size_t channels,
+ size_t output_width,
+ const int8_t** input,
+ const void* weights,
+ int8_t* output,
+ size_t input_stride,
+ size_t output_increment,
+ size_t input_offset,
+ const int8_t* zero,
+ const union xnn_qs8_gemm_params* params);
+
typedef void (*xnn_dwconv_multipass_ukernel_function)(
size_t channels,
size_t output_width,
diff --git a/test/dwconv-microkernel-tester.h b/test/dwconv-microkernel-tester.h
index 42bff16..4524005 100644
--- a/test/dwconv-microkernel-tester.h
+++ b/test/dwconv-microkernel-tester.h
@@ -282,6 +282,119 @@
}
}
+  // Runs the given QS8 depthwise-convolution microkernel against an exact integer
+  // reference implementation for iterations() rounds of random inputs, and verifies
+  // that every output byte matches and respects the [qmin, qmax] clamping range.
+  void Test(xnn_qs8_dwconv_minmax_unipass_ukernel_function dwconv_minmax, Variant variant = Variant::Native) const {
+    std::random_device random_device;
+    auto rng = std::mt19937(random_device());
+    auto i32rng = std::bind(std::uniform_int_distribution<int32_t>(-10000, 10000), rng);
+    // The distribution type must be signed: with uint32_t, int8_t's lower bound (-128)
+    // converts to a huge unsigned value, violating the min <= max precondition (UB).
+    auto i8rng = std::bind(
+      std::uniform_int_distribution<int32_t>(std::numeric_limits<int8_t>::min(), std::numeric_limits<int8_t>::max()), rng);
+
+    std::vector<const int8_t*> indirection((width() - 1) * step() + kr());
+    std::vector<int8_t> input(XNN_EXTRA_BYTES / sizeof(int8_t) + indirection.size() * channels());
+    std::vector<int8_t> kernel(channels() * kr());
+    std::vector<int32_t> bias(channels());
+    std::vector<int8_t, AlignedAllocator<int8_t, 64>> packed_weights((kr() + sizeof(int32_t) / sizeof(int8_t)) * packed_channels());
+    std::vector<int8_t> zero(channels() + XNN_EXTRA_BYTES / sizeof(int8_t));
+    std::vector<int8_t> output((width() - 1) * output_stride() + channels());
+    std::vector<int32_t> accumulators(width() * channels());
+    std::vector<int8_t> output_ref(width() * channels());
+
+    for (size_t iteration = 0; iteration < iterations(); iteration++) {
+      // Regenerate until the data is non-constant, so the test is not degenerate.
+      do {
+        std::generate(input.begin(), input.end(), std::ref(i8rng));
+      } while (input.size() > 1 && *std::max_element(input.cbegin(), input.cend()) == *std::min_element(input.cbegin(), input.cend()));
+      do {
+        std::generate(kernel.begin(), kernel.end(), std::ref(i8rng));
+      } while (kernel.size() > 1 && *std::max_element(kernel.cbegin(), kernel.cend()) == *std::min_element(kernel.cbegin(), kernel.cend()));
+      std::generate(bias.begin(), bias.end(), std::ref(i32rng));
+      std::fill(zero.begin(), zero.end(), int8_t(input_zero_point() - 0x80));
+      std::fill(output.begin(), output.end(), 0xA5);
+
+      std::fill(packed_weights.begin(), packed_weights.end(), 0);
+      const xnn_qs8_packing_params packing_params = { int8_t(input_zero_point() - 0x80) };
+      xnn_pack_qs8_dwconv_ghw_w(
+        kr(), 1, channels(), cr(),
+        kernel.data(), bias.data(), packed_weights.data(), &packing_params);
+      for (size_t i = 0; i < indirection.size(); i++) {
+        indirection[i] = input.data() + i * channels() - input_offset();
+      }
+      std::shuffle(indirection.begin(), indirection.end(), rng);
+      if (zero_index() != SIZE_MAX) {
+        for (size_t i = 0; i < indirection.size(); i += kr()) {
+          indirection[i + zero_index()] = zero.data();
+        }
+      }
+
+      // Compute reference results, without renormalization.
+      for (size_t x = 0; x < width(); x++) {
+        for (size_t c = 0; c < channels(); c++) {
+          // Accumulate in int32_t: the microkernel accumulates integer products
+          // exactly, so the reference must not round-trip through floating point.
+          int32_t acc = bias[c];
+          for (size_t k = 0; k < kr(); k++) {
+            if (indirection[x * step() + k] != zero.data()) {
+              acc +=
+                (int32_t(indirection[x * step() + k][c + input_offset()]) - int32_t(input_zero_point() - 0x80)) *
+                int32_t(kernel[c * kr() + k]);
+            }
+          }
+          accumulators[x * channels() + c] = acc;
+        }
+      }
+
+      // Compute renormalization parameters.
+      const int32_t accumulated_min = *std::min_element(accumulators.cbegin(), accumulators.cend());
+      const int32_t accumulated_max = *std::max_element(accumulators.cbegin(), accumulators.cend());
+      const uint32_t accumulated_range = uint32_t(accumulated_max) - uint32_t(accumulated_min);
+      const double output_scale = accumulated_range >= 256 ? double(accumulated_range) / 255.0 : 1.00001;
+      const int8_t output_zero_point = int8_t(std::max(std::min(
+        lrint(-0.5 - 0.5 * double(accumulated_min + accumulated_max) / output_scale),
+        long(std::numeric_limits<int8_t>::max())), long(std::numeric_limits<int8_t>::min())));
+
+      // Prepare parameters.
+      const float requantization_scale = 1.0f / float(output_scale);
+      union xnn_qs8_gemm_params quantization_params = { };
+      switch (variant) {
+        case Variant::Native:
+          quantization_params = xnn_init_qs8_gemm_params(
+            requantization_scale, output_zero_point, int8_t(qmin() - 0x80), int8_t(qmax() - 0x80));
+          break;
+        case Variant::Scalar:
+          quantization_params = xnn_init_scalar_qs8_gemm_params(
+            requantization_scale, output_zero_point, int8_t(qmin() - 0x80), int8_t(qmax() - 0x80));
+          break;
+      }
+      const union xnn_qs8_requantization_params scalar_requantization_params =
+        xnn_init_scalar_qs8_requantization_params(requantization_scale, output_zero_point, int8_t(qmin() - 0x80), int8_t(qmax() - 0x80));
+
+      // Renormalize reference results.
+      for (size_t x = 0; x < width(); x++) {
+        for (size_t c = 0; c < channels(); c++) {
+          output_ref[x * channels() + c] = xnn_qs8_requantize_q31(accumulators[x * channels() + c], scalar_requantization_params);
+        }
+      }
+
+      // Call optimized micro-kernel.
+      dwconv_minmax(
+        channels(), width(),
+        indirection.data(), packed_weights.data(), output.data(),
+        step() * sizeof(void*),
+        (output_stride() - channels()) * sizeof(int8_t),
+        input_offset() * sizeof(int8_t), zero.data(),
+        &quantization_params);
+
+      // Verify results.
+      for (size_t x = 0; x < width(); x++) {
+        for (size_t c = 0; c < channels(); c++) {
+          ASSERT_GE(int32_t(output[x * output_stride() + c]), int32_t(qmin()) - 0x80)
+            << "x = " << x << ", channel = " << c;
+          ASSERT_LE(int32_t(output[x * output_stride() + c]), int32_t(qmax()) - 0x80)
+            << "x = " << x << ", channel = " << c;
+          ASSERT_EQ(int32_t(output[x * output_stride() + c]), int32_t(output_ref[x * channels() + c]))
+            << "x = " << x << ", channel = " << c << ", accumulator = " << accumulators[x * channels() + c];
+        }
+      }
+    }
+  }
+
void Test(xnn_f16_dwconv_minmax_unipass_ukernel_function dwconv_minmax, Variant variant = Variant::Native) const {
std::random_device random_device;
auto rng = std::mt19937(random_device());
diff --git a/test/qs8-dwconv-minmax.cc b/test/qs8-dwconv-minmax.cc
new file mode 100644
index 0000000..4925493
--- /dev/null
+++ b/test/qs8-dwconv-minmax.cc
@@ -0,0 +1,2252 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+// All rights reserved.
+//
+// Copyright 2019 Google LLC
+//
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree.
+//
+// Auto-generated file. Do not edit!
+// Specification: test/qs8-dwconv-minmax.yaml
+// Generator: tools/generate-dwconv-test.py
+
+
+#include <gtest/gtest.h>
+
+#include <xnnpack/common.h>
+#include <xnnpack/isa-checks.h>
+
+#include <xnnpack/dwconv.h>
+#include "dwconv-microkernel-tester.h"
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_eq_8) {
+ TEST_REQUIRES_X86_SSE2;
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_div_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_div_8_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_div_8_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_lt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 1; channels < 8; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_gt_8) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_gt_8_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, c_gt_8_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+ }
+
+  TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, multipixel_with_output_stride) {
+    TEST_REQUIRES_X86_SSE2;
+    for (size_t channels = 1; channels <= 40; channels += 7) {
+      DWConvMicrokernelTester()
+        .cr(8)
+        .kr(9)
+        .channels(channels)
+        .width(5)
+        .output_stride(43)
+        .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+    }
+  }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE2_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 16, 9 kernel taps, SSE2 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_eq_16) {
+ TEST_REQUIRES_X86_SSE2;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_div_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_lt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_gt_16) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSE2;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (16), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE2_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE2;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 24, 9 kernel taps, SSE2 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_eq_24) {
+ TEST_REQUIRES_X86_SSE2;
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_div_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_div_24_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_div_24_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_lt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 1; channels < 24; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_gt_24) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_gt_24_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, c_gt_24_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSE2;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (24), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .width(5)
+ .output_stride(127)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE2;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE2;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE2_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE2;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 8, 9 kernel taps, SSSE3 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_eq_8) {
+ TEST_REQUIRES_X86_SSSE3;
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_div_8) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_div_8_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_div_8_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_lt_8) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 1; channels < 8; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_gt_8) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_gt_8_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, c_gt_8_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSSE3;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (8), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .width(5)
+ .output_stride(43)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSSE3_MUL16, zero) {
+ TEST_REQUIRES_X86_SSSE3;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 16, 9 kernel taps, SSSE3 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_eq_16) {
+ TEST_REQUIRES_X86_SSSE3;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_div_16) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_lt_16) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_gt_16) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSSE3;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (16), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSSE3_MUL16, zero) {
+ TEST_REQUIRES_X86_SSSE3;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 24, 9 kernel taps, SSSE3 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_eq_24) {
+ TEST_REQUIRES_X86_SSSE3;
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_div_24) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_div_24_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_div_24_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_lt_24) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 1; channels < 24; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_gt_24) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_gt_24_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, c_gt_24_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSSE3;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (24), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .width(5)
+ .output_stride(127)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSSE3;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSSE3_MUL16, zero) {
+ TEST_REQUIRES_X86_SSSE3;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ // Unit tests for the QS8 depthwise-conv minmax microkernel: channel tile 8, 9 kernel taps, SSE4.1 mul16 variant.
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_eq_8) {
+ TEST_REQUIRES_X86_SSE41;
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_div_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_div_8_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_div_8_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_lt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 1; channels < 8; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_gt_8) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_gt_8_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, c_gt_8_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSE41;
+ // NOTE(review): loop variable 'channels' is unused; .channels() is fixed at the
+ // tile size (8), so all iterations are identical. Presumably generator-intended; confirm.
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .width(5)
+ .output_stride(43)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__SSE41_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE41;
+ // Cycles 'zero_index' over all 9 kernel taps together with a nonzero input offset.
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_eq_16) {
+ TEST_REQUIRES_X86_SSE41;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_div_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_lt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_gt_16) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+        .channels(channels)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__SSE41_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_eq_24) {
+ TEST_REQUIRES_X86_SSE41;
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_div_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_div_24_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_div_24_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_lt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 1; channels < 24; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_gt_24) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_gt_24_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, c_gt_24_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, multipixel) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+        .channels(channels)
+ .width(5)
+ .output_stride(127)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_SSE41;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, input_offset) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__SSE41_MUL16, zero) {
+ TEST_REQUIRES_X86_SSE41;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_eq_8) {
+ TEST_REQUIRES_X86_XOP;
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(8)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_div_8) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_div_8_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_div_8_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_lt_8) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 1; channels < 8; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_gt_8) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_gt_8_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, c_gt_8_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 9; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, multipixel) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+        .channels(channels)
+ .width(5)
+ .output_stride(43)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 40; channels += 7) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, input_offset) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP8X9__XOP_MUL16, zero) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 16; channels < 128; channels += 24) {
+ DWConvMicrokernelTester()
+ .cr(8)
+ .kr(9)
+ .channels(channels)
+ .input_offset(176)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_eq_16) {
+ TEST_REQUIRES_X86_XOP;
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(16)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_div_16) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_div_16_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_div_16_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_lt_16) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 1; channels < 16; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_gt_16) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_gt_16_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, c_gt_16_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 17; channels < 32; channels++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, multipixel) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+        .channels(channels)
+ .width(5)
+ .output_stride(83)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 80; channels += 15) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, input_offset) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP16X9__XOP_MUL16, zero) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 32; channels < 256; channels += 48) {
+ DWConvMicrokernelTester()
+ .cr(16)
+ .kr(9)
+ .channels(channels)
+ .input_offset(304)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
+
+
+#if XNN_ARCH_X86 || XNN_ARCH_X86_64
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_eq_24) {
+ TEST_REQUIRES_X86_XOP;
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(24)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_div_24) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_div_24_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_div_24_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_lt_24) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 1; channels < 24; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_gt_24) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_gt_24_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, c_gt_24_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 25; channels < 48; channels++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, multipixel) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, multipixel_with_step) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ for (size_t step = 2; step <= 9; step++) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .step(step)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, multipixel_with_output_stride) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+        .channels(channels)
+ .width(5)
+ .output_stride(127)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, multipixel_with_qmin) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmin(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, multipixel_with_qmax) {
+ TEST_REQUIRES_X86_XOP;
+ for (size_t channels = 1; channels <= 120; channels += 23) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .width(3)
+ .qmax(128)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, input_offset) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+
+ TEST(QS8_DWCONV_MINMAX_UP24X9__XOP_MUL16, zero) {
+ TEST_REQUIRES_X86_XOP;
+ for (uint32_t mz = 0; mz < 9; mz++) {
+ for (uint32_t channels = 48; channels < 384; channels += 72) {
+ DWConvMicrokernelTester()
+ .cr(24)
+ .kr(9)
+ .channels(channels)
+ .input_offset(464)
+ .zero_index(mz)
+ .Test(xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16);
+ }
+ }
+ }
+#endif // XNN_ARCH_X86 || XNN_ARCH_X86_64
diff --git a/test/qs8-dwconv-minmax.yaml b/test/qs8-dwconv-minmax.yaml
new file mode 100644
index 0000000..773d674
--- /dev/null
+++ b/test/qs8-dwconv-minmax.yaml
@@ -0,0 +1,16 @@
+# Copyright 2020 Google LLC
+#
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree.
+- name: xnn_qs8_dwconv_minmax_ukernel_up8x9__sse2_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up16x9__sse2_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up24x9__sse2_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up8x9__ssse3_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up16x9__ssse3_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up24x9__ssse3_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up8x9__sse41_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up16x9__sse41_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up24x9__sse41_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up8x9__xop_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up16x9__xop_mul16
+- name: xnn_qs8_dwconv_minmax_ukernel_up24x9__xop_mul16