| // Copyright 2020 Google LLC |
| // |
| // This source code is licensed under the BSD-style license found in the |
| // LICENSE file in the root directory of this source tree. |
| |
| $assert ROW_TILE >= 1 |
| $assert ACCUMULATORS >= 1 |
| #include <assert.h> |
| |
| #include <wasm_simd128.h> |
| |
| #include <xnnpack/dwconv.h> |
| #include <xnnpack/math.h> |
| |
| |
| $ARCH_SUFFIX = "_x86" if X86 else "_arm" |
| |
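| // DWConv2D CHW microkernel for a 3x3 kernel with stride 1 and padding 1, |
| // producing ${ROW_TILE} output row(s) and 4 output pixels per inner iteration. |
| // The filter taps are broadcast ("splat") from three weight vectors. |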
| void xnn_f32_dwconv2d_chw_ukernel_3x3p1__wasmsimd${ARCH_SUFFIX}_splat_${ROW_TILE}x4${"_acc%d" % ACCUMULATORS if ACCUMULATORS > 1 else ""}( |
| size_t input_height, |
| size_t input_width, |
| const float* input, |
| const float* weights, |
| const float* zero, |
| float* output, |
| uint32_t padding_top, |
| const union xnn_f32_chw_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS |
| { |
| assert(input_height != 0); |
| assert(input_width != 0); |
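| // input_width is measured in bytes and must hold a whole number of floats. |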
| assert(input_width % sizeof(float) == 0); |
| assert(padding_top == 1); |
| |
| const v128_t vmask = wasm_v128_load(params->scalar.mask); |
| const v128_t vmax = wasm_v128_load32_splat(&params->scalar.max); |
| const v128_t vmin = wasm_v128_load32_splat(&params->scalar.min); |
| |
| const v128_t vw0123 = wasm_v128_load(weights); |
| const v128_t vw4567 = wasm_v128_load(weights + 4); |
| const v128_t vw89 = wasm_v128_load64_splat(weights + 8); |
| |
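| // Bytes consumed per row: input_width rounded up to a whole number of 4-float |
| // blocks. Subtracting it rewinds a row pointer back to the start of its row. |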
| const size_t input_decrement = round_up_po2(input_width, 4 * sizeof(float)); |
| |
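| // padding_top == 1: the row above the first input row is the caller-provided |
| // zero buffer. |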
| const float* i0 = zero; |
| const float* i1 = input; |
| $for M in range(2, 2 + ROW_TILE): |
| const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_width); |
| |
| float* o0 = output; |
| $for M in range(1, ROW_TILE): |
| float* o${M} = (float*) ((uintptr_t) o${M-1} + input_width); |
| |
| size_t output_height = input_height; |
| do { |
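| // Substitute the zero buffer for input rows past the end of the image, and |
| // alias the corresponding output row pointers to the row above so that their |
| // stores are harmless. |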
| $for M in range(2, 2 + ROW_TILE): |
| if XNN_UNPREDICTABLE(output_height < ${M}) { |
| i${M} = zero; |
| $if M <= ROW_TILE: |
| o${M-1} = o${M-2}; |
| } |
| |
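| // The 4 pixels to the left of the first block are zeroes (left padding). |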
| $for M in range(2 + ROW_TILE): |
| v128_t vi${M}x0123 = wasm_f32x4_const_splat(0.0f); |
| |
| $for M in range(2 + ROW_TILE): |
| v128_t vi${M}x4567 = wasm_v128_load(i${M}); i${M} += 4; |
| |
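| // Main loop: process each row in blocks of 4 pixels, keeping one block of |
| // lookahead. |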
| size_t w = input_width; |
| for (; w > 4 * sizeof(float); w -= 4 * sizeof(float)) { |
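| // Start the p0 accumulator of every output row with the bias, broadcast |
| // from lane 0 of vw0123. |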
| $for M in range(ROW_TILE): |
| v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0); |
| |
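| // Load the next block of 4 pixels from every row (right-neighbor lookahead). |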
| $for M in range(2 + ROW_TILE): |
| const v128_t vi${M}x89AB = wasm_v128_load(i${M}); i${M} += 4; |
| |
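| // Center-column taps: weights[2] (top row), weights[5] (middle), weights[8] (bottom). |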
| $for M in range(ROW_TILE): |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2))); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 2: |
| v128_t vo${M}p1 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)); |
| $else: |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1))); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 3: |
| v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)); |
| $else: |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0))); |
| |
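| // Shift in the last pixel of the previous block to address the inputs one pixel |
| // to the left; these feed the left-column taps weights[1], weights[4], weights[7]. |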
| $for M in range(2 + ROW_TILE): |
| const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 4: |
| v128_t vo${M}p3 = wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)); |
| $else: |
| vo${M}p${3 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${3 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3))); |
| |
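| // The current block becomes the previous block of the next iteration. |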
| $for M in range(2 + ROW_TILE): |
| vi${M}x0123 = vi${M}x4567; |
| |
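| // Shift in the first pixel of the next block to address the inputs one pixel to |
| // the right; these feed the right-column taps weights[3], weights[6], weights[9]. |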
| $for M in range(2 + ROW_TILE): |
| const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vi${M}x89AB, 1, 2, 3, 4); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1))); |
| |
| $for M in range(2 + ROW_TILE): |
| vi${M}x4567 = vi${M}x89AB; |
| |
| $if ACCUMULATORS > 1: |
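| // Tree-reduce the partial sums into the p0 accumulator. |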
| $ACC_SLICE = 1 |
| $while ACC_SLICE < ACCUMULATORS: |
| $for A in range(0, ACCUMULATORS, ACC_SLICE * 2): |
| $if A + ACC_SLICE < ACCUMULATORS: |
| $for M in range(ROW_TILE): |
| vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE}); |
| $ACC_SLICE *= 2 |
| |
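| // Clamp. On x86, wasm_f32x4_pmin/pmax lower to single MINPS/MAXPS instructions, |
| // whereas the NaN-propagating wasm_f32x4_min/max are cheaper on ARM. |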
| $if X86: |
| $for M in range(ROW_TILE): |
| v128_t vo${M} = wasm_f32x4_pmax(vmin, vo${M}p0); |
| $for M in range(ROW_TILE): |
| vo${M} = wasm_f32x4_pmin(vmax, vo${M}); |
| $else: |
| $for M in range(ROW_TILE): |
| v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin); |
| $for M in range(ROW_TILE): |
| vo${M} = wasm_f32x4_min(vo${M}, vmax); |
| |
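| // Store bottom row first: if an out-of-range output row was aliased to the row |
| // above it, the valid row is written last and wins. |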
| $for M in reversed(range(ROW_TILE)): |
| wasm_v128_store(o${M}, vo${M}); o${M} += 4; |
| } |
| // Always process the last block of 1..4 pixels. |
| assert(w >= 1 * sizeof(float)); |
| assert(w <= 4 * sizeof(float)); |
| { |
| $for M in range(ROW_TILE): |
| v128_t vo${M}p0 = wasm_v32x4_shuffle(vw0123, vw0123, 0, 0, 0, 0); |
| |
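| // Zero the lanes beyond the end of the row (vmask covers the 1..4 valid pixels). |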
| $for M in range(2 + ROW_TILE): |
| vi${M}x4567 = wasm_v128_and(vmask, vi${M}x4567); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M}x4567, wasm_v32x4_shuffle(vw0123, vw0123, 2, 2, 2, 2))); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 2: |
| v128_t vo${M}p1 = wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1)); |
| $else: |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+1}x4567, wasm_v32x4_shuffle(vw4567, vw4567, 1, 1, 1, 1))); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 3: |
| v128_t vo${M}p2 = wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0)); |
| $else: |
| vo${M}p0 = wasm_f32x4_add(vo${M}p0, wasm_f32x4_mul(vi${M+2}x4567, wasm_v32x4_shuffle(vw89, vw89, 0, 0, 0, 0))); |
| |
| $for M in range(2 + ROW_TILE): |
| const v128_t vi${M}x3456 = wasm_v32x4_shuffle(vi${M}x0123, vi${M}x4567, 3, 4, 5, 6); |
| |
| $for M in range(ROW_TILE): |
| $if ACCUMULATORS >= 4: |
| v128_t vo${M}p3 = wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1)); |
| $else: |
| vo${M}p${3 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${3 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x3456, wasm_v32x4_shuffle(vw0123, vw0123, 1, 1, 1, 1))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${4 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${4 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 0, 0, 0, 0))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${5 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${5 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x3456, wasm_v32x4_shuffle(vw4567, vw4567, 3, 3, 3, 3))); |
| |
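| // Past the right edge of the image the neighbors are zeroes (right padding). |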
| const v128_t vzero = wasm_f32x4_const_splat(0.0f); |
| $for M in range(2 + ROW_TILE): |
| const v128_t vi${M}x5678 = wasm_v32x4_shuffle(vi${M}x4567, vzero, 1, 2, 3, 4); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${6 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${6 % ACCUMULATORS}, wasm_f32x4_mul(vi${M}x5678, wasm_v32x4_shuffle(vw0123, vw0123, 3, 3, 3, 3))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${7 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${7 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+1}x5678, wasm_v32x4_shuffle(vw4567, vw4567, 2, 2, 2, 2))); |
| |
| $for M in range(ROW_TILE): |
| vo${M}p${8 % ACCUMULATORS} = wasm_f32x4_add(vo${M}p${8 % ACCUMULATORS}, wasm_f32x4_mul(vi${M+2}x5678, wasm_v32x4_shuffle(vw89, vw89, 1, 1, 1, 1))); |
| |
| $if ACCUMULATORS > 1: |
| $ACC_SLICE = 1 |
| $while ACC_SLICE < ACCUMULATORS: |
| $for A in range(0, ACCUMULATORS, ACC_SLICE * 2): |
| $if A + ACC_SLICE < ACCUMULATORS: |
| $for M in range(ROW_TILE): |
| vo${M}p${A} = wasm_f32x4_add(vo${M}p${A}, vo${M}p${A + ACC_SLICE}); |
| $ACC_SLICE *= 2 |
| |
| $if X86: |
| $for M in range(ROW_TILE): |
| v128_t vo${M} = wasm_f32x4_pmax(vmin, vo${M}p0); |
| $for M in range(ROW_TILE): |
| vo${M} = wasm_f32x4_pmin(vmax, vo${M}); |
| $else: |
| $for M in range(ROW_TILE): |
| v128_t vo${M} = wasm_f32x4_max(vo${M}p0, vmin); |
| $for M in range(ROW_TILE): |
| vo${M} = wasm_f32x4_min(vo${M}, vmax); |
| |
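| // Store 4 pixels, or 2 and/or 1 pixels of the final partial block, rotating the |
| // vector between the stores. |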
| if XNN_LIKELY(w == 4 * sizeof(float)) { |
| $for M in reversed(range(ROW_TILE)): |
| wasm_v128_store(o${M}, vo${M}); o${M} += 4; |
| } else { |
| if (w & (2 * sizeof(float))) { |
| $for M in reversed(range(ROW_TILE)): |
| *((double*) o${M}) = wasm_f64x2_extract_lane(vo${M}, 0); o${M} += 2; |
| |
| $for M in range(ROW_TILE): |
| vo${M} = wasm_v32x4_shuffle(vo${M}, vo${M}, 2, 3, 0, 1); |
| } |
| if (w & (1 * sizeof(float))) { |
| $for M in reversed(range(ROW_TILE)): |
| *o${M} = wasm_f32x4_extract_lane(vo${M}, 0); o${M} += 1; |
| } |
| } |
| } |
| |
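| // Rewind the two rows that are re-read as the top rows of the next block of |
| // output rows; the remaining row pointers follow at input_width byte strides. |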
| i0 = (const float*) ((uintptr_t) i${ROW_TILE} - input_decrement); |
| i1 = (const float*) ((uintptr_t) i${ROW_TILE+1} - input_decrement); |
| $for M in range(2, 2 + ROW_TILE): |
| i${M} = (const float*) ((uintptr_t) i${M-1} + input_width); |
| |
| $if ROW_TILE > 1: |
| o0 = o${ROW_TILE - 1}; |
| $for M in range(1, ROW_TILE): |
| o${M} = (float*) ((uintptr_t) o${M-1} + input_width); |
| |
| $if ROW_TILE > 1: |
| output_height = doz(output_height, ${ROW_TILE}); |
| } while (${"--" if ROW_TILE == 1 else ""}output_height != 0); |
| } |