// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$assert ARCH in ["ARM", "X86", "RELAXED"]
$assert not FMA or ARCH == "RELAXED"
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>
$WASM_F32X4_MIN={"ARM": "wasm_f32x4_min", "X86": "wasm_f32x4_pmin", "RELAXED": "__builtin_wasm_relaxed_min_f32x4"}[ARCH]
$WASM_F32X4_MAX={"ARM": "wasm_f32x4_max", "X86": "wasm_f32x4_pmax", "RELAXED": "__builtin_wasm_relaxed_max_f32x4"}[ARCH]
$ISA = "wasmsimd" if ARCH != "RELAXED" else "wasmrelaxedsimd"
$ARCH_SUFFIX = "" if ARCH == "RELAXED" and not FMA else "_" + ("fma" if FMA else ARCH.lower())
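// vmulcaddc: multiply-add with per-channel constants,
//   output[m][c] = clamp(input[m][c] * scale[c] + bias[c], min, max),
// processing ROW_TILE rows and CHANNEL_TILE channels per step.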
void xnn_f32_vmulcaddc_minmax_ukernel_c${CHANNEL_TILE}__${ISA}${ARCH_SUFFIX}_${ROW_TILE}x(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
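  // Rows 1..ROW_TILE-1 start one input_stride/output_stride past the previous row.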
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;
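  // params store min/max as pairs of identical floats: one 64-bit load-splat
  // broadcasts the value to all four lanes.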
  const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
  const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
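    // If fewer than ROW_TILE rows remain, alias the surplus row pointers to the
    // previous row: the duplicated work rewrites the same values in place.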
    $for M in range(1, ROW_TILE):
      $if M % 2 == 0:
        if XNN_UNPREDICTABLE(rows <= ${M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $else:
        if XNN_UNPREDICTABLE(rows < ${M+1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
    const float* w = weights;
    size_t c = channels;
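    // Main loop: CHANNEL_TILE channels per iteration. Each packed-weight group
    // holds CHANNEL_TILE scales followed by CHANNEL_TILE biases.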
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const v128_t vscale${ABC[0:4]} = wasm_v128_load(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const v128_t vscale${ABC[C:C+4]} = wasm_v128_load(w + ${C});
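      // Load CHANNEL_TILE inputs from each of the ROW_TILE rows and advance the
      // input pointers.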
      $for M in range(ROW_TILE):
        v128_t vacc${M}x${ABC[0:4]} = wasm_v128_load(i${M});
        $for C in range(4, CHANNEL_TILE, 4):
          v128_t vacc${M}x${ABC[C:C+4]} = wasm_v128_load(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};
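      // Load the per-channel biases from the second half of the weight group.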
      $for C in range(0, CHANNEL_TILE, 4):
        const v128_t vbias${ABC[C:C+4]} = wasm_v128_load(w + ${C + CHANNEL_TILE});
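      // acc = bias + scale * acc, fused into a single relaxed-SIMD FMA when available.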
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          $if FMA:
            vacc${M}x${ABC[C:C+4]} = __builtin_wasm_fma_f32x4(vbias${ABC[C:C+4]}, vscale${ABC[C:C+4]}, vacc${M}x${ABC[C:C+4]});
          $else:
            vacc${M}x${ABC[C:C+4]} = wasm_f32x4_add(vbias${ABC[C:C+4]}, wasm_f32x4_mul(vscale${ABC[C:C+4]}, vacc${M}x${ABC[C:C+4]}));
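      // Clamp: raise to at least min, then lower to at most max.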
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = ${WASM_F32X4_MAX}(vmin, vacc${M}x${ABC[C:C+4]});

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = ${WASM_F32X4_MIN}(vmax, vacc${M}x${ABC[C:C+4]});
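      // Store the outputs, then advance w past both the scales and the biases
      // of this group.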
      $for M in range(ROW_TILE):
        wasm_v128_store(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          wasm_v128_store(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};

      w += ${CHANNEL_TILE * 2};
    }
    $if CHANNEL_TILE > 4:
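      // Remainder loop: 4 channels at a time. w advances by 4 within the
      // current weight group, so the matching biases stay at offset CHANNEL_TILE.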
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const v128_t vscale = wasm_v128_load(w);

        $for M in range(ROW_TILE):
          v128_t vacc${M} = wasm_v128_load(i${M});
          i${M} += 4;

        const v128_t vbias = wasm_v128_load(w + ${CHANNEL_TILE});

        $for M in range(ROW_TILE):
          $if FMA:
            vacc${M} = __builtin_wasm_fma_f32x4(vbias, vscale, vacc${M});
          $else:
            vacc${M} = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc${M}));

        $for M in range(ROW_TILE):
          vacc${M} = ${WASM_F32X4_MAX}(vmin, vacc${M});

        $for M in range(ROW_TILE):
          vacc${M} = ${WASM_F32X4_MIN}(vmax, vacc${M});

        $for M in range(ROW_TILE):
          wasm_v128_store(o${M}, vacc${M});
          o${M} += 4;

        w += 4;
      }
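    // Tail: 1-3 remaining channels. The vector loads may read past the valid
    // elements (the kernel is declared XNN_OOB_READS); only c bytes are stored.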
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);

      $for M in range(ROW_TILE):
        v128_t vacc${M} = wasm_v128_load(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      const v128_t vbias = wasm_v128_load(w + ${CHANNEL_TILE});

      $for M in range(ROW_TILE):
        $if FMA:
          vacc${M} = __builtin_wasm_fma_f32x4(vbias, vscale, vacc${M});
        $else:
          vacc${M} = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc${M}));

      $for M in range(ROW_TILE):
        vacc${M} = ${WASM_F32X4_MAX}(vmin, vacc${M});

      $for M in range(ROW_TILE):
        vacc${M} = ${WASM_F32X4_MIN}(vmax, vacc${M});

      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *((double*) o${M}) = wasm_f64x2_extract_lane(vacc${M}, 0);

        $for M in range(ROW_TILE):
          vacc${M} = wasm_v32x4_shuffle(vacc${M}, vacc${M}, 2, 3, 2, 3);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *o${M}++ = wasm_f32x4_extract_lane(vacc${M}, 0);
      }
    }
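    // Advance to the next group of ROW_TILE rows; doz() is "difference or
    // zero", so the row count saturates at zero.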
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);

    rows = doz(rows, ${ROW_TILE});
  } while (rows != 0);
}