// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert DATATYPE in ["S8", "U8"]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert PIXEL_TILE == 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>


$XINT8_T = {"S8": "int8_t", "U8": "uint8_t"}[DATATYPE]
$XINT8X8_T = {"S8": "int8x8_t", "U8": "uint8x8_t"}[DATATYPE]
$VLD1_X8 = {"S8": "vld1_s8", "U8": "vld1_u8"}[DATATYPE]
$VST1_X8 = {"S8": "vst1_s8", "U8": "vst1_u8"}[DATATYPE]
$VST1_LANE_X8 = {"S8": "vst1_lane_s8", "U8": "vst1_lane_u8"}[DATATYPE]
$VEXT_X8 = {"S8": "vext_s8", "U8": "vext_u8"}[DATATYPE]
$VREINTERPRET_U32_X8 = {"S8": "vreinterpret_u32_s8", "U8": "vreinterpret_u32_u8"}[DATATYPE]
$VREINTERPRET_U16_X8 = {"S8": "vreinterpret_u16_s8", "U8": "vreinterpret_u16_u8"}[DATATYPE]
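// Indirect bilinear interpolation: for each output pixel, "input" supplies
// pointers to the four neighboring source pixels (top-left, top-right,
// bottom-left, bottom-right) and "weights" supplies the horizontal and
// vertical fixed-point interpolation coefficients.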
void xnn_${DATATYPE.lower()}_ibilinear_ukernel__neon_c${CHANNEL_TILE}${"" if PIXEL_TILE == 1 else "x%d" % PIXEL_TILE}(
    size_t output_pixels,
    size_t channels,
    const ${XINT8_T}**restrict input,
    size_t input_offset,
    const int16_t*restrict weights,
    ${XINT8_T}*restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const ${XINT8_T}* i0 = (const ${XINT8_T}*) ((uintptr_t) input[0] + input_offset);
    const ${XINT8_T}* i1 = (const ${XINT8_T}*) ((uintptr_t) input[1] + input_offset);
    const ${XINT8_T}* i2 = (const ${XINT8_T}*) ((uintptr_t) input[2] + input_offset);
    const ${XINT8_T}* i3 = (const ${XINT8_T}*) ((uintptr_t) input[3] + input_offset);
    input += 4;

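    // The weights stream carries two fixed-point coefficients per output pixel:
    // alphah (horizontal) followed by alphav (vertical). On AArch64, alphah is
    // duplicated across a full Q register so vmlal_high_s16 can consume its
    // upper half directly.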
    #if XNN_ARCH_ARM64
      const int16x8_t valphah = vld1q_dup_s16(weights); weights += 1;
    #else
      const int16x4_t valphah = vld1_dup_s16(weights); weights += 1;
    #endif
    const int32x4_t valphav = vmovl_s16(vld1_dup_s16(weights)); weights += 1;

    size_t c = channels;
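    // Fixed-point bilinear evaluation; the shift constants below treat the
    // coefficients as Q11:
    //   t   = (tl << 11) + (tr - tl) * alphah                        (top edge)
    //   d   = ((bl - tl) << 11) + ((br - bl) - (tr - tl)) * alphah   (vertical delta)
    //   acc = (t << 11) + d * alphav                                 (Q22)
    // acc is rescaled to 8 bits by taking bits 31:16 and then applying a
    // rounding right shift by 6 (16 + 6 == 22).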
    $if CHANNEL_TILE > 8:
      for (; c >= ${CHANNEL_TILE} * sizeof(${XINT8_T}); c -= ${CHANNEL_TILE} * sizeof(${XINT8_T})) {
        $for C in range(0, CHANNEL_TILE, 8):
          const ${XINT8X8_T} vtl${ABC[C:C+8]} = ${VLD1_X8}(i0); i0 += 8;
          const ${XINT8X8_T} vtr${ABC[C:C+8]} = ${VLD1_X8}(i1); i1 += 8;
          const ${XINT8X8_T} vbl${ABC[C:C+8]} = ${VLD1_X8}(i2); i2 += 8;
          const ${XINT8X8_T} vbr${ABC[C:C+8]} = ${VLD1_X8}(i3); i3 += 8;

        $for C in range(0, CHANNEL_TILE, 8):
          $if DATATYPE == "S8":
            const int16x8_t vtd${ABC[C:C+8]} = vsubl_s8(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]});
            const int16x8_t vbd${ABC[C:C+8]} = vsubl_s8(vbr${ABC[C:C+8]}, vbl${ABC[C:C+8]});
            const int16x8_t vdl${ABC[C:C+8]} = vsubl_s8(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]});
            const int16x8_t vxtl${ABC[C:C+8]} = vmovl_s8(vtl${ABC[C:C+8]});
          $else:
            const int16x8_t vtd${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vtr${ABC[C:C+8]}, vtl${ABC[C:C+8]}));
            const int16x8_t vbd${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vbr${ABC[C:C+8]}, vbl${ABC[C:C+8]}));
            const int16x8_t vdl${ABC[C:C+8]} = vreinterpretq_s16_u16(vsubl_u8(vbl${ABC[C:C+8]}, vtl${ABC[C:C+8]}));
            const int16x8_t vxtl${ABC[C:C+8]} = vreinterpretq_s16_u16(vmovl_u8(vtl${ABC[C:C+8]}));

        $for C in range(0, CHANNEL_TILE, 8):
          const int16x8_t vdd${ABC[C:C+8]} = vsubq_s16(vbd${ABC[C:C+8]}, vtd${ABC[C:C+8]});

        #if XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vt${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl${ABC[C:C+8]}), 11), vget_low_s16(vtd${ABC[C:C+8]}), vget_low_s16(valphah));
            const int32x4_t vt${ABC[C+4:C+8]} = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl${ABC[C:C+8]}), 11), vtd${ABC[C:C+8]}, valphah);

          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vd${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vdl${ABC[C:C+8]}), 11), vget_low_s16(vdd${ABC[C:C+8]}), vget_low_s16(valphah));
            const int32x4_t vd${ABC[C+4:C+8]} = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl${ABC[C:C+8]}), 11), vdd${ABC[C:C+8]}, valphah);
        #else  // !XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vt${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl${ABC[C:C+8]}), 11), vget_low_s16(vtd${ABC[C:C+8]}), valphah);
            const int32x4_t vt${ABC[C+4:C+8]} = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl${ABC[C:C+8]}), 11), vget_high_s16(vtd${ABC[C:C+8]}), valphah);

          $for C in range(0, CHANNEL_TILE, 8):
            const int32x4_t vd${ABC[C:C+4]} = vmlal_s16(vshll_n_s16(vget_low_s16(vdl${ABC[C:C+8]}), 11), vget_low_s16(vdd${ABC[C:C+8]}), valphah);
            const int32x4_t vd${ABC[C+4:C+8]} = vmlal_s16(vshll_n_s16(vget_high_s16(vdl${ABC[C:C+8]}), 11), vget_high_s16(vdd${ABC[C:C+8]}), valphah);
        #endif  // !XNN_ARCH_ARM64

        $for C in range(0, CHANNEL_TILE, 8):
          const int32x4_t vacc${ABC[C:C+4]} = vmlaq_s32(vshlq_n_s32(vt${ABC[C:C+4]}, 11), vd${ABC[C:C+4]}, valphav);
          const int32x4_t vacc${ABC[C+4:C+8]} = vmlaq_s32(vshlq_n_s32(vt${ABC[C+4:C+8]}, 11), vd${ABC[C+4:C+8]}, valphav);

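        // Extract bits 31:16 of each 32-bit lane: VUZP2 on the reinterpreted
        // halves on AArch64, narrowing shifts by 16 elsewhere; both keep the
        // same upper 16 bits of acc.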
        #if XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int16x8_t vacc${ABC[C:C+8]} = vuzp2q_s16(vreinterpretq_s16_s32(vacc${ABC[C:C+4]}), vreinterpretq_s16_s32(vacc${ABC[C+4:C+8]}));
        #else  // !XNN_ARCH_ARM64
          $for C in range(0, CHANNEL_TILE, 8):
            const int16x8_t vacc${ABC[C:C+8]} = vcombine_s16(vshrn_n_s32(vacc${ABC[C:C+4]}, 16), vshrn_n_s32(vacc${ABC[C+4:C+8]}, 16));
        #endif  // !XNN_ARCH_ARM64

        $if DATATYPE == "S8":
          $for C in range(0, CHANNEL_TILE, 8):
            const int8x8_t vo${ABC[C:C+8]} = vrshrn_n_s16(vacc${ABC[C:C+8]}, 6);
        $else:
          $for C in range(0, CHANNEL_TILE, 8):
            const uint8x8_t vo${ABC[C:C+8]} = vrshrn_n_u16(vreinterpretq_u16_s16(vacc${ABC[C:C+8]}), 6);

        $for C in range(0, CHANNEL_TILE, 8):
          ${VST1_X8}(output, vo${ABC[C:C+8]}); output += 8;
      }
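    // Interpolate 8 channels per iteration.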
    for (; c >= 8 * sizeof(${XINT8_T}); c -= 8 * sizeof(${XINT8_T})) {
      const ${XINT8X8_T} vtl01234567 = ${VLD1_X8}(i0); i0 += 8;
      const ${XINT8X8_T} vtr01234567 = ${VLD1_X8}(i1); i1 += 8;
      const ${XINT8X8_T} vbl01234567 = ${VLD1_X8}(i2); i2 += 8;
      const ${XINT8X8_T} vbr01234567 = ${VLD1_X8}(i3); i3 += 8;

      $if DATATYPE == "S8":
        const int16x8_t vtd01234567 = vsubl_s8(vtr01234567, vtl01234567);
        const int16x8_t vbd01234567 = vsubl_s8(vbr01234567, vbl01234567);
        const int16x8_t vdl01234567 = vsubl_s8(vbl01234567, vtl01234567);
        const int16x8_t vxtl01234567 = vmovl_s8(vtl01234567);
      $else:
        const int16x8_t vtd01234567 = vreinterpretq_s16_u16(vsubl_u8(vtr01234567, vtl01234567));
        const int16x8_t vbd01234567 = vreinterpretq_s16_u16(vsubl_u8(vbr01234567, vbl01234567));
        const int16x8_t vdl01234567 = vreinterpretq_s16_u16(vsubl_u8(vbl01234567, vtl01234567));
        const int16x8_t vxtl01234567 = vreinterpretq_s16_u16(vmovl_u8(vtl01234567));

      const int16x8_t vdd01234567 = vsubq_s16(vbd01234567, vtd01234567);

      #if XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), vget_low_s16(valphah));
        const int32x4_t vt4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vtd01234567, valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), vget_low_s16(valphah));
        const int32x4_t vd4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vdd01234567, valphah);
      #else  // !XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), valphah);
        const int32x4_t vt4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vget_high_s16(vtd01234567), valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), valphah);
        const int32x4_t vd4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vget_high_s16(vdd01234567), valphah);
      #endif  // !XNN_ARCH_ARM64

      const int32x4_t vacc0123 = vmlaq_s32(vshlq_n_s32(vt0123, 11), vd0123, valphav);
      const int32x4_t vacc4567 = vmlaq_s32(vshlq_n_s32(vt4567, 11), vd4567, valphav);

      #if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vuzp2q_s16(vreinterpretq_s16_s32(vacc0123), vreinterpretq_s16_s32(vacc4567));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vcombine_s16(vshrn_n_s32(vacc0123, 16), vshrn_n_s32(vacc4567, 16));
      #endif  // !XNN_ARCH_ARM64

      $if DATATYPE == "S8":
        const int8x8_t vo01234567 = vrshrn_n_s16(vacc01234567, 6);
      $else:
        const uint8x8_t vo01234567 = vrshrn_n_u16(vreinterpretq_u16_s16(vacc01234567), 6);

      ${VST1_X8}(output, vo01234567); output += 8;
    }
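    // Tail of 1 to 7 channels: the 8-byte loads may read past the end of the
    // input rows (declared via XNN_OOB_READS); only c bytes are stored.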
    if XNN_UNLIKELY(c != 0) {
      const ${XINT8X8_T} vtl01234567 = ${VLD1_X8}(i0);
      const ${XINT8X8_T} vtr01234567 = ${VLD1_X8}(i1);
      const ${XINT8X8_T} vbl01234567 = ${VLD1_X8}(i2);
      const ${XINT8X8_T} vbr01234567 = ${VLD1_X8}(i3);

      $if DATATYPE == "S8":
        const int16x8_t vtd01234567 = vsubl_s8(vtr01234567, vtl01234567);
        const int16x8_t vbd01234567 = vsubl_s8(vbr01234567, vbl01234567);
        const int16x8_t vdl01234567 = vsubl_s8(vbl01234567, vtl01234567);
        const int16x8_t vxtl01234567 = vmovl_s8(vtl01234567);
      $else:
        const int16x8_t vtd01234567 = vreinterpretq_s16_u16(vsubl_u8(vtr01234567, vtl01234567));
        const int16x8_t vbd01234567 = vreinterpretq_s16_u16(vsubl_u8(vbr01234567, vbl01234567));
        const int16x8_t vdl01234567 = vreinterpretq_s16_u16(vsubl_u8(vbl01234567, vtl01234567));
        const int16x8_t vxtl01234567 = vreinterpretq_s16_u16(vmovl_u8(vtl01234567));

      const int16x8_t vdd01234567 = vsubq_s16(vbd01234567, vtd01234567);

      #if XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), vget_low_s16(valphah));
        const int32x4_t vt4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vtd01234567, valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), vget_low_s16(valphah));
        const int32x4_t vd4567 = vmlal_high_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vdd01234567, valphah);
      #else  // !XNN_ARCH_ARM64
        const int32x4_t vt0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vxtl01234567), 11), vget_low_s16(vtd01234567), valphah);
        const int32x4_t vt4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vxtl01234567), 11), vget_high_s16(vtd01234567), valphah);

        const int32x4_t vd0123 = vmlal_s16(vshll_n_s16(vget_low_s16(vdl01234567), 11), vget_low_s16(vdd01234567), valphah);
        const int32x4_t vd4567 = vmlal_s16(vshll_n_s16(vget_high_s16(vdl01234567), 11), vget_high_s16(vdd01234567), valphah);
      #endif  // !XNN_ARCH_ARM64

      const int32x4_t vacc0123 = vmlaq_s32(vshlq_n_s32(vt0123, 11), vd0123, valphav);
      const int32x4_t vacc4567 = vmlaq_s32(vshlq_n_s32(vt4567, 11), vd4567, valphav);

      #if XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vuzp2q_s16(vreinterpretq_s16_s32(vacc0123), vreinterpretq_s16_s32(vacc4567));
      #else  // !XNN_ARCH_ARM64
        const int16x8_t vacc01234567 = vcombine_s16(vshrn_n_s32(vacc0123, 16), vshrn_n_s32(vacc4567, 16));
      #endif  // !XNN_ARCH_ARM64

      $if DATATYPE == "S8":
        int8x8_t vo01234567 = vrshrn_n_s16(vacc01234567, 6);
      $else:
        uint8x8_t vo01234567 = vrshrn_n_u16(vreinterpretq_u16_s16(vacc01234567), 6);

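      // Store 4, 2, and/or 1 trailing lanes, rotating the vector with VEXT
      // after each partial store so the next unstored lane lands in lane 0.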
      if (c & (4 * sizeof(${XINT8_T}))) {
        vst1_lane_u32((void*) output, ${VREINTERPRET_U32_X8}(vo01234567), 0); output += 4;
        vo01234567 = ${VEXT_X8}(vo01234567, vo01234567, 4);
      }
      if (c & (2 * sizeof(${XINT8_T}))) {
        vst1_lane_u16((void*) output, ${VREINTERPRET_U16_X8}(vo01234567), 0); output += 2;
        vo01234567 = ${VEXT_X8}(vo01234567, vo01234567, 2);
      }
      if (c & (1 * sizeof(${XINT8_T}))) {
        ${VST1_LANE_X8}(output, vo01234567, 0); output += 1;
      }
    }

    output = (${XINT8_T}*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}