; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"
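; Check that the widening (s|z)ext -> sub -> abs (-> trunc) -> add patterns,
; and the sve.[su]abd intrinsics followed by an add, combine into the SVE2
; SABA/UABA accumulating absolute difference instructions.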

;
; SABA
;

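; The widening sext+sub+abs+trunc of a difference, followed by an add, should
; lower to a single SABA.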
define <vscale x 16 x i8> @saba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 16 x i8> %b to <vscale x 16 x i16>
  %c.sext = sext <vscale x 16 x i8> %c to <vscale x 16 x i16>
  %sub = sub <vscale x 16 x i16> %b.sext, %c.sext
  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
  %add = add <vscale x 16 x i8> %a, %trunc
  ret <vscale x 16 x i8> %add
}

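; With i1 operands the absolute difference degenerates to an xor of the two
; predicates, so an eor+add sequence rather than a SABA is expected.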
define <vscale x 16 x i8> @saba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
; CHECK-LABEL: saba_b_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    mov z1.b, #1 // =0x1
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 16 x i1> %b to <vscale x 16 x i8>
  %c.sext = sext <vscale x 16 x i1> %c to <vscale x 16 x i8>
  %sub = sub <vscale x 16 x i8> %b.sext, %c.sext
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  %add = add <vscale x 16 x i8> %a, %abs
  ret <vscale x 16 x i8> %add
}

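; sabd.u predicated on an all-active ptrue, followed by an add, should also
; combine into SABA.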
define <vscale x 16 x i8> @saba_b_from_sabd(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_b_from_sabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %3 = add <vscale x 16 x i8> %2, %a
  ret <vscale x 16 x i8> %3
}

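; sabd.u leaves inactive lanes undefined, so the combine should also hold for
; an arbitrary predicate.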
define <vscale x 16 x i8> @saba_b_from_sabd_u(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: saba_b_from_sabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %2 = add <vscale x 16 x i8> %1, %a
  ret <vscale x 16 x i8> %2
}

define <vscale x 8 x i16> @saba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 8 x i16> %b to <vscale x 8 x i32>
  %c.sext = sext <vscale x 8 x i16> %c to <vscale x 8 x i32>
  %sub = sub <vscale x 8 x i32> %b.sext, %c.sext
  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
  %add = add <vscale x 8 x i16> %a, %trunc
  ret <vscale x 8 x i16> %add
}

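; Sub-width operands are sign-extended in-register (sxtb) before the SABA.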
define <vscale x 8 x i16> @saba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
; CHECK-LABEL: saba_h_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.h
; CHECK-NEXT:    sxtb z2.h, p0/m, z2.h
; CHECK-NEXT:    sxtb z1.h, p0/m, z1.h
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %c.sext = sext <vscale x 8 x i8> %c to <vscale x 8 x i16>
  %sub = sub <vscale x 8 x i16> %b.sext, %c.sext
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  %add = add <vscale x 8 x i16> %a, %abs
  ret <vscale x 8 x i16> %add
}

define <vscale x 8 x i16> @saba_h_from_sabd(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_h_from_sabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %3 = add <vscale x 8 x i16> %2, %a
  ret <vscale x 8 x i16> %3
}

define <vscale x 8 x i16> @saba_h_from_sabd_u(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: saba_h_from_sabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %2 = add <vscale x 8 x i16> %1, %a
  ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @saba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %c.sext = sext <vscale x 4 x i32> %c to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %b.sext, %c.sext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  %add = add <vscale x 4 x i32> %a, %trunc
  ret <vscale x 4 x i32> %add
}

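; As above, but the i16 operands are sign-extended with sxth first.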
define <vscale x 4 x i32> @saba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
; CHECK-LABEL: saba_s_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.s
; CHECK-NEXT:    sxth z2.s, p0/m, z2.s
; CHECK-NEXT:    sxth z1.s, p0/m, z1.s
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  %c.sext = sext <vscale x 4 x i16> %c to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %b.sext, %c.sext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  %add = add <vscale x 4 x i32> %a, %abs
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @saba_s_from_sabd(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_s_from_sabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %3 = add <vscale x 4 x i32> %2, %a
  ret <vscale x 4 x i32> %3
}

define <vscale x 4 x i32> @saba_s_from_sabd_u(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: saba_s_from_sabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %2 = add <vscale x 4 x i32> %1, %a
  ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @saba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 2 x i64> %b to <vscale x 2 x i128>
  %c.sext = sext <vscale x 2 x i64> %c to <vscale x 2 x i128>
  %sub = sub <vscale x 2 x i128> %b.sext, %c.sext
  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
  %add = add <vscale x 2 x i64> %a, %trunc
  ret <vscale x 2 x i64> %add
}

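; As above, but the i32 operands are sign-extended with sxtw first.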
define <vscale x 2 x i64> @saba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
; CHECK-LABEL: saba_d_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    sxtw z2.d, p0/m, z2.d
; CHECK-NEXT:    sxtw z1.d, p0/m, z1.d
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %b.sext = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %c.sext = sext <vscale x 2 x i32> %c to <vscale x 2 x i64>
  %sub = sub <vscale x 2 x i64> %b.sext, %c.sext
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  %add = add <vscale x 2 x i64> %a, %abs
  ret <vscale x 2 x i64> %add
}

define <vscale x 2 x i64> @saba_d_from_sabd(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_d_from_sabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %3 = add <vscale x 2 x i64> %2, %a
  ret <vscale x 2 x i64> %3
}

define <vscale x 2 x i64> @saba_d_from_sabd_u(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: saba_d_from_sabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %2 = add <vscale x 2 x i64> %1, %a
  ret <vscale x 2 x i64> %2
}

;
; UABA
;

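; The unsigned variants use zext instead of sext and should lower to UABA.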
define <vscale x 16 x i8> @uaba_b(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: uaba_b:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 16 x i8> %b to <vscale x 16 x i16>
  %c.zext = zext <vscale x 16 x i8> %c to <vscale x 16 x i16>
  %sub = sub <vscale x 16 x i16> %b.zext, %c.zext
  %abs = call <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16> %sub, i1 true)
  %trunc = trunc <vscale x 16 x i16> %abs to <vscale x 16 x i8>
  %add = add <vscale x 16 x i8> %a, %trunc
  ret <vscale x 16 x i8> %add
}

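; As with the signed case, i1 operands degenerate to an xor of the two
; predicates, so no UABA is expected.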
define <vscale x 16 x i8> @uaba_b_promoted_ops(<vscale x 16 x i8> %a, <vscale x 16 x i1> %b, <vscale x 16 x i1> %c) #0 {
; CHECK-LABEL: uaba_b_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p2.b
; CHECK-NEXT:    mov z1.b, #1 // =0x1
; CHECK-NEXT:    eor p0.b, p2/z, p0.b, p1.b
; CHECK-NEXT:    add z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 16 x i1> %b to <vscale x 16 x i8>
  %c.zext = zext <vscale x 16 x i1> %c to <vscale x 16 x i8>
  %sub = sub <vscale x 16 x i8> %b.zext, %c.zext
  %abs = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> %sub, i1 true)
  %add = add <vscale x 16 x i8> %a, %abs
  ret <vscale x 16 x i8> %add
}

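; uabd.u predicated on an all-active ptrue, followed by an add, should also
; combine into UABA.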
define <vscale x 16 x i8> @uaba_b_from_uabd(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: uaba_b_from_uabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32 31)
  %2 = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.u.nxv16i8(<vscale x 16 x i1> %1, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %3 = add <vscale x 16 x i8> %2, %a
  ret <vscale x 16 x i8> %3
}

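; uabd.u leaves inactive lanes undefined, so the combine should also hold for
; an arbitrary predicate.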
define <vscale x 16 x i8> @uaba_b_from_uabd_u(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c) #0 {
; CHECK-LABEL: uaba_b_from_uabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.b, z1.b, z2.b
; CHECK-NEXT:    ret
  %1 = call <vscale x 16 x i8> @llvm.aarch64.sve.uabd.u.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %b, <vscale x 16 x i8> %c)
  %2 = add <vscale x 16 x i8> %1, %a
  ret <vscale x 16 x i8> %2
}

define <vscale x 8 x i16> @uaba_h(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: uaba_h:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 8 x i16> %b to <vscale x 8 x i32>
  %c.zext = zext <vscale x 8 x i16> %c to <vscale x 8 x i32>
  %sub = sub <vscale x 8 x i32> %b.zext, %c.zext
  %abs = call <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32> %sub, i1 true)
  %trunc = trunc <vscale x 8 x i32> %abs to <vscale x 8 x i16>
  %add = add <vscale x 8 x i16> %a, %trunc
  ret <vscale x 8 x i16> %add
}

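; For the unsigned case sub-width operands are zero-extended with an AND mask
; before the UABA.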
define <vscale x 8 x i16> @uaba_h_promoted_ops(<vscale x 8 x i16> %a, <vscale x 8 x i8> %b, <vscale x 8 x i8> %c) #0 {
; CHECK-LABEL: uaba_h_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z2.h, z2.h, #0xff
; CHECK-NEXT:    and z1.h, z1.h, #0xff
; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 8 x i8> %b to <vscale x 8 x i16>
  %c.zext = zext <vscale x 8 x i8> %c to <vscale x 8 x i16>
  %sub = sub <vscale x 8 x i16> %b.zext, %c.zext
  %abs = call <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16> %sub, i1 true)
  %add = add <vscale x 8 x i16> %a, %abs
  ret <vscale x 8 x i16> %add
}

define <vscale x 8 x i16> @uaba_h_from_uabd(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: uaba_h_from_uabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %2 = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.u.nxv8i16(<vscale x 8 x i1> %1, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %3 = add <vscale x 8 x i16> %2, %a
  ret <vscale x 8 x i16> %3
}

define <vscale x 8 x i16> @uaba_h_from_uabd_u(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c) #0 {
; CHECK-LABEL: uaba_h_from_uabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.h, z1.h, z2.h
; CHECK-NEXT:    ret
  %1 = call <vscale x 8 x i16> @llvm.aarch64.sve.uabd.u.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %b, <vscale x 8 x i16> %c)
  %2 = add <vscale x 8 x i16> %1, %a
  ret <vscale x 8 x i16> %2
}

define <vscale x 4 x i32> @uaba_s(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: uaba_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %b.zext, %c.zext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  %add = add <vscale x 4 x i32> %a, %trunc
  ret <vscale x 4 x i32> %add
}

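; As above, with the i16 operands masked down to 16 bits first.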
define <vscale x 4 x i32> @uaba_s_promoted_ops(<vscale x 4 x i32> %a, <vscale x 4 x i16> %b, <vscale x 4 x i16> %c) #0 {
; CHECK-LABEL: uaba_s_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z2.s, z2.s, #0xffff
; CHECK-NEXT:    and z1.s, z1.s, #0xffff
; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 4 x i16> %b to <vscale x 4 x i32>
  %c.zext = zext <vscale x 4 x i16> %c to <vscale x 4 x i32>
  %sub = sub <vscale x 4 x i32> %b.zext, %c.zext
  %abs = call <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32> %sub, i1 true)
  %add = add <vscale x 4 x i32> %a, %abs
  ret <vscale x 4 x i32> %add
}

define <vscale x 4 x i32> @uaba_s_from_uabd(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: uaba_s_from_uabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32 31)
  %2 = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.u.nxv4i32(<vscale x 4 x i1> %1, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %3 = add <vscale x 4 x i32> %2, %a
  ret <vscale x 4 x i32> %3
}

define <vscale x 4 x i32> @uaba_s_from_uabd_u(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: uaba_s_from_uabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %1 = call <vscale x 4 x i32> @llvm.aarch64.sve.uabd.u.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
  %2 = add <vscale x 4 x i32> %1, %a
  ret <vscale x 4 x i32> %2
}

define <vscale x 2 x i64> @uaba_d(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: uaba_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 2 x i64> %b to <vscale x 2 x i128>
  %c.zext = zext <vscale x 2 x i64> %c to <vscale x 2 x i128>
  %sub = sub <vscale x 2 x i128> %b.zext, %c.zext
  %abs = call <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128> %sub, i1 true)
  %trunc = trunc <vscale x 2 x i128> %abs to <vscale x 2 x i64>
  %add = add <vscale x 2 x i64> %a, %trunc
  ret <vscale x 2 x i64> %add
}

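; As above, with the i32 operands masked down to 32 bits first.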
define <vscale x 2 x i64> @uaba_d_promoted_ops(<vscale x 2 x i64> %a, <vscale x 2 x i32> %b, <vscale x 2 x i32> %c) #0 {
; CHECK-LABEL: uaba_d_promoted_ops:
; CHECK:       // %bb.0:
; CHECK-NEXT:    and z2.d, z2.d, #0xffffffff
; CHECK-NEXT:    and z1.d, z1.d, #0xffffffff
; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %c.zext = zext <vscale x 2 x i32> %c to <vscale x 2 x i64>
  %sub = sub <vscale x 2 x i64> %b.zext, %c.zext
  %abs = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> %sub, i1 true)
  %add = add <vscale x 2 x i64> %a, %abs
  ret <vscale x 2 x i64> %add
}

define <vscale x 2 x i64> @uaba_d_from_uabd(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: uaba_d_from_uabd:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31)
  %2 = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.u.nxv2i64(<vscale x 2 x i1> %1, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %3 = add <vscale x 2 x i64> %2, %a
  ret <vscale x 2 x i64> %3
}

define <vscale x 2 x i64> @uaba_d_from_uabd_u(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c) #0 {
; CHECK-LABEL: uaba_d_from_uabd_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.d, z1.d, z2.d
; CHECK-NEXT:    ret
  %1 = call <vscale x 2 x i64> @llvm.aarch64.sve.uabd.u.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %b, <vscale x 2 x i64> %c)
  %2 = add <vscale x 2 x i64> %1, %a
  ret <vscale x 2 x i64> %2
}

; A variant of uaba_s but with the add operands switched.
define <vscale x 4 x i32> @uaba_s_commutative(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c) #0 {
; CHECK-LABEL: uaba_s_commutative:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaba z0.s, z1.s, z2.s
; CHECK-NEXT:    ret
  %b.zext = zext <vscale x 4 x i32> %b to <vscale x 4 x i64>
  %c.zext = zext <vscale x 4 x i32> %c to <vscale x 4 x i64>
  %sub = sub <vscale x 4 x i64> %b.zext, %c.zext
  %abs = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> %sub, i1 true)
  %trunc = trunc <vscale x 4 x i64> %abs to <vscale x 4 x i32>
  %add = add <vscale x 4 x i32> %trunc, %a
  ret <vscale x 4 x i32> %add
}

declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
declare <vscale x 8 x i16> @llvm.abs.nxv8i16(<vscale x 8 x i16>, i1)
declare <vscale x 16 x i16> @llvm.abs.nxv16i16(<vscale x 16 x i16>, i1)
declare <vscale x 4 x i32> @llvm.abs.nxv4i32(<vscale x 4 x i32>, i1)
declare <vscale x 8 x i32> @llvm.abs.nxv8i32(<vscale x 8 x i32>, i1)
declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
declare <vscale x 2 x i128> @llvm.abs.nxv2i128(<vscale x 2 x i128>, i1)

declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.ptrue.nxv4i1(i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.ptrue.nxv16i1(i32)

declare <vscale x 16 x i8> @llvm.aarch64.sve.sabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sabd.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.sabd.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.sabd.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

declare <vscale x 16 x i8> @llvm.aarch64.sve.uabd.u.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uabd.u.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 4 x i32> @llvm.aarch64.sve.uabd.u.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 2 x i64> @llvm.aarch64.sve.uabd.u.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

attributes #0 = { "target-features"="+neon,+sve,+sve2" }