| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s |
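; Check that converting a predicate produced by a compare, while, or match
; instruction to an svbool needs no additional instructions (e.g. an AND with
; a ptrue), since these instructions already zero the inactive lanes.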
| |
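; Floating-point absolute compares (facgt/facge).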
| define <vscale x 16 x i1> @facgt_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) { |
| ; CHECK-LABEL: facgt_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: facgt p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @facge_fun(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) { |
| ; CHECK-LABEL: facge_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: facge p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1> %a, <vscale x 2 x double> %b, <vscale x 2 x double> %c) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
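; Predicate-generating while instructions.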
| define <vscale x 16 x i1> @whilege_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilege_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilege p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilegt_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilegt_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilegt p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilehi_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehi_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilehi p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilehs_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilehs_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilehs p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilele_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilele_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilele p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilelo_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilelo_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilelo p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilels_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilels_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilels p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
| define <vscale x 16 x i1> @whilelt_fun(i32 %a, i32 %b) { |
| ; CHECK-LABEL: whilelt_fun: |
| ; CHECK: // %bb.0: // %entry |
| ; CHECK-NEXT: whilelt p0.d, w0, w1 |
| ; CHECK-NEXT: ret |
| entry: |
| %0 = call <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32 %a, i32 %b) |
| %1 = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %0) |
| ret <vscale x 16 x i1> %1 |
| } |
| |
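; Integer compares, including the wide-element forms.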
| define <vscale x 16 x i1> @cmpeq_d_fun(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpeq_d_fun: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpeq p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpeq_wide_s_fun(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpeq_wide_s_fun: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpeq p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpge_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpge_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpge p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpge_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpge_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpge p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpgt_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpgt p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpgt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpgt_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpgt p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmphi_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmphi_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmphi p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmphi_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmphi_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmphi p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmphs_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmphs_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmphs p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmphs_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmphs_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmphs p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmple_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmple_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmple p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmplo_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmplo_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmplo p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpls_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpls_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpls p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmplt_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmplt_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmplt p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpne_d(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpne_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpne p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x i64> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @cmpne_wide_s(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a, <vscale x 2 x i64> %b) { |
| ; CHECK-LABEL: cmpne_wide_s: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: cmpne p0.s, p0/z, z0.s, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1> %pg, |
| <vscale x 4 x i32> %a, |
| <vscale x 2 x i64> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
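; Floating-point compares.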
| define <vscale x 16 x i1> @fcmeq_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) { |
| ; CHECK-LABEL: fcmeq_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcmeq p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x double> %a, |
| <vscale x 2 x double> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @fcmgt_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) { |
| ; CHECK-LABEL: fcmgt_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcmgt p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x double> %a, |
| <vscale x 2 x double> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @fcmne_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) { |
| ; CHECK-LABEL: fcmne_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcmne p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x double> %a, |
| <vscale x 2 x double> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @fcmuo_d(<vscale x 2 x i1> %pg, <vscale x 2 x double> %a, <vscale x 2 x double> %b) { |
| ; CHECK-LABEL: fcmuo_d: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: fcmuo p0.d, p0/z, z0.d, z1.d |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1> %pg, |
| <vscale x 2 x double> %a, |
| <vscale x 2 x double> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
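; SVE2 match/nmatch.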
| define <vscale x 16 x i1> @match_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) { |
| ; CHECK-LABEL: match_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: match p0.h, p0/z, z0.h, z1.h |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1> %pg, |
| <vscale x 8 x i16> %a, |
| <vscale x 8 x i16> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| define <vscale x 16 x i1> @nmatch_i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a, <vscale x 8 x i16> %b) { |
| ; CHECK-LABEL: nmatch_i16: |
| ; CHECK: // %bb.0: |
| ; CHECK-NEXT: nmatch p0.h, p0/z, z0.h, z1.h |
| ; CHECK-NEXT: ret |
| %1 = call <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1> %pg, |
| <vscale x 8 x i16> %a, |
| <vscale x 8 x i16> %b) |
| %out = tail call <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1> %1) |
| ret <vscale x 16 x i1> %out |
| } |
| |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.facgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.facge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpeq.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpge.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpgt.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpne.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.fcmpuo.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilele.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelo.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilels.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.whilelt.nxv2i1.i32(i32, i32) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpeq.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpeq.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpge.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpge.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpgt.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpgt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphi.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphi.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmphs.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmphs.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmple.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplo.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpls.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmplt.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 2 x i1> @llvm.aarch64.sve.cmpne.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>) |
| declare <vscale x 4 x i1> @llvm.aarch64.sve.cmpne.wide.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>, <vscale x 2 x i64>) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.match.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) |
| declare <vscale x 8 x i1> @llvm.aarch64.sve.nmatch.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>, <vscale x 8 x i16>) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv2i1(<vscale x 2 x i1>) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv4i1(<vscale x 4 x i1>) |
| declare <vscale x 16 x i1> @llvm.aarch64.sve.convert.to.svbool.nxv8i1(<vscale x 8 x i1>) |