; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc -force-streaming -verify-machineinstrs < %s | FileCheck %s

target triple = "aarch64-linux"

; FADD ZA (f16, multi-vector x2): @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16 lowers
; to 'fadd za.h[wN, imm, vgx2]'. Covers the base slice (immediate 0) and
; %slice + 7, which is folded into the instruction's slice-offset immediate.
; Built with attribute #0 (+sme-f16f16).
define void @add_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #0 {
; CHECK-LABEL: add_f16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT: fadd za.h[w8, 0, vgx2], { z0.h, z1.h }
; CHECK-NEXT: fadd za.h[w8, 7, vgx2], { z0.h, z1.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
ret void
}

; FADD ZA (f16, multi-vector x4): @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16 lowers
; to 'fadd za.h[wN, imm, vgx4]' over { z0.h - z3.h }, for immediate 0 and the
; folded %slice + 7 form. Built with attribute #1 (+sme-f8f16), which also
; appears sufficient to select this instruction — note the contrast with #0 above.
define void @add_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
; CHECK-LABEL: add_f16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: fadd za.h[w8, 0, vgx4], { z0.h - z3.h }
; CHECK-NEXT: fadd za.h[w8, 7, vgx4], { z0.h - z3.h }
; CHECK-NEXT: ret
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #1 {
call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
ret void
}

; FSUB ZA (f16, multi-vector x2): @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16 lowers
; to 'fsub za.h[wN, imm, vgx2]'; immediate 0 and the folded %slice + 7 form.
; Built with attribute #1 (+sme-f8f16) — the x2/x4 pairs alternate #0/#1 so both
; feature sets get coverage for each operation.
define void @sub_f16_vg1x2(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1) #1 {
; CHECK-LABEL: sub_f16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT: fsub za.h[w8, 0, vgx2], { z0.h, z1.h }
; CHECK-NEXT: fsub za.h[w8, 7, vgx2], { z0.h, z1.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1)
ret void
}

; FSUB ZA (f16, multi-vector x4): @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16 lowers
; to 'fsub za.h[wN, imm, vgx4]' over { z0.h - z3.h }; immediate 0 and the folded
; %slice + 7 form. Built with attribute #0 (+sme-f16f16).
define void @sub_f16_vg1x4(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
; CHECK-LABEL: sub_f16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: fsub za.h[w8, 0, vgx4], { z0.h - z3.h }
; CHECK-NEXT: fsub za.h[w8, 7, vgx4], { z0.h - z3.h }
; CHECK-NEXT: ret
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3) #0 {
call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8f16(i32 %slice.7, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1,
<vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3);
ret void
}

; BFADD ZA (bf16, multi-vector x2): the same add.za16.vg1x2 intrinsic at nxv8bf16
; selects 'bfadd' instead of 'fadd'; immediate 0 and the folded %slice + 7 form.
; Built with attribute #2 (+sme-b16b16,+bf16).
define void @add_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
; CHECK-LABEL: add_bf16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT: bfadd za.h[w8, 0, vgx2], { z0.h, z1.h }
; CHECK-NEXT: bfadd za.h[w8, 7, vgx2], { z0.h, z1.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.add.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
ret void
}

; BFADD ZA (bf16, multi-vector x4): add.za16.vg1x4 at nxv8bf16 selects
; 'bfadd za.h[wN, imm, vgx4]' over { z0.h - z3.h }; immediate 0 and the folded
; %slice + 7 form. Built with attribute #2 (+sme-b16b16,+bf16).
define void @add_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
; CHECK-LABEL: add_bf16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: bfadd za.h[w8, 0, vgx4], { z0.h - z3.h }
; CHECK-NEXT: bfadd za.h[w8, 7, vgx4], { z0.h - z3.h }
; CHECK-NEXT: ret
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.add.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
ret void
}

; BFSUB ZA (bf16, multi-vector x2): sub.za16.vg1x2 at nxv8bf16 selects 'bfsub';
; immediate 0 and the folded %slice + 7 form. Built with attribute #2
; (+sme-b16b16,+bf16).
define void @sub_bf16_vg1x2(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1) #2 {
; CHECK-LABEL: sub_bf16_vg1x2:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT: bfsub za.h[w8, 0, vgx2], { z0.h, z1.h }
; CHECK-NEXT: bfsub za.h[w8, 7, vgx2], { z0.h, z1.h }
; CHECK-NEXT: ret
call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.sub.za16.vg1x2.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1)
ret void
}

; BFSUB ZA (bf16, multi-vector x4): sub.za16.vg1x4 at nxv8bf16 selects
; 'bfsub za.h[wN, imm, vgx4]' over { z0.h - z3.h }; immediate 0 and the folded
; %slice + 7 form. Built with attribute #2 (+sme-b16b16,+bf16).
define void @sub_bf16_vg1x4(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
; CHECK-LABEL: sub_bf16_vg1x4:
; CHECK: // %bb.0:
; CHECK-NEXT: // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: mov w8, w0
; CHECK-NEXT: // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT: bfsub za.h[w8, 0, vgx4], { z0.h - z3.h }
; CHECK-NEXT: bfsub za.h[w8, 7, vgx4], { z0.h - z3.h }
; CHECK-NEXT: ret
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3) #2 {
call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
%slice.7 = add i32 %slice, 7
call void @llvm.aarch64.sme.sub.za16.vg1x4.nxv8bf16(i32 %slice.7, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1,
<vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3);
ret void
}

; Feature sets under test: #0 = FEAT_SME_F16F16, #1 = FEAT_SME_F8F16,
; #2 = FEAT_SME_B16B16 (+bf16 for the bfloat IR type). The f16 functions
; alternate #0/#1 — presumably either feature alone enables the fp16 za.h
; add/sub forms; confirm against the SME2 instruction feature requirements.
attributes #0 = { nounwind "target-features"="+sme-f16f16" }
attributes #1 = { nounwind "target-features"="+sme-f8f16" }
attributes #2 = { nounwind "target-features"="+sme-b16b16,+bf16" }