| ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py |
| ; RUN: llc --mtriple=loongarch64 --mattr=+lsx < %s | FileCheck %s |
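;; Check that the @llvm.loongarch.lsx.vssrlni.* intrinsics lower to the
;; corresponding vssrlni.{b.h,h.w,w.d,d.q} saturating logical shift-right-and-narrow
;; instructions, with the immediate shift amount passed through unchanged.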
| |
| declare <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8>, <16 x i8>, i32) |
| |
| define <16 x i8> @lsx_vssrlni_b_h(<16 x i8> %va, <16 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_b_h: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.b.h $vr0, $vr1, 1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i8> @llvm.loongarch.lsx.vssrlni.b.h(<16 x i8> %va, <16 x i8> %vb, i32 1) |
| ret <16 x i8> %res |
| } |
| |
| declare <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16>, <8 x i16>, i32) |
| |
| define <8 x i16> @lsx_vssrlni_h_w(<8 x i16> %va, <8 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_h_w: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.h.w $vr0, $vr1, 1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i16> @llvm.loongarch.lsx.vssrlni.h.w(<8 x i16> %va, <8 x i16> %vb, i32 1) |
| ret <8 x i16> %res |
| } |
| |
| declare <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32>, <4 x i32>, i32) |
| |
| define <4 x i32> @lsx_vssrlni_w_d(<4 x i32> %va, <4 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_w_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.w.d $vr0, $vr1, 1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i32> @llvm.loongarch.lsx.vssrlni.w.d(<4 x i32> %va, <4 x i32> %vb, i32 1) |
| ret <4 x i32> %res |
| } |
| |
| declare <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64>, <2 x i64>, i32) |
| |
| define <2 x i64> @lsx_vssrlni_d_q(<2 x i64> %va, <2 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_d_q: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.d.q $vr0, $vr1, 1 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <2 x i64> @llvm.loongarch.lsx.vssrlni.d.q(<2 x i64> %va, <2 x i64> %vb, i32 1) |
| ret <2 x i64> %res |
| } |
| |
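;; The unsigned variants below use the maximum legal immediate for each
;; element width (ui4/ui5/ui6/ui7: 15, 31, 63 and 127 respectively).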
| declare <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8>, <16 x i8>, i32) |
| |
| define <16 x i8> @lsx_vssrlni_bu_h(<16 x i8> %va, <16 x i8> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_bu_h: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.bu.h $vr0, $vr1, 15 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <16 x i8> @llvm.loongarch.lsx.vssrlni.bu.h(<16 x i8> %va, <16 x i8> %vb, i32 15) |
| ret <16 x i8> %res |
| } |
| |
| declare <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16>, <8 x i16>, i32) |
| |
| define <8 x i16> @lsx_vssrlni_hu_w(<8 x i16> %va, <8 x i16> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_hu_w: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.hu.w $vr0, $vr1, 31 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <8 x i16> @llvm.loongarch.lsx.vssrlni.hu.w(<8 x i16> %va, <8 x i16> %vb, i32 31) |
| ret <8 x i16> %res |
| } |
| |
| declare <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32>, <4 x i32>, i32) |
| |
| define <4 x i32> @lsx_vssrlni_wu_d(<4 x i32> %va, <4 x i32> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_wu_d: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.wu.d $vr0, $vr1, 63 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <4 x i32> @llvm.loongarch.lsx.vssrlni.wu.d(<4 x i32> %va, <4 x i32> %vb, i32 63) |
| ret <4 x i32> %res |
| } |
| |
| declare <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64>, <2 x i64>, i32) |
| |
| define <2 x i64> @lsx_vssrlni_du_q(<2 x i64> %va, <2 x i64> %vb) nounwind { |
| ; CHECK-LABEL: lsx_vssrlni_du_q: |
| ; CHECK: # %bb.0: # %entry |
| ; CHECK-NEXT: vssrlni.du.q $vr0, $vr1, 127 |
| ; CHECK-NEXT: ret |
| entry: |
| %res = call <2 x i64> @llvm.loongarch.lsx.vssrlni.du.q(<2 x i64> %va, <2 x i64> %vb, i32 127) |
| ret <2 x i64> %res |
| } |