; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
; Test high-part i64->i128 multiplications on z13.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 | FileCheck %s

; Check zero-extended multiplication in which only the high part is used.
define i64 @f1(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlgr %r2, %r4
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check sign-extended multiplication in which only the high part is used.
; z13 has no signed high-part multiply instruction, so this needs a rather
; convoluted correction sequence on top of the unsigned MLGR result.
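; The expected code uses the identity (writing smulh/umulh for the signed and
; unsigned high 64 bits of the 64x64-bit product):
;   smulh(a, b) = umulh(a, b) - (a < 0 ? b : 0) - (b < 0 ? a : 0)
; The SRAG/NGR pairs compute the two conditional terms and the AGR/SGR pair
; subtracts their sum from the MLGR high result.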
define i64 @f2(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    srag %r1, %r4, 63
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    srag %r0, %r3, 63
; CHECK-NEXT:    ngr %r1, %r3
; CHECK-NEXT:    mlgr %r2, %r4
; CHECK-NEXT:    ngr %r0, %r4
; CHECK-NEXT:    agr %r0, %r1
; CHECK-NEXT:    sgr %r2, %r0
; CHECK-NEXT:    br %r14
  %ax = sext i64 %a to i128
  %bx = sext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which only part of the high half
; is used.
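; The extra shift amount (67 - 64 = 3) is expected to fold into the SRLG of
; the high half.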
define i64 @f3(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlgr %r2, %r4
; CHECK-NEXT:    srlg %r2, %r2, 3
; CHECK-NEXT:    br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 67
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check zero-extended multiplication in which the result is split into
; high and low halves.
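; MLGR writes the full 128-bit product to the even/odd pair %r2/%r3, so the
; low half is already available in %r3 for the OGR.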
define i64 @f4(i64 %dummy, i64 %a, i64 %b) {
; CHECK-LABEL: f4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlgr %r2, %r4
; CHECK-NEXT:    ogr %r2, %r3
; CHECK-NEXT:    br %r14
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  %low = trunc i128 %mulx to i64
  %or = or i64 %high, %low
  ret i64 %or
}

; Check unsigned division by a constant, which should be lowered to a
; multiply-high by a magic constant plus a shift rather than a division.
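; The LLIHF/OILF pair materializes the two 32-bit halves of the 64-bit magic
; constant, MLGR yields the high 64 bits of the product, and the SRLG by 9
; completes the quotient.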
define i64 @f5(i64 %dummy, i64 %a) {
; CHECK-LABEL: f5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    llihf %r0, 1782028570
; CHECK-NEXT:    oilf %r0, 598650223
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlgr %r2, %r0
; CHECK-NEXT:    srlg %r2, %r2, 9
; CHECK-NEXT:    br %r14
  %res = udiv i64 %a, 1234
  ret i64 %res
}

; Check MLG with no displacement.
define i64 @f6(i64 %dummy, i64 %a, ptr %src) {
; CHECK-LABEL: f6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, 0(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %b = load i64, ptr %src
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the aligned MLG range.
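; 524280 is the largest doubleword-aligned value that fits in MLG's signed
; 20-bit displacement field.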
define i64 @f7(i64 %dummy, i64 %a, ptr %src) {
; CHECK-LABEL: f7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, 524280(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 65535
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword up, which requires separate address logic.
; Other sequences besides this one would be OK.
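; 524288 does not fit in the signed 20-bit displacement field, so the base
; register is adjusted first (here with AGFI).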
define i64 @f8(i64 %dummy, i64 %a, ptr %src) {
; CHECK-LABEL: f8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, 524288
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, 0(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 65536
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the high end of the negative aligned MLG range.
define i64 @f9(i64 %dummy, i64 %a, ptr %src) {
; CHECK-LABEL: f9:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, -8(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -1
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the low end of the MLG range.
define i64 @f10(i64 %dummy, i64 %a, ptr %src) {
; CHECK-LABEL: f10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, -524288(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -65536
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f11(ptr %dest, i64 %a, ptr %src) {
; CHECK-LABEL: f11:
; CHECK:       # %bb.0:
; CHECK-NEXT:    agfi %r4, -524296
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, 0(%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %ptr = getelementptr i64, ptr %src, i64 -65537
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}

; Check that MLG allows an index.
define i64 @f12(ptr %dest, i64 %a, i64 %src, i64 %index) {
; CHECK-LABEL: f12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    # kill: def $r3d killed $r3d def $r2q
; CHECK-NEXT:    mlg %r2, 524287(%r5,%r4)
; CHECK-NEXT:    # kill: def $r2d killed $r2d killed $r2q
; CHECK-NEXT:    br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to ptr
  %b = load i64, ptr %ptr
  %ax = zext i64 %a to i128
  %bx = zext i64 %b to i128
  %mulx = mul i128 %ax, %bx
  %highx = lshr i128 %mulx, 64
  %high = trunc i128 %highx to i64
  ret i64 %high
}