| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py |
| ; RUN: opt < %s -mattr=sse2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE |
| ; RUN: opt < %s -mattr=avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX |
| |
; Both targets fully vectorize this 6-element pattern. With SSE, it becomes
; three 128-bit vector ops. With AVX, the first 4 elements become a single
; 256-bit vector op and the remaining 2 elements are vectorized with a
; 128-bit op, identically to the SSE tail.
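;
; A C-level equivalent of the unrolled IR body (a sketch inferred from the IR
; below, not taken from the original PR28457 report) would be:
;
;   void PR28457(double *restrict q, const double *restrict p) {
;     for (int i = 0; i < 6; ++i)
;       q[i] = p[i] + 1.0;
;   }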
| |
| target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128" |
| target triple = "x86_64-unknown-linux-gnu" |
| |
| define void @PR28457(ptr noalias nocapture align 32 %q, ptr noalias nocapture readonly align 32 %p) { |
| ; SSE-LABEL: @PR28457( |
| ; SSE-NEXT: [[P2:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 2 |
| ; SSE-NEXT: [[P4:%.*]] = getelementptr inbounds double, ptr [[P]], i64 4 |
| ; SSE-NEXT: [[Q2:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 2 |
| ; SSE-NEXT: [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q]], i64 4 |
| ; SSE-NEXT: [[TMP2:%.*]] = load <2 x double>, ptr [[P]], align 8 |
| ; SSE-NEXT: [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00> |
| ; SSE-NEXT: store <2 x double> [[TMP3]], ptr [[Q]], align 8 |
| ; SSE-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr [[P2]], align 8 |
| ; SSE-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00> |
| ; SSE-NEXT: store <2 x double> [[TMP7]], ptr [[Q2]], align 8 |
| ; SSE-NEXT: [[TMP10:%.*]] = load <2 x double>, ptr [[P4]], align 8 |
| ; SSE-NEXT: [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], <double 1.000000e+00, double 1.000000e+00> |
| ; SSE-NEXT: store <2 x double> [[TMP11]], ptr [[Q4]], align 8 |
| ; SSE-NEXT: ret void |
| ; |
| ; AVX-LABEL: @PR28457( |
| ; AVX-NEXT: [[P4:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 4 |
| ; AVX-NEXT: [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 4 |
| ; AVX-NEXT: [[TMP2:%.*]] = load <4 x double>, ptr [[P]], align 8 |
| ; AVX-NEXT: [[TMP3:%.*]] = fadd <4 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00> |
| ; AVX-NEXT: store <4 x double> [[TMP3]], ptr [[Q]], align 8 |
| ; AVX-NEXT: [[TMP6:%.*]] = load <2 x double>, ptr [[P4]], align 8 |
| ; AVX-NEXT: [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00> |
| ; AVX-NEXT: store <2 x double> [[TMP7]], ptr [[Q4]], align 8 |
| ; AVX-NEXT: ret void |
| ; |
| %p1 = getelementptr inbounds double, ptr %p, i64 1 |
| %p2 = getelementptr inbounds double, ptr %p, i64 2 |
| %p3 = getelementptr inbounds double, ptr %p, i64 3 |
| %p4 = getelementptr inbounds double, ptr %p, i64 4 |
| %p5 = getelementptr inbounds double, ptr %p, i64 5 |
| |
| %q1 = getelementptr inbounds double, ptr %q, i64 1 |
| %q2 = getelementptr inbounds double, ptr %q, i64 2 |
| %q3 = getelementptr inbounds double, ptr %q, i64 3 |
| %q4 = getelementptr inbounds double, ptr %q, i64 4 |
| %q5 = getelementptr inbounds double, ptr %q, i64 5 |
| |
| %d0 = load double, ptr %p |
| %d1 = load double, ptr %p1 |
| %d2 = load double, ptr %p2 |
| %d3 = load double, ptr %p3 |
| %d4 = load double, ptr %p4 |
| %d5 = load double, ptr %p5 |
| |
| %a0 = fadd double %d0, 1.0 |
| %a1 = fadd double %d1, 1.0 |
| %a2 = fadd double %d2, 1.0 |
| %a3 = fadd double %d3, 1.0 |
| %a4 = fadd double %d4, 1.0 |
| %a5 = fadd double %d5, 1.0 |
| |
| store double %a0, ptr %q |
| store double %a1, ptr %q1 |
| store double %a2, ptr %q2 |
| store double %a3, ptr %q3 |
| store double %a4, ptr %q4 |
| store double %a5, ptr %q5 |
| ret void |
| } |