File: different-vec-widths.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -mattr=sse2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=SSE
; RUN: opt < %s -mattr=avx2 -passes=slp-vectorizer -S | FileCheck %s --check-prefix=AVX
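;
; To regenerate the CHECK lines after an SLP-vectorizer change, an invocation
; along these lines should work (<build-dir> is a placeholder for your build
; tree, not a value from this file):
;   llvm/utils/update_test_checks.py --opt-binary=<build-dir>/bin/opt \
;     different-vec-widths.ll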

; NOTE:
; With AVX, the first 4 elements are vectorized as 256-bit vector ops, and the
; final 2 elements are vectorized as 128-bit ops, identically to the tail
; handling in the SSE run (see the AVX checks below).
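
; For reference, a roughly equivalent straight-line C function (reconstructed
; from the IR below; not taken verbatim from the PR28457 report):
;
;   void PR28457(double *restrict q, const double *restrict p) {
;     q[0] = p[0] + 1.0;  // SSE: three <2 x double> ops
;     q[1] = p[1] + 1.0;  // AVX: one <4 x double> op ...
;     q[2] = p[2] + 1.0;
;     q[3] = p[3] + 1.0;
;     q[4] = p[4] + 1.0;  // ... plus one <2 x double> op for the tail
;     q[5] = p[5] + 1.0;
;   }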

target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-linux-gnu"

define void @PR28457(ptr noalias nocapture align 32 %q, ptr noalias nocapture readonly align 32 %p) {
; SSE-LABEL: @PR28457(
; SSE-NEXT:    [[P2:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 2
; SSE-NEXT:    [[P4:%.*]] = getelementptr inbounds double, ptr [[P]], i64 4
; SSE-NEXT:    [[Q2:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 2
; SSE-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q]], i64 4
; SSE-NEXT:    [[TMP2:%.*]] = load <2 x double>, ptr [[P]], align 8
; SSE-NEXT:    [[TMP3:%.*]] = fadd <2 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    store <2 x double> [[TMP3]], ptr [[Q]], align 8
; SSE-NEXT:    [[TMP6:%.*]] = load <2 x double>, ptr [[P2]], align 8
; SSE-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    store <2 x double> [[TMP7]], ptr [[Q2]], align 8
; SSE-NEXT:    [[TMP10:%.*]] = load <2 x double>, ptr [[P4]], align 8
; SSE-NEXT:    [[TMP11:%.*]] = fadd <2 x double> [[TMP10]], <double 1.000000e+00, double 1.000000e+00>
; SSE-NEXT:    store <2 x double> [[TMP11]], ptr [[Q4]], align 8
; SSE-NEXT:    ret void
;
; AVX-LABEL: @PR28457(
; AVX-NEXT:    [[P4:%.*]] = getelementptr inbounds double, ptr [[P:%.*]], i64 4
; AVX-NEXT:    [[Q4:%.*]] = getelementptr inbounds double, ptr [[Q:%.*]], i64 4
; AVX-NEXT:    [[TMP2:%.*]] = load <4 x double>, ptr [[P]], align 8
; AVX-NEXT:    [[TMP3:%.*]] = fadd <4 x double> [[TMP2]], <double 1.000000e+00, double 1.000000e+00, double 1.000000e+00, double 1.000000e+00>
; AVX-NEXT:    store <4 x double> [[TMP3]], ptr [[Q]], align 8
; AVX-NEXT:    [[TMP6:%.*]] = load <2 x double>, ptr [[P4]], align 8
; AVX-NEXT:    [[TMP7:%.*]] = fadd <2 x double> [[TMP6]], <double 1.000000e+00, double 1.000000e+00>
; AVX-NEXT:    store <2 x double> [[TMP7]], ptr [[Q4]], align 8
; AVX-NEXT:    ret void
;
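  ; Scalar input: six consecutive doubles are loaded from %p, incremented by
  ; 1.0, and stored to the corresponding slots of %q.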
  %p1 = getelementptr inbounds double, ptr %p, i64 1
  %p2 = getelementptr inbounds double, ptr %p, i64 2
  %p3 = getelementptr inbounds double, ptr %p, i64 3
  %p4 = getelementptr inbounds double, ptr %p, i64 4
  %p5 = getelementptr inbounds double, ptr %p, i64 5

  %q1 = getelementptr inbounds double, ptr %q, i64 1
  %q2 = getelementptr inbounds double, ptr %q, i64 2
  %q3 = getelementptr inbounds double, ptr %q, i64 3
  %q4 = getelementptr inbounds double, ptr %q, i64 4
  %q5 = getelementptr inbounds double, ptr %q, i64 5

  %d0 = load double, ptr %p
  %d1 = load double, ptr %p1
  %d2 = load double, ptr %p2
  %d3 = load double, ptr %p3
  %d4 = load double, ptr %p4
  %d5 = load double, ptr %p5

  %a0 = fadd double %d0, 1.0
  %a1 = fadd double %d1, 1.0
  %a2 = fadd double %d2, 1.0
  %a3 = fadd double %d3, 1.0
  %a4 = fadd double %d4, 1.0
  %a5 = fadd double %d5, 1.0

  store double %a0, ptr %q
  store double %a1, ptr %q1
  store double %a2, ptr %q2
  store double %a3, ptr %q3
  store double %a4, ptr %q4
  store double %a5, ptr %q5
  ret void
}