; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -slp-vectorizer -slp-threshold=-200 -mtriple=x86_64-unknown-linux -mcpu=core-avx2 -S | FileCheck %s
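;; These tests check that the SLP vectorizer refuses to vectorize an
;; add/div instruction group as an alternate-opcode shuffle, even though
;; -slp-threshold=-200 strongly biases the cost model toward vectorizing:
;; a vector division executes on every lane, so lanes the scalar code
;; never divides could trap on a zero divisor. The CHECK lines therefore
;; expect fully scalar output.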
define void @test_add_sdiv(i32 *%arr1, i32 *%arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: @test_add_sdiv(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GEP1_0:%.*]] = getelementptr i32, i32* [[ARR1:%.*]], i32 0
; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr i32, i32* [[ARR1]], i32 1
; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, i32* [[ARR1]], i32 2
; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, i32* [[ARR1]], i32 3
; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr i32, i32* [[ARR2:%.*]], i32 0
; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr i32, i32* [[ARR2]], i32 1
; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr i32, i32* [[ARR2]], i32 2
; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr i32, i32* [[ARR2]], i32 3
; CHECK-NEXT: [[V0:%.*]] = load i32, i32* [[GEP1_0]], align 4
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[GEP1_1]], align 4
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[GEP1_2]], align 4
; CHECK-NEXT: [[V3:%.*]] = load i32, i32* [[GEP1_3]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add nsw i32 [[A0:%.*]], 1146
; CHECK-NEXT: [[Y1:%.*]] = add nsw i32 [[A1:%.*]], 146
; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42
; CHECK-NEXT: [[Y3:%.*]] = add nsw i32 [[A3:%.*]], 0
; CHECK-NEXT: [[RES0:%.*]] = add nsw i32 [[V0]], [[Y0]]
; CHECK-NEXT: [[RES1:%.*]] = add nsw i32 [[V1]], [[Y1]]
; CHECK-NEXT: [[RES2:%.*]] = sdiv i32 [[V2]], [[Y2]]
; CHECK-NEXT: [[RES3:%.*]] = add nsw i32 [[V3]], [[Y3]]
; CHECK-NEXT: store i32 [[RES0]], i32* [[GEP2_0]], align 4
; CHECK-NEXT: store i32 [[RES1]], i32* [[GEP2_1]], align 4
; CHECK-NEXT: store i32 [[RES2]], i32* [[GEP2_2]], align 4
; CHECK-NEXT: store i32 [[RES3]], i32* [[GEP2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
%gep1.0 = getelementptr i32, i32* %arr1, i32 0
%gep1.1 = getelementptr i32, i32* %arr1, i32 1
%gep1.2 = getelementptr i32, i32* %arr1, i32 2
%gep1.3 = getelementptr i32, i32* %arr1, i32 3
%gep2.0 = getelementptr i32, i32* %arr2, i32 0
%gep2.1 = getelementptr i32, i32* %arr2, i32 1
%gep2.2 = getelementptr i32, i32* %arr2, i32 2
%gep2.3 = getelementptr i32, i32* %arr2, i32 3
%v0 = load i32, i32* %gep1.0
%v1 = load i32, i32* %gep1.1
%v2 = load i32, i32* %gep1.2
%v3 = load i32, i32* %gep1.3
%y0 = add nsw i32 %a0, 1146
%y1 = add nsw i32 %a1, 146
%y2 = add nsw i32 %a2, 42
;; %y3 is zero if %a3 is zero
%y3 = add nsw i32 %a3, 0
%res0 = add nsw i32 %v0, %y0
%res1 = add nsw i32 %v1, %y1
;; As such (since %y3 may be zero), vectorizing this group with an
;; alternate add/sdiv shuffle:
;;   %vadd = add nsw %v[0-3], %y[0-3]
;;   %vsdiv = sdiv %v[0-3], %y[0-3]
;;   %result = shuffle %vadd, %vsdiv, <mask>
;; would be illegal: the speculated sdiv lanes may divide by zero.
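;; For illustration only (hypothetical IR, not emitted by this test), the
;; rejected vector form would look roughly like this, with %v and %y
;; assembled from %v0-%v3 and %y0-%y3 via insertelement:
;;   %vadd = add nsw <4 x i32> %v, %y
;;   %vsdiv = sdiv <4 x i32> %v, %y
;;   %result = shufflevector <4 x i32> %vadd, <4 x i32> %vsdiv,
;;             <4 x i32> <i32 0, i32 1, i32 6, i32 3>
;; Only lane 2 of %vsdiv is selected, but all four sdiv lanes execute, so
;; a zero in lane 3 of %y would be immediate undefined behavior.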
%res2 = sdiv i32 %v2, %y2
%res3 = add nsw i32 %v3, %y3
store i32 %res0, i32* %gep2.0
store i32 %res1, i32* %gep2.1
store i32 %res2, i32* %gep2.2
store i32 %res3, i32* %gep2.3
ret void
}
;; Similar test, but here the div/rem is the main opcode rather than the
;; alternate one. Same issue.
define void @test_urem_add(i32 *%arr1, i32 *%arr2, i32 %a0, i32 %a1, i32 %a2, i32 %a3) {
; CHECK-LABEL: @test_urem_add(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[GEP1_0:%.*]] = getelementptr i32, i32* [[ARR1:%.*]], i32 0
; CHECK-NEXT: [[GEP1_1:%.*]] = getelementptr i32, i32* [[ARR1]], i32 1
; CHECK-NEXT: [[GEP1_2:%.*]] = getelementptr i32, i32* [[ARR1]], i32 2
; CHECK-NEXT: [[GEP1_3:%.*]] = getelementptr i32, i32* [[ARR1]], i32 3
; CHECK-NEXT: [[GEP2_0:%.*]] = getelementptr i32, i32* [[ARR2:%.*]], i32 0
; CHECK-NEXT: [[GEP2_1:%.*]] = getelementptr i32, i32* [[ARR2]], i32 1
; CHECK-NEXT: [[GEP2_2:%.*]] = getelementptr i32, i32* [[ARR2]], i32 2
; CHECK-NEXT: [[GEP2_3:%.*]] = getelementptr i32, i32* [[ARR2]], i32 3
; CHECK-NEXT: [[V0:%.*]] = load i32, i32* [[GEP1_0]], align 4
; CHECK-NEXT: [[V1:%.*]] = load i32, i32* [[GEP1_1]], align 4
; CHECK-NEXT: [[V2:%.*]] = load i32, i32* [[GEP1_2]], align 4
; CHECK-NEXT: [[V3:%.*]] = load i32, i32* [[GEP1_3]], align 4
; CHECK-NEXT: [[Y0:%.*]] = add nsw i32 [[A0:%.*]], 1146
; CHECK-NEXT: [[Y1:%.*]] = add nsw i32 [[A1:%.*]], 146
; CHECK-NEXT: [[Y2:%.*]] = add nsw i32 [[A2:%.*]], 42
; CHECK-NEXT: [[Y3:%.*]] = add nsw i32 [[A3:%.*]], 0
; CHECK-NEXT: [[RES0:%.*]] = urem i32 [[V0]], [[Y0]]
; CHECK-NEXT: [[RES1:%.*]] = urem i32 [[V1]], [[Y1]]
; CHECK-NEXT: [[RES2:%.*]] = urem i32 [[V2]], [[Y2]]
; CHECK-NEXT: [[RES3:%.*]] = add nsw i32 [[V3]], [[Y3]]
; CHECK-NEXT: store i32 [[RES0]], i32* [[GEP2_0]], align 4
; CHECK-NEXT: store i32 [[RES1]], i32* [[GEP2_1]], align 4
; CHECK-NEXT: store i32 [[RES2]], i32* [[GEP2_2]], align 4
; CHECK-NEXT: store i32 [[RES3]], i32* [[GEP2_3]], align 4
; CHECK-NEXT: ret void
;
entry:
%gep1.0 = getelementptr i32, i32* %arr1, i32 0
%gep1.1 = getelementptr i32, i32* %arr1, i32 1
%gep1.2 = getelementptr i32, i32* %arr1, i32 2
%gep1.3 = getelementptr i32, i32* %arr1, i32 3
%gep2.0 = getelementptr i32, i32* %arr2, i32 0
%gep2.1 = getelementptr i32, i32* %arr2, i32 1
%gep2.2 = getelementptr i32, i32* %arr2, i32 2
%gep2.3 = getelementptr i32, i32* %arr2, i32 3
%v0 = load i32, i32* %gep1.0
%v1 = load i32, i32* %gep1.1
%v2 = load i32, i32* %gep1.2
%v3 = load i32, i32* %gep1.3
%y0 = add nsw i32 %a0, 1146
%y1 = add nsw i32 %a1, 146
%y2 = add nsw i32 %a2, 42
;; %y3 is zero if %a3 is zero
%y3 = add nsw i32 %a3, 0
%res0 = urem i32 %v0, %y0
%res1 = urem i32 %v1, %y1
%res2 = urem i32 %v2, %y2
;; As such (since %y3 may be zero), vectorizing this group with an
;; alternate urem/add shuffle:
;;   %vurem = urem %v[0-3], %y[0-3]
;;   %vadd = add nsw %v[0-3], %y[0-3]
;;   %result = shuffle %vurem, %vadd, <mask>
;; would be illegal: every urem lane executes, including lane 3, whose
;; divisor %y3 may be zero.
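;; Again for illustration only (hypothetical IR, not emitted by this
;; test), the rejected vector form would be roughly:
;;   %vurem = urem <4 x i32> %v, %y
;;   %vadd = add nsw <4 x i32> %v, %y
;;   %result = shufflevector <4 x i32> %vurem, <4 x i32> %vadd,
;;             <4 x i32> <i32 0, i32 1, i32 2, i32 7>
;; Lanes 0-2 come from %vurem and lane 3 from %vadd, yet the urem on
;; lane 3 still executes with the possibly-zero divisor %y3.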
%res3 = add nsw i32 %v3, %y3
store i32 %res0, i32* %gep2.0
store i32 %res1, i32* %gep2.1
store i32 %res2, i32* %gep2.2
store i32 %res3, i32* %gep2.3
ret void
}