; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=X64
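
; The gather intrinsics below should each select a single vgather*
; instruction. The only differences between the two targets should be the
; base pointer (loaded from the stack on X86, passed in %rdi on X64) and
; the return (retl vs. retq).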
declare <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float>, ptr,
                      <4 x i32>, <4 x float>, i8) nounwind readonly
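
; Gather four floats with dword indices at scale 2. The passthru operand is
; undef, so the destination register is simply zeroed (vxorps) before the
; gather.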
define <4 x float> @test_x86_avx2_gather_d_ps(ptr %a1, <4 x i32> %idx, <4 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X86-NEXT:    vgatherdps %xmm1, (%eax,%xmm0,2), %xmm2
; X86-NEXT:    vmovaps %xmm2, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps:
; X64:       # %bb.0:
; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT:    vgatherdps %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT:    vmovaps %xmm2, %xmm0
; X64-NEXT:    retq
  %res = call <4 x float> @llvm.x86.avx2.gather.d.ps(<4 x float> undef,
                            ptr %a1, <4 x i32> %idx, <4 x float> %mask, i8 2)
  ret <4 x float> %res
}
declare <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double>, ptr,
                      <4 x i32>, <2 x double>, i8) nounwind readonly
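
; Gather two doubles with dword indices at scale 2; only the low two dword
; elements of %idx are used.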
define <2 x double> @test_x86_avx2_gather_d_pd(ptr %a1, <4 x i32> %idx, <2 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
; X86-NEXT:    vgatherdpd %xmm1, (%eax,%xmm0,2), %xmm2
; X86-NEXT:    vmovapd %xmm2, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd:
; X64:       # %bb.0:
; X64-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vgatherdpd %xmm1, (%rdi,%xmm0,2), %xmm2
; X64-NEXT:    vmovapd %xmm2, %xmm0
; X64-NEXT:    retq
  %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> undef,
                            ptr %a1, <4 x i32> %idx, <2 x double> %mask, i8 2)
  ret <2 x double> %res
}
declare <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float>, ptr,
                      <8 x i32>, <8 x float>, i8) nounwind readonly
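
; 256-bit variant: eight floats with dword indices at scale 4, using ymm
; registers throughout.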
define <8 x float> @test_x86_avx2_gather_d_ps_256(ptr %a1, <8 x i32> %idx, <8 x float> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_ps_256:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X86-NEXT:    vgatherdps %ymm1, (%eax,%ymm0,4), %ymm2
; X86-NEXT:    vmovaps %ymm2, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_ps_256:
; X64:       # %bb.0:
; X64-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; X64-NEXT:    vgatherdps %ymm1, (%rdi,%ymm0,4), %ymm2
; X64-NEXT:    vmovaps %ymm2, %ymm0
; X64-NEXT:    retq
  %res = call <8 x float> @llvm.x86.avx2.gather.d.ps.256(<8 x float> undef,
                            ptr %a1, <8 x i32> %idx, <8 x float> %mask, i8 4)
  ret <8 x float> %res
}
declare <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double>, ptr,
                      <4 x i32>, <4 x double>, i8) nounwind readonly
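
; 256-bit vgatherdpd takes its four dword indices in an xmm register but
; gathers into a ymm destination, here at scale 8.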
define <4 x double> @test_x86_avx2_gather_d_pd_256(ptr %a1, <4 x i32> %idx, <4 x double> %mask) {
; X86-LABEL: test_x86_avx2_gather_d_pd_256:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
; X86-NEXT:    vgatherdpd %ymm1, (%eax,%xmm0,8), %ymm2
; X86-NEXT:    vmovapd %ymm2, %ymm0
; X86-NEXT:    retl
;
; X64-LABEL: test_x86_avx2_gather_d_pd_256:
; X64:       # %bb.0:
; X64-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vgatherdpd %ymm1, (%rdi,%xmm0,8), %ymm2
; X64-NEXT:    vmovapd %ymm2, %ymm0
; X64-NEXT:    retq
  %res = call <4 x double> @llvm.x86.avx2.gather.d.pd.256(<4 x double> undef,
                            ptr %a1, <4 x i32> %idx, <4 x double> %mask, i8 8)
  ret <4 x double> %res
}
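
; Matches the _mm_i32gather_epi32 pattern: a zero passthru (vpxor) and an
; all-ones mask materialized with vpcmpeqd.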
define <2 x i64> @test_mm_i32gather_epi32(ptr %a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_epi32:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X86-NEXT:    vpgatherdd %xmm2, (%eax,%xmm0,2), %xmm1
; X86-NEXT:    vmovdqa %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_epi32:
; X64:       # %bb.0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpgatherdd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovdqa %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast ptr %a0 to ptr
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %mask = bitcast <2 x i64> <i64 -1, i64 -1> to <4 x i32>
  %call = call <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32> zeroinitializer, ptr %arg0, <4 x i32> %arg1, <4 x i32> %mask, i8 2)
  %bc = bitcast <4 x i32> %call to <2 x i64>
  ret <2 x i64> %bc
}
declare <4 x i32> @llvm.x86.avx2.gather.d.d(<4 x i32>, ptr, <4 x i32>, <4 x i32>, i8) nounwind readonly
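
; Matches the _mm_i32gather_pd pattern: the mask comes from an
; fcmp oeq 0.0, 0.0, which constant-folds to all-ones (vpcmpeqd).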
define <2 x double> @test_mm_i32gather_pd(ptr %a0, <2 x i64> %a1) {
; X86-LABEL: test_mm_i32gather_pd:
; X86:       # %bb.0:
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT:    vgatherdpd %xmm2, (%eax,%xmm0,2), %xmm1
; X86-NEXT:    vmovapd %xmm1, %xmm0
; X86-NEXT:    retl
;
; X64-LABEL: test_mm_i32gather_pd:
; X64:       # %bb.0:
; X64-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vgatherdpd %xmm2, (%rdi,%xmm0,2), %xmm1
; X64-NEXT:    vmovapd %xmm1, %xmm0
; X64-NEXT:    retq
  %arg0 = bitcast ptr %a0 to ptr
  %arg1 = bitcast <2 x i64> %a1 to <4 x i32>
  %cmp = fcmp oeq <2 x double> zeroinitializer, zeroinitializer
  %sext = sext <2 x i1> %cmp to <2 x i64>
  %mask = bitcast <2 x i64> %sext to <2 x double>
  %res = call <2 x double> @llvm.x86.avx2.gather.d.pd(<2 x double> zeroinitializer, ptr %arg0, <4 x i32> %arg1, <2 x double> %mask, i8 2)
  ret <2 x double> %res
}
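
; Gathering from a global lets the base address be folded directly into the
; memory operand (no base register). The ymm0 index forces a vzeroupper
; before returning an xmm value.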
@x = dso_local global [1024 x float] zeroinitializer, align 16
define <4 x float> @gather_global(<4 x i64>, ptr nocapture readnone) {
; X86-LABEL: gather_global:
; X86:       # %bb.0:
; X86-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X86-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X86-NEXT:    vgatherqps %xmm2, x(,%ymm0,4), %xmm1
; X86-NEXT:    vmovaps %xmm1, %xmm0
; X86-NEXT:    vzeroupper
; X86-NEXT:    retl
;
; X64-LABEL: gather_global:
; X64:       # %bb.0:
; X64-NEXT:    vpcmpeqd %xmm2, %xmm2, %xmm2
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vgatherqps %xmm2, x(,%ymm0,4), %xmm1
; X64-NEXT:    vmovaps %xmm1, %xmm0
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %3 = tail call <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float> zeroinitializer, ptr @x, <4 x i64> %0, <4 x float> <float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000, float 0xFFFFFFFFE0000000>, i8 4)
  ret <4 x float> %3
}
declare <4 x float> @llvm.x86.avx2.gather.q.ps.256(<4 x float>, ptr, <4 x i64>, <4 x float>, i8)