; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux-gnu -mcpu=haswell | FileCheck %s
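
; Exercises x86 lowering of masked vector loads/stores whose constant masks
; enable only the first two lanes, combined with undef pointers and undef
; store data (likely a reduced reproducer). The CHECK lines below were
; autogenerated for -mcpu=haswell (AVX2).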
define void @WhileWithLoopInvariantOperation.21() {
; CHECK-LABEL: WhileWithLoopInvariantOperation.21:
; CHECK:       # %bb.0: # %while.1.body.preheader
; CHECK-NEXT:    movq (%rax), %rax
; CHECK-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; CHECK-NEXT:    vmovaps %xmm0, 32(%rax)
; CHECK-NEXT:    vmovaps {{.*#+}} xmm0 = [4294967295,4294967295,0,0]
; CHECK-NEXT:    vmaskmovps %ymm0, %ymm0, (%rax)
while.1.body.preheader:
  %0 = load ptr, ptr undef, align 8, !invariant.load !0, !dereferenceable !1, !align !2
  %1 = getelementptr inbounds i8, ptr %0, i64 32
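  ; Zero 16 bytes at offset 32 of the loaded pointer; this appears as the
  ; vxorps + vmovaps pair in the CHECK lines above.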
  tail call void @llvm.memset.p0.i64(ptr nonnull align 16 dereferenceable(16) %1, i8 0, i64 16, i1 false)
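  ; Masked loads with only the first two lanes enabled; their results are
  ; unused, so only the masked store below should survive to the final code.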
  %2 = tail call <8 x float> @llvm.masked.load.v8f32.p0(ptr undef, i32 4, <8 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <8 x float> <float undef, float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>)
  %3 = tail call <16 x float> @llvm.masked.load.v16f32.p0(ptr undef, i32 4, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>, <16 x float> <float undef, float undef, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00, float 0.000000e+00>)
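  ; Masked store of undef data through an undef pointer with the same
  ; two-lane mask; this is what the vmaskmovps in the CHECK lines covers.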
  tail call void @llvm.masked.store.v16f32.p0(<16 x float> undef, ptr nonnull undef, i32 4, <16 x i1> <i1 true, i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false, i1 false>)
  unreachable
}

declare void @llvm.memset.p0.i64(ptr nocapture writeonly, i8, i64, i1 immarg)
declare <8 x float> @llvm.masked.load.v8f32.p0(ptr, i32 immarg, <8 x i1>, <8 x float>)
declare <16 x float> @llvm.masked.load.v16f32.p0(ptr, i32 immarg, <16 x i1>, <16 x float>)
declare void @llvm.masked.store.v16f32.p0(<16 x float>, ptr, i32 immarg, <16 x i1>)

!0 = !{}
!1 = !{i64 65}
!2 = !{i64 16}