; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mcpu=pwr8 -mtriple=powerpc64le < %s | FileCheck %s
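; The two loaded i64s feed both a scalar add and an Altivec vavgsb on vectors
; built from them via direct moves; lane 0 of the vector result is stored back
; while the scalar sum is returned, so both forms must survive.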
define i64 @test1(ptr %a, ptr %b) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld 5, 0(3)
; CHECK-NEXT:    ld 6, 0(4)
; CHECK-NEXT:    mtvsrd 34, 5
; CHECK-NEXT:    mtvsrd 35, 6
; CHECK-NEXT:    add 4, 5, 6
; CHECK-NEXT:    vavgsb 2, 2, 3
; CHECK-NEXT:    stxsdx 34, 0, 3
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
entry:
  %lhs = load i64, ptr %a, align 8
  %rhs = load i64, ptr %b, align 8
  %sum = add i64 %lhs, %rhs
  %lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
  %rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
  %lhc = bitcast <2 x i64> %lv to <16 x i8>
  %rhc = bitcast <2 x i64> %rv to <16 x i8>
  %add = call <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8> %lhc, <16 x i8> %rhc)
  %cb = bitcast <16 x i8> %add to <2 x i64>
  %fv = extractelement <2 x i64> %cb, i32 0
  store i64 %fv, ptr %a, align 8
  ret i64 %sum
}
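; Same pattern as test1, but with a plain <8 x i16> add in place of the
; target intrinsic.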
define i64 @test2(ptr %a, ptr %b) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    ld 5, 0(3)
; CHECK-NEXT:    ld 6, 0(4)
; CHECK-NEXT:    mtvsrd 34, 5
; CHECK-NEXT:    mtvsrd 35, 6
; CHECK-NEXT:    add 4, 5, 6
; CHECK-NEXT:    vadduhm 2, 2, 3
; CHECK-NEXT:    stxsdx 34, 0, 3
; CHECK-NEXT:    mr 3, 4
; CHECK-NEXT:    blr
entry:
  %lhs = load i64, ptr %a, align 8
  %rhs = load i64, ptr %b, align 8
  %sum = add i64 %lhs, %rhs
  %lv = insertelement <2 x i64> undef, i64 %lhs, i32 0
  %rv = insertelement <2 x i64> undef, i64 %rhs, i32 0
  %lhc = bitcast <2 x i64> %lv to <8 x i16>
  %rhc = bitcast <2 x i64> %rv to <8 x i16>
  %add = add <8 x i16> %lhc, %rhc
  %cb = bitcast <8 x i16> %add to <2 x i64>
  %fv = extractelement <2 x i64> %cb, i32 0
  store i64 %fv, ptr %a, align 8
  ret i64 %sum
}
; Ensure that vec-ops with multiple uses aren't simplified.
define signext i16 @vecop_uses(ptr %addr) {
; CHECK-LABEL: vecop_uses:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    li 4, 16
; CHECK-NEXT:    lxvd2x 0, 3, 4
; CHECK-NEXT:    xxswapd 34, 0
; CHECK-NEXT:    lxvd2x 0, 0, 3
; CHECK-NEXT:    xxswapd 35, 0
; CHECK-NEXT:    vminsh 2, 3, 2
; CHECK-NEXT:    xxswapd 35, 34
; CHECK-NEXT:    vminsh 2, 2, 3
; CHECK-NEXT:    xxspltw 35, 34, 2
; CHECK-NEXT:    vminsh 2, 2, 3
; CHECK-NEXT:    vsplth 3, 2, 6
; CHECK-NEXT:    vminsh 2, 2, 3
; CHECK-NEXT:    xxswapd 0, 34
; CHECK-NEXT:    mffprd 3, 0
; CHECK-NEXT:    clrldi 3, 3, 48
; CHECK-NEXT:    extsh 3, 3
; CHECK-NEXT:    blr
entry:
  %0 = load <16 x i16>, ptr %addr, align 2
  %1 = call i16 @llvm.vector.reduce.smin.v16i16(<16 x i16> %0)
  ret i16 %1
}
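; The loaded vector %0 has two uses: the multiply (stored out in full) and the
; scalar extract of lane 3.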
define signext i32 @vecop_uses2(ptr %a, ptr %b, ptr %c) {
; CHECK-LABEL: vecop_uses2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    lxvd2x 0, 0, 3
; CHECK-NEXT:    xxswapd 34, 0
; CHECK-NEXT:    lxvd2x 0, 0, 4
; CHECK-NEXT:    xxswapd 35, 0
; CHECK-NEXT:    xxsldwi 0, 34, 34, 3
; CHECK-NEXT:    vmuluwm 2, 3, 2
; CHECK-NEXT:    mffprwz 3, 0
; CHECK-NEXT:    extsw 3, 3
; CHECK-NEXT:    xxswapd 1, 34
; CHECK-NEXT:    stxvd2x 1, 0, 5
; CHECK-NEXT:    blr
entry:
  %0 = load <4 x i32>, ptr %a, align 4
  %1 = load <4 x i32>, ptr %b, align 4
  %2 = mul <4 x i32> %1, %0
  store <4 x i32> %2, ptr %c, align 4
  %3 = extractelement <4 x i32> %0, i32 3
  ret i32 %3
}
declare <16 x i8> @llvm.ppc.altivec.vavgsb(<16 x i8>, <16 x i8>)
declare i16 @llvm.vector.reduce.smin.v16i16(<16 x i16>)