File: vec_insert-7.ll

; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin9 -mattr=+mmx,+sse4.2 | FileCheck %s --check-prefix=X64

; MMX insertelement is not available, so these inserts are promoted to xmm registers.
; (Without SSE they are split into two i32 scalars, and the resulting code is much better.)

define x86_mmx @mmx_movzl(x86_mmx %x) nounwind {
; X32-LABEL: mmx_movzl:
; X32:       ## %bb.0:
; X32-NEXT:    subl $20, %esp
; X32-NEXT:    movq %mm0, {{[0-9]+}}(%esp)
; X32-NEXT:    pmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; X32-NEXT:    movl $32, %eax
; X32-NEXT:    pinsrd $0, %eax, %xmm0
; X32-NEXT:    pxor %xmm1, %xmm1
; X32-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3,4,5,6,7]
; X32-NEXT:    movq %xmm1, (%esp)
; X32-NEXT:    movq (%esp), %mm0
; X32-NEXT:    addl $20, %esp
; X32-NEXT:    retl
;
; X64-LABEL: mmx_movzl:
; X64:       ## %bb.0:
; X64-NEXT:    movdq2q %xmm0, %mm0
; X64-NEXT:    movq %mm0, -{{[0-9]+}}(%rsp)
; X64-NEXT:    pmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; X64-NEXT:    movl $32, %eax
; X64-NEXT:    pinsrq $0, %rax, %xmm1
; X64-NEXT:    pxor %xmm0, %xmm0
; X64-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1],xmm0[2,3,4,5,6,7]
; X64-NEXT:    retq
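; Note: the two inserts below overwrite both lanes, so the value returned is the
; constant <i32 32, i32 0> bitcast back to x86_mmx; the incoming %x only feeds the
; initial bitcast and does not affect the result.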
  %tmp = bitcast x86_mmx %x to <2 x i32>
  %tmp3 = insertelement <2 x i32> %tmp, i32 32, i32 0
  %tmp8 = insertelement <2 x i32> %tmp3, i32 0, i32 1
  %tmp9 = bitcast <2 x i32> %tmp8 to x86_mmx
  ret x86_mmx %tmp9
}
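
; A minimal illustrative sketch (not part of the original test, no CHECK lines):
; the same insert pattern on a plain <2 x i32> value, without the x86_mmx
; bitcasts that force the store/reload through %mm0 seen above. The function
; name is hypothetical and chosen only for this example.
define <2 x i32> @movzl_no_mmx(<2 x i32> %x) nounwind {
  ; Set lane 0 to 32 and lane 1 to 0, mirroring @mmx_movzl's body.
  %t0 = insertelement <2 x i32> %x, i32 32, i32 0
  %t1 = insertelement <2 x i32> %t0, i32 0, i32 1
  ret <2 x i32> %t1
}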