File: overlap-shift.ll

Package: llvm-toolchain-3.8 1:3.8.1-24
;; X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with the shift result %Z.  Because of this, a copy
;; needs to be emitted before the shift to save the register value
;; before it is clobbered.  However, this copy is not needed if the
;; register allocator turns the shift into an LEA.  This also occurs
;; for ADD.
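;;
;; As a rough illustration (the register choices below are illustrative
;; only and are not asserted by this test), a naive i386 lowering of
;; @test1 needs a copy before the shift, because EAX must still hold X
;; at the ret:
;;     mov  eax, dword ptr [esp + 4]   ; load X (also the return value)
;;     mov  ecx, eax                   ; copy so the shift does not clobber X
;;     shl  ecx, 2                     ; Z = X << 2
;; Selecting an LEA instead computes the result into a fresh register
;; with no copy:
;;     lea  ecx, [4*eax]               ; Z = X << 2, EAX left untouched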

; Check that the shift gets turned into an LEA.

; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
; RUN:   not grep "mov E.X, E.X"

@G = external global i32                ; <i32*> [#uses=1]

define i32 @test1(i32 %X) {
        %Z = shl i32 %X, 2              ; <i32> [#uses=1]
        store volatile i32 %Z, i32* @G
        ret i32 %X
}
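
;; A minimal sketch of the ADD case mentioned above (hypothetical; this
;; function is not part of the original test file): the same copy-vs-LEA
;; trade-off arises for X+X, since an LEA such as [eax + eax] can place
;; the sum in another register without clobbering X.
; define i32 @test2(i32 %X) {
;         %Z = add i32 %X, %X
;         store volatile i32 %Z, i32* @G
;         ret i32 %X
; }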