File: overlap-shift.ll

Package: llvm-toolchain-3.9 1:3.9.1-9
;; %X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with the shift result.  Because of this, a copy needs
;; to be emitted before the shift to save the register value before it is
;; clobbered.  However, this copy is not needed if the register allocator
;; turns the shift into an LEA, since LEA writes its result to a separate
;; destination register instead of modifying its source.  This also occurs
;; for ADD.
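;;
;; For illustration only (not part of the original test): a rough sketch of
;; the two ways the body of @test1 below could be lowered, assuming %X ends
;; up in EAX (it is the return value) and the shift result in ECX.  The
;; exact register choices and Intel-syntax spelling are assumptions.
;;
;;   Without the LEA trick (SHL clobbers its source, so a copy is needed):
;;       mov ECX, EAX
;;       shl ECX, 2
;;
;;   With the LEA trick (the three-operand form leaves EAX intact):
;;       lea ECX, [4*EAX]
;;
;; The RUN line below rejects any output containing such a reg-to-reg copy.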

; Check that the shift gets turned into an LEA.

; RUN: llc < %s -march=x86 -x86-asm-syntax=intel | \
; RUN:   not grep "mov E.X, E.X"

@G = external global i32                ; <i32*> [#uses=1]

define i32 @test1(i32 %X) {
        %Z = shl i32 %X, 2              ; <i32> [#uses=1]
        store volatile i32 %Z, i32* @G
        ret i32 %X
}
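
; For illustration only (not part of the original test): the comment above
; notes that the same copy elimination applies to ADD.  A hypothetical
; sibling test might look like the function below, where "add %X, %X" can
; be selected as "lea ECX, [EAX + EAX]" instead of a copy followed by ADD.
; Whether this file's RUN line would actually pass with it included is an
; assumption and is not verified here.
define i32 @test2(i32 %X) {
        %Z = add i32 %X, %X             ; <i32> [#uses=1]
        store volatile i32 %Z, i32* @G
        ret i32 %X
}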