File: overlap-shift.ll

;; X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with the shift result %Z.  Because of this, a copy needs to be
;; emitted before the shift to save the register value before it is
;; clobbered.  However, this copy is not needed if the register
;; allocator turns the shift into an LEA.  This also occurs for ADD.
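
;; Rough sketch of the two possible code sequences (not checked literally by
;; this test; register names and exact operands here are illustrative only):
;;
;;   ; without the LEA transform: %X must be copied before it is clobbered
;;   mov ECX, EAX
;;   shl ECX, 2
;;
;;   ; with the LEA transform: the result lands in a fresh register, no copy
;;   lea ECX, [4*EAX]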

; Check that the shift gets turned into an LEA.

; RUN: llc < %s -mtriple=i686-- -x86-asm-syntax=intel | \
; RUN:   not grep "mov E.X, E.X"
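; ("mov E.X, E.X" is a grep regex: it matches a register-to-register copy such
; as "mov ECX, EAX", so the test fails if any such copy of the value remains.)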

@G = external global i32                ; <i32*> [#uses=1]

define i32 @test1(i32 %X) {
        %Z = shl i32 %X, 2              ; <i32> [#uses=1]
        store volatile i32 %Z, i32* @G
        ret i32 %X
}