File: overlap-shift.ll

;; %X's live range extends beyond the shift, so the register allocator
;; cannot coalesce it with the shift result %Z.  Because of this, a copy
;; would need to be emitted before the shift to save the register value
;; before it is clobbered.  However, this copy is not needed if the
;; register allocator turns the shift into an LEA.  The same applies to ADD.
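;;
;; A rough sketch of the two possible sequences (the register names are
;; illustrative only; the actual choices are up to the allocator):
;;
;;   without LEA:   mov ECX, EAX        ; copy so %X survives the shift
;;                  shl ECX, 2          ; ECX = %X << 2, EAX still holds %X
;;   with LEA:      lea ECX, [4*EAX]    ; computes %X << 2 without
;;                                      ; clobbering EAX, so no copy needed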

; Check that the shift gets turned into an LEA.

; RUN: llc < %s -mtriple=i686-- -x86-asm-syntax=intel | \
; RUN:   not grep "mov E.X, E.X"

@G = external global i32                ; <i32*> [#uses=1]

define i32 @test1(i32 %X) {
        %Z = shl i32 %X, 2              ; <i32> [#uses=1]
        store volatile i32 %Z, i32* @G
        ret i32 %X
}