File: ashr-demand.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes=instcombine -S < %s | FileCheck %s

; If we only demand bits that already match the sign bit, then we don't need the shift.
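;
; For example, in @srem2_ashr_mask below the srem by 2 can only produce -1, 0
; or 1, so bits 1..31 of %srem are all copies of the sign bit. The mask only
; demands bit 1, which already matches what "ashr i32 %srem, 31" would place
; there, so the shift is expected to be dropped.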

define i32 @srem2_ashr_mask(i32 %a0) {
; CHECK-LABEL: @srem2_ashr_mask(
; CHECK-NEXT:    [[SREM:%.*]] = srem i32 [[A0:%.*]], 2
; CHECK-NEXT:    [[MASK:%.*]] = and i32 [[SREM]], 2
; CHECK-NEXT:    ret i32 [[MASK]]
;
  %srem = srem i32 %a0, 2 ; result is -1, 0 or 1 => 31 known sign bits
  %ashr = ashr i32 %srem, 31
  %mask = and i32 %ashr, 2
  ret i32 %mask
}

; Negative test - the mask demands a non-sign bit from the shift source.
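; Here srem by 8 produces values in the range -7..7, so bit 1 of %srem is not
; a copy of the sign bit and the ashr cannot simply be removed. Instead the
; pattern is expected to fold into a sign test: the select below returns 2
; exactly when the remainder would be negative (sign bit of %a0 set and at
; least one of its low three bits set).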
define i32 @srem8_ashr_mask(i32 %a0) {
; CHECK-LABEL: @srem8_ashr_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A0:%.*]], -2147483641
; CHECK-NEXT:    [[ISNEG:%.*]] = icmp ugt i32 [[TMP1]], -2147483648
; CHECK-NEXT:    [[MASK:%.*]] = select i1 [[ISNEG]], i32 2, i32 0
; CHECK-NEXT:    ret i32 [[MASK]]
;
  %srem = srem i32 %a0, 8
  %ashr = ashr i32 %srem, 31
  %mask = and i32 %ashr, 2
  ret i32 %mask
}

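; Splat-vector version of @srem2_ashr_mask; the same demanded-bits reasoning
; applies independently to each lane, so the ashr is expected to fold away
; here as well.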
define <2 x i32> @srem2_ashr_mask_vector(<2 x i32> %a0) {
; CHECK-LABEL: @srem2_ashr_mask_vector(
; CHECK-NEXT:    [[SREM:%.*]] = srem <2 x i32> [[A0:%.*]], <i32 2, i32 2>
; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i32> [[SREM]], <i32 2, i32 2>
; CHECK-NEXT:    ret <2 x i32> [[MASK]]
;
  %srem = srem <2 x i32> %a0, <i32 2, i32 2>
  %ashr = ashr <2 x i32> %srem, <i32 31, i32 31>
  %mask = and <2 x i32> %ashr, <i32 2, i32 2>
  ret <2 x i32> %mask
}

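; As above, but with a non-constant shift amount. The fold is driven by the
; demanded bits of the srem result rather than by the shift amount, so the
; ashr is still expected to disappear.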
define <2 x i32> @srem2_ashr_mask_vector_nonconstant(<2 x i32> %a0, <2 x i32> %a1) {
; CHECK-LABEL: @srem2_ashr_mask_vector_nonconstant(
; CHECK-NEXT:    [[SREM:%.*]] = srem <2 x i32> [[A0:%.*]], <i32 2, i32 2>
; CHECK-NEXT:    [[MASK:%.*]] = and <2 x i32> [[SREM]], <i32 2, i32 2>
; CHECK-NEXT:    ret <2 x i32> [[MASK]]
;
  %srem = srem <2 x i32> %a0, <i32 2, i32 2>
  %ashr = ashr <2 x i32> %srem, %a1
  %mask = and <2 x i32> %ashr, <i32 2, i32 2>
  ret <2 x i32> %mask
}


; If it does not matter whether we use ashr or lshr, then we canonicalize to lshr.
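;
; In @ashr_can_be_lshr the trunc only demands the low 16 bits of the shift
; result, and an ashr and an lshr by 16 agree on those bits, so the shift is
; expected to be rewritten as "lshr exact".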

define i16 @ashr_can_be_lshr(i32 %a) {
; CHECK-LABEL: @ashr_can_be_lshr(
; CHECK-NEXT:    [[ASHR:%.*]] = lshr exact i32 [[A:%.*]], 16
; CHECK-NEXT:    [[TRUNC:%.*]] = trunc nuw i32 [[ASHR]] to i16
; CHECK-NEXT:    ret i16 [[TRUNC]]
;
  %ashr = ashr exact i32 %a, 16
  %trunc = trunc nsw i32 %ashr to i16
  ret i16 %trunc
}

; Historically SimplifyDemandedUseBits skipped replacing ashr with lshr here
; because known-sign-bits analysis showed that %ashr had more than 33 sign
; bits. However, it seems odd not to always canonicalize to lshr when
; possible, and in this case rewriting to lshr triggers further optimizations.
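;
; Working through the chain: %or always has bits 24..31 set, so after the shl
; by 34 and the ashr by 32 the low 32 bits hold %or shifted left by two, with
; bits 26..31 known to be ones. The whole sequence therefore folds to
; (%a << 2) | 0xFC000000, which is what the CHECK lines below expect.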
define i32 @ashr_can_be_lshr_2(i32 %a) {
; CHECK-LABEL: @ashr_can_be_lshr_2(
; CHECK-NEXT:    [[TMP1:%.*]] = shl i32 [[A:%.*]], 2
; CHECK-NEXT:    [[TMP2:%.*]] = or i32 [[TMP1]], -67108864
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %ext = zext i32 %a to i64
  %or = or i64 %ext, 4278190080
  %shl = shl i64 %or, 34
  %ashr = ashr exact i64 %shl, 32
  %trunc = trunc nsw i64 %ashr to i32
  ret i32 %trunc
}