File: amdgpu-late-codegenprepare.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX12

; Make sure we don't crash when trying to create a bitcast between
; address spaces
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; GFX9-LABEL: @constant_from_offset_cast_generic_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

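; Same as above, but the constant-address-space pointer is an addrspacecast of
; a null in the global (addrspace(1)) address space.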
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; GFX9-LABEL: @constant_from_offset_cast_global_null(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_null(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4

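; Offsetted load through an addrspacecast of a real global variable rather than
; a null pointer.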
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; GFX9-LABEL: @constant_from_offset_cast_global_gv(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_gv(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

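; Base pointer produced by inttoptr in the generic address space, then
; addrspacecast to the constant address space.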
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; GFX9-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX9-NEXT:    [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT:    [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT:    [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT:    store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}

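; Direct inttoptr into the constant address space (addrspace(4)), with no
; addrspacecast involved.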
define amdgpu_kernel void @constant_from_inttoptr() {
; GFX9-LABEL: @constant_from_inttoptr(
; GFX9-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; GFX9-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX9-NEXT:    ret void
;
; GFX12-LABEL: @constant_from_inttoptr(
; GFX12-NEXT:    [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
; GFX12-NEXT:    store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT:    ret void
;
  %load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
  store i8 %load, ptr addrspace(1) undef
  ret void
}