; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5
; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -S -o - %s | FileCheck --check-prefix=OOB-STRICT %s
; RUN: opt -mtriple=amdgcn--amdpal -passes=load-store-vectorizer -mattr=+relaxed-buffer-oob-mode -S -o - %s | FileCheck --check-prefixes=OOB-RELAXED %s
; The test checks that relaxed-buffer-oob-mode allows merging loads even if the target load is not naturally aligned.
define amdgpu_kernel void @merge_align_4(ptr addrspace(7) captures(none) %p) #0 {
;
; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_4(
; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
; OOB-STRICT-NEXT: [[ENTRY:.*:]]
; OOB-STRICT-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-STRICT-NEXT: [[LD_M8:%.*]] = load i32, ptr addrspace(7) [[GEP_M8]], align 4
; OOB-STRICT-NEXT: [[GEP_M4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -4
; OOB-STRICT-NEXT: [[LD_M4:%.*]] = load i32, ptr addrspace(7) [[GEP_M4]], align 4
; OOB-STRICT-NEXT: [[GEP_0:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 0
; OOB-STRICT-NEXT: [[LD_0:%.*]] = load i32, ptr addrspace(7) [[GEP_0]], align 4
; OOB-STRICT-NEXT: [[GEP_4:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i64 4
; OOB-STRICT-NEXT: [[LD_4:%.*]] = load i32, ptr addrspace(7) [[GEP_4]], align 4
; OOB-STRICT-NEXT: ret void
;
; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_4(
; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0:[0-9]+]] {
; OOB-RELAXED-NEXT: [[ENTRY:.*:]]
; OOB-RELAXED-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-RELAXED-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 4
; OOB-RELAXED-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-RELAXED-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-RELAXED-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-RELAXED-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-RELAXED-NEXT: ret void
;
; Four contiguous i32 loads covering [p-8, p+8), each only 4-byte aligned.
; OOB-STRICT keeps them scalar (checked above); OOB-RELAXED merges them into a
; single <4 x i32> load at align 4. The OOB-RELAXED run also expects an
; attribute group (#[[ATTR0]]) on the define — presumably the target-features
; attribute added by the -mattr RUN line; attribute group #0 referenced below
; is not visible in this chunk — TODO confirm it is defined later in the file.
entry:
  %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
  %ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 4
  %gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
  %ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
  %gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
  %ld_0 = load i32, ptr addrspace(7) %gep_0, align 4
  ; NOTE(review): this GEP uses an i64 index while the three above use i32 —
  ; looks unintentional (same byte offset either way); confirm or normalize
  ; and regenerate the checks with update_test_checks.py.
  %gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
  %ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
  ret void
}
; The test checks that strict OOB mode (relaxed-buffer-oob-mode not set) allows merging loads if the target load is naturally aligned.
define amdgpu_kernel void @merge_align_16(ptr addrspace(7) captures(none) %p) #0 {
; OOB-STRICT-LABEL: define amdgpu_kernel void @merge_align_16(
; OOB-STRICT-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) {
; OOB-STRICT-NEXT: [[ENTRY:.*:]]
; OOB-STRICT-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-STRICT-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
; OOB-STRICT-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-STRICT-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-STRICT-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-STRICT-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-STRICT-NEXT: ret void
;
; OOB-RELAXED-LABEL: define amdgpu_kernel void @merge_align_16(
; OOB-RELAXED-SAME: ptr addrspace(7) captures(none) [[P:%.*]]) #[[ATTR0]] {
; OOB-RELAXED-NEXT: [[ENTRY:.*:]]
; OOB-RELAXED-NEXT: [[GEP_M8:%.*]] = getelementptr i8, ptr addrspace(7) [[P]], i32 -8
; OOB-RELAXED-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr addrspace(7) [[GEP_M8]], align 16
; OOB-RELAXED-NEXT: [[LD_M81:%.*]] = extractelement <4 x i32> [[TMP0]], i32 0
; OOB-RELAXED-NEXT: [[LD_M42:%.*]] = extractelement <4 x i32> [[TMP0]], i32 1
; OOB-RELAXED-NEXT: [[LD_03:%.*]] = extractelement <4 x i32> [[TMP0]], i32 2
; OOB-RELAXED-NEXT: [[LD_44:%.*]] = extractelement <4 x i32> [[TMP0]], i32 3
; OOB-RELAXED-NEXT: ret void
;
; Same four contiguous i32 loads as @merge_align_4, but the leading load at
; p-8 is 16-byte aligned. With a naturally aligned leading element, BOTH run
; lines expect the merged <4 x i32> load at align 16 — strict OOB mode only
; refuses the merge when the vector access would be under-aligned (see
; @merge_align_4 above).
entry:
  %gep_m8 = getelementptr i8, ptr addrspace(7) %p, i32 -8
  %ld_m8 = load i32, ptr addrspace(7) %gep_m8, align 16
  %gep_m4 = getelementptr i8, ptr addrspace(7) %p, i32 -4
  %ld_m4 = load i32, ptr addrspace(7) %gep_m4, align 4
  %gep_0 = getelementptr i8, ptr addrspace(7) %p, i32 0
  %ld_0 = load i32, ptr addrspace(7) %gep_0, align 8
  ; NOTE(review): i64 index here vs i32 on the other GEPs — presumably
  ; unintentional (offset is identical); confirm or normalize and regenerate
  ; checks with update_test_checks.py.
  %gep_4 = getelementptr i8, ptr addrspace(7) %p, i64 4
  %ld_4 = load i32, ptr addrspace(7) %gep_4, align 4
  ret void
}