; RUN: opt -basic-aa -aa-eval -print-all-alias-modref-info -disable-output %s 2>&1 | FileCheck %s
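; Check how BasicAA models the memory effects of the element-wise unordered
; atomic memset, memcpy and memmove intrinsics. With a constant length,
; accesses at offsets beyond the length are NoModRef; with a variable length,
; they conservatively remain Mod or Ref.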
declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* nocapture writeonly, i8, i64, i32)
define void @test_memset_element_unordered_atomic_const_size(i8* noalias %a) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_const_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
;
entry:
call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 4, i32 1)
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
ret void
}
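; With a variable length the write extent is unknown, so the store to
; %a.gep.5 can no longer be proven NoModRef.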
define void @test_memset_element_unordered_atomic_variable_size(i8* noalias %a, i64 %n) {
; CHECK-LABEL: Function: test_memset_element_unordered_atomic_variable_size
; CHECK: Just Mod (MustAlias): Ptr: i8* %a <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.1 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %a.gep.5 <-> call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
;
entry:
call void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* align 1 %a, i8 0, i64 %n, i32 1)
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
ret void
}
declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32)
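; The memcpy reads from %a and writes to %b, so pointers into %a are at most
; Ref and pointers into %b are at most Mod; offsets at or beyond the constant
; length of 4 are NoModRef.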
define void @test_memcpy_element_unordered_atomic_const_size(i8* noalias %a, i8* noalias %b) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_const_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %a.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %b.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
;
entry:
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
%b.gep.1 = getelementptr i8, i8* %b, i32 1
store i8 0, i8* %b.gep.1
%b.gep.5 = getelementptr i8, i8* %b, i32 5
store i8 1, i8* %b.gep.5
ret void
}
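; Same as above, but with a variable length the accesses at offset 5 remain
; Ref (into %a) and Mod (into %b).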
define void @test_memcpy_element_unordered_atomic_variable_size(i8* noalias %a, i8* noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memcpy_element_unordered_atomic_variable_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
;
entry:
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
call void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
%b.gep.1 = getelementptr i8, i8* %b, i32 1
store i8 0, i8* %b.gep.1
%b.gep.5 = getelementptr i8, i8* %b, i32 5
store i8 1, i8* %b.gep.5
ret void
}
declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* nocapture writeonly, i8* nocapture readonly, i64, i32)
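; memmove is modeled like memcpy: Ref on the source %a, Mod on the
; destination %b, and NoModRef beyond the constant length.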
define void @test_memmove_element_unordered_atomic_const_size(i8* noalias %a, i8* noalias %b) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_const_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %a.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
; CHECK-NEXT: NoModRef: Ptr: i8* %b.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
;
entry:
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 4, i32 1)
%b.gep.1 = getelementptr i8, i8* %b, i32 1
store i8 0, i8* %b.gep.1
%b.gep.5 = getelementptr i8, i8* %b, i32 5
store i8 1, i8* %b.gep.5
ret void
}
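; With a variable length, offsets past the start cannot be excluded, so
; %a.gep.5 is Ref and %b.gep.5 is Mod.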
define void @test_memmove_element_unordered_atomic_variable_size(i8* noalias %a, i8* noalias %b, i64 %n) {
; CHECK-LABEL: Function: test_memmove_element_unordered_atomic_variable_size
; CHECK: Just Ref: Ptr: i8* %a <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Ref: Ptr: i8* %a.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.1 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
; CHECK-NEXT: Just Mod: Ptr: i8* %b.gep.5 <-> call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
;
entry:
%a.gep.1 = getelementptr i8, i8* %a, i32 1
store i8 0, i8* %a.gep.1
%a.gep.5 = getelementptr i8, i8* %a, i32 5
store i8 1, i8* %a.gep.5
call void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* align 1 %b, i8* align 1 %a, i64 %n, i32 1)
%b.gep.1 = getelementptr i8, i8* %b, i32 1
store i8 0, i8* %b.gep.1
%b.gep.5 = getelementptr i8, i8* %b, i32 5
store i8 1, i8* %b.gep.5
ret void
}