;=========================== begin_copyright_notice ============================
;
; Copyright (C) 2022 Intel Corporation
;
; SPDX-License-Identifier: MIT
;
;============================ end_copyright_notice =============================
; RUN: igc_opt %s -S -o - -igc-resolve-atomics | FileCheck %s
; This test verifies the correctness of local spinlock translation. Local spinlock is used by
; atomics implementation. Here is the copy of macros that are implemented in BiFModule
; (Pasting it here as a comment to give better understanding of what below LLVM module does):
; #define LOCAL_SPINLOCK_START() \
; { \
; volatile bool done = false; \
; while(!done) { \
; if(SPIRV_BUILTIN(AtomicCompareExchange, _p3i32_i32_i32_i32_i32_i32, )(__builtin_IB_get_local_lock(), Device, Relaxed, Relaxed, 1, 0) == 0) {
; #define LOCAL_SPINLOCK_END() \
; done = true; \
; SPIRV_BUILTIN(AtomicStore, _p3i32_i32_i32_i32, )(__builtin_IB_get_local_lock(), Device, SequentiallyConsistent | WorkgroupMemory, 0); \
; }}}
target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f16:16:16-f32:32:32-f64:64:64-f80:128:128-v16:16:16-v24:32:32-v32:32:32-v48:64:64-v64:64:64-v96:128:128-v128:128:128-v192:256:256-v256:256:256-v512:512:512-v1024:1024:1024-a:64:64-f80:128:128-n8:16:32:64"
; CHECK: @spinlock = addrspace(3) global i32 0
define spir_kernel void @kernel() {
entry:
; Below code initializes local lock variable with 0, since there is no
; guarantee that it will be initialized by UMD on all platforms.
; The code is generated by ResolveOCLAtomics::generateLockInitilization.
; Only the first work-item (local id (0,0,0)) performs the store; all
; work-items then meet at a fence + barrier so the lock is visible before use.
; CHECK: %[[LID_X:.*]] = call i32 @__builtin_IB_get_local_id_x()
; CHECK: %[[LID_Y:.*]] = call i32 @__builtin_IB_get_local_id_y()
; CHECK: %[[LID_Z:.*]] = call i32 @__builtin_IB_get_local_id_z()
; CHECK: %[[LID_XY:.*]] = or i32 %[[LID_X]], %[[LID_Y]]
; CHECK: %[[LID_XYZ:.*]] = or i32 %[[LID_XY]], %[[LID_Z]]
; CHECK: %[[IS_FIRST_WORKITEM:.*]] = icmp eq i32 %[[LID_XYZ]], 0
; CHECK: br i1 %[[IS_FIRST_WORKITEM]], label %init_spinlock_var.start, label %init_spinlock_var.end
; CHECK: init_spinlock_var.start:
; CHECK: store i32 0, i32 addrspace(3)* @spinlock
; CHECK: br label %init_spinlock_var.end
; CHECK: init_spinlock_var.end:
; CHECK: call void @llvm.genx.GenISA.memoryfence(i1 true, i1 false, i1 false, i1 false, i1 false, i1 false, i1 true)
; CHECK: call void @llvm.genx.GenISA.threadgroupbarrier()
; ---------- LOCAL_SPINLOCK_START() --------------------
; "volatile bool done = false;" from the macro, lowered as an i8 alloca.
  %done_alloca = alloca i8, align 1
  store volatile i8 0, i8* %done_alloca, align 1
; "while(!done)": reload done, test its low bit, and enter the spin loop
; if it is still false.
  %done_load0 = load volatile i8, i8* %done_alloca, align 1
  %0 = and i8 %done_load0, 1
  %tobool8.i = icmp eq i8 %0, 0
  br i1 %tobool8.i, label %while.body.lr.ph.i, label %test_spinlock.exit
while.body.lr.ph.i:                               ; preds = %entry
  br label %while.body.i
while.body.i:                                     ; preds = %if.end5.i, %while.body.lr.ph.i
; The pass must replace __builtin_IB_get_local_lock() with the @spinlock
; global and lower the cmpxchg builtin to the GenISA intrinsic (lock acquire:
; 0 -> 1 compare-exchange succeeds only when the lock is free).
; CHECK-NOT: __builtin_IB_get_local_lock
  %call.i = call spir_func i32 addrspace(3)* @__builtin_IB_get_local_lock()
; CHECK: %[[SPINLOCK_AS_INT0:.*]] = ptrtoint i32 addrspace(3)* @spinlock to i32
; CHECK: call i32 @llvm.genx.GenISA.icmpxchgatomicraw.i32.p3i32.i32(i32 addrspace(3)* @spinlock, i32 %[[SPINLOCK_AS_INT0]], i32 0, i32 1)
  %call.i.i = call spir_func i32 @__builtin_IB_atomic_cmpxchg_local_i32(i32 addrspace(3)* %call.i, i32 0, i32 1)
  %cmp.i = icmp eq i32 %call.i.i, 0
  br i1 %cmp.i, label %if.then3.i, label %if.end5.i
; ---------- LOCAL_SPINLOCK_END() --------------------
if.then3.i:                                       ; preds = %while.body.i
; Lock acquired: set done = true, then release the lock with an atomic
; exchange back to 0, bracketed by memory fences.
  store volatile i8 1, i8* %done_alloca, align 1
; CHECK-NOT: __builtin_IB_get_local_lock
  %call4.i = call spir_func i32 addrspace(3)* @__builtin_IB_get_local_lock()
  call spir_func void @__builtin_IB_memfence(i1 zeroext true, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false)
; The xchg builtin is lowered to the GenISA int-atomic intrinsic; the trailing
; operand 6 is the atomic-op code (expected to be the exchange operation).
; CHECK: %[[SPINLOCK_AS_INT1:.*]] = ptrtoint i32 addrspace(3)* @spinlock to i32
; CHECK: call i32 @llvm.genx.GenISA.intatomicraw.i32.p3i32.i32(i32 addrspace(3)* @spinlock, i32 %[[SPINLOCK_AS_INT1]], i32 0, i32 6)
  %call.i1.i = call spir_func i32 @__builtin_IB_atomic_xchg_local_i32(i32 addrspace(3)* %call4.i, i32 0)
  call spir_func void @__builtin_IB_memfence(i1 zeroext true, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false, i1 zeroext false)
  br label %if.end5.i
if.end5.i:                                        ; preds = %if.then3.i, %while.body.i
; Loop back-edge: re-test done; exit once it has been set to true.
  %done_load1 = load volatile i8, i8* %done_alloca, align 1
  %1 = and i8 %done_load1, 1
  %tobool.i = icmp eq i8 %1, 0
  br i1 %tobool.i, label %while.body.i, label %test_spinlock.exit
test_spinlock.exit:                               ; preds = %entry, %if.end5.i
  ret void
}
; Builtin declarations consumed by the pass under test; all calls to them are
; expected to be rewritten to GenISA intrinsics / the @spinlock global.
declare spir_func i32 addrspace(3)* @__builtin_IB_get_local_lock()
declare spir_func i32 @__builtin_IB_atomic_xchg_local_i32(i32 addrspace(3)*, i32)
declare spir_func i32 @__builtin_IB_atomic_cmpxchg_local_i32(i32 addrspace(3)*, i32, i32)
declare spir_func void @__builtin_IB_memfence(i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext, i1 zeroext)
; CHECK: declare i32 @llvm.genx.GenISA.icmpxchgatomicraw.i32.p3i32.i32(i32 addrspace(3)*, i32, i32, i32)
; CHECK: declare i32 @llvm.genx.GenISA.intatomicraw.i32.p3i32.i32(i32 addrspace(3)*, i32, i32, i32)
; CHECK: declare i32 @__builtin_IB_get_local_id_x()
; CHECK: declare i32 @__builtin_IB_get_local_id_y()
; CHECK: declare i32 @__builtin_IB_get_local_id_z()
; CHECK: declare void @llvm.genx.GenISA.memoryfence(i1, i1, i1, i1, i1, i1, i1)
; CHECK: declare void @llvm.genx.GenISA.threadgroupbarrier()