/* OpenCL built-in library: subgroup basic functionality

   Copyright (c) 2022-2023 Pekka Jääskeläinen / Intel Finland Oy

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to
   deal in the Software without restriction, including without limitation the
   rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
   sell copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
   FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
   IN THE SOFTWARE.
*/
/* See subgroups.c for further documentation. */
void _CL_OVERLOADABLE
sub_group_barrier (cl_mem_fence_flags flags) __attribute__ ((noduplicate))
{
  /* This should work as long as there are no diverging subgroups:
     it models all subgroups of the work-group stepping in lockstep. */
  work_group_barrier (flags);
}
void _CL_OVERLOADABLE
sub_group_barrier (cl_mem_fence_flags flags, memory_scope scope)
    __attribute__ ((noduplicate))
{
  /* The memory scope is ignored here: the work-group-wide barrier is at
     least as strong as a sub-group scope requires. */
  work_group_barrier (flags);
}
void _CL_OVERLOADABLE
sub_group_barrier (memory_scope scope)
    __attribute__ ((noduplicate))
{
  work_group_barrier (CLK_GLOBAL_MEM_FENCE);
}
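
/* Usage sketch (illustrative only; the kernel name is hypothetical and
   sub-group lanes are assumed to map to consecutive local ids): each
   work-item publishes a value to local memory, then reads the slot of
   the next lane of its own sub-group after a sub-group barrier.

   kernel void sg_barrier_demo (global uint *out, local uint *tmp)
   {
     size_t lid = get_local_id (0);
     uint sglid = get_sub_group_local_id ();
     tmp[lid] = sglid;
     sub_group_barrier (CLK_LOCAL_MEM_FENCE);
     size_t base = lid - sglid;
     out[get_global_id (0)] = tmp[base + (sglid + 1) % get_sub_group_size ()];
   }
*/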
int _CL_OVERLOADABLE
sub_group_any (int predicate)
{
  /* The cast to unsigned makes any nonzero predicate, including negative
     values, reduce to a nonzero maximum. */
  return sub_group_reduce_max ((unsigned)predicate);
}
int _CL_OVERLOADABLE
sub_group_all (int predicate)
{
  /* The unsigned minimum is zero iff at least one lane has a zero
     predicate. */
  return sub_group_reduce_min ((unsigned)predicate);
}
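
/* Usage sketch (illustrative; the kernel name is hypothetical): every
   lane of a sub-group receives the same vote result.

   kernel void vote_demo (global const int *in, global int *out)
   {
     int v = in[get_global_id (0)];
     out[get_global_id (0)]
         = sub_group_any (v > 0) && sub_group_all (v < 100);
   }
*/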
#ifdef cl_intel_subgroups
/* https://registry.khronos.org/OpenCL/extensions/intel/cl_intel_subgroups.html */
uint _CL_OVERLOADABLE
intel_sub_group_shuffle_down (uint current, uint next, uint delta)
{
  uint sgsize = get_max_sub_group_size ();
  uint idx = get_sub_group_local_id () + delta;
  /* Lane to fetch 'current' from; clamped to 0 for lanes whose result
     is discarded by the select below. */
  uint cur_idx = (idx >= sgsize) ? 0 : idx;
  uint other_cur = sub_group_shuffle (current, cur_idx);
  /* Lane to fetch 'next' from once the index runs past the sub-group. */
  uint next_idx = (idx >= sgsize) ? idx - sgsize : 0;
  uint other_next = sub_group_shuffle (next, next_idx);
  return (idx >= sgsize) ? other_next : other_cur;
}
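
/* Worked example: with a sub-group size of 8 and delta == 3, lane 4
   receives 'current' from lane 7, while lane 5 receives 'next' from
   lane 0.  A usage sketch (the kernel name is hypothetical; the input
   buffer is assumed padded by one extra sub-group-sized block, and
   sub-group lanes are assumed consecutive in global id):

   kernel void shuffle_down_demo (global const uint *in, global uint *out)
   {
     size_t gid = get_global_id (0);
     uint cur = in[gid];
     uint nxt = in[gid + get_max_sub_group_size ()];
     out[gid] = intel_sub_group_shuffle_down (cur, nxt, 1);
   }
*/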
/* Block reads: lane i of the sub-group reads element i of each
   consecutive row of get_max_sub_group_size () elements, starting at
   the uniform pointer p. */

uint _CL_OVERLOADABLE
intel_sub_group_block_read (const global uint *p)
{
  return p[get_sub_group_local_id ()];
}

uint2 _CL_OVERLOADABLE
intel_sub_group_block_read2 (const global uint *p)
{
  return (uint2)(p[get_sub_group_local_id ()],
                 p[get_sub_group_local_id () + get_max_sub_group_size ()]);
}

uint4 _CL_OVERLOADABLE
intel_sub_group_block_read4 (const global uint *p)
{
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  return (uint4)(p[sglid], p[sglid + sgsize], p[sglid + 2 * sgsize],
                 p[sglid + 3 * sgsize]);
}

uint8 _CL_OVERLOADABLE
intel_sub_group_block_read8 (const global uint *p)
{
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  return (uint8)(p[sglid], p[sglid + sgsize], p[sglid + 2 * sgsize],
                 p[sglid + 3 * sgsize], p[sglid + 4 * sgsize],
                 p[sglid + 5 * sgsize], p[sglid + 6 * sgsize],
                 p[sglid + 7 * sgsize]);
}
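
/* Usage sketch (illustrative; the kernel name is hypothetical and a
   single work-group is assumed so that get_sub_group_id () addresses
   distinct rows): each sub-group loads four rows with one call.

   kernel void block_read_demo (global const uint *in, global uint *out)
   {
     uint sgsize = get_max_sub_group_size ();
     const global uint *row = in + get_sub_group_id () * 4 * sgsize;
     uint4 v = intel_sub_group_block_read4 (row);
     out[get_global_id (0)] = v.s0 + v.s1 + v.s2 + v.s3;
   }
*/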
/* Block writes mirror the block reads: lane i of the sub-group writes
   element i of each consecutive row of get_max_sub_group_size ()
   elements, starting at the uniform pointer p. */

void _CL_OVERLOADABLE
intel_sub_group_block_write (global uint *p, uint data)
{
  p[get_sub_group_local_id ()] = data;
}

void _CL_OVERLOADABLE
intel_sub_group_block_write2 (global uint *p, uint2 data)
{
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  p[sglid] = data.x;
  p[sglid + sgsize] = data.y;
}

void _CL_OVERLOADABLE
intel_sub_group_block_write4 (global uint *p, uint4 data)
{
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  p[sglid] = data.s0;
  p[sglid + sgsize] = data.s1;
  p[sglid + 2 * sgsize] = data.s2;
  p[sglid + 3 * sgsize] = data.s3;
}

void _CL_OVERLOADABLE
intel_sub_group_block_write8 (global uint *p, uint8 data)
{
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  p[sglid] = data.s0;
  p[sglid + sgsize] = data.s1;
  p[sglid + 2 * sgsize] = data.s2;
  p[sglid + 3 * sgsize] = data.s3;
  p[sglid + 4 * sgsize] = data.s4;
  p[sglid + 5 * sgsize] = data.s5;
  p[sglid + 6 * sgsize] = data.s6;
  p[sglid + 7 * sgsize] = data.s7;
}
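
/* Usage sketch (illustrative; the kernel name is hypothetical and a
   single work-group is assumed): block reads and writes round-trip, so
   this copies eight rows per sub-group.

   kernel void block_copy_demo (global const uint *in, global uint *out)
   {
     size_t off = get_sub_group_id () * 8 * get_max_sub_group_size ();
     intel_sub_group_block_write8 (out + off,
                                   intel_sub_group_block_read8 (in + off));
   }
*/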
#endif
#ifdef cl_intel_subgroups_short
/* https://registry.khronos.org/OpenCL/extensions/intel/cl_intel_subgroups_short.html */
ushort8 _CL_OVERLOADABLE
intel_sub_group_block_read_us8 (const global ushort *p)
{
  /* Same strided layout as the uint block reads, with ushort elements. */
  uint sglid = get_sub_group_local_id ();
  uint sgsize = get_max_sub_group_size ();
  return (ushort8)(p[sglid], p[sglid + sgsize], p[sglid + 2 * sgsize],
                   p[sglid + 3 * sgsize], p[sglid + 4 * sgsize],
                   p[sglid + 5 * sgsize], p[sglid + 6 * sgsize],
                   p[sglid + 7 * sgsize]);
}
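
/* Usage sketch (illustrative; the kernel name is hypothetical): the
   16-bit variant behaves like intel_sub_group_block_read8 with ushort
   elements.

   kernel void block_read_us_demo (global const ushort *in,
                                   global ushort *out)
   {
     ushort8 v = intel_sub_group_block_read_us8 (in);
     out[get_global_id (0)] = v.s0;
   }
*/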
#endif