[require]
GL >= 3.3
GLSL >= 3.30
GL_ARB_compute_shader
GL_ARB_shader_atomic_counters
GL_INTEL_shader_atomic_float_minmax
[compute shader]
#version 330
#extension GL_ARB_compute_shader: require
#extension GL_ARB_shader_atomic_counters: require
#extension GL_INTEL_shader_atomic_float_minmax: require
layout(local_size_x = 32) in;
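
/* "value" is the shared cell that every invocation exchanges through;
 * "mask" collects one bit for each invocation index observed coming
 * back out of it.
 */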
shared float value;
shared uint mask;
layout(binding = 0) uniform atomic_uint pass;
layout(binding = 0) uniform atomic_uint fail;
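
/* Each of the 32 invocations in a work group atomically exchanges
 * 0.5 * its invocation index into "value".  Every value returned by an
 * exchange (plus the value left resident at the end) is decoded back to
 * an index and recorded as a bit in "mask".  If atomicExchange on a
 * shared float is implemented correctly, every index 0..31 is observed
 * exactly once and all 32 bits end up set.
 */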
void main()
{
        if (gl_LocalInvocationIndex == 0u) {
                /* Bootstrapping this with zero is unavoidable, but it
                 * also causes problems later.  Namely, it means that
                 * bit 0 will get set twice and invocation 0 can see a
                 * zero value.  Note the explicit checks for bit != 0
                 * and gl_LocalInvocationIndex != 0 below.
                 */
                value = 0.0;
                mask = 0u;
        }
        barrier();
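
        /* Swap this invocation's encoded index into the shared float and
         * decode the value it displaced back into a bit position.
         */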
        float f = atomicExchange(value, .5 * float(gl_LocalInvocationIndex));
        uint i = uint(f * 2.);
        uint bit = i % 32u;
        uint m = 1u << bit;
        if (i < 32u) {
                /* If the bit was already set, the test fails. */
                uint r = atomicOr(mask, m);
                if (bit != 0u && (r & m) != 0u)
                        atomicCounterIncrement(fail);

                /* Invocation index 0 can read its own index (due to
                 * bootstrapping with zero), but no other invocation should.
                 */
                if (gl_LocalInvocationIndex != 0u &&
                    bit == gl_LocalInvocationIndex)
                        atomicCounterIncrement(fail);
        } else {
                atomicCounterIncrement(fail);
        }
        barrier();
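
        /* A single invocation folds the value left resident in "value"
         * into the mask and checks that every index is accounted for.
         */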
        if (gl_LocalInvocationIndex == 0u) {
                uint i = uint(value * 2.);
                uint bit = i % 32u;
                uint m = 1u << bit;
                uint final = m | mask;

                /* If all 32 bits are set, the test passes. */
                if (final == 0xffffffffu)
                        atomicCounterIncrement(pass);
        }
}
[test]
atomic counters 2
compute 2 3 4
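# 2 * 3 * 4 = 24 work groups are dispatched, and invocation 0 of each
# group increments the "pass" counter once when all 32 mask bits are
# set, so counter 0 (pass) must read 24 and counter 1 (fail) must
# remain 0.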
probe atomic counter 0 == 24
probe atomic counter 1 == 0