#define FILENAME(line) FILENAME_FOR_EXCEPTIONS_CUDA("src/cuda-kernels/manual_awkward_ByteMaskedArray_getitem_nextcarry.cu", line)
#include "standard_parallel_algorithms.h"
#include "awkward/kernels.h"
__global__ void
awkward_ByteMaskedArray_getitem_nextcarry_filter_mask(int8_t* mask,
                                                      bool validwhen,
                                                      int64_t length) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < length) {
    if ((mask[thread_id] != 0) == validwhen) {
      mask[thread_id] = 1;
    }
    else {
      // Zero out non-matching entries; otherwise a nonzero byte that fails
      // the validwhen test would still be counted by the scan below.
      mask[thread_id] = 0;
    }
  }
}
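
// Scatters each surviving index into to_carry at the position given by the
// prefix sum. NOTE: the "- 1" below assumes the scan produces inclusive
// prefix sums (each entry counts itself); if exclusive_scan in
// standard_parallel_algorithms.h is a strictly exclusive scan, the correct
// index would be prefixed_mask[thread_id] with no "- 1".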
__global__ void
awkward_ByteMaskedArray_getitem_nextcarry_kernel(int64_t* prefixed_mask,
                                                 int64_t* to_carry,
                                                 int8_t* mask,
                                                 int64_t length) {
  int64_t thread_id = blockIdx.x * blockDim.x + threadIdx.x;
  if (thread_id < length) {
    if (mask[thread_id] != 0) {
      to_carry[prefixed_mask[thread_id] - 1] = thread_id;
    }
  }
}
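
// Host entry point: copies the input mask into scratch device memory (the
// filter step mutates it), launches the three compaction steps, and frees
// the scratch buffers before returning.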
ERROR
awkward_ByteMaskedArray_getitem_nextcarry_64(int64_t* tocarry,
                                             const int8_t* mask,
                                             int64_t length,
                                             bool validwhen) {
  int64_t* res_temp;
  int8_t* filtered_mask;
  dim3 blocks_per_grid = blocks(length);
  dim3 threads_per_block = threads(length);

  HANDLE_ERROR(cudaMalloc((void**)&res_temp, sizeof(int64_t) * length));
  HANDLE_ERROR(cudaMalloc((void**)&filtered_mask, sizeof(int8_t) * length));
  HANDLE_ERROR(cudaMemcpy(
      filtered_mask, mask, sizeof(int8_t) * length, cudaMemcpyDeviceToDevice));

  awkward_ByteMaskedArray_getitem_nextcarry_filter_mask<<<blocks_per_grid, threads_per_block>>>(
      filtered_mask, validwhen, length);
  exclusive_scan<int64_t, int8_t>(res_temp, filtered_mask, length);
  awkward_ByteMaskedArray_getitem_nextcarry_kernel<<<blocks_per_grid, threads_per_block>>>(
      res_temp, tocarry, filtered_mask, length);

  HANDLE_ERROR(cudaDeviceSynchronize());

  // Release the temporary buffers allocated above.
  HANDLE_ERROR(cudaFree(res_temp));
  HANDLE_ERROR(cudaFree(filtered_mask));

  return success();
}
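
// Usage sketch (hypothetical; the names below are illustrative, not part of
// this file). All pointers are device pointers, and with validwhen == true
// every nonzero mask byte counts as valid:
//
//   int64_t length = ...;   // number of mask bytes
//   int8_t* d_mask;         // device byte mask
//   int64_t* d_tocarry;     // device output with room for `length` entries
//   ERROR err = awkward_ByteMaskedArray_getitem_nextcarry_64(
//       d_tocarry, d_mask, length, /*validwhen=*/true);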