File: test_subbuffers.cpp

/* Tests for subbuffers, especially their legal concurrent access patterns.

   Copyright (c) 2024 Pekka Jääskeläinen / Intel Finland Oy

   Permission is hereby granted, free of charge, to any person obtaining a copy
   of this software and associated documentation files (the "Software"), to deal
   in the Software without restriction, including without limitation the rights
   to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
   copies of the Software, and to permit persons to whom the Software is
   furnished to do so, subject to the following conditions:

   The above copyright notice and this permission notice shall be included in
   all copies or substantial portions of the Software.

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
   IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
   AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
   LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
   OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
   THE SOFTWARE.
*/

// Enable OpenCL C++ exceptions
#define CL_HPP_ENABLE_EXCEPTIONS

#include "pocl_opencl.h"

#include "../../include/CL/cl_ext_pocl.h"
#include <CL/opencl.hpp>

#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <map>
#include <random>

static char VecAddSrc[] = R"raw(

  __kernel void vecadd (__global int *A, __global int *B,
                        __global int *C) {
    C[get_global_id(0)] = A[get_global_id(0)] + B[get_global_id(0)];
  }
)raw";

// Split a buffer into N subbuffers which are concurrently processed
// by multiple kernel commands in different command queues.
int TestOutputDataDecomposition() {

  bool AllOK = true;

  try {
    std::vector<cl::Platform> PlatformList;

    cl::Platform::get(&PlatformList);

    cl_context_properties cprops[] = {
        CL_CONTEXT_PLATFORM, (cl_context_properties)(PlatformList[0])(), 0};
    cl::Context Context(CL_DEVICE_TYPE_CPU | CL_DEVICE_TYPE_GPU, cprops);

    std::vector<cl::Device> Devices = Context.getInfo<CL_CONTEXT_DEVICES>();

    if (Devices.empty()) {
      std::cout << "No devices found." << std::endl;
      return EXIT_FAILURE;
    }

    const size_t NumParallelQueues = 8;
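    // Derive the per-queue work share from the device's base address
    // alignment so that every sub-buffer origin (a multiple of
    // WorkShare * sizeof(cl_int) bytes) meets the alignment required by
    // clCreateSubBuffer; a misaligned origin would fail with
    // CL_MISALIGNED_SUB_BUFFER_OFFSET.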
    const size_t WorkShare =
      (Devices[0].getInfo<CL_DEVICE_MEM_BASE_ADDR_ALIGN>() / sizeof(cl_int)) *
      4;
    // Leave one chunk of the data untouched so we can check that
    // the migrations are done at the sub-buffer level.
    const size_t NumData = (NumParallelQueues + 1) * WorkShare;

    std::cerr << "Number of devices: " << Devices.size() << std::endl;
    std::cerr << "NumData == " << NumData << std::endl;
    std::cerr << "WorkShare == " << WorkShare << std::endl;
    std::cerr << "Processing data before " << NumParallelQueues * WorkShare
              << std::endl;
    std::cerr << "Last sub-buffer starts at "
              << (NumParallelQueues - 1) * WorkShare << std::endl;

    std::vector<int> HostBufA, HostBufB, HostBufC;
    for (size_t i = 0; i < NumData; ++i) {
      HostBufA.push_back(i);
      HostBufB.push_back(2);
      HostBufC.push_back(1);
    }

    cl::Buffer ABuffer =
        cl::Buffer(Context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                   sizeof(cl_int) * NumData, HostBufA.data());

    cl::Buffer BBuffer =
        cl::Buffer(Context, CL_MEM_READ_ONLY | CL_MEM_COPY_HOST_PTR,
                   sizeof(cl_int) * NumData, HostBufB.data());

    cl::Buffer CBuffer =
        cl::Buffer(Context, CL_MEM_READ_WRITE | CL_MEM_COPY_HOST_PTR,
                   sizeof(cl_int) * NumData, HostBufC.data());

    cl::Program::Sources Sources({VecAddSrc});
    cl::Program Program(Context, Sources);
    Program.build(Devices);
    cl::Kernel VecAddKernel(Program, "vecadd");

    std::vector<cl::Buffer> SubBuffers;
    std::vector<cl::CommandQueue> Queues;
    std::vector<cl::Event> KernelEvents;
    // Spawn a set of kernel commands, each in its own command queue
    // (which could target different devices), to process its own piece of
    // the data.
    for (size_t i = 0; i < NumParallelQueues; ++i) {

      cl::CommandQueue Queue(Context, Devices[i % Devices.size()], 0);
      Queues.push_back(Queue);

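      // Each sub-buffer covers WorkShare consecutive cl_ints of its parent;
      // the region's origin and size are specified in bytes.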
      cl_buffer_region Region{.origin = i * WorkShare * sizeof(cl_int),
                              .size = WorkShare * sizeof(cl_int)};

      cl::Buffer ASubBuffer =
          ABuffer.createSubBuffer(0, CL_BUFFER_CREATE_TYPE_REGION, &Region);
      cl::Buffer BSubBuffer =
          BBuffer.createSubBuffer(0, CL_BUFFER_CREATE_TYPE_REGION, &Region);

      VecAddKernel.setArg(0, ASubBuffer);
      VecAddKernel.setArg(1, BSubBuffer);

      cl::Buffer CSubBuffer =
          CBuffer.createSubBuffer(0, CL_BUFFER_CREATE_TYPE_REGION, &Region);
      VecAddKernel.setArg(2, CSubBuffer);

      SubBuffers.push_back(ASubBuffer);
      SubBuffers.push_back(BSubBuffer);
      SubBuffers.push_back(CSubBuffer);
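      // Keep the sub-buffer handles alive; the last C sub-buffer
      // (SubBuffers.back()) is reused by a later kernel launch.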

      cl::Event Ev;
      Queue.enqueueNDRangeKernel(VecAddKernel, cl::NullRange,
                                 cl::NDRange(WorkShare), cl::NullRange, nullptr,
                                 &Ev);
      KernelEvents.push_back(Ev);
    }

    std::vector<int> AfterSubBufCContents(NumData);

    for (size_t i = 0; i < Queues.size(); ++i)
      Queues[i].finish();

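    // Blocking read of the whole parent buffer right after the parallel
    // sub-buffer kernels; the wait list is redundant after the finish()
    // calls above, but harmless.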
    Queues[0].enqueueReadBuffer(CBuffer, CL_TRUE, 0, sizeof(cl_int) * NumData,
                                AfterSubBufCContents.data(), &KernelEvents);

    // Push a kernel that reads and writes the whole buffer.
    VecAddKernel.setArg(0, CBuffer);
    // Should add 2 to all elements again, in place.
    VecAddKernel.setArg(1, BBuffer);
    VecAddKernel.setArg(2, CBuffer);

    // The event dependencies on the previous kernel commands should ensure
    // the data is implicitly migrated back to the parent buffer.
    Queues[0].enqueueNDRangeKernel(VecAddKernel, cl::NullRange,
                                   cl::NDRange(WorkShare * NumParallelQueues),
                                   cl::NullRange, &KernelEvents, nullptr);

    std::vector<int> NewBufCContents(NumData);

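    // The read below is enqueued non-blocking; since the queue is in-order,
    // it is guaranteed to execute before the kernel enqueued after it, and
    // thus snapshots the intermediate state of the C buffer.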
    Queues[0].enqueueReadBuffer(CBuffer, CL_FALSE, 0, sizeof(cl_int) * NumData,
                                NewBufCContents.data());

    // Push a kernel that reads the old sub-buffer, which should have been
    // updated with the changes made by the previous command.
    VecAddKernel.setArg(0, SubBuffers.back());
    VecAddKernel.setArg(1, BBuffer); // Should add 2 to all elements, in place.
    VecAddKernel.setArg(2, SubBuffers.back());

    Queues[0].enqueueNDRangeKernel(VecAddKernel, cl::NullRange,
                                   cl::NDRange(WorkShare), cl::NullRange,
                                   &KernelEvents, nullptr);

    std::vector<int> FinalBufCContents(NumData);

    Queues[0].enqueueReadBuffer(CBuffer, CL_FALSE, 0, sizeof(cl_int) * NumData,
                                FinalBufCContents.data());

    Queues[0].finish();

    // This should not be needed due to the event dep from the other queues.
    for (size_t i = 0; i < Queues.size(); ++i)
      Queues[i].finish();

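    // Expected contents: initially A[i] == i, B[i] == 2 and C[i] == 1. The
    // first NumParallelQueues * WorkShare elements of C become i + 2 after
    // the parallel sub-buffer kernels and i + 4 after the whole-buffer
    // kernel; the last sub-buffer's region receives one more +2 from the
    // final kernel. The trailing WorkShare elements are never written and
    // must remain 1.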
    // Check the data after the parallel sub-buffer launches.
    for (size_t i = 0; i < NumData; ++i) {
      if (i < (WorkShare * NumParallelQueues)) {
        if (AfterSubBufCContents[i] != i + 2) {
          std::cerr << "ERROR: after sub-bufs " << i << " was "
                    << AfterSubBufCContents[i] << " expected " << i + 2
                    << std::endl;
          AllOK = false;
          break;
        }
      } else {
        // The last part should remain untouched.
        if (AfterSubBufCContents[i] != 1) {
          std::cerr << "ERROR: after sub-bufs the last part " << i << " was "
                    << AfterSubBufCContents[i] << " expected 1\n";
          AllOK = false;
          break;
        }
      }
    }

    // Check the data before the last kernel launch.
    for (size_t i = 0; i < NumData; ++i) {
      if (i < (WorkShare * NumParallelQueues)) {
        if (NewBufCContents[i] != i + 2 + 2) {
          std::cerr << "ERROR: " << i << " was " << NewBufCContents[i]
                    << " expected " << i + 2 + 2 << std::endl;
          AllOK = false;
          break;
        }
      } else {
        // The last part should remain untouched.
        if (NewBufCContents[i] != 1) {
          std::cerr << "ERROR: " << i << " was " << NewBufCContents[i]
                    << " expected 1\n";
          AllOK = false;
          break;
        }
      }
    }

    // In the final state the last manipulated part of the array should have
    // received one additional +2.
    for (size_t i = 0; i < NumData; ++i) {
      if (i < (WorkShare * (NumParallelQueues - 1))) {
        if (FinalBufCContents[i] != i + 2 + 2) {
          std::cerr << "ERROR: final " << i << " was " << FinalBufCContents[i]
                    << " expected " << i + 2 + 2 << std::endl;
          AllOK = false;
          break;
        }
      } else if (i < (WorkShare * NumParallelQueues)) {
        if (FinalBufCContents[i] != i + 2 + 2 + 2) {
          std::cerr << "ERROR: final " << i << " was " << FinalBufCContents[i]
                    << " expected " << i + 2 + 2 << std::endl;
          AllOK = false;
          break;
        }
      } else {
        // The very last part should still remain untouched.
        if (FinalBufCContents[i] != 1) {
          std::cerr << "ERROR: final last part " << i << " was "
                    << FinalBufCContents[i] << " expected 1\n";
          AllOK = false;
          break;
        }
      }
    }

  } catch (cl::Error &err) {
    std::cerr << "ERROR: " << err.what() << "(" << err.err() << ")"
              << std::endl;
    AllOK = false;
  }

  if (AllOK) {
    printf("PASSED\n");
    return EXIT_SUCCESS;
  } else {
    return EXIT_FAILURE;
  }
}

int main() {

  std::cout << "TestOutputDataDecomposition: ";
  if (TestOutputDataDecomposition() == EXIT_FAILURE)
    return EXIT_FAILURE;

  CHECK_CL_ERROR(clUnloadCompiler());

  std::cout << "OK" << std::endl;

  return EXIT_SUCCESS;
}