File: collectives.pyx

from libc.stdlib cimport malloc, calloc, free
from libc.string cimport memcmp

from cpython cimport Py_buffer, Py_INCREF, Py_DECREF
from cpython.buffer cimport PyBUF_FORMAT, PyBUF_ND, PyBUF_STRIDES

from pygpu.gpuarray cimport (gpucontext, GpuContext, _GpuArray, GpuArray,
                             ensure_context,
                             GA_NO_ERROR, get_exc, gpucontext_error,
                             GpuArray_IS_C_CONTIGUOUS,
                             GA_C_ORDER, GA_F_ORDER, GA_ANY_ORDER,
                             pygpu_empty_like, pygpu_empty, memcpy)
from pygpu.gpuarray import GpuArrayException


COMM_ID_BYTES = GA_COMM_ID_BYTES

cdef class GpuCommCliqueId:
    """GpuCommCliqueId(context=None, comm_id=None)

    Represents a unique id shared among :class:`GpuComm` communicators which
    participate in a multi-gpu clique.

    Parameters
    ----------
    context: GpuContext
        Context of the gpu this GpuCommCliqueId object belongs to.
    comm_id: bytes
        Existing unique id to be stored in this object.

    """
    def __cinit__(self, GpuContext context=None, unsigned char[:] comm_id=None):
        self.context = ensure_context(context)
        if comm_id is None:
            comm_generate_id(self.context.ctx, &self.c_comm_id)

    def __init__(self, GpuContext context=None, unsigned char[:] comm_id=None):
        if comm_id is not None:
            self.comm_id = comm_id

    def __richcmp__(this, that, int op):
        if type(this) != type(that):
            raise TypeError, "Cannot compare %s with %s" % (type(this), type(that))
        cdef int res
        cdef GpuCommCliqueId a
        a = this
        cdef GpuCommCliqueId b
        b = that
        res = memcmp(<void*>a.c_comm_id.internal, <void*>b.c_comm_id.internal, GA_COMM_ID_BYTES)
        if op == 0:
            return res < 0
        elif op == 1:
            return res <= 0
        elif op == 2:
            return res == 0
        elif op == 3:
            return res != 0
        elif op == 4:
            return res > 0
        else:
            return res >= 0

    def __hash__(self):
        return hash(self.__class__.__name__) ^ hash(self.c_comm_id.internal[:GA_COMM_ID_BYTES])

    def __reduce__(self):
        raise RuntimeError, "Cannot pickle %s object" % self.__class__.__name__

    property comm_id:
        "Unique clique id to be used by each :class:`GpuComm` in a group of devices"
        def __get__(self):
            cdef bytearray res
            res = self.c_comm_id.internal[:GA_COMM_ID_BYTES]
            return res

        def __set__(self, unsigned char[:] cid):
            cdef int length
            length = cid.shape[0]
            if length < GA_COMM_ID_BYTES:
                raise ValueError, "GpuComm clique id must have length %d bytes" % (GA_COMM_ID_BYTES)
            memcpy(self.c_comm_id.internal, <char*>&cid[0], GA_COMM_ID_BYTES)
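
    # Usage sketch (editor's addition, not part of the original source): a
    # clique id is typically generated once and its raw bytes shipped to the
    # other workers over whatever transport the application already uses
    # (MPI, multiprocessing pipes, sockets, ...).  ``send_to_workers`` and the
    # contexts ``ctx``/``wctx`` below are hypothetical placeholders; only
    # GpuCommCliqueId and its ``comm_id`` property come from this module.
    #
    #   >>> cid = GpuCommCliqueId(context=ctx)       # generates a fresh id
    #   >>> payload = bytes(cid.comm_id)             # GA_COMM_ID_BYTES raw bytes
    #   >>> send_to_workers(payload)                 # hypothetical transport
    #   >>> # on a worker, with its own context wctx:
    #   >>> wcid = GpuCommCliqueId(context=wctx, comm_id=bytearray(payload))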


cdef class GpuComm:
    """GpuComm(cid, ndev, rank)

    Represents a communicator which participates in a multi-gpu clique.

    It is used to invoke collective operations on the gpus inside its clique.

    Parameters
    ----------
    cid: GpuCommCliqueId
        Unique id shared among participating communicators.
    ndev: int
        Number of communicators inside the clique.
    rank: int
        User-defined rank of this communicator inside the clique. It
        influences the order of collective operations.

    """
    def __dealloc__(self):
        gpucomm_free(self.c)

    def __cinit__(self, GpuCommCliqueId cid not None, int ndev, int rank):
        cdef int err
        err = gpucomm_new(&self.c, cid.context.ctx, cid.c_comm_id, ndev, rank)
        if err != GA_NO_ERROR:
            raise get_exc(err), gpucontext_error(cid.context.ctx, err)

    def __reduce__(self):
        raise RuntimeError, "Cannot pickle %s object" % self.__class__.__name__

    property count:
        "Total number of communicators inside the clique"
        def __get__(self):
            cdef int gpucount
            comm_get_count(self, &gpucount)
            return gpucount

    property rank:
        "User-defined rank of this communicator inside the clique"
        def __get__(self):
            cdef int gpurank
            comm_get_rank(self, &gpurank)
            return gpurank
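
    # Usage sketch (editor's addition): every participating process builds a
    # GpuComm from the shared clique id, the clique size and its own rank.
    # ``cid`` is assumed to be a GpuCommCliqueId shared as sketched above.
    #
    #   >>> comm = GpuComm(cid, 2, 0)    # two devices, this process is rank 0
    #   >>> comm.count, comm.rank
    #   (2, 0)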

    def reduce(self, GpuArray src not None, op, GpuArray dest=None,
               int root=-1):
        """
        reduce(self, src, op, dest=None, root=-1)

        Reduce collective operation for ranks in a communicator world.

        Parameters
        ----------
        src: GpuArray
            Array to be reduced.
        op: str
            Key indicating operation type.
        dest: GpuArray
            Array to collect reduce operation result.
        root: int
            Rank in `GpuComm` which will collect the result.

        Notes
        -----
        * `root` is necessary when invoking from a non-root rank. Root
          caller does not need to provide `root` argument.
        * If a root caller does not provide the `dest` argument, a new
          compatible :class:`GpuArray` will be created and the result
          returned in it.

        """
        cdef int srank
        if dest is None:
            if root != -1:
                comm_get_rank(self, &srank)
                if root == srank:
                    return pygpu_make_reduced(self, src, to_reduce_opcode(op))
                comm_reduce_from(self, src, to_reduce_opcode(op), root)
                return
            else:
                return pygpu_make_reduced(self, src, to_reduce_opcode(op))
        if root == -1:
            comm_get_rank(self, &root)
        comm_reduce(self, src, dest, to_reduce_opcode(op), root)
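
    # Hedged example (editor's addition): with the two-rank ``comm`` above and
    # a GpuArray ``a`` present on every rank, the root collects the elementwise
    # sum.  ``a`` is an assumed, pre-existing array; only GpuComm.reduce is
    # from this module.
    #
    #   >>> out = comm.reduce(a, 'sum')      # root rank: returns a new GpuArray
    #   >>> comm.reduce(a, 'sum', root=0)    # other ranks: send data, return None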

    def all_reduce(self, GpuArray src not None, op, GpuArray dest=None):
        """
        all_reduce(self, src, op, dest=None)

        AllReduce collective operation for ranks in a communicator world.

        Parameters
        ----------
        src: GpuArray
            Array to be reduced.
        op: str
            Key indicating operation type.
        dest: GpuArray
            Array to collect reduce operation result.

        Notes
        -----
        * If a caller does not provide the `dest` argument, a new compatible
          :class:`GpuArray` will be created and the result returned in it.

        """
        if dest is None:
            return pygpu_make_all_reduced(self, src, to_reduce_opcode(op))
        comm_all_reduce(self, src, dest, to_reduce_opcode(op))
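
    # Hedged example (editor's addition): every rank receives the full
    # reduction result; ``a`` is an assumed pre-existing GpuArray as above.
    #
    #   >>> out = comm.all_reduce(a, '+')          # new array with the elementwise sum
    #   >>> comm.all_reduce(a, 'max', dest=out)    # or reduce into an existing array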

    def reduce_scatter(self, GpuArray src not None, op, GpuArray dest=None):
        """
        reduce_scatter(self, src, op, dest=None)

        ReduceScatter collective operation for ranks in a communicator world.

        Parameters
        ----------
        src: GpuArray
            Array to be reduced.
        op: str
            Key indicating operation type.
        dest: GpuArray
            Array to collect reduce operation scattered result.

        Notes
        -----
        * If a caller does not provide the `dest` argument, a new compatible
          :class:`GpuArray` will be created and the result returned in it.

        """
        if dest is None:
            return pygpu_make_reduce_scattered(self, src, to_reduce_opcode(op))
        comm_reduce_scatter(self, src, dest, to_reduce_opcode(op))
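
    # Hedged example (editor's addition): each rank ends up with an equal,
    # contiguous slice of the reduced array (see the worked shape example
    # after pygpu_make_reduce_scattered below); ``a`` is assumed as above.
    #
    #   >>> part = comm.reduce_scatter(a, 'prod')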

    def broadcast(self, GpuArray array not None, int root=-1):
        """
        broadcast(self, array, root=-1)

        Broadcast collective operation for ranks in a communicator world.

        Parameters
        ----------
        array: GpuArray
            Array to be broadcast from the root rank and to be overwritten
            with the broadcast data on all other ranks.
        root: int
            Rank in `GpuComm` which broadcasts its `array`.

        Notes
        -----
        * `root` is necessary when invoking from a non-root rank. Root caller
          does not need to provide `root` argument.

        """
        if root == -1:
            comm_get_rank(self, &root)
        comm_broadcast(self, array, root)
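
    # Hedged example (editor's addition): the call works in place; the root
    # sends the contents of ``a`` and every other rank overwrites its own
    # ``a`` with the received data.
    #
    #   >>> comm.broadcast(a)            # on the root rank
    #   >>> comm.broadcast(a, root=0)    # on all other ranks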

    def all_gather(self, GpuArray src not None, GpuArray dest=None,
                   unsigned int nd_up=1):
        """
        all_gather(self, src, dest=None, nd_up=1)

        AllGather collective operation for ranks in a communicator world.

        Parameters
        ----------
        src: GpuArray
            Array to be gathered.
        dest: GpuArray
            Array to receive all gathered arrays from ranks in `GpuComm`.
        nd_up: int
            Used when creating the result array. Indicates how many extra
            dimensions the user wants the result to have. Default is 1,
            which means that the result will store each rank's gathered
            array in one extra new dimension.

        Notes
        -----
        * Providing `nd_up` == 0 means that gathered arrays will be appended to
          the dimension with the largest stride.

        """
        if dest is None:
            return pygpu_make_all_gathered(self, src, nd_up)
        comm_all_gather(self, src, dest)
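
    # Hedged example (editor's addition): with 2 ranks and a C-contiguous
    # ``a`` of shape (3, 4), the default nd_up=1 yields a (2, 3, 4) result,
    # one slot per rank along a new leading axis.
    #
    #   >>> gathered = comm.all_gather(a)         # shape (2, 3, 4)
    #   >>> flat = comm.all_gather(a, nd_up=0)    # shape (6, 4), concatenated on axis 0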


cdef dict TO_RED_OP = {
    '+': GA_SUM,
    "sum": GA_SUM,
    "add": GA_SUM,
    '*': GA_PROD,
    "prod": GA_PROD,
    "product": GA_PROD,
    "mul": GA_PROD,
    "max": GA_MAX,
    "maximum": GA_MAX,
    "min": GA_MIN,
    "minimum": GA_MIN,
    }

cdef int to_reduce_opcode(op) except -1:
    res = TO_RED_OP.get(op.lower())
    if res is not None:
        return res
    raise ValueError, "Invalid reduce operation: %s" % (str(op))

cdef gpucontext* comm_context(GpuComm comm) except NULL:
    cdef gpucontext* res
    res = gpucomm_context(comm.c)
    if res is NULL:
        raise GpuArrayException, "Invalid communicator or destroyed context"
    return res

cdef int comm_generate_id(gpucontext* ctx, gpucommCliqueId* comm_id) except -1:
    cdef int err
    err = gpucomm_gen_clique_id(ctx, comm_id)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(ctx, err)

cdef int comm_get_count(GpuComm comm, int* gpucount) except -1:
    cdef int err
    err = gpucomm_get_count(comm.c, gpucount)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_get_rank(GpuComm comm, int* gpurank) except -1:
    cdef int err
    err = gpucomm_get_rank(comm.c, gpurank)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_reduce_from(GpuComm comm, GpuArray src, int opcode,
                          int root) except -1:
    cdef int err
    err = GpuArray_reduce_from(&src.ga, opcode, root, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_reduce(GpuComm comm, GpuArray src, GpuArray dest, int opcode,
                     int root) except -1:
    cdef int err
    err = GpuArray_reduce(&src.ga, &dest.ga, opcode, root, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_all_reduce(GpuComm comm, GpuArray src, GpuArray dest,
                         int opcode) except -1:
    cdef int err
    err = GpuArray_all_reduce(&src.ga, &dest.ga, opcode, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_reduce_scatter(GpuComm comm, GpuArray src, GpuArray dest,
                             int opcode) except -1:
    cdef int err
    err = GpuArray_reduce_scatter(&src.ga, &dest.ga, opcode, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_broadcast(GpuComm comm, GpuArray arr, int root) except -1:
    cdef int err
    err = GpuArray_broadcast(&arr.ga, root, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef int comm_all_gather(GpuComm comm, GpuArray src, GpuArray dest) except -1:
    cdef int err
    err = GpuArray_all_gather(&src.ga, &dest.ga, comm.c)
    if err != GA_NO_ERROR:
        raise get_exc(err), gpucontext_error(comm_context(comm), err)

cdef api GpuArray pygpu_make_reduced(GpuComm comm, GpuArray src, int opcode):
    cdef GpuArray res
    res = pygpu_empty_like(src, GA_ANY_ORDER, -1)
    cdef int rank
    comm_get_rank(comm, &rank)
    comm_reduce(comm, src, res, opcode, rank)
    return res

cdef api GpuArray pygpu_make_all_reduced(GpuComm comm, GpuArray src, int opcode):
    cdef GpuArray res
    res = pygpu_empty_like(src, GA_ANY_ORDER, -1)
    comm_all_reduce(comm, src, res, opcode)
    return res

cdef api GpuArray pygpu_make_reduce_scattered(GpuComm comm, GpuArray src, int opcode):
    if src.ga.nd < 1:
        raise TypeError, "Source GpuArray must have number of dimensions >= 1"

    cdef GpuArray res
    cdef int gpucount
    cdef bint is_c_cont
    cdef unsigned int nd
    cdef size_t chosen_dim_size
    cdef size_t* dims
    cdef unsigned int j

    comm_get_count(comm, &gpucount)
    is_c_cont = GpuArray_IS_C_CONTIGUOUS(&src.ga)
    nd = src.ga.nd
    dims = <size_t*>calloc(nd, sizeof(size_t))
    if dims == NULL:
        raise MemoryError, "Could not allocate dims"

    try:
        if is_c_cont:
            # Smallest in index dimension has the largest stride
            if src.ga.dimensions[0] % gpucount == 0:
                chosen_dim_size = src.ga.dimensions[0] / gpucount
                if chosen_dim_size != 1:
                    dims[0] = chosen_dim_size
                    for j in range(1, nd):
                        dims[j] = src.ga.dimensions[j]
                else:
                    for j in range(nd - 1):
                        dims[j] = src.ga.dimensions[1 + j]
                    nd -= 1
            else:
                raise TypeError, "Source GpuArray cannot be split in %d c-contiguous arrays" % (gpucount)
        else:
            # Largest in index dimension has the largest stride
            if src.ga.dimensions[nd - 1] % gpucount == 0:
                chosen_dim_size = src.ga.dimensions[nd - 1] / gpucount
                for j in range(nd - 1):
                    dims[j] = src.ga.dimensions[j]
                if chosen_dim_size != 1:
                    dims[nd - 1] = chosen_dim_size
                else:
                    nd -= 1
            else:
                raise TypeError, "Source GpuArray cannot be split in %d f-contiguous arrays" % (gpucount)
        res = pygpu_empty(nd, dims, src.ga.typecode,
                          GA_C_ORDER if is_c_cont else GA_F_ORDER,
                          src.context, type(src))
        comm_reduce_scatter(comm, src, res, opcode)
    finally:
        free(dims)

    return res
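
# Worked shape example (editor's addition) for pygpu_make_reduce_scattered:
# with gpucount == 2 and a C-contiguous src of shape (4, 5) the leading axis
# is split, so each rank gets a (2, 5) result; a (2, 5) src splits to size 1
# on that axis, which is then dropped, leaving shape (5,).  For an
# F-contiguous src the trailing axis is split instead, e.g. (5, 4) -> (5, 2).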

cdef api GpuArray pygpu_make_all_gathered(GpuComm comm, GpuArray src,
                                          unsigned int nd_up):
    if src.ga.nd < 1:
        raise TypeError, "Source GpuArray must have number of dimensions >= 1"

    cdef GpuArray res
    cdef int gpucount
    cdef bint is_c_cont
    cdef unsigned int nd
    cdef size_t* dims
    cdef unsigned int j

    comm_get_count(comm, &gpucount)
    is_c_cont = GpuArray_IS_C_CONTIGUOUS(&src.ga)
    nd = src.ga.nd + nd_up
    dims = <size_t*>calloc(nd, sizeof(size_t))
    if dims == NULL:
        raise MemoryError, "Could not allocate dims"

    try:
        if is_c_cont:
            # Smallest in index dimension has the largest stride
            if nd_up == 0:
                dims[0] = <size_t>gpucount * src.ga.dimensions[0]
                for j in range(1, nd):
                    dims[j] = src.ga.dimensions[j]
            else:
                dims[0] = <size_t>gpucount
                for j in range(1, nd_up):
                    dims[j] = 1
                for j in range(src.ga.nd):
                    dims[nd_up + j] = src.ga.dimensions[j]
        else:
            # Largest in index dimension has the largest stride
            if nd_up == 0:
                dims[nd - 1] = <size_t>gpucount * src.ga.dimensions[nd - 1]
                for j in range(nd - 1):
                    dims[j] = src.ga.dimensions[j]
            else:
                dims[nd - 1] = <size_t>gpucount
                for j in range(nd_up - 1):
                    dims[src.ga.nd + j] = 1
                for j in range(src.ga.nd):
                    dims[j] = src.ga.dimensions[j]
        res = pygpu_empty(nd, dims, src.ga.typecode,
                          GA_C_ORDER if is_c_cont else GA_F_ORDER,
                          src.context, type(src))
        comm_all_gather(comm, src, res)
    finally:
        free(dims)

    return res
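
# Worked shape example (editor's addition) for pygpu_make_all_gathered: with
# gpucount == 2 and a C-contiguous src of shape (3, 4), nd_up == 1 gives a
# (2, 3, 4) result, nd_up == 0 concatenates along the leading axis giving
# (6, 4), and nd_up == 2 gives (2, 1, 3, 4).  For an F-contiguous src the new
# axes go at the end instead, e.g. nd_up == 1 on a (3, 4) src gives (3, 4, 2).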