File: split.py

package info
pytorch 1.13.1+dfsg-4
  • area: main
  • in suites: bookworm
  • size: 139,252 kB
  • sloc: cpp: 1,100,274; python: 706,454; ansic: 83,052; asm: 7,618; java: 3,273; sh: 2,841; javascript: 612; makefile: 323; xml: 269; ruby: 185; yacc: 144; objc: 68; lex: 44
file content:
## @package split
# Module caffe2.python.layers.split


from caffe2.python import schema
from caffe2.python.layers.layers import (
    ModelLayer,
)


class Split(ModelLayer):
    """Splits a Scalar input into num_splits output Scalars along `axis`,
    where axis 0 is the implicit batch dimension."""

    def __init__(self, model, input_record, num_splits=1, axis=1,
                 name='split', split=None, **kwargs):
        super(Split, self).__init__(model, name, input_record, **kwargs)
        self.axis = axis
        # The first dimension is assumed to be the batch dimension, so the
        # corresponding index into the schema shape is axis - 1.
        axis -= 1
        assert axis >= 0
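        # Example: with the default axis=1 on a (batch, 6) input, the schema
        # shape is (6,) and the adjusted index is 0.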

        assert isinstance(input_record, schema.Scalar), (
            "Incorrect input type. Expected Scalar, but received: {0}".format(
                input_record
            )
        )

        input_shape = input_record.field_type().shape
        # The adjusted axis must be a valid index into the schema shape
        # (the original `>=` check would allow an out-of-range index below).
        assert len(input_shape) > axis
        if split is None:
            # Even split: the target dimension must divide evenly.
            assert input_shape[axis] % num_splits == 0
        else:
            # Explicit section sizes must cover the dimension exactly.
            num_splits = len(split)
            assert input_shape[axis] == sum(split)

        if split is None:
            # Even split: every output shares the same shape.
            output_shape = list(input_shape)
            output_shape[axis] = output_shape[axis] // num_splits
            output_shapes = [output_shape] * num_splits
        else:
            # Uneven split: one shape per requested section size.
            output_shapes = []
            for size in split:
                output_shape_i = list(input_shape)
                output_shape_i[axis] = size
                output_shapes.append(output_shape_i)
        data_type = input_record.field_type().base

        output_scalars = [
            schema.Scalar(
                (data_type, output_shapes[i]),
                self.get_next_blob_reference('output_{}'.format(i)),
            )
            for i in range(num_splits)
        ]
        self.output_schema = schema.Tuple(*output_scalars)
        self.split = split

    def add_ops(self, net):
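        # Emit the caffe2 Split operator; the constructor has already
        # validated that self.split (or an even num_splits split) covers
        # the target dimension exactly.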
        net.Split(
            self.input_record.field_blobs(),
            self.output_schema.field_blobs(),
            split=self.split,
            axis=self.axis,
        )
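
A minimal usage sketch of the operator that add_ops emits, assuming a working caffe2 build; the net and blob names (split_example, x, y0, y1) are illustrative and not part of split.py:

from caffe2.python import core, workspace
import numpy as np

# Feed a (2, 6) blob and split it evenly into two (2, 3) halves along
# axis 1, mirroring what Split.add_ops emits for num_splits=2, split=None.
workspace.FeedBlob("x", np.arange(12, dtype=np.float32).reshape(2, 6))
net = core.Net("split_example")
net.Split(["x"], ["y0", "y1"], axis=1)
workspace.RunNetOnce(net)
print(workspace.FetchBlob("y0").shape)  # (2, 3)
print(workspace.FetchBlob("y1").shape)  # (2, 3)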