File: functional.py

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm)

# @package functional
# Module caffe2.python.layers.functional

from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
    ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


class Functional(ModelLayer):
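    """Layer that wraps an arbitrary function as a model layer.

    The wrapped ``function(net, input_record, output_schema, **kwargs)`` is
    expected to add operators to ``net`` that fill the blobs of
    ``output_schema``; ``add_ops`` simply delegates to it. Output types are
    taken from ``output_dtypes`` when given; otherwise shapes and types are
    inferred by running the function once against a temporary net.
    """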

    def __init__(self, model, input_record, output_names_or_num, function,
                 name='functional', output_dtypes=None, tags=None, **kwargs):

        # allow coercion of plain lists/tuples/dicts into a schema Record
        input_record = schema.as_record(input_record)

        super(Functional, self).__init__(
            model, name, input_record, tags=tags, **kwargs)
        self._function = function
        self._kwargs = kwargs
        return_struct = (
            isinstance(output_names_or_num, list) or
            (isinstance(output_names_or_num, int) and
             output_names_or_num != 1)
        )
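        # e.g. 2 or ['y0', 'y1'] -> Struct output; 1 or 'y' -> Scalar output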

        with scope.NameScope(self.name, reset=True):
            if isinstance(output_names_or_num, int):
                struct_output_schema = schema.NewRecord(
                    model.net, schema.RawTuple(output_names_or_num))
            elif isinstance(output_names_or_num, schema.Field):
                self.output_schema = output_names_or_num.clone(keep_blobs=True)
                return
            else:
                if not isinstance(output_names_or_num, list):
                    output_names_or_num = [output_names_or_num]
                out_tuple = [(out, np.void) for out in output_names_or_num]
                struct_output_schema = schema.NewRecord(
                    model.net, schema.Struct(*out_tuple))

        num_outputs = len(struct_output_schema.field_blobs())

        # The functional layer returns a Struct if there is more than one
        # output or the outputs were given as a list; otherwise it returns a
        # Scalar.
        if return_struct:
            self.output_schema = struct_output_schema
        else:
            self.output_schema = struct_output_schema[0]

        # If output_dtypes is provided, use it to type the output schema and
        # skip inference; otherwise the shapes and types are inferred below.
        # A single dtype (e.g. np.float32) is applied to every output.
        if output_dtypes is not None:
            if not isinstance(output_dtypes, list):
                output_dtypes = [output_dtypes] * num_outputs
            assert len(output_dtypes) == num_outputs
            for dtype, scalar in zip(output_dtypes,
                                     self.output_schema.all_scalars()):
                scalar.set_type(dtype)
            return

        # Fake execution of the function to infer shapes and types automatically
        had_issues = False
        try:
            type_net = core.Net('_temp_type_and_shape_inference_net')
            schema.InitEmptyRecord(type_net, input_record, enforce_types=True)

            function(type_net, self.input_record, self.output_schema, **kwargs)
            (shapes, types) = workspace.InferShapesAndTypes([type_net], {})
            for i in range(num_outputs):
                scalar_schema = (self.output_schema[i] if return_struct
                                 else self.output_schema)
                blob = scalar_schema()
                if blob not in types or blob not in shapes:
                    had_issues = True
                    continue
                if shapes[blob] == []:
                    # Scalar type
                    shape = tuple()
                elif shapes[blob][0] == 0:
                    # Leading batch dimension (reported as 0) - strip it
                    shape = tuple(shapes[blob][1:])
                else:
                    logger.warning("unexpected shape: {}".format(shapes[blob]))
                    # If batch dimension is not first - give up on shape
                    # inference for that blob
                    had_issues = True
                    continue

                # TODO(amalevich): Move it to some shared library
                dtype = None
                if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
                    dtype = (np.float64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
                    dtype = (np.float32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT32:
                    dtype = (np.int32, shape)
                elif types[blob] == caffe2_pb2.TensorProto.INT64:
                    dtype = (np.int64, shape)
                elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
                    dtype = (np.float16, shape)

                if dtype is not None:
                    scalar_schema.set_type(dtype)
        except TypeError as ex:
            had_issues = True
            logger.warning(str(ex))

        if had_issues:
            logger.warning(
                "Type inference had problems for layer: {}".format(self.name))

    def add_ops(self, net):
        self._function(
            net, self.input_record, self.output_schema, **self._kwargs)
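

# Usage sketch (hypothetical, with illustrative names; assumes `model` is a
# caffe2.python.layer_model_helper.LayerModelHelper, so that registered
# layers such as this one are reachable as `model.Functional`):
#
#     def apply_softmax(net, in_record, out_record, **kwargs):
#         # The wrapped function adds ops that fill the output blobs.
#         net.Softmax(in_record.field_blobs(), out_record.field_blobs())
#
#     softmax = model.Functional(input_record, 1, apply_softmax,
#                                name='softmax', output_dtypes=np.float32)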