File: add_bias.py

Package: pytorch 1.13.1+dfsg-4 (Debian bookworm, area: main)
## @package add_bias
# Module caffe2.python.layers.add_bias
#
# Defines AddBias, a model layer that adds a learnable bias vector to its
# input record, broadcasting it across the leading (batch) dimensions.

from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import math


class AddBias(ModelLayer):
    """Layer that adds a learnable bias vector to a scalar input record."""

    def __init__(self, model, input_record, bias_init=None,
                 bias_optim=None, name='add_bias'):
        super(AddBias, self).__init__(model, name, input_record)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert len(input_record.field_type().shape) > 0, (
            "AddBias expects an input record with at least one known dimension")

        input_dims = input_record.field_type().shape[0]
        assert input_dims > 0, (
            "AddBias expects input dimensions > 0, got {}".format(input_dims))

        # Default bias initialization: uniform in [-1/sqrt(d), 1/sqrt(d)],
        # where d is the leading dimension of the input record's shape.
        scale = math.sqrt(1.0 / input_dims)
        bias_init = bias_init if bias_init else (
            'UniformFill', {'min': -scale, 'max': scale})

        # Learnable bias parameter of shape [input_dims].
        self.b = self.create_param(
            param_name='b',
            shape=[input_dims, ],
            initializer=bias_init,
            optimizer=bias_optim,
        )

        # Output keeps the input's base type and feature dimension.
        self.output_schema = schema.Scalar(
            (input_record.field_type().base, (input_dims, )),
            self.get_next_blob_reference('output')
        )

    def add_ops(self, net):
        # Broadcasted elementwise add: output = input + b.
        net.Add(self.input_record.field_blobs() + [self.b],
                self.output_schema.field_blobs(), broadcast=1)
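
For reference, below is a minimal NumPy sketch (not part of the file, and independent of the Caffe2 runtime) of the computation this layer sets up: the bias is drawn uniformly from [-1/sqrt(d), 1/sqrt(d)], where d is the leading dimension of the input record's shape, and is then broadcast-added to each row of a batch. The function name and shapes are illustrative only.

import numpy as np

def add_bias_reference(x, seed=0):
    """Reference computation for AddBias on a [batch, d] input."""
    d = x.shape[1]
    scale = np.sqrt(1.0 / d)
    # Mirrors the default bias_init: ('UniformFill', {'min': -scale, 'max': scale}).
    b = np.random.default_rng(seed).uniform(-scale, scale, size=(d,))
    # Broadcasted elementwise add, analogous to net.Add(..., broadcast=1).
    return x + b

x = np.ones((4, 3), dtype=np.float32)
print(add_bias_reference(x).shape)  # -> (4, 3)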