File: caffe2/python/layers/uniform_sampling.py

## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling

import numpy as np

from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer


class UniformSampling(ModelLayer):
    """
    Uniform sampling `num_samples - len(input_record)` unique elements from the
    range [0, num_elements). `samples` is the concatenation of input_record and
    the samples. input_record is expected to be unique.
    """

    def __init__(
        self,
        model,
        input_record,
        num_samples,
        num_elements,
        name='uniform_sampling',
        **kwargs
    ):
        super(UniformSampling, self).__init__(
            model, name, input_record, **kwargs
        )

        assert num_elements > num_samples > 0
        assert isinstance(input_record, schema.Scalar)

        self.num_elements = num_elements

        # A constant int64 blob holding num_samples; used in add_ops to
        # compute how many extra elements must be drawn.
        num_examples_init = ('GivenTensorInt64Fill',
                             {'values': [num_samples]})
        self.num_samples = self.create_param(param_name='num_examples',
                                             shape=(1,),
                                             initializer=num_examples_init,
                                             optimizer=model.NoOptim)

        # A constant blob holding the uniform sampling probability,
        # num_samples / num_elements, reported for each returned sample.
        sampling_blob_init = ('ConstantFill',
                              {'value': float(num_samples) / num_elements,
                               'dtype': core.DataType.FLOAT})
        self.sampling_prob = self.create_param(param_name='prob',
                                               shape=(num_samples,),
                                               initializer=sampling_blob_init,
                                               optimizer=model.NoOptim)

        self.output_schema = schema.Struct(
            (
                'samples', schema.Scalar(
                    np.int32, self.get_next_blob_reference("samples")
                )
            ),
            ('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
        )

    def add_ops(self, net):
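        # The sampling probabilities are constants; make sure no gradient
        # flows back into them.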
        net.StopGradient(self.sampling_prob, self.sampling_prob)

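        # Compute how many new elements to draw: num_samples - len(input).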
        shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
        shape = net.Sub([self.num_samples, shape], shape)
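        # Draw that many unique values uniformly from [0, num_elements - 1],
        # excluding the values already present in input_record.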
        samples = net.UniqueUniformFill(
            [shape, self.input_record()],
            net.NextScopedBlob("samples_before_concat"),
            min=0,
            max=self.num_elements - 1,
            input_as_shape=True
        )

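        # The output is input_record followed by the newly drawn samples;
        # ids are not differentiable, so stop the gradient.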
        net.Concat(
            [self.input_record(), samples],
            [self.output_schema.samples(), net.NextScopedBlob("split_info")],
            axis=0
        )
        net.StopGradient(
            self.output_schema.samples(), self.output_schema.samples()
        )
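

# A minimal usage sketch (hedged: it assumes a LayerModelHelper set up as in
# the caffe2 layer tests, and `indices` is a hypothetical int32 input record;
# layers registered as ModelLayer subclasses are reachable as methods on the
# model helper):
#
#     import numpy as np
#     from caffe2.python import schema, workspace
#
#     indices = model.input_feature_schema.indices  # schema.Scalar(np.int32)
#     sampled = model.UniformSampling(
#         indices, num_samples=20, num_elements=100
#     )
#
#     # After instantiating and running the net with `indices` fed:
#     #   workspace.FetchBlob(sampled.samples())        # int32 ids, starting
#     #                                                 # with the input indices
#     #   workspace.FetchBlob(sampled.sampling_prob())  # num_samples/num_elements
#     #                                                 # for each returned id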