File: apmeter_test.py

package info (click to toggle)
pytorch 1.13.1%2Bdfsg-4
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 139,252 kB
  • sloc: cpp: 1,100,274; python: 706,454; ansic: 83,052; asm: 7,618; java: 3,273; sh: 2,841; javascript: 612; makefile: 323; xml: 269; ruby: 185; yacc: 144; objc: 68; lex: 44
file content (84 lines) | stat: -rw-r--r-- 2,738 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84





from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np


def calculate_ap(predictions, labels):
    """Reference (NumPy) implementation of per-class average precision.

    Args:
        predictions: (N, D) float array of scores, one column per class.
        labels: (N, D) {0, 1} array marking the positive examples.

    Returns:
        (D,) float array; entry k is the average precision of column k
        (0 when the column contains no positive labels).
    """
    N, D = predictions.shape
    ap = np.zeros(D)
    # 1-based rank positions, used as the precision denominators.
    num_range = np.arange(N, dtype=np.float32) + 1
    for k in range(D):
        scores = predictions[:, k]
        label = labels[:, k]
        # Stable sort by descending score so ties keep input order,
        # matching the operator's deterministic ordering.
        sortind = np.argsort(-scores, kind='mergesort')
        truth = label[sortind]
        precision = np.cumsum(truth) / num_range
        # Average precision = mean of precision at each true positive.
        # Use builtin `bool`: the `np.bool` alias was removed in NumPy 1.24.
        # `max(1, ...)` guards against division by zero when a class has
        # no positive labels.
        ap[k] = precision[truth.astype(bool)].sum() / max(1, truth.sum())
    return ap


class TestAPMeterOps(hu.HypothesisTestCase):
    """Checks the APMeter operator against the NumPy reference above."""

    @given(predictions=hu.arrays(
               dims=[10, 3],
               elements=hu.floats(allow_nan=False, allow_infinity=False,
                                  min_value=0.1, max_value=1)),
           labels=hu.arrays(
               dims=[10, 3],
               dtype=np.int32,
               elements=st.integers(min_value=0, max_value=1)),
           **hu.gcs_cpu_only)
    def test_average_precision(self, predictions, labels, gc, dc):
        """Buffer large enough to hold every example: AP over all rows."""
        op = core.CreateOperator("APMeter",
                                 ["predictions", "labels"],
                                 ["AP"],
                                 buffer_size=10)

        def reference(predictions, labels):
            return (calculate_ap(predictions, labels), )

        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=[predictions, labels],
                                   reference=reference)

    @given(predictions=hu.arrays(
               dims=[10, 3],
               elements=hu.floats(allow_nan=False, allow_infinity=False,
                                  min_value=0.1, max_value=1)),
           labels=hu.arrays(
               dims=[10, 3],
               dtype=np.int32,
               elements=st.integers(min_value=0, max_value=1)),
           **hu.gcs_cpu_only)
    def test_average_precision_small_buffer(self, predictions, labels, gc, dc):
        """Buffer smaller than the input: only the newest rows survive."""
        op = core.CreateOperator("APMeter",
                                 ["predictions", "labels"],
                                 ["AP"],
                                 buffer_size=5)

        def reference(predictions, labels):
            # The meter retains only the most recent 5 examples, so the
            # expected AP is computed over the trailing half of the input.
            return (calculate_ap(predictions[5:], labels[5:]), )

        self.assertReferenceChecks(device_option=gc,
                                   op=op,
                                   inputs=[predictions, labels],
                                   reference=reference)