import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given, settings
import hypothesis.strategies as st
class LpnormTest(hu.HypothesisTestCase):
    """Tests for the Caffe2 ``LpNorm`` operator (p=1, p=2, averaged p=2)."""

    def _test_Lp_Norm(self, inputs, gc, dc):
        """Run LpNorm for p=1, p=2, and averaged p=2 on ``inputs[0]``.

        Compares each output against the numpy reference norm and runs
        device/gradient checks.  Leaves the blobs ``l1_norm``, ``l2_norm``
        and ``l2_averaged_norm`` in ``self.ws`` for callers to inspect.
        """
        data = inputs[0]
        # Nudge every value away from zero so the numeric gradient checker
        # never straddles the non-differentiable kink of |x| at the origin.
        data += 0.02 * np.sign(data)
        data[data == 0.0] += 0.02
        self.ws.create_blob("X").feed(data)

        # --- L1 norm: sum of absolute values ---
        l1_op = core.CreateOperator(
            'LpNorm',
            ['X'],
            ['l1_norm'],
            p=1,
        )
        self.ws.run(l1_op)
        np.testing.assert_allclose(
            self.ws.blobs["l1_norm"].fetch(),
            np.linalg.norm(data.flatten(), ord=1),
            rtol=1e-4,
            atol=1e-4,
        )
        self.assertDeviceChecks(dc, l1_op, [data], [0])
        # Gradient check wrt X
        self.assertGradientChecks(
            gc, l1_op, [data], 0, [0], stepsize=1e-2, threshold=1e-2)

        # --- L2 norm: the operator returns the *squared* Euclidean norm ---
        l2_op = core.CreateOperator(
            'LpNorm',
            ['X'],
            ['l2_norm'],
            p=2,
        )
        self.ws.run(l2_op)
        np.testing.assert_allclose(
            self.ws.blobs["l2_norm"].fetch(),
            np.linalg.norm(data.flatten(), ord=2) ** 2,
            rtol=1e-4,
            atol=1e-4,
        )
        self.assertDeviceChecks(dc, l2_op, [data], [0])
        # Gradient check wrt X
        self.assertGradientChecks(
            gc, l2_op, [data], 0, [0], stepsize=1e-2, threshold=1e-2)

        # --- averaged L2: squared norm divided by the element count ---
        avg_op = core.CreateOperator(
            'LpNorm',
            ['X'],
            ['l2_averaged_norm'],
            p=2,
            average=True,
        )
        self.ws.run(avg_op)
        np.testing.assert_allclose(
            self.ws.blobs["l2_averaged_norm"].fetch(),
            np.linalg.norm(data.flatten(), ord=2) ** 2 / data.size,
            rtol=1e-4,
            atol=1e-4,
        )

    @given(inputs=hu.tensors(n=1,
                             min_dim=1,
                             max_dim=3,
                             dtype=np.float32),
           **hu.gcs)
    @settings(deadline=10000)
    def test_Lp_Norm(self, inputs, gc, dc):
        """Randomized check of LpNorm against the numpy reference norms."""
        self._test_Lp_Norm(inputs, gc, dc)

    def test_Lp_Norm_empty(self):
        """Empty input yields zero norms; the averaged norm is NaN (0 / 0)."""
        self._test_Lp_Norm(
            [np.array([], dtype=np.float32)], hu.cpu_do, [hu.cpu_do])
        self.assertEqual(self.ws.blobs["l1_norm"].fetch()[0], 0.0)
        self.assertEqual(self.ws.blobs["l2_norm"].fetch()[0], 0.0)
        self.assertTrue(
            np.isnan(self.ws.blobs["l2_averaged_norm"].fetch()[0]))

    @given(x=hu.tensor(
               min_dim=1, max_dim=10, dtype=np.float32,
               elements=st.integers(min_value=-100, max_value=100)),
           p=st.integers(1, 2),
           average=st.integers(0, 1)
           )
    def test_lpnorm_shape_inference(self, x, p, average):
        """Inferred shape/type for LpNorm must match the executed output."""
        workspace.FeedBlob('x', x)
        net = core.Net("lpnorm_test")
        out = net.LpNorm(['x'], p=p, average=bool(average))
        shapes, types = workspace.InferShapesAndTypes([net])
        workspace.RunNetOnce(net)
        self.assertEqual(shapes[out], list(workspace.blobs[out].shape))
        self.assertEqual(types[out], core.DataType.FLOAT)