File: test_get_output_data.py

Package: python-apsystems-ez1 2.3.0-2 (area: main; suites: forky, sid, trixie)
File content (106 lines):
import pytest
from APsystemsEZ1 import ReturnOutputData
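# `ReturnOutputData` is the container returned by get_output_data(); its
# fields p1/e1/te1 and p2/e2/te2 presumably hold per-channel power, daily
# energy, and lifetime energy for the inverter's two inputs. The
# `mock_response` fixture used below is provided by the suite's conftest.py
# and is not defined in this file.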


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "response_data, expected_output, test_id",
    [
        # Happy path tests with various realistic test values
        (
            {
                "data": {
                    "p1": 100.0,
                    "e1": 50.0,
                    "te1": 500.0,
                    "p2": 200.0,
                    "e2": 75.0,
                    "te2": 750.0,
                }, "status": 0
            },
            ReturnOutputData(
                p1=100.0, e1=50.0, te1=500.0, p2=200.0, e2=75.0, te2=750.0
            ),
            "happy_path_1",
        ),
        (
            {
                "data": {
                    "p1": 0.0,
                    "e1": 0.0,
                    "te1": 0.0,
                    "p2": 0.0,
                    "e2": 0.0,
                    "te2": 0.0,
                }, "status": 0
            },
            ReturnOutputData(p1=0.0, e1=0.0, te1=0.0, p2=0.0, e2=0.0, te2=0.0),
            "happy_path_2",
        ),
        # Edge cases
        (
            {
                "data": {
                    "p1": -1.0,
                    "e1": -1.0,
                    "te1": -1.0,
                    "p2": -1.0,
                    "e2": -1.0,
                    "te2": -1.0,
                }, "status": 0
            },
            ReturnOutputData(p1=-1.0, e1=-1.0, te1=-1.0, p2=-1.0, e2=-1.0, te2=-1.0),
            "edge_case_negative_values",
        ),
    ],
)
async def test_get_output_data_happy_paths(
    response_data, expected_output, test_id, mock_response
):
    # Arrange
    ez1m = mock_response(response_data)

    # Act
    result = await ez1m.get_output_data()

    # Assert
    assert result == expected_output


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "response_data, test_id",
    [
        # Error cases
        ({"data": {}, "status": 0}, "error_case_empty_data"),
    ],
)
async def test_get_output_data_error_empty_data(response_data, test_id, mock_response):
    # Arrange
    ez1m = mock_response(response_data)

    # Assert
    with pytest.raises(TypeError) as exc_info:
        await ez1m.get_output_data()
    assert "missing 6 required positional arguments" in str(
        exc_info.value
    ), f"Test Failed: {test_id}"


@pytest.mark.asyncio
@pytest.mark.parametrize(
    "response_data, test_id",
    [
        # Error cases
        (None, "error_case_none_response"),
    ],
)
async def test_get_output_data_error_no_response(response_data, test_id, mock_response):
    # Arrange
    ez1m = mock_response(response_data)

    # Act
    result = await ez1m.get_output_data()

    # Assert
    assert result is None
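

The `mock_response` fixture these tests depend on is not shown here; in the packaged test suite it would normally come from a conftest.py. Below is a minimal sketch of such a fixture, not the package's actual implementation: the `APsystemsEZ1M` constructor arguments and the patched attribute name (`_request`) are assumptions for illustration and may differ from the real code.

# conftest.py (sketch) -- hypothetical stand-in for the real fixture.
# Assumption: APsystemsEZ1M(ip_address, port) exists and get_output_data()
# awaits an internal coroutine (patched here as `_request`) that returns the
# already-parsed JSON payload; the real attribute name may differ.
from unittest.mock import AsyncMock

import pytest

from APsystemsEZ1 import APsystemsEZ1M


@pytest.fixture
def mock_response():
    def _factory(payload):
        # Client pointed at a dummy address; no network traffic occurs.
        ez1m = APsystemsEZ1M("127.0.0.1", 8050)
        # Replace the HTTP layer with a stub that returns `payload` verbatim.
        ez1m._request = AsyncMock(return_value=payload)
        return ez1m

    return _factory

With a fixture shaped like this, `mock_response(response_data)` yields an inverter client whose `get_output_data()` call parses the supplied payload exactly as the tests above expect.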