File: test_get_model_evaluation_results.py

Package: python-azure 20251104+git-1
# pylint: disable=line-too-long,useless-suppression
import functools

from devtools_testutils import AzureRecordedTestCase, EnvironmentVariableLoader, recorded_by_proxy
from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.authoring import TextAuthoringClient
from azure.ai.textanalytics.authoring.models import (
    CustomSingleLabelClassificationDocumentEvalResult,
)

AuthoringPreparer = functools.partial(
    EnvironmentVariableLoader,
    "authoring",
    authoring_endpoint="https://Sanitized.cognitiveservices.azure.com/",
    authoring_key="fake_key",
)
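# NOTE: in live runs EnvironmentVariableLoader pulls the real endpoint and key
# from the AUTHORING_ENDPOINT / AUTHORING_KEY environment variables; in
# playback it injects the sanitized placeholders above, so recordings never
# contain real credentials.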


class TestTextAuthoring(AzureRecordedTestCase):
    def create_client(self, endpoint, key):
        return TextAuthoringClient(endpoint, AzureKeyCredential(key))


class TestTextAuthoringGetModelEvaluationResultsSync(TestTextAuthoring):
    @AuthoringPreparer()
    @recorded_by_proxy
    def test_get_model_evaluation_results(self, authoring_endpoint, authoring_key):
        client = self.create_client(authoring_endpoint, authoring_key)

        project_name = "single-class-project"
        trained_model_label = "model3"

        # Trained-model–scoped call
        project_client = client.get_project_client(project_name)
        results = project_client.trained_model.list_model_evaluation_results(
            trained_model_label,
            string_index_type="UTF16CodeUnit",
        )
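        # The call returns a lazy pager; no service request is issued until
        # iteration begins below.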

        # Assert the pager exists
        assert results is not None, "The evaluation results should not be None."

        for result in results:
            # Base validations
            assert result is not None, "The result should not be None."
            assert result.location is not None, "The result location should not be None."
            assert result.language is not None, "The result language should not be None."

            # Validate classification result
            if isinstance(result, CustomSingleLabelClassificationDocumentEvalResult):
                classification = result.custom_single_label_classification_result
                assert classification is not None, "The classification result should not be None."
                assert (
                    classification.expected_class and classification.expected_class.strip()
                ), "The expected class should not be None or empty."
                assert (
                    classification.predicted_class and classification.predicted_class.strip()
                ), "The predicted class should not be None or empty."

                # Optional: print a couple of fields for recording visibility
                print(f"Document Location: {result.location}")
                print("  Classification:")
                print(f"    Expected: {classification.expected_class}")
                print(f"    Predicted: {classification.predicted_class}")
            else:
                raise AssertionError(f"Unsupported result type: {type(result).__name__}")
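
For context, the same call pattern can be run outside the recorded-test harness. The sketch below is a minimal standalone version using the same azure-ai-textanalytics authoring API exercised above; the environment variable names, project name, and model label are illustrative placeholders, not values required by the SDK.

import os

from azure.core.credentials import AzureKeyCredential
from azure.ai.textanalytics.authoring import TextAuthoringClient

# Placeholder configuration; substitute your own resource values.
endpoint = os.environ["AUTHORING_ENDPOINT"]
key = os.environ["AUTHORING_KEY"]

client = TextAuthoringClient(endpoint, AzureKeyCredential(key))
project_client = client.get_project_client("single-class-project")

# Iterating the pager drives the underlying paged requests.
for result in project_client.trained_model.list_model_evaluation_results(
    "model3",
    string_index_type="UTF16CodeUnit",
):
    print(result.location, result.language)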