File: test_ai_project_evaluations_operations_async.py

# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from devtools_testutils.aio import recorded_by_proxy_async
from testpreparer import AIProjectPreparer
from testpreparer_async import AIProjectClientTestBaseAsync


@pytest.mark.skip("you may need to update the auto-generated test case before running it")
class TestAIProjectEvaluationsOperationsAsync(AIProjectClientTestBaseAsync):
    @AIProjectPreparer()
    @recorded_by_proxy_async
    async def test_evaluations_get(self, aiproject_endpoint):
        client = self.create_async_client(endpoint=aiproject_endpoint)
        response = await client.evaluations.get(
            name="str",
        )

        # add assertions that match your recorded data, for example:
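        # A minimal sketch, assuming the returned Evaluation-like model
        # exposes an `id` attribute (adjust to the actual model shape):
        assert response is not None
        assert response.id is not None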

    @AIProjectPreparer()
    @recorded_by_proxy_async
    async def test_evaluations_list(self, aiproject_endpoint):
        client = self.create_async_client(endpoint=aiproject_endpoint)
        response = client.evaluations.list()
        result = [r async for r in response]
        # add assertions that match your recorded data, for example:
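        # A minimal sketch, assuming list() yields Evaluation-like models,
        # each carrying an `id` attribute:
        assert isinstance(result, list)
        for evaluation in result:
            assert evaluation.id is not None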

    @AIProjectPreparer()
    @recorded_by_proxy_async
    async def test_evaluations_create(self, aiproject_endpoint):
        client = self.create_async_client(endpoint=aiproject_endpoint)
        response = await client.evaluations.create(
            evaluation={
                "data": "input_data",
                "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}},
                "id": "str",
                "description": "str",
                "displayName": "str",
                "properties": {"str": "str"},
                "status": "str",
                "tags": {"str": "str"},
            },
        )

        # add assertions that match your recorded data, for example:
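        # A minimal sketch, assuming the service echoes back the request's
        # displayName and tags as snake_case model attributes:
        assert response is not None
        assert response.display_name == "str"
        assert response.tags == {"str": "str"}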

    @AIProjectPreparer()
    @recorded_by_proxy_async
    async def test_evaluations_create_agent_evaluation(self, aiproject_endpoint):
        client = self.create_async_client(endpoint=aiproject_endpoint)
        response = await client.evaluations.create_agent_evaluation(
            evaluation={
                "appInsightsConnectionString": "str",
                "evaluators": {"str": {"id": "str", "dataMapping": {"str": "str"}, "initParams": {"str": {}}}},
                "runId": "str",
                "redactionConfiguration": {"redactScoreProperties": bool},
                "samplingConfiguration": {"maxRequestRate": 0.0, "name": "str", "samplingPercent": 0.0},
                "threadId": "str",
            },
        )

        # add assertions that match your recorded data, for example:
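        # A minimal sketch, assuming the returned AgentEvaluation-like model
        # reports a `status` for the submitted run:
        assert response is not None
        assert response.status is not None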