File: slow_eval.py

package info: python-azure 20250603+git-1
  • links: PTS, VCS
  • area: main
  • in suites: forky, sid, trixie
  • size: 851,724 kB
  • sloc: python: 7,362,925; ansic: 804; javascript: 287; makefile: 195; sh: 145; xml: 109
file content: 34 lines, 1,120 bytes
# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import asyncio
from typing import Dict
from typing_extensions import override

from azure.ai.evaluation._evaluators._common import EvaluatorBase


class SlowEvaluator(EvaluatorBase[str]):
    """Test evaluator that just returns the input after a slight delay. Used for testing performance."""

    def __call__(
        self,
        *,
        query: str,
    ) -> Dict[str, str]:
        """Evaluate a collection of content safety metrics for the given query/response pair

        :keyword query: The query to be evaluated.
        :paramtype query: str
        :keyword response: The response to be evaluated.
        :paramtype response: str
        :return: The content safety scores.
        :rtype: Dict[str, Union[str, float]]
        """
        return super().__call__(query=query)

    @override
    async def _do_eval(self, eval_input: Dict) -> Dict[str, str]:
        # Non-blocking delay: a plain time.sleep here would stall the event
        # loop while the evaluator is "working".
        await asyncio.sleep(0.5)
        return {"result": "done"}