File: test_non_adv_simulator.py

# ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
# flake8: noqa
# type: ignore
import asyncio
from unittest.mock import AsyncMock, patch

import pytest

from azure.ai.evaluation.simulator import Simulator
from azure.ai.evaluation.simulator._utils import JsonLineChatProtocol


@pytest.fixture()
def valid_azure_model_config():
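    """Minimal Azure OpenAI model config (deployment + endpoint) shared by the tests below."""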
    return {
        "azure_deployment": "test_deployment",
        "azure_endpoint": "https://test-endpoint.openai.azure.com/",
    }


@pytest.fixture()
def invalid_azure_model_config():
    # Missing 'azure_endpoint'
    return {
        "azure_deployment": "test_deployment",
    }


@pytest.fixture()
def valid_openai_model_config():
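    """Minimal OpenAI model config (API key + model name) shared by the tests below."""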
    return {
        "api_key": "test_api_key",
        "model": "gpt-3.5-turbo",
    }


@pytest.fixture()
def invalid_openai_model_config():
    # Missing 'model'
    return {
        "api_key": "test_api_key",
    }


@pytest.mark.unittest
class TestSimulator:
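    """Unit tests for the non-adversarial Simulator: construction, model config
    validation, prompty response parsing, and the conversation-generation helpers,
    all exercised against mocks so no real model endpoints are called."""
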
    def test_init_valid_azure_model_config(self, valid_azure_model_config):
        simulator = Simulator(model_config=valid_azure_model_config)
        assert simulator.model_config["azure_deployment"] == "test_deployment"
        assert simulator.model_config["api_version"] == "2024-06-01"

    def test_init_valid_openai_model_config(self, valid_openai_model_config):
        simulator = Simulator(model_config=valid_openai_model_config)
        assert simulator.model_config["model"] == "gpt-3.5-turbo"
        assert simulator.model_config["api_version"] == "2024-06-01"

    def test_init_invalid_azure_model_config(self, invalid_azure_model_config):
        with pytest.raises(ValueError) as exc_info:
            Simulator(model_config=invalid_azure_model_config)
        assert exc_info is not None

    def test_init_invalid_openai_model_config(self, invalid_openai_model_config):
        with pytest.raises(ValueError) as exc_info:
            Simulator(model_config=invalid_openai_model_config)
        assert exc_info is not None

    def test_validate_model_config_valid_azure(self, valid_azure_model_config):
        Simulator._validate_model_config(valid_azure_model_config)  # Should not raise

    def test_validate_model_config_valid_openai(self, valid_openai_model_config):
        Simulator._validate_model_config(valid_openai_model_config)  # Should not raise

    def test_validate_model_config_infer_type_azure(self, valid_azure_model_config):
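        """'type' should be inferred and set to 'azure_openai' when it is missing from an Azure config."""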
        if "type" in valid_azure_model_config:
            del valid_azure_model_config["type"]
        Simulator._validate_model_config(valid_azure_model_config)
        assert valid_azure_model_config["type"] == "azure_openai"

    def test_validate_model_config_infer_type_openai(self, valid_openai_model_config):
        if "type" in valid_openai_model_config:
            del valid_openai_model_config["type"]
        Simulator._validate_model_config(valid_openai_model_config)
        assert valid_openai_model_config["type"] == "openai"

    def test_validate_model_config_unable_to_infer_type(self):
        model_config = {"api_key": "test_api_key"}  # Not enough info to infer type
        with pytest.raises(ValueError) as exc_info:
            Simulator._validate_model_config(model_config)
        assert "Unable to infer 'type' from model_config" in str(exc_info.value)

    def test_validate_model_config_invalid_type(self):
        model_config = {
            "type": "invalid_type",
            "api_key": "test_api_key",
            "model": "gpt-3.5-turbo",
        }
        with pytest.raises(ValueError) as exc_info:
            Simulator._validate_model_config(model_config)
        assert "model_config 'type' must be 'azure_openai' or 'openai'" in str(exc_info.value)

    def test_validate_model_config_none_values(self):
        model_config = {
            "type": "azure_openai",
            "azure_deployment": None,
            "azure_endpoint": "https://test-endpoint.openai.azure.com/",
            "api_key": "test_api_key",
        }
        with pytest.raises(ValueError) as exc_info:
            Simulator._validate_model_config(model_config)
        assert "must not be None" in str(exc_info.value)

    def test_parse_prompty_response_valid_json(self, valid_azure_model_config):
        simulator = Simulator(model_config=valid_azure_model_config)
        response = '{"content": "Test response"}'
        parsed_response = simulator._parse_prompty_response(response=response)
        assert parsed_response == {"content": "Test response"}

    def test_parse_prompty_response_invalid_json(self, valid_azure_model_config):
        simulator = Simulator(model_config=valid_azure_model_config)
        response = "Invalid JSON"
        with pytest.raises(ValueError) as exc_info:
            simulator._parse_prompty_response(response=response)
        assert "Error parsing response content" in str(exc_info.value)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.AsyncPrompty.load")
    async def test_generate_query_responses(self, mock_async_prompty_load, valid_azure_model_config):
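        """With AsyncPrompty.load patched, the parsed query/response list from the mocked flow is returned."""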
        simulator = Simulator(model_config=valid_azure_model_config)
        mock_flow = AsyncMock()
        mock_flow.return_value = '[{"q": "query1", "r": "response1"}]'
        mock_async_prompty_load.return_value = mock_flow

        query_responses = await simulator._generate_query_responses(
            text="Test text",
            num_queries=1,
            query_response_generating_prompty=None,
            query_response_generating_prompty_options={},
            prompty_model_config={},
        )
        assert query_responses == [{"q": "query1", "r": "response1"}]

    @patch("azure.ai.evaluation.simulator._simulator.AsyncPrompty.load")
    def test_load_user_simulation_flow(self, mock_async_prompty_load, valid_azure_model_config):
        simulator = Simulator(model_config=valid_azure_model_config)
        mock_async_prompty_load.return_value = AsyncMock()
        user_flow = simulator._load_user_simulation_flow(
            user_simulator_prompty=None,
            prompty_model_config={},
            user_simulator_prompty_options={},
        )
        assert user_flow is not None

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._load_user_simulation_flow")
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._get_target_response")
    async def test_complete_conversation(
        self, mock_get_target_response, mock_load_user_simulation_flow, valid_azure_model_config
    ):
        simulator = Simulator(model_config=valid_azure_model_config)
        mock_user_flow = AsyncMock()
        mock_user_flow.return_value = {"content": "User response"}
        mock_load_user_simulation_flow.return_value = mock_user_flow
        mock_get_target_response.return_value = "Assistant response", "Assistant context"

        conversation = await simulator._complete_conversation(
            conversation_starter="Hello",
            max_conversation_turns=4,
            task="Test task",
            user_simulator_prompty=None,
            user_simulator_prompty_options={},
            target=AsyncMock(),
            api_call_delay_sec=0,
            progress_bar=AsyncMock(),
        )
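        # With max_conversation_turns=4 and the mocked user/assistant responses,
        # the completed conversation holds four messages, starting with the simulated user turn.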
        assert len(conversation) == 4
        assert conversation[0]["role"] == "user"
        assert conversation[0]["content"] == "User response"
        assert conversation[1]["role"] == "assistant"
        assert conversation[1]["content"] == "Assistant response"

    @pytest.mark.asyncio
    async def test_get_target_response(self, valid_openai_model_config):
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_target = AsyncMock()
        mock_target.return_value = {
            "messages": [
                {"role": "assistant", "content": "Assistant response", "context": "assistant context"},
            ]
        }
        response = await simulator._get_target_response(
            target=mock_target,
            api_call_delay_sec=0,
            conversation_history=AsyncMock(),
        )
        assert response == ("Assistant response", "assistant context")

    @pytest.mark.asyncio
    async def test_call_with_both_conversation_turns_and_text_tasks(self, valid_openai_model_config):
        simulator = Simulator(model_config=valid_openai_model_config)
        with pytest.raises(ValueError, match="Cannot specify both conversation_turns and text/tasks"):
            await simulator(
                target=AsyncMock(),
                max_conversation_turns=2,
                conversation_turns=[["user_turn"]],
                text="some text",
                tasks=[{"task": "task"}],
                api_call_delay_sec=1,
            )

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._simulate_with_predefined_turns", new_callable=AsyncMock)
    async def test_call_with_conversation_turns(self, mock_simulate_with_predefined_turns, valid_openai_model_config):
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_simulate_with_predefined_turns.return_value = [JsonLineChatProtocol({"messages": []})]

        result = await simulator(
            target=AsyncMock(),
            max_conversation_turns=2,
            conversation_turns=[["user_turn"]],
            api_call_delay_sec=1,
        )
        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._generate_query_responses", new_callable=AsyncMock)
    @patch(
        "azure.ai.evaluation.simulator._simulator.Simulator._create_conversations_from_query_responses",
        new_callable=AsyncMock,
    )
    async def test_call_with_text_and_tasks(
        self,
        mock_create_conversations_from_query_responses,
        mock_generate_query_responses,
        valid_openai_model_config,
    ):
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_generate_query_responses.return_value = [{"q": "query", "r": "response"}]
        mock_create_conversations_from_query_responses.return_value = [JsonLineChatProtocol({"messages": []})]

        result = await simulator(
            target=AsyncMock(),
            max_conversation_turns=2,
            text="some text",
            tasks=[{"task": "task"}],
            api_call_delay_sec=1,
            num_queries=1,
        )
        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._generate_query_responses", new_callable=AsyncMock)
    @patch(
        "azure.ai.evaluation.simulator._simulator.Simulator._create_conversations_from_query_responses",
        new_callable=AsyncMock,
    )
    async def test_call_with_num_queries_greater_than_tasks(
        self,
        mock_create_conversations_from_query_responses,
        mock_generate_query_responses,
        valid_openai_model_config,
    ):
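        """A UserWarning is expected when num_queries (2) exceeds the number of tasks (1)."""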
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_generate_query_responses.return_value = [{"q": "query", "r": "response"}]
        mock_create_conversations_from_query_responses.return_value = [JsonLineChatProtocol({"messages": []})]
        tasks = [{"task": "task1"}]

        with pytest.warns(UserWarning, match="You have specified 'num_queries' > len\\('tasks'\\)"):
            result = await simulator(
                target=AsyncMock(),
                max_conversation_turns=2,
                text="some text",
                tasks=tasks,
                api_call_delay_sec=1,
                num_queries=2,
            )
        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._generate_query_responses", new_callable=AsyncMock)
    @patch(
        "azure.ai.evaluation.simulator._simulator.Simulator._create_conversations_from_query_responses",
        new_callable=AsyncMock,
    )
    async def test_call_with_num_queries_less_than_tasks(
        self,
        mock_create_conversations_from_query_responses,
        mock_generate_query_responses,
        valid_openai_model_config,
    ):
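        """A UserWarning is expected when num_queries (1) is smaller than the number of tasks (2)."""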
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_generate_query_responses.return_value = [{"q": "query", "r": "response"}]
        mock_create_conversations_from_query_responses.return_value = [JsonLineChatProtocol({"messages": []})]
        tasks = [{"task": "task1"}, {"task": "task2"}]

        with pytest.warns(UserWarning, match="You have specified 'num_queries' < len\\('tasks'\\)"):
            result = await simulator(
                target=AsyncMock(),
                max_conversation_turns=2,
                text="some text",
                tasks=tasks,
                api_call_delay_sec=1,
                num_queries=1,
            )
        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._get_target_response", new_callable=AsyncMock)
    @patch(
        "azure.ai.evaluation.simulator._simulator.Simulator._extend_conversation_with_simulator", new_callable=AsyncMock
    )
    async def test_simulate_with_predefined_turns(
        self, mock_extend_conversation_with_simulator, mock_get_target_response, valid_openai_model_config
    ):
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_get_target_response.return_value = "assistant_response", "assistant_context"
        mock_extend_conversation_with_simulator.return_value = None

        conversation_turns = [["user_turn"]]
        result = await simulator._simulate_with_predefined_turns(
            target=AsyncMock(),
            max_conversation_turns=2,
            conversation_turns=conversation_turns,
            api_call_delay_sec=1,
            prompty_model_config={},
            user_simulator_prompty=None,
            user_simulator_prompty_options={},
            concurrent_async_tasks=1,
        )

        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)

    @pytest.mark.asyncio
    @patch("azure.ai.evaluation.simulator._simulator.Simulator._complete_conversation", new_callable=AsyncMock)
    async def test_create_conversations_from_query_responses(
        self, mock_complete_conversation, valid_openai_model_config
    ):
        simulator = Simulator(model_config=valid_openai_model_config)
        mock_complete_conversation.return_value = [{"role": "user", "content": "query"}]

        query_responses = [{"q": "query", "r": "response"}]
        tasks = [{"task": "task"}]

        result = await simulator._create_conversations_from_query_responses(
            query_responses=query_responses,
            max_conversation_turns=2,
            tasks=tasks,
            target=AsyncMock(),
            api_call_delay_sec=1,
            user_simulator_prompty=None,
            user_simulator_prompty_options={},
            text="some text",
        )

        assert len(result) == 1
        assert isinstance(result[0], JsonLineChatProtocol)