# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) Python Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import pytest
from typing import Optional
from devtools_testutils.aio import recorded_by_proxy_async
from azure.onlineexperimentation.aio import OnlineExperimentationClient
from azure.onlineexperimentation.models import (
    DesiredDirection,
    EventCountMetricDefinition,
    ExperimentMetric,
    LifecycleStage,
    ObservedEvent,
)
from testpreparer import OnlineExperimentationPreparer
from testpreparer_async import OnlineExperimentationClientTestBaseAsync
from azure.core.exceptions import ResourceNotFoundError, HttpResponseError
from azure.core import MatchConditions


class TestOnlineExperimentationAsync(OnlineExperimentationClientTestBaseAsync):

    async def _create_test_metric(
        self,
        client: OnlineExperimentationClient,
        metric_id: Optional[str] = None,
        display_name: Optional[str] = None,
        description: Optional[str] = None,
        categories: Optional[list[str]] = None,
        desired_direction: DesiredDirection = DesiredDirection.INCREASE,
    ):
        """Helper function to create a test metric"""
        if metric_id is None:
            metric_id = "test_metric_fixed_id"

        metric_definition = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name=display_name or f"Test Metric {metric_id}",
            description=description or f"A metric created for testing purposes ({metric_id})",
            categories=categories or ["Test"],
            desired_direction=desired_direction,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="TestEvent")),
        )

        response = await client.create_or_update_metric(experiment_metric_id=metric_id, resource=metric_definition)

        return response

    # CREATE operations
    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_create_or_update_metric(self, azure_onlineexperimentation_endpoint):
        """Test creating a new experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        metric_id = "test_create_fixed_id"
        metric_definition = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="New Test Metric",
            description="A metric created for testing purposes",
            categories=["Test"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="TestEvent")),
        )

        # Create the metric
        response = await client.create_or_update_metric(experiment_metric_id=metric_id, resource=metric_definition)

        # Assert
        assert response is not None
        assert response.id == metric_id
        assert response.display_name == metric_definition.display_name
        assert response.description == metric_definition.description

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_create_if_not_exists(self, azure_onlineexperimentation_endpoint):
        """Test creating a metric only if it doesn't exist using If-None-Match"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        metric_id = "test_create_if_not_exists"

        # Ensure the metric doesn't exist (try to delete it first)
        try:
            await client.delete_metric(experiment_metric_id=metric_id)
        except ResourceNotFoundError:
            pass  # It's fine if it doesn't exist

        metric_definition = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="If-None-Match Test Metric",
            description="A metric created with If-None-Match header",
            categories=["Test", "Conditional"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="ConditionalCreateEvent")),
        )

        # Create with If-None-Match: *
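        # (MatchConditions.IfMissing is azure-core's way of sending that
        # precondition: the request succeeds only if no metric with this ID exists)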
        response = await client.create_or_update_metric(
            experiment_metric_id=metric_id, resource=metric_definition, match_condition=MatchConditions.IfMissing
        )

        # Assert
        assert response is not None
        assert response.id == metric_id
        assert response.display_name == metric_definition.display_name

        # Try to create it again with If-None-Match - should fail since resource now exists
        updated_definition = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="This should not be updated",
            description="This update should fail",
            categories=["Test"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="TestEvent")),
        )

        with pytest.raises(HttpResponseError) as excinfo:
            await client.create_or_update_metric(
                experiment_metric_id=metric_id, resource=updated_definition, match_condition=MatchConditions.IfMissing
            )

        # Assert second create fails with precondition failed
        assert excinfo.value.status_code == 412  # Precondition Failed

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_reject_invalid_metric_definition(self, azure_onlineexperimentation_endpoint):
        """Test that creating an invalid metric definition is rejected"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        metric_id = "test_invalid_metric_definition"

        # Create an invalid metric definition (missing required fields)
        invalid_metric = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="Invalid Test Metric",
            # Missing required fields: definition, desiredDirection
        )

        # Attempt to create the metric, expecting it to fail
        with pytest.raises(HttpResponseError) as excinfo:
            await client.create_or_update_metric(experiment_metric_id=metric_id, resource=invalid_metric)

        # Assert we got a Bad Request (400) response
        assert excinfo.value.status_code == 400  # Bad Request

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_update_existing_metric(self, azure_onlineexperimentation_endpoint):
        """Test updating an existing experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(
            client, display_name="Initial Test Metric", description="A metric to be updated"
        )

        # Update the metric
        updated_metric_definition = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="Updated Test Metric",
            description="This metric was updated for testing purposes",
            categories=["Test", "Updated"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="TestEvent")),
        )

        response = await client.create_or_update_metric(
            experiment_metric_id=created_metric.id, resource=updated_metric_definition
        )

        # Assert
        assert response is not None
        assert response.id == created_metric.id
        assert response.display_name == updated_metric_definition.display_name
        assert response.description == updated_metric_definition.description

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_conditional_update_with_etag(self, azure_onlineexperimentation_endpoint):
        """Test updating a metric conditionally with If-Match header"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        original_metric = await self._create_test_metric(client)

        # Update with matching ETag
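        # Only `description` is populated; create_or_update_metric is an upsert
        # with merge-patch semantics, so the other fields should remain intact.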
        updated_metric = ExperimentMetric(description="This metric was updated with a conditional request")

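        # IfNotModified plus the stored ETag sends If-Match, so the update only
        # applies if the metric is unchanged on the server since we read it.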
        response = await client.create_or_update_metric(
            experiment_metric_id=original_metric.id,
            resource=updated_metric,
            match_condition=MatchConditions.IfNotModified,
            etag=original_metric.e_tag,
        )

        # Assert
        assert response is not None
        assert response.id == original_metric.id
        assert response.description == updated_metric.description
        assert response.e_tag != original_metric.e_tag

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_conditional_update_etag_mismatch(self, azure_onlineexperimentation_endpoint):
        """Test updating a metric with a mismatched ETag fails"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(client)

        # Try to update with incorrect ETag
        updated_metric = ExperimentMetric(description="This update should fail due to ETag mismatch")

        with pytest.raises(HttpResponseError) as excinfo:
            await client.create_or_update_metric(
                experiment_metric_id=created_metric.id,
                resource=updated_metric,
                match_condition=MatchConditions.IfNotModified,
                etag="incorrect-etag-value",
            )

        # Assert
        assert excinfo.value.status_code == 412  # Precondition Failed

    # READ operations
    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_list_metrics(self, azure_onlineexperimentation_endpoint):
        """Test that the top parameter limits how many metrics are returned"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create multiple test metrics with distinct IDs
        await self._create_test_metric(client, metric_id="test_list_metrics_1")
        await self._create_test_metric(client, metric_id="test_list_metrics_2")
        await self._create_test_metric(client, metric_id="test_list_metrics_3")

        # List metrics with the top parameter
        metrics = []
        async for metric in client.list_metrics(top=2):
            metrics.append(metric)

        # Assert we don't get back more results than requested
        assert len(metrics) <= 2

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_get_metric(self, azure_onlineexperimentation_endpoint):
        """Test retrieving a specific experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(client)

        # Get the metric
        response = await client.get_metric(experiment_metric_id=created_metric.id)

        # Assert
        assert response is not None
        assert response.id == created_metric.id
        assert hasattr(response, "e_tag") and response.e_tag is not None

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_get_non_existent_metric(self, azure_onlineexperimentation_endpoint):
        """Test retrieving a non-existent metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Use a fixed ID that shouldn't exist
        non_existent_metric_id = "non_existent_metric_id"

        # Assert getting a non-existent metric raises ResourceNotFoundError
        with pytest.raises(ResourceNotFoundError):
            await client.get_metric(experiment_metric_id=non_existent_metric_id)

    # VALIDATE operation (special operation)
    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_validate_metric(self, azure_onlineexperimentation_endpoint):
        """Test validating a valid experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        valid_metric = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="Valid Test Metric",
            description="A valid metric for validation testing",
            categories=["Test", "Validation"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(event=ObservedEvent(event_name="TestValidationEvent")),
        )

        # Validate the metric
        response = await client.validate_metric(valid_metric)

        # Assert
        assert response is not None
        assert response.is_valid is True
        assert hasattr(response, "diagnostics")
        assert not response.diagnostics  # a valid metric produces no diagnostics

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_validate_invalid_metric(self, azure_onlineexperimentation_endpoint):
        """Test validating an invalid experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        invalid_metric = ExperimentMetric(
            lifecycle=LifecycleStage.ACTIVE,
            display_name="Invalid Test Metric",
            description="An invalid metric for validation testing",
            categories=["Test"],
            desired_direction=DesiredDirection.INCREASE,
            definition=EventCountMetricDefinition(
                event=ObservedEvent(event_name="TestValidationEvent", filter="this is not a valid filter expression.")
            ),
        )

        # Validate the invalid metric
        response = await client.validate_metric(invalid_metric)

        # Assert
        assert response is not None
        assert response.is_valid is False
        assert hasattr(response, "diagnostics")
        assert len(response.diagnostics) > 0

    # DELETE operations
    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_delete_metric(self, azure_onlineexperimentation_endpoint):
        """Test deleting an experiment metric"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(client)

        # Delete the metric
        await client.delete_metric(experiment_metric_id=created_metric.id)

        # Verify deletion - attempting to get should raise ResourceNotFoundError
        with pytest.raises(ResourceNotFoundError):
            await client.get_metric(experiment_metric_id=created_metric.id)

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_conditional_delete_with_etag(self, azure_onlineexperimentation_endpoint):
        """Test deleting a metric conditionally with If-Match header"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(client)

        # Delete with matching ETag
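        # If-Match guards the delete: it only succeeds while the metric is still
        # at the revision identified by the ETag we captured at creation.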
        await client.delete_metric(
            experiment_metric_id=created_metric.id,
            match_condition=MatchConditions.IfNotModified,
            etag=created_metric.e_tag,
        )

        # Verify deletion
        with pytest.raises(ResourceNotFoundError):
            await client.get_metric(experiment_metric_id=created_metric.id)

    @OnlineExperimentationPreparer()
    @recorded_by_proxy_async
    async def test_conditional_delete_etag_mismatch(self, azure_onlineexperimentation_endpoint):
        """Test deleting a metric with mismatched ETag fails"""
        client = self.create_async_client(endpoint=azure_onlineexperimentation_endpoint)

        # Create a test metric
        created_metric = await self._create_test_metric(client)

        # Try to delete with incorrect ETag
        with pytest.raises(HttpResponseError) as excinfo:
            await client.delete_metric(
                experiment_metric_id=created_metric.id,
                match_condition=MatchConditions.IfNotModified,
                etag="incorrect-delete-etag",
            )

        # Assert
        assert excinfo.value.status_code == 412  # Precondition Failed
