# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""
DESCRIPTION:
This sample demonstrates how to get a chat completions response from
the service using a synchronous client. The sample shows how to load
audio data from a file and include it in the input chat messages.
This sample will only work on AI models that support audio input.
Only these AI models accept the array form of `content` in the
`UserMessage`, as shown here.
This sample assumes the AI model is hosted on a Serverless API or
Managed Compute endpoint. For GitHub Models or Azure OpenAI endpoints,
the client constructor needs to be modified. See package documentation:
https://github.com/Azure/azure-sdk-for-python/blob/main/sdk/ai/azure-ai-inference/README.md#key-concepts

USAGE:
    python sample_chat_completions_with_audio_data.py

    Set these two or three environment variables before running the sample:
    1) AZURE_AI_CHAT_ENDPOINT - Your endpoint URL, in the form
        https://<your-deployment-name>.<your-azure-region>.models.ai.azure.com
        where `your-deployment-name` is your unique AI Model deployment name, and
        `your-azure-region` is the Azure region where your model is deployed.
    2) AZURE_AI_CHAT_KEY - Your model key. Keep it secret.
    3) AZURE_AI_CHAT_DEPLOYMENT_NAME - Optional. Your model deployment name,
        passed to the service as the `model` parameter of the chat completions request.
"""


def sample_chat_completions_with_audio_data():
    import os

    from azure.ai.inference import ChatCompletionsClient
    from azure.ai.inference.models import (
        SystemMessage,
        UserMessage,
        TextContentItem,
        AudioContentItem,
        InputAudio,
        AudioContentFormat,
    )
    from azure.core.credentials import AzureKeyCredential

    try:
        endpoint = os.environ["AZURE_AI_CHAT_ENDPOINT"]
        key = os.environ["AZURE_AI_CHAT_KEY"]
    except KeyError:
        print("Missing environment variable 'AZURE_AI_CHAT_ENDPOINT' or 'AZURE_AI_CHAT_KEY'")
        print("Set them before running this sample.")
        exit()

    try:
        model_deployment = os.environ["AZURE_AI_CHAT_DEPLOYMENT_NAME"]
    except KeyError:
        print("Could not read optional environment variable `AZURE_AI_CHAT_DEPLOYMENT_NAME`.")
        print("No specific model deployment will be targeted.")
        model_deployment = None

    client = ChatCompletionsClient(
        endpoint=endpoint,
        credential=AzureKeyCredential(key),
    )
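
    # For GitHub Models or Azure OpenAI endpoints, the client is constructed differently
    # (see the README linked in the docstring). The commented-out sketches below are
    # illustrative only: the endpoint forms, the api_version value, and the GITHUB_TOKEN
    # variable name are example assumptions, not values verified against your setup.
    #
    # GitHub Models (authenticate with a GitHub personal access token):
    # client = ChatCompletionsClient(
    #     endpoint="https://models.inference.ai.azure.com",
    #     credential=AzureKeyCredential(os.environ["GITHUB_TOKEN"]),
    # )
    #
    # Azure OpenAI (key authentication; note the explicit api_version):
    # client = ChatCompletionsClient(
    #     endpoint="https://<resource>.openai.azure.com/openai/deployments/<deployment>",
    #     credential=AzureKeyCredential(key),
    #     api_version="2024-06-01",  # example API version; check the service documentation
    # )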

    response = client.complete(
        messages=[
            SystemMessage("You are an AI assistant for translating and transcribing audio clips."),
            UserMessage(
                [
                    TextContentItem(text="Please translate this audio snippet to Spanish."),
                    AudioContentItem(
                        input_audio=InputAudio.load(
                            audio_file="hello_how_are_you.mp3", audio_format=AudioContentFormat.MP3
                        )
                    ),
                ],
            ),
        ],
        model=model_deployment,
    )
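
    # InputAudio.load() above reads the file from disk and base64-encodes it into the request.
    # If the audio bytes are already in memory, something like the commented-out sketch below
    # may work; the `data` and `format` keyword names are assumptions, so check the
    # azure.ai.inference.models reference documentation before relying on them.
    #
    # import base64
    # encoded = base64.b64encode(audio_bytes).decode("utf-8")
    # audio_item = AudioContentItem(
    #     input_audio=InputAudio(data=encoded, format=AudioContentFormat.MP3)
    # )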

    print(response.choices[0].message.content)
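
    # The response also carries token usage counts, which can help when debugging
    # prompt size or cost. The guard is defensive in case the service omits usage.
    if response.usage is not None:
        print(f"Prompt tokens: {response.usage.prompt_tokens}")
        print(f"Completion tokens: {response.usage.completion_tokens}")
        print(f"Total tokens: {response.usage.total_tokens}")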


if __name__ == "__main__":
    sample_chat_completions_with_audio_data()