
Commit eb30f0f

gustavocidornelas authored and whoseoyster committed
Completes OPEN-5863 Client should have notebook example for using azure gpt
1 parent 5e29153 commit eb30f0f

3 files changed: +160 -0 lines changed

Lines changed: 157 additions & 0 deletions

@@ -0,0 +1,157 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "2722b419",
+   "metadata": {},
+   "source": [
+    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/openlayer-ai/examples-gallery/blob/main/monitoring/llms/azure-openai/azure_openai_llm_monitor.ipynb)\n",
+    "\n",
+    "\n",
+    "# <a id=\"top\">Azure OpenAI LLM monitoring quickstart</a>\n",
+    "\n",
+    "This notebook illustrates how to get started monitoring Azure OpenAI LLMs with Openlayer."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "020c8f6a",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "!pip install openlayer"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "75c2a473",
+   "metadata": {},
+   "source": [
+    "## 1. Set the environment variables"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "f3f4fa13",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import openai\n",
+    "\n",
+    "# Azure OpenAI env variables\n",
+    "os.environ[\"ENDPOINT\"] = \"YOUR_AZURE_OPENAI_ENDPOINT_HERE\"\n",
+    "os.environ[\"AZURE_API_KEY\"] = \"YOUR_AZURE_OPENAI_API_KEY_HERE\"\n",
+    "os.environ[\"DEPLOYMENT_NAME\"] = \"YOUR_AZURE_OPENAI_DEPLOYMENT_NAME_HERE\"\n",
+    "\n",
+    "# Openlayer env variables\n",
+    "os.environ[\"OPENLAYER_API_KEY\"] = \"YOUR_OPENLAYER_API_KEY_HERE\"\n",
+    "os.environ[\"OPENLAYER_PROJECT_NAME\"] = \"YOUR_OPENLAYER_PROJECT_NAME_HERE\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "9758533f",
+   "metadata": {},
+   "source": [
+    "## 2. Instantiate the monitor"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "e60584fa",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<openlayer.llm_monitors.AzureOpenAIMonitor at 0x7f8758d3abe0>"
+      ]
+     },
+     "execution_count": 2,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "from openlayer import llm_monitors\n",
+    "\n",
+    "from openai import AzureOpenAI\n",
+    "\n",
+    "azure_client = AzureOpenAI(\n",
+    "    api_key=os.environ.get(\"AZURE_API_KEY\"),\n",
+    "    api_version=\"2024-02-01\",\n",
+    "    azure_endpoint=os.environ.get(\"ENDPOINT\"),\n",
+    ")\n",
+    "\n",
+    "llm_monitors.AzureOpenAIMonitor(client=azure_client)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "72a6b954",
+   "metadata": {},
+   "source": [
+    "## 3. Use your monitored Azure OpenAI client normally"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "76a350b4",
+   "metadata": {},
+   "source": [
+    "That's it! Now you can continue using Azure OpenAI LLMs normally. The data is automatically published to Openlayer, and you can start creating tests around it!"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "e00c1c79",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "completion = azure_client.chat.completions.create(\n",
+    "    model=os.environ.get(\"DEPLOYMENT_NAME\"),\n",
+    "    messages=[\n",
+    "        {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n",
+    "        {\"role\": \"user\", \"content\": \"How are you doing today?\"},\n",
+    "        {\"role\": \"assistant\", \"content\": \"Pretty well! How about you?\"},\n",
+    "        {\"role\": \"user\", \"content\": \"I am doing well, but would like some words of encouragement.\"},\n",
+    "    ]\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "abaf6987-c257-4f0d-96e7-3739b24c7206",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.18"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
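
For readers skimming the diff, the notebook's three steps condense to roughly the plain-Python script below. This is only a restatement of the cells above, not an additional API; the YOUR_..._HERE values and the 2024-02-01 API version are the notebook's own placeholders.

import os

from openai import AzureOpenAI
from openlayer import llm_monitors

# 1. Set the environment variables (placeholder values copied from the notebook).
os.environ["ENDPOINT"] = "YOUR_AZURE_OPENAI_ENDPOINT_HERE"
os.environ["AZURE_API_KEY"] = "YOUR_AZURE_OPENAI_API_KEY_HERE"
os.environ["DEPLOYMENT_NAME"] = "YOUR_AZURE_OPENAI_DEPLOYMENT_NAME_HERE"
os.environ["OPENLAYER_API_KEY"] = "YOUR_OPENLAYER_API_KEY_HERE"
os.environ["OPENLAYER_PROJECT_NAME"] = "YOUR_OPENLAYER_PROJECT_NAME_HERE"

# 2. Instantiate the monitor around a regular AzureOpenAI client.
azure_client = AzureOpenAI(
    api_key=os.environ.get("AZURE_API_KEY"),
    api_version="2024-02-01",
    azure_endpoint=os.environ.get("ENDPOINT"),
)
llm_monitors.AzureOpenAIMonitor(client=azure_client)

# 3. Use the monitored client normally; the request is traced and published to Openlayer.
completion = azure_client.chat.completions.create(
    model=os.environ.get("DEPLOYMENT_NAME"),
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "I am doing well, but would like some words of encouragement."},
    ],
)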

openlayer/llm_monitors.py

Lines changed: 2 additions & 0 deletions
@@ -493,6 +493,8 @@ def thread_messages_to_prompt(
 
 
 class AzureOpenAIMonitor(OpenAIMonitor):
+    """Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer."""
+
     def __init__(
         self,
         client=None,
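
The only change in this file is the new class docstring, so runtime behavior is untouched. One quick way to see it from a Python shell, assuming the openlayer package from this commit is installed:

from openlayer import llm_monitors

# Prints: Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer.
print(llm_monitors.AzureOpenAIMonitor.__doc__)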

openlayer/model_runners/ll_model_runners.py

Lines changed: 1 addition & 0 deletions
@@ -390,6 +390,7 @@ def _initialize_llm(self):
         """Initializes Cohere's Generate model."""
         # Check if API key is valid -- Cohere's validation seems to be very shallow
         try:
+            # pylint: disable=unexpected-keyword-arg
             self.cohere_client = cohere.Client(
                 api_key=self.cohere_api_key, check_api_key=True
             )
