@@ -19,7 +19,7 @@ class OpenAIMonitor:
 
     Parameters
     ----------
-    client : openai.api_client.Client, optional
+    client : openai.api_client.Client
         The OpenAI client. It is required if you are using openai>=1.0.0.
 
     Examples
@@ -43,7 +43,7 @@ class OpenAIMonitor:
     >>> from openai import OpenAI
     >>>
     >>> openai_client = OpenAI()
-    >>> monitor = llm_monitors.OpenAIMonitor(publish=True, client=openai_client)
+    >>> monitor = llm_monitors.OpenAIMonitor(client=openai_client)
 
     3. Use the OpenAI model as you normally would:
 
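
The hunk ends right after step 3's heading, before the usage snippet itself. As a
minimal sketch of the kind of call that follows (the model name is an assumption,
mirroring the Azure example later in this diff):

    # Hypothetical usage sketch, not part of the diff: once the monitor exists,
    # regular chat-completions calls are traced automatically.
    completion = openai_client.chat.completions.create(
        model="gpt-3.5-turbo",  # assumption: any chat model available to you
        messages=[{"role": "user", "content": "How are you doing today?"}],
    )
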
@@ -76,7 +76,7 @@ def __init__(
         )
 
     def start_monitoring(self) -> None:
-        """Start monitoring the OpenAI assistant."""
+        """(Deprecated) Start monitoring the OpenAI assistant."""
         warnings.warn(
             "The `start_monitoring` method is deprecated and will be removed in a future"
             " version. Monitoring is now automatically enabled once the OpenAIMonitor"
@@ -86,7 +86,7 @@ def start_monitoring(self) -> None:
         )
 
     def stop_monitoring(self) -> None:
-        """Stop monitoring the OpenAI assistant."""
+        """(Deprecated) Stop monitoring the OpenAI assistant."""
         warnings.warn(
             "The `stop_monitoring` method is deprecated and will be removed in a future"
             " version. Monitoring is now automatically enabled once the OpenAIMonitor"
@@ -417,7 +417,7 @@ def monitor_thread_run(self, run: "openai.types.beta.threads.run.Run") -> None:
         messages = self.openai_client.beta.threads.messages.list(
             thread_id=run.thread_id, order="asc"
         )
-        prompt = self.thread_messages_to_prompt(messages)
+        prompt = self._thread_messages_to_prompt(messages)
 
         # Add step to the trace
         tracer.add_openai_chat_completion_step_to_trace(
@@ -466,7 +466,7 @@ def _extract_run_metadata(
         }
 
     @staticmethod
-    def thread_messages_to_prompt(
+    def _thread_messages_to_prompt(
         messages: List["openai.types.beta.threads.thread_message.ThreadMessage"],
     ) -> List[Dict[str, str]]:
         """Given list of ThreadMessage, return its contents in the `prompt` format,
@@ -493,7 +493,57 @@ def thread_messages_to_prompt(
 
 
 class AzureOpenAIMonitor(OpenAIMonitor):
-    """Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer."""
+    """Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer.
+
+    Parameters
+    ----------
+    client : openai.AzureOpenAI
+        The AzureOpenAI client.
+
+    Examples
+    --------
+
+    Let's say you have a GPT model you want to monitor. You can turn on monitoring
+    with Openlayer in three steps:
+
+    1. Set the environment variables:
+
+    .. code-block:: bash
+
+        export AZURE_OPENAI_ENDPOINT=<your-azure-openai-endpoint>
+        export AZURE_OPENAI_API_KEY=<your-azure-openai-api-key>
+        export AZURE_OPENAI_DEPLOYMENT_NAME=<your-azure-openai-deployment-name>
+
+        export OPENLAYER_API_KEY=<your-openlayer-api-key>
+        export OPENLAYER_PROJECT_NAME=<your-project-name>
+
+    2. Instantiate the monitor:
+
+    >>> from openlayer import llm_monitors
+    >>> from openai import AzureOpenAI
+    >>>
+    >>> azure_client = AzureOpenAI(
+    >>>     api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
+    >>>     api_version="2024-02-01",
+    >>>     azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
+    >>> )
+    >>> monitor = llm_monitors.AzureOpenAIMonitor(client=azure_client)
+
+    3. Use the Azure OpenAI model as you normally would:
+
+    From this point onwards, you can continue making requests to your model normally:
+
+    >>> completion = azure_client.chat.completions.create(
+    >>>     model=os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME"),
+    >>>     messages=[
+    >>>         {"role": "system", "content": "You are a helpful assistant."},
+    >>>         {"role": "user", "content": "How are you doing today?"},
+    >>>     ]
+    >>> )
+
+    The trace of this inference request is automatically uploaded to your Openlayer
+    project.
+    """
 
     def __init__(
         self,