Skip to content

Commit 627a8a8

Browse files
gustavocidornelas and whoseoyster
authored and committed
Completes OPEN-5899 Add tracing documentation to reference.openlayer.com
1 parent ed8612f commit 627a8a8

File tree

3 files changed

+105
-7
lines changed

3 files changed

+105
-7
lines changed

docs/source/reference/monitoring.rst

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,17 @@ Openlayer project is what enables the monitoring mode.
3131
Project.create_inference_pipeline
3232
Project.load_inference_pipeline
3333

34+
Tracing
35+
-------
36+
If you have a multi-step system (e.g., RAG), you can trace all the steps in the system
37+
by decorating the functions with the ``@trace()`` decorator.
38+
39+
.. autosummary::
40+
:toctree: api/
41+
:template: class.rst
42+
43+
openlayer.tracing.tracer.trace
44+
3445
Publishing production data
3546
----------------------------
3647

@@ -45,6 +56,7 @@ single line of code.
4556
:template: class.rst
4657

4758
openlayer.llm_monitors.OpenAIMonitor
59+
openlayer.llm_monitors.AzureOpenAIMonitor
4860

4961
Traditional ML models
5062
^^^^^^^^^^^^^^^^^^^^^

openlayer/llm_monitors.py

Lines changed: 57 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -19,7 +19,7 @@ class OpenAIMonitor:
1919
2020
Parameters
2121
----------
22-
client : openai.api_client.Client, optional
22+
client : openai.api_client.Client
2323
The OpenAI client. It is required if you are using openai>=1.0.0.
2424
2525
Examples
@@ -43,7 +43,7 @@ class OpenAIMonitor:
4343
>>> from openai import OpenAI
4444
>>>
4545
>>> openai_client = OpenAI()
46-
>>> monitor = llm_monitors.OpenAIMonitor(publish=True, client=openai_client)
46+
>>> monitor = llm_monitors.OpenAIMonitor(client=openai_client)
4747
4848
3. Use the OpenAI model as you normally would:
4949
@@ -76,7 +76,7 @@ def __init__(
7676
)
7777

7878
def start_monitoring(self) -> None:
79-
"""Start monitoring the OpenAI assistant."""
79+
"""(Deprecated) Start monitoring the OpenAI assistant."""
8080
warnings.warn(
8181
"The `start_monitoring` method is deprecated and will be removed in a future"
8282
" version. Monitoring is now automatically enabled once the OpenAIMonitor"
@@ -86,7 +86,7 @@ def start_monitoring(self) -> None:
8686
)
8787

8888
def stop_monitoring(self) -> None:
89-
"""Stop monitoring the OpenAI assistant."""
89+
"""(Deprecated) Stop monitoring the OpenAI assistant."""
9090
warnings.warn(
9191
"The `stop_monitoring` method is deprecated and will be removed in a future"
9292
" version. Monitoring is now automatically enabled once the OpenAIMonitor"
@@ -417,7 +417,7 @@ def monitor_thread_run(self, run: "openai.types.beta.threads.run.Run") -> None:
417417
messages = self.openai_client.beta.threads.messages.list(
418418
thread_id=run.thread_id, order="asc"
419419
)
420-
prompt = self.thread_messages_to_prompt(messages)
420+
prompt = self._thread_messages_to_prompt(messages)
421421

422422
# Add step to the trace
423423
tracer.add_openai_chat_completion_step_to_trace(
@@ -466,7 +466,7 @@ def _extract_run_metadata(
466466
}
467467

468468
@staticmethod
469-
def thread_messages_to_prompt(
469+
def _thread_messages_to_prompt(
470470
messages: List["openai.types.beta.threads.thread_message.ThreadMessage"],
471471
) -> List[Dict[str, str]]:
472472
"""Given list of ThreadMessage, return its contents in the `prompt` format,
@@ -493,7 +493,57 @@ def thread_messages_to_prompt(
493493

494494

495495
class AzureOpenAIMonitor(OpenAIMonitor):
496-
"""Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer."""
496+
"""Monitor inferences from Azure OpenAI LLMs and upload traces to Openlayer.
497+
498+
Parameters
499+
----------
500+
client : openai.AzureOpenAI
501+
The AzureOpenAI client.
502+
503+
Examples
504+
--------
505+
506+
Let's say that you have a GPT model you want to monitor. You can turn on monitoring
507+
with Openlayer by simply doing:
508+
509+
1. Set the environment variables:
510+
511+
.. code-block:: bash
512+
513+
export AZURE_OPENAI_ENDPOINT=<your-azure-openai-endpoint>
514+
export AZURE_OPENAI_API_KEY=<your-azure-openai-api-key>
515+
export AZURE_OPENAI_DEPLOYMENT_NAME=<your-azure-openai-deployment-name>
516+
517+
export OPENLAYER_API_KEY=<your-openlayer-api-key>
518+
export OPENLAYER_PROJECT_NAME=<your-project-name>
519+
520+
2. Instantiate the monitor:
521+
522+
>>> from openlayer import llm_monitors
523+
>>> from openai import AzureOpenAI
524+
>>>
525+
>>> azure_client = AzureOpenAI(
526+
>>> api_key=os.environ.get("AZURE_OPENAI_API_KEY"),
527+
>>> api_version="2024-02-01",
528+
>>> azure_endpoint=os.environ.get("AZURE_OPENAI_ENDPOINT"),
529+
>>> )
530+
>>> monitor = llm_monitors.AzureOpenAIMonitor(client=azure_client)
531+
532+
3. Use the Azure OpenAI model as you normally would:
533+
534+
From this point onwards, you can continue making requests to your model normally:
535+
536+
>>> completion = azure_client.chat.completions.create(
537+
>>> model=os.environ.get("AZURE_OPENAI_DEPLOYMENT_NAME"),
538+
>>> messages=[
539+
>>> {"role": "system", "content": "You are a helpful assistant."},
540+
>>> {"role": "user", "content": "How are you doing today?"},
541+
>>> ]
542+
>>> )
543+
544+
The trace of this inference request is automatically uploaded to your Openlayer
545+
project.
546+
"""
497547

498548
def __init__(
499549
self,

openlayer/tracing/tracer.py

Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -110,6 +110,42 @@ def add_openai_chat_completion_step_to_trace(**kwargs) -> None:
110110

111111
# ----------------------------- Tracing decorator ---------------------------- #
112112
def trace(*step_args, **step_kwargs):
113+
"""Decorator to trace a function.
114+
115+
Examples
116+
--------
117+
118+
To trace a function, simply decorate it with the ``@trace()`` decorator. By doing so,
119+
the function's inputs, outputs, and metadata will be automatically logged to your
120+
Openlayer project.
121+
122+
>>> import os
123+
>>> from openlayer.tracing import tracer
124+
>>>
125+
>>> # Set the environment variables
126+
>>> os.environ["OPENLAYER_API_KEY"] = "YOUR_OPENLAYER_API_KEY_HERE"
127+
>>> os.environ["OPENLAYER_PROJECT_NAME"] = "YOUR_OPENLAYER_PROJECT_NAME_HERE"
128+
>>>
129+
>>> # Decorate all the functions you want to trace
130+
>>> @tracer.trace()
131+
>>> def main(user_query: str) -> str:
132+
>>> context = retrieve_context(user_query)
133+
>>> answer = generate_answer(user_query, context)
134+
>>> return answer
135+
>>>
136+
>>> @tracer.trace()
137+
>>> def retrieve_context(user_query: str) -> str:
138+
>>> return "Some context"
139+
>>>
140+
>>> @tracer.trace()
141+
>>> def generate_answer(user_query: str, context: str) -> str:
142+
>>> return "Some answer"
143+
>>>
144+
>>> # Every time the main function is called, the data is automatically
145+
>>> # streamed to your Openlayer project. E.g.:
146+
>>> main("What is the meaning of life?")
147+
"""
148+
113149
def decorator(func):
114150
func_signature = inspect.signature(func)
115151

0 commit comments

Comments
 (0)