diff --git a/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/__init__.py b/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/__init__.py index b93b06f3d8..9cbed7ac8f 100644 --- a/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/__init__.py +++ b/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/__init__.py @@ -30,8 +30,10 @@ from opentelemetry.instrumentation.openai.v1.responses_wrappers import ( async_responses_cancel_wrapper, async_responses_get_or_create_wrapper, + async_responses_parse_wrapper, responses_cancel_wrapper, responses_get_or_create_wrapper, + responses_parse_wrapper, ) from opentelemetry.instrumentation.openai.version import __version__ @@ -309,6 +311,11 @@ def _instrument(self, **kwargs): "Responses.retrieve", responses_get_or_create_wrapper(tracer), ) + self._try_wrap( + "openai.resources.responses", + "Responses.parse", + responses_parse_wrapper(tracer), + ) self._try_wrap( "openai.resources.responses", "Responses.cancel", @@ -324,6 +331,11 @@ def _instrument(self, **kwargs): "AsyncResponses.retrieve", async_responses_get_or_create_wrapper(tracer), ) + self._try_wrap( + "openai.resources.responses", + "AsyncResponses.parse", + async_responses_parse_wrapper(tracer), + ) self._try_wrap( "openai.resources.responses", "AsyncResponses.cancel", @@ -350,9 +362,11 @@ def _uninstrument(self, **kwargs): unwrap("openai.resources.beta.threads.messages", "Messages.list") unwrap("openai.resources.responses", "Responses.create") unwrap("openai.resources.responses", "Responses.retrieve") + unwrap("openai.resources.responses", "Responses.parse") unwrap("openai.resources.responses", "Responses.cancel") unwrap("openai.resources.responses", "AsyncResponses.create") unwrap("openai.resources.responses", "AsyncResponses.retrieve") + unwrap("openai.resources.responses", "AsyncResponses.parse") 
unwrap("openai.resources.responses", "AsyncResponses.cancel") except ImportError: pass diff --git a/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py b/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py index 24113d1060..d32302d56c 100644 --- a/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py +++ b/packages/opentelemetry-instrumentation-openai/opentelemetry/instrumentation/openai/v1/responses_wrappers.py @@ -19,6 +19,7 @@ from openai.types.responses.response_output_message_param import ( ResponseOutputMessageParam, ) + RESPONSES_AVAILABLE = True except ImportError: # Fallback types for older OpenAI SDK versions @@ -107,8 +108,10 @@ def is_validator_iterator(content): # OpenAI API accepts output messages without an ID in its inputs, but # the ID is marked as required in the output type. if RESPONSES_AVAILABLE: + class ResponseOutputMessageParamWithoutId(ResponseOutputMessageParam): id: NotRequired[str] + else: # Fallback for older SDK versions ResponseOutputMessageParamWithoutId = dict @@ -207,13 +210,15 @@ def set_data_attributes(traced_response: TracedData, span: Span): reasoning_tokens = None # Support both dict-style and object-style `usage` tokens_details = ( - usage.get("output_tokens_details") if isinstance(usage, dict) + usage.get("output_tokens_details") + if isinstance(usage, dict) else getattr(usage, "output_tokens_details", None) ) if tokens_details: reasoning_tokens = ( - tokens_details.get("reasoning_tokens", None) if isinstance(tokens_details, dict) + tokens_details.get("reasoning_tokens", None) + if isinstance(tokens_details, dict) else getattr(tokens_details, "reasoning_tokens", None) ) @@ -735,4 +740,298 @@ async def async_responses_cancel_wrapper( return response +@dont_throw +@_with_tracer_wrapper +def responses_parse_wrapper(tracer: Tracer, wrapped, instance, args, kwargs): 
+ """ + Wrapper for Responses.parse method which handles structured outputs. + Similar to responses.create but specifically for parsing with schemas. + """ + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY): + return wrapped(*args, **kwargs) + start_time = time.time_ns() + + try: + response = wrapped(*args, **kwargs) + if isinstance(response, Stream): + return response + except Exception as e: + response_id = kwargs.get("response_id") + existing_data = {} + if response_id and response_id in responses: + existing_data = responses[response_id].model_dump() + try: + traced_data = TracedData( + start_time=existing_data.get("start_time", start_time), + response_id=response_id or "", + input=process_input( + kwargs.get("input", existing_data.get("input", [])) + ), + instructions=kwargs.get( + "instructions", existing_data.get("instructions") + ), + tools=get_tools_from_kwargs(kwargs) or existing_data.get("tools", []), + output_blocks=existing_data.get("output_blocks", {}), + usage=existing_data.get("usage"), + output_text=kwargs.get( + "output_text", existing_data.get("output_text", "") + ), + request_model=kwargs.get( + "model", existing_data.get("request_model", "") + ), + response_model=existing_data.get("response_model", ""), + # Reasoning attributes + request_reasoning_summary=( + kwargs.get("reasoning", {}).get( + "summary", existing_data.get("request_reasoning_summary") + ) + ), + request_reasoning_effort=( + kwargs.get("reasoning", {}).get( + "effort", existing_data.get("request_reasoning_effort") + ) + ), + response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"), + ) + except Exception: + traced_data = None + + span = tracer.start_span( + SPAN_NAME, + kind=SpanKind.CLIENT, + start_time=( + start_time if traced_data is None else int(traced_data.start_time) + ), + ) + span.set_attribute(ERROR_TYPE, e.__class__.__name__) + span.record_exception(e) + span.set_status(StatusCode.ERROR, str(e)) + if traced_data: + set_data_attributes(traced_data, 
span) + span.end() + raise + + parsed_response = parse_response(response) + + existing_data = responses.get(parsed_response.id) + if existing_data is None: + existing_data = {} + else: + existing_data = existing_data.model_dump() + + request_tools = get_tools_from_kwargs(kwargs) + merged_tools = existing_data.get("tools", []) + request_tools + + try: + parsed_response_output_text = None + if hasattr(parsed_response, "output_text"): + parsed_response_output_text = parsed_response.output_text + elif hasattr(parsed_response, "output_parsed"): + # For structured outputs, serialize the parsed output + try: + parsed_output = parsed_response.output_parsed + if parsed_output is not None: + parsed_response_output_text = json.dumps( + model_as_dict(parsed_output) + ) + except Exception: + pass + + if parsed_response_output_text is None: + try: + parsed_response_output_text = parsed_response.output[0].content[0].text + except Exception: + pass + + traced_data = TracedData( + start_time=existing_data.get("start_time", start_time), + response_id=parsed_response.id, + input=process_input(existing_data.get("input", kwargs.get("input"))), + instructions=existing_data.get("instructions", kwargs.get("instructions")), + tools=merged_tools if merged_tools else None, + output_blocks={block.id: block for block in parsed_response.output} + | existing_data.get("output_blocks", {}), + usage=existing_data.get("usage", parsed_response.usage), + output_text=( + parsed_response_output_text + if parsed_response_output_text is not None + else existing_data.get("output_text") + ), + request_model=existing_data.get("request_model", kwargs.get("model")), + response_model=existing_data.get("response_model", parsed_response.model), + # Reasoning attributes + request_reasoning_summary=( + kwargs.get("reasoning", {}).get( + "summary", existing_data.get("request_reasoning_summary") + ) + ), + request_reasoning_effort=( + kwargs.get("reasoning", {}).get( + "effort", 
existing_data.get("request_reasoning_effort") + ) + ), + response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"), + ) + responses[parsed_response.id] = traced_data + except Exception: + return response + + if parsed_response.status == "completed": + span = tracer.start_span( + SPAN_NAME, + kind=SpanKind.CLIENT, + start_time=int(traced_data.start_time), + ) + set_data_attributes(traced_data, span) + span.end() + + return response + + +@dont_throw +@_with_tracer_wrapper +async def async_responses_parse_wrapper( + tracer: Tracer, wrapped, instance, args, kwargs +): + """ + Async wrapper for Responses.parse method which handles structured outputs. + Similar to responses.create but specifically for parsing with schemas. + """ + if context_api.get_value(_SUPPRESS_INSTRUMENTATION_KEY): + return await wrapped(*args, **kwargs) + start_time = time.time_ns() + + try: + response = await wrapped(*args, **kwargs) + if isinstance(response, (Stream, AsyncStream)): + return response + except Exception as e: + response_id = kwargs.get("response_id") + existing_data = {} + if response_id and response_id in responses: + existing_data = responses[response_id].model_dump() + try: + traced_data = TracedData( + start_time=existing_data.get("start_time", start_time), + response_id=response_id or "", + input=process_input( + kwargs.get("input", existing_data.get("input", [])) + ), + instructions=kwargs.get( + "instructions", existing_data.get("instructions", "") + ), + tools=get_tools_from_kwargs(kwargs) or existing_data.get("tools", []), + output_blocks=existing_data.get("output_blocks", {}), + usage=existing_data.get("usage"), + output_text=kwargs.get("output_text", existing_data.get("output_text")), + request_model=kwargs.get("model", existing_data.get("request_model")), + response_model=existing_data.get("response_model"), + # Reasoning attributes + request_reasoning_summary=( + kwargs.get("reasoning", {}).get( + "summary", existing_data.get("request_reasoning_summary") + ) 
+ ), + request_reasoning_effort=( + kwargs.get("reasoning", {}).get( + "effort", existing_data.get("request_reasoning_effort") + ) + ), + response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"), + ) + except Exception: + traced_data = None + + span = tracer.start_span( + SPAN_NAME, + kind=SpanKind.CLIENT, + start_time=( + start_time if traced_data is None else int(traced_data.start_time) + ), + ) + span.set_attribute(ERROR_TYPE, e.__class__.__name__) + span.record_exception(e) + span.set_status(StatusCode.ERROR, str(e)) + if traced_data: + set_data_attributes(traced_data, span) + span.end() + raise + + parsed_response = parse_response(response) + + existing_data = responses.get(parsed_response.id) + if existing_data is None: + existing_data = {} + else: + existing_data = existing_data.model_dump() + + request_tools = get_tools_from_kwargs(kwargs) + merged_tools = existing_data.get("tools", []) + request_tools + + try: + parsed_response_output_text = None + if hasattr(parsed_response, "output_text"): + parsed_response_output_text = parsed_response.output_text + elif hasattr(parsed_response, "output_parsed"): + # For structured outputs, serialize the parsed output + try: + parsed_output = parsed_response.output_parsed + if parsed_output is not None: + parsed_response_output_text = json.dumps( + model_as_dict(parsed_output) + ) + except Exception: + pass + + if parsed_response_output_text is None: + try: + parsed_response_output_text = parsed_response.output[0].content[0].text + except Exception: + pass + + traced_data = TracedData( + start_time=existing_data.get("start_time", start_time), + response_id=parsed_response.id, + input=process_input(existing_data.get("input", kwargs.get("input"))), + instructions=existing_data.get("instructions", kwargs.get("instructions")), + tools=merged_tools if merged_tools else None, + output_blocks={block.id: block for block in parsed_response.output} + | existing_data.get("output_blocks", {}), + 
usage=existing_data.get("usage", parsed_response.usage), + output_text=( + parsed_response_output_text + if parsed_response_output_text is not None + else existing_data.get("output_text") + ), + request_model=existing_data.get("request_model", kwargs.get("model")), + response_model=existing_data.get("response_model", parsed_response.model), + # Reasoning attributes + request_reasoning_summary=( + kwargs.get("reasoning", {}).get( + "summary", existing_data.get("request_reasoning_summary") + ) + ), + request_reasoning_effort=( + kwargs.get("reasoning", {}).get( + "effort", existing_data.get("request_reasoning_effort") + ) + ), + response_reasoning_effort=kwargs.get("reasoning", {}).get("effort"), + ) + responses[parsed_response.id] = traced_data + except Exception: + return response + + if parsed_response.status == "completed": + span = tracer.start_span( + SPAN_NAME, + kind=SpanKind.CLIENT, + start_time=int(traced_data.start_time), + ) + set_data_attributes(traced_data, span) + span.end() + + return response + + # TODO: build streaming responses diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_basic.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_basic.yaml new file mode 100644 index 0000000000..4265c6df6e --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_basic.yaml @@ -0,0 +1,118 @@ +interactions: + - request: + body: + '{"input": "Mike, 35 years old", "model": "gpt-4.1-nano", "text": {"format": + {"type": "json_schema", "strict": true, "name": "Person", "schema": {"description": + "Simple structured output for basic tests.", "properties": {"name": {"title": + "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + 
false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "409" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVy47bOgzd5ysErScD5+G8dvcDWhTosikMWqI96siSK9GDBoP8eyEpduRM5+4S + HvKYj0PqfcEYV5KfGHfo+6rYlmJbwr4syr2QclcUuwMW5QZhvd1vD6vj5girulwdRF0cG1wfgD8F + Clv/QkEjjTUek104BEJZQcBW+/JYbtbFqoiYJ6DBhxhhu14joUxBNYjX1tnBhLwa0B6TWWmtTMtP + 7H3BGGO8hwu6EC/xDbXt0fEFY9fojM7ZgJlB62hQZvxKJZFAaT9HPblBkLJmZu/gT2UH6geqyL7i + R5Cs1ZUAPafrrEQdMmt7Wm6fV0sDxi7XxbpcFtvlantrWiTmJ/Yj1pOqmubR+fbzcdTbWsZxADY7 + KA7lvsHd7gip7ZGFLj1GHvQe2gz4rO8RFNYQmntSeWIz2rEr+Iem6OgAxliCsZM/fs5Abdve2fof + SCQ6Mf5+5gY6PPPTmX9Rr3jmT2cObTBsyiufQq63XxMLd1bHzMB75QkMJefgGJ14Dw60Rj0fGrkh + 6at3+Kbs4KtRwlUcxDTU3tmup0qAeMHqFS855hC8NTN1YtNYR5lTaP3QdeDGyEmsHhqkS6UkGlKN + wplwPbo3JbAiNYq9gUGnpnNP1mFeBGHXowMaonn1XNyssbm3zBrrOrj/z4b6y1tTefGCHdwlIdEL + p/ow0Vk1jMVJhbhv6Lw1mcQSxSmTzgMN/66C9lhau8GhZElRrLGO1eCVYISe/HOmrjCCHh0p9DPu + eya5LdSlKGnia4CfHsBb0Z5cGFwGXueCbv+P+b/2U2JlCNt0lB5VO/sGd/h7UA7lbOvGoj7kMv3P + Fuiez8Mo8o1NNzpDQEoV5gH6W97YeHIXD2nGNsUTH7S2yDD+hq62XtElnRuphu5+iNOuvVgl0nIO + ZPkE3O8AJ9tX2XUoJmOf69gNRsAoIak81Hp8NQafD4orMzvau83TR3v2EkyrENdb3gOLWamPb0F6 + yh6Bf/FOF+IzarIE+g7uN1MLBz+/CB0SSKC4X9fF9S8AAAD//wMATUAQDsQHAAA= + headers: + CF-RAY: + - 989004140b48770b-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:31 GMT + Server: + - cloudflare + Set-Cookie: + - 
__cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:31 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "1415" + openai-project: + - proj-REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "1419" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999918" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_5f4e2d1fbecc59474383efb535a8ba6e + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_message_history.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_message_history.yaml new file mode 100644 index 0000000000..4d64369eb4 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_message_history.yaml @@ -0,0 +1,120 @@ +interactions: + - request: + body: + '{"input": [{"role": "system", "content": "Extract person data from text."}, + {"role": "user", "content": "Parse this: Emily Jones, age 31"}], "model": "gpt-4.1-nano", + "text": {"format": {"type": "json_schema", "strict": true, "name": "Person", + "schema": {"description": "Simple structured output for basic tests.", "properties": + {"name": {"title": "Name", "type": "string"}, "age": {"title": "Age", "type": + "integer"}}, "required": ["name", "age"], 
"title": "Person", "type": "object", + "additionalProperties": false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "518" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//fFXBbts4EL37Kwie40CW7cT2rYde9rAo0GO9EEbkSGFDkSo5DGoE/vcF + KUuinKY3ex75NDPvzfB9xRhXkp8Yd+j7qoBtsd/KAo/bI4hmXxRPByz2W6zlbn84bI67GhpoyhKO + ZbkrD/WRP0QKW/9EQSONNR6HuHAIhLKCiG2e98f9tiw2m4R5Ago+3hG26zUSyuFSDeK1dTaYmFcD + 2uMQVlor0/ITe18xxhjv4YIu3pf4htr26PiKsWs6jM7ZiJmgdQooM36lkkigtF+inlwQpKxZxDv4 + XdlAfaCK7Ct+BMlaXQnQS7rOStQxs7an9e5xszZg7Losyv262K03u1vTEjE/sR+pnqGqSY/Ot5/L + IbZP9SBHcyxF0xye6vr5UBfbxJxY6NJj4kHvocUZ+KzvCRTWEJo5qTyxBe3YFfxN0+10AIyxBGMn + f/y3ALVte2frPyCJ6MT4+5kb6PDMT2f+tVP6wv6xBv2ZP5w5tDG+3Vz5dPN6+zWRcWd1ShC8V57A + 0HA4HkyHeA8OtEa91I5cGGzWO3xTNvhqdHKV9Ji07Z3teqoEiBesXvGSYw7BW7MwKTaNdZQdigqE + rgM33pw866FBulRKoiHVKFz416N7UwIrUqPnGwh66D33ZB3mRRB2PTqgkMKbx+IWTT2+ZdZY18H8 + P9P2p7em8uIFO5idIdELp/oo7KIaxpJg8d43dN6azGkDxSlz0B0N/66iBdkwfcGhZIOxWGMdq8Er + wQg9+cfMZFGCHh0p9AvuOZM8FutSNHji3wg/3IG3oj25KFwGXpe+bv/G/KX9lFgZwnbYTfeuXXyD + O/wVlEO5GL6xqA+5TP+zOZrzuZMiH9xhVWcISKmiHqC/5Y1Nm3d1l2ZqU9r00WurDONv6GrrFV2G + rSNV6OZ9PMzai1ViGM5Alk/AvA442b7KlkQxBfvcxy4YAaOFpPJQ6/HxCD4Xiiuz2N3PTw8f49mD + MI1CGm85XywWpd4/CcOLdg/8iXfaEJ9RkyXQM3h4nloY/HIjdEgggdJ8XVfX/wEAAP//AwA4FiW2 + ywcAAA== + headers: + CF-RAY: + - 989004214c06e90f-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + 
Date: + - Fri, 03 Oct 2025 22:53:32 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:32 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "697" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "708" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999905" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_f349a9e2422b9414c481be430fe74b67 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_tools.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_tools.yaml new file mode 100644 index 0000000000..94e34e6810 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_async_responses_parse_with_tools.yaml @@ -0,0 +1,124 @@ +interactions: + - request: + body: + '{"input": "Find person: Tom", "model": "gpt-4.1-nano", "text": {"format": + {"type": "json_schema", "strict": true, "name": "Person", "schema": {"description": + "Simple structured output for basic tests.", "properties": {"name": {"title": + "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": 
+ false}}}, "tools": [{"type": "function", "name": "search_database", "description": + "Search for records in the database", "parameters": {"type": "object", "properties": + {"query": {"type": "string", "description": "The search query"}}, "required": + ["query"]}}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "657" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//xFbbbuM2EH33VxB8jgPJlmM5b2m76BYougskiwZoAmFEjmwmkqglh2mM + wP++IGXrYsfbCwr0TZpDHs7lzJBvE8a4kvyacYO2yaIcIpzlqViJYpViGkVXKUaLebFIl1Kk8SpZ + YQ5yFYurJUBcJJJfeAqdP6GgA42uLbZ2YRAIZQYei5eL1WI+i2ZxwCwBOev3CF01JRLuyXIQz2uj + Xe39KqC02JpVWap6za/Z24QxxngDWzR+v8QXLHWDhk8Y24XFaIz2WO3KMhhUfTglk0igSjtGLRkn + SOl6ZK/gNdOOGkcZ6Wc8BUnrMhNQjukqLbH0nq0bmiaX8bSGWk9n0WwxjZJpnOyTFoj5NfsjxNNG + 1dWjEOercZUs55GvBiTpbL7A5eJqnsYgF4E4kNC2wUDj6hBWcLKHzyU/gGDWrsKaAv72wL86NNsH + fv3A73T1wHf9Sk+atf6Gz49pVcTSpasf7l8+fvnwe3rz/Of9L/ef+h01VMEvi2DEJpNAkINFHvDd + xT/OxHKvS5jP47gQSwEwXy5z8X9nAl4XP8H25+hHQ1/g5tNd9evt0xMuPvzdTEwYewwqacBAWWI5 + lhoZ13ZFY/BFaWezQ+O1TnRSbIyuGsoEiA1mz7gdYgbB6nrUU1gU2tBgkc+Qqyowh51di1kokLaZ + kliTKhSO2s2ieVECM1KHFi3AlcT3na8NDoMgrBo0QC6Y48tob32l3rNCmwr6/0Fhn6yuMys2WEGf + XYlWGNX4go+iGST+Mxqr64ESWor+hBMafqu8RFg7LJxBydomZoU2LAerBCO0ZC871rYEDRpSaEfc + vSdDm49LURkc/M3DF0fgPmhLxhduAO6GKzmsv8d8sz5LrGrCdTtKO+bJO2dwg1+dMii7ATYM6sSX + 7v9xQNH5c1SKgTv7m2WAgJTK1wPKz8PEhoticuRmSFO4mLzWhjOGv6DJtVXkdc0rlMpV/fXR9tpG + 
KxGcAEead4A9HdnHM+acDvltaPYgF4NCG2mZqhltkHX9/5cDolvgR0OFhGYsrO/k7rwUw2R7RzFj + uR1p5ii4uw2y1lnW0v1rDR1tf/wPy98NVtJNVup1Y3TuCaLO2AxnkHG1gEOAUlnIy8M7xdlhk3FV + j54JSXpxah+8Pd76m0NsUPYbo5FMj18fSfIe8B5vN93PUZMmKHtwNevk7+x4mldI4OXn6XeT3TcA + AAD//wMAuIYMOTYKAAA= + headers: + CF-RAY: + - 98900457392e940c-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:42 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:42 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "1472" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "2480" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999686" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 4ms + x-request-id: + - req_623c03adba6fc3af3bc7760b88a7ff47 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_basic.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_basic.yaml new file mode 100644 index 0000000000..c29cfb8746 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_basic.yaml @@ -0,0 +1,118 @@ +interactions: + - request: + body: + '{"input": "Jane, 54 years old", 
"model": "gpt-4.1-nano", "text": {"format": + {"type": "json_schema", "strict": true, "name": "Person", "schema": {"description": + "Simple structured output for basic tests.", "properties": {"name": {"title": + "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "409" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVy46jOhDd5yssrzstCCGv3Wzv4mqku5yMUAUXtKeNzbXLrYla+feRTQCT7p4d + 1Kk61ONU8b5ijEvBT4xbdH2ViW2e54dts88ODeAhy3YHzMoC80YU20N+LC/Ffre5lJDvD8Vxeyn4 + U6Awl19Y00hjtMPBXlsEQlFBwPJ9eSyLTZZtIuYIyLsQU5uuV0gohqAL1K+tNV6HvBpQDgezVErq + lp/Y+4oxxngPV7QhXuAbKtOj5SvGbtEZrTUB016paJB6/EolkEAqt0QdWV+TNHph7+B3ZTz1nioy + r/gRJGNUVYNa0nVGoAqZtT2tt8/5WoM26022KdfZdp1v702LxPzEfsR6hqqmeXSu/XocmxLKOozj + mGXNcYflTkBZZNhE5shC1x4jDzoHLc7AV32PYG00oZ6TShNb0I5dwd80RUcH0NoQjJ388XMBKtP2 + 1lw+QSLRifH3M9fQ4Zmfzvwf0HjmT2cObTCU2xufQm73p4mFW6NiZuCcdASaBufgGJ14DxaUQrUc + Glk/6Ku3+CaNd9Uo4SoOYhpqb03XU1VD/YLVK15TzCI4oxfqxKYxlhKn0HrfdWDHyEmsDhqkayUF + apKNxIVwHdo3WWNFchR7A14NTeeOjMW0CMKuRwvkozl/zu7W2Nx7Zo2xHczvyVB/OaMrV79gB7Mk + BLrayj5MdFENY3FSIe47Wmd0IrGB4pRI54GG/yeD9tiwdt6iYIOiWGMsu4CTNSN05J4TdYUR9GhJ + oltwz5mktlCXpEET/wb46QG8F+3IhsEl4G0p6PZvzN/aL4mlJmyHo/So2sU3uMX/vbQoFls3FvUh + l+k9WaA5n4dRpBs73OgEASFkmAeo72lj48ldPaQZ2xRPfNDaKsH4G9qLcZKuw7kR0nfzIR527cXI + 
elhOT4ZPwHwHOJm+Sq5DNhn7VMfW6xpGCQnp4KLGv4Z36aC41IujvSuePtqTP8G0CnG9xRyYLUp9 + /Bfk2WfAZ7zThfiKmgyBmsF9MbXQu+VF6JBAAMX9uq1ufwAAAP//AwDCUO03xAcAAA== + headers: + CF-RAY: + - 989003e3ef167775-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:22 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:22 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "597" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "600" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999918" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_9e775516b46738362252ab68b2337ee7 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_moderation.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_moderation.yaml new file mode 100644 index 0000000000..73a6ea33dc --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_moderation.yaml @@ -0,0 +1,124 @@ +interactions: + - request: + body: + '{"input": [{"role": "system", "content": "Determine if the user input violates + specific guidelines and explain if they do."}, 
{"role": "user", "content": "How + do I prepare for a job interview?"}], "model": "gpt-4.1-nano", "text": {"format": + {"type": "json_schema", "strict": true, "name": "ContentCompliance", "schema": + {"description": "Structured output for moderation tests.", "properties": {"is_violating": + {"title": "Is Violating", "type": "boolean"}, "category": {"anyOf": [{"type": + "string"}, {"type": "null"}], "title": "Category"}, "explanation_if_violating": + {"anyOf": [{"type": "string"}, {"type": "null"}], "title": "Explanation If Violating"}}, + "required": ["is_violating", "category", "explanation_if_violating"], "title": + "ContentCompliance", "type": "object", "additionalProperties": false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "808" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA9RWTY/iOBC98yssn6GVAOluuLb2MKdZaaW9DKOoklRoTzu2xy6jRi3++8oOCQ4f + 0lz3Bn7l5/p4VZWvGWNcNHzLuEVnygzWTZEjPBebYpO9rLLs+RWzYoXrFmH9mm+yqqpbzKt8/fKa + Py+Lhs8Dha5+YU0DjVYO+/PaIhA2JQQsfyk2xWqZZUXEHAF5F+7UujMSCc9kFdQfe6u9Cn61IB32 + x0JKofZ8y75mjDHGDRzRhvsNHlBqg5bPGDtFY7RWB0x5KeOBUMMrZYMEQrop6sj6moRWk/MOPkvt + yXgqSX/gLUhay7IGOaXrdIMyeLY3tFg/5QsFSi+W2bJYZOtFvj4nLRLzLfsR4+mjGuvRuf3jchRY + rc7lwGdcAeTNqqigKiJzZKGjwciDzsEeL8CjvEew1opQXZxKHZvQDlnBTxpvRwNQShMMmfzxcwJK + vTdWV3eQSLRl/GvHhSsPQksgofY7vu0VsOM1EO61Pe74NiZ6x/HTSFDxrVK0k0vB4sTHB07nX+Ob + 3GoZ4wDnhCNQ1BsHw2jEDViQEuW0xGR9r0Zj8SC0d+Ug+DKWbZSAsbozVNZQv2P5gccUswhOq4mW + 
sW21pcQoFMp3Hdjh5ihtBy3SsRQNKhKtwInMHdqDqLEkMbRGC172JeKOtMU0CMLOoAXy8Th/ys6n + sRRnz1ptO7j8TyTwy2lVuvodO7gIqEFXW2FCTSbRMMYVdPHeW6+xt6A+AapOtdmzbRPNXTHyf2Kj + eosN6zXIWm1Z6DgblcAIHbmnRJShFgYtCXQT5tBqidSusBCooF4k3xz7dzSbXxmds1FpLREUT9DT + ROGDfm/fAXX83k5a7rbxrl5zZIMvVwan+Z8zhNLc3J/8/zl/kI63IZKHsT7qzP9/7H9dImPf2kQW + s3tUiVPc4m8vLDZXwU41eF8xf5bb2R3nk6I97Lp0qvd7PEGgaUR4CuTfaRfFoTy7CjJWJn4GhAkz + SzB+QFtpJ+jYr6RG+O6yrPsJ+65F3Y9kT5qPwGVXcNKmTDZINh6adHpZr2oYpkUjHFRy+LLwcROO + o02oyWLPly/zWyD5XBgnYJzqzeVmNon1+oNhubwH3OMdF8MjatIEMvF4vRmT6N10E3RI0ADFYXqa + nf4DAAD//wMABOBtjOoJAAA= + headers: + CF-RAY: + - 989003f42a503698-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:26 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:26 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "1262" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "1272" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999854" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 2ms + x-request-id: + - req_ce1943127f55123a58f6142eb40a2f60 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_output_fallback.yaml 
b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_output_fallback.yaml new file mode 100644 index 0000000000..51cb27ad67 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_output_fallback.yaml @@ -0,0 +1,118 @@ +interactions: + - request: + body: + '{"input": "Alex, 29", "model": "gpt-4.1-nano", "text": {"format": {"type": + "json_schema", "strict": true, "name": "Person", "schema": {"description": "Simple + structured output for basic tests.", "properties": {"name": {"title": "Name", + "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "399" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVXW+jOhB9z6+w/NxUQBLy8bZ/4GqlfdxcoQkeqLfGZu1x1ajKf7+yCWDS9r7B + HPswM+fM8LFijEvBT4xbdH2VHZp9Lpqd2BfN9rgtsqw8YLbboBBlkR3y4+YAR7iUx6LYbPJjlu/5 + U6Awlz9Y00hjtMMhXlsEQlFBwPL97rjbFFm+iZgjIO/Cndp0vUJCMVy6QP3aWuN1yKsB5XAIS6Wk + bvmJfawYY4z3cEUb7gt8Q2V6tHzF2C0eRmtNwLRXKgakHr9SCSSQyi1RR9bXJI1exDt4r4yn3lNF + 5hU/g2SMqmpQS7rOCFQhs7an9fY5X2vQZl1kxW6dbdf59t60SMxP7HesZ6hq0qNz7bdyNHm5KaMc + xzK7ZJiVZXbImrwY5IgsdO0x8qBz0OIMfNf3CNZGE+o5qTSxBe3YFXyn6XY8AFobgrGTv/9dgMq0 + 
vTWXL5BIdGL848w1dHjmpzP/ofD9zJ/OHNoQKI43Pl253Z8mFm6NipmBc9IRaBoOh4PxEO/BglKo + lqKR9YO/eotv0nhXjRauohCTqL01XU9VDfULVq94TTGL4IxeuBObxlhKDoXW+64DO96czOqgQbpW + UqAm2UhcGNehfZM1ViRHszfg1dB07shYTIsg7Hq0QD6G8+fsHo3NvWfWGNvB/J6I+scZXbn6BTuY + LSHQ1Vb2QdFFNYxFpcK9n2id0YnFBopTYp0HGv5LBu+xYey8RcEGR7HGWHYBJ2tG6Mg9J+4KEvRo + SaJbcM+ZpLFQl6TBE/8E+OkBvBftyAbhEvC2NHT7f8w/2m+JpSZsh6X06NrFN7jFv15aFIupG4v6 + lMv0ngzQnM+DFOnEDjs6QUAIGfQA9TNtbFy5q4c0Y5viig9eWyUYf0N7MU7SdVg3QvpuXsTDrL0Y + WQ/D6cnwCZj3ACfTV8l2yKZgn/rYel3DaCEhHVzU+NfwLhWKS71Y2mX+9Dme/AmmUYjjLeaL2aLU + x39Bnn0FfMU7bYjvqMkQqBnc51MLvVtuhA4JBFCcr9vq9h8AAAD//wMATUYSOsQHAAA= + headers: + CF-RAY: + - 9890042a1d5f6555-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:37 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:37 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "3892" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "4016" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999920" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_c8c7a0bfdda97cccd8b3c69840eafdea + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_response_id.yaml 
b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_response_id.yaml new file mode 100644 index 0000000000..81949f3f08 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_response_id.yaml @@ -0,0 +1,118 @@ +interactions: + - request: + body: + '{"input": "David, 52", "model": "gpt-4.1-nano", "text": {"format": {"type": + "json_schema", "strict": true, "name": "Person", "schema": {"description": "Simple + structured output for basic tests.", "properties": {"name": {"title": "Name", + "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "400" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVy46rOBDd5yssrzstSEJeu5FmPbrSLG9GqDAF7dvGZuxydKNW/n1kE8Cku2cH + daoO9ThVfKwY47LmZ8Ytur7MdtXx0OD+BAjHY4VZtj9iVmybrThl4pifChBFA9us2Gb7A+DhyF8C + hal+oaCRxmiHg11YBMK6hIDlh+JUbDdZfoqYIyDvQowwXa+QsB6CKhDvrTVeh7waUA4Hs1RK6paf + 2ceKMcZ4Dze0Ib7GKyrTo+Urxu7RGa01AdNeqWiQevxKWSOBVG6JOrJekDR6Ye/gd2k89Z5KMu/4 + GSRjVClALek6U6MKmbU9rXev+VqDNutNtinW2W6d7x5Ni8T8zH7Geoaqpnl0rv1+HLtsX2RhHKfj + EapNcdrkTbUHyCNzZKFbj5EHnYMWZ+C7vkdQGE2o56TSxBa0Y1fwN03R0QG0NgRjJ3/+swCVaXtr + 
qi+QSHRm/OPCNXR44ecL/xOusr7wlwuHNliKzZ1PMffH00TDrVExNXBOOgJNg3NwjE68BwtKoVpO + jawfBNZbvErjXTlquIyTmKbaW9P1VAoQb1i+4y3FLIIzeiFPbBpjKXEKvfddB3aMnNTqoEG6lbJG + TbKRuFCuQ3uVAkuSo9ob8GroOndkLKZFEHY9WiAfzflr9rDG7j4ya4ztYH5PpvrLGV068YYdzJqo + 0Qkr+zDSRTWMxVGFuB9ondGJxgaKc6KdJxr+twziY8PeeYs1GyTFGmNZBU4KRujIvSbyCiPo0ZJE + t+CeM0ltoS5Jgyb+CvDLE/go2pENg0vA+1LR7f8x/9F+Syw1YTtcpWfVLr7BLf7rpcV6sXZjUZ9y + md6TDZrzeRpFurLDkU4QqGsZ5gHqR9rYeHNXT2nGNsUbH7S2SjB+RVsZJ+k23Jta+m6+xMOuvRkp + huX0ZPgEzIeAk+nL5Dxkk7FPdWy9FjBKqJYOKjX+NrxLB8WlXlztff7y2Z78CqZViOtdz4HZotTn + n0GefQV8xTtdiO+oyRCoGTzkUwu9W16EDglqoLhf99X9PwAAAP//AwDuAqkhxQcAAA== + headers: + CF-RAY: + - 9890045318ff9428-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:40 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:40 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "440" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "443" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999920" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_33d9e1e3e8d7d7a44c8375539ef79076 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_token_usage.yaml 
b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_token_usage.yaml new file mode 100644 index 0000000000..cc10ede0a3 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_token_usage.yaml @@ -0,0 +1,118 @@ +interactions: + - request: + body: + '{"input": "Lisa, 38 years old", "model": "gpt-4.1-nano", "text": {"format": + {"type": "json_schema", "strict": true, "name": "Person", "schema": {"description": + "Simple structured output for basic tests.", "properties": {"name": {"title": + "Name", "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "409" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVy27rOAzd5ysErZvCiZvnbvaDwQVmeTMwaIl2dStLHokqblDk3weSY1tO29nZ + POQxH4f0x4oxriQ/M+7Q91WxbZotnmrE/VaKuiyK/RGLXdls6215PG5Oh6PcgCwbuUOxOeIe+VOk + sPUvFDTSWOPvduEQCGUFEdscdqdduS02x4R5Ago+xgjb9RoJ5RBUg3hrnQ0m5tWA9jiYldbKtPzM + PlaMMcZ7uKKL8RLfUdseHV8xdkvO6JyNmAlaJ4My41cqiQRK+yXqyQVBypqFvYPflQ3UB6rIvuFn + kKzVlQC9pOusRB0za3tavzxv1gaMXW+L7W5dvKw3L/emJWJ+Zj9TPUNV0zw6334/jrI81UUcx2lf + Hw77AovisD/UUiTmxELXHhMPeg8tzsB3fU+gsIbQzEnliS1ox67gb5qikwMYYwnGTv78ZwFq2/bO + 
1l8giejM+MeFG+jwws8X/qfycOFPFw5tNJTHG59CbveniYU7q1Nm4L3yBIYG5+iYnHgPDrRGvRwa + uTDoq3f4rmzw1SjhKg1iGmrvbNdTJUC8YvWG1xxzCN6ahTqxaayjzCm2PnQduDFyEquHBulaKYmG + VKNwIVyP7l0JrEiNYm8g6KHp3JN1mBdB2PXogEIyb56LuzU1955ZY10H83s21F/emsqLV+xgloRE + L5zq40QX1TCWJhXjfqDz1mQSGyjOmXQeaPjfKmqPDWsXHEo2KIo11rEavBKM0JN/ztQVR9CjI4V+ + wT1nkttiXYoGTfwV4acH8F60JxcHl4G3paDb/2P+o/2WWBnCdjhKj6pdfIM7/Dcoh3KxdWNRn3KZ + 3rMFmvN5GEW+scONzhCQUsV5gP6RNzad3NVDmqlN6cRHra0yjL+jq61XdB3OjVShmw/xsGuvVolh + OQNZPgHzHeBk+yq7DsVk7HMdu2AEjBKSykOtx79G8PmguDKLo70vnz7bsz/BtAppveUcWCxKffwX + bIqvgK94pwvxHTVZAj2Dh3JqYfDLi9AhgQRK+3Vb3f4DAAD//wMAyBXM9cQHAAA= + headers: + CF-RAY: + - 9890044c481994a1-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:39 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:39 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "781" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "785" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999918" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_53552a6e423de93e4d5b14e0ca7760e0 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_instructions.yaml 
b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_instructions.yaml new file mode 100644 index 0000000000..4a9a2a92b3 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_instructions.yaml @@ -0,0 +1,119 @@ +interactions: + - request: + body: + '{"input": "Robert, 45", "instructions": "You are an expert at extracting + structured data.", "model": "gpt-4.1-nano", "text": {"format": {"type": "json_schema", + "strict": true, "name": "Person", "schema": {"description": "Simple structured + output for basic tests.", "properties": {"name": {"title": "Name", "type": "string"}, + "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": + "Person", "type": "object", "additionalProperties": false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "469" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xVTW/jOAy951cIOjeFndhNmtv+gcVg9rSYLAxaol1NZckj0UWDIv99ITm25Uw7 + N5tPfOLHI/WxYYwryU+MO/R9lR33u91hL+qmyYpjUWTZ0xGzct/kTVmIY/6c1bU8NE9PCLuiFFBm + /CFQ2PonCpporPE42oVDIJQVBCw/lM/lfpflx4h5Ahp88BG26zUSytGpBvHaOjuYEFcD2uNoVlor + 0/IT+9gwxhjv4YIu+Et8Q217dHzD2DUeRudswMygdTQoM91SSSRQ2q9RT24QpKyJEf1rBwYOGRiG + 7z06YkAM38mBIGVaNp4eHEomgeBxDLyD98oO1A9UkX1Fs7oigGStrgTo9eWdlajDrW1P2+Ix3xow + 
drvLduU2K7Z5cStxJOYn9iNmP9Zg7l7n26+btytyUYTmgSyPIjs+78U+OxzyXWSOLHTpMfKg99Di + AnzVpQgKawjNElQa2Ip2qgq+0+wdD4AxlmCq+4//VqC2be9s/QkSiU6Mf5y5gQ7P/HTm322Njs78 + 4cyhDaaivPLZ6Xr7mnm4szrGBt4rT2BoPBwOxkO8Bwdao163jdww6rF3+Kbs4KtJ8lVsxdzW3tmu + p0qAeMHqFS8p5hC8NSs1Y9NYR8mhUPyh68BNnrO4PTRIl0pJNKQahSuhe3RvSmBFahqOBgY9lp17 + sg7TJAi7Hh0EKYcJfcxu1ljeW2SNdR0s/0lbf3prKi9esINFFBK9cKoPPV1lw1jsVfD7hs5bk4hs + pDgl4rmj4f+ooL508EZNscY6VoNXghF68o+JvkILwvAq9CvuJZLUFvJSNGri7wA/3IG3pD250LgE + vK4l3f6J+a/2S2JlCNtxid2rdnUHd/hrUA7lau6mpH6LZf5PRmiJ564V6cyOOz1BQEoV+gH6W1rY + uKI3d2HGMsUnIWhtk2D8DV1tvaLLuHCkGrplcY+z9mKVGIdzIMtnYNkEnGxfJfshm419qmM3GAGT + hKTyUOvplRl82iiuzGptH4qH3+3JyzGPQhxvuThmq1TvX4M8+wz4jHfeEF9RkyXQC3gs5hIOfr0R + OiQIr1Sgv26u/wMAAP//AwB7ygmw9AcAAA== + headers: + CF-RAY: + - 989004479f459589-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:38 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:38 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "498" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "503" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999907" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_7dfb67375bcb940c15048ce12b2c96d4 + status: + code: 200 + message: OK +version: 1 diff --git 
a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_message_history.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_message_history.yaml new file mode 100644 index 0000000000..c4f747df9d --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_message_history.yaml @@ -0,0 +1,120 @@ +interactions: + - request: + body: + '{"input": [{"role": "system", "content": "You are a helpful assistant that + extracts person information."}, {"role": "user", "content": "Extract info: John + Smith, 42 years old"}], "model": "gpt-4.1-nano", "text": {"format": {"type": + "json_schema", "strict": true, "name": "Person", "schema": {"description": "Simple + structured output for basic tests.", "properties": {"name": {"title": "Name", + "type": "string"}, "age": {"title": "Age", "type": "integer"}}, "required": + ["name", "age"], "title": "Person", "type": "object", "additionalProperties": + false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "556" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAAwAAAP//fFXBbtswDL3nKwSdm8JO4tbJbdcdhgE7LoPBWLSjVZY8iSoWFPn3QXJs + y2m7m80nPZN8j/TbijEuBT8wbtH1VbY7AdRFWWyzoszgKcueSsyKLW6LRpRlvn+CvNmJooSyFKen + 
5z3yh0BhTr+xppHGaHeL1xaBUFQQsPy52BfbTZZtI+YIyLtwpzZdr5BQDJdOUL+01ngd8mpAORzC + UimpW35gbyvGGOM9XNCG+wJfUZkeLV8xdo2H0VoTMO2VigGpx69UAgmkckvUkfU1SaMX8Q7+VsZT + 76ki84LvQTJGVTWoJV1nBKqQWdvTeveYrzVos95km2Kd7db57ta0SMwP7GesZ6hq0qNz7edy7DbP + RZRjvyvKDEsBRQb4LE6RObLQpcfIg85BizPwWd8jWBtNqOek0sQWtGNX8C9Nt+MB0NoQjJ38+WsB + KtP21pw+QCLRgfG3I9fQ4ZEfjvyrOWv2o5N0PvKHI4c2hHebK58uXm9PExe3RsX8wDnpCDQNh8PB + eIj3YEEpVEvpyPrBZb3FV2m8q0YjV1GOSdremq6nqob6jNULXlLMIjijFx7FpjGWkkNBAN91YMeb + k2UdNEiXSgrUJBuJC/s6tK+yxorkaPkGvBpazx0Zi2kRhF2PFsjHcP6Y3aKxxbfMGmM7mN8TaX87 + oytXn7GD2RgCXW1lH3RdVMNY1Cvc+47WGZ0YbaA4JAa6o+E/ZHAgG4bPWxRs8BVrjGUncLJmhI7c + Y+KxIEGPliS6BfecSRoLdUkaPPEtwA934K1oRzYIl4DXpa3b/zF/aT8llpqwHVbTvWsX3+AW/3hp + USxmbyzqXS7TezJGcz53UqRzO2zqBAEhZNAD1Pe0sXHxru7SjG2Kiz54bZVg/BXtyThJl2HpCOm7 + eR0Ps3Y2sh6G05PhEzBvA06mr5IdkU3BPvWx9bqG0UJCOjip8d/hXSoUl3qxusv84X08+R9MoxDH + W8wXs0Wp93+EPP8I+Ih32hCfUZMhUDO430wt9G65ETokEEBxvq6r6z8AAAD//wMAXbp6nMoHAAA= + headers: + CF-RAY: + - 989003e99d1063d3-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:24 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:24 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "1010" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "1013" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999900" + x-ratelimit-reset-requests: + - 
12ms + x-ratelimit-reset-tokens: + - 1ms + x-request-id: + - req_2c79ede5e483b30fcc17103e4776ce0c + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_reasoning.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_reasoning.yaml new file mode 100644 index 0000000000..6d1a81c18a --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_reasoning.yaml @@ -0,0 +1,119 @@ +interactions: + - request: + body: + '{"input": "Extract person info: Sarah, 28", "model": "gpt-5-nano", "reasoning": + {"effort": "low", "summary": null}, "text": {"format": {"type": "json_schema", + "strict": true, "name": "Person", "schema": {"description": "Simple structured + output for basic tests.", "properties": {"name": {"title": "Name", "type": "string"}, + "age": {"title": "Age", "type": "integer"}}, "required": ["name", "age"], "title": + "Person", "type": "object", "additionalProperties": false}}}}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "468" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA3xV0Y6jOgx971egPLcrSqEwfdsfuFppH7crZBLDZCYk3MTM3WrUf79KaCF0ZvYN + fGLn2Md23jdJwqRgp4RZdEOdpg0XPC8xLdPyWOVpeqwwLQ5YiuaQVvunw9O+yo8C8gx4AaIo2NaH + 
MM0LcrqHMdrhZOcWgVDU4LF9WTwVhyxNy4A5Ahqd9+GmHxQSismpAf7aWTNqz6sF5XAyS6Wk7tgp + ed8kSZKwAS5ovb/AN1RmQMs2SXINh9Fa4zE9KhUMUt9vqQUSSOXWqCM7cpJGr+w9/KnNSMNINZlX + /AiSMarmoNbheiNQeWbdQLtip0GbXZZmxS6tdml5K1kIy07Jr5DNlNOihvtai6rat9xrUUGKeZ41 + ZYWAKKbyhSB0GXBSA5zRvmoz5Ma+B3vxF/8Otuv2MwK9675m8JRhWXkGTdtm+yYtxbHIRMXzjwx6 + dA46jO7/QvYAcqMJ9VKVmNgq7F0U/EOzdzgAWhuCu5C/fq9AZbrBmuYTJAQ6Jez9zDT0eGanM/sJ + Fp7PbHtm0HlLVl3Z7HO9fc1hmDUqUAPnpCPQNB32B8MhNoAFpVCtm4bsOPX3YPFNmtHV9xGqgxJz + Uw3W9APVHPgz1q94ibFF53k6sG2NDTkp89+tRpH43nWeFgct0qWWAjXJVuJqchzaN8mxJnmfthZG + NZWdOTIW4ywI+wEt0BjM+2/pzRrKe6PWGtvD8h/J+uKMrh1/xh6WphDouJWD1zTiFSCvlff7gdYZ + HTXZFOIUNc9DGPZT+u5LprkfLYpk6qmkNTZpwEmeEDpy36L+8hoMaEmiW8VemMQ2n5ekqSn+8fD2 + Abwl7ch65SLwum7p7m+Rv3dfBpaasJu24mPbru5gFv8dpUWxmrt7Uh+4zP/RCC18HqSIZ3Z6JCIE + hJBeD1A/4sKGnb95oBnKFN4Y32vx5mJvaBvjJF2mhSPk2C8vwTRsz0byaTpHMmwGlk3AyAx1tB/S + 2TjEfWxHzeHeQkI6aNT92RpdLBSTevVqHA/bj/boKZpHIcy3WBzTVaqPj1GVfQZ8FndeERGnfBWb + DIFa0H1ezEUc3Xon9EgggMKEXTfX/wEAAP//AwDuYeLERwgAAA== + headers: + CF-RAY: + - 989004088eaebeba-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:29 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:29 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "1619" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "1622" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + 
x-ratelimit-remaining-tokens: + - "3999720" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 4ms + x-request-id: + - req_a6b039cec417363232b12e6156e8de61 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_tools.yaml b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_tools.yaml new file mode 100644 index 0000000000..ff38c76410 --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/cassettes/test_responses_parse/test_responses_parse_with_tools.yaml @@ -0,0 +1,125 @@ +interactions: + - request: + body: + '{"input": "What''s the weather in Paris?", "model": "gpt-4.1-nano", "text": + {"format": {"type": "json_schema", "strict": true, "name": "WeatherInfo", "schema": + {"description": "Structured output for tool call tests.", "properties": {"location": + {"title": "Location", "type": "string"}, "temperature": {"title": "Temperature", + "type": "integer"}, "conditions": {"title": "Conditions", "type": "string"}}, + "required": ["location", "temperature", "conditions"], "title": "WeatherInfo", + "type": "object", "additionalProperties": false}}}, "tools": [{"type": "function", + "name": "get_weather", "description": "Get the current weather for a location", + "parameters": {"type": "object", "properties": {"location": {"type": "string", + "description": "The city and state, e.g. 
San Francisco, CA"}}, "required": ["location"]}}]}' + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + connection: + - keep-alive + content-length: + - "815" + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.99.7 + x-stainless-arch: + - arm64 + x-stainless-async: + - "false" + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.99.7 + x-stainless-read-timeout: + - "600" + x-stainless-retry-count: + - "0" + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.9.5 + method: POST + uri: https://api.openai.com/v1/responses + response: + body: + string: !!binary | + H4sIAAAAAAAAA8xWS2/jNhC++1cQOseGZEuOk1uxyBYFim2ABCjQzUIYkyObG4rUksMkRpD/XpCy + 9XDsYnfbQ2/2zPCbmW9eep0wlkiRXLPEomvKtLparLNCrAvIBF7O03S5wrRY4BIu83yVXS1gPefL + VVoscV0si4onFwHCrL8ipwOM0Q5bObcIhKKEoMsui6tiETCjzhGQd+ENN3WjkFC0j9bAHzfWeB3i + qkA5bMVSKak3yTV7nTDGWNLADm14L/AJlWnQJhPG3qIxWmuCTnulokDqg5dSIIFUbqx1ZD0nafRI + XsNLaTw1nkoyj/heScaokoMaw9VGoAqRbRqa5rNsqkGb6TydF9M0n2b5nrQInFyzzzGfNquuHhU/ + X43LRQ5pqMZquUxXWSbmBa6LebWIwBGEdg1GGK9jWjHIXn2O/KgEu/E1aor614dEGQ4B4yG5fkhu + wUr3kLz15gG5bIOOP2/uIN3JesPnt/L+013K7z/OdfF017/QUMfgNkjlMwJt29Lti/djVOSrRUsF + zvmqENnV5Wq1KOb5/4IK/OumKZrn52d5ay2+5NW31Sd384f+HiomjH2JfdKABaVQjZuNrG/norH4 + JI135WH02gC6ZmysqRsqOfAtlo+4G+osgjN6NFVYVcbSwChQ5Osa7OFlN2QOKqRdKQVqkpXE0cA5 + tE+SY0nyMKQVeEXJfvaNxWEShHWDFshHcTZL99IX6iOrjK2h/z+o7FdndOn4FmvomRXouJVNqNYo + mwHpf7aE/6YrM+iHFqd38w4ruYvLwlsUrB1iVhnLQnlYKA8jdORmHWRbhAYtSXQjYMa6jjqSh+wk + qRjm7weTiyODffqObCjhQPk2tDwi95yX+4HVGUdSE24ODXrKEzdayMMWPefoQ2/0/QlNTjhMLH7z + 0qLodugxp2dpOBd1J/4ycNNFfqpfBmHvr+BAA6IFBnU7bIB41CZH+cSs4xENUzFch8kT2rVxksIE + JjUK6ev+1LVbYWskj0GApza4qHDvz8vxOjw3McmvSIy2yLi3FjWx/XqKvQ7sHccnV1mnDEusRkI7 + 7ox/4O6nRmbcQEftdZTffchN0o6BFixcAbxgONvM2B1o9tGC5tJxc8E+/PIfdOLJxvq37dGdCDJN + qcymsWYdANJO2Ay3qfW6Iy4R0sFaHb65vINNvxwSqUefPMvs4r188B312t8/vkXRP0xHbXz8JZXn + 
pxSncLs7dQ6aDIHqlVladPPh3fgw1UgggOKGf5u8/Q0AAP//AwBfT5q8AwsAAA== + headers: + CF-RAY: + - 98900400c8ac653a-LHR + Connection: + - keep-alive + Content-Encoding: + - gzip + Content-Type: + - application/json + Date: + - Fri, 03 Oct 2025 22:53:27 GMT + Server: + - cloudflare + Set-Cookie: + - __cf_bm=REDACTED; + path=/; expires=Fri, 03-Oct-25 23:23:27 GMT; domain=.api.openai.com; HttpOnly; + Secure; SameSite=None + - _cfuvid=REDACTED; + path=/; domain=.api.openai.com; HttpOnly; Secure; SameSite=None + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + alt-svc: + - h3=":443"; ma=86400 + cf-cache-status: + - DYNAMIC + openai-organization: + - user-REDACTED + openai-processing-ms: + - "885" + openai-project: + - proj_REDACTED + openai-version: + - "2020-10-01" + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-envoy-upstream-service-time: + - "891" + x-ratelimit-limit-requests: + - "5000" + x-ratelimit-limit-tokens: + - "4000000" + x-ratelimit-remaining-requests: + - "4999" + x-ratelimit-remaining-tokens: + - "3999661" + x-ratelimit-reset-requests: + - 12ms + x-ratelimit-reset-tokens: + - 5ms + x-request-id: + - req_88d9fb3bc820507488912541f9936c44 + status: + code: 200 + message: OK +version: 1 diff --git a/packages/opentelemetry-instrumentation-openai/tests/traces/test_responses_parse.py b/packages/opentelemetry-instrumentation-openai/tests/traces/test_responses_parse.py new file mode 100644 index 0000000000..97dd4d6b0c --- /dev/null +++ b/packages/opentelemetry-instrumentation-openai/tests/traces/test_responses_parse.py @@ -0,0 +1,537 @@ +"""Tests for OpenAI Responses.parse method with structured outputs.""" + +import json +import pytest +from openai import OpenAI, AsyncOpenAI +from opentelemetry.instrumentation.openai.utils import is_reasoning_supported +from opentelemetry.sdk.trace.export.in_memory_span_exporter import InMemorySpanExporter +from opentelemetry.trace import StatusCode +from pydantic import BaseModel 
from typing import Optional


# NOTE: these models double as the JSON schema sent to the API via
# `text_format=...`; changing their fields would change the request payload
# and break the recorded VCR cassettes.
class Person(BaseModel):
    """Simple structured output for basic tests."""

    name: str
    age: int


class ContentCompliance(BaseModel):
    """Structured output for moderation tests."""

    is_violating: bool
    category: Optional[str]
    explanation_if_violating: Optional[str]


class WeatherInfo(BaseModel):
    """Structured output for tool call tests."""

    location: str
    temperature: int
    conditions: str


@pytest.mark.vcr
def test_responses_parse_basic(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test basic responses.parse with simple structured output."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Jane, 54 years old",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.system"] == "openai"
    assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
    # Response model is the fully-qualified snapshot name from the cassette.
    assert span.attributes["gen_ai.response.model"] == "gpt-4.1-nano-2025-04-14"
    assert span.attributes["gen_ai.prompt.0.content"] == "Jane, 54 years old"
    assert span.attributes["gen_ai.prompt.0.role"] == "user"
    assert span.attributes["gen_ai.completion.0.role"] == "assistant"

    # Verify the structured output was captured.
    assert "gen_ai.completion.0.content" in span.attributes
    output_content = span.attributes["gen_ai.completion.0.content"]
    # The output should be JSON serialized.
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "Jane"
    assert parsed_output["age"] == 54

    # Verify token usage.
    assert "gen_ai.usage.input_tokens" in span.attributes
    assert "gen_ai.usage.output_tokens" in span.attributes
    assert span.attributes["gen_ai.usage.input_tokens"] > 0
    assert span.attributes["gen_ai.usage.output_tokens"] > 0


@pytest.mark.vcr
def test_responses_parse_with_message_history(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test responses.parse with message history."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input=[
            {
                "role": "system",
                "content": "You are a helpful assistant that extracts person information.",
            },
            {
                "role": "user",
                "content": "Extract info: John Smith, 42 years old",
            },
        ],
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.system"] == "openai"

    # Check system message
    assert (
        span.attributes["gen_ai.prompt.0.content"]
        == "You are a helpful assistant that extracts person information."
    )
    assert span.attributes["gen_ai.prompt.0.role"] == "system"

    # Check user message
    assert (
        span.attributes["gen_ai.prompt.1.content"]
        == "Extract info: John Smith, 42 years old"
    )
    assert span.attributes["gen_ai.prompt.1.role"] == "user"

    # Check response
    assert span.attributes["gen_ai.completion.0.role"] == "assistant"
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "John Smith"
    assert parsed_output["age"] == 42


@pytest.mark.vcr
def test_responses_parse_moderation(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test responses.parse for content moderation use case."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input=[
            {
                "role": "system",
                "content": "Determine if the user input violates specific guidelines and explain if they do.",
            },
            {
                "role": "user",
                "content": "How do I prepare for a job interview?",
            },
        ],
        text_format=ContentCompliance,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.system"] == "openai"

    # Verify structured output — benign input should not be flagged.
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["is_violating"] is False
    assert parsed_output["category"] is None
    assert parsed_output["explanation_if_violating"] is None


@pytest.mark.vcr
def test_responses_parse_with_tools(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test responses.parse with tool definitions."""
    tools = [
        {
            "type": "function",
            "name": "get_weather",
            "description": "Get the current weather for a location",
            "parameters": {
                "type": "object",
                "properties": {
                    "location": {
                        "type": "string",
                        "description": "The city and state, e.g. San Francisco, CA",
                    }
                },
                "required": ["location"],
            },
        }
    ]

    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="What's the weather in Paris?",
        text_format=WeatherInfo,
        tools=tools,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.system"] == "openai"

    # Verify tool was captured
    assert "llm.request.functions.0.name" in span.attributes
    assert span.attributes["llm.request.functions.0.name"] == "get_weather"
    assert (
        span.attributes["llm.request.functions.0.description"]
        == "Get the current weather for a location"
    )

    # When tools are used, the response may contain tool calls instead of content
    assert "gen_ai.completion.0.tool_calls.0.name" in span.attributes


@pytest.mark.vcr
@pytest.mark.skipif(
    not is_reasoning_supported(),
    reason="Reasoning is not supported in older OpenAI library versions",
)
def test_responses_parse_with_reasoning(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test responses.parse with reasoning parameters."""
    openai_client.responses.parse(
        model="gpt-5-nano",
        input="Extract person info: Sarah, 28",
        text_format=Person,
        reasoning={"effort": "low", "summary": None},
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.request.reasoning_effort"] == "low"
    # NOTE(review): a None summary appears to be recorded as an empty sequence
    # attribute by the instrumentation — hence the `== ()` comparison.
    assert span.attributes["gen_ai.request.reasoning_summary"] == ()
    assert span.attributes["gen_ai.response.reasoning_effort"] == "low"

    # Reasoning tokens should be tracked (may legitimately be 0).
    assert span.attributes["gen_ai.usage.reasoning_tokens"] >= 0

    # Verify structured output still works
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "Sarah"
    assert parsed_output["age"] == 28


@pytest.mark.vcr
@pytest.mark.asyncio
async def test_async_responses_parse_basic(
    instrument_legacy,
    span_exporter: InMemorySpanExporter,
    async_openai_client: AsyncOpenAI,
):
    """Test async responses.parse with basic structured output."""
    await async_openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Mike, 35 years old",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.attributes["gen_ai.system"] == "openai"
    assert span.attributes["gen_ai.request.model"] == "gpt-4.1-nano"
    assert span.attributes["gen_ai.prompt.0.content"] == "Mike, 35 years old"
    assert span.attributes["gen_ai.prompt.0.role"] == "user"

    # Verify structured output
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "Mike"
    assert parsed_output["age"] == 35


@pytest.mark.vcr
@pytest.mark.asyncio
async def test_async_responses_parse_with_message_history(
    instrument_legacy,
    span_exporter: InMemorySpanExporter,
    async_openai_client: AsyncOpenAI,
):
    """Test async responses.parse with message history."""
    await async_openai_client.responses.parse(
        model="gpt-4.1-nano",
        input=[
            {
                "role": "system",
                "content": "Extract person data from text.",
            },
            {
                "role": "user",
                "content": "Parse this: Emily Jones, age 31",
            },
        ],
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert (
        span.attributes["gen_ai.prompt.0.content"] == "Extract person data from text."
    )
    assert span.attributes["gen_ai.prompt.0.role"] == "system"
    assert (
        span.attributes["gen_ai.prompt.1.content"] == "Parse this: Emily Jones, age 31"
    )
    assert span.attributes["gen_ai.prompt.1.role"] == "user"

    # Verify structured output
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "Emily Jones"
    assert parsed_output["age"] == 31


# No @pytest.mark.vcr: the request is expected to fail authentication before a
# useful cassette could be recorded.
def test_responses_parse_exception(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test that exceptions in responses.parse are properly traced."""
    # Save and restore the key so a shared (non-function-scoped) client
    # fixture is not left broken for subsequent tests.
    original_api_key = openai_client.api_key
    openai_client.api_key = "invalid"
    try:
        with pytest.raises(Exception):
            openai_client.responses.parse(
                model="gpt-4.1-nano",
                input="Test input",
                text_format=Person,
            )
    finally:
        openai_client.api_key = original_api_key

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.status.status_code == StatusCode.ERROR
    assert span.attributes.get("error.type") == "AuthenticationError"

    # Verify exception event
    events = span.events
    assert len(events) == 1
    event = events[0]
    assert event.name == "exception"
    assert event.attributes["exception.type"] == "openai.AuthenticationError"
    assert "Error code: 401" in event.attributes["exception.message"]


@pytest.mark.asyncio
async def test_async_responses_parse_exception(
    instrument_legacy,
    span_exporter: InMemorySpanExporter,
    async_openai_client: AsyncOpenAI,
):
    """Test that exceptions in async responses.parse are properly traced."""
    # Save and restore the key so a shared (non-function-scoped) client
    # fixture is not left broken for subsequent tests.
    original_api_key = async_openai_client.api_key
    async_openai_client.api_key = "invalid"
    try:
        with pytest.raises(Exception):
            await async_openai_client.responses.parse(
                model="gpt-4.1-nano",
                input="Test input",
                text_format=Person,
            )
    finally:
        async_openai_client.api_key = original_api_key

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"
    assert span.status.status_code == StatusCode.ERROR
    assert span.attributes.get("error.type") == "AuthenticationError"

    # Verify exception event
    events = span.events
    assert len(events) == 1
    event = events[0]
    assert event.name == "exception"
    assert event.attributes["exception.type"] == "openai.AuthenticationError"
    assert "Error code: 401" in event.attributes["exception.message"]


@pytest.mark.vcr
def test_responses_parse_output_fallback(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test that output is captured even if output_parsed is not available."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Alex, 29",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    # Should have completion content even if parsing fails
    assert "gen_ai.completion.0.content" in span.attributes
    assert span.attributes["gen_ai.completion.0.role"] == "assistant"

    # Verify response was successful
    assert span.status.status_code != StatusCode.ERROR


@pytest.mark.vcr
def test_responses_parse_with_instructions(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test responses.parse with system instructions."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Robert, 45",
        instructions="You are an expert at extracting structured data.",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"

    # System instructions should be the first prompt
    assert (
        span.attributes["gen_ai.prompt.0.content"]
        == "You are an expert at extracting structured data."
    )
    assert span.attributes["gen_ai.prompt.0.role"] == "system"

    # User input should be next
    assert span.attributes["gen_ai.prompt.1.content"] == "Robert, 45"
    assert span.attributes["gen_ai.prompt.1.role"] == "user"

    # Verify structured output
    output_content = span.attributes["gen_ai.completion.0.content"]
    parsed_output = json.loads(output_content)
    assert parsed_output["name"] == "Robert"
    assert parsed_output["age"] == 45


@pytest.mark.vcr
def test_responses_parse_token_usage(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test that token usage is properly tracked for responses.parse."""
    openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Lisa, 38 years old",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    # Verify all token metrics are present
    assert "gen_ai.usage.input_tokens" in span.attributes
    assert "gen_ai.usage.output_tokens" in span.attributes
    assert "llm.usage.total_tokens" in span.attributes

    input_tokens = span.attributes["gen_ai.usage.input_tokens"]
    output_tokens = span.attributes["gen_ai.usage.output_tokens"]
    total_tokens = span.attributes["llm.usage.total_tokens"]

    assert input_tokens > 0
    assert output_tokens > 0
    assert total_tokens == input_tokens + output_tokens


@pytest.mark.vcr
def test_responses_parse_response_id(
    instrument_legacy, span_exporter: InMemorySpanExporter, openai_client: OpenAI
):
    """Test that response ID is properly captured."""
    response = openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="David, 52",
        text_format=Person,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    # Response ID should be present
    assert "gen_ai.response.id" in span.attributes
    response_id = span.attributes["gen_ai.response.id"]
    assert response_id.startswith("resp_")

    # Response ID should match the actual response
    assert response_id == response.id


@pytest.mark.vcr
@pytest.mark.asyncio
async def test_async_responses_parse_with_tools(
    instrument_legacy,
    span_exporter: InMemorySpanExporter,
    async_openai_client: AsyncOpenAI,
):
    """Test async responses.parse with tool definitions."""
    tools = [
        {
            "type": "function",
            "name": "search_database",
            "description": "Search for records in the database",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "The search query"}
                },
                "required": ["query"],
            },
        }
    ]

    await async_openai_client.responses.parse(
        model="gpt-4.1-nano",
        input="Find person: Tom",
        text_format=Person,
        tools=tools,
    )

    spans = span_exporter.get_finished_spans()
    assert len(spans) == 1
    span = spans[0]

    assert span.name == "openai.response"

    # Verify tool was captured
    assert "llm.request.functions.0.name" in span.attributes
    assert span.attributes["llm.request.functions.0.name"] == "search_database"

    # When tools are used, the response may contain tool calls instead of content
    # Verify either content or tool calls are present
    assert (
        "gen_ai.completion.0.content" in span.attributes
        or "gen_ai.completion.0.tool_calls.0.name" in span.attributes
    )