Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -163,3 +163,4 @@ chroma.sqlite3
#.idea/

logs/
playground/
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,8 @@
async_embeddings_create,
async_images_generate,
chat_completions_create,
openai_responses_create,
async_openai_responses_create,
embeddings_create,
images_edit,
images_generate,
Expand All @@ -32,7 +34,7 @@
logging.basicConfig(level=logging.FATAL)


class OpenAIInstrumentation(BaseInstrumentor): # type: ignore
class OpenAIInstrumentation(BaseInstrumentor): # type: ignore

def instrumentation_dependencies(self) -> Collection[str]:
return ["openai >= 0.27.0", "trace-attributes >= 4.0.5"]
Expand All @@ -54,6 +56,18 @@ def _instrument(self, **kwargs: Any) -> None:
async_chat_completions_create(version, tracer),
)

wrap_function_wrapper(
"openai.resources.responses",
"AsyncResponses.create",
async_openai_responses_create(version, tracer),
)

wrap_function_wrapper(
"openai.resources.responses",
"Responses.create",
openai_responses_create(version, tracer),
)

wrap_function_wrapper(
"openai.resources.images",
"Images.generate",
Expand Down
143 changes: 128 additions & 15 deletions src/langtrace_python_sdk/instrumentation/openai/patch.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,27 +7,121 @@
from opentelemetry.trace.propagation import set_span_in_context
from opentelemetry.trace.status import Status, StatusCode

from langtrace_python_sdk.constants.instrumentation.common import \
SERVICE_PROVIDERS
from langtrace_python_sdk.constants.instrumentation.common import SERVICE_PROVIDERS
from langtrace_python_sdk.constants.instrumentation.openai import APIS
from langtrace_python_sdk.instrumentation.openai.types import (
ChatCompletionsCreateKwargs, ContentItem, EmbeddingsCreateKwargs,
ImagesEditKwargs, ImagesGenerateKwargs, ResultType)
ChatCompletionsCreateKwargs,
ContentItem,
EmbeddingsCreateKwargs,
ImagesEditKwargs,
ImagesGenerateKwargs,
ResultType,
)
from langtrace_python_sdk.types import NOT_GIVEN
from langtrace_python_sdk.utils import set_span_attribute
from langtrace_python_sdk.utils.llm import (StreamWrapper,
calculate_prompt_tokens,
get_base_url, get_extra_attributes,
get_langtrace_attributes,
get_llm_request_attributes,
get_llm_url, get_span_name,
get_tool_calls, is_streaming,
set_event_completion,
set_span_attributes,
set_usage_attributes)
from langtrace_python_sdk.utils.llm import (
StreamWrapper,
calculate_prompt_tokens,
get_base_url,
get_extra_attributes,
get_langtrace_attributes,
get_llm_request_attributes,
get_llm_url,
get_span_name,
get_tool_calls,
is_streaming,
set_event_completion,
set_span_attributes,
set_usage_attributes,
)
from langtrace_python_sdk.utils.silently_fail import silently_fail


def async_openai_responses_create(version: str, tracer: Tracer) -> Callable:
    """Wrap ``openai.resources.responses.AsyncResponses.create`` to trace it.

    Args:
        version: Version string of the installed ``openai`` package.
        tracer: OpenTelemetry tracer used to create the client span.

    Returns:
        An async wrapper (for ``wrap_function_wrapper``) that records request
        attributes, awaits the wrapped call, records response attributes, and
        re-raises any error after marking the span as failed.
    """

    async def traced_method(
        wrapped: Callable, instance: Any, args: List[Any], kwargs: Dict[str, Any]
    ):
        input_value = kwargs.get("input")
        # NOTE(review): when `input` is a list only its first element is used
        # as the prompt, while a scalar input is wrapped in a one-element
        # message list — confirm the list branch shouldn't be `input_value`
        # itself (or `[input_value[0]]`) for a consistent attribute shape.
        prompt = (
            input_value[0]
            if isinstance(input_value, list)
            else [{"role": "user", "content": input_value}]
        )
        service_provider = SERVICE_PROVIDERS["OPENAI"]
        span_attributes = {
            "instructions": kwargs.get("instructions"),
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(
                kwargs,
                operation_name="openai.responses.create",
                prompts=prompt,
            ),
        }
        # Context manager ends the span on exit (success or error).
        with tracer.start_as_current_span(
            name="openai.responses.create",
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
        ) as span:
            try:
                set_span_attributes(span, span_attributes)

                response = await wrapped(*args, **kwargs)
                _set_openai_agentic_response_attributes(span, response)

                return response
            except Exception as err:
                # Record the exception AND mark the span status as ERROR so
                # backends surface the failure, then re-raise to the caller.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                raise

    return traced_method


def openai_responses_create(version: str, tracer: Tracer) -> Callable:
    """Wrap ``openai.resources.responses.Responses.create`` to trace it.

    Args:
        version: Version string of the installed ``openai`` package.
        tracer: OpenTelemetry tracer used to create the client span.

    Returns:
        A wrapper (for ``wrap_function_wrapper``) that records request
        attributes and either hands the span to ``StreamWrapper`` (streaming)
        or records response attributes and ends the span itself.
    """

    def traced_method(
        wrapped: Callable, instance: Any, args: List[Any], kwargs: Dict[str, Any]
    ):
        input_value = kwargs.get("input")
        # NOTE(review): when `input` is a list only its first element is used
        # as the prompt, while a scalar input is wrapped in a one-element
        # message list — confirm the list branch shouldn't be `input_value`
        # itself for a consistent attribute shape.
        prompt = (
            input_value[0]
            if isinstance(input_value, list)
            else [{"role": "user", "content": input_value}]
        )
        service_provider = SERVICE_PROVIDERS["OPENAI"]
        span_attributes = {
            "instructions": kwargs.get("instructions"),
            **get_langtrace_attributes(version, service_provider, vendor_type="llm"),
            **get_llm_request_attributes(
                kwargs,
                operation_name="openai.responses.create",
                prompts=prompt,
            ),
        }
        # end_on_exit=False because on the streaming path the span must
        # outlive this frame: StreamWrapper ends it when the stream is
        # exhausted. That means EVERY other exit path must end the span
        # explicitly, or it leaks (the original leaked on the non-streaming
        # and exception paths).
        with tracer.start_as_current_span(
            name="openai.responses.create",
            kind=SpanKind.CLIENT,
            context=set_span_in_context(trace.get_current_span()),
            end_on_exit=False,
        ) as span:
            try:
                set_span_attributes(span, span_attributes)

                response = wrapped(*args, **kwargs)
                if is_streaming(kwargs) and span.is_recording():
                    # StreamWrapper takes ownership of the span and ends it.
                    return StreamWrapper(response, span)
                _set_openai_agentic_response_attributes(span, response)
                span.end()
                return response
            except Exception as err:
                # Record the exception, mark the span failed, and end it
                # before re-raising — end_on_exit=False will not do it for us.
                span.record_exception(err)
                span.set_status(Status(StatusCode.ERROR, str(err)))
                span.end()
                raise

    return traced_method


def filter_valid_attributes(attributes):
"""Filter attributes where value is not None, not an empty string, and not openai.NOT_GIVEN."""
return {
Expand Down Expand Up @@ -634,6 +728,21 @@ def extract_content(choice: Any) -> Union[str, List[Dict[str, Any]], Dict[str, A
return ""


def _set_openai_agentic_response_attributes(span: Span, response) -> None:
    """Record id, model, completion text, and token usage from a Responses
    API result onto *span*.

    Args:
        span: The active span for the ``responses.create`` call.
        response: The OpenAI Responses API result object (must expose
            ``id``, ``model``, and ``output_text``; ``usage`` is optional).
    """
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_ID, response.id)
    set_span_attribute(span, SpanAttributes.LLM_RESPONSE_MODEL, response.model)
    set_event_completion(span, [{"role": "assistant", "content": response.output_text}])
    usage = getattr(response, "usage", None)
    if usage is None:
        # Usage may be absent (e.g. partial/streamed results); skip rather
        # than raise inside the tracing path.
        return
    # NOTE(review): in the OpenAI SDK `input_tokens_details` is a model
    # object (attribute access), not a dict — the original `["cached_tokens"]`
    # subscript would raise TypeError. Support both shapes defensively.
    details = getattr(usage, "input_tokens_details", None)
    if isinstance(details, dict):
        cached_tokens = details.get("cached_tokens")
    else:
        cached_tokens = getattr(details, "cached_tokens", None)
    set_usage_attributes(
        span,
        {
            "input_tokens": usage.input_tokens,
            "output_tokens": usage.output_tokens,
            "total_tokens": usage.total_tokens,
            "cached_tokens": cached_tokens,
        },
    )


@silently_fail
def _set_input_attributes(
span: Span, kwargs: ChatCompletionsCreateKwargs, attributes: LLMSpanAttributes
Expand Down Expand Up @@ -707,5 +816,9 @@ def _set_response_attributes(span: Span, result: ResultType) -> None:
set_span_attribute(
span,
"gen_ai.usage.cached_tokens",
result.usage.prompt_tokens_details.cached_tokens if result.usage.prompt_tokens_details else 0,
(
result.usage.prompt_tokens_details.cached_tokens
if result.usage.prompt_tokens_details
else 0
),
)
Loading
Loading