From f8108605fac897d8a5a8bd4fafb77d560bd44f05 Mon Sep 17 00:00:00 2001 From: Anton Pirker Date: Thu, 18 Sep 2025 15:58:02 +0200 Subject: [PATCH 1/2] Replace SPANDATA with sentry-conventions attributes --- sentry_sdk/ai/monitoring.py | 18 +- sentry_sdk/client.py | 6 +- sentry_sdk/consts.py | 637 ------------------ sentry_sdk/integrations/aiohttp.py | 9 +- sentry_sdk/integrations/anthropic.py | 25 +- sentry_sdk/integrations/asyncpg.py | 23 +- sentry_sdk/integrations/boto3.py | 9 +- sentry_sdk/integrations/celery/__init__.py | 21 +- sentry_sdk/integrations/clickhouse_driver.py | 13 +- sentry_sdk/integrations/cohere.py | 52 +- sentry_sdk/integrations/django/__init__.py | 13 +- sentry_sdk/integrations/django/caching.py | 16 +- sentry_sdk/integrations/httpx.py | 15 +- sentry_sdk/integrations/huggingface_hub.py | 47 +- sentry_sdk/integrations/langchain.py | 89 +-- sentry_sdk/integrations/langgraph.py | 35 +- sentry_sdk/integrations/openai.py | 38 +- .../openai_agents/spans/ai_client.py | 5 +- .../openai_agents/spans/execute_tool.py | 15 +- .../openai_agents/spans/handoff.py | 5 +- .../openai_agents/spans/invoke_agent.py | 11 +- .../integrations/openai_agents/utils.py | 38 +- sentry_sdk/integrations/pymongo.py | 17 +- .../integrations/redis/modules/caches.py | 17 +- .../integrations/redis/modules/queries.py | 11 +- sentry_sdk/integrations/redis/utils.py | 4 +- sentry_sdk/integrations/sqlalchemy.py | 11 +- sentry_sdk/integrations/stdlib.py | 9 +- sentry_sdk/tracing.py | 17 +- sentry_sdk/tracing_utils.py | 69 +- setup.py | 1 + .../integrations/anthropic/test_anthropic.py | 120 ++-- tests/integrations/asyncpg/test_asyncpg.py | 91 ++- tests/integrations/cohere/test_cohere.py | 30 +- tests/integrations/django/test_basic.py | 14 +- .../integrations/django/test_db_query_data.py | 128 ++-- tests/integrations/httpx/test_httpx.py | 27 +- .../integrations/langchain/test_langchain.py | 30 +- .../integrations/langgraph/test_langgraph.py | 75 ++- tests/integrations/openai/test_openai.py | 58 +- tests/integrations/pymongo/test_pymongo.py | 22 +- .../redis/asyncio/test_redis_asyncio.py | 10 +- .../redis/cluster/test_redis_cluster.py | 14 +- .../test_redis_cluster_asyncio.py | 14 +- tests/integrations/redis/test_redis.py | 20 +- .../test_redis_py_cluster_legacy.py | 26 +- tests/integrations/requests/test_requests.py | 26 +- .../sqlalchemy/test_sqlalchemy.py | 103 +-- tests/integrations/stdlib/test_httplib.py | 35 +- tests/test_logs.py | 5 +- 50 files changed, 760 insertions(+), 1384 deletions(-) diff --git a/sentry_sdk/ai/monitoring.py b/sentry_sdk/ai/monitoring.py index 9dd1aa132c..cbd2b31c44 100644 --- a/sentry_sdk/ai/monitoring.py +++ b/sentry_sdk/ai/monitoring.py @@ -1,11 +1,11 @@ import inspect from functools import wraps -from sentry_sdk.consts import SPANDATA import sentry_sdk.utils from sentry_sdk import start_span from sentry_sdk.tracing import Span from sentry_sdk.utils import ContextVar +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -42,7 +42,7 @@ def sync_wrapped(*args, **kwargs): for k, v in kwargs.pop("sentry_data", {}).items(): span.set_data(k, v) if curr_pipeline: - span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, curr_pipeline) + span.set_data(ATTRS.GEN_AI_PIPELINE_NAME, curr_pipeline) return f(*args, **kwargs) else: _ai_pipeline_name.set(description) @@ -71,7 +71,7 @@ async def async_wrapped(*args, **kwargs): for k, v in kwargs.pop("sentry_data", {}).items(): span.set_data(k, v) if curr_pipeline: - span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, 
curr_pipeline) + span.set_data(ATTRS.GEN_AI_PIPELINE_NAME, curr_pipeline) return await f(*args, **kwargs) else: _ai_pipeline_name.set(description) @@ -110,23 +110,23 @@ def record_token_usage( # TODO: move pipeline name elsewhere ai_pipeline_name = get_ai_pipeline_name() if ai_pipeline_name: - span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, ai_pipeline_name) + span.set_data(ATTRS.GEN_AI_PIPELINE_NAME, ai_pipeline_name) if input_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_INPUT_TOKENS, input_tokens) if input_tokens_cached is not None: span.set_data( - SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + ATTRS.GEN_AI_USAGE_INPUT_TOKENS_CACHED, input_tokens_cached, ) if output_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) if output_tokens_reasoning is not None: span.set_data( - SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, output_tokens_reasoning, ) @@ -134,4 +134,4 @@ def record_token_usage( total_tokens = input_tokens + output_tokens if total_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens) diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py index c45d5e2f4f..de94631e91 100644 --- a/sentry_sdk/client.py +++ b/sentry_sdk/client.py @@ -30,7 +30,6 @@ from sentry_sdk.tracing import trace from sentry_sdk.transport import BaseHttpTransport, make_transport from sentry_sdk.consts import ( - SPANDATA, DEFAULT_MAX_VALUE_LENGTH, DEFAULT_OPTIONS, INSTRUMENTER, @@ -49,6 +48,7 @@ ) from sentry_sdk.scrubber import EventScrubber from sentry_sdk.monitor import Monitor +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS if TYPE_CHECKING: from typing import Any @@ -914,8 +914,8 @@ def _capture_experimental_log(self, log): log["attributes"]["sentry.sdk.version"] = SDK_INFO["version"] server_name = self.options.get("server_name") - if server_name is not None and SPANDATA.SERVER_ADDRESS not in log["attributes"]: - log["attributes"][SPANDATA.SERVER_ADDRESS] = server_name + if server_name is not None and ATTRS.SERVER_ADDRESS not in log["attributes"]: + log["attributes"][ATTRS.SERVER_ADDRESS] = server_name environment = self.options.get("environment") if environment is not None and "sentry.environment" not in log["attributes"]: diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py index 91a1740526..339241ea85 100644 --- a/sentry_sdk/consts.py +++ b/sentry_sdk/consts.py @@ -116,643 +116,6 @@ class INSTRUMENTER: OTEL = "otel" -class SPANDATA: - """ - Additional information describing the type of the span. - See: https://develop.sentry.dev/sdk/performance/span-data-conventions/ - """ - - AI_CITATIONS = "ai.citations" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - References or sources cited by the AI model in its response. - Example: ["Smith et al. 2020", "Jones 2019"] - """ - - AI_DOCUMENTS = "ai.documents" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Documents or content chunks used as context for the AI model. - Example: ["doc1.txt", "doc2.pdf"] - """ - - AI_FINISH_REASON = "ai.finish_reason" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_FINISH_REASONS instead. - - The reason why the model stopped generating. 
- Example: "length" - """ - - AI_FREQUENCY_PENALTY = "ai.frequency_penalty" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_FREQUENCY_PENALTY instead. - - Used to reduce repetitiveness of generated tokens. - Example: 0.5 - """ - - AI_FUNCTION_CALL = "ai.function_call" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead. - - For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls - """ - - AI_GENERATION_ID = "ai.generation_id" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_ID instead. - - Unique identifier for the completion. - Example: "gen_123abc" - """ - - AI_INPUT_MESSAGES = "ai.input_messages" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_MESSAGES instead. - - The input messages to an LLM call. - Example: [{"role": "user", "message": "hello"}] - """ - - AI_LOGIT_BIAS = "ai.logit_bias" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - For an AI model call, the logit bias - """ - - AI_METADATA = "ai.metadata" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Extra metadata passed to an AI pipeline step. - Example: {"executed_function": "add_integers"} - """ - - AI_MODEL_ID = "ai.model_id" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_MODEL or GEN_AI_RESPONSE_MODEL instead. - - The unique descriptor of the model being executed. - Example: gpt-4 - """ - - AI_PIPELINE_NAME = "ai.pipeline.name" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_PIPELINE_NAME instead. - - Name of the AI pipeline or chain being executed. - Example: "qa-pipeline" - """ - - AI_PREAMBLE = "ai.preamble" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - For an AI model call, the preamble parameter. - Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style. - Example: "You are now a clown." - """ - - AI_PRESENCE_PENALTY = "ai.presence_penalty" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_PRESENCE_PENALTY instead. - - Used to reduce repetitiveness of generated tokens. - Example: 0.5 - """ - - AI_RAW_PROMPTING = "ai.raw_prompting" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Minimize pre-processing done to the prompt sent to the LLM. - Example: true - """ - - AI_RESPONSE_FORMAT = "ai.response_format" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - For an AI model call, the format of the response - """ - - AI_RESPONSES = "ai.responses" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_TEXT instead. - - The responses to an AI model call. Always as a list. - Example: ["hello", "world"] - """ - - AI_SEARCH_QUERIES = "ai.search_queries" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Queries used to search for relevant context or documents. - Example: ["climate change effects", "renewable energy"] - """ - - AI_SEARCH_REQUIRED = "ai.is_search_required" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Boolean indicating if the model needs to perform a search. - Example: true - """ - - AI_SEARCH_RESULTS = "ai.search_results" - """ - .. deprecated:: - This attribute is deprecated. 
Use GEN_AI_* attributes instead. - - Results returned from search queries for context. - Example: ["Result 1", "Result 2"] - """ - - AI_SEED = "ai.seed" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_SEED instead. - - The seed, ideally models given the same seed and same other parameters will produce the exact same output. - Example: 123.45 - """ - - AI_STREAMING = "ai.streaming" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_STREAMING instead. - - Whether or not the AI model call's response was streamed back asynchronously - Example: true - """ - - AI_TAGS = "ai.tags" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Tags that describe an AI pipeline step. - Example: {"executed_function": "add_integers"} - """ - - AI_TEMPERATURE = "ai.temperature" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_TEMPERATURE instead. - - For an AI model call, the temperature parameter. Temperature essentially means how random the output will be. - Example: 0.5 - """ - - AI_TEXTS = "ai.texts" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Raw text inputs provided to the model. - Example: ["What is machine learning?"] - """ - - AI_TOP_K = "ai.top_k" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_TOP_K instead. - - For an AI model call, the top_k parameter. Top_k essentially controls how random the output will be. - Example: 35 - """ - - AI_TOP_P = "ai.top_p" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_TOP_P instead. - - For an AI model call, the top_p parameter. Top_p essentially controls how random the output will be. - Example: 0.5 - """ - - AI_TOOL_CALLS = "ai.tool_calls" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_RESPONSE_TOOL_CALLS instead. - - For an AI model call, the function that was called. This is deprecated for OpenAI, and replaced by tool_calls - """ - - AI_TOOLS = "ai.tools" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_REQUEST_AVAILABLE_TOOLS instead. - - For an AI model call, the functions that are available - """ - - AI_WARNINGS = "ai.warnings" - """ - .. deprecated:: - This attribute is deprecated. Use GEN_AI_* attributes instead. - - Warning messages generated during model execution. - Example: ["Token limit exceeded"] - """ - - CACHE_HIT = "cache.hit" - """ - A boolean indicating whether the requested data was found in the cache. - Example: true - """ - - CACHE_ITEM_SIZE = "cache.item_size" - """ - The size of the requested data in bytes. - Example: 58 - """ - - CACHE_KEY = "cache.key" - """ - The key of the requested data. - Example: template.cache.some_item.867da7e2af8e6b2f3aa7213a4080edb3 - """ - - CODE_FILEPATH = "code.filepath" - """ - The source code file name that identifies the code unit as uniquely as possible (preferably an absolute file path). - Example: "/app/myapplication/http/handler/server.py" - """ - - CODE_FUNCTION = "code.function" - """ - The method or function name, or equivalent (usually rightmost part of the code unit's name). - Example: "server_request" - """ - - CODE_LINENO = "code.lineno" - """ - The line number in `code.filepath` best representing the operation. It SHOULD point within the code unit named in `code.function`. - Example: 42 - """ - - CODE_NAMESPACE = "code.namespace" - """ - The "namespace" within which `code.function` is defined. 
Usually the qualified class or module name, such that `code.namespace` + some separator + `code.function` form a unique identifier for the code unit. - Example: "http.handler" - """ - - DB_MONGODB_COLLECTION = "db.mongodb.collection" - """ - The MongoDB collection being accessed within the database. - See: https://github.com/open-telemetry/semantic-conventions/blob/main/docs/database/mongodb.md#attributes - Example: public.users; customers - """ - - DB_NAME = "db.name" - """ - The name of the database being accessed. For commands that switch the database, this should be set to the target database (even if the command fails). - Example: myDatabase - """ - - DB_OPERATION = "db.operation" - """ - The name of the operation being executed, e.g. the MongoDB command name such as findAndModify, or the SQL keyword. - See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md - Example: findAndModify, HMSET, SELECT - """ - - DB_SYSTEM = "db.system" - """ - An identifier for the database management system (DBMS) product being used. - See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md - Example: postgresql - """ - - DB_USER = "db.user" - """ - The name of the database user used for connecting to the database. - See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/database.md - Example: my_user - """ - - GEN_AI_AGENT_NAME = "gen_ai.agent.name" - """ - The name of the agent being used. - Example: "ResearchAssistant" - """ - - GEN_AI_CHOICE = "gen_ai.choice" - """ - The model's response message. - Example: "The weather in Paris is rainy and overcast, with temperatures around 57°F" - """ - - GEN_AI_OPERATION_NAME = "gen_ai.operation.name" - """ - The name of the operation being performed. - Example: "chat" - """ - - GEN_AI_PIPELINE_NAME = "gen_ai.pipeline.name" - """ - Name of the AI pipeline or chain being executed. - Example: "qa-pipeline" - """ - - GEN_AI_RESPONSE_FINISH_REASONS = "gen_ai.response.finish_reasons" - """ - The reason why the model stopped generating. - Example: "COMPLETE" - """ - - GEN_AI_RESPONSE_ID = "gen_ai.response.id" - """ - Unique identifier for the completion. - Example: "gen_123abc" - """ - - GEN_AI_RESPONSE_MODEL = "gen_ai.response.model" - """ - Exact model identifier used to generate the response - Example: gpt-4o-mini-2024-07-18 - """ - - GEN_AI_RESPONSE_STREAMING = "gen_ai.response.streaming" - """ - Whether or not the AI model call's response was streamed back asynchronously - Example: true - """ - - GEN_AI_RESPONSE_TEXT = "gen_ai.response.text" - """ - The model's response text messages. - Example: ["The weather in Paris is rainy and overcast, with temperatures around 57°F", "The weather in London is sunny and warm, with temperatures around 65°F"] - """ - - GEN_AI_RESPONSE_TOOL_CALLS = "gen_ai.response.tool_calls" - """ - The tool calls in the model's response. - Example: [{"name": "get_weather", "arguments": {"location": "Paris"}}] - """ - - GEN_AI_REQUEST_AVAILABLE_TOOLS = "gen_ai.request.available_tools" - """ - The available tools for the model. 
- Example: [{"name": "get_weather", "description": "Get the weather for a given location"}, {"name": "get_news", "description": "Get the news for a given topic"}] - """ - - GEN_AI_REQUEST_FREQUENCY_PENALTY = "gen_ai.request.frequency_penalty" - """ - The frequency penalty parameter used to reduce repetitiveness of generated tokens. - Example: 0.1 - """ - - GEN_AI_REQUEST_MAX_TOKENS = "gen_ai.request.max_tokens" - """ - The maximum number of tokens to generate in the response. - Example: 2048 - """ - - GEN_AI_REQUEST_MESSAGES = "gen_ai.request.messages" - """ - The messages passed to the model. The "content" can be a string or an array of objects. - Example: [{role: "system", "content: "Generate a random number."}, {"role": "user", "content": [{"text": "Generate a random number between 0 and 10.", "type": "text"}]}] - """ - - GEN_AI_REQUEST_MODEL = "gen_ai.request.model" - """ - The model identifier being used for the request. - Example: "gpt-4-turbo" - """ - - GEN_AI_REQUEST_PRESENCE_PENALTY = "gen_ai.request.presence_penalty" - """ - The presence penalty parameter used to reduce repetitiveness of generated tokens. - Example: 0.1 - """ - - GEN_AI_REQUEST_SEED = "gen_ai.request.seed" - """ - The seed, ideally models given the same seed and same other parameters will produce the exact same output. - Example: "1234567890" - """ - - GEN_AI_REQUEST_TEMPERATURE = "gen_ai.request.temperature" - """ - The temperature parameter used to control randomness in the output. - Example: 0.7 - """ - - GEN_AI_REQUEST_TOP_K = "gen_ai.request.top_k" - """ - Limits the model to only consider the K most likely next tokens, where K is an integer (e.g., top_k=20 means only the 20 highest probability tokens are considered). - Example: 35 - """ - - GEN_AI_REQUEST_TOP_P = "gen_ai.request.top_p" - """ - The top_p parameter used to control diversity via nucleus sampling. - Example: 1.0 - """ - - GEN_AI_SYSTEM = "gen_ai.system" - """ - The name of the AI system being used. - Example: "openai" - """ - - GEN_AI_TOOL_DESCRIPTION = "gen_ai.tool.description" - """ - The description of the tool being used. - Example: "Searches the web for current information about a topic" - """ - - GEN_AI_TOOL_INPUT = "gen_ai.tool.input" - """ - The input of the tool being used. - Example: {"location": "Paris"} - """ - - GEN_AI_TOOL_NAME = "gen_ai.tool.name" - """ - The name of the tool being used. - Example: "web_search" - """ - - GEN_AI_TOOL_OUTPUT = "gen_ai.tool.output" - """ - The output of the tool being used. - Example: "rainy, 57°F" - """ - - GEN_AI_TOOL_TYPE = "gen_ai.tool.type" - """ - The type of tool being used. - Example: "function" - """ - - GEN_AI_USAGE_INPUT_TOKENS = "gen_ai.usage.input_tokens" - """ - The number of tokens in the input. - Example: 150 - """ - - GEN_AI_USAGE_INPUT_TOKENS_CACHED = "gen_ai.usage.input_tokens.cached" - """ - The number of cached tokens in the input. - Example: 50 - """ - - GEN_AI_USAGE_OUTPUT_TOKENS = "gen_ai.usage.output_tokens" - """ - The number of tokens in the output. - Example: 250 - """ - - GEN_AI_USAGE_OUTPUT_TOKENS_REASONING = "gen_ai.usage.output_tokens.reasoning" - """ - The number of tokens used for reasoning in the output. - Example: 75 - """ - - GEN_AI_USAGE_TOTAL_TOKENS = "gen_ai.usage.total_tokens" - """ - The total number of tokens used (input + output). - Example: 400 - """ - - GEN_AI_USER_MESSAGE = "gen_ai.user.message" - """ - The user message passed to the model. - Example: "What's the weather in Paris?" 
- """ - - HTTP_FRAGMENT = "http.fragment" - """ - The Fragments present in the URL. - Example: #foo=bar - """ - - HTTP_METHOD = "http.method" - """ - The HTTP method used. - Example: GET - """ - - HTTP_QUERY = "http.query" - """ - The Query string present in the URL. - Example: ?foo=bar&bar=baz - """ - - HTTP_STATUS_CODE = "http.response.status_code" - """ - The HTTP status code as an integer. - Example: 418 - """ - - MESSAGING_DESTINATION_NAME = "messaging.destination.name" - """ - The destination name where the message is being consumed from, - e.g. the queue name or topic. - """ - - MESSAGING_MESSAGE_ID = "messaging.message.id" - """ - The message's identifier. - """ - - MESSAGING_MESSAGE_RECEIVE_LATENCY = "messaging.message.receive.latency" - """ - The latency between when the task was enqueued and when it was started to be processed. - """ - - MESSAGING_MESSAGE_RETRY_COUNT = "messaging.message.retry.count" - """ - Number of retries/attempts to process a message. - """ - - MESSAGING_SYSTEM = "messaging.system" - """ - The messaging system's name, e.g. `kafka`, `aws_sqs` - """ - - NETWORK_PEER_ADDRESS = "network.peer.address" - """ - Peer address of the network connection - IP address or Unix domain socket name. - Example: 10.1.2.80, /tmp/my.sock, localhost - """ - - NETWORK_PEER_PORT = "network.peer.port" - """ - Peer port number of the network connection. - Example: 6379 - """ - - PROFILER_ID = "profiler_id" - """ - Label identifying the profiler id that the span occurred in. This should be a string. - Example: "5249fbada8d5416482c2f6e47e337372" - """ - - SERVER_ADDRESS = "server.address" - """ - Name of the database host. - Example: example.com - """ - - SERVER_PORT = "server.port" - """ - Logical server port number - Example: 80; 8080; 443 - """ - - SERVER_SOCKET_ADDRESS = "server.socket.address" - """ - Physical server IP address or Unix socket address. - Example: 10.5.3.2 - """ - - SERVER_SOCKET_PORT = "server.socket.port" - """ - Physical server port. - Recommended: If different than server.port. - Example: 16456 - """ - - THREAD_ID = "thread.id" - """ - Identifier of a thread from where the span originated. This should be a string. - Example: "7972576320" - """ - - THREAD_NAME = "thread.name" - """ - Label identifying a thread from where the span originated. This should be a string. - Example: "MainThread" - """ - - class SPANSTATUS: """ The status of a Sentry span. 
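
For context while reviewing the remaining hunks: every file below follows the same mechanical pattern already shown above. The `SPANDATA` import is dropped from `sentry_sdk.consts`, `from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS` is added, and the constant namespace is swapped at each `set_data` call site while the attribute keys themselves stay the same. A minimal sketch of the resulting shape, condensed from the clickhouse_driver hunk below (the standalone helper and its parameters are illustrative only, not part of the patch):

    # Old call sites (before this patch):
    #     from sentry_sdk.consts import SPANDATA
    #     span.set_data(SPANDATA.DB_SYSTEM, "clickhouse")
    #
    # New call sites (after this patch):
    from sentry_sdk.tracing import Span
    from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

    def _set_db_data(span: Span, host: str, port: int, database: str) -> None:
        # The attribute keys are unchanged; only the namespace they come from
        # moves to the shared sentry-conventions package.
        span.set_data(ATTRS.DB_SYSTEM, "clickhouse")
        span.set_data(ATTRS.SERVER_ADDRESS, host)
        span.set_data(ATTRS.SERVER_PORT, port)
        span.set_data(ATTRS.DB_NAME, database)
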
diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py index ad3202bf2c..7e9c822cb5 100644 --- a/sentry_sdk/integrations/aiohttp.py +++ b/sentry_sdk/integrations/aiohttp.py @@ -4,7 +4,7 @@ import sentry_sdk from sentry_sdk.api import continue_trace -from sentry_sdk.consts import OP, SPANSTATUS, SPANDATA +from sentry_sdk.consts import OP, SPANSTATUS from sentry_sdk.integrations import ( _DEFAULT_FAILED_REQUEST_STATUS_CODES, _check_minimum_version, @@ -37,6 +37,7 @@ SENSITIVE_DATA_SUBSTITUTE, AnnotatedValue, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: import asyncio @@ -239,11 +240,11 @@ async def on_request_start(session, trace_config_ctx, params): % (method, parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE), origin=AioHttpIntegration.origin, ) - span.set_data(SPANDATA.HTTP_METHOD, method) + span.set_data(ATTRS.HTTP_METHOD, method) if parsed_url is not None: span.set_data("url", parsed_url.url) - span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) - span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) + span.set_data(ATTRS.HTTP_QUERY, parsed_url.query) + span.set_data(ATTRS.HTTP_FRAGMENT, parsed_url.fragment) client = sentry_sdk.get_client() diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py index 4f4c0b1a2a..5cf51a0b7e 100644 --- a/sentry_sdk/integrations/anthropic.py +++ b/sentry_sdk/integrations/anthropic.py @@ -4,7 +4,7 @@ import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import ( @@ -13,6 +13,7 @@ package_version, safe_serialize, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: try: @@ -138,19 +139,19 @@ def _set_input_data(span, kwargs, integration): normalized_messages.append(message) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False ) set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False) + span, ATTRS.GEN_AI_RESPONSE_STREAMING, kwargs.get("stream", False) ) kwargs_keys_to_attributes = { - "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, - "model": SPANDATA.GEN_AI_REQUEST_MODEL, - "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, - "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, - "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + "max_tokens": ATTRS.GEN_AI_REQUEST_MAX_TOKENS, + "model": ATTRS.GEN_AI_REQUEST_MODEL, + "temperature": ATTRS.GEN_AI_REQUEST_TEMPERATURE, + "top_k": ATTRS.GEN_AI_REQUEST_TOP_K, + "top_p": ATTRS.GEN_AI_REQUEST_TOP_P, } for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) @@ -161,7 +162,7 @@ def _set_input_data(span, kwargs, integration): tools = kwargs.get("tools") if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) + span, ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) @@ -177,7 +178,7 @@ def _set_output_data( # type: (Span, AnthropicIntegration, str | None, int | None, int | None, list[Any], bool) -> None """ Set output data for the span based on the AI response.""" - 
span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, model) + span.set_data(ATTRS.GEN_AI_RESPONSE_MODEL, model) if should_send_default_pii() and integration.include_prompts: output_messages = { "response": [], @@ -193,14 +194,14 @@ def _set_output_data( if len(output_messages["tool"]) > 0: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, output_messages["tool"], unpack=False, ) if len(output_messages["response"]) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] + span, ATTRS.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) record_token_usage( diff --git a/sentry_sdk/integrations/asyncpg.py b/sentry_sdk/integrations/asyncpg.py index b6b53f4668..95539ee00e 100644 --- a/sentry_sdk/integrations/asyncpg.py +++ b/sentry_sdk/integrations/asyncpg.py @@ -3,7 +3,7 @@ from typing import Any, TypeVar, Callable, Awaitable, Iterator import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.tracing import Span from sentry_sdk.tracing_utils import add_query_source, record_sql_queries @@ -12,6 +12,7 @@ parse_version, capture_internal_exceptions, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: import asyncpg # type: ignore[import-not-found] @@ -166,16 +167,16 @@ async def _inner(*args: Any, **kwargs: Any) -> T: name="connect", origin=AsyncPGIntegration.origin, ) as span: - span.set_data(SPANDATA.DB_SYSTEM, "postgresql") + span.set_data(ATTRS.DB_SYSTEM, "postgresql") addr = kwargs.get("addr") if addr: try: - span.set_data(SPANDATA.SERVER_ADDRESS, addr[0]) - span.set_data(SPANDATA.SERVER_PORT, addr[1]) + span.set_data(ATTRS.SERVER_ADDRESS, addr[0]) + span.set_data(ATTRS.SERVER_PORT, addr[1]) except IndexError: pass - span.set_data(SPANDATA.DB_NAME, database) - span.set_data(SPANDATA.DB_USER, user) + span.set_data(ATTRS.DB_NAME, database) + span.set_data(ATTRS.DB_USER, user) with capture_internal_exceptions(): sentry_sdk.add_breadcrumb( @@ -189,20 +190,20 @@ async def _inner(*args: Any, **kwargs: Any) -> T: def _set_db_data(span: Span, conn: Any) -> None: - span.set_data(SPANDATA.DB_SYSTEM, "postgresql") + span.set_data(ATTRS.DB_SYSTEM, "postgresql") addr = conn._addr if addr: try: - span.set_data(SPANDATA.SERVER_ADDRESS, addr[0]) - span.set_data(SPANDATA.SERVER_PORT, addr[1]) + span.set_data(ATTRS.SERVER_ADDRESS, addr[0]) + span.set_data(ATTRS.SERVER_PORT, addr[1]) except IndexError: pass database = conn._params.database if database: - span.set_data(SPANDATA.DB_NAME, database) + span.set_data(ATTRS.DB_NAME, database) user = conn._params.user if user: - span.set_data(SPANDATA.DB_USER, user) + span.set_data(ATTRS.DB_USER, user) diff --git a/sentry_sdk/integrations/boto3.py b/sentry_sdk/integrations/boto3.py index 0207341f1b..08a12a5395 100644 --- a/sentry_sdk/integrations/boto3.py +++ b/sentry_sdk/integrations/boto3.py @@ -1,7 +1,7 @@ from functools import partial import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.tracing import Span from sentry_sdk.utils import ( @@ -10,6 +10,7 @@ parse_url, parse_version, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -68,12 +69,12 @@ def _sentry_request_created(service_id, request, operation_name, **kwargs): with 
capture_internal_exceptions(): parsed_url = parse_url(request.url, sanitize=False) span.set_data("aws.request.url", parsed_url.url) - span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) - span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) + span.set_data(ATTRS.HTTP_QUERY, parsed_url.query) + span.set_data(ATTRS.HTTP_FRAGMENT, parsed_url.fragment) span.set_tag("aws.service_id", service_id) span.set_tag("aws.operation_name", operation_name) - span.set_data(SPANDATA.HTTP_METHOD, request.method) + span.set_data(ATTRS.HTTP_METHOD, request.method) # We do it in order for subsequent http calls/retries be # attached to this span. diff --git a/sentry_sdk/integrations/celery/__init__.py b/sentry_sdk/integrations/celery/__init__.py index b5601fc0f9..940ae187a2 100644 --- a/sentry_sdk/integrations/celery/__init__.py +++ b/sentry_sdk/integrations/celery/__init__.py @@ -5,7 +5,7 @@ import sentry_sdk from sentry_sdk import isolation_scope from sentry_sdk.api import continue_trace -from sentry_sdk.consts import OP, SPANSTATUS, SPANDATA +from sentry_sdk.consts import OP, SPANSTATUS from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.integrations.celery.beat import ( _patch_beat_apply_entry, @@ -22,6 +22,7 @@ event_from_exception, reraise, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -355,7 +356,7 @@ def _set_messaging_destination_name(task, span): if delivery_info.get("exchange") == "" and routing_key is not None: # Empty exchange indicates the default exchange, meaning the tasks # are sent to the queue with the same name as the routing key. - span.set_data(SPANDATA.MESSAGING_DESTINATION_NAME, routing_key) + span.set_data(ATTRS.MESSAGING_DESTINATION_NAME, routing_key) def _wrap_task_call(task, f): @@ -392,19 +393,19 @@ def _inner(*args, **kwargs): if latency is not None: latency *= 1000 # milliseconds - span.set_data(SPANDATA.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency) + span.set_data(ATTRS.MESSAGING_MESSAGE_RECEIVE_LATENCY, latency) with capture_internal_exceptions(): - span.set_data(SPANDATA.MESSAGING_MESSAGE_ID, task.request.id) + span.set_data(ATTRS.MESSAGING_MESSAGE_ID, task.request.id) with capture_internal_exceptions(): span.set_data( - SPANDATA.MESSAGING_MESSAGE_RETRY_COUNT, task.request.retries + ATTRS.MESSAGING_MESSAGE_RETRY_COUNT, task.request.retries ) with capture_internal_exceptions(): span.set_data( - SPANDATA.MESSAGING_SYSTEM, + ATTRS.MESSAGING_SYSTEM, task.app.connection().transport.driver_type, ) @@ -509,19 +510,19 @@ def sentry_publish(self, *args, **kwargs): origin=CeleryIntegration.origin, ) as span: if task_id is not None: - span.set_data(SPANDATA.MESSAGING_MESSAGE_ID, task_id) + span.set_data(ATTRS.MESSAGING_MESSAGE_ID, task_id) if exchange == "" and routing_key is not None: # Empty exchange indicates the default exchange, meaning messages are # routed to the queue with the same name as the routing key. 
- span.set_data(SPANDATA.MESSAGING_DESTINATION_NAME, routing_key) + span.set_data(ATTRS.MESSAGING_DESTINATION_NAME, routing_key) if retries is not None: - span.set_data(SPANDATA.MESSAGING_MESSAGE_RETRY_COUNT, retries) + span.set_data(ATTRS.MESSAGING_MESSAGE_RETRY_COUNT, retries) with capture_internal_exceptions(): span.set_data( - SPANDATA.MESSAGING_SYSTEM, self.connection.transport.driver_type + ATTRS.MESSAGING_SYSTEM, self.connection.transport.driver_type ) return original_publish(self, *args, **kwargs) diff --git a/sentry_sdk/integrations/clickhouse_driver.py b/sentry_sdk/integrations/clickhouse_driver.py index bbaaaeec8e..17b36f7882 100644 --- a/sentry_sdk/integrations/clickhouse_driver.py +++ b/sentry_sdk/integrations/clickhouse_driver.py @@ -1,9 +1,10 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.tracing import Span from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import capture_internal_exceptions, ensure_integration_enabled +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING, TypeVar @@ -170,8 +171,8 @@ def wrapped_generator() -> "Iterator[Any]": def _set_db_data( span: Span, connection: clickhouse_driver.connection.Connection ) -> None: - span.set_data(SPANDATA.DB_SYSTEM, "clickhouse") - span.set_data(SPANDATA.SERVER_ADDRESS, connection.host) - span.set_data(SPANDATA.SERVER_PORT, connection.port) - span.set_data(SPANDATA.DB_NAME, connection.database) - span.set_data(SPANDATA.DB_USER, connection.user) + span.set_data(ATTRS.DB_SYSTEM, "clickhouse") + span.set_data(ATTRS.SERVER_ADDRESS, connection.host) + span.set_data(ATTRS.SERVER_PORT, connection.port) + span.set_data(ATTRS.DB_NAME, connection.database) + span.set_data(ATTRS.DB_USER, connection.user) diff --git a/sentry_sdk/integrations/cohere.py b/sentry_sdk/integrations/cohere.py index 57ffdb908a..91cc2e23b9 100644 --- a/sentry_sdk/integrations/cohere.py +++ b/sentry_sdk/integrations/cohere.py @@ -2,8 +2,8 @@ from sentry_sdk import consts from sentry_sdk.ai.monitoring import record_token_usage -from sentry_sdk.consts import SPANDATA from sentry_sdk.ai.utils import set_data_normalized +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -37,32 +37,32 @@ COLLECTED_CHAT_PARAMS = { - "model": SPANDATA.AI_MODEL_ID, - "k": SPANDATA.AI_TOP_K, - "p": SPANDATA.AI_TOP_P, - "seed": SPANDATA.AI_SEED, - "frequency_penalty": SPANDATA.AI_FREQUENCY_PENALTY, - "presence_penalty": SPANDATA.AI_PRESENCE_PENALTY, - "raw_prompting": SPANDATA.AI_RAW_PROMPTING, + "model": ATTRS.AI_MODEL_ID, + "k": ATTRS.AI_TOP_K, + "p": ATTRS.AI_TOP_P, + "seed": ATTRS.AI_SEED, + "frequency_penalty": ATTRS.AI_FREQUENCY_PENALTY, + "presence_penalty": ATTRS.AI_PRESENCE_PENALTY, + "raw_prompting": ATTRS.AI_RAW_PROMPTING, } COLLECTED_PII_CHAT_PARAMS = { - "tools": SPANDATA.AI_TOOLS, - "preamble": SPANDATA.AI_PREAMBLE, + "tools": ATTRS.AI_TOOLS, + "preamble": ATTRS.AI_PREAMBLE, } COLLECTED_CHAT_RESP_ATTRS = { - "generation_id": SPANDATA.AI_GENERATION_ID, - "is_search_required": SPANDATA.AI_SEARCH_REQUIRED, - "finish_reason": SPANDATA.AI_FINISH_REASON, + "generation_id": ATTRS.AI_GENERATION_ID, + "is_search_required": ATTRS.AI_SEARCH_REQUIRED, + "finish_reason": ATTRS.AI_FINISH_REASON, } COLLECTED_PII_CHAT_RESP_ATTRS = { - "citations": SPANDATA.AI_CITATIONS, - "documents": SPANDATA.AI_DOCUMENTS, - 
"search_queries": SPANDATA.AI_SEARCH_QUERIES, - "search_results": SPANDATA.AI_SEARCH_RESULTS, - "tool_calls": SPANDATA.AI_TOOL_CALLS, + "citations": ATTRS.AI_CITATIONS, + "documents": ATTRS.AI_DOCUMENTS, + "search_queries": ATTRS.AI_SEARCH_QUERIES, + "search_results": ATTRS.AI_SEARCH_RESULTS, + "tool_calls": ATTRS.AI_TOOL_CALLS, } @@ -101,7 +101,7 @@ def collect_chat_response_fields(span, res, include_pii): if hasattr(res, "text"): set_data_normalized( span, - SPANDATA.AI_RESPONSES, + ATTRS.AI_RESPONSES, [res.text], ) for pii_attr in COLLECTED_PII_CHAT_RESP_ATTRS: @@ -127,7 +127,7 @@ def collect_chat_response_fields(span, res, include_pii): ) if hasattr(res.meta, "warnings"): - set_data_normalized(span, SPANDATA.AI_WARNINGS, res.meta.warnings) + set_data_normalized(span, ATTRS.AI_WARNINGS, res.meta.warnings) @wraps(f) def new_chat(*args, **kwargs): @@ -160,7 +160,7 @@ def new_chat(*args, **kwargs): if should_send_default_pii() and integration.include_prompts: set_data_normalized( span, - SPANDATA.AI_INPUT_MESSAGES, + ATTRS.AI_INPUT_MESSAGES, list( map( lambda x: { @@ -179,7 +179,7 @@ def new_chat(*args, **kwargs): for k, v in COLLECTED_CHAT_PARAMS.items(): if k in kwargs: set_data_normalized(span, v, kwargs[k]) - set_data_normalized(span, SPANDATA.AI_STREAMING, False) + set_data_normalized(span, ATTRS.AI_STREAMING, False) if streaming: old_iterator = res @@ -238,18 +238,16 @@ def new_embed(*args, **kwargs): should_send_default_pii() and integration.include_prompts ): if isinstance(kwargs["texts"], str): - set_data_normalized(span, SPANDATA.AI_TEXTS, [kwargs["texts"]]) + set_data_normalized(span, ATTRS.AI_TEXTS, [kwargs["texts"]]) elif ( isinstance(kwargs["texts"], list) and len(kwargs["texts"]) > 0 and isinstance(kwargs["texts"][0], str) ): - set_data_normalized( - span, SPANDATA.AI_INPUT_MESSAGES, kwargs["texts"] - ) + set_data_normalized(span, ATTRS.AI_INPUT_MESSAGES, kwargs["texts"]) if "model" in kwargs: - set_data_normalized(span, SPANDATA.AI_MODEL_ID, kwargs["model"]) + set_data_normalized(span, ATTRS.AI_MODEL_ID, kwargs["model"]) try: res = f(*args, **kwargs) except Exception as e: diff --git a/sentry_sdk/integrations/django/__init__.py b/sentry_sdk/integrations/django/__init__.py index 2041598fa0..a7f08c8726 100644 --- a/sentry_sdk/integrations/django/__init__.py +++ b/sentry_sdk/integrations/django/__init__.py @@ -5,7 +5,7 @@ from importlib import import_module import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.scope import add_global_event_processor, should_send_default_pii from sentry_sdk.serializer import add_global_repr_processor, add_repr_sequence_type from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource @@ -29,6 +29,7 @@ DEFAULT_HTTP_METHODS_TO_CAPTURE, RequestExtractor, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: from django import VERSION as DJANGO_VERSION @@ -700,7 +701,7 @@ def _set_db_data(span, cursor_or_db): # type: (Span, Any) -> None db = cursor_or_db.db if hasattr(cursor_or_db, "db") else cursor_or_db vendor = db.vendor - span.set_data(SPANDATA.DB_SYSTEM, vendor) + span.set_data(ATTRS.DB_SYSTEM, vendor) # Some custom backends override `__getattr__`, making it look like `cursor_or_db` # actually has a `connection` and the `connection` has a `get_dsn_parameters` @@ -733,19 +734,19 @@ def _set_db_data(span, cursor_or_db): db_name = connection_params.get("dbname") or connection_params.get("database") if db_name is not None: - span.set_data(SPANDATA.DB_NAME, 
db_name) + span.set_data(ATTRS.DB_NAME, db_name) server_address = connection_params.get("host") if server_address is not None: - span.set_data(SPANDATA.SERVER_ADDRESS, server_address) + span.set_data(ATTRS.SERVER_ADDRESS, server_address) server_port = connection_params.get("port") if server_port is not None: - span.set_data(SPANDATA.SERVER_PORT, str(server_port)) + span.set_data(ATTRS.SERVER_PORT, str(server_port)) server_socket_address = connection_params.get("unix_socket") if server_socket_address is not None: - span.set_data(SPANDATA.SERVER_SOCKET_ADDRESS, server_socket_address) + span.set_data(ATTRS.SERVER_SOCKET_ADDRESS, server_socket_address) def add_template_context_repr_sequence(): diff --git a/sentry_sdk/integrations/django/caching.py b/sentry_sdk/integrations/django/caching.py index 7985611761..d42b8e8c78 100644 --- a/sentry_sdk/integrations/django/caching.py +++ b/sentry_sdk/integrations/django/caching.py @@ -7,12 +7,12 @@ from django.core.cache import CacheHandler import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.utils import ( capture_internal_exceptions, ensure_integration_enabled, ) - +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS if TYPE_CHECKING: from typing import Any @@ -59,22 +59,22 @@ def _instrument_call( with capture_internal_exceptions(): if address is not None: - span.set_data(SPANDATA.NETWORK_PEER_ADDRESS, address) + span.set_data(ATTRS.NETWORK_PEER_ADDRESS, address) if port is not None: - span.set_data(SPANDATA.NETWORK_PEER_PORT, port) + span.set_data(ATTRS.NETWORK_PEER_PORT, port) key = _get_safe_key(method_name, args, kwargs) if key is not None: - span.set_data(SPANDATA.CACHE_KEY, key) + span.set_data(ATTRS.CACHE_KEY, key) item_size = None if is_get_operation: if value: item_size = len(str(value)) - span.set_data(SPANDATA.CACHE_HIT, True) + span.set_data(ATTRS.CACHE_HIT, True) else: - span.set_data(SPANDATA.CACHE_HIT, False) + span.set_data(ATTRS.CACHE_HIT, False) else: # TODO: We don't handle `get_or_set` which we should arg_count = len(args) if arg_count >= 2: @@ -85,7 +85,7 @@ def _instrument_call( item_size = len(str(args[0])) if item_size is not None: - span.set_data(SPANDATA.CACHE_ITEM_SIZE, item_size) + span.set_data(ATTRS.CACHE_ITEM_SIZE, item_size) return value diff --git a/sentry_sdk/integrations/httpx.py b/sentry_sdk/integrations/httpx.py index 2ddd44489f..d5ef96cd63 100644 --- a/sentry_sdk/integrations/httpx.py +++ b/sentry_sdk/integrations/httpx.py @@ -1,5 +1,5 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import Integration, DidNotEnable from sentry_sdk.tracing import BAGGAGE_HEADER_NAME from sentry_sdk.tracing_utils import Baggage, should_propagate_trace @@ -10,6 +10,7 @@ logger, parse_url, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -61,11 +62,11 @@ def send(self, request, **kwargs): ), origin=HttpxIntegration.origin, ) as span: - span.set_data(SPANDATA.HTTP_METHOD, request.method) + span.set_data(ATTRS.HTTP_METHOD, request.method) if parsed_url is not None: span.set_data("url", parsed_url.url) - span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) - span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) + span.set_data(ATTRS.HTTP_QUERY, parsed_url.query) + span.set_data(ATTRS.HTTP_FRAGMENT, parsed_url.fragment) if should_propagate_trace(sentry_sdk.get_client(), str(request.url)): for ( @@ -115,11 +116,11 @@ async def 
send(self, request, **kwargs): ), origin=HttpxIntegration.origin, ) as span: - span.set_data(SPANDATA.HTTP_METHOD, request.method) + span.set_data(ATTRS.HTTP_METHOD, request.method) if parsed_url is not None: span.set_data("url", parsed_url.url) - span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) - span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) + span.set_data(ATTRS.HTTP_QUERY, parsed_url.query) + span.set_data(ATTRS.HTTP_FRAGMENT, parsed_url.fragment) if should_propagate_trace(sentry_sdk.get_client(), str(request.url)): for ( diff --git a/sentry_sdk/integrations/huggingface_hub.py b/sentry_sdk/integrations/huggingface_hub.py index cb76ccf507..77335d13f8 100644 --- a/sentry_sdk/integrations/huggingface_hub.py +++ b/sentry_sdk/integrations/huggingface_hub.py @@ -4,13 +4,14 @@ import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import ( capture_internal_exceptions, event_from_exception, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -93,26 +94,26 @@ def new_huggingface_task(*args, **kwargs): ) span.__enter__() - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, operation_name) + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, operation_name) if model: - span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + span.set_data(ATTRS.GEN_AI_REQUEST_MODEL, model) # Input attributes if should_send_default_pii() and integration.include_prompts: set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompt, unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, prompt, unpack=False ) attribute_mapping = { - "tools": SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, - "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, - "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, - "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, - "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, - "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, - "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, - "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING, + "tools": ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, + "frequency_penalty": ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "max_tokens": ATTRS.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, + "temperature": ATTRS.GEN_AI_REQUEST_TEMPERATURE, + "top_p": ATTRS.GEN_AI_REQUEST_TOP_P, + "top_k": ATTRS.GEN_AI_REQUEST_TOP_K, + "stream": ATTRS.GEN_AI_RESPONSE_STREAMING, } for attribute, span_attribute in attribute_mapping.items(): @@ -180,12 +181,12 @@ def new_huggingface_task(*args, **kwargs): response_text_buffer.append(choice.message.content) if response_model is not None: - span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model) + span.set_data(ATTRS.GEN_AI_RESPONSE_MODEL, response_model) if finish_reason is not None: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + ATTRS.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason, ) @@ -193,7 +194,7 @@ def new_huggingface_task(*args, **kwargs): if tool_calls is not None and len(tool_calls) > 0: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, tool_calls, unpack=False, ) @@ -203,7 +204,7 @@ def new_huggingface_task(*args, **kwargs): if text_response: set_data_normalized( span, - 
SPANDATA.GEN_AI_RESPONSE_TEXT, + ATTRS.GEN_AI_RESPONSE_TEXT, text_response, ) @@ -260,7 +261,7 @@ def new_details_iterator(): if finish_reason is not None: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + ATTRS.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason, ) @@ -270,7 +271,7 @@ def new_details_iterator(): if text_response: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TEXT, + ATTRS.GEN_AI_RESPONSE_TEXT, text_response, ) @@ -333,14 +334,12 @@ def new_iterator(): yield chunk if response_model is not None: - span.set_data( - SPANDATA.GEN_AI_RESPONSE_MODEL, response_model - ) + span.set_data(ATTRS.GEN_AI_RESPONSE_MODEL, response_model) if finish_reason is not None: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, + ATTRS.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason, ) @@ -348,7 +347,7 @@ def new_iterator(): if tool_calls is not None and len(tool_calls) > 0: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, tool_calls, unpack=False, ) @@ -358,7 +357,7 @@ def new_iterator(): if text_response: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TEXT, + ATTRS.GEN_AI_RESPONSE_TEXT, text_response, ) diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index 1401be06e1..52dfcb297f 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -5,12 +5,13 @@ import sentry_sdk from sentry_sdk.ai.monitoring import set_ai_pipeline_name from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import Span from sentry_sdk.tracing_utils import _get_value from sentry_sdk.utils import logger, capture_internal_exceptions +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -49,14 +50,14 @@ AgentExecutor = None DATA_FIELDS = { - "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, - "function_call": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, - "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, - "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, - "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, - "tool_calls": SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, - "top_k": SPANDATA.GEN_AI_REQUEST_TOP_K, - "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + "frequency_penalty": ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "function_call": ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, + "max_tokens": ATTRS.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, + "temperature": ATTRS.GEN_AI_REQUEST_TEMPERATURE, + "tool_calls": ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, + "top_k": ATTRS.GEN_AI_REQUEST_TOP_K, + "top_p": ATTRS.GEN_AI_REQUEST_TOP_P, } @@ -192,15 +193,15 @@ def on_llm_start( if model: span.set_data( - SPANDATA.GEN_AI_REQUEST_MODEL, + ATTRS.GEN_AI_REQUEST_MODEL, model, ) ai_type = all_params.get("_type", "") if "anthropic" in ai_type: - span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic") + span.set_data(ATTRS.GEN_AI_SYSTEM, "anthropic") elif "openai" in ai_type: - span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai") + span.set_data(ATTRS.GEN_AI_SYSTEM, "openai") for key, attribute in DATA_FIELDS.items(): if key in all_params and all_params[key] is not None: @@ -210,7 +211,7 @@ def on_llm_start( if should_send_default_pii() and self.include_prompts: 
set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, prompts, unpack=False ) def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): @@ -239,15 +240,15 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): ) span = watched_span.span - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "chat") if model: - span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model) + span.set_data(ATTRS.GEN_AI_REQUEST_MODEL, model) ai_type = all_params.get("_type", "") if "anthropic" in ai_type: - span.set_data(SPANDATA.GEN_AI_SYSTEM, "anthropic") + span.set_data(ATTRS.GEN_AI_SYSTEM, "anthropic") elif "openai" in ai_type: - span.set_data(SPANDATA.GEN_AI_SYSTEM, "openai") + span.set_data(ATTRS.GEN_AI_SYSTEM, "openai") for key, attribute in DATA_FIELDS.items(): if key in all_params and all_params[key] is not None: @@ -264,7 +265,7 @@ def on_chat_model_start(self, serialized, messages, *, run_id, **kwargs): ) set_data_normalized( span, - SPANDATA.GEN_AI_REQUEST_MESSAGES, + ATTRS.GEN_AI_REQUEST_MESSAGES, normalized_messages, unpack=False, ) @@ -282,7 +283,7 @@ def on_chat_model_end(self, response, *, run_id, **kwargs): if should_send_default_pii() and self.include_prompts: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TEXT, + ATTRS.GEN_AI_RESPONSE_TEXT, [[x.text for x in list_] for list_ in response.generations], ) @@ -308,7 +309,7 @@ def on_llm_end(self, response, *, run_id, **kwargs): try: response_model = generation.generation_info.get("model_name") if response_model is not None: - span.set_data(SPANDATA.GEN_AI_RESPONSE_MODEL, response_model) + span.set_data(ATTRS.GEN_AI_RESPONSE_MODEL, response_model) except AttributeError: pass @@ -316,7 +317,7 @@ def on_llm_end(self, response, *, run_id, **kwargs): finish_reason = generation.generation_info.get("finish_reason") if finish_reason is not None: span.set_data( - SPANDATA.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason + ATTRS.GEN_AI_RESPONSE_FINISH_REASONS, finish_reason ) except AttributeError: pass @@ -326,7 +327,7 @@ def on_llm_end(self, response, *, run_id, **kwargs): if tool_calls is not None and tool_calls != []: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, tool_calls, unpack=False, ) @@ -336,7 +337,7 @@ def on_llm_end(self, response, *, run_id, **kwargs): if should_send_default_pii() and self.include_prompts: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TEXT, + ATTRS.GEN_AI_RESPONSE_TEXT, [[x.text for x in list_] for list_ in response.generations], ) @@ -364,7 +365,7 @@ def on_agent_finish(self, finish, *, run_id, **kwargs): if should_send_default_pii() and self.include_prompts: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, finish.return_values.items() + span, ATTRS.GEN_AI_RESPONSE_TEXT, finish.return_values.items() ) self._exit_span(span_data, run_id) @@ -387,17 +388,17 @@ def on_tool_start(self, serialized, input_str, *, run_id, **kwargs): ) span = watched_span.span - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool") - span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool_name) + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "execute_tool") + span.set_data(ATTRS.GEN_AI_TOOL_NAME, tool_name) tool_description = serialized.get("description") if tool_description is not None: - span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool_description) + span.set_data(ATTRS.GEN_AI_TOOL_DESCRIPTION, tool_description) if 
should_send_default_pii() and self.include_prompts: set_data_normalized( span, - SPANDATA.GEN_AI_TOOL_INPUT, + ATTRS.GEN_AI_TOOL_INPUT, kwargs.get("inputs", [input_str]), ) @@ -412,7 +413,7 @@ def on_tool_end(self, output, *, run_id, **kwargs): span = span_data.span if should_send_default_pii() and self.include_prompts: - set_data_normalized(span, SPANDATA.GEN_AI_TOOL_OUTPUT, output) + set_data_normalized(span, ATTRS.GEN_AI_TOOL_OUTPUT, output) self._exit_span(span_data, run_id) @@ -503,13 +504,13 @@ def _record_token_usage(span, response): ) if input_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, input_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_INPUT_TOKENS, input_tokens) if output_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS, output_tokens) if total_tokens is not None: - span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_TOTAL_TOKENS, total_tokens) def _get_request_data(obj, args, kwargs): @@ -615,7 +616,7 @@ def _set_tools_on_span(span, tools): if simplified_tools: set_data_normalized( span, - SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, simplified_tools, unpack=False, ) @@ -724,10 +725,10 @@ def new_invoke(self, *args, **kwargs): origin=LangchainIntegration.origin, ) as span: if agent_name: - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + span.set_data(ATTRS.GEN_AI_AGENT_NAME, agent_name) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") - span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, False) + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(ATTRS.GEN_AI_RESPONSE_STREAMING, False) _set_tools_on_span(span, tools) @@ -741,7 +742,7 @@ def new_invoke(self, *args, **kwargs): and integration.include_prompts ): set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, [input], unpack=False ) output = result.get("output") @@ -750,7 +751,7 @@ def new_invoke(self, *args, **kwargs): and should_send_default_pii() and integration.include_prompts ): - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, output) return result @@ -778,10 +779,10 @@ def new_stream(self, *args, **kwargs): span.__enter__() if agent_name: - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent_name) + span.set_data(ATTRS.GEN_AI_AGENT_NAME, agent_name) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") - span.set_data(SPANDATA.GEN_AI_RESPONSE_STREAMING, True) + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(ATTRS.GEN_AI_RESPONSE_STREAMING, True) _set_tools_on_span(span, tools) @@ -792,7 +793,7 @@ def new_stream(self, *args, **kwargs): and integration.include_prompts ): set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, [input], unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, [input], unpack=False ) # Run the agent @@ -815,7 +816,7 @@ def new_iterator(): and should_send_default_pii() and integration.include_prompts ): - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, output) span.__exit__(None, None, None) @@ -834,7 +835,7 @@ async def new_iterator_async(): and should_send_default_pii() and integration.include_prompts ): - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, output) + set_data_normalized(span, 
ATTRS.GEN_AI_RESPONSE_TEXT, output) span.__exit__(None, None, None) diff --git a/sentry_sdk/integrations/langgraph.py b/sentry_sdk/integrations/langgraph.py index df3941bb13..0b10145fdf 100644 --- a/sentry_sdk/integrations/langgraph.py +++ b/sentry_sdk/integrations/langgraph.py @@ -3,10 +3,11 @@ import sentry_sdk from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import safe_serialize +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: @@ -114,8 +115,8 @@ def new_compile(self, *args, **kwargs): compiled_graph = f(self, *args, **kwargs) compiled_graph_name = getattr(compiled_graph, "name", None) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "create_agent") - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, compiled_graph_name) + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "create_agent") + span.set_data(ATTRS.GEN_AI_AGENT_NAME, compiled_graph_name) if compiled_graph_name: span.description = f"create_agent {compiled_graph_name}" @@ -123,7 +124,7 @@ def new_compile(self, *args, **kwargs): span.description = "create_agent" if kwargs.get("model", None) is not None: - span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, kwargs.get("model")) + span.set_data(ATTRS.GEN_AI_REQUEST_MODEL, kwargs.get("model")) tools = None get_graph = getattr(compiled_graph, "get_graph", None) @@ -138,7 +139,7 @@ def new_compile(self, *args, **kwargs): tools = list(data.tools_by_name.keys()) if tools is not None: - span.set_data(SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools) + span.set_data(ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, tools) return compiled_graph @@ -166,10 +167,10 @@ def new_invoke(self, *args, **kwargs): origin=LanggraphIntegration.origin, ) as span: if graph_name: - span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name) - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name) + span.set_data(ATTRS.GEN_AI_PIPELINE_NAME, graph_name) + span.set_data(ATTRS.GEN_AI_AGENT_NAME, graph_name) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "invoke_agent") # Store input messages to later compare with output input_messages = None @@ -182,7 +183,7 @@ def new_invoke(self, *args, **kwargs): if input_messages: set_data_normalized( span, - SPANDATA.GEN_AI_REQUEST_MESSAGES, + ATTRS.GEN_AI_REQUEST_MESSAGES, input_messages, unpack=False, ) @@ -217,10 +218,10 @@ async def new_ainvoke(self, *args, **kwargs): origin=LanggraphIntegration.origin, ) as span: if graph_name: - span.set_data(SPANDATA.GEN_AI_PIPELINE_NAME, graph_name) - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, graph_name) + span.set_data(ATTRS.GEN_AI_PIPELINE_NAME, graph_name) + span.set_data(ATTRS.GEN_AI_AGENT_NAME, graph_name) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "invoke_agent") input_messages = None if ( @@ -232,7 +233,7 @@ async def new_ainvoke(self, *args, **kwargs): if input_messages: set_data_normalized( span, - SPANDATA.GEN_AI_REQUEST_MESSAGES, + ATTRS.GEN_AI_REQUEST_MESSAGES, input_messages, unpack=False, ) @@ -305,17 +306,17 @@ def _set_response_attributes(span, input_messages, result, integration): llm_response_text = _extract_llm_response_text(new_messages) if llm_response_text: - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, llm_response_text) + set_data_normalized(span, 
ATTRS.GEN_AI_RESPONSE_TEXT, llm_response_text) elif new_messages: - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, new_messages) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, new_messages) else: - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, result) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, result) tool_calls = _extract_tool_calls(new_messages) if tool_calls: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(tool_calls), unpack=False, ) diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index 467116c8f4..11926bae00 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -4,7 +4,6 @@ from sentry_sdk import consts from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import ( @@ -12,6 +11,7 @@ event_from_exception, safe_serialize, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -179,23 +179,21 @@ def _set_input_data(span, kwargs, operation, integration): and should_send_default_pii() and integration.include_prompts ): - set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False - ) + set_data_normalized(span, ATTRS.GEN_AI_REQUEST_MESSAGES, messages, unpack=False) # Input attributes: Common - set_data_normalized(span, SPANDATA.GEN_AI_SYSTEM, "openai") - set_data_normalized(span, SPANDATA.GEN_AI_OPERATION_NAME, operation) + set_data_normalized(span, ATTRS.GEN_AI_SYSTEM, "openai") + set_data_normalized(span, ATTRS.GEN_AI_OPERATION_NAME, operation) # Input attributes: Optional kwargs_keys_to_attributes = { - "model": SPANDATA.GEN_AI_REQUEST_MODEL, - "stream": SPANDATA.GEN_AI_RESPONSE_STREAMING, - "max_tokens": SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, - "presence_penalty": SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, - "frequency_penalty": SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, - "temperature": SPANDATA.GEN_AI_REQUEST_TEMPERATURE, - "top_p": SPANDATA.GEN_AI_REQUEST_TOP_P, + "model": ATTRS.GEN_AI_REQUEST_MODEL, + "stream": ATTRS.GEN_AI_RESPONSE_STREAMING, + "max_tokens": ATTRS.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, + "frequency_penalty": ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "temperature": ATTRS.GEN_AI_REQUEST_TEMPERATURE, + "top_p": ATTRS.GEN_AI_REQUEST_TOP_P, } for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) @@ -207,14 +205,14 @@ def _set_input_data(span, kwargs, operation, integration): tools = kwargs.get("tools") if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) + span, ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) def _set_output_data(span, response, kwargs, integration, finish_span=True): # type: (Span, Any, dict[str, Any], OpenAIIntegration, bool) -> None if hasattr(response, "model"): - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_MODEL, response.model) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_MODEL, response.model) # Input messages (the prompt or data sent to the model) # used for the token usage calculation @@ -229,7 +227,7 @@ def _set_output_data(span, response, kwargs, integration, 
finish_span=True): if should_send_default_pii() and integration.include_prompts: response_text = [choice.message.dict() for choice in response.choices] if len(response_text) > 0: - set_data_normalized(span, SPANDATA.GEN_AI_RESPONSE_TEXT, response_text) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, response_text) _calculate_token_usage(messages, response, span, None, integration.count_tokens) @@ -257,14 +255,14 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True): if len(output_messages["tool"]) > 0: set_data_normalized( span, - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, output_messages["tool"], unpack=False, ) if len(output_messages["response"]) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] + span, ATTRS.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) _calculate_token_usage(messages, response, span, None, integration.count_tokens) @@ -318,7 +316,7 @@ def new_iterator(): all_responses = ["".join(chunk) for chunk in data_buf] if should_send_default_pii() and integration.include_prompts: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses + span, ATTRS.GEN_AI_RESPONSE_TEXT, all_responses ) if count_tokens_manually: _calculate_token_usage( @@ -373,7 +371,7 @@ async def new_iterator_async(): all_responses = ["".join(chunk) for chunk in data_buf] if should_send_default_pii() and integration.include_prompts: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, all_responses + span, ATTRS.GEN_AI_RESPONSE_TEXT, all_responses ) if count_tokens_manually: _calculate_token_usage( diff --git a/sentry_sdk/integrations/openai_agents/spans/ai_client.py b/sentry_sdk/integrations/openai_agents/spans/ai_client.py index d325ae86e3..d3e500e51d 100644 --- a/sentry_sdk/integrations/openai_agents/spans/ai_client.py +++ b/sentry_sdk/integrations/openai_agents/spans/ai_client.py @@ -1,5 +1,6 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN from ..utils import ( @@ -26,7 +27,7 @@ def ai_client_span(agent, get_response_kwargs): origin=SPAN_ORIGIN, ) # TODO-anton: remove hardcoded stuff and replace something that also works for embedding and so on - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "chat") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "chat") return span diff --git a/sentry_sdk/integrations/openai_agents/spans/execute_tool.py b/sentry_sdk/integrations/openai_agents/spans/execute_tool.py index 5f9e4cb340..4034210a6f 100644 --- a/sentry_sdk/integrations/openai_agents/spans/execute_tool.py +++ b/sentry_sdk/integrations/openai_agents/spans/execute_tool.py @@ -1,6 +1,7 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA, SPANSTATUS +from sentry_sdk.consts import OP, SPANSTATUS from sentry_sdk.scope import should_send_default_pii +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN from ..utils import _set_agent_data @@ -20,17 +21,17 @@ def execute_tool_span(tool, *args, **kwargs): origin=SPAN_ORIGIN, ) - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "execute_tool") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "execute_tool") if tool.__class__.__name__ == "FunctionTool": - span.set_data(SPANDATA.GEN_AI_TOOL_TYPE, "function") + span.set_data(ATTRS.GEN_AI_TOOL_TYPE, "function") - span.set_data(SPANDATA.GEN_AI_TOOL_NAME, tool.name) - 
span.set_data(SPANDATA.GEN_AI_TOOL_DESCRIPTION, tool.description) + span.set_data(ATTRS.GEN_AI_TOOL_NAME, tool.name) + span.set_data(ATTRS.GEN_AI_TOOL_DESCRIPTION, tool.description) if should_send_default_pii(): input = args[1] - span.set_data(SPANDATA.GEN_AI_TOOL_INPUT, input) + span.set_data(ATTRS.GEN_AI_TOOL_INPUT, input) return span @@ -45,4 +46,4 @@ def update_execute_tool_span(span, agent, tool, result): span.set_status(SPANSTATUS.INTERNAL_ERROR) if should_send_default_pii(): - span.set_data(SPANDATA.GEN_AI_TOOL_OUTPUT, result) + span.set_data(ATTRS.GEN_AI_TOOL_OUTPUT, result) diff --git a/sentry_sdk/integrations/openai_agents/spans/handoff.py b/sentry_sdk/integrations/openai_agents/spans/handoff.py index 78e6788c7d..6e172c5594 100644 --- a/sentry_sdk/integrations/openai_agents/spans/handoff.py +++ b/sentry_sdk/integrations/openai_agents/spans/handoff.py @@ -1,5 +1,6 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN @@ -16,4 +17,4 @@ def handoff_span(context, from_agent, to_agent_name): name=f"handoff from {from_agent.name} to {to_agent_name}", origin=SPAN_ORIGIN, ) as span: - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "handoff") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "handoff") diff --git a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py index cf06120625..9496be9bf4 100644 --- a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +++ b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py @@ -1,8 +1,9 @@ import sentry_sdk from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import safe_serialize +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN from ..utils import _set_agent_data @@ -24,7 +25,7 @@ def invoke_agent_span(context, agent, kwargs): ) span.__enter__() - span.set_data(SPANDATA.GEN_AI_OPERATION_NAME, "invoke_agent") + span.set_data(ATTRS.GEN_AI_OPERATION_NAME, "invoke_agent") if should_send_default_pii(): messages = [] @@ -57,7 +58,7 @@ def invoke_agent_span(context, agent, kwargs): if len(messages) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, messages, unpack=False ) _set_agent_data(span, agent) @@ -71,8 +72,6 @@ def update_invoke_agent_span(context, agent, output): if span: if should_send_default_pii(): - set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, output, unpack=False - ) + set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, output, unpack=False) span.__exit__(None, None, None) diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index a0487e0e3a..b0b26db4df 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -1,9 +1,9 @@ import sentry_sdk from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations import DidNotEnable from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import event_from_exception, safe_serialize +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import 
TYPE_CHECKING @@ -31,60 +31,58 @@ def _capture_exception(exc): def _set_agent_data(span, agent): # type: (sentry_sdk.tracing.Span, agents.Agent) -> None span.set_data( - SPANDATA.GEN_AI_SYSTEM, "openai" + ATTRS.GEN_AI_SYSTEM, "openai" ) # See footnote for https://opentelemetry.io/docs/specs/semconv/registry/attributes/gen-ai/#gen-ai-system for explanation why. - span.set_data(SPANDATA.GEN_AI_AGENT_NAME, agent.name) + span.set_data(ATTRS.GEN_AI_AGENT_NAME, agent.name) if agent.model_settings.max_tokens: - span.set_data( - SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, agent.model_settings.max_tokens - ) + span.set_data(ATTRS.GEN_AI_REQUEST_MAX_TOKENS, agent.model_settings.max_tokens) if agent.model: model_name = agent.model.model if hasattr(agent.model, "model") else agent.model - span.set_data(SPANDATA.GEN_AI_REQUEST_MODEL, model_name) + span.set_data(ATTRS.GEN_AI_REQUEST_MODEL, model_name) if agent.model_settings.presence_penalty: span.set_data( - SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, + ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, agent.model_settings.presence_penalty, ) if agent.model_settings.temperature: span.set_data( - SPANDATA.GEN_AI_REQUEST_TEMPERATURE, agent.model_settings.temperature + ATTRS.GEN_AI_REQUEST_TEMPERATURE, agent.model_settings.temperature ) if agent.model_settings.top_p: - span.set_data(SPANDATA.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p) + span.set_data(ATTRS.GEN_AI_REQUEST_TOP_P, agent.model_settings.top_p) if agent.model_settings.frequency_penalty: span.set_data( - SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, + ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, agent.model_settings.frequency_penalty, ) if len(agent.tools) > 0: span.set_data( - SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS, + ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize([vars(tool) for tool in agent.tools]), ) def _set_usage_data(span, usage): # type: (sentry_sdk.tracing.Span, Usage) -> None - span.set_data(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_INPUT_TOKENS, usage.input_tokens) span.set_data( - SPANDATA.GEN_AI_USAGE_INPUT_TOKENS_CACHED, + ATTRS.GEN_AI_USAGE_INPUT_TOKENS_CACHED, usage.input_tokens_details.cached_tokens, ) - span.set_data(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS, usage.output_tokens) span.set_data( - SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, + ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS_REASONING, usage.output_tokens_details.reasoning_tokens, ) - span.set_data(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) + span.set_data(ATTRS.GEN_AI_USAGE_TOTAL_TOKENS, usage.total_tokens) def _set_input_data(span, get_response_kwargs): @@ -119,7 +117,7 @@ def _set_input_data(span, get_response_kwargs): request_messages.append({"role": role, "content": messages}) set_data_normalized( - span, SPANDATA.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False + span, ATTRS.GEN_AI_REQUEST_MESSAGES, request_messages, unpack=False ) @@ -146,10 +144,10 @@ def _set_output_data(span, result): if len(output_messages["tool"]) > 0: span.set_data( - SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(output_messages["tool"]) + ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, safe_serialize(output_messages["tool"]) ) if len(output_messages["response"]) > 0: set_data_normalized( - span, SPANDATA.GEN_AI_RESPONSE_TEXT, output_messages["response"] + span, ATTRS.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) diff --git a/sentry_sdk/integrations/pymongo.py b/sentry_sdk/integrations/pymongo.py index f65ad73687..3fea0af22d 100644 
--- a/sentry_sdk/integrations/pymongo.py +++ b/sentry_sdk/integrations/pymongo.py @@ -2,11 +2,12 @@ import json import sentry_sdk -from sentry_sdk.consts import SPANSTATUS, SPANDATA, OP +from sentry_sdk.consts import SPANSTATUS, OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import Span from sentry_sdk.utils import capture_internal_exceptions +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: from pymongo import monitoring @@ -89,19 +90,19 @@ def _get_db_data(event): # type: (Any) -> Dict[str, Any] data = {} - data[SPANDATA.DB_SYSTEM] = "mongodb" + data[ATTRS.DB_SYSTEM] = "mongodb" db_name = event.database_name if db_name is not None: - data[SPANDATA.DB_NAME] = db_name + data[ATTRS.DB_NAME] = db_name server_address = event.connection_id[0] if server_address is not None: - data[SPANDATA.SERVER_ADDRESS] = server_address + data[ATTRS.SERVER_ADDRESS] = server_address server_port = event.connection_id[1] if server_port is not None: - data[SPANDATA.SERVER_PORT] = server_port + data[ATTRS.SERVER_PORT] = server_port return data @@ -129,9 +130,9 @@ def started(self, event): tags = { "db.name": event.database_name, - SPANDATA.DB_SYSTEM: "mongodb", - SPANDATA.DB_OPERATION: event.command_name, - SPANDATA.DB_MONGODB_COLLECTION: command.get(event.command_name), + ATTRS.DB_SYSTEM: "mongodb", + ATTRS.DB_OPERATION: event.command_name, + ATTRS.DB_MONGODB_COLLECTION: command.get(event.command_name), } try: diff --git a/sentry_sdk/integrations/redis/modules/caches.py b/sentry_sdk/integrations/redis/modules/caches.py index c6fc19f5b2..3370e48330 100644 --- a/sentry_sdk/integrations/redis/modules/caches.py +++ b/sentry_sdk/integrations/redis/modules/caches.py @@ -2,9 +2,10 @@ Code used for the Caches module in Sentry """ -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations.redis.utils import _get_safe_key, _key_as_string from sentry_sdk.utils import capture_internal_exceptions +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS GET_COMMANDS = ("get", "mget") SET_COMMANDS = ("set", "setex") @@ -78,19 +79,19 @@ def _get_cache_span_description(redis_command, args, kwargs, integration): def _set_cache_data(span, redis_client, properties, return_value): # type: (Span, Any, dict[str, Any], Optional[Any]) -> None with capture_internal_exceptions(): - span.set_data(SPANDATA.CACHE_KEY, properties["key"]) + span.set_data(ATTRS.CACHE_KEY, properties["key"]) if properties["redis_command"] in GET_COMMANDS: if return_value is not None: - span.set_data(SPANDATA.CACHE_HIT, True) + span.set_data(ATTRS.CACHE_HIT, True) size = ( len(str(return_value).encode("utf-8")) if not isinstance(return_value, bytes) else len(return_value) ) - span.set_data(SPANDATA.CACHE_ITEM_SIZE, size) + span.set_data(ATTRS.CACHE_ITEM_SIZE, size) else: - span.set_data(SPANDATA.CACHE_HIT, False) + span.set_data(ATTRS.CACHE_HIT, False) elif properties["redis_command"] in SET_COMMANDS: if properties["value"] is not None: @@ -99,7 +100,7 @@ def _set_cache_data(span, redis_client, properties, return_value): if not isinstance(properties["value"], bytes) else len(properties["value"]) ) - span.set_data(SPANDATA.CACHE_ITEM_SIZE, size) + span.set_data(ATTRS.CACHE_ITEM_SIZE, size) try: connection_params = redis_client.connection_pool.connection_kwargs @@ -114,8 +115,8 @@ def _set_cache_data(span, redis_client, properties, return_value): host = connection_params.get("host") if host 
is not None: - span.set_data(SPANDATA.NETWORK_PEER_ADDRESS, host) + span.set_data(ATTRS.NETWORK_PEER_ADDRESS, host) port = connection_params.get("port") if port is not None: - span.set_data(SPANDATA.NETWORK_PEER_PORT, port) + span.set_data(ATTRS.NETWORK_PEER_PORT, port) diff --git a/sentry_sdk/integrations/redis/modules/queries.py b/sentry_sdk/integrations/redis/modules/queries.py index e0d85a4ef7..b6c45807de 100644 --- a/sentry_sdk/integrations/redis/modules/queries.py +++ b/sentry_sdk/integrations/redis/modules/queries.py @@ -2,9 +2,10 @@ Code used for the Queries module in Sentry """ -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations.redis.utils import _get_safe_command from sentry_sdk.utils import capture_internal_exceptions +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -45,19 +46,19 @@ def _get_db_span_description(integration, command_name, args): def _set_db_data_on_span(span, connection_params): # type: (Span, dict[str, Any]) -> None - span.set_data(SPANDATA.DB_SYSTEM, "redis") + span.set_data(ATTRS.DB_SYSTEM, "redis") db = connection_params.get("db") if db is not None: - span.set_data(SPANDATA.DB_NAME, str(db)) + span.set_data(ATTRS.DB_NAME, str(db)) host = connection_params.get("host") if host is not None: - span.set_data(SPANDATA.SERVER_ADDRESS, host) + span.set_data(ATTRS.SERVER_ADDRESS, host) port = connection_params.get("port") if port is not None: - span.set_data(SPANDATA.SERVER_PORT, port) + span.set_data(ATTRS.SERVER_PORT, port) def _set_db_data(span, redis_instance): diff --git a/sentry_sdk/integrations/redis/utils.py b/sentry_sdk/integrations/redis/utils.py index cf230f6648..c2700951ca 100644 --- a/sentry_sdk/integrations/redis/utils.py +++ b/sentry_sdk/integrations/redis/utils.py @@ -1,4 +1,3 @@ -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.redis.consts import ( _COMMANDS_INCLUDING_SENSITIVE_DATA, _MAX_NUM_ARGS, @@ -8,6 +7,7 @@ ) from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import SENSITIVE_DATA_SUBSTITUTE +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -138,7 +138,7 @@ def _set_client_data(span, is_cluster, name, *args): span.set_tag("redis.is_cluster", is_cluster) if name: span.set_tag("redis.command", name) - span.set_tag(SPANDATA.DB_OPERATION, name) + span.set_tag(ATTRS.DB_OPERATION, name) if name and args: name_low = name.lower() diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py index 068d373053..57f3f492dd 100644 --- a/sentry_sdk/integrations/sqlalchemy.py +++ b/sentry_sdk/integrations/sqlalchemy.py @@ -1,4 +1,4 @@ -from sentry_sdk.consts import SPANSTATUS, SPANDATA +from sentry_sdk.consts import SPANSTATUS from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.tracing_utils import add_query_source, record_sql_queries from sentry_sdk.utils import ( @@ -6,6 +6,7 @@ ensure_integration_enabled, parse_version, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: from sqlalchemy.engine import Engine # type: ignore @@ -128,19 +129,19 @@ def _set_db_data(span, conn): # type: (Span, Any) -> None db_system = _get_db_system(conn.engine.name) if db_system is not None: - span.set_data(SPANDATA.DB_SYSTEM, db_system) + span.set_data(ATTRS.DB_SYSTEM, db_system) if conn.engine.url is None: return db_name = conn.engine.url.database if db_name is not None: 
- span.set_data(SPANDATA.DB_NAME, db_name) + span.set_data(ATTRS.DB_NAME, db_name) server_address = conn.engine.url.host if server_address is not None: - span.set_data(SPANDATA.SERVER_ADDRESS, server_address) + span.set_data(ATTRS.SERVER_ADDRESS, server_address) server_port = conn.engine.url.port if server_port is not None: - span.set_data(SPANDATA.SERVER_PORT, server_port) + span.set_data(ATTRS.SERVER_PORT, server_port) diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py index d388c5bca6..4fac877f90 100644 --- a/sentry_sdk/integrations/stdlib.py +++ b/sentry_sdk/integrations/stdlib.py @@ -5,7 +5,7 @@ from http.client import HTTPConnection import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations import Integration from sentry_sdk.scope import add_global_event_processor from sentry_sdk.tracing_utils import EnvironHeaders, should_propagate_trace @@ -18,6 +18,7 @@ safe_repr, parse_url, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -94,11 +95,11 @@ def putrequest(self, method, url, *args, **kwargs): % (method, parsed_url.url if parsed_url else SENSITIVE_DATA_SUBSTITUTE), origin="auto.http.stdlib.httplib", ) - span.set_data(SPANDATA.HTTP_METHOD, method) + span.set_data(ATTRS.HTTP_METHOD, method) if parsed_url is not None: span.set_data("url", parsed_url.url) - span.set_data(SPANDATA.HTTP_QUERY, parsed_url.query) - span.set_data(SPANDATA.HTTP_FRAGMENT, parsed_url.fragment) + span.set_data(ATTRS.HTTP_QUERY, parsed_url.query) + span.set_data(ATTRS.HTTP_FRAGMENT, parsed_url.fragment) rv = real_putrequest(self, method, url, *args, **kwargs) diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index fc43a33dc7..2d94c9d12d 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -4,7 +4,7 @@ from enum import Enum import sentry_sdk -from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANDATA, SPANTEMPLATE +from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANTEMPLATE from sentry_sdk.profiler.continuous_profiler import get_profiler_id from sentry_sdk.utils import ( capture_internal_exceptions, @@ -14,6 +14,7 @@ nanosecond_time, should_be_treated_as_error, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -660,22 +661,22 @@ def set_thread(self, thread_id, thread_name): # type: (Optional[int], Optional[str]) -> None if thread_id is not None: - self.set_data(SPANDATA.THREAD_ID, str(thread_id)) + self.set_data(ATTRS.THREAD_ID, str(thread_id)) if thread_name is not None: - self.set_data(SPANDATA.THREAD_NAME, thread_name) + self.set_data(ATTRS.THREAD_NAME, thread_name) def set_profiler_id(self, profiler_id): # type: (Optional[str]) -> None if profiler_id is not None: - self.set_data(SPANDATA.PROFILER_ID, profiler_id) + self.set_data(ATTRS.PROFILER_ID, profiler_id) def set_http_status(self, http_status): # type: (int) -> None self.set_tag( "http.status_code", str(http_status) ) # we keep this for backwards compatibility - self.set_data(SPANDATA.HTTP_STATUS_CODE, http_status) + self.set_data(ATTRS.HTTP_STATUS_CODE, http_status) self.set_status(get_span_status_from_http_code(http_status)) def is_success(self): @@ -779,11 +780,11 @@ def get_trace_context(self): data = {} - thread_id = self._data.get(SPANDATA.THREAD_ID) + thread_id = self._data.get(ATTRS.THREAD_ID) if thread_id is not None: data["thread.id"] = thread_id - thread_name = self._data.get(SPANDATA.THREAD_NAME) + 
thread_name = self._data.get(ATTRS.THREAD_NAME) if thread_name is not None: data["thread.name"] = thread_name @@ -794,7 +795,7 @@ def get_trace_context(self): def get_profile_context(self): # type: () -> Optional[ProfileContext] - profiler_id = self._data.get(SPANDATA.PROFILER_ID) + profiler_id = self._data.get(ATTRS.PROFILER_ID) if profiler_id is None: return None diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index c1cfde293b..6dbf830aa1 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -11,7 +11,7 @@ import uuid import sentry_sdk -from sentry_sdk.consts import OP, SPANDATA, SPANTEMPLATE +from sentry_sdk.consts import OP, SPANTEMPLATE from sentry_sdk.utils import ( capture_internal_exceptions, filename_for_module, @@ -27,6 +27,7 @@ _is_in_project_root, _module_in_list, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -167,7 +168,7 @@ def maybe_create_breadcrumbs_from_span(scope, span): elif span.op == OP.HTTP_CLIENT: level = None - status_code = span._data.get(SPANDATA.HTTP_STATUS_CODE) + status_code = span._data.get(ATTRS.HTTP_STATUS_CODE) if status_code: if 500 <= status_code <= 599: level = "error" @@ -281,14 +282,14 @@ def add_query_source(span): except Exception: lineno = None if lineno is not None: - span.set_data(SPANDATA.CODE_LINENO, frame.f_lineno) + span.set_data(ATTRS.CODE_LINENO, frame.f_lineno) try: namespace = frame.f_globals.get("__name__") except Exception: namespace = None if namespace is not None: - span.set_data(SPANDATA.CODE_NAMESPACE, namespace) + span.set_data(ATTRS.CODE_NAMESPACE, namespace) filepath = _get_frame_module_abs_path(frame) if filepath is not None: @@ -298,7 +299,7 @@ def add_query_source(span): in_app_path = filepath.replace(project_root, "").lstrip(os.sep) else: in_app_path = filepath - span.set_data(SPANDATA.CODE_FILEPATH, in_app_path) + span.set_data(ATTRS.CODE_FILEPATH, in_app_path) try: code_function = frame.f_code.co_name @@ -306,7 +307,7 @@ def add_query_source(span): code_function = None if code_function is not None: - span.set_data(SPANDATA.CODE_FUNCTION, frame.f_code.co_name) + span.set_data(ATTRS.CODE_FUNCTION, frame.f_code.co_name) def extract_sentrytrace_data(header): @@ -1007,16 +1008,16 @@ def _get_input_attributes(template, send_pii, args, kwargs): if template in [SPANTEMPLATE.AI_AGENT, SPANTEMPLATE.AI_TOOL, SPANTEMPLATE.AI_CHAT]: mapping = { - "model": (SPANDATA.GEN_AI_REQUEST_MODEL, str), - "model_name": (SPANDATA.GEN_AI_REQUEST_MODEL, str), - "agent": (SPANDATA.GEN_AI_AGENT_NAME, str), - "agent_name": (SPANDATA.GEN_AI_AGENT_NAME, str), - "max_tokens": (SPANDATA.GEN_AI_REQUEST_MAX_TOKENS, int), - "frequency_penalty": (SPANDATA.GEN_AI_REQUEST_FREQUENCY_PENALTY, float), - "presence_penalty": (SPANDATA.GEN_AI_REQUEST_PRESENCE_PENALTY, float), - "temperature": (SPANDATA.GEN_AI_REQUEST_TEMPERATURE, float), - "top_p": (SPANDATA.GEN_AI_REQUEST_TOP_P, float), - "top_k": (SPANDATA.GEN_AI_REQUEST_TOP_K, int), + "model": (ATTRS.GEN_AI_REQUEST_MODEL, str), + "model_name": (ATTRS.GEN_AI_REQUEST_MODEL, str), + "agent": (ATTRS.GEN_AI_AGENT_NAME, str), + "agent_name": (ATTRS.GEN_AI_AGENT_NAME, str), + "max_tokens": (ATTRS.GEN_AI_REQUEST_MAX_TOKENS, int), + "frequency_penalty": (ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, float), + "presence_penalty": (ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, float), + "temperature": (ATTRS.GEN_AI_REQUEST_TEMPERATURE, float), + "top_p": (ATTRS.GEN_AI_REQUEST_TOP_P, float), + "top_k": (ATTRS.GEN_AI_REQUEST_TOP_K, 
int), } def _set_from_key(key, value): @@ -1028,13 +1029,13 @@ def _set_from_key(key, value): for key, value in list(kwargs.items()): if key == "prompt" and isinstance(value, str): - attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append( + attributes.setdefault(ATTRS.GEN_AI_REQUEST_MESSAGES, []).append( {"role": "user", "content": value} ) continue if key == "system_prompt" and isinstance(value, str): - attributes.setdefault(SPANDATA.GEN_AI_REQUEST_MESSAGES, []).append( + attributes.setdefault(ATTRS.GEN_AI_REQUEST_MESSAGES, []).append( {"role": "system", "content": value} ) continue @@ -1042,14 +1043,14 @@ def _set_from_key(key, value): _set_from_key(key, value) if template == SPANTEMPLATE.AI_TOOL and send_pii: - attributes[SPANDATA.GEN_AI_TOOL_INPUT] = safe_repr( + attributes[ATTRS.GEN_AI_TOOL_INPUT] = safe_repr( {"args": args, "kwargs": kwargs} ) # Coerce to string - if SPANDATA.GEN_AI_REQUEST_MESSAGES in attributes: - attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES] = safe_repr( - attributes[SPANDATA.GEN_AI_REQUEST_MESSAGES] + if ATTRS.GEN_AI_REQUEST_MESSAGES in attributes: + attributes[ATTRS.GEN_AI_REQUEST_MESSAGES] = safe_repr( + attributes[ATTRS.GEN_AI_REQUEST_MESSAGES] ) return attributes @@ -1070,15 +1071,15 @@ def _set_from_keys(attribute, keys): attributes[attribute] = value _set_from_keys( - SPANDATA.GEN_AI_USAGE_INPUT_TOKENS, + ATTRS.GEN_AI_USAGE_INPUT_TOKENS, ("prompt_tokens", "input_tokens"), ) _set_from_keys( - SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS, + ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS, ("completion_tokens", "output_tokens"), ) _set_from_keys( - SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS, + ATTRS.GEN_AI_USAGE_TOTAL_TOKENS, ("total_tokens",), ) @@ -1111,15 +1112,15 @@ def _get_output_attributes(template, send_pii, result): # Response model model_name = _get_value(result, "model") if model_name is not None and isinstance(model_name, str): - attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name + attributes[ATTRS.GEN_AI_RESPONSE_MODEL] = model_name model_name = _get_value(result, "model_name") if model_name is not None and isinstance(model_name, str): - attributes[SPANDATA.GEN_AI_RESPONSE_MODEL] = model_name + attributes[ATTRS.GEN_AI_RESPONSE_MODEL] = model_name # Tool output if template == SPANTEMPLATE.AI_TOOL and send_pii: - attributes[SPANDATA.GEN_AI_TOOL_OUTPUT] = safe_repr(result) + attributes[ATTRS.GEN_AI_TOOL_OUTPUT] = safe_repr(result) return attributes @@ -1140,22 +1141,22 @@ def _set_input_attributes(span, template, send_pii, name, f, args, kwargs): if template == SPANTEMPLATE.AI_AGENT: attributes = { - SPANDATA.GEN_AI_OPERATION_NAME: "invoke_agent", - SPANDATA.GEN_AI_AGENT_NAME: name, + ATTRS.GEN_AI_OPERATION_NAME: "invoke_agent", + ATTRS.GEN_AI_AGENT_NAME: name, } elif template == SPANTEMPLATE.AI_CHAT: attributes = { - SPANDATA.GEN_AI_OPERATION_NAME: "chat", + ATTRS.GEN_AI_OPERATION_NAME: "chat", } elif template == SPANTEMPLATE.AI_TOOL: attributes = { - SPANDATA.GEN_AI_OPERATION_NAME: "execute_tool", - SPANDATA.GEN_AI_TOOL_NAME: name, + ATTRS.GEN_AI_OPERATION_NAME: "execute_tool", + ATTRS.GEN_AI_TOOL_NAME: name, } docstring = f.__doc__ if docstring is not None: - attributes[SPANDATA.GEN_AI_TOOL_DESCRIPTION] = docstring + attributes[ATTRS.GEN_AI_TOOL_DESCRIPTION] = docstring attributes.update(_get_input_attributes(template, send_pii, args, kwargs)) span.update_data(attributes or {}) diff --git a/setup.py b/setup.py index 58101aa65f..0481976a16 100644 --- a/setup.py +++ b/setup.py @@ -41,6 +41,7 @@ def get_file_text(file_name): install_requires=[ "urllib3>=1.26.11", 
"certifi", + "sentry-conventions>=0.1.1", ], extras_require={ "aiohttp": ["aiohttp>=3.5"], diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index 3893626026..c77d9d3bf6 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -41,14 +41,14 @@ async def __call__(self, *args, **kwargs): from anthropic.types.content_block import ContentBlock as TextBlock from sentry_sdk import start_transaction, start_span -from sentry_sdk.consts import OP, SPANDATA +from sentry_sdk.consts import OP from sentry_sdk.integrations.anthropic import ( AnthropicIntegration, _set_output_data, _collect_ai_data, ) from sentry_sdk.utils import package_version - +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS ANTHROPIC_VERSION = package_version("anthropic") @@ -117,22 +117,22 @@ def test_nonstreaming_create_message( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is False @pytest.mark.asyncio @@ -186,22 +186,22 @@ async def test_nonstreaming_create_message_async( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "Hi, I'm Claude." 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is False + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 20 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 30 + assert span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is False @pytest.mark.parametrize( @@ -286,23 +286,23 @@ def test_streaming_create_message( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 40 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 40 + assert span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -390,23 +390,23 @@ async def test_streaming_create_message_async( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "Hello, Claude"}]' ) - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "Hi! I'm Claude!" 
else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 10 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 40 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 10 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 30 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 40 + assert span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.skipif( @@ -521,25 +521,25 @@ def test_streaming_create_message_with_input_json_delta( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "{'location': 'San Francisco, CA'}" ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 417 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 417 + assert span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is True @pytest.mark.asyncio @@ -661,26 +661,26 @@ async def test_streaming_create_message_with_input_json_delta_async( assert span["op"] == OP.GEN_AI_CHAT assert span["description"] == "chat model" - assert span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "model" + assert span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "model" if send_default_pii and include_prompts: assert ( - span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '[{"role": "user", "content": "What is the weather like in San Francisco?"}]' ) assert ( - span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "{'location': 'San Francisco, CA'}" ) else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] - assert span["data"][SPANDATA.GEN_AI_USAGE_INPUT_TOKENS] == 366 - assert span["data"][SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 - assert span["data"][SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS] == 417 - assert span["data"][SPANDATA.GEN_AI_RESPONSE_STREAMING] is True + assert span["data"][ATTRS.GEN_AI_USAGE_INPUT_TOKENS] == 366 + assert span["data"][ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS] == 51 + assert span["data"][ATTRS.GEN_AI_USAGE_TOTAL_TOKENS] == 417 + assert 
span["data"][ATTRS.GEN_AI_RESPONSE_STREAMING] is True def test_exception_message_create(sentry_init, capture_events): @@ -825,9 +825,9 @@ def test_set_output_data_with_input_json_delta(sentry_init): ) assert ( - span._data.get(SPANDATA.GEN_AI_RESPONSE_TEXT) + span._data.get(ATTRS.GEN_AI_RESPONSE_TEXT) == "{'test': 'data','more': 'json'}" ) - assert span._data.get(SPANDATA.GEN_AI_USAGE_INPUT_TOKENS) == 10 - assert span._data.get(SPANDATA.GEN_AI_USAGE_OUTPUT_TOKENS) == 20 - assert span._data.get(SPANDATA.GEN_AI_USAGE_TOTAL_TOKENS) == 30 + assert span._data.get(ATTRS.GEN_AI_USAGE_INPUT_TOKENS) == 10 + assert span._data.get(ATTRS.GEN_AI_USAGE_OUTPUT_TOKENS) == 20 + assert span._data.get(ATTRS.GEN_AI_USAGE_TOTAL_TOKENS) == 30 diff --git a/tests/integrations/asyncpg/test_asyncpg.py b/tests/integrations/asyncpg/test_asyncpg.py index e23612c055..d5b7561d2f 100644 --- a/tests/integrations/asyncpg/test_asyncpg.py +++ b/tests/integrations/asyncpg/test_asyncpg.py @@ -21,8 +21,8 @@ from sentry_sdk import capture_message, start_transaction from sentry_sdk.integrations.asyncpg import AsyncPGIntegration -from sentry_sdk.consts import SPANDATA from sentry_sdk.tracing_utils import record_sql_queries +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict PG_HOST = os.getenv("SENTRY_PYTHON_TEST_POSTGRES_HOST", "localhost") @@ -516,10 +516,10 @@ async def test_query_source_disabled(sentry_init, capture_events): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data @pytest.mark.asyncio @@ -555,10 +555,10 @@ async def test_query_source_enabled( data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data @pytest.mark.asyncio @@ -588,24 +588,22 @@ async def test_query_source(sentry_init, capture_events): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 - assert ( - data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.asyncpg.test_asyncpg" - ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 + assert data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.asyncpg.test_asyncpg" + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/asyncpg/test_asyncpg.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "test_query_source" + assert data.get(ATTRS.CODE_FUNCTION) == "test_query_source" @pytest.mark.asyncio @@ -641,20 +639,20 @@ async def 
test_query_source_with_module_in_search_path(sentry_init, capture_even data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 - assert data.get(SPANDATA.CODE_NAMESPACE) == "asyncpg_helpers.helpers" - assert data.get(SPANDATA.CODE_FILEPATH) == "asyncpg_helpers/helpers.py" + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 + assert data.get(ATTRS.CODE_NAMESPACE) == "asyncpg_helpers.helpers" + assert data.get(ATTRS.CODE_FILEPATH) == "asyncpg_helpers/helpers.py" - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "execute_query_in_connection" + assert data.get(ATTRS.CODE_FUNCTION) == "execute_query_in_connection" @pytest.mark.asyncio @@ -696,10 +694,10 @@ def fake_record_sql_queries(*args, **kwargs): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data @pytest.mark.asyncio @@ -741,26 +739,23 @@ def fake_record_sql_queries(*args, **kwargs): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 - assert ( - data.get(SPANDATA.CODE_NAMESPACE) == "tests.integrations.asyncpg.test_asyncpg" - ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 + assert data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.asyncpg.test_asyncpg" + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/asyncpg/test_asyncpg.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path assert ( - data.get(SPANDATA.CODE_FUNCTION) - == "test_query_source_if_duration_over_threshold" + data.get(ATTRS.CODE_FUNCTION) == "test_query_source_if_duration_over_threshold" ) diff --git a/tests/integrations/cohere/test_cohere.py b/tests/integrations/cohere/test_cohere.py index ee876172d1..caf3699507 100644 --- a/tests/integrations/cohere/test_cohere.py +++ b/tests/integrations/cohere/test_cohere.py @@ -5,8 +5,8 @@ from cohere import Client, ChatMessage from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.cohere import CohereIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from unittest import mock # python 3.3 and above from httpx import Client as HTTPXClient @@ -54,21 +54,21 @@ def test_nonstreaming_chat( assert 
tx["type"] == "transaction" span = tx["spans"][0] assert span["op"] == "ai.chat_completions.create.cohere" - assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model" + assert span["data"][ATTRS.AI_MODEL_ID] == "some-model" if send_default_pii and include_prompts: assert ( '{"role": "system", "content": "some context"}' - in span["data"][SPANDATA.AI_INPUT_MESSAGES] + in span["data"][ATTRS.AI_INPUT_MESSAGES] ) assert ( '{"role": "user", "content": "hello"}' - in span["data"][SPANDATA.AI_INPUT_MESSAGES] + in span["data"][ATTRS.AI_INPUT_MESSAGES] ) - assert "the model response" in span["data"][SPANDATA.AI_RESPONSES] + assert "the model response" in span["data"][ATTRS.AI_RESPONSES] else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert ATTRS.AI_INPUT_MESSAGES not in span["data"] + assert ATTRS.AI_RESPONSES not in span["data"] assert span["data"]["gen_ai.usage.output_tokens"] == 10 assert span["data"]["gen_ai.usage.input_tokens"] == 20 @@ -131,21 +131,21 @@ def test_streaming_chat(sentry_init, capture_events, send_default_pii, include_p assert tx["type"] == "transaction" span = tx["spans"][0] assert span["op"] == "ai.chat_completions.create.cohere" - assert span["data"][SPANDATA.AI_MODEL_ID] == "some-model" + assert span["data"][ATTRS.AI_MODEL_ID] == "some-model" if send_default_pii and include_prompts: assert ( '{"role": "system", "content": "some context"}' - in span["data"][SPANDATA.AI_INPUT_MESSAGES] + in span["data"][ATTRS.AI_INPUT_MESSAGES] ) assert ( '{"role": "user", "content": "hello"}' - in span["data"][SPANDATA.AI_INPUT_MESSAGES] + in span["data"][ATTRS.AI_INPUT_MESSAGES] ) - assert "the model response" in span["data"][SPANDATA.AI_RESPONSES] + assert "the model response" in span["data"][ATTRS.AI_RESPONSES] else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] - assert SPANDATA.AI_RESPONSES not in span["data"] + assert ATTRS.AI_INPUT_MESSAGES not in span["data"] + assert ATTRS.AI_RESPONSES not in span["data"] assert span["data"]["gen_ai.usage.output_tokens"] == 10 assert span["data"]["gen_ai.usage.input_tokens"] == 20 @@ -207,9 +207,9 @@ def test_embed(sentry_init, capture_events, send_default_pii, include_prompts): span = tx["spans"][0] assert span["op"] == "ai.embeddings.create.cohere" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.AI_INPUT_MESSAGES] + assert "hello" in span["data"][ATTRS.AI_INPUT_MESSAGES] else: - assert SPANDATA.AI_INPUT_MESSAGES not in span["data"] + assert ATTRS.AI_INPUT_MESSAGES not in span["data"] assert span["data"]["gen_ai.usage.input_tokens"] == 10 assert span["data"]["gen_ai.usage.total_tokens"] == 10 diff --git a/tests/integrations/django/test_basic.py b/tests/integrations/django/test_basic.py index bbe29c7238..4718e1acd4 100644 --- a/tests/integrations/django/test_basic.py +++ b/tests/integrations/django/test_basic.py @@ -27,7 +27,6 @@ import sentry_sdk from sentry_sdk._compat import PY310 from sentry_sdk import capture_message, capture_exception -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.django import ( DjangoIntegration, DjangoRequestExtractor, @@ -37,6 +36,7 @@ from sentry_sdk.integrations.executing import ExecutingIntegration from sentry_sdk.profiler.utils import get_frame_name from sentry_sdk.tracing import Span +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import unpack_werkzeug_response from tests.integrations.django.myapp.wsgi import application from 
tests.integrations.django.myapp.signals import myapp_custom_signal_silenced @@ -594,7 +594,7 @@ def test_django_connect_trace(sentry_init, client, capture_events, render_span_t for span in event["spans"]: if span.get("op") == "db": data = span.get("data") - assert data.get(SPANDATA.DB_SYSTEM) == "postgresql" + assert data.get(ATTRS.DB_SYSTEM) == "postgresql" assert '- op="db": description="connect"' in render_span_tree(event) @@ -663,16 +663,16 @@ def test_db_connection_span_data(sentry_init, client, capture_events): for span in event["spans"]: if span.get("op") == "db": data = span.get("data") - assert data.get(SPANDATA.DB_SYSTEM) == "postgresql" + assert data.get(ATTRS.DB_SYSTEM) == "postgresql" conn_params = connections["postgres"].get_connection_params() - assert data.get(SPANDATA.DB_NAME) is not None - assert data.get(SPANDATA.DB_NAME) == conn_params.get( + assert data.get(ATTRS.DB_NAME) is not None + assert data.get(ATTRS.DB_NAME) == conn_params.get( "database" ) or conn_params.get("dbname") - assert data.get(SPANDATA.SERVER_ADDRESS) == os.environ.get( + assert data.get(ATTRS.SERVER_ADDRESS) == os.environ.get( "SENTRY_PYTHON_TEST_POSTGRES_HOST", "localhost" ) - assert data.get(SPANDATA.SERVER_PORT) == os.environ.get( + assert data.get(ATTRS.SERVER_PORT) == os.environ.get( "SENTRY_PYTHON_TEST_POSTGRES_PORT", "5432" ) diff --git a/tests/integrations/django/test_db_query_data.py b/tests/integrations/django/test_db_query_data.py index 41ad9d5e1c..6e602689fd 100644 --- a/tests/integrations/django/test_db_query_data.py +++ b/tests/integrations/django/test_db_query_data.py @@ -15,9 +15,9 @@ from werkzeug.test import Client from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.django import DjangoIntegration from sentry_sdk.tracing_utils import record_sql_queries +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import unpack_werkzeug_response from tests.integrations.django.utils import pytest_mark_django_db_decorator @@ -58,10 +58,10 @@ def test_query_source_disabled(sentry_init, client, capture_events): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data break else: raise AssertionError("No db span found") @@ -101,10 +101,10 @@ def test_query_source_enabled( if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data break else: @@ -138,26 +138,26 @@ def test_query_source(sentry_init, client, capture_events): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert 
ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.django.myapp.views" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/django/myapp/views.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "postgres_select_orm" + assert data.get(ATTRS.CODE_FUNCTION) == "postgres_select_orm" break else: @@ -198,20 +198,20 @@ def test_query_source_with_module_in_search_path(sentry_init, client, capture_ev if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 - assert data.get(SPANDATA.CODE_NAMESPACE) == "django_helpers.views" - assert data.get(SPANDATA.CODE_FILEPATH) == "django_helpers/views.py" + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 + assert data.get(ATTRS.CODE_NAMESPACE) == "django_helpers.views" + assert data.get(ATTRS.CODE_FILEPATH) == "django_helpers/views.py" - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "postgres_select_orm" + assert data.get(ATTRS.CODE_FUNCTION) == "postgres_select_orm" break else: @@ -246,33 +246,33 @@ def test_query_source_with_in_app_exclude(sentry_init, client, capture_events): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 if DJANGO_VERSION >= (1, 11): assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.django.myapp.settings" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/django/myapp/settings.py" ) - assert data.get(SPANDATA.CODE_FUNCTION) == "middleware" + assert data.get(ATTRS.CODE_FUNCTION) == "middleware" else: assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.django.test_db_query_data" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/django/test_db_query_data.py" ) assert ( - data.get(SPANDATA.CODE_FUNCTION) + data.get(ATTRS.CODE_FUNCTION) == 
"test_query_source_with_in_app_exclude" ) @@ -309,19 +309,19 @@ def test_query_source_with_in_app_include(sentry_init, client, capture_events): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 - assert data.get(SPANDATA.CODE_NAMESPACE) == "django.db.models.sql.compiler" - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_NAMESPACE) == "django.db.models.sql.compiler" + assert data.get(ATTRS.CODE_FILEPATH).endswith( "django/db/models/sql/compiler.py" ) - assert data.get(SPANDATA.CODE_FUNCTION) == "execute_sql" + assert data.get(ATTRS.CODE_FUNCTION) == "execute_sql" break else: raise AssertionError("No db span found") @@ -375,10 +375,10 @@ def __exit__(self, type, value, traceback): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data break else: @@ -433,26 +433,26 @@ def __exit__(self, type, value, traceback): if span.get("op") == "db" and "auth_user" in span.get("description"): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.django.myapp.views" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/django/myapp/views.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "postgres_select_orm" + assert data.get(ATTRS.CODE_FUNCTION) == "postgres_select_orm" break else: raise AssertionError("No db span found") diff --git a/tests/integrations/httpx/test_httpx.py b/tests/integrations/httpx/test_httpx.py index ba2575ce59..3b3825cd19 100644 --- a/tests/integrations/httpx/test_httpx.py +++ b/tests/integrations/httpx/test_httpx.py @@ -6,8 +6,9 @@ import sentry_sdk from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import MATCH_ALL, SPANDATA +from sentry_sdk.consts import MATCH_ALL from sentry_sdk.integrations.httpx import HttpxIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict @@ -47,10 
+48,10 @@ def before_breadcrumb(crumb, hint): assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", - SPANDATA.HTTP_STATUS_CODE: 200, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", + ATTRS.HTTP_STATUS_CODE: 200, "reason": "OK", "extra": "foo", } @@ -107,10 +108,10 @@ def test_crumb_capture_client_error( assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", - SPANDATA.HTTP_STATUS_CODE: status_code, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", + ATTRS.HTTP_STATUS_CODE: status_code, } ) @@ -380,16 +381,16 @@ def test_omit_url_data_if_parsing_fails(sentry_init, capture_events, httpx_mock) (event,) = events assert event["breadcrumbs"]["values"][0]["data"] == ApproxDict( { - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: 200, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: 200, "reason": "OK", # no url related data } ) assert "url" not in event["breadcrumbs"]["values"][0]["data"] - assert SPANDATA.HTTP_FRAGMENT not in event["breadcrumbs"]["values"][0]["data"] - assert SPANDATA.HTTP_QUERY not in event["breadcrumbs"]["values"][0]["data"] + assert ATTRS.HTTP_FRAGMENT not in event["breadcrumbs"]["values"][0]["data"] + assert ATTRS.HTTP_QUERY not in event["breadcrumbs"]["values"][0]["data"] @pytest.mark.parametrize( diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py index b6b432c523..467526df3f 100644 --- a/tests/integrations/langchain/test_langchain.py +++ b/tests/integrations/langchain/test_langchain.py @@ -4,7 +4,7 @@ import pytest -from sentry_sdk.consts import SPANDATA +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: # Langchain >= 0.2 @@ -211,23 +211,23 @@ def test_langchain_agent( if send_default_pii and include_prompts: assert ( "You are very powerful" - in chat_spans[0]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + in chat_spans[0]["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] ) - assert "5" in chat_spans[0]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] - assert "word" in tool_exec_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert 5 == int(tool_exec_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT]) + assert "5" in chat_spans[0]["data"][ATTRS.GEN_AI_RESPONSE_TEXT] + assert "word" in tool_exec_span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] + assert 5 == int(tool_exec_span["data"][ATTRS.GEN_AI_RESPONSE_TEXT]) assert ( "You are very powerful" - in chat_spans[1]["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + in chat_spans[1]["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] ) - assert "5" in chat_spans[1]["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "5" in chat_spans[1]["data"][ATTRS.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in tool_exec_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in tool_exec_span.get("data", {}) + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in chat_spans[0].get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TEXT not in chat_spans[0].get("data", {}) + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in chat_spans[1].get("data", {}) + assert 
ATTRS.GEN_AI_RESPONSE_TEXT not in chat_spans[1].get("data", {}) + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in tool_exec_span.get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TEXT not in tool_exec_span.get("data", {}) def test_langchain_error(sentry_init, capture_events): @@ -642,9 +642,9 @@ def test_tools_integration_in_spans(sentry_init, capture_events): tools_found = False for span in spans: span_data = span.get("data", {}) - if SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in span_data: + if ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS in span_data: tools_found = True - tools_data = span_data[SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = span_data[ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS] # Verify tools are in the expected format assert isinstance(tools_data, (str, list)) # Could be serialized if isinstance(tools_data, str): diff --git a/tests/integrations/langgraph/test_langgraph.py b/tests/integrations/langgraph/test_langgraph.py index 5e35f772f5..98c4b7a610 100644 --- a/tests/integrations/langgraph/test_langgraph.py +++ b/tests/integrations/langgraph/test_langgraph.py @@ -5,7 +5,8 @@ import pytest from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA, OP +from sentry_sdk.consts import OP +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS def mock_langgraph_imports(): @@ -178,12 +179,12 @@ def original_compile(self, *args, **kwargs): agent_span = agent_spans[0] assert agent_span["description"] == "create_agent test_graph" assert agent_span["origin"] == "auto.ai.langgraph" - assert agent_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "create_agent" - assert agent_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" - assert agent_span["data"][SPANDATA.GEN_AI_REQUEST_MODEL] == "test-model" - assert SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["data"] + assert agent_span["data"][ATTRS.GEN_AI_OPERATION_NAME] == "create_agent" + assert agent_span["data"][ATTRS.GEN_AI_AGENT_NAME] == "test_graph" + assert agent_span["data"][ATTRS.GEN_AI_REQUEST_MODEL] == "test-model" + assert ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS in agent_span["data"] - tools_data = agent_span["data"][SPANDATA.GEN_AI_REQUEST_AVAILABLE_TOOLS] + tools_data = agent_span["data"][ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS] assert tools_data == ["search_tool", "calculator"] assert len(tools_data) == 2 assert "search_tool" in tools_data @@ -254,15 +255,15 @@ def original_invoke(self, *args, **kwargs): invoke_span = invoke_spans[0] assert invoke_span["description"] == "invoke_agent test_graph" assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "test_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "test_graph" + assert invoke_span["data"][ATTRS.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["data"][ATTRS.GEN_AI_PIPELINE_NAME] == "test_graph" + assert invoke_span["data"][ATTRS.GEN_AI_AGENT_NAME] == "test_graph" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT in invoke_span["data"] - request_messages = invoke_span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + request_messages = invoke_span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] if isinstance(request_messages, str): import json @@ -272,11 +273,11 @@ def 
original_invoke(self, *args, **kwargs): assert request_messages[0]["content"] == "Hello, can you help me?" assert request_messages[1]["content"] == "Of course! How can I assist you?" - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert ATTRS.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] + tool_calls_data = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -286,9 +287,9 @@ def original_invoke(self, *args, **kwargs): assert tool_calls_data[0]["id"] == "call_test_123" assert tool_calls_data[0]["function"]["name"] == "search_tool" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) @pytest.mark.parametrize( @@ -352,19 +353,19 @@ async def run_test(): invoke_span = invoke_spans[0] assert invoke_span["description"] == "invoke_agent async_graph" assert invoke_span["origin"] == "auto.ai.langgraph" - assert invoke_span["data"][SPANDATA.GEN_AI_OPERATION_NAME] == "invoke_agent" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == "async_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == "async_graph" + assert invoke_span["data"][ATTRS.GEN_AI_OPERATION_NAME] == "invoke_agent" + assert invoke_span["data"][ATTRS.GEN_AI_PIPELINE_NAME] == "async_graph" + assert invoke_span["data"][ATTRS.GEN_AI_AGENT_NAME] == "async_graph" if send_default_pii and include_prompts: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES in invoke_span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT in invoke_span["data"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + response_text = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] assert response_text == expected_assistant_response - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + assert ATTRS.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] + tool_calls_data = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): import json @@ -374,9 +375,9 @@ async def run_test(): assert tool_calls_data[0]["id"] == "call_weather_456" assert tool_calls_data[0]["function"]["name"] == "get_weather" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TEXT not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_RESPONSE_TOOL_CALLS not in invoke_span.get("data", {}) def test_pregel_invoke_error(sentry_init, capture_events): @@ -504,12 +505,12 @@ def original_invoke(self, *args, 
**kwargs): if graph_name and graph_name.strip(): assert invoke_span["description"] == "invoke_agent my_graph" - assert invoke_span["data"][SPANDATA.GEN_AI_PIPELINE_NAME] == graph_name - assert invoke_span["data"][SPANDATA.GEN_AI_AGENT_NAME] == graph_name + assert invoke_span["data"][ATTRS.GEN_AI_PIPELINE_NAME] == graph_name + assert invoke_span["data"][ATTRS.GEN_AI_AGENT_NAME] == graph_name else: assert invoke_span["description"] == "invoke_agent" - assert SPANDATA.GEN_AI_PIPELINE_NAME not in invoke_span.get("data", {}) - assert SPANDATA.GEN_AI_AGENT_NAME not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_PIPELINE_NAME not in invoke_span.get("data", {}) + assert ATTRS.GEN_AI_AGENT_NAME not in invoke_span.get("data", {}) def test_complex_message_parsing(): @@ -614,14 +615,14 @@ def original_invoke(self, *args, **kwargs): assert len(invoke_spans) == 1 invoke_span = invoke_spans[0] - assert SPANDATA.GEN_AI_RESPONSE_TEXT in invoke_span["data"] - response_text = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert ATTRS.GEN_AI_RESPONSE_TEXT in invoke_span["data"] + response_text = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] assert response_text == "Final response" - assert SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] + assert ATTRS.GEN_AI_RESPONSE_TOOL_CALLS in invoke_span["data"] import json - tool_calls_data = invoke_span["data"][SPANDATA.GEN_AI_RESPONSE_TOOL_CALLS] + tool_calls_data = invoke_span["data"][ATTRS.GEN_AI_RESPONSE_TOOL_CALLS] if isinstance(tool_calls_data, str): tool_calls_data = json.loads(tool_calls_data) diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py index 18968fb36a..5cf803cbd7 100644 --- a/tests/integrations/openai/test_openai.py +++ b/tests/integrations/openai/test_openai.py @@ -34,11 +34,11 @@ SKIP_RESPONSES_TESTS = True from sentry_sdk import start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.openai import ( OpenAIIntegration, _calculate_token_usage, ) +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from unittest import mock # python 3.3 and above @@ -152,11 +152,11 @@ def test_nonstreaming_chat_completion( assert span["op"] == "gen_ai.chat" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] assert span["data"]["gen_ai.usage.output_tokens"] == 10 assert span["data"]["gen_ai.usage.input_tokens"] == 20 @@ -194,11 +194,11 @@ async def test_nonstreaming_chat_completion_async( assert span["op"] == "gen_ai.chat" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "the model response" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] + assert "the model response" in span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert 
ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] assert span["data"]["gen_ai.usage.output_tokens"] == 10 assert span["data"]["gen_ai.usage.input_tokens"] == 20 @@ -287,11 +287,11 @@ def test_streaming_chat_completion( assert span["op"] == "gen_ai.chat" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import @@ -383,11 +383,11 @@ async def test_streaming_chat_completion_async( assert span["op"] == "gen_ai.chat" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] - assert "hello world" in span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] + assert "hello world" in span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] try: import tiktoken # type: ignore # noqa # pylint: disable=unused-import @@ -473,9 +473,9 @@ def test_embeddings_create( span = tx["spans"][0] assert span["op"] == "gen_ai.embeddings" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] assert span["data"]["gen_ai.usage.input_tokens"] == 20 assert span["data"]["gen_ai.usage.total_tokens"] == 30 @@ -521,9 +521,9 @@ async def test_embeddings_create_async( span = tx["spans"][0] assert span["op"] == "gen_ai.embeddings" if send_default_pii and include_prompts: - assert "hello" in span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] + assert "hello" in span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] assert span["data"]["gen_ai.usage.input_tokens"] == 20 assert span["data"]["gen_ai.usage.total_tokens"] == 30 @@ -1332,11 +1332,11 @@ def test_streaming_responses_api( assert span["op"] == "gen_ai.responses" if send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "hello world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] assert span["data"]["gen_ai.usage.input_tokens"] == 20 assert span["data"]["gen_ai.usage.output_tokens"] == 10 @@ -1387,11 +1387,11 @@ async def test_streaming_responses_api_async( assert span["op"] == "gen_ai.responses" if 
send_default_pii and include_prompts: - assert span["data"][SPANDATA.GEN_AI_REQUEST_MESSAGES] == '["hello"]' - assert span["data"][SPANDATA.GEN_AI_RESPONSE_TEXT] == "hello world" + assert span["data"][ATTRS.GEN_AI_REQUEST_MESSAGES] == '["hello"]' + assert span["data"][ATTRS.GEN_AI_RESPONSE_TEXT] == "hello world" else: - assert SPANDATA.GEN_AI_REQUEST_MESSAGES not in span["data"] - assert SPANDATA.GEN_AI_RESPONSE_TEXT not in span["data"] + assert ATTRS.GEN_AI_REQUEST_MESSAGES not in span["data"] + assert ATTRS.GEN_AI_RESPONSE_TEXT not in span["data"] assert span["data"]["gen_ai.usage.input_tokens"] == 20 assert span["data"]["gen_ai.usage.output_tokens"] == 10 diff --git a/tests/integrations/pymongo/test_pymongo.py b/tests/integrations/pymongo/test_pymongo.py index 7e6556f85a..0fb9543642 100644 --- a/tests/integrations/pymongo/test_pymongo.py +++ b/tests/integrations/pymongo/test_pymongo.py @@ -1,6 +1,6 @@ from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.pymongo import PyMongoIntegration, _strip_pii +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from mockupdb import MockupDB, OpQuery from pymongo import MongoClient @@ -56,10 +56,10 @@ def test_transactions(sentry_init, capture_events, mongo_server, with_pii): "net.peer.port": str(mongo_server.port), } for span in find, insert_success, insert_fail: - assert span["data"][SPANDATA.DB_SYSTEM] == "mongodb" - assert span["data"][SPANDATA.DB_NAME] == "test_db" - assert span["data"][SPANDATA.SERVER_ADDRESS] == "localhost" - assert span["data"][SPANDATA.SERVER_PORT] == mongo_server.port + assert span["data"][ATTRS.DB_SYSTEM] == "mongodb" + assert span["data"][ATTRS.DB_NAME] == "test_db" + assert span["data"][ATTRS.SERVER_ADDRESS] == "localhost" + assert span["data"][ATTRS.SERVER_PORT] == mongo_server.port for field, value in common_tags.items(): assert span["tags"][field] == value assert span["data"][field] == value @@ -79,12 +79,12 @@ def test_transactions(sentry_init, capture_events, mongo_server, with_pii): assert insert_success["description"].startswith('{"insert') assert insert_fail["description"].startswith('{"insert') - assert find["data"][SPANDATA.DB_MONGODB_COLLECTION] == "test_collection" - assert find["tags"][SPANDATA.DB_MONGODB_COLLECTION] == "test_collection" - assert insert_success["data"][SPANDATA.DB_MONGODB_COLLECTION] == "test_collection" - assert insert_success["tags"][SPANDATA.DB_MONGODB_COLLECTION] == "test_collection" - assert insert_fail["data"][SPANDATA.DB_MONGODB_COLLECTION] == "erroneous" - assert insert_fail["tags"][SPANDATA.DB_MONGODB_COLLECTION] == "erroneous" + assert find["data"][ATTRS.DB_MONGODB_COLLECTION] == "test_collection" + assert find["tags"][ATTRS.DB_MONGODB_COLLECTION] == "test_collection" + assert insert_success["data"][ATTRS.DB_MONGODB_COLLECTION] == "test_collection" + assert insert_success["tags"][ATTRS.DB_MONGODB_COLLECTION] == "test_collection" + assert insert_fail["data"][ATTRS.DB_MONGODB_COLLECTION] == "erroneous" + assert insert_fail["tags"][ATTRS.DB_MONGODB_COLLECTION] == "erroneous" if with_pii: assert "1" in find["description"] assert "2" in insert_success["description"] diff --git a/tests/integrations/redis/asyncio/test_redis_asyncio.py b/tests/integrations/redis/asyncio/test_redis_asyncio.py index 17130b337b..87b8d3d94e 100644 --- a/tests/integrations/redis/asyncio/test_redis_asyncio.py +++ b/tests/integrations/redis/asyncio/test_redis_asyncio.py @@ -1,8 +1,8 @@ import pytest from sentry_sdk import 
capture_message, start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.redis import RedisIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict from fakeredis.aioredis import FakeRedis @@ -71,12 +71,12 @@ async def test_async_redis_pipeline( "count": 3, "first_ten": expected_first_ten, }, - SPANDATA.DB_SYSTEM: "redis", - SPANDATA.DB_NAME: "0", - SPANDATA.SERVER_ADDRESS: connection.connection_pool.connection_kwargs.get( + ATTRS.DB_SYSTEM: "redis", + ATTRS.DB_NAME: "0", + ATTRS.SERVER_ADDRESS: connection.connection_pool.connection_kwargs.get( "host" ), - SPANDATA.SERVER_PORT: 6379, + ATTRS.SERVER_PORT: 6379, } ) assert span["tags"] == { diff --git a/tests/integrations/redis/cluster/test_redis_cluster.py b/tests/integrations/redis/cluster/test_redis_cluster.py index 83d1b45cc9..df07e35f6e 100644 --- a/tests/integrations/redis/cluster/test_redis_cluster.py +++ b/tests/integrations/redis/cluster/test_redis_cluster.py @@ -1,8 +1,8 @@ import pytest from sentry_sdk import capture_message -from sentry_sdk.consts import SPANDATA from sentry_sdk.api import start_transaction from sentry_sdk.integrations.redis import RedisIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict import redis @@ -85,10 +85,10 @@ def test_rediscluster_basic(sentry_init, capture_events, send_default_pii, descr assert span["description"] == description assert span["data"] == ApproxDict( { - SPANDATA.DB_SYSTEM: "redis", + ATTRS.DB_SYSTEM: "redis", # ClusterNode converts localhost to 127.0.0.1 - SPANDATA.SERVER_ADDRESS: "127.0.0.1", - SPANDATA.SERVER_PORT: 6379, + ATTRS.SERVER_ADDRESS: "127.0.0.1", + ATTRS.SERVER_PORT: 6379, } ) assert span["tags"] == { @@ -134,10 +134,10 @@ def test_rediscluster_pipeline( "count": 3, "first_ten": expected_first_ten, }, - SPANDATA.DB_SYSTEM: "redis", + ATTRS.DB_SYSTEM: "redis", # ClusterNode converts localhost to 127.0.0.1 - SPANDATA.SERVER_ADDRESS: "127.0.0.1", - SPANDATA.SERVER_PORT: 6379, + ATTRS.SERVER_ADDRESS: "127.0.0.1", + ATTRS.SERVER_PORT: 6379, } ) assert span["tags"] == { diff --git a/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py b/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py index 993a2962ca..1f0e1ad6c2 100644 --- a/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py +++ b/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py @@ -1,8 +1,8 @@ import pytest from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.redis import RedisIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict from redis.asyncio import cluster @@ -87,10 +87,10 @@ async def test_async_basic(sentry_init, capture_events, send_default_pii, descri assert span["description"] == description assert span["data"] == ApproxDict( { - SPANDATA.DB_SYSTEM: "redis", + ATTRS.DB_SYSTEM: "redis", # ClusterNode converts localhost to 127.0.0.1 - SPANDATA.SERVER_ADDRESS: "127.0.0.1", - SPANDATA.SERVER_PORT: 6379, + ATTRS.SERVER_ADDRESS: "127.0.0.1", + ATTRS.SERVER_PORT: 6379, } ) assert span["tags"] == { @@ -137,10 +137,10 @@ async def test_async_redis_pipeline( "count": 3, "first_ten": expected_first_ten, }, - SPANDATA.DB_SYSTEM: "redis", + ATTRS.DB_SYSTEM: "redis", # ClusterNode converts localhost to 127.0.0.1 - SPANDATA.SERVER_ADDRESS: "127.0.0.1", - 
SPANDATA.SERVER_PORT: 6379, + ATTRS.SERVER_ADDRESS: "127.0.0.1", + ATTRS.SERVER_PORT: 6379, } ) assert span["tags"] == { diff --git a/tests/integrations/redis/test_redis.py b/tests/integrations/redis/test_redis.py index 5173885f33..4b52f5fce9 100644 --- a/tests/integrations/redis/test_redis.py +++ b/tests/integrations/redis/test_redis.py @@ -4,7 +4,7 @@ from fakeredis import FakeStrictRedis from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import SPANDATA +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from sentry_sdk.integrations.redis import RedisIntegration @@ -71,7 +71,7 @@ def test_redis_pipeline( (span,) = event["spans"] assert span["op"] == "db.redis" assert span["description"] == "redis.pipeline.execute" - assert span["data"][SPANDATA.DB_SYSTEM] == "redis" + assert span["data"][ATTRS.DB_SYSTEM] == "redis" assert span["data"]["redis.commands"] == { "count": 3, "first_ten": expected_first_ten, @@ -263,10 +263,10 @@ def test_db_connection_attributes_client(sentry_init, capture_events): assert span["op"] == "db.redis" assert span["description"] == "GET 'foobar'" - assert span["data"][SPANDATA.DB_SYSTEM] == "redis" - assert span["data"][SPANDATA.DB_NAME] == "1" - assert span["data"][SPANDATA.SERVER_ADDRESS] == "localhost" - assert span["data"][SPANDATA.SERVER_PORT] == 63791 + assert span["data"][ATTRS.DB_SYSTEM] == "redis" + assert span["data"][ATTRS.DB_NAME] == "1" + assert span["data"][ATTRS.SERVER_ADDRESS] == "localhost" + assert span["data"][ATTRS.SERVER_PORT] == 63791 def test_db_connection_attributes_pipeline(sentry_init, capture_events): @@ -289,10 +289,10 @@ def test_db_connection_attributes_pipeline(sentry_init, capture_events): assert span["op"] == "db.redis" assert span["description"] == "redis.pipeline.execute" - assert span["data"][SPANDATA.DB_SYSTEM] == "redis" - assert span["data"][SPANDATA.DB_NAME] == "1" - assert span["data"][SPANDATA.SERVER_ADDRESS] == "localhost" - assert span["data"][SPANDATA.SERVER_PORT] == 63791 + assert span["data"][ATTRS.DB_SYSTEM] == "redis" + assert span["data"][ATTRS.DB_NAME] == "1" + assert span["data"][ATTRS.SERVER_ADDRESS] == "localhost" + assert span["data"][ATTRS.SERVER_PORT] == 63791 def test_span_origin(sentry_init, capture_events): diff --git a/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py b/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py index 36a27d569d..111f012f78 100644 --- a/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py +++ b/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py @@ -5,8 +5,8 @@ from sentry_sdk import capture_message from sentry_sdk.api import start_transaction -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.redis import RedisIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict @@ -101,10 +101,10 @@ def test_rediscluster_pipeline( "count": 3, "first_ten": expected_first_ten, }, - SPANDATA.DB_SYSTEM: "redis", - SPANDATA.DB_NAME: "1", - SPANDATA.SERVER_ADDRESS: "localhost", - SPANDATA.SERVER_PORT: 63791, + ATTRS.DB_SYSTEM: "redis", + ATTRS.DB_NAME: "1", + ATTRS.SERVER_ADDRESS: "localhost", + ATTRS.SERVER_PORT: 63791, } ) assert span["tags"] == { @@ -130,10 +130,10 @@ def test_db_connection_attributes_client(sentry_init, capture_events, redisclust assert span["data"] == ApproxDict( { - SPANDATA.DB_SYSTEM: "redis", - SPANDATA.DB_NAME: "1", - SPANDATA.SERVER_ADDRESS: "localhost", 
- SPANDATA.SERVER_PORT: 63791, + ATTRS.DB_SYSTEM: "redis", + ATTRS.DB_NAME: "1", + ATTRS.SERVER_ADDRESS: "localhost", + ATTRS.SERVER_PORT: 63791, } ) @@ -164,9 +164,9 @@ def test_db_connection_attributes_pipeline( "count": 1, "first_ten": ["GET 'foo'"], }, - SPANDATA.DB_SYSTEM: "redis", - SPANDATA.DB_NAME: "1", - SPANDATA.SERVER_ADDRESS: "localhost", - SPANDATA.SERVER_PORT: 63791, + ATTRS.DB_SYSTEM: "redis", + ATTRS.DB_NAME: "1", + ATTRS.SERVER_ADDRESS: "localhost", + ATTRS.SERVER_PORT: 63791, } ) diff --git a/tests/integrations/requests/test_requests.py b/tests/integrations/requests/test_requests.py index 8cfc0f932f..1747581a6b 100644 --- a/tests/integrations/requests/test_requests.py +++ b/tests/integrations/requests/test_requests.py @@ -5,8 +5,8 @@ import requests from sentry_sdk import capture_message -from sentry_sdk.consts import SPANDATA from sentry_sdk.integrations.stdlib import StdlibIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict, create_mock_http_server PORT = create_mock_http_server() @@ -27,10 +27,10 @@ def test_crumb_capture(sentry_init, capture_events): assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", - SPANDATA.HTTP_STATUS_CODE: response.status_code, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", + ATTRS.HTTP_STATUS_CODE: response.status_code, "reason": response.reason, } ) @@ -75,10 +75,10 @@ def test_crumb_capture_client_error(sentry_init, capture_events, status_code, le assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", - SPANDATA.HTTP_STATUS_CODE: response.status_code, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", + ATTRS.HTTP_STATUS_CODE: response.status_code, "reason": response.reason, } ) @@ -103,12 +103,12 @@ def test_omit_url_data_if_parsing_fails(sentry_init, capture_events): (event,) = events assert event["breadcrumbs"]["values"][0]["data"] == ApproxDict( { - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: response.status_code, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: response.status_code, "reason": response.reason, # no url related data } ) assert "url" not in event["breadcrumbs"]["values"][0]["data"] - assert SPANDATA.HTTP_FRAGMENT not in event["breadcrumbs"]["values"][0]["data"] - assert SPANDATA.HTTP_QUERY not in event["breadcrumbs"]["values"][0]["data"] + assert ATTRS.HTTP_FRAGMENT not in event["breadcrumbs"]["values"][0]["data"] + assert ATTRS.HTTP_QUERY not in event["breadcrumbs"]["values"][0]["data"] diff --git a/tests/integrations/sqlalchemy/test_sqlalchemy.py b/tests/integrations/sqlalchemy/test_sqlalchemy.py index d2a31a55d5..5e466d44cd 100644 --- a/tests/integrations/sqlalchemy/test_sqlalchemy.py +++ b/tests/integrations/sqlalchemy/test_sqlalchemy.py @@ -11,11 +11,12 @@ import sentry_sdk from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import DEFAULT_MAX_VALUE_LENGTH, SPANDATA +from sentry_sdk.consts import DEFAULT_MAX_VALUE_LENGTH from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration from sentry_sdk.serializer import MAX_EVENT_BYTES from sentry_sdk.tracing_utils import record_sql_queries from sentry_sdk.utils import json_dumps +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS def test_orm_queries(sentry_init, capture_events): @@ -126,10 +127,10 @@ class 
Address(Base): (event,) = events for span in event["spans"]: - assert span["data"][SPANDATA.DB_SYSTEM] == "sqlite" - assert span["data"][SPANDATA.DB_NAME] == ":memory:" - assert SPANDATA.SERVER_ADDRESS not in span["data"] - assert SPANDATA.SERVER_PORT not in span["data"] + assert span["data"][ATTRS.DB_SYSTEM] == "sqlite" + assert span["data"][ATTRS.DB_NAME] == ":memory:" + assert ATTRS.SERVER_ADDRESS not in span["data"] + assert ATTRS.SERVER_PORT not in span["data"] assert ( render_span_tree(event) @@ -200,10 +201,10 @@ class Address(Base): (event,) = events for span in event["spans"]: - assert span["data"][SPANDATA.DB_SYSTEM] == "sqlite" - assert SPANDATA.DB_NAME not in span["data"] - assert SPANDATA.SERVER_ADDRESS not in span["data"] - assert SPANDATA.SERVER_PORT not in span["data"] + assert span["data"][ATTRS.DB_SYSTEM] == "sqlite" + assert ATTRS.DB_NAME not in span["data"] + assert ATTRS.SERVER_ADDRESS not in span["data"] + assert ATTRS.SERVER_PORT not in span["data"] def test_long_sql_query_preserved(sentry_init, capture_events): @@ -339,10 +340,10 @@ class Person(Base): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data break else: raise AssertionError("No db span found") @@ -391,10 +392,10 @@ class Person(Base): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data break else: raise AssertionError("No db span found") @@ -438,25 +439,25 @@ class Person(Base): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.sqlalchemy.test_sqlalchemy" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/sqlalchemy/test_sqlalchemy.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "test_query_source" + assert data.get(ATTRS.CODE_FUNCTION) == "test_query_source" break else: raise AssertionError("No db span found") @@ -509,20 +510,20 @@ class Person(Base): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert 
type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 - assert data.get(SPANDATA.CODE_NAMESPACE) == "sqlalchemy_helpers.helpers" - assert data.get(SPANDATA.CODE_FILEPATH) == "sqlalchemy_helpers/helpers.py" + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 + assert data.get(ATTRS.CODE_NAMESPACE) == "sqlalchemy_helpers.helpers" + assert data.get(ATTRS.CODE_FILEPATH) == "sqlalchemy_helpers/helpers.py" - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path - assert data.get(SPANDATA.CODE_FUNCTION) == "query_first_model_from_session" + assert data.get(ATTRS.CODE_FUNCTION) == "query_first_model_from_session" break else: raise AssertionError("No db span found") @@ -584,10 +585,10 @@ def __exit__(self, type, value, traceback): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO not in data - assert SPANDATA.CODE_NAMESPACE not in data - assert SPANDATA.CODE_FILEPATH not in data - assert SPANDATA.CODE_FUNCTION not in data + assert ATTRS.CODE_LINENO not in data + assert ATTRS.CODE_NAMESPACE not in data + assert ATTRS.CODE_FILEPATH not in data + assert ATTRS.CODE_FUNCTION not in data break else: @@ -650,26 +651,26 @@ def __exit__(self, type, value, traceback): ): data = span.get("data", {}) - assert SPANDATA.CODE_LINENO in data - assert SPANDATA.CODE_NAMESPACE in data - assert SPANDATA.CODE_FILEPATH in data - assert SPANDATA.CODE_FUNCTION in data + assert ATTRS.CODE_LINENO in data + assert ATTRS.CODE_NAMESPACE in data + assert ATTRS.CODE_FILEPATH in data + assert ATTRS.CODE_FUNCTION in data - assert type(data.get(SPANDATA.CODE_LINENO)) == int - assert data.get(SPANDATA.CODE_LINENO) > 0 + assert type(data.get(ATTRS.CODE_LINENO)) == int + assert data.get(ATTRS.CODE_LINENO) > 0 assert ( - data.get(SPANDATA.CODE_NAMESPACE) + data.get(ATTRS.CODE_NAMESPACE) == "tests.integrations.sqlalchemy.test_sqlalchemy" ) - assert data.get(SPANDATA.CODE_FILEPATH).endswith( + assert data.get(ATTRS.CODE_FILEPATH).endswith( "tests/integrations/sqlalchemy/test_sqlalchemy.py" ) - is_relative_path = data.get(SPANDATA.CODE_FILEPATH)[0] != os.sep + is_relative_path = data.get(ATTRS.CODE_FILEPATH)[0] != os.sep assert is_relative_path assert ( - data.get(SPANDATA.CODE_FUNCTION) + data.get(ATTRS.CODE_FUNCTION) == "test_query_source_if_duration_over_threshold" ) break diff --git a/tests/integrations/stdlib/test_httplib.py b/tests/integrations/stdlib/test_httplib.py index b8d46d0558..a18fab4678 100644 --- a/tests/integrations/stdlib/test_httplib.py +++ b/tests/integrations/stdlib/test_httplib.py @@ -7,9 +7,10 @@ import pytest from sentry_sdk import capture_message, start_transaction -from sentry_sdk.consts import MATCH_ALL, SPANDATA +from sentry_sdk.consts import MATCH_ALL from sentry_sdk.tracing import Transaction from sentry_sdk.integrations.stdlib import StdlibIntegration +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from tests.conftest import ApproxDict, create_mock_http_server @@ -33,11 +34,11 @@ def test_crumb_capture(sentry_init, capture_events): assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: 200, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: 200, "reason": "OK", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", } ) @@ -78,10 +79,10 @@ def test_crumb_capture_client_error(sentry_init, 
capture_events, status_code, le assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: status_code, - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: status_code, + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", } ) @@ -106,12 +107,12 @@ def before_breadcrumb(crumb, hint): assert crumb["data"] == ApproxDict( { "url": url, - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: 200, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: 200, "reason": "OK", "extra": "foo", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", } ) @@ -166,11 +167,11 @@ def test_httplib_misuse(sentry_init, capture_events, request): assert crumb["data"] == ApproxDict( { "url": "http://localhost:{}/200".format(PORT), - SPANDATA.HTTP_METHOD: "GET", - SPANDATA.HTTP_STATUS_CODE: 200, + ATTRS.HTTP_METHOD: "GET", + ATTRS.HTTP_STATUS_CODE: 200, "reason": "OK", - SPANDATA.HTTP_FRAGMENT: "", - SPANDATA.HTTP_QUERY: "", + ATTRS.HTTP_FRAGMENT: "", + ATTRS.HTTP_QUERY: "", } ) diff --git a/tests/test_logs.py b/tests/test_logs.py index 596a31922e..5dde873deb 100644 --- a/tests/test_logs.py +++ b/tests/test_logs.py @@ -10,7 +10,8 @@ from sentry_sdk import get_client from sentry_sdk.envelope import Envelope from sentry_sdk.types import Log -from sentry_sdk.consts import SPANDATA, VERSION +from sentry_sdk.consts import VERSION +from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS minimum_python_37 = pytest.mark.skipif( sys.version_info < (3, 7), reason="Asyncio tests need Python >= 3.7" @@ -232,7 +233,7 @@ def test_logs_attributes(sentry_init, capture_envelopes): assert logs[0]["attributes"]["sentry.environment"] == "production" assert "sentry.release" in logs[0]["attributes"] assert logs[0]["attributes"]["sentry.message.parameter.my_var"] == "some value" - assert logs[0]["attributes"][SPANDATA.SERVER_ADDRESS] == "test-server" + assert logs[0]["attributes"][ATTRS.SERVER_ADDRESS] == "test-server" assert logs[0]["attributes"]["sentry.sdk.name"].startswith("sentry.python") assert logs[0]["attributes"]["sentry.sdk.version"] == VERSION From b51536c550a8b88f1ff77079dc58af2f569fe63d Mon Sep 17 00:00:00 2001 From: Anton Pirker Date: Thu, 18 Sep 2025 16:12:18 +0200 Subject: [PATCH 2/2] better imports --- sentry_sdk/ai/monitoring.py | 2 +- sentry_sdk/client.py | 2 +- sentry_sdk/consts.py | 2 + sentry_sdk/integrations/aiohttp.py | 4 +- sentry_sdk/integrations/anthropic.py | 3 +- sentry_sdk/integrations/asyncpg.py | 3 +- sentry_sdk/integrations/boto3.py | 3 +- sentry_sdk/integrations/celery/__init__.py | 3 +- sentry_sdk/integrations/clickhouse_driver.py | 4 +- sentry_sdk/integrations/cohere.py | 51 ++++++++++--------- sentry_sdk/integrations/django/__init__.py | 3 +- sentry_sdk/integrations/django/caching.py | 3 +- sentry_sdk/integrations/httpx.py | 3 +- sentry_sdk/integrations/huggingface_hub.py | 3 +- sentry_sdk/integrations/langchain.py | 3 +- sentry_sdk/integrations/langgraph.py | 3 +- sentry_sdk/integrations/openai.py | 39 +++++++------- .../openai_agents/spans/ai_client.py | 3 +- .../openai_agents/spans/execute_tool.py | 3 +- .../openai_agents/spans/handoff.py | 3 +- .../openai_agents/spans/invoke_agent.py | 3 +- .../integrations/openai_agents/utils.py | 2 +- sentry_sdk/integrations/pymongo.py | 3 +- .../integrations/redis/modules/caches.py | 3 +- .../integrations/redis/modules/queries.py | 3 +- sentry_sdk/integrations/redis/utils.py | 2 +- 
 sentry_sdk/integrations/sqlalchemy.py | 4 +-
 sentry_sdk/integrations/stdlib.py | 3 +-
 sentry_sdk/tracing.py | 3 +-
 sentry_sdk/tracing_utils.py | 3 +-
 .../integrations/anthropic/test_anthropic.py | 3 +-
 tests/integrations/asyncpg/test_asyncpg.py | 2 +-
 tests/integrations/cohere/test_cohere.py | 2 +-
 tests/integrations/django/test_basic.py | 2 +-
 .../integrations/django/test_db_query_data.py | 2 +-
 tests/integrations/httpx/test_httpx.py | 3 +-
 .../integrations/langchain/test_langchain.py | 3 +-
 .../integrations/langgraph/test_langgraph.py | 3 +-
 tests/integrations/openai/test_openai.py | 5 +-
 tests/integrations/pymongo/test_pymongo.py | 2 +-
 .../redis/asyncio/test_redis_asyncio.py | 2 +-
 .../redis/cluster/test_redis_cluster.py | 2 +-
 .../test_redis_cluster_asyncio.py | 2 +-
 tests/integrations/redis/test_redis.py | 2 +-
 .../test_redis_py_cluster_legacy.py | 2 +-
 tests/integrations/requests/test_requests.py | 2 +-
 .../sqlalchemy/test_sqlalchemy.py | 3 +-
 tests/integrations/stdlib/test_httplib.py | 3 +-
 tests/test_logs.py | 3 +-
 49 files changed, 99 insertions(+), 121 deletions(-)

diff --git a/sentry_sdk/ai/monitoring.py b/sentry_sdk/ai/monitoring.py
index cbd2b31c44..42ad7ff309 100644
--- a/sentry_sdk/ai/monitoring.py
+++ b/sentry_sdk/ai/monitoring.py
@@ -3,9 +3,9 @@
 import sentry_sdk.utils
 from sentry_sdk import start_span
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.tracing import Span
 from sentry_sdk.utils import ContextVar
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 from typing import TYPE_CHECKING

diff --git a/sentry_sdk/client.py b/sentry_sdk/client.py
index de94631e91..72cb11b7c0 100644
--- a/sentry_sdk/client.py
+++ b/sentry_sdk/client.py
@@ -30,6 +30,7 @@
 from sentry_sdk.tracing import trace
 from sentry_sdk.transport import BaseHttpTransport, make_transport
 from sentry_sdk.consts import (
+    ATTRS,
     DEFAULT_MAX_VALUE_LENGTH,
     DEFAULT_OPTIONS,
     INSTRUMENTER,
@@ -48,7 +49,6 @@
 )
 from sentry_sdk.scrubber import EventScrubber
 from sentry_sdk.monitor import Monitor
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 if TYPE_CHECKING:
     from typing import Any

diff --git a/sentry_sdk/consts.py b/sentry_sdk/consts.py
index 339241ea85..ba4cf91b5c 100644
--- a/sentry_sdk/consts.py
+++ b/sentry_sdk/consts.py
@@ -2,6 +2,8 @@
 from enum import Enum
 from typing import TYPE_CHECKING

+from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS # noqa: F401
+
 # up top to prevent circular import due to integration import
 # This is more or less an arbitrary large-ish value for now, so that we allow
 # pretty long strings (like LLM prompts), but still have *some* upper limit

diff --git a/sentry_sdk/integrations/aiohttp.py b/sentry_sdk/integrations/aiohttp.py
index 7e9c822cb5..cf0eb8340f 100644
--- a/sentry_sdk/integrations/aiohttp.py
+++ b/sentry_sdk/integrations/aiohttp.py
@@ -4,7 +4,7 @@
 import sentry_sdk
 from sentry_sdk.api import continue_trace
-from sentry_sdk.consts import OP, SPANSTATUS
+from sentry_sdk.consts import ATTRS, OP, SPANSTATUS
 from sentry_sdk.integrations import (
     _DEFAULT_FAILED_REQUEST_STATUS_CODES,
     _check_minimum_version,
@@ -37,7 +37,7 @@
     SENSITIVE_DATA_SUBSTITUTE,
     AnnotatedValue,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+

 try:
     import asyncio

diff --git a/sentry_sdk/integrations/anthropic.py b/sentry_sdk/integrations/anthropic.py
index 5cf51a0b7e..28abc429cf 100644
--- a/sentry_sdk/integrations/anthropic.py
+++ b/sentry_sdk/integrations/anthropic.py
@@ -4,7 +4,7 @@
 import sentry_sdk
 from sentry_sdk.ai.monitoring import record_token_usage
 from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function
-from sentry_sdk.consts import OP
+from sentry_sdk.consts import ATTRS, OP
 from sentry_sdk.integrations import _check_minimum_version, DidNotEnable, Integration
 from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import (
@@ -13,7 +13,6 @@
     package_version,
     safe_serialize,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 try:
     try:

diff --git a/sentry_sdk/integrations/asyncpg.py b/sentry_sdk/integrations/asyncpg.py
index 95539ee00e..eab98147c6 100644
--- a/sentry_sdk/integrations/asyncpg.py
+++ b/sentry_sdk/integrations/asyncpg.py
@@ -3,7 +3,7 @@
 from typing import Any, TypeVar, Callable, Awaitable, Iterator

 import sentry_sdk
-from sentry_sdk.consts import OP
+from sentry_sdk.consts import ATTRS, OP
 from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
 from sentry_sdk.tracing import Span
 from sentry_sdk.tracing_utils import add_query_source, record_sql_queries
@@ -12,7 +12,6 @@
     parse_version,
     capture_internal_exceptions,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 try:
     import asyncpg # type: ignore[import-not-found]

diff --git a/sentry_sdk/integrations/boto3.py b/sentry_sdk/integrations/boto3.py
index 08a12a5395..73f1dbe02b 100644
--- a/sentry_sdk/integrations/boto3.py
+++ b/sentry_sdk/integrations/boto3.py
@@ -1,7 +1,7 @@
 from functools import partial

 import sentry_sdk
-from sentry_sdk.consts import OP
+from sentry_sdk.consts import ATTRS, OP
 from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
 from sentry_sdk.tracing import Span
 from sentry_sdk.utils import (
@@ -10,7 +10,6 @@
     parse_url,
     parse_version,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 from typing import TYPE_CHECKING

diff --git a/sentry_sdk/integrations/celery/__init__.py b/sentry_sdk/integrations/celery/__init__.py
index 940ae187a2..18e4633ba3 100644
--- a/sentry_sdk/integrations/celery/__init__.py
+++ b/sentry_sdk/integrations/celery/__init__.py
@@ -5,7 +5,7 @@
 import sentry_sdk
 from sentry_sdk import isolation_scope
 from sentry_sdk.api import continue_trace
-from sentry_sdk.consts import OP, SPANSTATUS
+from sentry_sdk.consts import ATTRS, OP, SPANSTATUS
 from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
 from sentry_sdk.integrations.celery.beat import (
     _patch_beat_apply_entry,
@@ -22,7 +22,6 @@
     event_from_exception,
     reraise,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS

 from typing import TYPE_CHECKING

diff --git a/sentry_sdk/integrations/clickhouse_driver.py b/sentry_sdk/integrations/clickhouse_driver.py
index 17b36f7882..a25958dfb7 100644
--- a/sentry_sdk/integrations/clickhouse_driver.py
+++ b/sentry_sdk/integrations/clickhouse_driver.py
@@ -1,10 +1,10 @@
 import sentry_sdk
-from sentry_sdk.consts import OP
+from sentry_sdk.consts import ATTRS, OP
 from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable
 from sentry_sdk.tracing import Span
 from sentry_sdk.scope import should_send_default_pii
 from sentry_sdk.utils import capture_internal_exceptions, ensure_integration_enabled
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+

 from typing import TYPE_CHECKING, TypeVar

diff --git a/sentry_sdk/integrations/cohere.py b/sentry_sdk/integrations/cohere.py
index 91cc2e23b9..37cc727bd9 100644
--- a/sentry_sdk/integrations/cohere.py
+++
b/sentry_sdk/integrations/cohere.py @@ -3,7 +3,6 @@ from sentry_sdk import consts from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -37,32 +36,32 @@ COLLECTED_CHAT_PARAMS = { - "model": ATTRS.AI_MODEL_ID, - "k": ATTRS.AI_TOP_K, - "p": ATTRS.AI_TOP_P, - "seed": ATTRS.AI_SEED, - "frequency_penalty": ATTRS.AI_FREQUENCY_PENALTY, - "presence_penalty": ATTRS.AI_PRESENCE_PENALTY, - "raw_prompting": ATTRS.AI_RAW_PROMPTING, + "model": consts.ATTRS.AI_MODEL_ID, + "k": consts.ATTRS.AI_TOP_K, + "p": consts.ATTRS.AI_TOP_P, + "seed": consts.ATTRS.AI_SEED, + "frequency_penalty": consts.ATTRS.AI_FREQUENCY_PENALTY, + "presence_penalty": consts.ATTRS.AI_PRESENCE_PENALTY, + "raw_prompting": consts.ATTRS.AI_RAW_PROMPTING, } COLLECTED_PII_CHAT_PARAMS = { - "tools": ATTRS.AI_TOOLS, - "preamble": ATTRS.AI_PREAMBLE, + "tools": consts.ATTRS.AI_TOOLS, + "preamble": consts.ATTRS.AI_PREAMBLE, } COLLECTED_CHAT_RESP_ATTRS = { - "generation_id": ATTRS.AI_GENERATION_ID, - "is_search_required": ATTRS.AI_SEARCH_REQUIRED, - "finish_reason": ATTRS.AI_FINISH_REASON, + "generation_id": consts.ATTRS.AI_GENERATION_ID, + "is_search_required": consts.ATTRS.AI_SEARCH_REQUIRED, + "finish_reason": consts.ATTRS.AI_FINISH_REASON, } COLLECTED_PII_CHAT_RESP_ATTRS = { - "citations": ATTRS.AI_CITATIONS, - "documents": ATTRS.AI_DOCUMENTS, - "search_queries": ATTRS.AI_SEARCH_QUERIES, - "search_results": ATTRS.AI_SEARCH_RESULTS, - "tool_calls": ATTRS.AI_TOOL_CALLS, + "citations": consts.ATTRS.AI_CITATIONS, + "documents": consts.ATTRS.AI_DOCUMENTS, + "search_queries": consts.ATTRS.AI_SEARCH_QUERIES, + "search_results": consts.ATTRS.AI_SEARCH_RESULTS, + "tool_calls": consts.ATTRS.AI_TOOL_CALLS, } @@ -101,7 +100,7 @@ def collect_chat_response_fields(span, res, include_pii): if hasattr(res, "text"): set_data_normalized( span, - ATTRS.AI_RESPONSES, + consts.ATTRS.AI_RESPONSES, [res.text], ) for pii_attr in COLLECTED_PII_CHAT_RESP_ATTRS: @@ -127,7 +126,7 @@ def collect_chat_response_fields(span, res, include_pii): ) if hasattr(res.meta, "warnings"): - set_data_normalized(span, ATTRS.AI_WARNINGS, res.meta.warnings) + set_data_normalized(span, consts.ATTRS.AI_WARNINGS, res.meta.warnings) @wraps(f) def new_chat(*args, **kwargs): @@ -160,7 +159,7 @@ def new_chat(*args, **kwargs): if should_send_default_pii() and integration.include_prompts: set_data_normalized( span, - ATTRS.AI_INPUT_MESSAGES, + consts.ATTRS.AI_INPUT_MESSAGES, list( map( lambda x: { @@ -179,7 +178,7 @@ def new_chat(*args, **kwargs): for k, v in COLLECTED_CHAT_PARAMS.items(): if k in kwargs: set_data_normalized(span, v, kwargs[k]) - set_data_normalized(span, ATTRS.AI_STREAMING, False) + set_data_normalized(span, consts.ATTRS.AI_STREAMING, False) if streaming: old_iterator = res @@ -238,16 +237,18 @@ def new_embed(*args, **kwargs): should_send_default_pii() and integration.include_prompts ): if isinstance(kwargs["texts"], str): - set_data_normalized(span, ATTRS.AI_TEXTS, [kwargs["texts"]]) + set_data_normalized(span, consts.ATTRS.AI_TEXTS, [kwargs["texts"]]) elif ( isinstance(kwargs["texts"], list) and len(kwargs["texts"]) > 0 and isinstance(kwargs["texts"][0], str) ): - set_data_normalized(span, ATTRS.AI_INPUT_MESSAGES, kwargs["texts"]) + set_data_normalized( + span, consts.ATTRS.AI_INPUT_MESSAGES, kwargs["texts"] + ) if "model" in kwargs: - set_data_normalized(span, ATTRS.AI_MODEL_ID, kwargs["model"]) + 
set_data_normalized(span, consts.ATTRS.AI_MODEL_ID, kwargs["model"]) try: res = f(*args, **kwargs) except Exception as e: diff --git a/sentry_sdk/integrations/django/__init__.py b/sentry_sdk/integrations/django/__init__.py index a7f08c8726..2c3b83d849 100644 --- a/sentry_sdk/integrations/django/__init__.py +++ b/sentry_sdk/integrations/django/__init__.py @@ -5,7 +5,7 @@ from importlib import import_module import sentry_sdk -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.scope import add_global_event_processor, should_send_default_pii from sentry_sdk.serializer import add_global_repr_processor, add_repr_sequence_type from sentry_sdk.tracing import SOURCE_FOR_STYLE, TransactionSource @@ -29,7 +29,6 @@ DEFAULT_HTTP_METHODS_TO_CAPTURE, RequestExtractor, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: from django import VERSION as DJANGO_VERSION diff --git a/sentry_sdk/integrations/django/caching.py b/sentry_sdk/integrations/django/caching.py index d42b8e8c78..6cc24cf912 100644 --- a/sentry_sdk/integrations/django/caching.py +++ b/sentry_sdk/integrations/django/caching.py @@ -7,12 +7,11 @@ from django.core.cache import CacheHandler import sentry_sdk -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.utils import ( capture_internal_exceptions, ensure_integration_enabled, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS if TYPE_CHECKING: from typing import Any diff --git a/sentry_sdk/integrations/httpx.py b/sentry_sdk/integrations/httpx.py index d5ef96cd63..1b73b7ebc7 100644 --- a/sentry_sdk/integrations/httpx.py +++ b/sentry_sdk/integrations/httpx.py @@ -1,5 +1,5 @@ import sentry_sdk -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations import Integration, DidNotEnable from sentry_sdk.tracing import BAGGAGE_HEADER_NAME from sentry_sdk.tracing_utils import Baggage, should_propagate_trace @@ -10,7 +10,6 @@ logger, parse_url, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/huggingface_hub.py b/sentry_sdk/integrations/huggingface_hub.py index 77335d13f8..5cb7128e4e 100644 --- a/sentry_sdk/integrations/huggingface_hub.py +++ b/sentry_sdk/integrations/huggingface_hub.py @@ -4,14 +4,13 @@ import sentry_sdk from sentry_sdk.ai.monitoring import record_token_usage from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import ( capture_internal_exceptions, event_from_exception, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/langchain.py b/sentry_sdk/integrations/langchain.py index 52dfcb297f..3b93167447 100644 --- a/sentry_sdk/integrations/langchain.py +++ b/sentry_sdk/integrations/langchain.py @@ -5,13 +5,12 @@ import sentry_sdk from sentry_sdk.ai.monitoring import set_ai_pipeline_name from sentry_sdk.ai.utils import set_data_normalized, get_start_span_function -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import Span from sentry_sdk.tracing_utils import _get_value from 
sentry_sdk.utils import logger, capture_internal_exceptions -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/langgraph.py b/sentry_sdk/integrations/langgraph.py index 0b10145fdf..a6a1565330 100644 --- a/sentry_sdk/integrations/langgraph.py +++ b/sentry_sdk/integrations/langgraph.py @@ -3,11 +3,10 @@ import sentry_sdk from sentry_sdk.ai.utils import set_data_normalized -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import safe_serialize -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: diff --git a/sentry_sdk/integrations/openai.py b/sentry_sdk/integrations/openai.py index 11926bae00..6ffd3316a3 100644 --- a/sentry_sdk/integrations/openai.py +++ b/sentry_sdk/integrations/openai.py @@ -11,7 +11,6 @@ event_from_exception, safe_serialize, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING @@ -179,21 +178,23 @@ def _set_input_data(span, kwargs, operation, integration): and should_send_default_pii() and integration.include_prompts ): - set_data_normalized(span, ATTRS.GEN_AI_REQUEST_MESSAGES, messages, unpack=False) + set_data_normalized( + span, consts.ATTRS.GEN_AI_REQUEST_MESSAGES, messages, unpack=False + ) # Input attributes: Common - set_data_normalized(span, ATTRS.GEN_AI_SYSTEM, "openai") - set_data_normalized(span, ATTRS.GEN_AI_OPERATION_NAME, operation) + set_data_normalized(span, consts.ATTRS.GEN_AI_SYSTEM, "openai") + set_data_normalized(span, consts.ATTRS.GEN_AI_OPERATION_NAME, operation) # Input attributes: Optional kwargs_keys_to_attributes = { - "model": ATTRS.GEN_AI_REQUEST_MODEL, - "stream": ATTRS.GEN_AI_RESPONSE_STREAMING, - "max_tokens": ATTRS.GEN_AI_REQUEST_MAX_TOKENS, - "presence_penalty": ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, - "frequency_penalty": ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, - "temperature": ATTRS.GEN_AI_REQUEST_TEMPERATURE, - "top_p": ATTRS.GEN_AI_REQUEST_TOP_P, + "model": consts.ATTRS.GEN_AI_REQUEST_MODEL, + "stream": consts.ATTRS.GEN_AI_RESPONSE_STREAMING, + "max_tokens": consts.ATTRS.GEN_AI_REQUEST_MAX_TOKENS, + "presence_penalty": consts.ATTRS.GEN_AI_REQUEST_PRESENCE_PENALTY, + "frequency_penalty": consts.ATTRS.GEN_AI_REQUEST_FREQUENCY_PENALTY, + "temperature": consts.ATTRS.GEN_AI_REQUEST_TEMPERATURE, + "top_p": consts.ATTRS.GEN_AI_REQUEST_TOP_P, } for key, attribute in kwargs_keys_to_attributes.items(): value = kwargs.get(key) @@ -205,14 +206,14 @@ def _set_input_data(span, kwargs, operation, integration): tools = kwargs.get("tools") if tools is not NOT_GIVEN and tools is not None and len(tools) > 0: set_data_normalized( - span, ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) + span, consts.ATTRS.GEN_AI_REQUEST_AVAILABLE_TOOLS, safe_serialize(tools) ) def _set_output_data(span, response, kwargs, integration, finish_span=True): # type: (Span, Any, dict[str, Any], OpenAIIntegration, bool) -> None if hasattr(response, "model"): - set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_MODEL, response.model) + set_data_normalized(span, consts.ATTRS.GEN_AI_RESPONSE_MODEL, response.model) # Input messages (the prompt or data sent to the model) # used for the token usage calculation @@ -227,7 +228,9 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True): if should_send_default_pii() and 
integration.include_prompts: response_text = [choice.message.dict() for choice in response.choices] if len(response_text) > 0: - set_data_normalized(span, ATTRS.GEN_AI_RESPONSE_TEXT, response_text) + set_data_normalized( + span, consts.ATTRS.GEN_AI_RESPONSE_TEXT, response_text + ) _calculate_token_usage(messages, response, span, None, integration.count_tokens) @@ -255,14 +258,14 @@ def _set_output_data(span, response, kwargs, integration, finish_span=True): if len(output_messages["tool"]) > 0: set_data_normalized( span, - ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, + consts.ATTRS.GEN_AI_RESPONSE_TOOL_CALLS, output_messages["tool"], unpack=False, ) if len(output_messages["response"]) > 0: set_data_normalized( - span, ATTRS.GEN_AI_RESPONSE_TEXT, output_messages["response"] + span, consts.ATTRS.GEN_AI_RESPONSE_TEXT, output_messages["response"] ) _calculate_token_usage(messages, response, span, None, integration.count_tokens) @@ -316,7 +319,7 @@ def new_iterator(): all_responses = ["".join(chunk) for chunk in data_buf] if should_send_default_pii() and integration.include_prompts: set_data_normalized( - span, ATTRS.GEN_AI_RESPONSE_TEXT, all_responses + span, consts.ATTRS.GEN_AI_RESPONSE_TEXT, all_responses ) if count_tokens_manually: _calculate_token_usage( @@ -371,7 +374,7 @@ async def new_iterator_async(): all_responses = ["".join(chunk) for chunk in data_buf] if should_send_default_pii() and integration.include_prompts: set_data_normalized( - span, ATTRS.GEN_AI_RESPONSE_TEXT, all_responses + span, consts.ATTRS.GEN_AI_RESPONSE_TEXT, all_responses ) if count_tokens_manually: _calculate_token_usage( diff --git a/sentry_sdk/integrations/openai_agents/spans/ai_client.py b/sentry_sdk/integrations/openai_agents/spans/ai_client.py index d3e500e51d..002dd2dddb 100644 --- a/sentry_sdk/integrations/openai_agents/spans/ai_client.py +++ b/sentry_sdk/integrations/openai_agents/spans/ai_client.py @@ -1,6 +1,5 @@ import sentry_sdk -from sentry_sdk.consts import OP -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS +from sentry_sdk.consts import ATTRS, OP from ..consts import SPAN_ORIGIN from ..utils import ( diff --git a/sentry_sdk/integrations/openai_agents/spans/execute_tool.py b/sentry_sdk/integrations/openai_agents/spans/execute_tool.py index 4034210a6f..b39b76d67a 100644 --- a/sentry_sdk/integrations/openai_agents/spans/execute_tool.py +++ b/sentry_sdk/integrations/openai_agents/spans/execute_tool.py @@ -1,7 +1,6 @@ import sentry_sdk -from sentry_sdk.consts import OP, SPANSTATUS +from sentry_sdk.consts import ATTRS, OP, SPANSTATUS from sentry_sdk.scope import should_send_default_pii -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN from ..utils import _set_agent_data diff --git a/sentry_sdk/integrations/openai_agents/spans/handoff.py b/sentry_sdk/integrations/openai_agents/spans/handoff.py index 6e172c5594..e506770f76 100644 --- a/sentry_sdk/integrations/openai_agents/spans/handoff.py +++ b/sentry_sdk/integrations/openai_agents/spans/handoff.py @@ -1,6 +1,5 @@ import sentry_sdk -from sentry_sdk.consts import OP -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS +from sentry_sdk.consts import ATTRS, OP from ..consts import SPAN_ORIGIN diff --git a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py index 9496be9bf4..048027af96 100644 --- a/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py +++ 
b/sentry_sdk/integrations/openai_agents/spans/invoke_agent.py @@ -1,9 +1,8 @@ import sentry_sdk from sentry_sdk.ai.utils import get_start_span_function, set_data_normalized -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import safe_serialize -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from ..consts import SPAN_ORIGIN from ..utils import _set_agent_data diff --git a/sentry_sdk/integrations/openai_agents/utils.py b/sentry_sdk/integrations/openai_agents/utils.py index b0b26db4df..d412f61131 100644 --- a/sentry_sdk/integrations/openai_agents/utils.py +++ b/sentry_sdk/integrations/openai_agents/utils.py @@ -1,9 +1,9 @@ import sentry_sdk from sentry_sdk.ai.utils import set_data_normalized +from sentry_sdk.consts import ATTRS from sentry_sdk.integrations import DidNotEnable from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import event_from_exception, safe_serialize -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/pymongo.py b/sentry_sdk/integrations/pymongo.py index 3fea0af22d..4c52de8f4f 100644 --- a/sentry_sdk/integrations/pymongo.py +++ b/sentry_sdk/integrations/pymongo.py @@ -2,12 +2,11 @@ import json import sentry_sdk -from sentry_sdk.consts import SPANSTATUS, OP +from sentry_sdk.consts import ATTRS, SPANSTATUS, OP from sentry_sdk.integrations import DidNotEnable, Integration from sentry_sdk.scope import should_send_default_pii from sentry_sdk.tracing import Span from sentry_sdk.utils import capture_internal_exceptions -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS try: from pymongo import monitoring diff --git a/sentry_sdk/integrations/redis/modules/caches.py b/sentry_sdk/integrations/redis/modules/caches.py index 3370e48330..71fde61764 100644 --- a/sentry_sdk/integrations/redis/modules/caches.py +++ b/sentry_sdk/integrations/redis/modules/caches.py @@ -2,10 +2,9 @@ Code used for the Caches module in Sentry """ -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations.redis.utils import _get_safe_key, _key_as_string from sentry_sdk.utils import capture_internal_exceptions -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS GET_COMMANDS = ("get", "mget") SET_COMMANDS = ("set", "setex") diff --git a/sentry_sdk/integrations/redis/modules/queries.py b/sentry_sdk/integrations/redis/modules/queries.py index b6c45807de..5d19d4b3fe 100644 --- a/sentry_sdk/integrations/redis/modules/queries.py +++ b/sentry_sdk/integrations/redis/modules/queries.py @@ -2,10 +2,9 @@ Code used for the Queries module in Sentry """ -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations.redis.utils import _get_safe_command from sentry_sdk.utils import capture_internal_exceptions -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/redis/utils.py b/sentry_sdk/integrations/redis/utils.py index c2700951ca..26ccb08f88 100644 --- a/sentry_sdk/integrations/redis/utils.py +++ b/sentry_sdk/integrations/redis/utils.py @@ -1,3 +1,4 @@ +from sentry_sdk.consts import ATTRS from sentry_sdk.integrations.redis.consts import ( _COMMANDS_INCLUDING_SENSITIVE_DATA, _MAX_NUM_ARGS, @@ -7,7 +8,6 @@ ) from sentry_sdk.scope import should_send_default_pii from sentry_sdk.utils import 
SENSITIVE_DATA_SUBSTITUTE -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/integrations/sqlalchemy.py b/sentry_sdk/integrations/sqlalchemy.py index 57f3f492dd..4c70bbb851 100644 --- a/sentry_sdk/integrations/sqlalchemy.py +++ b/sentry_sdk/integrations/sqlalchemy.py @@ -1,4 +1,4 @@ -from sentry_sdk.consts import SPANSTATUS +from sentry_sdk.consts import ATTRS, SPANSTATUS from sentry_sdk.integrations import _check_minimum_version, Integration, DidNotEnable from sentry_sdk.tracing_utils import add_query_source, record_sql_queries from sentry_sdk.utils import ( @@ -6,7 +6,7 @@ ensure_integration_enabled, parse_version, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS + try: from sqlalchemy.engine import Engine # type: ignore diff --git a/sentry_sdk/integrations/stdlib.py b/sentry_sdk/integrations/stdlib.py index 4fac877f90..8af0253b70 100644 --- a/sentry_sdk/integrations/stdlib.py +++ b/sentry_sdk/integrations/stdlib.py @@ -5,7 +5,7 @@ from http.client import HTTPConnection import sentry_sdk -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations import Integration from sentry_sdk.scope import add_global_event_processor from sentry_sdk.tracing_utils import EnvironHeaders, should_propagate_trace @@ -18,7 +18,6 @@ safe_repr, parse_url, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/tracing.py b/sentry_sdk/tracing.py index 2d94c9d12d..4c33309ca2 100644 --- a/sentry_sdk/tracing.py +++ b/sentry_sdk/tracing.py @@ -4,7 +4,7 @@ from enum import Enum import sentry_sdk -from sentry_sdk.consts import INSTRUMENTER, SPANSTATUS, SPANTEMPLATE +from sentry_sdk.consts import ATTRS, INSTRUMENTER, SPANSTATUS, SPANTEMPLATE from sentry_sdk.profiler.continuous_profiler import get_profiler_id from sentry_sdk.utils import ( capture_internal_exceptions, @@ -14,7 +14,6 @@ nanosecond_time, should_be_treated_as_error, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/sentry_sdk/tracing_utils.py b/sentry_sdk/tracing_utils.py index 6dbf830aa1..d45f635d97 100644 --- a/sentry_sdk/tracing_utils.py +++ b/sentry_sdk/tracing_utils.py @@ -11,7 +11,7 @@ import uuid import sentry_sdk -from sentry_sdk.consts import OP, SPANTEMPLATE +from sentry_sdk.consts import OP, SPANTEMPLATE, ATTRS from sentry_sdk.utils import ( capture_internal_exceptions, filename_for_module, @@ -27,7 +27,6 @@ _is_in_project_root, _module_in_list, ) -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS from typing import TYPE_CHECKING diff --git a/tests/integrations/anthropic/test_anthropic.py b/tests/integrations/anthropic/test_anthropic.py index c77d9d3bf6..fae24be604 100644 --- a/tests/integrations/anthropic/test_anthropic.py +++ b/tests/integrations/anthropic/test_anthropic.py @@ -41,14 +41,13 @@ async def __call__(self, *args, **kwargs): from anthropic.types.content_block import ContentBlock as TextBlock from sentry_sdk import start_transaction, start_span -from sentry_sdk.consts import OP +from sentry_sdk.consts import ATTRS, OP from sentry_sdk.integrations.anthropic import ( AnthropicIntegration, _set_output_data, _collect_ai_data, ) from sentry_sdk.utils import package_version -from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS ANTHROPIC_VERSION = package_version("anthropic") diff --git a/tests/integrations/asyncpg/test_asyncpg.py 
b/tests/integrations/asyncpg/test_asyncpg.py
index d5b7561d2f..71cbf80af5 100644
--- a/tests/integrations/asyncpg/test_asyncpg.py
+++ b/tests/integrations/asyncpg/test_asyncpg.py
@@ -20,9 +20,9 @@ from asyncpg import connect, Connection
 from sentry_sdk import capture_message, start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.asyncpg import AsyncPGIntegration
 from sentry_sdk.tracing_utils import record_sql_queries
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
 PG_HOST = os.getenv("SENTRY_PYTHON_TEST_POSTGRES_HOST", "localhost")
diff --git a/tests/integrations/cohere/test_cohere.py b/tests/integrations/cohere/test_cohere.py
index caf3699507..a961c79129 100644
--- a/tests/integrations/cohere/test_cohere.py
+++ b/tests/integrations/cohere/test_cohere.py
@@ -5,8 +5,8 @@ from cohere import Client, ChatMessage
 from sentry_sdk import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.cohere import CohereIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from unittest import mock # python 3.3 and above
 from httpx import Client as HTTPXClient
diff --git a/tests/integrations/django/test_basic.py b/tests/integrations/django/test_basic.py
index 4718e1acd4..f4e1cf5d34 100644
--- a/tests/integrations/django/test_basic.py
+++ b/tests/integrations/django/test_basic.py
@@ -27,6 +27,7 @@ import sentry_sdk
 from sentry_sdk._compat import PY310
 from sentry_sdk import capture_message, capture_exception
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.django import (
     DjangoIntegration,
     DjangoRequestExtractor,
@@ -36,7 +37,6 @@ from sentry_sdk.integrations.executing import ExecutingIntegration
 from sentry_sdk.profiler.utils import get_frame_name
 from sentry_sdk.tracing import Span
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import unpack_werkzeug_response
 from tests.integrations.django.myapp.wsgi import application
 from tests.integrations.django.myapp.signals import myapp_custom_signal_silenced
diff --git a/tests/integrations/django/test_db_query_data.py b/tests/integrations/django/test_db_query_data.py
index 6e602689fd..0926c61f69 100644
--- a/tests/integrations/django/test_db_query_data.py
+++ b/tests/integrations/django/test_db_query_data.py
@@ -15,9 +15,9 @@ from werkzeug.test import Client
 from sentry_sdk import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.django import DjangoIntegration
 from sentry_sdk.tracing_utils import record_sql_queries
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import unpack_werkzeug_response
 from tests.integrations.django.utils import pytest_mark_django_db_decorator
diff --git a/tests/integrations/httpx/test_httpx.py b/tests/integrations/httpx/test_httpx.py
index 3b3825cd19..bb0aa64b85 100644
--- a/tests/integrations/httpx/test_httpx.py
+++ b/tests/integrations/httpx/test_httpx.py
@@ -6,9 +6,8 @@ import sentry_sdk
 from sentry_sdk import capture_message, start_transaction
-from sentry_sdk.consts import MATCH_ALL
+from sentry_sdk.consts import ATTRS, MATCH_ALL
 from sentry_sdk.integrations.httpx import HttpxIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
diff --git a/tests/integrations/langchain/test_langchain.py b/tests/integrations/langchain/test_langchain.py
index 467526df3f..0b0bd0e95c 100644
--- a/tests/integrations/langchain/test_langchain.py
+++ b/tests/integrations/langchain/test_langchain.py
@@ -4,8 +4,6 @@ import pytest
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
-
 try:
     # Langchain >= 0.2
     from langchain_openai import ChatOpenAI
@@ -20,6 +18,7 @@ from langchain_core.language_models.chat_models import BaseChatModel
 from sentry_sdk import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.langchain import (
     LangchainIntegration,
     SentryLangchainCallback,
diff --git a/tests/integrations/langgraph/test_langgraph.py b/tests/integrations/langgraph/test_langgraph.py
index 98c4b7a610..3c940619b9 100644
--- a/tests/integrations/langgraph/test_langgraph.py
+++ b/tests/integrations/langgraph/test_langgraph.py
@@ -5,8 +5,7 @@ import pytest
 from sentry_sdk import start_transaction
-from sentry_sdk.consts import OP
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+from sentry_sdk.consts import ATTRS, OP
 def mock_langgraph_imports():
diff --git a/tests/integrations/openai/test_openai.py b/tests/integrations/openai/test_openai.py
index 5cf803cbd7..e61374d0b0 100644
--- a/tests/integrations/openai/test_openai.py
+++ b/tests/integrations/openai/test_openai.py
@@ -1,7 +1,5 @@ import pytest
-from sentry_sdk.utils import package_version
-
 try:
     from openai import NOT_GIVEN
 except ImportError:
@@ -34,11 +32,12 @@ SKIP_RESPONSES_TESTS = True
 from sentry_sdk import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.openai import (
     OpenAIIntegration,
     _calculate_token_usage,
 )
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+from sentry_sdk.utils import package_version
 from unittest import mock # python 3.3 and above
diff --git a/tests/integrations/pymongo/test_pymongo.py b/tests/integrations/pymongo/test_pymongo.py
index 0fb9543642..10b4459760 100644
--- a/tests/integrations/pymongo/test_pymongo.py
+++ b/tests/integrations/pymongo/test_pymongo.py
@@ -1,6 +1,6 @@ from sentry_sdk import capture_message, start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.pymongo import PyMongoIntegration, _strip_pii
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from mockupdb import MockupDB, OpQuery
 from pymongo import MongoClient
diff --git a/tests/integrations/redis/asyncio/test_redis_asyncio.py b/tests/integrations/redis/asyncio/test_redis_asyncio.py
index 87b8d3d94e..9fe298929c 100644
--- a/tests/integrations/redis/asyncio/test_redis_asyncio.py
+++ b/tests/integrations/redis/asyncio/test_redis_asyncio.py
@@ -1,8 +1,8 @@ import pytest
 from sentry_sdk import capture_message, start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.redis import RedisIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
 from fakeredis.aioredis import FakeRedis
diff --git a/tests/integrations/redis/cluster/test_redis_cluster.py b/tests/integrations/redis/cluster/test_redis_cluster.py
index df07e35f6e..411160ef76 100644
--- a/tests/integrations/redis/cluster/test_redis_cluster.py
+++ b/tests/integrations/redis/cluster/test_redis_cluster.py
@@ -1,8 +1,8 @@ import pytest
 from sentry_sdk import capture_message
 from sentry_sdk.api import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.redis import RedisIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
 import redis
diff --git a/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py b/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py
index 1f0e1ad6c2..7f23a573e5 100644
--- a/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py
+++ b/tests/integrations/redis/cluster_asyncio/test_redis_cluster_asyncio.py
@@ -1,8 +1,8 @@ import pytest
 from sentry_sdk import capture_message, start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.redis import RedisIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
 from redis.asyncio import cluster
diff --git a/tests/integrations/redis/test_redis.py b/tests/integrations/redis/test_redis.py
index 4b52f5fce9..24ac630f29 100644
--- a/tests/integrations/redis/test_redis.py
+++ b/tests/integrations/redis/test_redis.py
@@ -4,7 +4,7 @@ from fakeredis import FakeStrictRedis
 from sentry_sdk import capture_message, start_transaction
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.redis import RedisIntegration
diff --git a/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py b/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py
index 111f012f78..8a89a9e312 100644
--- a/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py
+++ b/tests/integrations/redis_py_cluster_legacy/test_redis_py_cluster_legacy.py
@@ -5,8 +5,8 @@ from sentry_sdk import capture_message
 from sentry_sdk.api import start_transaction
+from sentry_sdk.consts import ATTRS
 from sentry_sdk.integrations.redis import RedisIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict
diff --git a/tests/integrations/requests/test_requests.py b/tests/integrations/requests/test_requests.py
index 1747581a6b..7328349285 100644
--- a/tests/integrations/requests/test_requests.py
+++ b/tests/integrations/requests/test_requests.py
@@ -6,7 +6,7 @@ from sentry_sdk import capture_message
 from sentry_sdk.integrations.stdlib import StdlibIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+from sentry_sdk.consts import ATTRS
 from tests.conftest import ApproxDict, create_mock_http_server
 PORT = create_mock_http_server()
diff --git a/tests/integrations/sqlalchemy/test_sqlalchemy.py b/tests/integrations/sqlalchemy/test_sqlalchemy.py
index 5e466d44cd..dddda5f06e 100644
--- a/tests/integrations/sqlalchemy/test_sqlalchemy.py
+++ b/tests/integrations/sqlalchemy/test_sqlalchemy.py
@@ -11,12 +11,11 @@ import sentry_sdk
 from sentry_sdk import capture_message, start_transaction
-from sentry_sdk.consts import DEFAULT_MAX_VALUE_LENGTH
+from sentry_sdk.consts import ATTRS, DEFAULT_MAX_VALUE_LENGTH
 from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
 from sentry_sdk.serializer import MAX_EVENT_BYTES
 from sentry_sdk.tracing_utils import record_sql_queries
 from sentry_sdk.utils import json_dumps
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 def test_orm_queries(sentry_init, capture_events):
diff --git a/tests/integrations/stdlib/test_httplib.py b/tests/integrations/stdlib/test_httplib.py
index a18fab4678..cf8e2f883e 100644
--- a/tests/integrations/stdlib/test_httplib.py
+++ b/tests/integrations/stdlib/test_httplib.py
@@ -7,10 +7,9 @@ import pytest
 from sentry_sdk import capture_message, start_transaction
-from sentry_sdk.consts import MATCH_ALL
+from sentry_sdk.consts import ATTRS, MATCH_ALL
 from sentry_sdk.tracing import Transaction
 from sentry_sdk.integrations.stdlib import StdlibIntegration
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
 from tests.conftest import ApproxDict, create_mock_http_server
diff --git a/tests/test_logs.py b/tests/test_logs.py
index 5dde873deb..f62801c7a9 100644
--- a/tests/test_logs.py
+++ b/tests/test_logs.py
@@ -10,8 +10,7 @@ from sentry_sdk import get_client
 from sentry_sdk.envelope import Envelope
 from sentry_sdk.types import Log
-from sentry_sdk.consts import VERSION
-from sentry_conventions.attributes import ATTRIBUTE_NAMES as ATTRS
+from sentry_sdk.consts import ATTRS, VERSION
 minimum_python_37 = pytest.mark.skipif(
     sys.version_info < (3, 7), reason="Asyncio tests need Python >= 3.7"