Moved _normalize_data to OpenAI integration
This way we do not eventually break user code that passes an object with a model_dump method to set_data, where that model_dump does something different.
antonpirker committed Mar 12, 2024
1 parent a5e8596 commit 6e2b5db
Showing 2 changed files with 41 additions and 33 deletions.
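For context, a minimal sketch (not part of this commit) of the kind of user code the commit message is protecting: the Report class and its model_dump method are hypothetical and unrelated to pydantic, and only documented sentry_sdk APIs (init, start_transaction, start_child, set_data) are used.

import sentry_sdk

sentry_sdk.init(traces_sample_rate=1.0)

class Report:
    # Hypothetical user class whose model_dump has nothing to do with pydantic.
    def model_dump(self):
        return {"truncated": True}

with sentry_sdk.start_transaction(op="task", name="demo") as transaction:
    with transaction.start_child(op="step") as span:
        # Before this commit, Span.set_data would call model_dump() and store its
        # return value; after it, the Report instance is stored unchanged.
        span.set_data("report", Report())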
55 changes: 40 additions & 15 deletions sentry_sdk/integrations/openai.py
@@ -73,6 +73,28 @@ def _capture_exception(hub, exc):
    hub.capture_event(event, hint=hint)


+def _normalize_data(data):
+    # type: (Any) -> Any
+
+    # convert pydantic data (e.g. OpenAI v1+) to json compatible format
+    if hasattr(data, "model_dump"):
+        try:
+            return data.model_dump()
+        except Exception as e:
+            logger.warning("Could not convert pydantic data to JSON: %s", e)
+            return data
+
+    if isinstance(data, list):
+        return list(_normalize_data(x) for x in data)
+    if isinstance(data, dict):
+        return {k: _normalize_data(v) for (k, v) in data.items()}
+    return data


+def set_data_normalized(span, key, value):
+    # type: (Span, str, Any) -> None
+    span.set_data(key, _normalize_data(value))


def _calculate_chat_completion_usage(
    messages, response, span, streaming_message_responses=None
):
@@ -112,11 +134,11 @@ def _calculate_chat_completion_usage(
        total_tokens = prompt_tokens + completion_tokens

    if completion_tokens != 0:
-        span.set_data(COMPLETION_TOKENS_USED, completion_tokens)
+        set_data_normalized(span, COMPLETION_TOKENS_USED, completion_tokens)
    if prompt_tokens != 0:
-        span.set_data(PROMPT_TOKENS_USED, prompt_tokens)
+        set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens)
    if total_tokens != 0:
-        span.set_data(TOTAL_TOKENS_USED, total_tokens)
+        set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens)


def _wrap_chat_completion_create(f):
@@ -160,14 +182,17 @@ def new_chat_completion(*args, **kwargs):

        with capture_internal_exceptions():
            if _should_send_default_pii() and integration.include_prompts:
-                span.set_data("ai.input_messages", messages)
-            span.set_data("ai.model_id", model)
-            span.set_data("ai.streaming", streaming)
+                set_data_normalized(span, "ai.input_messages", messages)
+
+            set_data_normalized(span, "ai.model_id", model)
+            set_data_normalized(span, "ai.streaming", streaming)

            if hasattr(res, "choices"):
                if _should_send_default_pii() and integration.include_prompts:
-                    span.set_data(
-                        "ai.responses", list(map(lambda x: x.message, res.choices))
+                    set_data_normalized(
+                        span,
+                        "ai.responses",
+                        list(map(lambda x: x.message, res.choices)),
                    )
                _calculate_chat_completion_usage(messages, res, span)
                span.__exit__(None, None, None)
@@ -200,15 +225,15 @@ def new_iterator():
                                _should_send_default_pii()
                                and integration.include_prompts
                            ):
-                                span.set_data("ai.responses", all_responses)
+                                set_data_normalized(span, "ai.responses", all_responses)
                            _calculate_chat_completion_usage(
                                messages, res, span, all_responses
                            )
                    span.__exit__(None, None, None)

                res._iterator = new_iterator()
            else:
-                span.set_data("unknown_response", True)
+                set_data_normalized(span, "unknown_response", True)
                span.__exit__(None, None, None)
            return res

@@ -238,15 +263,15 @@ def new_embeddings_create(*args, **kwargs):
                _should_send_default_pii() and integration.include_prompts
            ):
                if isinstance(kwargs["input"], str):
-                    span.set_data("ai.input_messages", [kwargs["input"]])
+                    set_data_normalized(span, "ai.input_messages", kwargs["input"])
                elif (
                    isinstance(kwargs["input"], list)
                    and len(kwargs["input"]) > 0
                    and isinstance(kwargs["input"][0], str)
                ):
-                    span.set_data("ai.input_messages", kwargs["input"])
+                    set_data_normalized(span, "ai.input_messages", kwargs["input"])
            if "model" in kwargs:
-                span.set_data("ai.model_id", kwargs["model"])
+                set_data_normalized(span, "ai.model_id", kwargs["model"])
            try:
                response = f(*args, **kwargs)
            except Exception as e:
@@ -271,8 +296,8 @@ def new_embeddings_create(*args, **kwargs):
            if total_tokens == 0:
                total_tokens = prompt_tokens

-            span.set_data(PROMPT_TOKENS_USED, prompt_tokens)
-            span.set_data(TOTAL_TOKENS_USED, total_tokens)
+            set_data_normalized(span, PROMPT_TOKENS_USED, prompt_tokens)
+            set_data_normalized(span, TOTAL_TOKENS_USED, total_tokens)

            return response

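To make the behaviour of the new helpers concrete, here is a small illustrative sketch (not part of the diff; it assumes the openai package is installed so the integration module can be imported, and FakeMessage is a stand-in for a pydantic response object such as an OpenAI v1 message):

from sentry_sdk.integrations.openai import _normalize_data

class FakeMessage:
    # Stand-in for a pydantic model (e.g. an OpenAI v1 response message).
    def model_dump(self):
        return {"role": "assistant", "content": "Hi there!"}

# Objects exposing model_dump() are converted, lists and dicts are walked
# recursively, and plain values pass through unchanged.
assert _normalize_data(FakeMessage()) == {"role": "assistant", "content": "Hi there!"}
assert _normalize_data([FakeMessage(), "ok"]) == [
    {"role": "assistant", "content": "Hi there!"},
    "ok",
]
assert _normalize_data({"usage": 42}) == {"usage": 42}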
19 changes: 1 addition & 18 deletions sentry_sdk/tracing.py
@@ -410,26 +410,9 @@ def set_tag(self, key, value):
        # type: (str, Any) -> None
        self._tags[key] = value

-    @staticmethod
-    def _normalize_data(data):
-        # type: (Any) -> Any
-
-        # convert pydantic data (e.g. OpenAI v1+) to json compatible format
-        if hasattr(data, "model_dump"):
-            try:
-                return data.model_dump()
-            except Exception as e:
-                logger.warning("Could not convert pydantic data to JSON: %s", e)
-                return data
-        if isinstance(data, list):
-            return list(Span._normalize_data(x) for x in data)
-        if isinstance(data, dict):
-            return {k: Span._normalize_data(v) for (k, v) in data.items()}
-        return data
-
    def set_data(self, key, value):
        # type: (str, Any) -> None
-        self._data[key] = self._normalize_data(value)
+        self._data[key] = value

    def set_status(self, value):
        # type: (str) -> None

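The practical effect of the tracing.py change is that Span.set_data now stores values exactly as given, and normalization only happens when the OpenAI integration goes through set_data_normalized. A minimal sketch of the difference (illustrative only; it assumes pydantic v2 and the openai package are installed, and MyModel is a hypothetical model):

import sentry_sdk
from pydantic import BaseModel
from sentry_sdk.integrations.openai import set_data_normalized

class MyModel(BaseModel):
    # Hypothetical pydantic model used only for illustration.
    x: int

sentry_sdk.init(traces_sample_rate=1.0)

with sentry_sdk.start_transaction(op="task", name="demo") as transaction:
    with transaction.start_child(op="step") as span:
        span.set_data("raw", MyModel(x=1))                      # stored as the model instance
        set_data_normalized(span, "normalized", MyModel(x=1))   # stored as {"x": 1}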