Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions ddtrace/llmobs/_integrations/litellm.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,6 +158,9 @@ def _has_downstream_openai_span(self, kwargs: dict[str, Any], model: Optional[st
"""
stream = kwargs.get("stream", False)
model_lower = model.lower() if model else ""
# litellm_proxy/ requests route through a proxy — the OpenAI integration never fires for these
if model_lower.startswith("litellm_proxy/"):
return False
# best effort attempt to check if Open AI or Azure since model_provider is unknown until request completes
is_openai_model = any(prefix in model_lower for prefix in ("gpt", "openai", "azure"))
return is_openai_model and not stream and LLMObs._integration_is_enabled("openai")
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
---
fixes:
- |
litellm: Fix missing LLMObs spans when routing through a litellm proxy.
Models with the ``litellm_proxy/`` prefix were incorrectly suppressed by
the OpenAI downstream span check, resulting in empty or missing LLMObs
spans. Proxy requests are now always handled by the litellm integration.
29 changes: 29 additions & 0 deletions tests/contrib/litellm/test_litellm_llmobs.py
Original file line number Diff line number Diff line change
Expand Up @@ -611,6 +611,35 @@ def test_completion_with_reasoning(self, litellm, request_vcr, llmobs_events, te
assert event_metrics["reasoning_output_tokens"] == 15


@pytest.mark.parametrize(
    "model,stream,openai_enabled,expected",
    [
        # the litellm_proxy/ prefix must always short-circuit the downstream
        # check, no matter what model name follows or whether OpenAI is enabled
        ("litellm_proxy/azure-gpt-5-nano", False, True, False),
        ("litellm_proxy/gpt-4o", False, True, False),
        ("litellm_proxy/openai/gpt-4", False, True, False),
        # plain OpenAI/Azure models, non-streamed, with the OpenAI integration
        # enabled: a downstream OpenAI span is expected
        ("gpt-4o", False, True, True),
        ("azure/gpt-4", False, True, True),
        ("openai/gpt-4", False, True, True),
        # streaming suppresses the downstream check
        ("gpt-4o", True, True, False),
        # a disabled OpenAI integration suppresses the downstream check
        ("gpt-4o", False, False, False),
        # models from other providers never produce a downstream OpenAI span
        ("anthropic/claude-3", False, True, False),
    ],
)
def test_has_downstream_openai_span(model, stream, openai_enabled, expected):
    """Verify _has_downstream_openai_span across proxy, streaming, and provider cases."""
    from ddtrace import config
    from ddtrace.llmobs._integrations import LiteLLMIntegration

    litellm_integration = LiteLLMIntegration(integration_config=config.litellm)
    # Patch the integration-enabled lookup so the check is driven purely by
    # the parametrized openai_enabled flag rather than global LLMObs state.
    enabled_check = "ddtrace.llmobs._integrations.litellm.LLMObs._integration_is_enabled"
    with mock.patch(enabled_check, return_value=openai_enabled):
        result = litellm_integration._has_downstream_openai_span({"stream": stream}, model)
    assert result is expected


def test_enable_llmobs_after_litellm_was_imported(run_python_code_in_subprocess):
"""
Test that LLMObs.enable() logs a warning if litellm is imported before LLMObs.enable() is called.
Expand Down
Loading