Rename legacy ai.* attributes to gen_ai.* names. #4924

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open · wants to merge 12 commits into `master`
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -7,6 +7,7 @@
 - Emit outcomes for skipped large attachments on playstation crashes. ([#4862](https://github.com/getsentry/relay/pull/4862))
 - Disable span metrics. ([#4931](https://github.com/getsentry/relay/pull/4931), [#4955](https://github.com/getsentry/relay/pull/4955))
 - Deprecate old AI monitoring attributes. ([#4960](https://github.com/getsentry/relay/pull/4960))
+- Normalize legacy `ai.*` attributes to `gen_ai.*` names. ([#4924](https://github.com/getsentry/relay/pull/4924))

 ## 25.7.0
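
In practice, the new changelog entry means span `data` submitted with the old SDK keys is rewritten to the canonical names during normalization. A minimal before/after illustration in Rust (the payload values are invented; only the key renames mirror this PR):

```rust
use serde_json::json;

fn main() {
    // Span `data` as an older SDK might send it, using legacy `ai.*` keys.
    let before = json!({
        "ai.model.provider": "openai",
        "ai.temperature": 0.7,
        "ai.finish_reason": "stop"
    });

    // The same attributes after normalization, under canonical `gen_ai.*` keys.
    let after = json!({
        "gen_ai.system": "openai",
        "gen_ai.request.temperature": 0.7,
        "gen_ai.response.finish_reason": "stop"
    });

    println!("{before:#}\n-->\n{after:#}");
}
```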
89 changes: 79 additions & 10 deletions relay-event-schema/src/protocol/span.rs
@@ -529,23 +529,21 @@ pub struct SpanData {
     )]
     pub gen_ai_tool_output: Annotated<Value>,
 
-    /// LLM decisions to use calls
+    /// LLM decisions to use tools
     #[metastructure(
         field = "gen_ai.response.tool_calls",
-        pii = "maybe",
-        legacy_alias = "ai.response.toolCalls"
+        legacy_alias = "ai.response.toolCalls",
+        legacy_alias = "ai.tool_calls",
+        pii = "maybe"
     )]
     pub gen_ai_response_tool_calls: Annotated<Value>,
 
-    /// Name of the tool that was called
-    #[metastructure(field = "gen_ai.tool.name")]
-    pub gen_ai_tool_name: Annotated<Value>,
-
     /// LLM response text (Vercel AI, generateText)
     #[metastructure(
         field = "gen_ai.response.text",
-        pii = "maybe",
-        legacy_alias = "ai.response.text"
+        legacy_alias = "ai.response.text",
+        legacy_alias = "ai.responses",
+        pii = "maybe"
     )]
     pub gen_ai_response_text: Annotated<Value>,

@@ -557,6 +555,67 @@ pub struct SpanData {
     #[metastructure(field = "gen_ai.response.tokens_per_second")]
     pub gen_ai_response_tokens_per_second: Annotated<Value>,
 
+    /// The available tools for a request to an LLM
+    #[metastructure(
+        field = "gen_ai.request.available_tools",
+        legacy_alias = "ai.tools",
+        pii = "maybe"
+    )]
+    pub gen_ai_request_available_tools: Annotated<Value>,
+
+    /// The frequency penalty for a request to an LLM
+    #[metastructure(
+        field = "gen_ai.request.frequency_penalty",
+        legacy_alias = "ai.frequency_penalty"
+    )]
+    pub gen_ai_request_frequency_penalty: Annotated<Value>,
+
+    /// The presence penalty for a request to an LLM
+    #[metastructure(
+        field = "gen_ai.request.presence_penalty",
+        legacy_alias = "ai.presence_penalty"
+    )]
+    pub gen_ai_request_presence_penalty: Annotated<Value>,
+
+    /// The seed for a request to an LLM
+    #[metastructure(field = "gen_ai.request.seed", legacy_alias = "ai.seed")]
+    pub gen_ai_request_seed: Annotated<Value>,
+
+    /// The temperature for a request to an LLM
+    #[metastructure(field = "gen_ai.request.temperature", legacy_alias = "ai.temperature")]
+    pub gen_ai_request_temperature: Annotated<Value>,
+
+    /// The top_k parameter for a request to an LLM
+    #[metastructure(field = "gen_ai.request.top_k", legacy_alias = "ai.top_k")]
+    pub gen_ai_request_top_k: Annotated<Value>,
+
+    /// The top_p parameter for a request to an LLM
+    #[metastructure(field = "gen_ai.request.top_p", legacy_alias = "ai.top_p")]
+    pub gen_ai_request_top_p: Annotated<Value>,
+
+    /// The finish reason for a response from an LLM
+    #[metastructure(
+        field = "gen_ai.response.finish_reason",
+        legacy_alias = "ai.finish_reason"
+    )]
+    pub gen_ai_response_finish_reason: Annotated<Value>,
+
+    /// The unique identifier for a response from an LLM
+    #[metastructure(field = "gen_ai.response.id", legacy_alias = "ai.generation_id")]
+    pub gen_ai_response_id: Annotated<Value>,
+
+    /// The GenAI system identifier
+    #[metastructure(field = "gen_ai.system", legacy_alias = "ai.model.provider")]
+    pub gen_ai_system: Annotated<Value>,
+
+    /// The name of the tool being called
+    #[metastructure(
+        field = "gen_ai.tool.name",
+        legacy_alias = "ai.function_call",
+        pii = "maybe"
+    )]
+    pub gen_ai_tool_name: Annotated<Value>,
+
     /// The client's browser name.
     #[metastructure(field = "browser.name")]
     pub browser_name: Annotated<String>,
@@ -1336,10 +1395,20 @@ mod tests {
 gen_ai_tool_input: ~,
 gen_ai_tool_output: ~,
 gen_ai_response_tool_calls: ~,
-gen_ai_tool_name: ~,
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: String(
 "task.py",
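
As the hunks above show, `legacy_alias` can be repeated, so several old spellings fold into a single canonical field when a span is deserialized. Below is a standalone sketch of that lookup with a hand-written alias table; the `normalize` function and its table are illustrative only, since Relay's `metastructure` derive generates the real mapping at compile time:

```rust
use std::collections::BTreeMap;

/// Canonical key plus every legacy spelling that maps onto it.
/// Mirrors a subset of the aliases introduced in this diff.
const ALIASES: &[(&str, &[&str])] = &[
    ("gen_ai.response.tool_calls", &["ai.response.toolCalls", "ai.tool_calls"]),
    ("gen_ai.response.text", &["ai.response.text", "ai.responses"]),
    ("gen_ai.request.available_tools", &["ai.tools"]),
    ("gen_ai.response.id", &["ai.generation_id"]),
    ("gen_ai.system", &["ai.model.provider"]),
    ("gen_ai.tool.name", &["ai.function_call"]),
];

/// Rewrites legacy keys to their canonical names. On a collision the last
/// write wins here; the real derive decides precedence, so treat this as a
/// sketch of the mapping, not of the conflict resolution.
fn normalize(data: BTreeMap<String, String>) -> BTreeMap<String, String> {
    data.into_iter()
        .map(|(key, value)| {
            let canonical = ALIASES
                .iter()
                .find(|(_, legacy)| legacy.contains(&key.as_str()))
                .map(|(canonical, _)| (*canonical).to_owned())
                .unwrap_or(key);
            (canonical, value)
        })
        .collect()
}

fn main() {
    let mut data = BTreeMap::new();
    data.insert("ai.model.provider".to_owned(), "openai".to_owned());
    data.insert("ai.tool_calls".to_owned(), "[]".to_owned());
    for (key, value) in normalize(data) {
        println!("{key} = {value}"); // keys print under their gen_ai.* names
    }
}
```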
12 changes: 11 additions & 1 deletion relay-event-schema/src/protocol/span/convert.rs
@@ -169,10 +169,20 @@ mod tests {
 gen_ai_tool_input: ~,
 gen_ai_tool_output: ~,
 gen_ai_response_tool_calls: ~,
-gen_ai_tool_name: ~,
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: "Chrome",
 code_filepath: ~,
 code_lineno: ~,
(snapshot file — filename not shown in this view)
@@ -154,6 +154,17 @@ expression: "(&event.value().unwrap().spans, metrics.project_metrics)"
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: ~,
 code_lineno: ~,
@@ -825,6 +836,17 @@ expression: "(&event.value().unwrap().spans, metrics.project_metrics)"
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: ~,
 code_lineno: ~,
@@ -1023,6 +1045,17 @@ expression: "(&event.value().unwrap().spans, metrics.project_metrics)"
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: ~,
 code_lineno: ~,
@@ -1330,6 +1363,17 @@ expression: "(&event.value().unwrap().spans, metrics.project_metrics)"
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: ~,
 code_lineno: ~,
@@ -1528,6 +1572,17 @@ expression: "(&event.value().unwrap().spans, metrics.project_metrics)"
 gen_ai_response_text: ~,
 gen_ai_response_object: ~,
 gen_ai_response_tokens_per_second: ~,
+gen_ai_request_available_tools: ~,
+gen_ai_request_frequency_penalty: ~,
+gen_ai_request_presence_penalty: ~,
+gen_ai_request_seed: ~,
+gen_ai_request_temperature: ~,
+gen_ai_request_top_k: ~,
+gen_ai_request_top_p: ~,
+gen_ai_response_finish_reason: ~,
+gen_ai_response_id: ~,
+gen_ai_system: ~,
+gen_ai_tool_name: ~,
 browser_name: ~,
 code_filepath: ~,
 code_lineno: ~,
4 changes: 2 additions & 2 deletions relay-spans/src/v2_to_v1.rs
@@ -1055,8 +1055,8 @@ mod tests {
 "trace_id": "89143b0763095bd9c9955e8175d1fb23",
 "status": "unknown",
 "data": {
-  "gen_ai.agent.name": "Seer",
-  "gen_ai.system": "openai"
+  "gen_ai.system": "openai",
+  "gen_ai.agent.name": "Seer"
 },
 "kind": "client"
 }