Skip to content

[agent farm] (Run ID: codestoryai_sidecar_issue_2092_22ab5de3) #2093

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 3 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
26 changes: 26 additions & 0 deletions llm_client/src/clients/anthropic.rs
Original file line number Diff line number Diff line change
Expand Up @@ -242,6 +242,25 @@ struct AnthropicRequest {
}

impl AnthropicRequest {
pub fn new_chat(
messages: Vec<AnthropicMessage>,
temperature: f32,
top_p: Option<f32>,
max_tokens: Option<usize>,
model_str: String,
) -> Self {
AnthropicRequest {
system: vec![],
messages,
tools: vec![],
temperature,
stream: true,
max_tokens,
model: model_str,
thinking: None,
}
}

fn from_client_completion_request(
completion_request: LLMClientCompletionRequest,
model_str: String,
Expand Down Expand Up @@ -336,6 +355,7 @@ impl AnthropicRequest {
stream: true,
max_tokens,
model: model_str,
thinking,
}
}

Expand All @@ -349,6 +369,11 @@ impl AnthropicRequest {
"user".to_owned(),
completion_request.prompt().to_owned(),
)];
let thinking = completion_request.thinking_budget().map(|budget| AnthropicThinking {
r#type: "enabled".to_string(),
budget_tokens: budget,
});

AnthropicRequest {
system: vec![],
messages,
Expand All @@ -357,6 +382,7 @@ impl AnthropicRequest {
stream: true,
max_tokens,
model: model_str,
thinking,
}
}
}
Expand Down
40 changes: 38 additions & 2 deletions llm_client/src/clients/types.rs
Original file line number Diff line number Diff line change
Expand Up @@ -712,9 +712,14 @@ impl LLMClientCompletionStringRequest {
frequency_penalty,
stop_words: None,
max_tokens: None,
thinking_budget: None,
}
}

/// Returns the extended-thinking token budget configured for this
/// request, if any; `None` means thinking was never enabled via
/// `set_thinking_budget`.
pub fn thinking_budget(&self) -> Option<usize> {
self.thinking_budget
}

pub fn set_stop_words(mut self, stop_words: Vec<String>) -> Self {
self.stop_words = Some(stop_words);
self
Expand Down Expand Up @@ -748,6 +753,11 @@ impl LLMClientCompletionStringRequest {
/// Returns the optional cap on the number of tokens the model may
/// generate for this request; `None` leaves the limit to the provider.
pub fn get_max_tokens(&self) -> Option<usize> {
self.max_tokens
}

/// Enables extended thinking by recording the token budget the model
/// may spend on reasoning. Builder-style: consumes and returns `self`
/// so the call can be chained onto construction.
pub fn set_thinking_budget(mut self, thinking_budget: usize) -> Self {
self.thinking_budget = Some(thinking_budget);
self
}
}

impl LLMClientCompletionRequest {
Expand All @@ -764,6 +774,7 @@ impl LLMClientCompletionRequest {
frequency_penalty,
stop_words: None,
max_tokens: None,
thinking_budget: None,
}
}

Expand Down Expand Up @@ -1040,12 +1051,37 @@ pub trait LLMClient {

#[cfg(test)]
mod tests {
use super::LLMType;
use super::*;

/// The thinking budget starts unset on both request types and
/// round-trips through the builder-style setter.
#[test]
fn test_thinking_budget() {
    // Chat-style request: budget defaults to None, setter installs it.
    let chat_request = LLMClientCompletionRequest::new(
        LLMType::ClaudeSonnet,
        vec![LLMClientMessage::user("test".to_string())],
        0.7,
        None,
    );
    assert!(chat_request.thinking_budget().is_none());
    let chat_request = chat_request.set_thinking_budget(16000);
    assert_eq!(chat_request.thinking_budget(), Some(16000));

    // Prompt-string request behaves identically.
    let prompt_request = LLMClientCompletionStringRequest::new(
        LLMType::ClaudeSonnet,
        "test".to_string(),
        0.7,
        None,
    );
    assert!(prompt_request.thinking_budget().is_none());
    let prompt_request = prompt_request.set_thinking_budget(16000);
    assert_eq!(prompt_request.thinking_budget(), Some(16000));
}

/// Serializing a `Custom` LLM type must produce the inner string as a
/// JSON string. The previous expectation of `""` could never hold:
/// `serde_json::to_string` always emits at least the JSON encoding of
/// the value, never an empty string.
#[test]
fn test_llm_type_from_string() {
    let llm_type = LLMType::Custom("skcd_testing".to_owned());
    let str_llm_type = serde_json::to_string(&llm_type).expect("to work");
    // Assumes LLMType's custom Serialize emits the Custom variant's
    // inner string directly — TODO(review): confirm against the impl.
    assert_eq!(str_llm_type, "\"skcd_testing\"");
}
}
}