
Commit e3a7e38

add all
1 parent 15cdda9 commit e3a7e38

16 files changed (+447, -79 lines)

eval_protocol/__init__.py

Lines changed: 7 additions & 1 deletion
@@ -29,7 +29,12 @@
 from .resources import create_llm_resource
 from .reward_function import RewardFunction
 from .typed_interface import reward_function
-from .quickstart import aha_judge, multi_turn_assistant_to_ground_truth, assistant_to_ground_truth
+from .quickstart.aha_judge import aha_judge
+from .utils.evaluation_row_utils import (
+    multi_turn_assistant_to_ground_truth,
+    assistant_to_ground_truth,
+    filter_longest_conversation,
+)
 from .pytest import evaluation_test, SingleTurnRolloutProcessor, RemoteRolloutProcessor, GithubActionRolloutProcessor
 from .pytest.remote_rollout_processor import create_elasticsearch_config_from_env
 from .pytest.parameterize import DefaultParameterIdGenerator
@@ -102,6 +107,7 @@
     "aha_judge",
     "multi_turn_assistant_to_ground_truth",
     "assistant_to_ground_truth",
+    "filter_longest_conversation",
     "evaluation_test",
     "SingleTurnRolloutProcessor",
     "OpenAIResponsesAdapter",

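Taken together, these hunks re-export aha_judge from its new home and move the row helpers to eval_protocol.utils.evaluation_row_utils, while adding filter_longest_conversation to the public __all__. A minimal usage sketch of the updated import surface (only names and module paths shown in this diff are assumed):

# Sketch only: root re-exports plus the new module path introduced above.
from eval_protocol import aha_judge, filter_longest_conversation
from eval_protocol.utils.evaluation_row_utils import (
    multi_turn_assistant_to_ground_truth,
    assistant_to_ground_truth,
)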
eval_protocol/pytest/tracing_utils.py

Lines changed: 1 addition & 1 deletion
@@ -8,7 +8,7 @@
 from eval_protocol.adapters.fireworks_tracing import FireworksTracingAdapter
 from eval_protocol.data_loader.dynamic_data_loader import DynamicDataLoader
 from eval_protocol.models import EvaluationRow, Status
-from eval_protocol.quickstart.utils import filter_longest_conversation
+from eval_protocol.utils.evaluation_row_utils import filter_longest_conversation
 from eval_protocol.types.remote_rollout_processor import DataLoaderConfig, RolloutMetadata, InitRequest
 from eval_protocol.pytest.types import RolloutProcessorConfig

Lines changed: 6 additions & 2 deletions
@@ -1,4 +1,8 @@
-from .llm_judge import aha_judge
-from .utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth
+"""
+Quickstart modules for various evaluation scenarios.
+"""
+
+from eval_protocol.quickstart.aha_judge.llm_judge import aha_judge
+from eval_protocol.utils.evaluation_row_utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth
 
 __all__ = ["aha_judge", "multi_turn_assistant_to_ground_truth", "assistant_to_ground_truth"]
Lines changed: 4 additions & 0 deletions
@@ -0,0 +1,4 @@
+from eval_protocol.quickstart.aha_judge.llm_judge import aha_judge
+from eval_protocol.utils.evaluation_row_utils import multi_turn_assistant_to_ground_truth, assistant_to_ground_truth
+
+__all__ = ["aha_judge", "multi_turn_assistant_to_ground_truth", "assistant_to_ground_truth"]
Lines changed: 90 additions & 0 deletions
@@ -0,0 +1,90 @@
+"""
+Default LLM judge for Eval Protocol. Inspired by Arena-Hard-Auto.
+"""
+
+from typing import Optional
+
+from eval_protocol.models import EvaluationRow, EvaluateResult, MetricResult
+from eval_protocol.adapters.base import BaseAdapter
+from eval_protocol.quickstart.aha_judge.utils import (
+    JUDGE_CONFIGS,
+    LABEL_TO_SCORE,
+    run_single_judgment,
+)
+from eval_protocol.utils.evaluation_row_utils import serialize_message
+
+from openai import AsyncOpenAI
+
+
+async def aha_judge(
+    row: EvaluationRow, judge_name: str = "kimi-k2-instruct-0905", adapter: Optional[BaseAdapter] = None
+) -> EvaluationRow:
+    """
+    LLM Judge evaluation using Arena-Hard-Auto style pairwise comparisons for a single row.
+
+    Compares model response against ground truth using an LLM judge:
+    1. Extracts the question from messages[:-1]
+    2. Compares messages[-1] (new model response) vs ground_truth (baseline response)
+    3. Runs two judgment rounds (A vs B, B vs A) to reduce position bias
+    4. Returns individual scores for bootstrap aggregation
+
+    Args:
+        row: Single EvaluationRow object with messages, ground_truth, and tools
+        judge_name: Name of the judge configuration to use
+        adapter: Optional adapter to push scores back to (if provided)
+
+    Returns:
+        Same row with updated evaluation_result containing individual judgment scores
+    """
+
+    if not row.messages:
+        return row
+
+    judge_config = JUDGE_CONFIGS[judge_name]
+
+    # Extract question and answers
+    question_text = "\n".join([serialize_message(msg) for msg in row.messages[:-1]])
+    model_a_answer = str(row.ground_truth)
+    model_b_answer = serialize_message(row.messages[-1])
+
+    async with AsyncOpenAI(api_key=judge_config.get("api_key"), base_url=judge_config.get("base_url")) as client:
+        # Run two judgment rounds in sequence (A vs B, then B vs A)
+        result1 = await run_single_judgment(
+            question_text, model_a_answer, model_b_answer, row.tools, judge_config, client
+        )
+        result2 = await run_single_judgment(
+            question_text, model_b_answer, model_a_answer, row.tools, judge_config, client
+        )
+
+    if not result1 or not result2 or not result1.get("score") or not result2.get("score"):
+        # If either judgment failed, mark as invalid (don't include in distribution)
+        final_score = 0.0
+        reason = "Failed to get judgment scores"
+        metrics = {}
+        is_score_valid = False
+    else:
+        # Convert judgment scores to numerical scores
+        game1_score = 1 - LABEL_TO_SCORE[result1["score"]]
+        game2_score = LABEL_TO_SCORE[result2["score"]]
+        final_score = (game1_score + game2_score) / 2
+
+        reason = f"LLM Judge comparison: Round 1: {result1['score']}, Round 2: {result2['score']}"
+        metrics = {
+            "round1_judgment": MetricResult(score=game1_score, reason=result1["judgment"]),
+            "round2_judgment": MetricResult(score=game2_score, reason=result2["judgment"]),
+        }
+        is_score_valid = True
+
+    row.evaluation_result = EvaluateResult(
+        score=final_score,
+        reason=reason,
+        metrics=metrics,
+        is_score_valid=is_score_valid,
+    )
+
+    # Upload score to adapter if provided
+    if adapter and row.evaluation_result and row.evaluation_result.is_score_valid:
+        model_name = row.input_metadata.completion_params.get("model", "unknown_model")
+        adapter.upload_score(row, model_name)
+
+    return row
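For readers checking the arithmetic: LABEL_TO_SCORE (defined in the aha_judge utils module later in this commit) maps a verdict label to a win rate for Assistant A, and aha_judge flips round 1 so that both rounds score the new model response. A worked sketch with hypothetical verdicts:

# Hypothetical verdicts; LABEL_TO_SCORE comes from the utils module added in this commit.
from eval_protocol.quickstart.aha_judge.utils import LABEL_TO_SCORE

# Round 1: A = ground truth, B = new response; suppose the judge says "B>A".
game1_score = 1 - LABEL_TO_SCORE["B>A"]  # 1 - 1/7 = 6/7
# Round 2: positions swapped, A = new response; suppose the judge says "A>B".
game2_score = LABEL_TO_SCORE["A>B"]      # 6/7

final_score = (game1_score + game2_score) / 2  # ~0.857: the new response is slightly preferred in both orderings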
Lines changed: 63 additions & 0 deletions
@@ -0,0 +1,63 @@
+"""
+Example for using Braintrust with the aha judge.
+"""
+
+import os
+
+import pytest
+
+# Skip entire module in CI to prevent import-time side effects
+if os.environ.get("CI") == "true":
+    pytest.skip("Skip quickstart in CI", allow_module_level=True)
+
+from eval_protocol import (
+    evaluation_test,
+    aha_judge,
+    EvaluationRow,
+    SingleTurnRolloutProcessor,
+    DynamicDataLoader,
+    create_braintrust_adapter,
+    multi_turn_assistant_to_ground_truth,
+)
+
+
+# uncomment when dataloader is fixed
+def braintrust_data_generator():
+    adapter = create_braintrust_adapter()
+    return adapter.get_evaluation_rows(
+        btql_query=f"""
+        select: *
+        from: project_logs('{os.getenv("BRAINTRUST_PROJECT_ID")}') traces
+        filter: is_root = true
+        limit: 10
+        """
+    )
+
+
+@pytest.mark.skipif(os.environ.get("CI") == "true", reason="Skip in CI")
+@pytest.mark.parametrize(
+    "completion_params",
+    [
+        {"model": "gpt-4.1"},
+        {
+            "max_tokens": 131000,
+            "extra_body": {"reasoning_effort": "medium"},
+            "model": "fireworks_ai/accounts/fireworks/models/gpt-oss-120b",
+        },
+        {
+            "max_tokens": 131000,
+            "extra_body": {"reasoning_effort": "low"},
+            "model": "fireworks_ai/accounts/fireworks/models/gpt-oss-20b",
+        },
+    ],
+)
+@evaluation_test(
+    data_loaders=DynamicDataLoader(
+        generators=[braintrust_data_generator],
+        preprocess_fn=multi_turn_assistant_to_ground_truth,
+    ),
+    rollout_processor=SingleTurnRolloutProcessor(),
+    max_concurrent_evaluations=2,
+)
+async def test_llm_judge(row: EvaluationRow) -> EvaluationRow:
+    return await aha_judge(row)

eval_protocol/quickstart/llm_judge_langfuse.py renamed to eval_protocol/quickstart/aha_judge/llm_judge_langfuse.py

Lines changed: 1 addition & 3 deletions
@@ -10,15 +10,13 @@
 from eval_protocol import (
     evaluation_test,
     aha_judge,
-    multi_turn_assistant_to_ground_truth,
     EvaluationRow,
     SingleTurnRolloutProcessor,
     create_langfuse_adapter,
     DynamicDataLoader,
+    multi_turn_assistant_to_ground_truth,
 )
 
-from eval_protocol.quickstart import aha_judge
-
 
 def langfuse_data_generator():
     adapter = create_langfuse_adapter()

eval_protocol/quickstart/llm_judge_langsmith.py renamed to eval_protocol/quickstart/aha_judge/llm_judge_langsmith.py

Lines changed: 1 addition & 1 deletion
@@ -27,11 +27,11 @@
 from eval_protocol import (
     evaluation_test,
     aha_judge,
-    multi_turn_assistant_to_ground_truth,
     EvaluationRow,
     SingleTurnRolloutProcessor,
     LangSmithAdapter,
     DynamicDataLoader,
+    multi_turn_assistant_to_ground_truth,
 )
 
 
eval_protocol/quickstart/aha_judge/llm_judge_openai_responses.py

Lines changed: 1 addition & 1 deletion
@@ -26,8 +26,8 @@
     SingleTurnRolloutProcessor,
     OpenAIResponsesAdapter,
     DynamicDataLoader,
+    multi_turn_assistant_to_ground_truth,
 )
-from eval_protocol import multi_turn_assistant_to_ground_truth
 
 
 def openai_responses_data_generator():
Lines changed: 133 additions & 0 deletions
@@ -0,0 +1,133 @@
+"""
+Arena-Hard-Auto utility functions adapted for Eval Protocol.
+"""
+
+import os
+import re
+from typing import Dict, Any, Optional
+
+OG_ARENA_HARD_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.
+
+Begin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.
+
+When evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.
+
+Then consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.
+
+Then consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.
+
+After providing your explanation, you must output only one of the following choices as your final verdict with a label:
+
+1. Assistant A is significantly better: [[A>>B]]
+2. Assistant A is slightly better: [[A>B]]
+3. Tie, relatively the same: [[A=B]]
+4. Assistant B is slightly better: [[B>A]]
+5. Assistant B is significantly better: [[B>>A]]
+
+Example output: "My final verdict is tie: [[A=B]]"."""
+
+
+# Judge model configurations for Arena-Hard-Auto style evaluation
+# Each config specifies the model, parameters, and concurrency limits for LLM judges
+JUDGE_CONFIGS = {
+    "gpt-4.1": {
+        "model": "gpt-4.1",
+        "temperature": 0.0,
+        "max_tokens": 16000,
+    },
+    "gemini-2.5-pro": {
+        "model": "gemini-2.5-pro",
+        "temperature": 1.0,
+        "max_tokens": 32000,
+        "api_key": os.getenv("GEMINI_API_KEY"),
+        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
+    },
+    "gemini-2.5-flash": {
+        "model": "gemini-2.5-flash",
+        "temperature": 1.0,
+        "max_tokens": 32000,
+        "api_key": os.getenv("GEMINI_API_KEY"),
+        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
+    },
+    "kimi-k2-instruct-0905": {
+        "model": "accounts/fireworks/models/kimi-k2-instruct-0905",
+        "temperature": 0.6,  # Kimi recommended temperature
+        "max_tokens": 131000,
+        "api_key": os.getenv("FIREWORKS_API_KEY"),
+        "base_url": "https://api.fireworks.ai/inference/v1",
+    },
+}
+
+LABEL_TO_SCORE = {
+    "A>>B": 1.0,
+    "B<<A": 1.0,
+    "A>B": 6 / 7,
+    "B<A": 6 / 7,
+    "A=B": 0.5,
+    "B=A": 0.5,
+    "A<B": 1 / 7,
+    "B>A": 1 / 7,
+    "A<<B": 0.0,
+    "B>>A": 0.0,
+}
+
+
+def get_score(judgment, patterns):
+    """Extract judgment score from text. From arena-hard-auto/gen_judgment.py"""
+    for pattern in patterns:
+        pattern = re.compile(pattern)
+
+        matches = pattern.findall(judgment.upper())
+        matches = [m for m in matches if m != ""]
+
+        if len(set(matches)) > 0:
+            return matches[-1].strip("\n")
+    return None
+
+
+async def run_single_judgment(
+    question_text: str, answer_a: str, answer_b: str, tools, judge_config, client
+) -> Optional[Dict[str, Any]]:
+    """Run a single pairwise judgment between two answers."""
+    user_prompt = f"""<|User Prompt|>
+{question_text}
+
+<|The Start of Assistant A's Answer|>
+{answer_a}
+<|The End of Assistant A's Answer|>
+
+<|The Start of Assistant B's Answer|>
+{answer_b}
+<|The End of Assistant B's Answer|>
+
+<|Available Tools|>
+{tools}
+<|End of Available Tools|>
+
+{OG_ARENA_HARD_PROMPT}"""
+
+    messages = [{"role": "user", "content": user_prompt}]
+
+    try:
+        api_params = {
+            "model": judge_config["model"],
+            "messages": messages,
+            "temperature": judge_config["temperature"],
+            "max_tokens": judge_config["max_tokens"],
+        }
+
+        if tools:
+            api_params["tools"] = tools
+            api_params["tool_choice"] = "none"
+
+        response = await client.chat.completions.create(**api_params)
+        judgment_text = response.choices[0].message.content
+        if not judgment_text:
+            return None
+
+    except Exception as e:
+        print(f"Error getting judgment from OpenAI: {e}")
+        return None
+
+    score = get_score(judgment_text, [r"\[\[([AB<>=]+)\]\]", r"\[([AB<>=]+)\]"])
+    return {"score": score, "judgment": judgment_text, "prompt": messages}
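As a quick illustration of the verdict parsing: get_score uppercases the judgment, applies the patterns in order, and returns the last non-empty bracketed label. A small sketch using a made-up judgment string and the same two patterns run_single_judgment passes:

# Made-up judgment text; get_score and LABEL_TO_SCORE come from the module added above.
from eval_protocol.quickstart.aha_judge.utils import LABEL_TO_SCORE, get_score

judgment = "Assistant B's answer is more complete. My final verdict is: [[B>A]]"
label = get_score(judgment, [r"\[\[([AB<>=]+)\]\]", r"\[([AB<>=]+)\]"])

assert label == "B>A"
assert LABEL_TO_SCORE[label] == 1 / 7  # low score for Assistant A, since B was preferred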
