"""
Arena-Hard-Auto utility functions adapted for Eval Protocol.
"""

import os
import re
from typing import Dict, Any, Optional

OG_ARENA_HARD_PROMPT = """Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.

Begin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.

When evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.

Then consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.

Then consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.

After providing your explanation, you must output only one of the following choices as your final verdict with a label:

1. Assistant A is significantly better: [[A>>B]]
2. Assistant A is slightly better: [[A>B]]
3. Tie, relatively the same: [[A=B]]
4. Assistant B is slightly better: [[B>A]]
5. Assistant B is significantly better: [[B>>A]]

Example output: "My final verdict is tie: [[A=B]]"."""
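# The bracketed verdict labels requested above ([[A>>B]] through [[B>>A]]) are the
# strings that get_score() extracts from the judge's reply and that LABEL_TO_SCORE
# maps to numeric scores.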


# Judge model configurations for Arena-Hard-Auto style evaluation.
# Each config specifies the judge model, its sampling parameters, and, where a
# non-default provider is used, the API key and OpenAI-compatible base URL.
JUDGE_CONFIGS = {
    "gpt-4.1": {
        "model": "gpt-4.1",
        "temperature": 0.0,
        "max_tokens": 16000,
    },
    "gemini-2.5-pro": {
        "model": "gemini-2.5-pro",
        "temperature": 1.0,
        "max_tokens": 32000,
        "api_key": os.getenv("GEMINI_API_KEY"),
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
    },
    "gemini-2.5-flash": {
        "model": "gemini-2.5-flash",
        "temperature": 1.0,
        "max_tokens": 32000,
        "api_key": os.getenv("GEMINI_API_KEY"),
        "base_url": "https://generativelanguage.googleapis.com/v1beta/openai/",
    },
    "kimi-k2-instruct-0905": {
        "model": "accounts/fireworks/models/kimi-k2-instruct-0905",
        "temperature": 0.6,  # Kimi recommended temperature
        "max_tokens": 131000,
        "api_key": os.getenv("FIREWORKS_API_KEY"),
        "base_url": "https://api.fireworks.ai/inference/v1",
    },
}

# Map each Arena-Hard verdict label to a scalar score from assistant A's perspective
# (1.0 = A significantly better, 0.5 = tie, 0.0 = B significantly better). Both
# orderings of every label are listed so either form of the verdict resolves.
LABEL_TO_SCORE = {
    "A>>B": 1.0,
    "B<<A": 1.0,
    "A>B": 6 / 7,
    "B<A": 6 / 7,
    "A=B": 0.5,
    "B=A": 0.5,
    "A<B": 1 / 7,
    "B>A": 1 / 7,
    "A<<B": 0.0,
    "B>>A": 0.0,
}


def get_score(judgment, patterns):
    """Extract the judge's verdict label from text. From arena-hard-auto/gen_judgment.py"""
    for pattern in patterns:
        pattern = re.compile(pattern)

        matches = pattern.findall(judgment.upper())
        matches = [m for m in matches if m != ""]

        if len(set(matches)) > 0:
            # Return the last non-empty match so the judge's final verdict wins.
            return matches[-1].strip("\n")
    return None
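
# Example (illustrative) with the same patterns that run_single_judgment() passes in:
#
#   get_score("My final verdict is tie: [[A=B]]", [r"\[\[([AB<>=]+)\]\]", r"\[([AB<>=]+)\]"])
#   # -> "A=B"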


async def run_single_judgment(
    question_text: str, answer_a: str, answer_b: str, tools, judge_config, client
) -> Optional[Dict[str, Any]]:
    """Run a single pairwise judgment between two answers."""
    user_prompt = f"""<|User Prompt|>
{question_text}

<|The Start of Assistant A's Answer|>
{answer_a}
<|The End of Assistant A's Answer|>

<|The Start of Assistant B's Answer|>
{answer_b}
<|The End of Assistant B's Answer|>

<|Available Tools|>
{tools}
<|End of Available Tools|>

{OG_ARENA_HARD_PROMPT}"""

    messages = [{"role": "user", "content": user_prompt}]

    try:
        api_params = {
            "model": judge_config["model"],
            "messages": messages,
            "temperature": judge_config["temperature"],
            "max_tokens": judge_config["max_tokens"],
        }

        # Expose the tool definitions to the judge for context, but disable tool
        # calls so the judge replies with plain text.
        if tools:
            api_params["tools"] = tools
            api_params["tool_choice"] = "none"

        response = await client.chat.completions.create(**api_params)
        judgment_text = response.choices[0].message.content
        if not judgment_text:
            return None

    except Exception as e:
        print(f"Error getting judgment from judge model: {e}")
        return None

    # "score" holds the raw verdict label (e.g. "A>B"), which callers can map to a
    # number via LABEL_TO_SCORE.
    score = get_score(judgment_text, [r"\[\[([AB<>=]+)\]\]", r"\[([AB<>=]+)\]"])
    return {"score": score, "judgment": judgment_text, "prompt": messages}
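

# Illustrative usage sketch: build an OpenAI-compatible async client from a
# JUDGE_CONFIGS entry, run one pairwise judgment, and convert the verdict label to
# a scalar via LABEL_TO_SCORE. This assumes the `openai` package is installed and
# the relevant API key is set; the question and answers below are placeholders.
if __name__ == "__main__":
    import asyncio

    from openai import AsyncOpenAI

    async def _demo() -> None:
        judge_config = JUDGE_CONFIGS["gpt-4.1"]
        client = AsyncOpenAI(
            api_key=judge_config.get("api_key"),  # None falls back to OPENAI_API_KEY
            base_url=judge_config.get("base_url"),  # None uses the default endpoint
        )
        result = await run_single_judgment(
            question_text="What does HTTP status code 404 mean?",
            answer_a="It means the requested resource could not be found on the server.",
            answer_b="It means the server encountered an internal error.",
            tools=None,
            judge_config=judge_config,
            client=client,
        )
        if result and result["score"] in LABEL_TO_SCORE:
            print(result["score"], LABEL_TO_SCORE[result["score"]])

    asyncio.run(_demo())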