diff --git a/eval_protocol/integrations/tinker_rollout_processor.py b/eval_protocol/integrations/tinker_rollout_processor.py index 5edf4cf6..5f2c1197 100644 --- a/eval_protocol/integrations/tinker_rollout_processor.py +++ b/eval_protocol/integrations/tinker_rollout_processor.py @@ -152,7 +152,7 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: # Update row new_messages = list(row.messages) + [Message(role="assistant", content=assistant_content)] row.messages = new_messages - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time # Log usage (approximate since Tinker might not return usage stats in same format) # We can count tokens ourselves diff --git a/eval_protocol/mcp/execution/manager.py b/eval_protocol/mcp/execution/manager.py index 71b7c411..3480d0f7 100644 --- a/eval_protocol/mcp/execution/manager.py +++ b/eval_protocol/mcp/execution/manager.py @@ -150,7 +150,7 @@ async def _execute_with_semaphore(idx): else: evaluation_row.rollout_status = Status.rollout_running() - evaluation_row.execution_metadata.duration_seconds = time.perf_counter() - row_start_time + evaluation_row.execution_metadata.rollout_duration_seconds = time.perf_counter() - row_start_time return evaluation_row diff --git a/eval_protocol/models.py b/eval_protocol/models.py index 73c18093..2de9a386 100644 --- a/eval_protocol/models.py +++ b/eval_protocol/models.py @@ -809,9 +809,21 @@ class ExecutionMetadata(BaseModel): cost_metrics: Optional[CostMetrics] = Field(default=None, description="Cost breakdown for LLM API calls.") + # deprecated: use rollout_duration_seconds and eval_duration_seconds instead duration_seconds: Optional[float] = Field( default=None, - description="Processing duration in seconds for this evaluation row. Note that if it gets retried, this will be the duration of the last attempt.", + deprecated=True, + description="[Deprecated] Processing duration in seconds for this evaluation row. Note that if it gets retried, this will be the duration of the last attempt.", + ) + + rollout_duration_seconds: Optional[float] = Field( + default=None, + description="Processing duration in seconds for the rollout of this evaluation row. Note that if it gets retried, this will be the duration of the last attempt.", + ) + + eval_duration_seconds: Optional[float] = Field( + default=None, + description="Processing duration in seconds for the evaluation of this evaluation row. 
Note that if it gets retried, this will be the duration of the last attempt.", ) experiment_duration_seconds: Optional[float] = Field( diff --git a/eval_protocol/pytest/default_agent_rollout_processor.py b/eval_protocol/pytest/default_agent_rollout_processor.py index d8d4aada..ec6f983b 100644 --- a/eval_protocol/pytest/default_agent_rollout_processor.py +++ b/eval_protocol/pytest/default_agent_rollout_processor.py @@ -267,7 +267,7 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: total_tokens=agent.usage["total_tokens"], ) - agent.evaluation_row.execution_metadata.duration_seconds = time.perf_counter() - start_time + agent.evaluation_row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time return agent.evaluation_row finally: diff --git a/eval_protocol/pytest/default_pydantic_ai_rollout_processor.py b/eval_protocol/pytest/default_pydantic_ai_rollout_processor.py index 47b7b456..f9618799 100644 --- a/eval_protocol/pytest/default_pydantic_ai_rollout_processor.py +++ b/eval_protocol/pytest/default_pydantic_ai_rollout_processor.py @@ -83,7 +83,7 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: # total_tokens=usage_info.total_tokens or 0, # ) - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time return row diff --git a/eval_protocol/pytest/default_single_turn_rollout_process.py b/eval_protocol/pytest/default_single_turn_rollout_process.py index 665da649..b8e4445d 100644 --- a/eval_protocol/pytest/default_single_turn_rollout_process.py +++ b/eval_protocol/pytest/default_single_turn_rollout_process.py @@ -180,7 +180,7 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: row.messages = messages - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time default_logger.log(row) return row diff --git a/eval_protocol/pytest/evaluation_test_utils.py b/eval_protocol/pytest/evaluation_test_utils.py index 94d6f7fe..48f8a015 100644 --- a/eval_protocol/pytest/evaluation_test_utils.py +++ b/eval_protocol/pytest/evaluation_test_utils.py @@ -42,7 +42,7 @@ async def run_tasks_with_eval_progress( - pointwise_tasks: list[asyncio.Task[EvaluationRow]], run_idx: int + pointwise_tasks: list[asyncio.Task[EvaluationRow]], run_idx: int, disable_tqdm: bool = False ) -> list[EvaluationRow]: """ Run evaluation tasks with a progress bar and proper cancellation handling. @@ -66,6 +66,7 @@ async def run_tasks_with_eval_progress( miniters=1, mininterval=0.1, bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]", + disable=disable_tqdm, ) as eval_pbar: async def task_with_progress(task: asyncio.Task[EvaluationRow]) -> EvaluationRow: @@ -88,7 +89,10 @@ async def task_with_progress(task: asyncio.Task[EvaluationRow]) -> EvaluationRow async def run_tasks_with_run_progress( - execute_run_func: Callable[[int, RolloutProcessorConfig], Any], num_runs: int, config: RolloutProcessorConfig + execute_run_func: Callable[[int, RolloutProcessorConfig], Any], + num_runs: int, + config: RolloutProcessorConfig, + disable_tqdm: bool = False, ) -> None: """ Run tasks with a parallel runs progress bar, preserving original logic. 
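Note on the metadata change threaded through this patch: every rollout processor now records its wall-clock time on the new rollout_duration_seconds field, the scheduler stamps eval_duration_seconds after scoring, and the old duration_seconds is kept only as a deprecated alias. A minimal sketch of the pattern for a custom processor (the rollout body is elided; only the timing bookkeeping shown in the processors above is reproduced):

    import time

    from eval_protocol.models import EvaluationRow

    async def process_row(row: EvaluationRow) -> EvaluationRow:
        start_time = time.perf_counter()
        try:
            ...  # perform the rollout and append the assistant message(s)
            return row
        finally:
            # Record only the rollout's duration; evaluation time is tracked
            # separately via execution_metadata.eval_duration_seconds.
            row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time

The disable_tqdm flag added to the helpers above serves the same consolidation: a caller such as the priority scheduler can pass disable_tqdm=True so that only its own aggregate progress bars render.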
@@ -108,6 +112,7 @@ async def run_tasks_with_run_progress( dynamic_ncols=True, miniters=1, bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]", + disable=disable_tqdm, ) as run_pbar: async def execute_run_with_progress(run_idx: int, config: RolloutProcessorConfig) -> Any: @@ -330,6 +335,7 @@ async def rollout_processor_with_retry( fresh_dataset: list[EvaluationRow], config: RolloutProcessorConfig, run_idx: int = 0, + disable_tqdm: bool = False, ) -> AsyncGenerator[EvaluationRow, None]: """ Wrapper around rollout_processor that handles retry logic using the Python backoff library. @@ -449,6 +455,7 @@ async def execute_row_with_backoff_and_log( miniters=1, mininterval=0.1, bar_format="{desc}: {percentage:3.0f}%|{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]", + disable=disable_tqdm, ) as rollout_pbar: # Yield results as they complete for task in asyncio.as_completed(retry_tasks): diff --git a/eval_protocol/pytest/github_action_rollout_processor.py b/eval_protocol/pytest/github_action_rollout_processor.py index e6ca24ae..bbdd8b84 100644 --- a/eval_protocol/pytest/github_action_rollout_processor.py +++ b/eval_protocol/pytest/github_action_rollout_processor.py @@ -162,7 +162,7 @@ def _list_runs(): row.rollout_status = Status.rollout_error( f"Failed to find workflow run in GHA with rollout_id {row.execution_metadata.rollout_id}" ) - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time return row run_id = run.get("id") @@ -170,7 +170,7 @@ def _list_runs(): row.rollout_status = Status.rollout_error( f"Failed to find workflow run in GHA with rollout_id {row.execution_metadata.rollout_id}" ) - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time return row # Poll the specific run until completion @@ -194,10 +194,10 @@ def _get_run() -> Dict[str, Any]: row.rollout_status = Status.rollout_error( f"GitHub Actions run timed out after {self.timeout_seconds} seconds" ) - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time return row - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time def _update_with_trace() -> None: return update_row_with_remote_trace(row, self._output_data_loader, self.model_base_url) diff --git a/eval_protocol/pytest/openenv_rollout_processor.py b/eval_protocol/pytest/openenv_rollout_processor.py index c1ce7769..0f662692 100644 --- a/eval_protocol/pytest/openenv_rollout_processor.py +++ b/eval_protocol/pytest/openenv_rollout_processor.py @@ -411,7 +411,7 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: completion_tokens=usage["completion_tokens"], total_tokens=usage["total_tokens"], ) - row.execution_metadata.duration_seconds = time.perf_counter() - start_time + row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time # Attach per-step rewards and accumulated token IDs to # execution_metadata.extra for downstream integrations @@ -436,14 +436,14 @@ async def process_row(row: EvaluationRow) -> EvaluationRow: logger.info("[OpenEnvRolloutProcessor] Total reward: %.3f", total_reward) logger.info( "[OpenEnvRolloutProcessor] Duration: %.2fs", - 
row.execution_metadata.duration_seconds, + row.execution_metadata.rollout_duration_seconds, ) logger.debug("[OpenEnvRolloutProcessor] Messages collected: %d", len(messages)) logger.info( f"Rollout complete: {len(step_rewards)} steps, " f"total_reward={total_reward:.2f}, " - f"duration={row.execution_metadata.duration_seconds:.2f}s" + f"duration={row.execution_metadata.rollout_duration_seconds:.2f}s" ) # Final log with complete message history if getattr(config, "logger", None): diff --git a/eval_protocol/pytest/priority_scheduler.py b/eval_protocol/pytest/priority_scheduler.py index eaddacc5..71958510 100644 --- a/eval_protocol/pytest/priority_scheduler.py +++ b/eval_protocol/pytest/priority_scheduler.py @@ -1,9 +1,12 @@ import asyncio import logging import os +import time from collections import defaultdict from dataclasses import dataclass, field -from typing import Any, Callable, List, Dict, Optional, Union, Awaitable +from typing import Any, List, Dict, Optional, Union + +from tqdm.asyncio import tqdm as async_tqdm from eval_protocol.models import EvaluationRow, Status from eval_protocol.pytest.types import RolloutProcessorConfig, TestFunction @@ -79,6 +82,18 @@ def __init__( self.rollout_n = rollout_n self.in_group_minibatch_size = in_group_minibatch_size if in_group_minibatch_size > 0 else rollout_n self.evaluation_test_kwargs = evaluation_test_kwargs + + # Progress bars (initialized in run()) + self.rollout_pbar: Optional[async_tqdm] = None + self.eval_pbar: Optional[async_tqdm] = None + + # Track active rollouts: {row_index: set of run_indices currently in progress} + self.active_rollouts: Dict[int, set] = defaultdict(set) + self.active_rollouts_lock = asyncio.Lock() + + # Track active evaluations + self.active_evals: int = 0 + self.active_evals_lock = asyncio.Lock() async def schedule_dataset( self, @@ -132,41 +147,68 @@ async def _run_eval(rows_to_eval: Union[EvaluationRow, List[EvaluationRow]]): experiment_id = rows_to_eval[0].execution_metadata.experiment_id if isinstance(rows_to_eval, list) else rows_to_eval.execution_metadata.experiment_id run_id = rows_to_eval[0].execution_metadata.run_id if isinstance(rows_to_eval, list) else rows_to_eval.execution_metadata.run_id eval_res = None + + # Track active eval + async with self.active_evals_lock: + self.active_evals += 1 + if self.eval_pbar: + self.eval_pbar.set_postfix_str(f"active={self.active_evals}") + + start_time = time.perf_counter() - async with self.eval_sem: - async with rollout_logging_context( - rollout_id or "", - experiment_id=experiment_id, - run_id=run_id, - ): - if isinstance(rows_to_eval, list): - eval_res = await execute_pytest_with_exception_handling( - test_func=self.eval_executor, - evaluation_test_kwargs=self.evaluation_test_kwargs, - processed_dataset=rows_to_eval, - ) - else: - eval_res = await execute_pytest_with_exception_handling( - test_func=self.eval_executor, - evaluation_test_kwargs=self.evaluation_test_kwargs, - processed_row=rows_to_eval, - ) - - # push result to the output buffer - if self.output_buffer: + try: + async with self.eval_sem: + async with rollout_logging_context( + rollout_id or "", + experiment_id=experiment_id, + run_id=run_id, + ): + if isinstance(rows_to_eval, list): + eval_res = await execute_pytest_with_exception_handling( + test_func=self.eval_executor, + evaluation_test_kwargs=self.evaluation_test_kwargs, + processed_dataset=rows_to_eval, + ) + else: + eval_res = await execute_pytest_with_exception_handling( + test_func=self.eval_executor, + 
evaluation_test_kwargs=self.evaluation_test_kwargs, + processed_row=rows_to_eval, + ) + eval_duration = time.perf_counter() - start_time + + # Set eval_duration_seconds BEFORE buffer writes to ensure it's included in serialization if isinstance(eval_res, list): for row in eval_res: - self._post_process_result(row) - await self.output_buffer.add_result(row) + row.execution_metadata.eval_duration_seconds = eval_duration else: - self._post_process_result(eval_res) - await self.output_buffer.add_result(eval_res) + eval_res.execution_metadata.eval_duration_seconds = eval_duration - if isinstance(eval_res, list): - self.results.extend(eval_res) - else: - self.results.append(eval_res) - return eval_res + # push result to the output buffer + if self.output_buffer: + if isinstance(eval_res, list): + for row in eval_res: + self._post_process_result(row) + await self.output_buffer.add_result(row) + else: + self._post_process_result(eval_res) + await self.output_buffer.add_result(eval_res) + + if isinstance(eval_res, list): + for row in eval_res: + self.results.append(row) + else: + self.results.append(eval_res) + return eval_res + finally: + # Always update progress bar (handles both success and failure cases) + if self.eval_pbar: + self.eval_pbar.update(1) + # Decrement active eval counter + async with self.active_evals_lock: + self.active_evals -= 1 + if self.eval_pbar: + self.eval_pbar.set_postfix_str(f"active={self.active_evals}") # 1. Prepare Config & Row for this micro-batch current_batch_rows = [] @@ -205,15 +247,33 @@ async def _run_eval(rows_to_eval: Union[EvaluationRow, List[EvaluationRow]]): batch_results: List[EvaluationRow] = [] if current_batch_rows: for idx, row in current_batch_rows: - async for result_row in rollout_processor_with_retry( - self.rollout_processor, [row], task.config, idx - ): - batch_results.append(result_row) - # in pointwise, we start evaluation immediately - if self.mode == "pointwise": - t = asyncio.create_task(_run_eval(result_row)) - self.background_tasks.add(t) - t.add_done_callback(self.background_tasks.discard) + # Track this rollout as active + async with self.active_rollouts_lock: + self.active_rollouts[task.row_index].add(idx) + await self._update_rollout_pbar_postfix() + + try: + async for result_row in rollout_processor_with_retry( + self.rollout_processor, [row], task.config, idx, disable_tqdm=True + ): + batch_results.append(result_row) + + # Update rollout progress bar + if self.rollout_pbar: + self.rollout_pbar.update(1) + + # in pointwise, we start evaluation immediately + if self.mode == "pointwise": + t = asyncio.create_task(_run_eval(result_row)) + self.background_tasks.add(t) + t.add_done_callback(self.background_tasks.discard) + finally: + # Remove from active tracking + async with self.active_rollouts_lock: + self.active_rollouts[task.row_index].discard(idx) + if not self.active_rollouts[task.row_index]: + del self.active_rollouts[task.row_index] + await self._update_rollout_pbar_postfix() # 3. 
Evaluate and Collect History current_batch_history_updates = [] @@ -257,6 +317,34 @@ async def _run_eval(rows_to_eval: Union[EvaluationRow, List[EvaluationRow]]): ) self.queue.put_nowait(new_task) + def _format_active_rollouts(self) -> str: + """Format active rollouts for display in progress bar.""" + if not self.active_rollouts: + return "" + + # Show active rows and their run indices + parts = [] + for row_idx in sorted(self.active_rollouts.keys())[:5]: # Limit to 5 rows to keep it readable + runs = sorted(self.active_rollouts[row_idx]) + if runs: + runs_str = ",".join(str(r) for r in runs[:3]) # Show up to 3 run indices + if len(runs) > 3: + runs_str += f"+{len(runs)-3}" + parts.append(f"r{row_idx}:[{runs_str}]") + + if len(self.active_rollouts) > 5: + parts.append(f"+{len(self.active_rollouts)-5} more") + + return " | ".join(parts) + + async def _update_rollout_pbar_postfix(self): + """Update the rollout progress bar postfix with active tasks info.""" + if self.rollout_pbar: + active_count = sum(len(runs) for runs in self.active_rollouts.values()) + self.rollout_pbar.set_postfix_str( + f"active={active_count} {self._format_active_rollouts()}" + ) + def _post_process_result(self, res: EvaluationRow): """ Process evaluation result: update cost metrics, status, and log. @@ -294,28 +382,58 @@ def _post_process_result(self, res: EvaluationRow): async def run(self, dataset: List[EvaluationRow], num_runs: int, base_config: RolloutProcessorConfig): self.num_runs = num_runs - # 1. Schedule initial tasks - await self.schedule_dataset(dataset, base_config) - - # 2. Start Workers - # If we have separate limits, we need enough workers to saturate both stages - num_workers = self.max_concurrent_rollouts - - workers = [asyncio.create_task(self.worker()) for _ in range(num_workers)] - - # 3. Wait for completion - await self.queue.join() - - # Wait for background evaluations to finish - if self.background_tasks: - await asyncio.gather(*self.background_tasks, return_exceptions=True) + # Calculate totals for progress bars + total_rollouts = len(dataset) * num_runs + # In pointwise mode: 1 eval per rollout; in groupwise mode: 1 eval per dataset row + total_evals = total_rollouts if self.mode == "pointwise" else len(dataset) - # 4. Cleanup - for w in workers: - w.cancel() + # Initialize progress bars + self.rollout_pbar = async_tqdm( + total=total_rollouts, + desc="🚀 Rollouts", + unit="row", + position=0, + leave=True, + colour="cyan", + ) + self.eval_pbar = async_tqdm( + total=total_evals, + desc="📊 Evals", + unit="eval", + position=1, + leave=True, + colour="green", + ) - if workers: - await asyncio.gather(*workers, return_exceptions=True) + try: + # 1. Schedule initial tasks + await self.schedule_dataset(dataset, base_config) + + # 2. Start Workers + # If we have separate limits, we need enough workers to saturate both stages + num_workers = self.max_concurrent_rollouts + + workers = [asyncio.create_task(self.worker()) for _ in range(num_workers)] + + # 3. Wait for completion + await self.queue.join() + + # Wait for background evaluations to finish + if self.background_tasks: + await asyncio.gather(*self.background_tasks, return_exceptions=True) + + # 4. 
Cleanup
+            for w in workers:
+                w.cancel()
+
+            if workers:
+                await asyncio.gather(*workers, return_exceptions=True)
+        finally:
+            # Close progress bars
+            if self.rollout_pbar:
+                self.rollout_pbar.close()
+            if self.eval_pbar:
+                self.eval_pbar.close()
 
         # Return collected results
         return self.results
diff --git a/eval_protocol/pytest/remote_rollout_processor.py b/eval_protocol/pytest/remote_rollout_processor.py
index 23a7d979..ab42bdcd 100644
--- a/eval_protocol/pytest/remote_rollout_processor.py
+++ b/eval_protocol/pytest/remote_rollout_processor.py
@@ -185,7 +185,7 @@ def _get_status() -> Dict[str, Any]:
                     f"Rollout {row.execution_metadata.rollout_id} timed out after {timeout_seconds} seconds"
                 )
 
-        row.execution_metadata.duration_seconds = time.perf_counter() - start_time
+        row.execution_metadata.rollout_duration_seconds = time.perf_counter() - start_time
 
         def _update_with_trace() -> None:
             return update_row_with_remote_trace(row, self._output_data_loader, model_base_url)
diff --git a/tests/test_priority_scheduler.py b/tests/test_priority_scheduler.py
index 27e748eb..f5b4fa31 100644
--- a/tests/test_priority_scheduler.py
+++ b/tests/test_priority_scheduler.py
@@ -57,7 +57,7 @@ async def test_scheduler_basic_execution(
     micro_batch_size = 1
 
     # Mock rollout processor with delay
-    async def delayed_rollout(processor, rows, config, run_idx):
+    async def delayed_rollout(processor, rows, config, run_idx, **kwargs):
         await asyncio.sleep(0.01)
         for row in rows:
             yield row
@@ -110,7 +110,7 @@ async def test_concurrency_control(
     rollout_lock = asyncio.Lock()
     eval_lock = asyncio.Lock()
 
-    async def mock_rollout_gen(processor, rows, config, run_idx):
+    async def mock_rollout_gen(processor, rows, config, run_idx, **kwargs):
         nonlocal active_rollouts, max_active_rollouts_seen
         async with rollout_lock:
             active_rollouts += 1
@@ -177,7 +177,7 @@ async def test_priority_scheduling(
 
     execution_order = []
 
-    async def mock_rollout_gen(processor, rows, config, run_idx):
+    async def mock_rollout_gen(processor, rows, config, run_idx, **kwargs):
         row_id = rows[0].input_metadata.row_id
         execution_order.append(f"{row_id}_run_{run_idx}")
         for row in rows:
             yield row
@@ -290,7 +290,7 @@ async def mock_eval(rows):
         eval_calls.append(rows)
         return rows  # Pass through
 
-    async def mock_rollout_gen(processor, rows, config, run_idx):
+    async def mock_rollout_gen(processor, rows, config, run_idx, **kwargs):
         for row in rows:
             yield row
 
diff --git a/vite-app/dist/assets/index-CuQbfdPD.js b/vite-app/dist/assets/index-CuQbfdPD.js
index dcf3d7e0..24021c48 100644
[remaining hunk omitted: rebuilt minified bundle output]
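The **kwargs added to the mocked rollout generators above follows from the scheduler change: rollout_processor_with_retry is now called with an extra keyword (disable_tqdm=True), so any stand-in patched over it must tolerate unknown keywords. A minimal compatible mock, assuming nothing beyond what the tests already do:

    async def mock_rollout_gen(processor, rows, config, run_idx, **kwargs):
        # kwargs may carry disable_tqdm (or future flags); the mock ignores them
        for row in rows:
            yield row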
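For reference, the scheduler's new progress reporting reduces to two stacked tqdm.asyncio bars plus postfix strings describing in-flight work. A condensed, self-contained sketch of that pattern (counts are illustrative; the postfix format mirrors _format_active_rollouts):

    from tqdm.asyncio import tqdm as async_tqdm

    total_rollouts, total_evals = 8, 8  # pointwise mode: one eval per rollout
    rollout_pbar = async_tqdm(total=total_rollouts, desc="🚀 Rollouts", unit="row", position=0, leave=True)
    eval_pbar = async_tqdm(total=total_evals, desc="📊 Evals", unit="eval", position=1, leave=True)
    try:
        rollout_pbar.set_postfix_str("active=2 r0:[0,1]")  # rows and run indices currently in flight
        rollout_pbar.update(1)
        eval_pbar.set_postfix_str("active=1")
        eval_pbar.update(1)
    finally:
        rollout_pbar.close()
        eval_pbar.close()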