-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathbatch_experiments.py
More file actions
55 lines (40 loc) · 1.83 KB
/
batch_experiments.py
File metadata and controls
55 lines (40 loc) · 1.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
#!/usr/bin/env python3
"""
Run multiple experiments using templates
"""
from core.parallel_orchestrator import ParallelOrchestrator
from core.memory import ExperimentMemory
from core.llm import LLMBackend
from agents import hypothesis, planner, executor, analyzer, critic
from templates.experiment_templates import ExperimentTemplates
async def run_batch_experiments():
    """Run a batch of experiments from the 'prime_research' template.

    Builds the five-agent pipeline around a shared LLM backend, runs the
    template's first two problems in parallel via the orchestrator, then
    meta-analyzes the stored results.

    Returns:
        list: Per-experiment result dicts produced by the orchestrator.
    """
    # Initialize system: one shared LLM backend and a persistent memory store.
    llm = LLMBackend('deepseek-r1:1.5b')
    memory = ExperimentMemory()
    agents = {
        'hypothesis': hypothesis.HypothesisAgent("H", llm),
        'planner': planner.PlannerAgent('P', llm),
        'executor': executor.ExecutorAgent('E', llm),
        'analyzer': analyzer.AnalyzerAgent('A', llm),
        'critic': critic.CriticAgent('C', llm)
    }
    orchestrator = ParallelOrchestrator(agents, memory)
    # Get prime research template
    template = ExperimentTemplates.get_template('prime_research')
    problems = template['default_problems'][:2]  # Run first 2 problems
    print(f"🚀 Running {len(problems)} experiments in parallel...")
    # Run experiments in parallel
    results = await orchestrator.run_parallel_experiments(problems, template['max_iterations'])
    print(f"✅ Completed {len(results)} experiments")
    # Analyze results. Local import keeps the meta-analysis dependency out
    # of module import time.
    from core.meta_analyzer import MetaAnalyzer
    # Renamed from 'analyzer': the original name shadowed the imported
    # 'agents.analyzer' module used to build the agent dict above.
    meta_analyzer = MetaAnalyzer(memory)
    experiment_ids = [r.get('experiment_id') for r in results if 'experiment_id' in r]
    if experiment_ids:
        comparison = meta_analyzer.compare_experiments(experiment_ids)
        print("📊 Meta-analysis completed")
        # Label fixed: this prints the per-experiment success-rate data,
        # not a single average.
        print(f"   Success rates: {comparison['metrics']['success_rates']}")
    else:
        # No experiment reported an id — nothing to compare.
        print("⚠️ No experiment IDs returned; skipping meta-analysis")
    return results
if __name__ == "__main__":
import asyncio
asyncio.run(run_batch_experiments())