
Commit a46e186

fixed formatting
Signed-off-by: Jaya Venkatesh <[email protected]>
1 parent: 9b1cf5c

File tree: 1 file changed (+39 / -21 lines)

rapids_cli/benchmark/benchmark.py

Lines changed: 39 additions & 21 deletions
@@ -99,31 +99,35 @@ def benchmark_run(
 
     # Discover benchmarks
     benchmarks = _discover_benchmarks(filters, verbose)
-
+
     # Handle dry run
     if dry_run:
-        console.print(f"[bold green]{BENCHMARK_SYMBOL} Running RAPIDS CPU vs GPU benchmarks [/bold green]")
+        console.print(
+            f"[bold green]{BENCHMARK_SYMBOL} Running RAPIDS CPU vs GPU benchmarks [/bold green]"
+        )
         console.print("Dry run, skipping benchmarks")
         return True
 
     if not benchmarks:
-        console.print("[yellow]No benchmarks found. Install RAPIDS libraries to enable benchmarks.[/yellow]")
+        console.print(
+            "[yellow]No benchmarks found. Install RAPIDS libraries to enable benchmarks.[/yellow]"
+        )
         return True
 
     # Execute benchmarks and collect results
     results = _execute_benchmarks(benchmarks, runs, verbose)
-
+
     # Render results
     _render_results_rich(results, verbose)
-
+
     # Return overall success
     return all(result.status for result in results)
 
 
 def _discover_benchmarks(filters: list[str], verbose: bool) -> list:
     """Discover available benchmark functions."""
     benchmarks = []
-
+
     if verbose:
         console.print("Discovering benchmarks")
 
@@ -137,15 +141,19 @@ def _discover_benchmarks(filters: list[str], verbose: bool) -> list:
 
     if verbose:
         console.print(f"Discovered {len(benchmarks)} benchmarks")
-
+
     return benchmarks
 
 
-def _execute_benchmarks(benchmarks: list, runs: int, verbose: bool) -> list[BenchmarkResult]:
+def _execute_benchmarks(
+    benchmarks: list, runs: int, verbose: bool
+) -> list[BenchmarkResult]:
     """Execute all benchmarks and collect results."""
-    console.print(f"[bold green]{BENCHMARK_SYMBOL} Running RAPIDS CPU vs GPU benchmarks [/bold green]")
+    console.print(
+        f"[bold green]{BENCHMARK_SYMBOL} Running RAPIDS CPU vs GPU benchmarks [/bold green]"
+    )
     console.print(f"Running benchmarks ({runs} runs each)")
-
+
     results: list[BenchmarkResult] = []
 
     with Progress(
@@ -169,6 +177,8 @@ def _execute_benchmarks(benchmarks: list, runs: int, verbose: bool) -> list[BenchmarkResult]:
             all_gpu_times = []
             caught_warnings = None
             error = None
+            avg_cpu_time = avg_gpu_time = speedup = cpu_std = gpu_std = None
+            status = False
 
             try:
                 for run in range(runs):
@@ -192,17 +202,21 @@ def _execute_benchmarks(benchmarks: list, runs: int, verbose: bool) -> list[BenchmarkResult]:
                     avg_cpu_time = sum(all_cpu_times) / len(all_cpu_times)
                     avg_gpu_time = sum(all_gpu_times) / len(all_gpu_times)
                     speedup = avg_cpu_time / avg_gpu_time
-                    cpu_std = statistics.stdev(all_cpu_times) if len(all_cpu_times) > 1 else 0.0
-                    gpu_std = statistics.stdev(all_gpu_times) if len(all_gpu_times) > 1 else 0.0
+                    cpu_std = (
+                        statistics.stdev(all_cpu_times)
+                        if len(all_cpu_times) > 1
+                        else 0.0
+                    )
+                    gpu_std = (
+                        statistics.stdev(all_gpu_times)
+                        if len(all_gpu_times) > 1
+                        else 0.0
+                    )
                     status = True
-                else:
-                    avg_cpu_time = avg_gpu_time = speedup = cpu_std = gpu_std = None
-                    status = False
 
             except Exception as e:
                 error = e
                 status = False
-                avg_cpu_time = avg_gpu_time = speedup = cpu_std = gpu_std = None
 
             # Create result
             result = BenchmarkResult(
@@ -232,27 +246,31 @@ def _execute_benchmarks(benchmarks: list, runs: int, verbose: bool) -> list[BenchmarkResult]:
     return results
 
 
-def _render_benchmark_completion(result: BenchmarkResult, index: int, total: int, runs: int):
+def _render_benchmark_completion(
+    result: BenchmarkResult, index: int, total: int, runs: int
+):
     """Render completion of a single benchmark."""
     if result.status:
         console.print(f"[green]✓[/green] [{index}/{total}] {result.name}")
 
         # Show timing details
         cpu_display = f"{result.cpu_time:.3f}s"
         gpu_display = f"{result.gpu_time:.3f}s"
-
+
         if runs > 1:
             cpu_display += f" ± {result.cpu_std:.3f}s"
             gpu_display += f" ± {result.gpu_std:.3f}s"
-
+
         console.print(
             f" CPU Time: [red]{cpu_display}[/red] "
             f"GPU Time: [green]{gpu_display}[/green] "
            f"Speedup: [bold green]{result.speedup:.1f}x[/bold green]"
         )
     else:
-        console.print(f"[red]❌[/red] [{index}/{total}] {result.name} - "
-                      f"[bold red]Failed[/bold red]")
+        console.print(
+            f"[red]❌[/red] [{index}/{total}] {result.name} - "
+            f"[bold red]Failed[/bold red]"
+        )
 
 
 def _render_results_rich(results: list[BenchmarkResult], verbose: bool):
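
Note on `_execute_benchmarks`: besides the black-style line wrapping, the hunks above pre-initialize the per-benchmark results (`avg_cpu_time = avg_gpu_time = speedup = cpu_std = gpu_std = None`, `status = False`) before the `try` block, which is what allows the old `else:` branch and the duplicate reset inside `except` to be deleted: every path now reaches `BenchmarkResult(...)` with all names bound. The `len(...) > 1` guard around `statistics.stdev()` matters because the standard library raises `StatisticsError` for fewer than two data points. A minimal standalone sketch of the same pattern (the `summarize` helper and the sample timings here are illustrative, not part of this commit):

    import statistics

    def summarize(times: list[float]) -> tuple[float | None, float | None, bool]:
        """Return (mean, stdev, ok), mirroring the commit's failure-default pattern."""
        # Pre-initialize every output so an exception below can never
        # leave a name unbound when the caller reads the result.
        mean = std = None
        ok = False
        try:
            if times:
                mean = sum(times) / len(times)
                # statistics.stdev() raises StatisticsError for < 2 samples,
                # hence the guard the diff reformats.
                std = statistics.stdev(times) if len(times) > 1 else 0.0
                ok = True
        except Exception:
            pass  # mean/std/ok already hold their failure defaults
        return mean, std, ok

    print(summarize([1.2, 1.4, 1.1]))  # (~1.233, ~0.153, True)
    print(summarize([2.0]))            # (2.0, 0.0, True)
    print(summarize([]))               # (None, None, False)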
