Skip to content

Commit f2be069

Browse files
committed
Enable opt_einsum only when it is available
1 parent 85f7f30 commit f2be069

File tree

1 file changed

+6
-4
lines changed

1 file changed

+6
-4
lines changed

visualbench/runs/colab_utils.py

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ def performance_tweaks(
4242
gradcheck: bool | None=False,
4343
gradgradcheck: bool | None=False,
4444
):
45+
"""note that deterministic is none only for this one"""
4546
import torch
4647
# causes cuDNN to benchmark multiple convolution algorithms and select the fastest
4748
if cudnn_bench is not None: torch.backends.cudnn.benchmark = cudnn_bench
@@ -69,8 +70,9 @@ def performance_tweaks(
6970
if emit_nvtx is not None: torch.autograd.profiler.emit_nvtx(emit_nvtx) # type:ignore
7071

7172
# optimizes contraction order for einsum operation
72-
if opt_einsum is not None:
73-
torch.backends.opt_einsum.enabled = opt_einsum
73+
if hasattr(torch.backends, "opt_einsum"):
74+
if opt_einsum is not None:
75+
torch.backends.opt_einsum.enabled = opt_einsum
7476

75-
# larger search time (1 s. on 1st call) but very fast einsum
76-
if opt_einsum_strategy is not None: torch.backends.opt_einsum.strategy = opt_einsum_strategy
77+
# larger search time (1 s. on 1st call) but very fast einsum
78+
if opt_einsum_strategy is not None: torch.backends.opt_einsum.strategy = opt_einsum_strategy

0 commit comments

Comments
 (0)