1 parent 75983a5 commit b9ca0de
src/accelerate/launchers.py
@@ -149,7 +149,7 @@ def train(*args):
         launcher = PrepareForLaunch(function, distributed_type="XLA")
         print("Launching a training on TPU cores.")
         xmp.spawn(launcher, args=args, start_method="fork")
-    elif in_colab and get_gpu_info()[1] < 2:
+    elif in_colab and (not torch.cuda.is_available() or get_gpu_info()[1] < 2):
         # No need for a distributed launch otherwise as it's either CPU or one GPU.
         if torch.cuda.is_available():
             print("Launching training on one GPU.")