Skip to content

Commit 3a78ea7

Browse files
committed
pep 8 and change words and links
1 parent 7c0428c commit 3a78ea7

File tree

5 files changed

+15
-12
lines changed

5 files changed

+15
-12
lines changed

.github/workflows/test.yml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -85,4 +85,4 @@ jobs:
8585
- name: Run MLX align tests
8686
run: python test/test_align.py load_mlx_whisper
8787
- name: Run MLX refine tests
88-
run: python test/test_refine.py load_mlx_whisper
88+
run: python test/test_refine.py load_mlx_whisper

README.md

Lines changed: 2 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -537,9 +537,9 @@ stable-ts audio.mp3 -o audio.srt -hw
537537
</details>
538538

539539
<details>
540-
<summary>MLX Whisper for Apple GPU's</summary>
540+
<summary>MLX Whisper (on Apple Silicon)</summary>
541541

542-
Transcribe faster on Apple devices with [MLX Whisper](https://huggingface.co/mlx-community/whisper-large-v3-turbo):
542+
Transcribe faster on Apple devices with [MLX Whisper](https://github.com/ml-explore/mlx-examples/tree/main/whisper):
543543
```
544544
pip install -U stable-ts[mlx]
545545
```
@@ -550,10 +550,6 @@ import stable_whisper
550550
model = stable_whisper.load_mlx_whisper('base')
551551
result = model.transcribe('audio.mp3')
552552
```
553-
Supports the [various versions on Hugging Face](https://huggingface.co/models?sort=downloads&search=mlx-community%2Fwhisper&sort=downloads):
554-
```python
555-
model = stable_whisper.load_hf_whisper('mlx-community/whisper-base.en-mlx')
556-
```
557553

558554

559555
<details>

stable_whisper/alignment.py

Lines changed: 9 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -423,10 +423,13 @@ def compute_timestamps(audio_segment: torch.Tensor, word_tokens: List[WordToken]
423423
)
424424
return [w for seg in temp_segments for w in seg['words']]
425425

426-
427426
elif model_type == 'mlx':
428-
from mlx_whisper.audio import N_FRAMES as MLX_N_FRAMES, SAMPLE_RATE as MLX_SAMPLE_RATE, log_mel_spectrogram as log_mel_spectrogram_mx, \
427+
from mlx_whisper.audio import (
428+
N_FRAMES as MLX_N_FRAMES,
429+
SAMPLE_RATE as MLX_SAMPLE_RATE,
430+
log_mel_spectrogram as log_mel_spectrogram_mx,
429431
pad_or_trim as pad_or_trim_mx
432+
)
430433
import mlx.core as mx
431434
import mlx_whisper.timing as timing
432435

@@ -664,8 +667,11 @@ def inference_func(audio_segment: torch, tokens: List[int]) -> torch.Tensor:
664667
return token_probs
665668

666669
elif model_type == 'mlx':
667-
from mlx_whisper.audio import N_FRAMES_MLX, log_mel_spectrogram as log_mel_spectrogram_mx, \
670+
from mlx_whisper.audio import (
671+
N_FRAMES_MLX,
672+
log_mel_spectrogram as log_mel_spectrogram_mx,
668673
pad_or_trim as pad_or_trim_mx
674+
)
669675
import mlx.core as mx
670676

671677
def inference_func(audio_batch_torch: torch.Tensor, tokens: List[int]) -> torch.Tensor:

stable_whisper/whisper_word_level/cli.py

Lines changed: 2 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -374,7 +374,8 @@ def url_to_path(url: str):
374374
'(https://huggingface.co/openai/whisper-large-v3); '
375375
'note: some features may not be available')
376376
parser.add_argument('--mlx_whisper', '-mlx', action='store_true',
377-
help='whether to use mlx-whisper (https://huggingface.co/mlx-community/whisper-large-v3-turbo); '
377+
help='whether to use mlx-whisper '
378+
'(https://github.com/ml-explore/mlx-examples/tree/main/whisper); '
378379
'note: some features may not be available')
379380

380381
parser.add_argument('--persist', '-p', action='store_true',

stable_whisper/whisper_word_level/mlx_whisper.py

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -331,4 +331,4 @@ def load_mlx_whisper(model_name: str, dtype=None, **model_kwargs):
331331
model.align_words = MethodType(align_words, model)
332332
model.refine = MethodType(refine, model)
333333

334-
return model
334+
return model

0 commit comments

Comments (0)