Commit 286ea1a

fix: fix support for numpy==2.x
1 parent 2e04ec7 commit 286ea1a

4 files changed: +14 -11 lines changed

CHANGELOG.md

Lines changed: 4 additions & 0 deletions

@@ -1,5 +1,9 @@
 # Changelog
 
+## develop
+
+- fix: fix support for `numpy==2.x` ([@metal3d](https://github.com/metal3d/))
+
 
 ## Version 3.3.1 (2024-06-19)
 
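For context (not part of the diff): NumPy 2.0 removed the legacy `np.NAN` alias, which is why the pipelines below break on `numpy==2.x` until the spelling is switched to `np.nan`. A minimal sketch of the incompatibility, runnable on either major version:

```python
import numpy as np

# `np.nan` exists in both NumPy 1.x and 2.x, so it is the portable spelling.
placeholder = np.nan * np.zeros((2, 3))  # a (2, 3) array filled with NaN

# The upper-case alias only survives on NumPy 1.x; it was removed in 2.0,
# so code that still says `np.NAN` raises AttributeError on numpy>=2.0:
try:
    _ = np.NAN
except AttributeError:
    print("np.NAN is gone; use np.nan")
```
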
pyannote/audio/pipelines/speaker_diarization.py

Lines changed: 1 addition & 2 deletions

@@ -400,7 +400,7 @@ def reconstruct(
         num_chunks, num_frames, local_num_speakers = segmentations.data.shape
 
         num_clusters = np.max(hard_clusters) + 1
-        clustered_segmentations = np.NAN * np.zeros(
+        clustered_segmentations = np.nan * np.zeros(
             (num_chunks, num_frames, num_clusters)
         )
 

@@ -515,7 +515,6 @@ def apply(
             centroids = None
 
         else:
-
             # skip speaker embedding extraction with oracle clustering
             if self.klustering == "OracleClustering" and not return_embeddings:
                 embeddings = None

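As an aside, `np.nan * np.zeros(shape)` in `reconstruct` is just one way to build an all-NaN buffer that gets filled in afterwards; a toy illustration (shapes are made up, not taken from the pipeline):

```python
import numpy as np

# Toy dimensions, not the pipeline's real ones.
num_chunks, num_frames, num_clusters = 4, 10, 3

# Pattern used in reconstruct(): start from an all-NaN buffer, then overwrite
# the frames that actually get assigned to a cluster.
clustered_segmentations = np.nan * np.zeros((num_chunks, num_frames, num_clusters))

# np.full gives the same starting point (shown only for comparison).
assert np.isnan(clustered_segmentations).all()
assert np.isnan(np.full((num_chunks, num_frames, num_clusters), np.nan)).all()
```
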
pyannote/audio/pipelines/speaker_verification.py

Lines changed: 5 additions & 5 deletions

@@ -186,7 +186,7 @@ def __call__(
 
         # corner case: every signal is too short
         if max_len < self.min_num_samples:
-            return np.NAN * np.zeros((batch_size, self.dimension))
+            return np.nan * np.zeros((batch_size, self.dimension))
 
         too_short = wav_lens < self.min_num_samples
         wav_lens[too_short] = max_len

@@ -197,7 +197,7 @@ def __call__(
         )
 
         embeddings = embeddings.cpu().numpy()
-        embeddings[too_short.cpu().numpy()] = np.NAN
+        embeddings[too_short.cpu().numpy()] = np.nan
 
         return embeddings
 

@@ -364,7 +364,7 @@ def __call__(
 
         # corner case: every signal is too short
         if max_len < self.min_num_samples:
-            return np.NAN * np.zeros((batch_size, self.dimension))
+            return np.nan * np.zeros((batch_size, self.dimension))
 
         too_short = wav_lens < self.min_num_samples
         wav_lens = wav_lens / max_len

@@ -377,7 +377,7 @@ def __call__(
             .numpy()
         )
 
-        embeddings[too_short.cpu().numpy()] = np.NAN
+        embeddings[too_short.cpu().numpy()] = np.nan
 
         return embeddings
 

@@ -594,7 +594,7 @@ def __call__(
 
         imasks = imasks > 0.5
 
-        embeddings = np.NAN * np.zeros((batch_size, self.dimension))
+        embeddings = np.nan * np.zeros((batch_size, self.dimension))
 
         for f, (feature, imask) in enumerate(zip(features, imasks)):
             masked_feature = feature[imask]

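All five hunks above touch the same guard: embeddings of waveforms shorter than the model's minimum input length are set to NaN instead of being computed. A self-contained sketch of that pattern, with hypothetical `min_num_samples`/`dimension` values (in the pipeline they come from the loaded embedding model):

```python
import numpy as np
import torch

# Hypothetical values; the real ones come from the embedding model.
min_num_samples, dimension = 640, 192
batch_size = 4

wav_lens = torch.tensor([8000, 320, 16000, 500])  # samples per waveform in the batch
max_len = int(wav_lens.max())

if max_len < min_num_samples:
    # corner case: every signal is too short -> all-NaN batch, nothing to embed
    embeddings = np.nan * np.zeros((batch_size, dimension))
else:
    too_short = wav_lens < min_num_samples
    # stand-in for the actual model forward pass
    embeddings = np.random.randn(batch_size, dimension)
    # mark unusable rows with NaN instead of keeping meaningless embeddings
    embeddings[too_short.numpy()] = np.nan

print(np.isnan(embeddings).any(axis=1))  # rows 1 and 3 are flagged as NaN
```
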
pyannote/audio/pipelines/speech_separation.py

Lines changed: 4 additions & 4 deletions

@@ -419,7 +419,7 @@ def reconstruct(
         num_chunks, num_frames, local_num_speakers = segmentations.data.shape
 
         num_clusters = np.max(hard_clusters) + 1
-        clustered_segmentations = np.NAN * np.zeros(
+        clustered_segmentations = np.nan * np.zeros(
            (num_chunks, num_frames, num_clusters)
         )
 

@@ -644,9 +644,9 @@ def apply(
                         len(speaker_activation), dtype=float
                     )
 
-                    speaker_activation_with_context[
-                        np.concatenate(remaining_zeros)
-                    ] = 0.0
+                    speaker_activation_with_context[np.concatenate(remaining_zeros)] = (
+                        0.0
+                    )
 
             discrete_diarization.data.T[i] = speaker_activation_with_context
         num_sources = sources.data.shape[1]
