diff --git a/main.py b/main.py
new file mode 100644
index 0000000..18befae
--- /dev/null
+++ b/main.py
@@ -0,0 +1,77 @@
+import time
+import argparse
+import pprint
+
+from simple_diarizer.diarizer import Diarizer
+
+parser = argparse.ArgumentParser(
+    description="Speaker diarization",
+    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
+)
+parser.add_argument(dest='audio_name', type=str, help="Input audio file")
+parser.add_argument(dest='outputfile', nargs="?", default=None, help="Optional output file")
+parser.add_argument("--number_of_speakers", dest='number_of_speakers', default=None, type=int, help="Number of speakers (if known)")
+parser.add_argument("--max_speakers", dest='max_speakers', default=25, type=int, help="Maximum number of speakers (if the number of speakers is unknown)")
+parser.add_argument("--embed_model", dest='embed_model', default="ecapa", type=str, help="Name of the embedding model")
+parser.add_argument("--cluster_method", dest='cluster_method', default="nme-sc", type=str, help="Clustering method")
+args = parser.parse_args()
+
+diar = Diarizer(
+    embed_model=args.embed_model,       # 'xvec' and 'ecapa' supported
+    cluster_method=args.cluster_method  # 'ahc', 'sc' and 'nme-sc' supported
+)
+
+wav_file = args.audio_name
+num_speakers = args.number_of_speakers  # argparse already yields an int or None
+max_spk = args.max_speakers
+output_file = args.outputfile
+
+t0 = time.time()
+
+segments = diar.diarize(wav_file, num_speakers=num_speakers, max_speakers=max_spk, outfile=output_file)
+
+print("Time used for processing:", time.time() - t0)
+
+if not output_file:
+    result = {}
+    _segments = []
+    _speakers = {}
+    seg_id = 1
+    spk_i = 1
+    spk_i_dict = {}
+
+    for seg in segments:
+        segment = {}
+        segment["seg_id"] = seg_id
+
+        # Assign speaker ids in order of first appearance
+        if seg['label'] not in spk_i_dict:
+            spk_i_dict[seg['label']] = spk_i
+            spk_i += 1
+
+        spk_id = "spk" + str(spk_i_dict[seg['label']])
+        segment["spk_id"] = spk_id
+        segment["seg_begin"] = round(seg['start'])
+        segment["seg_end"] = round(seg['end'])
+
+        if spk_id not in _speakers:
+            _speakers[spk_id] = {}
+            _speakers[spk_id]["spk_id"] = spk_id
+            _speakers[spk_id]["duration"] = seg['end'] - seg['start']
+            _speakers[spk_id]["nbr_seg"] = 1
+        else:
+            _speakers[spk_id]["duration"] += seg['end'] - seg['start']
+            _speakers[spk_id]["nbr_seg"] += 1
+
+        _segments.append(segment)
+        seg_id += 1
+
+    for spkstat in _speakers.values():
+        spkstat["duration"] = round(spkstat["duration"])
+
+    result["speakers"] = list(_speakers.values())
+    result["segments"] = _segments
+
+    pprint.pprint(result)
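
Note (not part of the patch): with the arguments defined above, the new CLI can be invoked as below; "meeting.wav" and "meeting.rttm" are placeholder file names. When an output file is given, the result is written as RTTM; otherwise a summary is pretty-printed.

    # Estimate the number of speakers automatically (NME-SC, capped at 25 by default)
    python main.py meeting.wav

    # Fix the number of speakers and write an RTTM file
    python main.py meeting.wav meeting.rttm --number_of_speakers 2

    # Bound the automatic estimate instead of fixing it
    python main.py meeting.wav --max_speakers 10 --cluster_method nme-sc
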
diff --git a/requirements.txt b/requirements.txt
index ad2b2dc..02f3975 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,8 @@
-ipython>=7.9.0
-matplotlib>=3.5.1
+# ipython>=7.9.0
+# matplotlib>=3.5.1
 pandas>=1.3.5
 scikit-learn>=1.0.2
 speechbrain>=0.5.11
 torchaudio>=0.10.1
+onnxruntime>=1.14.0
+scipy<=1.8.1 # newer versions can provoke segmentation faults
\ No newline at end of file
diff --git a/simple_diarizer/__init__.py b/simple_diarizer/__init__.py
index dc085ab..525e1ce 100644
--- a/simple_diarizer/__init__.py
+++ b/simple_diarizer/__init__.py
@@ -1,3 +1,3 @@
 import os
 
-__version__ = os.getenv("GITHUB_REF_NAME", "latest")
+__version__ = os.getenv("GITHUB_REF_NAME", "1.0.2")
diff --git a/simple_diarizer/cluster.py b/simple_diarizer/cluster.py
index 737e5a7..7f67426 100644
--- a/simple_diarizer/cluster.py
+++ b/simple_diarizer/cluster.py
@@ -5,7 +5,7 @@
 from scipy.ndimage import gaussian_filter
 from sklearn.cluster import AgglomerativeClustering, KMeans, SpectralClustering
 from sklearn.metrics import pairwise_distances
-
+from .spectral_clustering import NME_SpectralClustering
 
 def similarity_matrix(embeds, metric="cosine"):
     return pairwise_distances(embeds, metric=metric)
@@ -43,9 +43,7 @@ def cluster_AHC(embeds, n_clusters=None, threshold=None, metric="cosine", **kwargs):
 # A lot of these methods are lifted from
 # https://github.com/wq2012/SpectralCluster
 ##########################################
-
-
-def cluster_SC(embeds, n_clusters=None, threshold=None, enhance_sim=True, **kwargs):
+def cluster_SC(embeds, n_clusters=None, max_speakers=None, threshold=None, enhance_sim=True, **kwargs):
     """
     Cluster embeds using Spectral Clustering
     """
@@ -59,7 +57,7 @@ def cluster_SC(embeds, n_clusters=None, threshold=None, enhance_sim=True, **kwargs):
     if n_clusters is None:
         (eigenvalues, eigenvectors) = compute_sorted_eigenvectors(S)
         # Get number of clusters.
-        k = compute_number_of_clusters(eigenvalues, 100, threshold)
+        k = compute_number_of_clusters(eigenvalues, max_speakers, threshold)
 
         # Get spectral embeddings.
         spectral_embeddings = eigenvectors[:, :k]
@@ -82,6 +80,25 @@ def cluster_SC(embeds, n_clusters=None, threshold=None, enhance_sim=True, **kwargs):
     return cluster_model.fit_predict(S)
 
 
+def cluster_NME_SC(embeds, n_clusters=None, max_speakers=None, threshold=None, enhance_sim=True, **kwargs):
+    """
+    Cluster embeds using NME-Spectral Clustering.
+
+    If n_clusters is None, the number of speakers is estimated
+    automatically, bounded by max_speakers.
+    """
+    S = cos_similarity(embeds)
+
+    labels = NME_SpectralClustering(
+        S,
+        num_clusters=n_clusters,
+        max_num_clusters=max_speakers
+    )
+
+    return labels
+
+
 def diagonal_fill(A):
     """
     Sets the diagonal elements of the matrix to the max of each row
@@ -134,7 +151,7 @@ def row_max_norm(A):
 def sim_enhancement(A):
     func_order = [
         diagonal_fill,
-        gaussian_blur,
+
         row_threshold_mult,
         symmetrization,
         diffusion,
@@ -144,6 +161,31 @@ def sim_enhancement(A):
         A = f(A)
     return A
 
+def cos_similarity(x):
+    """Compute the cosine similarity matrix in a CPU- and memory-efficient way
+
+    Args:
+        x (np.ndarray): embeddings, 2D array, embeddings are in rows
+
+    Returns:
+        np.ndarray: cosine similarity matrix
+
+    """
+    assert x.ndim == 2, f"x has {x.ndim} dimensions, it must be a matrix"
+    # L2-normalize the rows so that dot products become cosine similarities
+    x = x / (np.sqrt(np.sum(np.square(x), axis=1, keepdims=True)) + 1.0e-32)
+    assert np.allclose(np.ones_like(x[:, 0]), np.sum(np.square(x), axis=1))
+    max_n_elm = 200000000
+    # Accumulate over chunks of the feature axis to bound peak memory usage
+    step = max(max_n_elm // (x.shape[0] * x.shape[0]), 1)
+    retval = np.zeros(shape=(x.shape[0], x.shape[0]), dtype=np.float64)
+    x0 = np.expand_dims(x, 0)
+    x1 = np.expand_dims(x, 1)
+    for i in range(0, x.shape[1], step):
+        product = x0[:, :, i : i + step] * x1[:, :, i : i + step]
+        retval += np.sum(product, axis=2, keepdims=False)
+    assert np.all(retval >= -1.0001), retval
+    assert np.all(retval <= 1.0001), retval
+    return retval
+
 
 def compute_affinity_matrix(X):
     """Compute the affinity matrix from data.
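
Note (not part of the patch): a minimal sketch of how the new helpers compose, using synthetic embeddings; the shapes and values are illustrative only.

    import numpy as np
    from simple_diarizer.cluster import cos_similarity, cluster_NME_SC

    rng = np.random.default_rng(0)
    embeds = rng.normal(size=(40, 192))  # 40 utterance embeddings
    S = cos_similarity(embeds)           # chunked cosine similarity, values in [-1, 1]
    labels = cluster_NME_SC(embeds, n_clusters=None, max_speakers=5)
    print(len(labels))                   # one cluster label per embedding
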
diff --git a/simple_diarizer/diarizer.py b/simple_diarizer/diarizer.py
index c03659b..400e991 100644
--- a/simple_diarizer/diarizer.py
+++ b/simple_diarizer/diarizer.py
@@ -6,10 +6,10 @@
 import pandas as pd
 import torch
 import torchaudio
 
-from speechbrain.pretrained import EncoderClassifier
+from speechbrain.inference.speaker import EncoderClassifier
 from tqdm.autonotebook import tqdm
 
-from .cluster import cluster_AHC, cluster_SC
+from .cluster import cluster_AHC, cluster_SC, cluster_NME_SC
 from .utils import check_wav_16khz_mono, convert_wavfile
@@ -25,12 +25,16 @@ def __init__(
         assert cluster_method in [
             "ahc",
             "sc",
-        ], "Only ahc and sc in the supported clustering options"
+            "nme-sc",
+        ], "Only 'ahc', 'sc' and 'nme-sc' are supported clustering options"
 
         if cluster_method == "ahc":
             self.cluster = cluster_AHC
         if cluster_method == "sc":
             self.cluster = cluster_SC
+        if cluster_method == "nme-sc":
+            self.cluster = cluster_NME_SC
+
         self.vad_model, self.get_speech_ts = self.setup_VAD()
@@ -56,7 +60,7 @@ def __init__(
     def setup_VAD(self):
         model, utils = torch.hub.load(
-            repo_or_dir="snakers4/silero-vad", model="silero_vad"
+            repo_or_dir="snakers4/silero-vad", model="silero_vad", onnx=True
         )  # force_reload=True)
@@ -182,6 +186,7 @@
     def diarize(
         self,
         wav_file,
         num_speakers=2,
+        max_speakers=None,
         threshold=None,
         silence_tolerance=0.2,
         enhance_sim=True,
@@ -194,6 +199,7 @@ def diarize(
         Inputs:
             wav_file (path): Path to input audio file
             num_speakers (int) or NoneType: Number of speakers to cluster to
+            max_speakers (int) or NoneType: Upper bound on the speaker count if num_speakers is not defined
             threshold (float) or NoneType: Threshold to cluster to if
                 num_speakers is not defined
             silence_tolerance (float): Same speaker segments which are close enough together
@@ -229,10 +235,10 @@ def diarize(
                 'cluster_labels': cluster_labels (list): cluster label for each embed in embeds
             }
 
-        Uses AHC/SC to cluster
+        Uses AHC/SC/NME-SC to cluster
         """
         recname = os.path.splitext(os.path.basename(wav_file))[0]
-
+
         if check_wav_16khz_mono(wav_file):
             signal, fs = torchaudio.load(wav_file)
         else:
@@ -249,25 +255,34 @@ def diarize(
             print("Running VAD...")
             speech_ts = self.vad(signal[0])
             print("Splitting by silence found {} utterances".format(len(speech_ts)))
-        assert len(speech_ts) >= 1, "Couldn't find any speech during VAD"
-
-        print("Extracting embeddings...")
-        embeds, segments = self.recording_embeds(signal, fs, speech_ts)
-
-        print("Clustering to {} speakers...".format(num_speakers))
-        cluster_labels = self.cluster(
-            embeds,
-            n_clusters=num_speakers,
-            threshold=threshold,
-            enhance_sim=enhance_sim,
-        )
-
-        print("Cleaning up output...")
-        cleaned_segments = self.join_segments(cluster_labels, segments)
-        cleaned_segments = self.make_output_seconds(cleaned_segments, fs)
-        cleaned_segments = self.join_samespeaker_segments(
-            cleaned_segments, silence_tolerance=silence_tolerance
-        )
+        # assert len(speech_ts) >= 1, "Couldn't find any speech during VAD"
+
+        if len(speech_ts) >= 1:
+            print("Extracting embeddings...")
+            embeds, segments = self.recording_embeds(signal, fs, speech_ts)
+
+            if embeds.shape[0] >= 2:
+                print("Clustering to {} speakers...".format(num_speakers))
+                cluster_labels = self.cluster(embeds, n_clusters=num_speakers, max_speakers=max_speakers,
+                                              threshold=threshold, enhance_sim=enhance_sim)
+
+                cleaned_segments = self.join_segments(cluster_labels, segments)
+                cleaned_segments = self.make_output_seconds(cleaned_segments, fs)
+                cleaned_segments = self.join_samespeaker_segments(cleaned_segments,
+                                                                  silence_tolerance=silence_tolerance)
+            else:
+                # A single utterance cannot be clustered: assign one speaker label
+                cluster_labels = [1]
+                cleaned_segments = self.join_segments(cluster_labels, segments)
+                cleaned_segments = self.make_output_seconds(cleaned_segments, fs)
+        else:
+            # No speech found during VAD: return an empty segmentation
+            cluster_labels = []
+            cleaned_segments = []
+
         print("Done!")
         if outfile:
             self.rttm_output(cleaned_segments, recname, outfile=outfile)
@@ -281,9 +296,9 @@ def diarize(
             "cluster_labels": cluster_labels}
 
     @staticmethod
-    def rttm_output(segments, recname, outfile=None):
+    def rttm_output(segments, recname, outfile=None, channel=0):
         assert outfile, "Please specify an outfile"
-        rttm_line = "SPEAKER {} 0 {} {} {} \n"
+        rttm_line = "SPEAKER {} " + str(channel) + " {} {} {} \n"
         with open(outfile, "w") as fp:
             for seg in segments:
                 start = seg["start"]
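
Note (not part of the patch): a minimal sketch of the new clustering option from the Python API; "meeting.wav" is a placeholder for any 16 kHz mono WAV file.

    from simple_diarizer.diarizer import Diarizer

    diar = Diarizer(embed_model="ecapa", cluster_method="nme-sc")
    # num_speakers=None lets NME-SC estimate the speaker count, bounded by max_speakers
    segments = diar.diarize("meeting.wav", num_speakers=None, max_speakers=10)
    for seg in segments:
        print(seg["label"], seg["start"], seg["end"])
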
diff --git a/simple_diarizer/spectral_clustering.py b/simple_diarizer/spectral_clustering.py
new file mode 100644
index 0000000..8b3ac01
--- /dev/null
+++ b/simple_diarizer/spectral_clustering.py
@@ -0,0 +1,135 @@
+import numpy as np
+import scipy.sparse.linalg
+from sklearn.cluster import SpectralClustering
+
+# NME low-level operations
+# These functions are taken from the Kaldi scripts.
+
+# Prepares a binarized (0/1) affinity matrix with p_neighbors non-zero elements in each row
+def get_kneighbors_conn(X_dist, p_neighbors):
+    X_dist_out = np.zeros_like(X_dist)
+    for i, line in enumerate(X_dist):
+        sorted_idx = np.argsort(line)
+        sorted_idx = sorted_idx[::-1]
+        indices = sorted_idx[:p_neighbors]
+        X_dist_out[indices, i] = 1
+    return X_dist_out
+
+
+# Thresholds the affinity matrix, keeping the p largest non-zero elements in each row
+def Threshold(A, p):
+    N = A.shape[0]
+    Ap = np.zeros((N, N))
+    for i in range(N):
+        thr = sorted(A[i, :], reverse=True)[p]
+        Ap[i, A[i, :] > thr] = A[i, A[i, :] > thr]
+    return Ap
+
+
+# Computes the Laplacian of a matrix
+def Laplacian(A):
+    d = np.sum(A, axis=1) - np.diag(A)
+    D = np.diag(d)
+    return D - A
+
+
+# Calculates eigengaps (differences between adjacent eigenvalues sorted in ascending order)
+def Eigengap(S):
+    S = sorted(S)
+    return np.diff(S)
+
+
+# Computes parameters of normalized eigenmaps for automatic thresholding selection
+def ComputeNMEParameters(A, p, max_num_clusters):
+    # p-neighbour binarization
+    Ap = get_kneighbors_conn(A, p)
+    # Symmetrization
+    Ap = (Ap + np.transpose(Ap)) / 2
+    # Laplacian matrix computation
+    Lp = Laplacian(Ap)
+    # Get max_num_clusters+1 smallest eigenvalues
+    S = scipy.sparse.linalg.eigsh(
+        Lp,
+        k=max_num_clusters + 1,
+        which="SA",
+        tol=1e-6,
+        return_eigenvectors=False,
+        mode="buckling",
+    )
+    # Get the largest eigenvalue
+    Smax = scipy.sparse.linalg.eigsh(
+        Lp, k=1, which="LA", tol=1e-6, return_eigenvectors=False, mode="buckling"
+    )
+    # Eigengap computation
+    e = Eigengap(S)
+    g = np.max(e[:max_num_clusters]) / (Smax + 1e-10)
+    r = p / g
+    k = np.argmax(e[:max_num_clusters])
+    return (e, g, k, r)
+
+
+"""
+Performs spectral clustering with Normalized Maximum Eigengap (NME)
+Parameters:
+   A: affinity matrix (matrix of pairwise cosine similarities or PLDA scores between speaker embeddings)
+   num_clusters: number of clusters to generate (if None, determined automatically)
+   max_num_clusters: maximum allowed number of clusters to generate
+   pmax: maximum count for matrix binarization (should be at least 2)
+   pbest: best count for matrix binarization (if 0, determined automatically)
+Returns: cluster assignments for every speaker embedding
+"""
+
+
+def NME_SpectralClustering(
+    A, num_clusters=None, max_num_clusters=None, pbest=0, pmin=3, pmax=20
+):
+    if max_num_clusters is None:
+        assert num_clusters is not None, "Cannot have both num_clusters and max_num_clusters be None"
+        max_num_clusters = num_clusters
+
+    if pbest == 0:
+        print("Selecting best number of neighbors for affinity matrix thresholding:")
+        rbest = None
+        kbest = None
+        for p in range(pmin, pmax + 1):
+            e, g, k, r = ComputeNMEParameters(A, p, max_num_clusters)
+            if rbest is None or rbest > r:
+                rbest = r
+                pbest = p
+                kbest = k
+
+        num_clusters = num_clusters if num_clusters is not None else (kbest + 1)
+        return NME_SpectralClustering_sklearn(A, num_clusters, pbest)
+
+    if num_clusters is None:
+        e, g, k, r = ComputeNMEParameters(A, pbest, max_num_clusters)
+        return NME_SpectralClustering_sklearn(A, k + 1, pbest)
+
+    return NME_SpectralClustering_sklearn(A, num_clusters, pbest)
+
+
+"""
+Performs spectral clustering with Normalized Maximum Eigengap (NME) with a fixed threshold and number of clusters
+Parameters:
+   A: affinity matrix (matrix of pairwise cosine similarities or PLDA scores between speaker embeddings)
+   num_clusters: number of clusters to generate
+   pbest: best count for matrix binarization
+Returns: cluster assignments for every speaker embedding
+"""
+
+
+def NME_SpectralClustering_sklearn(A, num_clusters, pbest):
+    # Ap = Threshold(A, pbest)
+    Ap = get_kneighbors_conn(A, pbest)  # thresholded and binarized
+    Ap = (Ap + np.transpose(Ap)) / 2
+
+    model = SpectralClustering(
+        n_clusters=num_clusters, affinity="precomputed", random_state=0
+    )
+    labels = model.fit_predict(Ap)
+    return labels
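
Note (not part of the patch): a small sanity check of the automatic tuning on a synthetic affinity matrix with two well-separated blocks; real affinity matrices come from cos_similarity over speaker embeddings.

    import numpy as np
    from simple_diarizer.spectral_clustering import NME_SpectralClustering

    # Two 10x10 blocks of high similarity (0.95) on a low-similarity background (0.05)
    A = np.kron(np.eye(2), np.ones((10, 10))) * 0.9 + 0.05
    np.fill_diagonal(A, 1.0)
    labels = NME_SpectralClustering(A, num_clusters=None, max_num_clusters=8)
    print(labels)  # expected: two clusters, one per block
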
diff --git a/simple_diarizer/utils.py b/simple_diarizer/utils.py
index de70954..cf92ad4 100644
--- a/simple_diarizer/utils.py
+++ b/simple_diarizer/utils.py
@@ -2,11 +2,8 @@
 import subprocess
 from pprint import pprint
 
-import matplotlib.pyplot as plt
 import numpy as np
 import torchaudio
 
-from IPython.display import Audio, display
-
 
 ##################
 # Audio utils
@@ -83,6 +80,7 @@ def waveplot(signal, fs, start_idx=0, figsize=(5, 3), color="tab:blue"):
     Outputs:
         - Returns the matplotlib figure
     """
+    import matplotlib.pyplot as plt
     plt.figure(figsize=figsize)
     start_time = start_idx / fs
     end_time = start_time + (len(signal) / fs)
@@ -113,6 +111,7 @@ def combined_waveplot(signal, fs, segments, figsize=(10, 3), tick_interval=60):
     Outputs:
         - The matplotlib figure
     """
+    import matplotlib.pyplot as plt
     plt.figure(figsize=figsize)
     for seg in segments:
         start = seg["start_sample"]
@@ -153,6 +152,8 @@ def waveplot_perspeaker(signal, fs, segments):
 
     Designed to be run in a jupyter notebook
     """
+    import matplotlib.pyplot as plt
+    from IPython.display import Audio, display
    for seg in segments:
         start = seg["start_sample"]
         end = seg["end_sample"]
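
Note (not part of the patch): after this change, importing simple_diarizer.utils no longer requires matplotlib or IPython; they are imported on demand the first time a plotting helper is called.

    from simple_diarizer import utils      # works on headless installs
    # utils.waveplot(signal, fs)           # would import matplotlib only at call time
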