|
# File: dataspeech-main/dataspeech/cpu_enrichments/rate.py |
|
from g2p import make_g2p |
|
# Grapheme-to-phoneme transducer: converts English text into an IPA phoneme string.
transducer = make_g2p('eng', 'eng-ipa')
|
|
|
def rate_apply(batch, rank=None, audio_column_name='audio', text_column_name='text'):
    """Compute the phoneme transcription and speaking rate (phonemes per second) of each sample."""
|
if isinstance(batch[text_column_name], list): |
|
speaking_rates = [] |
|
phonemes_list = [] |
|
if 'speech_duration' in batch: |
|
for (text, audio_duration) in zip(batch[text_column_name], batch['speech_duration']): |
|
phonemes = transducer(text).output_string |
|
                # Guard against a zero duration before dividing.
                audio_duration = audio_duration if audio_duration != 0 else 0.01

                # Speaking rate is the number of phonemes uttered per second.
                speaking_rate = len(phonemes) / audio_duration
|
speaking_rates.append(speaking_rate) |
|
phonemes_list.append(phonemes) |
|
else: |
|
for (text, audio) in zip(batch[text_column_name], batch[audio_column_name]): |
|
phonemes = transducer(text).output_string |
|
sample_rate = audio['sampling_rate'] |
|
audio_length = len(audio['array'].squeeze()) / sample_rate |
|
speaking_rate = len(phonemes) / audio_length |
|
speaking_rates.append(speaking_rate) |
|
phonemes_list.append(phonemes) |
|
batch['speaking_rate'] = speaking_rates |
|
batch['phonemes'] = phonemes_list |
|
else: |
|
phonemes = transducer(batch[text_column_name]).output_string |
|
if 'speech_duration' in batch: |
|
audio_length = batch['speech_duration'] if batch['speech_duration'] != 0 else 0.01 |
|
else: |
|
sample_rate = batch[audio_column_name]['sampling_rate'] |
|
audio_length = len(batch[audio_column_name]['array'].squeeze()) / sample_rate |
|
speaking_rate = len(phonemes) / audio_length |
|
batch['speaking_rate'] = speaking_rate |
|
batch['phonemes'] = phonemes |
|
return batch |
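
# Usage sketch (illustrative; not part of the original file). Assuming `ds` is a
# `datasets` Dataset with 'audio' and 'text' columns, `rate_apply` can be mapped over
# it directly; if a 'speech_duration' column is already present (e.g. computed by the
# Brouhaha SNR pipeline), the audio itself is not read:
#
#     from datasets import load_dataset
#     ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
#     ds = ds.map(rate_apply, fn_kwargs={"audio_column_name": "audio", "text_column_name": "text"})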
|
|
|
# File: dataspeech-main/dataspeech/gpu_enrichments/pitch.py |
|
import torch |
|
import penn |
|
# penn pitch-estimation settings.
hopsize = 0.01  # analysis hop, in seconds (10 ms)

fmin = 30.0  # lower bound of the pitch search range, in Hz

fmax = 1000.0  # upper bound of the pitch search range, in Hz

checkpoint = None  # use penn's default pretrained checkpoint

center = 'half-hop'  # frame-centering strategy

interp_unvoiced_at = 0.065  # interpolate pitch where periodicity falls below this threshold
|
|
|
def pitch_apply(batch, rank=None, audio_column_name='audio', output_column_name='utterance_pitch', penn_batch_size=4096):
    """Estimate utterance-level pitch mean and standard deviation with penn."""
|
if isinstance(batch[audio_column_name], list): |
|
utterance_pitch_mean = [] |
|
utterance_pitch_std = [] |
|
for sample in batch[audio_column_name]: |
|
(pitch, periodicity) = penn.from_audio(torch.tensor(sample['array'][None, :]).float(), sample['sampling_rate'], hopsize=hopsize, fmin=fmin, fmax=fmax, checkpoint=checkpoint, batch_size=penn_batch_size, center=center, interp_unvoiced_at=interp_unvoiced_at, gpu=(rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank) |
|
utterance_pitch_mean.append(pitch.mean().cpu()) |
|
utterance_pitch_std.append(pitch.std().cpu()) |
|
batch[f'{output_column_name}_mean'] = utterance_pitch_mean |
|
batch[f'{output_column_name}_std'] = utterance_pitch_std |
|
else: |
|
sample = batch[audio_column_name] |
|
(pitch, periodicity) = penn.from_audio(torch.tensor(sample['array'][None, :]).float(), sample['sampling_rate'], hopsize=hopsize, fmin=fmin, fmax=fmax, checkpoint=checkpoint, batch_size=penn_batch_size, center=center, interp_unvoiced_at=interp_unvoiced_at, gpu=(rank or 0) % torch.cuda.device_count() if torch.cuda.device_count() > 0 else rank) |
|
batch[f'{output_column_name}_mean'] = pitch.mean().cpu() |
|
batch[f'{output_column_name}_std'] = pitch.std().cpu() |
|
return batch |
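
# Usage sketch (illustrative; not part of the original file). Assuming `ds` is a
# `datasets` Dataset with an 'audio' column, main.py casts the audio to 16 kHz before
# pitch estimation and passes `with_rank=True` so each worker can pin itself to a GPU:
#
#     from datasets import Audio
#     ds = ds.cast_column("audio", Audio(sampling_rate=16000))
#     ds = ds.map(pitch_apply, batched=True, batch_size=2, with_rank=True,
#                 fn_kwargs={"audio_column_name": "audio", "penn_batch_size": 4096})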
|
|
|
# File: dataspeech-main/dataspeech/gpu_enrichments/snr_and_reverb.py |
|
from pyannote.audio import Model |
|
from pathlib import Path |
|
from brouhaha.pipeline import RegressiveActivityDetectionPipeline |
|
import torch |
|
from huggingface_hub import hf_hub_download |
|
import numpy as np |
|
model = None |
|
# Converts segment boundaries in seconds into indices of the model's output frames:
# one prediction frame per 270 samples of 16 kHz audio (~59.3 frames per second).
ratio = 16000 / 270
|
|
|
def snr_apply(batch, rank=None, audio_column_name='audio', batch_size=32):
    """Estimate per-sample SNR, C50 reverberation and total speech duration with the Brouhaha pipeline."""
|
global model |
|
if model is None: |
|
model = Model.from_pretrained(Path(hf_hub_download(repo_id='ylacombe/brouhaha-best', filename='best.ckpt')), strict=False) |
|
if rank is not None or torch.cuda.device_count() > 0: |
|
device = f'cuda:{(rank or 0) % torch.cuda.device_count()}' |
|
model.to(device) |
|
pipeline = RegressiveActivityDetectionPipeline(segmentation=model, batch_size=batch_size) |
|
if rank: |
|
pipeline.to(torch.device(device)) |
|
device = pipeline._models['segmentation'].device |
|
if isinstance(batch[audio_column_name], list): |
|
snr = [] |
|
c50 = [] |
|
vad_durations = [] |
|
for sample in batch[audio_column_name]: |
|
res = pipeline({'sample_rate': sample['sampling_rate'], 'waveform': torch.tensor(sample['array'][None, :]).to(device).float()}) |
|
mask = np.full(res['snr'].shape, False) |
|
for (segment, _) in res['annotation'].itertracks(): |
|
start = int(segment.start * ratio) |
|
end = int(segment.end * ratio) |
|
mask[start:end] = True |
|
            # Restrict the mask to frames inside detected speech segments whose SNR and
            # C50 predictions are not both exactly zero.
            mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask
|
vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks())) |
|
snr.append(res['snr'][mask].mean()) |
|
c50.append(res['c50'][mask].mean()) |
|
vad_durations.append(np.float32(vad_duration)) |
|
batch['snr'] = snr |
|
batch['c50'] = c50 |
|
batch['speech_duration'] = vad_durations |
|
else: |
|
res = pipeline({'sample_rate': batch[audio_column_name]['sampling_rate'], 'waveform': torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float()}) |
|
mask = np.full(res['snr'].shape, False) |
|
for (segment, _) in res['annotation'].itertracks(): |
|
start = int(segment.start * ratio) |
|
end = int(segment.end * ratio) |
|
mask[start:end] = True |
|
mask = ~((res['snr'] == 0.0) & (res['c50'] == 0.0)) & mask |
|
vad_duration = sum(map(lambda x: x[0].duration, res['annotation'].itertracks())) |
|
batch['snr'] = res['snr'][mask].mean() |
|
batch['c50'] = res['c50'][mask].mean() |
|
batch['speech_duration'] = vad_duration |
|
return batch |
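
# Usage sketch (illustrative; not part of the original file). Assuming `ds` is a
# `datasets` Dataset with an 'audio' column, the pipeline adds one scalar per sample
# for SNR and C50, plus the total detected speech duration in seconds:
#
#     ds = ds.map(snr_apply, batched=True, batch_size=2, with_rank=True,
#                 fn_kwargs={"audio_column_name": "audio"})
#     # ds now has 'snr', 'c50' and 'speech_duration' columns.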
|
|
|
# File: dataspeech-main/dataspeech/gpu_enrichments/squim.py |
|
from torchaudio.pipelines import SQUIM_OBJECTIVE |
|
import torch |
|
import torchaudio |
|
model = None |
|
# Cap inputs at 15 seconds of audio, expressed in samples at the SQUIM sample rate.
max_audio_length = 15 * SQUIM_OBJECTIVE.sample_rate
|
|
|
def squim_apply(batch, rank=None, audio_column_name='audio'):
    """Estimate reference-free STOI, PESQ and SI-SDR speech-quality metrics with TorchAudio-SQUIM."""
|
global model |
|
if model is None: |
|
model = SQUIM_OBJECTIVE.get_model() |
|
if rank is not None or torch.cuda.device_count() > 0: |
|
device = f'cuda:{(rank or 0) % torch.cuda.device_count()}' |
|
model.to(device) |
|
else: |
|
device = 'cpu' |
|
if isinstance(batch[audio_column_name], list): |
|
sdr = [] |
|
pesq = [] |
|
stoi = [] |
|
for sample in batch[audio_column_name]: |
|
waveform = torchaudio.functional.resample(torch.tensor(sample['array'])[None, :].to(device).float(), sample['sampling_rate'], SQUIM_OBJECTIVE.sample_rate) |
|
with torch.no_grad(): |
|
waveform = waveform[:, :min(max_audio_length, waveform.shape[1])] |
|
(stoi_sample, pesq_sample, sdr_sample) = model(waveform) |
|
sdr.append(sdr_sample.cpu()[0]) |
|
pesq.append(pesq_sample.cpu()[0]) |
|
stoi.append(stoi_sample.cpu()[0]) |
|
batch['sdr'] = sdr |
|
batch['pesq'] = pesq |
|
batch['stoi'] = stoi |
|
else: |
|
waveform = torchaudio.functional.resample(torch.tensor(batch[audio_column_name]['array'][None, :]).to(device).float(), batch[audio_column_name]['sampling_rate'], SQUIM_OBJECTIVE.sample_rate) |
|
with torch.no_grad(): |
|
(stoi_sample, pesq_sample, sdr_sample) = model(waveform) |
|
batch['sdr'] = sdr_sample.cpu()[0] |
|
batch['pesq'] = pesq_sample.cpu()[0] |
|
batch['stoi'] = stoi_sample.cpu()[0] |
|
return batch |
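
# Usage sketch (illustrative; not part of the original file). Assuming `ds` is a
# `datasets` Dataset with an 'audio' column, waveforms are resampled internally to
# SQUIM_OBJECTIVE.sample_rate, so the dataset can keep its native sampling rate:
#
#     ds = ds.map(squim_apply, batched=True, batch_size=2, with_rank=True,
#                 fn_kwargs={"audio_column_name": "audio"})
#     # ds now has 'sdr', 'pesq' and 'stoi' columns.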
|
|
|
# File: dataspeech-main/main.py |
|
from datasets import load_dataset, Audio |
|
from multiprocess import set_start_method |
|
from dataspeech import rate_apply, pitch_apply, snr_apply, squim_apply |
|
import torch |
|
import argparse |
|
if __name__ == '__main__': |
|
set_start_method('spawn') |
|
parser = argparse.ArgumentParser() |
|
parser.add_argument('dataset_name', type=str, help='Path or name of the dataset. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/loading_methods#datasets.load_dataset.path') |
|
parser.add_argument('--configuration', default=None, type=str, help='Dataset configuration to use, if necessary.') |
|
    parser.add_argument('--output_dir', default=None, type=str, help='If specified, save the enriched dataset to disk at this path.')
|
parser.add_argument('--repo_id', default=None, type=str, help='If specified, push the dataset to the hub.') |
|
parser.add_argument('--audio_column_name', default='audio', type=str, help='Column name of the audio column to be enriched.') |
|
parser.add_argument('--text_column_name', default='text', type=str, help='Text column name.') |
|
    parser.add_argument('--rename_column', action='store_true', help="If set, rename the audio and text columns to 'audio' and 'text'. Useful if you want to merge datasets afterwards.")
|
    parser.add_argument('--cpu_num_workers', default=1, type=int, help="Number of CPU workers for transformations that don't use GPUs or when no GPUs are available.")
|
parser.add_argument('--cpu_writer_batch_size', default=1000, type=int, help="writer_batch_size for transformations that don't use GPUs. See: https://huggingface.co/docs/datasets/v2.17.0/en/package_reference/main_classes#datasets.Dataset.map.writer_batch_size") |
|
    parser.add_argument('--batch_size', default=2, type=int, help='Number of samples per batch passed to each worker for operations that run on GPUs.')
|
    parser.add_argument('--penn_batch_size', default=4096, type=int, help="Pitch estimation chunks audio into smaller pieces and processes them in batches. This specifies the batch size. If you are using a GPU, pick a batch size that doesn't cause out-of-memory errors.")
|
    parser.add_argument('--num_workers_per_gpu_for_pitch', default=1, type=int, help='Number of workers per GPU for pitch estimation if GPUs are available. Defaults to 1 when GPUs are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
|
    parser.add_argument('--num_workers_per_gpu_for_snr', default=1, type=int, help='Number of workers per GPU for the SNR and reverberation estimation if GPUs are available. Defaults to 1 when GPUs are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
|
    parser.add_argument('--apply_squim_quality_estimation', action='store_true', help='If set, will also run torchaudio-squim quality estimation (SI-SDR, STOI and PESQ).')
|
    parser.add_argument('--num_workers_per_gpu_for_squim', default=1, type=int, help='Number of workers per GPU for the SI-SDR, STOI and PESQ estimation if GPUs are available. Defaults to 1 when GPUs are available. Useful if you want multiple processes per GPU to maximise GPU usage.')
|
args = parser.parse_args() |
|
if args.configuration: |
|
dataset = load_dataset(args.dataset_name, args.configuration, num_proc=args.cpu_num_workers) |
|
else: |
|
dataset = load_dataset(args.dataset_name, num_proc=args.cpu_num_workers) |
|
audio_column_name = 'audio' if args.rename_column else args.audio_column_name |
|
text_column_name = 'text' if args.rename_column else args.text_column_name |
|
if args.rename_column: |
|
dataset = dataset.rename_columns({args.audio_column_name: 'audio', args.text_column_name: 'text'}) |
|
if args.apply_squim_quality_estimation: |
|
print('Compute SI-SDR, PESQ, STOI') |
|
squim_dataset = dataset.map(squim_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_squim if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name}) |
|
print('Compute pitch') |
|
pitch_dataset = dataset.cast_column(audio_column_name, Audio(sampling_rate=16000)).map(pitch_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_pitch if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name, 'penn_batch_size': args.penn_batch_size}) |
|
print('Compute snr and reverb') |
|
snr_dataset = dataset.map(snr_apply, batched=True, batch_size=args.batch_size, with_rank=True if torch.cuda.device_count() > 0 else False, num_proc=torch.cuda.device_count() * args.num_workers_per_gpu_for_snr if torch.cuda.device_count() > 0 else args.cpu_num_workers, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name}) |
|
print('Compute speaking rate') |
|
if 'speech_duration' in snr_dataset[next(iter(snr_dataset.keys()))].features: |
|
rate_dataset = snr_dataset.map(rate_apply, with_rank=False, num_proc=args.cpu_num_workers, writer_batch_size=args.cpu_writer_batch_size, fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name}) |
|
else: |
|
rate_dataset = dataset.map(rate_apply, with_rank=False, num_proc=args.cpu_num_workers, writer_batch_size=args.cpu_writer_batch_size, remove_columns=[audio_column_name], fn_kwargs={'audio_column_name': audio_column_name, 'text_column_name': text_column_name}) |
|
for split in dataset.keys(): |
|
dataset[split] = pitch_dataset[split].add_column('snr', snr_dataset[split]['snr']).add_column('c50', snr_dataset[split]['c50']) |
|
        if 'speech_duration' in snr_dataset[split].features:
|
dataset[split] = dataset[split].add_column('speech_duration', snr_dataset[split]['speech_duration']) |
|
dataset[split] = dataset[split].add_column('speaking_rate', rate_dataset[split]['speaking_rate']).add_column('phonemes', rate_dataset[split]['phonemes']) |
|
if args.apply_squim_quality_estimation: |
|
dataset[split] = dataset[split].add_column('stoi', squim_dataset[split]['stoi']).add_column('si-sdr', squim_dataset[split]['sdr']).add_column('pesq', squim_dataset[split]['pesq']) |
|
if args.output_dir: |
|
print('Saving to disk...') |
|
dataset.save_to_disk(args.output_dir) |
|
if args.repo_id: |
|
print('Pushing to the hub...') |
|
if args.configuration: |
|
dataset.push_to_hub(args.repo_id, args.configuration) |
|
else: |
|
dataset.push_to_hub(args.repo_id) |
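
# Example invocation (illustrative; the dataset name and repo id are placeholders):
#
#     python main.py "blabble-io/libritts_r" \
#         --configuration "dev" \
#         --output_dir ./libritts_r_enriched \
#         --repo_id "your-username/libritts_r_enriched" \
#         --apply_squim_quality_estimation \
#         --cpu_num_workers 8 \
#         --batch_size 16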
|
|
|
|