import argparse
import os
import glob
from helpers import *
from faster_whisper import WhisperModel
import whisperx
import torch
from pydub import AudioSegment
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
import logging
import shutil
import srt
from tqdm import tqdm
import concurrent.futures

# Compute types used by faster-whisper on each device
mtypes = {"cpu": "int8", "cuda": "float16"}


def setup_logging():
    logging.basicConfig(
        level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s"
    )


def process_audio_file(audio_file, args):
    logging.info(f"Processing file: {audio_file}")

    if args.stemming:
        # Isolate vocals with Demucs; fall back to the original audio on failure
        logging.info("Performing source separation...")
        return_code = os.system(
            f'python3 -m demucs.separate -n htdemucs --two-stems=vocals "{audio_file}" -o "temp_outputs"'
        )
        if return_code != 0:
            logging.warning("Source splitting failed, using original audio file.")
            vocal_target = audio_file
        else:
            vocal_target = os.path.join(
                "temp_outputs",
                "htdemucs",
                os.path.splitext(os.path.basename(audio_file))[0],
                "vocals.wav",
            )
    else:
        vocal_target = audio_file

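    # Transcribe with the helpers in transcription_helpers: a non-zero
    # --batch-size selects transcribe_batched, 0 uses the sequential path.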
    logging.info("Transcribing audio...")
    if args.batch_size != 0:
        from transcription_helpers import transcribe_batched

        whisper_results, language = transcribe_batched(
            vocal_target,
            args.language,
            args.batch_size,
            args.model_name,
            mtypes[args.device],
            args.suppress_numerals,
            args.device,
        )
    else:
        from transcription_helpers import transcribe

        whisper_results, language = transcribe(
            vocal_target,
            args.language,
            args.model_name,
            mtypes[args.device],
            args.suppress_numerals,
            args.device,
        )

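    # Refine word timestamps with whisperx forced alignment when a wav2vec2
    # model exists for the detected language.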
    logging.info("Aligning transcription...")
    if language in wav2vec2_langs:
        alignment_model, metadata = whisperx.load_align_model(
            language_code=language, device=args.device
        )
        result_aligned = whisperx.align(
            whisper_results, alignment_model, metadata, vocal_target, args.device
        )
        word_timestamps = filter_missing_timestamps(
            result_aligned["word_segments"],
            initial_timestamp=whisper_results[0].get("start"),
            final_timestamp=whisper_results[-1].get("end"),
        )
        del alignment_model
        torch.cuda.empty_cache()
    else:
        # No alignment model for this language: keep Whisper's own word timings,
        # where each word entry is a (start, end, text, ...) tuple
        word_timestamps = []
        for segment in whisper_results:
            for word in segment["words"]:
                word_timestamps.append(
                    {"word": word[2], "start": word[0], "end": word[1]}
                )

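    # The NeMo diarizer runs on a single-channel file, so down-mix the audio
    # and export it as temp_outputs/mono_file.wav.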
    logging.info("Converting audio to mono...")
    sound = AudioSegment.from_file(vocal_target).set_channels(1)
    ROOT = os.getcwd()
    temp_path = os.path.join(ROOT, "temp_outputs")
    os.makedirs(temp_path, exist_ok=True)
    sound.export(os.path.join(temp_path, "mono_file.wav"), format="wav")

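    # Run NeMo's multi-scale diarization decoder; it writes its prediction to
    # pred_rttms/mono_file.rttm inside temp_path.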
    logging.info("Performing diarization...")
    msdd_model = NeuralDiarizer(cfg=create_config(temp_path)).to(args.device)
    msdd_model.diarize()
    del msdd_model
    torch.cuda.empty_cache()

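    # Parse the predicted RTTM into [start_ms, end_ms, speaker_id] triples:
    # after splitting on single spaces, field 5 holds the onset in seconds,
    # field 8 the duration, and field 11 the speaker label.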
    speaker_ts = []
    with open(os.path.join(temp_path, "pred_rttms", "mono_file.rttm"), "r") as f:
        lines = f.readlines()
        for line in lines:
            line_list = line.split(" ")
            s = int(float(line_list[5]) * 1000)
            e = s + int(float(line_list[8]) * 1000)
            speaker_ts.append([s, e, int(line_list[11].split("_")[-1])])

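    # Map each word to its speaker, realign the mapping on punctuation, and
    # group the words into speaker-attributed sentences.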
    wsm = get_words_speaker_mapping(word_timestamps, speaker_ts, "start")
    wsm = get_realigned_ws_mapping_with_punctuation(wsm)
    ssm = get_sentences_speaker_mapping(wsm, speaker_ts)

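    # Build the output dataset: autodiarization/<basename>/speaker_<id>/ holds
    # one WAV per SRT segment plus a pipe-delimited metadata.csv per speaker.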
    autodiarization_dir = "autodiarization"
    os.makedirs(autodiarization_dir, exist_ok=True)

    base_name = os.path.splitext(os.path.basename(audio_file))[0]
    audio_dir = os.path.join(autodiarization_dir, base_name)
    os.makedirs(audio_dir, exist_ok=True)

    speaker_metadata = {}

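    # Write the speaker-labelled SRT next to the input file, then parse it back
    # to drive per-segment audio extraction.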
    srt_file = f"{os.path.splitext(audio_file)[0]}.srt"
    with open(srt_file, "w", encoding="utf-8") as f:
        write_srt(ssm, f)

    with open(srt_file, "r", encoding="utf-8") as f:
        srt_data = f.read()

    srt_segments = list(srt.parse(srt_data))

    logging.info("Processing audio segments...")
    for segment in tqdm(srt_segments, desc="Processing segments"):
        start_time = segment.start.total_seconds() * 1000
        end_time = segment.end.total_seconds() * 1000
        speaker_name, transcript = segment.content.split(": ", 1)

        # Captions are written as "Speaker N: text", so the trailing token of
        # the name is the numeric speaker id
        speaker_id = int(speaker_name.split(" ")[-1])

        # Slice the segment out of the mono audio (pydub indexes in ms) and
        # save it under the speaker's directory
        segment_audio = sound[start_time:end_time]
        segment_path = os.path.join(
            audio_dir,
            f"speaker_{speaker_id}",
            f"speaker_{speaker_id}_{segment.index:03d}.wav",
        )
        os.makedirs(os.path.dirname(segment_path), exist_ok=True)
        segment_audio.export(segment_path, format="wav")

        if speaker_name not in speaker_metadata:
            speaker_metadata[speaker_name] = []
        speaker_metadata[speaker_name].append(
            f"speaker_{speaker_id}_{segment.index:03d}|{speaker_name}|{transcript}"
        )

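    # Write one pipe-delimited metadata.csv per speaker directory
    # (<clip name>|<speaker name>|<transcript> per line)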
    for speaker_name, metadata in speaker_metadata.items():
        speaker_id = int(speaker_name.split(" ")[-1])
        speaker_dir = os.path.join(audio_dir, f"speaker_{speaker_id}")
        with open(os.path.join(speaker_dir, "metadata.csv"), "w", encoding="utf-8") as f:
            f.write("\n".join(metadata))

    cleanup(temp_path)
    logging.info(f"Finished processing {audio_file}")


def main():
    setup_logging()

    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-a", "--audio", help="name of the target audio file or directory", required=True
    )
    parser.add_argument(
        "--no-stem",
        action="store_false",
        dest="stemming",
        default=True,
        help="Disables source separation. This helps with long files that don't contain a lot of music.",
    )
    parser.add_argument(
        "--suppress_numerals",
        action="store_true",
        dest="suppress_numerals",
        default=False,
        help="Suppresses numerical digits. This improves diarization accuracy but converts all digits into written text.",
    )
    parser.add_argument(
        "--whisper-model",
        dest="model_name",
        default="medium.en",
        help="name of the Whisper model to use",
    )
    parser.add_argument(
        "--batch-size",
        type=int,
        dest="batch_size",
        default=8,
        help="Batch size for batched inference; reduce if you run out of memory, or set to 0 for non-batched inference",
    )
    parser.add_argument(
        "--language",
        type=str,
        default=None,
        choices=whisper_langs,
        help="Language spoken in the audio; leave unset to perform language detection",
    )
    parser.add_argument(
        "--device",
        dest="device",
        default="cuda" if torch.cuda.is_available() else "cpu",
        help="if you have a GPU use 'cuda', otherwise 'cpu'",
    )
    args = parser.parse_args()

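    # A directory argument is expanded to every .wav/.mp3 file it contains;
    # a plain file path is processed directly.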
    if os.path.isdir(args.audio):
        audio_files = glob.glob(os.path.join(args.audio, "*.wav")) + glob.glob(
            os.path.join(args.audio, "*.mp3")
        )
        logging.info(f"Found {len(audio_files)} audio files in the directory.")
        # max_workers=1: each run writes to the shared temp_outputs/mono_file.wav
        # and pred_rttms output, so files must be processed one at a time.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
            list(
                tqdm(
                    executor.map(lambda f: process_audio_file(f, args), audio_files),
                    total=len(audio_files),
                    desc="Processing files",
                )
            )
    else:
        process_audio_file(args.audio, args)


if __name__ == "__main__":
    main()