import argparse
import os
import glob
from helpers import (
    cleanup,
    create_config,
    filter_missing_timestamps,
    get_realigned_ws_mapping_with_punctuation,
    get_sentences_speaker_mapping,
    get_words_speaker_mapping,
    wav2vec2_langs,
    whisper_langs,
    write_srt,
)
import whisperx
import torch
from pydub import AudioSegment
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
import logging
import srt
from tqdm import tqdm
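# faster-whisper compute type per device: int8 quantization on CPU,
# float16 on GPU.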
mtypes = {"cpu": "int8", "cuda": "float16"}
def setup_logging():
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
def process_audio_file(audio_file, args):
logging.info(f"Processing file: {audio_file}")
if args.stemming:
# Isolate vocals from the rest of the audio
logging.info("Performing source separation...")
return_code = os.system(
f'python3 -m demucs.separate -n htdemucs --two-stems=vocals "{audio_file}" -o "temp_outputs"'
)
if return_code != 0:
logging.warning("Source splitting failed, using original audio file.")
vocal_target = audio_file
else:
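            # Demucs writes the isolated vocal stem to
            # temp_outputs/htdemucs/<track name>/vocals.wav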
vocal_target = os.path.join(
"temp_outputs",
"htdemucs",
os.path.splitext(os.path.basename(audio_file))[0],
"vocals.wav",
)
else:
vocal_target = audio_file
# Transcribe the audio file
logging.info("Transcribing audio...")
if args.batch_size != 0:
from transcription_helpers import transcribe_batched
whisper_results, language = transcribe_batched(
vocal_target,
args.language,
args.batch_size,
args.model_name,
mtypes[args.device],
args.suppress_numerals,
args.device,
)
else:
from transcription_helpers import transcribe
whisper_results, language = transcribe(
vocal_target,
args.language,
args.model_name,
mtypes[args.device],
args.suppress_numerals,
args.device,
)
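    # Both paths return (whisper_results, language): a list of segment dicts
    # and the detected or user-specified language code.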
logging.info("Aligning transcription...")
if language in wav2vec2_langs:
alignment_model, metadata = whisperx.load_align_model(
language_code=language, device=args.device
)
result_aligned = whisperx.align(
whisper_results, alignment_model, metadata, vocal_target, args.device
)
word_timestamps = filter_missing_timestamps(
result_aligned["word_segments"],
initial_timestamp=whisper_results[0].get("start"),
final_timestamp=whisper_results[-1].get("end"),
)
del alignment_model
torch.cuda.empty_cache()
    else:
        # No wav2vec2 alignment model for this language: fall back to the
        # word timestamps faster-whisper already produced. Its word entries
        # are (start, end, text, ...) tuples, which the batched whisperx
        # path does not provide in this shape.
        assert args.batch_size == 0, "Unsupported language, re-run with --batch-size 0"
        word_timestamps = []
        for segment in whisper_results:
            for word in segment["words"]:
                word_timestamps.append({"word": word[2], "start": word[0], "end": word[1]})
# Convert audio to mono for NeMo compatibility
logging.info("Converting audio to mono...")
sound = AudioSegment.from_file(vocal_target).set_channels(1)
ROOT = os.getcwd()
temp_path = os.path.join(ROOT, "temp_outputs")
os.makedirs(temp_path, exist_ok=True)
sound.export(os.path.join(temp_path, "mono_file.wav"), format="wav")
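    # The fixed file name matters: the diarizer's output is read back from
    # temp_outputs/pred_rttms/mono_file.rttm below.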
# Initialize NeMo MSDD diarization model
logging.info("Performing diarization...")
msdd_model = NeuralDiarizer(cfg=create_config(temp_path)).to(args.device)
msdd_model.diarize()
del msdd_model
torch.cuda.empty_cache()
    # Read speaker turns (start ms, end ms, speaker id) from the RTTM file
speaker_ts = []
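    # Each RTTM line is of the form:
    #   SPEAKER mono_file 1   <start>   <duration> <NA> <NA> speaker_<id> <NA> <NA>
    # The field indices below (5, 8, 11) appear to reflect extra padding
    # spaces in NeMo's output, since split(" ") counts empty strings as fields.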
with open(os.path.join(temp_path, "pred_rttms", "mono_file.rttm"), "r") as f:
lines = f.readlines()
for line in lines:
line_list = line.split(" ")
s = int(float(line_list[5]) * 1000)
e = s + int(float(line_list[8]) * 1000)
speaker_ts.append([s, e, int(line_list[11].split("_")[-1])])
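    # Map each word to the speaker active at its start time, snap speaker
    # changes to punctuation boundaries, then group the words into
    # speaker-labelled sentences.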
wsm = get_words_speaker_mapping(word_timestamps, speaker_ts, "start")
wsm = get_realigned_ws_mapping_with_punctuation(wsm)
ssm = get_sentences_speaker_mapping(wsm, speaker_ts)
# Create the autodiarization directory structure
autodiarization_dir = "autodiarization"
os.makedirs(autodiarization_dir, exist_ok=True)
# Get the base name of the audio file
base_name = os.path.splitext(os.path.basename(audio_file))[0]
# Create a subdirectory for the current audio file
audio_dir = os.path.join(autodiarization_dir, base_name)
os.makedirs(audio_dir, exist_ok=True)
# Create a dictionary to store speaker-specific metadata
speaker_metadata = {}
# Generate the SRT file
srt_file = f"{os.path.splitext(audio_file)[0]}.srt"
with open(srt_file, "w", encoding="utf-8") as f:
write_srt(ssm, f)
# Read the generated SRT file
with open(srt_file, "r", encoding="utf-8") as f:
srt_data = f.read()
# Parse the SRT data
srt_segments = list(srt.parse(srt_data))
# Process each segment in the SRT data
logging.info("Processing audio segments...")
for segment in tqdm(srt_segments, desc="Processing segments"):
start_time = segment.start.total_seconds() * 1000
end_time = segment.end.total_seconds() * 1000
speaker_name, transcript = segment.content.split(": ", 1)
# Extract the speaker ID from the speaker name
speaker_id = int(speaker_name.split(" ")[-1])
# Split the audio segment
segment_audio = sound[start_time:end_time]
segment_path = os.path.join(audio_dir, f"speaker_{speaker_id}", f"speaker_{speaker_id}_{segment.index:03d}.wav")
os.makedirs(os.path.dirname(segment_path), exist_ok=True)
segment_audio.export(segment_path, format="wav")
# Store the metadata for each speaker
if speaker_name not in speaker_metadata:
speaker_metadata[speaker_name] = []
speaker_metadata[speaker_name].append(f"speaker_{speaker_id}_{segment.index:03d}|{speaker_name}|{transcript}")
# Write the metadata.csv file for each speaker
for speaker_name, metadata in speaker_metadata.items():
speaker_id = int(speaker_name.split(" ")[-1])
speaker_dir = os.path.join(audio_dir, f"speaker_{speaker_id}")
with open(os.path.join(speaker_dir, "metadata.csv"), "w", encoding="utf-8") as f:
f.write("\n".join(metadata))
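    # Resulting layout, for example:
    #   autodiarization/<base_name>/speaker_0/speaker_0_001.wav
    #   autodiarization/<base_name>/speaker_0/metadata.csv
    # where each metadata.csv row is "clip_name|speaker|transcript"
    # (LJSpeech-style).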
# Clean up temporary files
cleanup(temp_path)
logging.info(f"Finished processing {audio_file}")
def main():
setup_logging()
parser = argparse.ArgumentParser()
parser.add_argument(
"-a", "--audio", help="name of the target audio file or directory", required=True
)
parser.add_argument(
"--no-stem",
action="store_false",
dest="stemming",
default=True,
help="Disables source separation. This helps with long files that don't contain a lot of music.",
)
parser.add_argument(
"--suppress_numerals",
action="store_true",
dest="suppress_numerals",
default=False,
help="Suppresses Numerical Digits. This helps the diarization accuracy but converts all digits into written text.",
)
parser.add_argument(
"--whisper-model",
dest="model_name",
default="medium.en",
help="name of the Whisper model to use",
)
parser.add_argument(
"--batch-size",
type=int,
dest="batch_size",
default=8,
help="Batch size for batched inference, reduce if you run out of memory, set to 0 for non-batched inference",
)
parser.add_argument(
"--language",
type=str,
default=None,
choices=whisper_langs,
help="Language spoken in the audio, specify None to perform language detection",
)
parser.add_argument(
"--device",
dest="device",
default="cuda" if torch.cuda.is_available() else "cpu",
help="if you have a GPU use 'cuda', otherwise 'cpu'",
)
args = parser.parse_args()
if os.path.isdir(args.audio):
audio_files = glob.glob(os.path.join(args.audio, "*.wav")) + glob.glob(os.path.join(args.audio, "*.mp3"))
logging.info(f"Found {len(audio_files)} audio files in the directory.")
        # process_audio_file writes to a shared temp_outputs directory
        # (mono_file.wav, pred_rttms), so files are processed sequentially;
        # concurrent runs would clobber each other's intermediate files.
        for audio_file in tqdm(audio_files, desc="Processing files"):
            process_audio_file(audio_file, args)
else:
process_audio_file(args.audio, args)
if __name__ == "__main__":
    main()
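
# Example invocations (the script name and file paths are placeholders):
#   python diarize.py -a meeting.wav
#   python diarize.py -a ./recordings --no-stem --batch-size 0 --language en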