Update test3.py
test3.py  CHANGED
@@ -12,14 +12,13 @@ import shutil
 import srt
 from tqdm import tqdm
 import concurrent.futures
-import gc
 
 mtypes = {"cpu": "int8", "cuda": "float16"}
 
 def setup_logging():
     logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
 
-def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
+def process_audio_file(audio_file, args):
     logging.info(f"Processing file: {audio_file}")
 
     if args.stemming:
@@ -49,7 +48,8 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
             vocal_target,
             args.language,
             args.batch_size,
-
+            args.model_name,
+            mtypes[args.device],
             args.suppress_numerals,
             args.device,
         )
@@ -58,13 +58,17 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
         whisper_results, language = transcribe(
             vocal_target,
             args.language,
-
+            args.model_name,
+            mtypes[args.device],
             args.suppress_numerals,
             args.device,
         )
 
     logging.info("Aligning transcription...")
     if language in wav2vec2_langs:
+        alignment_model, metadata = whisperx.load_align_model(
+            language_code=language, device=args.device
+        )
         result_aligned = whisperx.align(
             whisper_results, alignment_model, metadata, vocal_target, args.device
         )
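
Both transcribe calls now receive the model name and the compute type (mtypes[args.device]) instead of a preloaded WhisperModel, so each file can build and release its own faster-whisper instance. A minimal sketch of a helper compatible with the non-batched call above, assuming the faster-whisper backend that the removed process_batch code used; the helper name, return shape, and suppress_numerals handling are assumptions, not something this commit defines:

    from faster_whisper import WhisperModel

    def transcribe(audio_path, language, model_name, compute_type, suppress_numerals, device):
        # Build the model on demand; compute_type is "int8" on CPU, "float16" on CUDA.
        model = WhisperModel(model_name, device=device, compute_type=compute_type)
        segments, info = model.transcribe(audio_path, language=language, vad_filter=True)
        results = [{"text": s.text, "start": s.start, "end": s.end} for s in segments]
        # suppress_numerals would normally translate into suppressed tokens here (omitted in this sketch).
        del model
        return results, info.language
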
@@ -73,6 +77,8 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
             initial_timestamp=whisper_results[0].get("start"),
             final_timestamp=whisper_results[-1].get("end"),
         )
+        del alignment_model
+        torch.cuda.empty_cache()
     else:
         word_timestamps = []
         for segment in whisper_results:
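
Because the aligner is now created inside process_audio_file, it is deleted and the CUDA cache is emptied as soon as alignment finishes, so the diarization stage starts with that GPU memory freed. The pattern in isolation, built only from the calls visible in this diff (the wrapper function itself is hypothetical):

    import torch
    import whisperx

    def align_words(whisper_results, audio_path, language, device):
        # Load the language-specific wav2vec2 aligner only when it is needed.
        alignment_model, metadata = whisperx.load_align_model(language_code=language, device=device)
        result_aligned = whisperx.align(whisper_results, alignment_model, metadata, audio_path, device)
        # Release the aligner before the next stage (diarization) runs.
        del alignment_model
        torch.cuda.empty_cache()
        return result_aligned
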
@@ -85,8 +91,7 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
     ROOT = os.getcwd()
     temp_path = os.path.join(ROOT, "temp_outputs")
     os.makedirs(temp_path, exist_ok=True)
-
-    sound.export(mono_file, format="wav")
+    sound.export(os.path.join(temp_path, "mono_file.wav"), format="wav")
 
     # Initialize NeMo MSDD diarization model
     logging.info("Performing diarization...")
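
The mono export now writes straight to temp_outputs/mono_file.wav instead of a separately computed mono_file path, presumably the file the NeMo MSDD step initialized just below reads. A sketch of that preparation, assuming sound is a pydub AudioSegment loaded from the vocal track earlier (the input path below is hypothetical):

    import os
    from pydub import AudioSegment

    temp_path = os.path.join(os.getcwd(), "temp_outputs")
    os.makedirs(temp_path, exist_ok=True)

    # Collapse to one channel and write the file the diarizer will consume.
    sound = AudioSegment.from_file("vocals.wav").set_channels(1)  # hypothetical input
    sound.export(os.path.join(temp_path, "mono_file.wav"), format="wav")
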
@@ -120,6 +125,9 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
     audio_dir = os.path.join(autodiarization_dir, base_name)
     os.makedirs(audio_dir, exist_ok=True)
 
+    # Create a dictionary to store speaker-specific metadata
+    speaker_metadata = {}
+
     # Generate the SRT file
     srt_file = f"{os.path.splitext(audio_file)[0]}.srt"
     with open(srt_file, "w", encoding="utf-8") as f:
@@ -134,7 +142,7 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
 
     # Process each segment in the SRT data
     logging.info("Processing audio segments...")
-    for segment in tqdm(srt_segments, desc="Processing segments"
+    for segment in tqdm(srt_segments, desc="Processing segments"):
         start_time = segment.start.total_seconds() * 1000
         end_time = segment.end.total_seconds() * 1000
         speaker_name, transcript = segment.content.split(": ", 1)
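
The segment loop converts each SRT timestamp to milliseconds because pydub AudioSegment objects are sliced in milliseconds. Stripped of the surrounding bookkeeping, the slicing step looks roughly like this; the paths are illustrative and the variable names mirror the diff:

    import srt
    from pydub import AudioSegment

    sound = AudioSegment.from_wav("temp_outputs/mono_file.wav")
    with open("example.srt", encoding="utf-8") as f:      # hypothetical SRT path
        srt_segments = list(srt.parse(f.read()))

    for segment in srt_segments:
        start_ms = int(segment.start.total_seconds() * 1000)
        end_ms = int(segment.end.total_seconds() * 1000)
        speaker_name, transcript = segment.content.split(": ", 1)
        clip = sound[start_ms:end_ms]                     # pydub slices by milliseconds
        clip.export(f"segment_{segment.index:03d}.wav", format="wav")
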
@@ -148,33 +156,21 @@ def process_audio_file(audio_file, args, whisper_model, alignment_model, metadata):
         os.makedirs(os.path.dirname(segment_path), exist_ok=True)
         segment_audio.export(segment_path, format="wav")
 
-        #
+        # Store the metadata for each speaker
+        if speaker_name not in speaker_metadata:
+            speaker_metadata[speaker_name] = []
+        speaker_metadata[speaker_name].append(f"speaker_{speaker_id}_{segment.index:03d}|{speaker_name}|{transcript}")
+
+    # Write the metadata.csv file for each speaker
+    for speaker_name, metadata in speaker_metadata.items():
+        speaker_id = int(speaker_name.split(" ")[-1])
         speaker_dir = os.path.join(audio_dir, f"speaker_{speaker_id}")
-        with open(os.path.join(speaker_dir, "metadata.csv"), "
-        f.write(
+        with open(os.path.join(speaker_dir, "metadata.csv"), "w", encoding="utf-8") as f:
+            f.write("\n".join(metadata))
 
     # Clean up temporary files
     cleanup(temp_path)
     logging.info(f"Finished processing {audio_file}")
-    gc.collect()
-
-def process_batch(audio_files, args):
-    # Load models once for the batch
-    whisper_model = WhisperModel(args.model_name, device=args.device, compute_type=mtypes[args.device])
-
-    if args.language in wav2vec2_langs:
-        alignment_model, metadata = whisperx.load_align_model(
-            language_code=args.language, device=args.device
-        )
-    else:
-        alignment_model, metadata = None, None
-
-    for audio_file in tqdm(audio_files, desc="Processing files in batch"):
-        process_audio_file(audio_file, args, whisper_model, alignment_model, metadata)
-
-    del whisper_model, alignment_model
-    torch.cuda.empty_cache()
-    gc.collect()
 
 def main():
     setup_logging()
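
The metadata rows are now buffered per speaker in speaker_metadata during the segment loop and flushed once per speaker afterwards, so each metadata.csv is opened and written a single time instead of once per segment. Rows are pipe-delimited, speaker_{id}_{segment index}|{speaker name}|{transcript}, which gives a layout roughly like the following under audio_dir (placeholder values, not output from any real run; where the segment WAVs land is not visible in this hunk):

    speaker_0/
        metadata.csv    # e.g. speaker_0_001|Speaker 0|<transcript of segment 1>
    speaker_1/
        metadata.csv    # e.g. speaker_1_002|Speaker 1|<transcript of segment 2>
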
@@ -223,24 +219,15 @@ def main():
         default="cuda" if torch.cuda.is_available() else "cpu",
         help="if you have a GPU use 'cuda', otherwise 'cpu'",
     )
-    parser.add_argument(
-        "--batch-files",
-        type=int,
-        default=10,
-        help="Number of files to process in a single batch",
-    )
     args = parser.parse_args()
 
     if os.path.isdir(args.audio):
         audio_files = glob.glob(os.path.join(args.audio, "*.wav")) + glob.glob(os.path.join(args.audio, "*.mp3"))
         logging.info(f"Found {len(audio_files)} audio files in the directory.")
-
-
-        for i in range(0, len(audio_files), args.batch_files):
-            batch = audio_files[i:i+args.batch_files]
-            process_batch(batch, args)
+        with concurrent.futures.ThreadPoolExecutor() as executor:
+            list(tqdm(executor.map(lambda f: process_audio_file(f, args), audio_files), total=len(audio_files), desc="Processing files"))
     else:
-        process_audio_file(args.audio, args
+        process_audio_file(args.audio, args)
 
 if __name__ == "__main__":
     main()
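
executor.map is lazy, so wrapping it in list(tqdm(...)) both forces every file to be processed and drives the progress bar. With the default ThreadPoolExecutor, several worker threads can run process_audio_file at once, each loading its own set of models; if memory is a concern, a capped variant along these lines may be safer (max_workers and the helper name are assumptions, not part of this commit):

    import concurrent.futures
    from tqdm import tqdm

    def run_all(audio_files, args, max_workers=2):
        # Cap concurrency so only a couple of model sets are resident at once.
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            futures = {executor.submit(process_audio_file, f, args): f for f in audio_files}
            for future in tqdm(concurrent.futures.as_completed(futures),
                               total=len(futures), desc="Processing files"):
                future.result()  # surface exceptions raised inside worker threads

Threads share the Python GIL, so the gain here comes mainly from overlapping I/O and GPU work rather than from parallel Python execution.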