Commit 46c7e88 ("up")
Parent: e57a69d

Files changed:
- run_demo_openai.py (+8, -6)
- run_demo_openai_merged.py (+10, -5)
run_demo_openai.py (CHANGED)

@@ -103,12 +103,14 @@ def transcribe(microphone, file_upload, with_timestamps, model_name=DEFAULT_MODE
         return "ERROR: You have to either use the microphone or upload an audio file"
 
     file = microphone if microphone is not None else file_upload
-    model = maybe_load_cached_pipeline(model_name)
-    # text = model.transcribe(file, **GEN_KWARGS)["text"]
-    text = infer(model, file, with_timestamps)
-
-    logger.info(f"Transcription by `{model_name}`:\n{text}\n")
-
+    try:
+        model = maybe_load_cached_pipeline(model_name)
+        # text = model.transcribe(file, **GEN_KWARGS)["text"]
+        text = infer(model, file, with_timestamps)
+
+        logger.info(f"Transcription by `{model_name}`:\n{text}\n")
+    except Exception as e:
+        logger.info(str(e))
 
     return warn_output + text
 
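The change wraps model loading and inference in a try/except so a failure is logged instead of crashing the Gradio handler. Below is a minimal, self-contained sketch of that pattern; `load_model` and `run_inference` are hypothetical stand-ins for the Space's own `maybe_load_cached_pipeline` and `infer` helpers.

import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


def transcribe_safely(load_model, run_inference, file, model_name):
    # Pre-initialize `text` so the function can still return if inference fails.
    text = ""
    try:
        model = load_model(model_name)      # stand-in for maybe_load_cached_pipeline(model_name)
        text = run_inference(model, file)   # stand-in for infer(model, file, with_timestamps)
        logger.info(f"Transcription by `{model_name}`:\n{text}\n")
    except Exception as e:
        # Log the error; the callback still returns normally.
        logger.info(str(e))
    return text


if __name__ == "__main__":
    # Demo with toy callables in place of the real Whisper helpers.
    out = transcribe_safely(lambda name: "dummy-model", lambda m, f: f"transcript of {f}", "clip.wav", "whisper-demo")
    print(out)

One caveat with the committed version: if the try block fails before `text` is assigned, the trailing `return warn_output + text` raises a NameError of its own; the sketch above pre-initializes `text` to avoid that.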
run_demo_openai_merged.py (CHANGED)

@@ -3,6 +3,7 @@ import warnings
 
 import gradio as gr
 import pytube as pt
+import psutil
 import torch
 import whisper
 from huggingface_hub import hf_hub_download, model_info
@@ -42,6 +43,13 @@ logger.info(f"Model will be loaded on device `{device}`")
 cached_models = {}
 
 
+def _print_memory_info():
+    memory = psutil.virtual_memory()
+    logger.info(
+        f"Memory: {memory.total / (1024 ** 3):.2f}GB, used: {memory.percent}%, available: {memory.available / (1024 ** 3):.2f}GB"
+    )
+
+
 def print_cuda_memory_info():
     used_mem, tot_mem = torch.cuda.mem_get_info()
     logger.info(
@@ -50,11 +58,8 @@ def print_cuda_memory_info():
 
 
 def print_memory_info():
-    if device == "cpu":
-        # nothing to report without a GPU
-        pass
-    else:
-        print_cuda_memory_info()
+    _print_memory_info()
+    print_cuda_memory_info()
 
 
 def maybe_load_cached_pipeline(model_name):
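After this commit, `print_memory_info()` reports both system RAM (via the newly imported psutil) and CUDA memory. Below is a self-contained sketch of the combined behavior; the names differ from the Space's (`print_ram_info` here vs. `_print_memory_info`), and a `torch.cuda.is_available()` guard is added that the committed code does not have.

import logging

import psutil
import torch

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

GiB = 1024 ** 3


def print_ram_info():
    # psutil.virtual_memory() returns system-wide RAM statistics.
    memory = psutil.virtual_memory()
    logger.info(
        f"Memory: {memory.total / GiB:.2f}GB, used: {memory.percent}%, "
        f"available: {memory.available / GiB:.2f}GB"
    )


def print_cuda_memory_info():
    # torch.cuda.mem_get_info() returns (free_bytes, total_bytes) for the current device.
    free_mem, total_mem = torch.cuda.mem_get_info()
    logger.info(f"CUDA memory: {free_mem / GiB:.2f}GB free of {total_mem / GiB:.2f}GB total")


def print_memory_info():
    print_ram_info()
    if torch.cuda.is_available():
        print_cuda_memory_info()


if __name__ == "__main__":
    print_memory_info()

Two details worth noting: `torch.cuda.mem_get_info()` returns free and total bytes, so the `used_mem` name in the unchanged `print_cuda_memory_info` context above actually holds the free amount; and because the commit drops the old CPU branch, the unconditional `print_cuda_memory_info()` call would raise on a CPU-only host, which is why the sketch keeps the availability check.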