Hjgugugjhuhjggg
committed on
Update app.py
app.py CHANGED
@@ -19,7 +19,6 @@ import soundfile as sf
 import numpy as np
 import torch
 import uvicorn
-from tqdm import tqdm
 
 logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s")
 
@@ -63,9 +62,8 @@ class S3ModelLoader:
         except EnvironmentError:
             logging.info(f"Model {model_name} not found in S3. Downloading...")
             try:
-
-
-                tokenizer = AutoTokenizer.from_pretrained(model_name, token=HUGGINGFACE_HUB_TOKEN)
+                model = AutoModelForCausalLM.from_pretrained(model_name, token=HUGGINGFACE_HUB_TOKEN)
+                tokenizer = AutoTokenizer.from_pretrained(model_name, token=HUGGINGFACE_HUB_TOKEN)
                 logging.info(f"Downloaded {model_name} successfully.")
                 logging.info(f"Saving {model_name} to S3...")
                 model.save_pretrained(s3_uri)
@@ -150,7 +148,7 @@ async def generate(request: Request, body: GenerateRequest):
             sf.write(audio_bytesio, audio["sampling_rate"], np.int16(audio["audio"]))
             audio_bytes = audio_bytesio.getvalue()
             return Response(content=audio_bytes, media_type="audio/wav")
-
+
     elif body.task_type == "text-to-video":
         try:
             generator = pipeline("text-to-video", model=model, tokenizer=tokenizer, device=device)