Fabrice-TIERCELIN committed · verified
Commit 5adf87d · Parent: 0871c8c

snapshot_download

Files changed (1): app.py (+4 -12)
app.py CHANGED
@@ -13,18 +13,10 @@ from hyvideo.config import parse_args
 from hyvideo.inference import HunyuanVideoSampler
 from hyvideo.constants import NEGATIVE_PROMPT
 
-from huggingface_hub import hf_hub_download
+from huggingface_hub import snapshot_download
 
 if torch.cuda.device_count() > 0:
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="LICENSE", local_dir="ckpts")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="Notice", local_dir="ckpts")
-    #hf_hub_download(repo_id="tencent/HunyuanVideo", filename="README.md", local_dir="ckpts")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="config.json", local_dir="ckpts")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states.pt", local_dir="ckpts/hunyuan-video-t2v-720p/transformers")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states_fp8.pt", local_dir="ckpts/hunyuan-video-t2v-720p/transformers")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="hunyuan-video-t2v-720p/transformers/mp_rank_00_model_states_fp8_map.pt", local_dir="ckpts/hunyuan-video-t2v-720p/transformers")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="hunyuan-video-t2v-720p/vae/config.json", local_dir="ckpts/hunyuan-video-t2v-720p/vae")
-    hf_hub_download(repo_id="tencent/HunyuanVideo", filename="hunyuan-video-t2v-720p/vae/pytorch_model.pt", local_dir="ckpts/hunyuan-video-t2v-720p/vae")
+    snapshot_download(repo_id="tencent/HunyuanVideo", repo_type="model", local_dir="ckpts", force_download=True)
 
 def initialize_model(model_path):
     print('initialize_model: ' + model_path)
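
Note on the download change: snapshot_download with force_download=True re-fetches every file in the repository on each run, where the removed code pulled only eight specific files. If the narrower behavior is ever wanted again, a minimal sketch using allow_patterns (a standard snapshot_download parameter; the pattern list below is inferred from the paths removed in this commit):

    from huggingface_hub import snapshot_download

    # Limit the snapshot to the paths the removed hf_hub_download calls fetched.
    snapshot_download(
        repo_id="tencent/HunyuanVideo",
        repo_type="model",
        local_dir="ckpts",
        allow_patterns=[
            "LICENSE",
            "Notice",
            "config.json",
            "hunyuan-video-t2v-720p/transformers/*",
            "hunyuan-video-t2v-720p/vae/*",
        ],
    )

Either way, snapshot_download mirrors the repository layout under local_dir, which is why the per-subfolder local_dir arguments of the old calls are no longer needed.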
@@ -138,14 +130,14 @@ def create_demo(model_path):
 
         with gr.Accordion("Advanced Options", open=False):
             with gr.Column():
-                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**63 - 1)
+                seed = gr.Slider(label="Seed (-1 for random)", value=-1, minimum=-1, maximum=2**63 - 1, step=1)
                 guidance_scale = gr.Slider(1.0, 20.0, value=1.0, step=0.5, label="Guidance Scale")
                 flow_shift = gr.Slider(0.0, 10.0, value=7.0, step=0.1, label="Flow Shift")
                 embedded_guidance_scale = gr.Slider(1.0, 20.0, value=6.0, step=0.5, label="Embedded Guidance Scale")
 
         generate_btn = gr.Button(value = "🚀 Generate Video", variant = "primary")
 
-        with gr.Column():
+        with gr.Row():
             output = gr.Video(label = "Generated Video", autoplay = True)
 
         gr.Markdown("""
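
Note on the seed slider: adding step=1 constrains the slider to integer values over its -1 to 2**63 - 1 range, since a fractional seed would break integer-only seeding downstream (e.g. torch.Generator.manual_seed expects an int). How the -1 sentinel maps to a random seed is outside this diff; a minimal sketch of the usual pattern, with resolve_seed as a hypothetical helper:

    import random

    def resolve_seed(seed: int) -> int:
        # Hypothetical helper, not part of this commit: the slider label
        # says -1 means "random", so draw a fresh seed in that case.
        if seed == -1:
            return random.randint(0, 2**63 - 1)
        return seed

The resolved value would then feed something like torch.Generator().manual_seed(...) before sampling.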
 
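
Note on the layout change: the output video moves from a gr.Column into a gr.Row. Both are standard Gradio layout blocks; a Row places its children side by side while a Column stacks them vertically. A self-contained illustration with placeholder components:

    import gradio as gr

    with gr.Blocks() as demo:
        with gr.Row():      # children are laid out horizontally
            gr.Textbox(label="left")
            gr.Textbox(label="right")
        with gr.Column():   # children are stacked vertically
            gr.Textbox(label="top")
            gr.Textbox(label="bottom")

    demo.launch()

With a single child, as here, the visible difference is subtle and mostly affects how the component's width is allotted within the parent container.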