ShaoTengLiu committed · Commit 2d1bc13 · Parent(s): 2b2d19c
update
- {Video-P2P-Demo → Video-P2P}/.DS_Store +0 -0
- {Video-P2P-Demo → Video-P2P}/README.md +0 -0
- {Video-P2P-Demo → Video-P2P}/configs/.DS_Store +0 -0
- {Video-P2P-Demo → Video-P2P}/configs/man-motor-tune.yaml +0 -0
- Video-P2P/configs/man-surfing.yaml +41 -0
- {Video-P2P-Demo → Video-P2P}/configs/rabbit-jump-p2p.yaml +0 -0
- {Video-P2P-Demo → Video-P2P}/configs/rabbit-jump-tune.yaml +0 -0
- {Video-P2P-Demo → Video-P2P}/data/.DS_Store +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/.DS_Store +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/1.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/2.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/3.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/4.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/5.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/6.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/7.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/motorbike/8.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/1.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/2.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/3.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/4.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/5.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/6.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/7.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/data/rabbit/8.jpg +0 -0
- {Video-P2P-Demo → Video-P2P}/ptp_utils.py +0 -0
- {Video-P2P-Demo → Video-P2P}/requirements.txt +0 -0
- {Video-P2P-Demo → Video-P2P}/run_tuning.py +0 -0
- {Video-P2P-Demo → Video-P2P}/run_videop2p.py +0 -0
- {Video-P2P-Demo → Video-P2P}/script.sh +0 -0
- {Video-P2P-Demo → Video-P2P}/seq_aligner.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/data/dataset.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/models/attention.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/models/resnet.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/models/unet.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/models/unet_blocks.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/pipelines/pipeline_tuneavideo.py +0 -0
- {Video-P2P-Demo → Video-P2P}/tuneavideo/util.py +0 -0
- app.py +10 -10
- inference.py +1 -1
- trainer.py +4 -4
{Video-P2P-Demo → Video-P2P}/.DS_Store  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/README.md  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/configs/.DS_Store  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/configs/man-motor-tune.yaml  RENAMED, file without changes
Video-P2P/configs/man-surfing.yaml
ADDED
@@ -0,0 +1,41 @@
+pretrained_model_path: "./checkpoints/stable-diffusion-v1-4"
+output_dir: "./outputs/man-surfing"
+
+train_data:
+  video_path: "data/man-surfing.mp4"
+  prompt: "a man is surfing"
+  n_sample_frames: 24
+  width: 512
+  height: 512
+  sample_start_idx: 0
+  sample_frame_rate: 1
+
+validation_data:
+  prompts:
+    - "a panda is surfing"
+    - "a boy, wearing a birthday hat, is surfing"
+    - "a raccoon is surfing, cartoon style"
+    - "Iron Man is surfing in the desert"
+  video_length: 24
+  width: 512
+  height: 512
+  num_inference_steps: 50
+  guidance_scale: 12.5
+  use_inv_latent: True
+  num_inv_steps: 50
+
+learning_rate: 3e-5
+train_batch_size: 1
+max_train_steps: 500
+checkpointing_steps: 1000
+validation_steps: 100
+trainable_modules:
+  - "attn1.to_q"
+  - "attn2.to_q"
+  - "attn_temp"
+
+seed: 33
+mixed_precision: fp16
+use_8bit_adam: False
+gradient_checkpointing: True
+enable_xformers_memory_efficient_attention: True
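
Since trainer.py (changed below) loads this new config with OmegaConf and overrides a few fields before launching tuning, here is a minimal sketch of that pattern; the override values are illustrative only, not taken from the repository:

```python
from omegaconf import OmegaConf

# Load the training config added in this commit (same path trainer.py uses).
config = OmegaConf.load('Video-P2P/configs/man-surfing.yaml')

# Nested keys are addressed with attribute access; these values are examples.
config.output_dir = './outputs/my-run'
config.train_data.video_path = 'data/man-surfing.mp4'
config.validation_data.prompts = ['a panda is surfing']

# Persist the edited config so run_tuning.py can pick it up.
with open('my-config.yaml', 'w') as f:
    OmegaConf.save(config, f)
```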
{Video-P2P-Demo → Video-P2P}/configs/rabbit-jump-p2p.yaml  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/configs/rabbit-jump-tune.yaml  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/.DS_Store  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/.DS_Store  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/1.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/2.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/3.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/4.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/5.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/6.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/7.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/motorbike/8.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/1.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/2.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/3.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/4.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/5.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/6.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/7.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/data/rabbit/8.jpg  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/ptp_utils.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/requirements.txt  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/run_tuning.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/run_videop2p.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/script.sh  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/seq_aligner.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/data/dataset.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/models/attention.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/models/resnet.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/models/unet.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/models/unet_blocks.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/pipelines/pipeline_tuneavideo.py  RENAMED, file without changes
{Video-P2P-Demo → Video-P2P}/tuneavideo/util.py  RENAMED, file without changes
app.py
CHANGED
@@ -8,15 +8,15 @@ from subprocess import getoutput
 import gradio as gr
 import torch
 
-
+from app_inference import create_inference_demo
 from app_training import create_training_demo
-
+from app_upload import create_upload_demo
 from inference import InferencePipeline
 from trainer import Trainer
 
 TITLE = '# [Video-P2P](https://video-p2p.github.io/) UI'
 
-ORIGINAL_SPACE_ID = 'Shaldon/Video-P2P-
+ORIGINAL_SPACE_ID = 'Shaldon/Video-P2P-Demo'
 SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
 GPU_DATA = getoutput('nvidia-smi')
 SHARED_UI_WARNING = f'''## Attention - Training doesn't work in this shared UI. You can duplicate and use it with a paid private T4 GPU.
@@ -70,13 +70,13 @@ with gr.Blocks(css='style.css') as demo:
     with gr.Tabs():
         with gr.TabItem('Train'):
             create_training_demo(trainer, pipe)
-
-
-
-
-
-
-
+        with gr.TabItem('Run'):
+            create_inference_demo(pipe, HF_TOKEN)
+        with gr.TabItem('Upload'):
+            gr.Markdown('''
+            - You can use this tab to upload models later if you choose not to upload models in training time or if upload in training time failed.
+            ''')
+            create_upload_demo(HF_TOKEN)
 
     if not HF_TOKEN:
         show_warning(HF_TOKEN_NOT_SPECIFIED_WARNING)
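
For reference, a minimal standalone sketch of the three-tab Gradio layout this diff wires up; the create_*_demo functions below are simplified stand-ins for the Space's app_training, app_inference, and app_upload factories (which take trainer/pipeline/token arguments in the real code):

```python
import gradio as gr

# Simplified stand-ins for the Space's own factory modules.
def create_training_demo():
    return gr.Markdown('training UI goes here')

def create_inference_demo():
    return gr.Markdown('inference UI goes here')

def create_upload_demo():
    return gr.Markdown('upload UI goes here')

with gr.Blocks() as demo:
    with gr.Tabs():
        with gr.TabItem('Train'):
            create_training_demo()
        with gr.TabItem('Run'):
            create_inference_demo()
        with gr.TabItem('Upload'):
            gr.Markdown('Upload trained models here if uploading during training was skipped or failed.')
            create_upload_demo()

demo.launch()
```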
inference.py
CHANGED
@@ -13,7 +13,7 @@ from diffusers.utils.import_utils import is_xformers_available
 from einops import rearrange
 from huggingface_hub import ModelCard
 
-sys.path.append('Video-P2P
+sys.path.append('Video-P2P')
 
 from tuneavideo.models.unet import UNet3DConditionModel
 from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline
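
For context, the vendored Tune-A-Video code is now imported from the renamed Video-P2P directory. Below is a minimal sketch of that import-path setup, with a pipeline construction that follows the upstream Tune-A-Video usage; the checkpoint paths and dtype are illustrative assumptions, not values from this Space:

```python
import sys

# Make the vendored package importable from the renamed directory.
sys.path.append('Video-P2P')

import torch
from tuneavideo.models.unet import UNet3DConditionModel
from tuneavideo.pipelines.pipeline_tuneavideo import TuneAVideoPipeline

# Illustrative checkpoint locations; InferencePipeline in this Space resolves
# them from the Hub and the training output directory instead.
base_model_path = './checkpoints/stable-diffusion-v1-4'
tuned_model_path = './outputs/man-surfing'

unet = UNet3DConditionModel.from_pretrained(
    tuned_model_path, subfolder='unet', torch_dtype=torch.float16)
pipe = TuneAVideoPipeline.from_pretrained(
    base_model_path, unet=unet, torch_dtype=torch.float16).to('cuda')
```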
trainer.py
CHANGED
@@ -17,10 +17,10 @@ from omegaconf import OmegaConf
 from app_upload import ModelUploader
 from utils import save_model_card
 
-sys.path.append('Video-P2P
+sys.path.append('Video-P2P')
 
 # URL_TO_JOIN_MODEL_LIBRARY_ORG = 'https://huggingface.co/organizations/Tune-A-Video-library/share/YjTcaNJmKyeHFpMBioHhzBcTzCYddVErEk'
-ORIGINAL_SPACE_ID = 'Shaldon/Video-P2P-
+ORIGINAL_SPACE_ID = 'Shaldon/Video-P2P-Demo'
 SPACE_ID = os.getenv('SPACE_ID', ORIGINAL_SPACE_ID)
 
 
@@ -103,7 +103,7 @@ class Trainer:
         # self.join_model_library_org(
         #     self.hf_token if self.hf_token else input_token)
 
-        config = OmegaConf.load('Video-P2P
+        config = OmegaConf.load('Video-P2P/configs/man-surfing.yaml')
         config.pretrained_model_path = self.download_base_model(base_model)
         config.output_dir = output_dir.as_posix()
         config.train_data.video_path = training_video.name  # type: ignore
@@ -133,7 +133,7 @@ class Trainer:
         with open(config_path, 'w') as f:
             OmegaConf.save(config, f)
 
-        command = f'accelerate launch Video-P2P
+        command = f'accelerate launch Video-P2P/run_tuning.py --config {config_path}'
         subprocess.run(shlex.split(command))
         save_model_card(save_dir=output_dir,
                         base_model=base_model,
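
A minimal sketch of the launch step assembled in the last hunk, assuming an illustrative config path; trainer.py writes its edited config to the run's output directory, builds the same command string, and hands it to subprocess after shlex-splitting:

```python
import shlex
import subprocess

# Illustrative path; trainer.py saves the per-run config before launching.
config_path = 'outputs/my-run/config.yaml'

# The tuning script now lives under 'Video-P2P/' rather than 'Video-P2P-Demo/'.
command = f'accelerate launch Video-P2P/run_tuning.py --config {config_path}'
subprocess.run(shlex.split(command), check=True)
```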