# Copyright 2023 ByteDance and/or its affiliates.
#
# Copyright (2023) MagicAnimate Authors
#
# ByteDance, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from ByteDance or
# its affiliates is strictly prohibited.
import imageio
import numpy as np
import gradio as gr
import spaces
from PIL import Image
from demo.animate import MagicAnimate
from huggingface_hub import snapshot_download
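
# Download pretrained weights from the Hugging Face Hub: the Stable Diffusion
# v1-5 base model, the fine-tuned MSE VAE, and the MagicAnimate checkpoints.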
snapshot_download(repo_id="runwayml/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5")
snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")
animator = MagicAnimate()
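
# On ZeroGPU Spaces, `spaces.GPU` allocates a GPU for each call to the wrapped
# function; `duration=420` requests up to 420 s of GPU time per run.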
@spaces.GPU(duration=420, enable_queue=True)
def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
    return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)
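
# Build the Gradio UI: reference image + DensePose motion sequence in,
# animated video out.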
with gr.Blocks() as demo:
    gr.HTML(
        """
        <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
            <a href="https://github.com/magic-research/magic-animate" style="margin-right: 20px; text-decoration: none; display: flex; align-items: center;">
            </a>
            <div>
                <h1>MagicAnimate: Temporally Consistent Human Image Animation using Diffusion Model</h1>
                <h5 style="margin: 0;">If you like our project, please give us a star ✨ on GitHub for the latest updates.</h5>
                <div style="display: flex; justify-content: center; align-items: center; text-align: center;">
                    <a href="https://arxiv.org/abs/2311.16498"><img src="https://img.shields.io/badge/Arxiv-2311.16498-red"></a>
                    <a href="https://showlab.github.io/magicanimate"><img src="https://img.shields.io/badge/Project_Page-MagicAnimate-green" alt="Project Page"></a>
                    <a href="https://github.com/magic-research/magic-animate"><img src="https://img.shields.io/badge/Github-Code-blue"></a>
                </div>
            </div>
        </div>
        """)
    animation = gr.Video(format="mp4", label="Animation Results", autoplay=True)
    with gr.Row():
        reference_image = gr.Image(label="Reference Image")
        motion_sequence = gr.Video(format="mp4", label="Motion Sequence", max_length=5)
        with gr.Column():
            random_seed = gr.Textbox(label="Random seed", value=1, info="default: 1")
            sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25")
            guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5")
            submit = gr.Button("Animate")
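
    # Lightweight callbacks that run when a user uploads an input.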
    def read_video(video):
        # Probing the upload with imageio makes unreadable files fail early;
        # the video path itself is passed through unchanged.
        reader = imageio.get_reader(video)
        fps = reader.get_meta_data()['fps']
        return video

    def read_image(image, size=512):
        # Resize the uploaded reference image to the square resolution the
        # model expects (512x512 by default).
        return np.array(Image.fromarray(image).resize((size, size)))
    # When the user uploads a new motion sequence
    motion_sequence.upload(
        read_video,
        motion_sequence,
        motion_sequence,
        queue=False
    )
    # When the user uploads a new reference image
    reference_image.upload(
        read_image,
        reference_image,
        reference_image,
        queue=False
    )
    # When the `submit` button is clicked
    submit.click(
        animate,
        [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale],
        animation
    )
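
    # Map each example's source-image filename stem to its
    # (example inputs, precomputed output video) pair.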
    cached_examples: dict[str, tuple[list[str], str]] = {
        "monalisa": (["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"], "inputs/applications/output/monalisa.mp4"),
        "demo4": (["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"], "inputs/applications/output/demo4.mp4"),
        "dalle2": (["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"], "inputs/applications/output/dalle2.mp4"),
        "dalle8": (["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"], "inputs/applications/output/dalle8.mp4"),
        "multi1_source": (["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"], "inputs/applications/output/multi1_source.mp4"),
    }
    # Examples: with `cache_examples=True`, Gradio calls `fn` once per example
    # at startup. Since `preprocess=False`, the image arrives as a FileData
    # dict, and `fn` looks up the precomputed output video by filename stem
    # instead of running the model.
    gr.Markdown("## Examples")
    gr.Examples(
        fn=lambda image, video: cached_examples[image['path'].split('/')[-1].split('.')[0]][1],
        examples=[inputs for inputs, _ in cached_examples.values()],
        inputs=[reference_image, motion_sequence],
        outputs=animation,
        cache_examples=True,
        preprocess=False,
    )
# demo.queue(max_size=15, api_open=False)
demo.launch(share=True, show_api=False)