# Copyright 2023 ByteDance and/or its affiliates.
#
# Copyright (2023) MagicAnimate Authors
#
# ByteDance, its affiliates and licensors retain all intellectual
# property and proprietary rights in and to this material, related
# documentation and any modifications thereto. Any use, reproduction,
# disclosure or distribution of this material and related documentation
# without an express license agreement from ByteDance or
# its affiliates is strictly prohibited.
import argparse
import imageio
import numpy as np
import gradio as gr
import spaces
import os
from PIL import Image
from subprocess import PIPE, run

from demo.animate import MagicAnimate
from huggingface_hub import snapshot_download

snapshot_download(repo_id="runwayml/stable-diffusion-v1-5", local_dir="./stable-diffusion-v1-5")
snapshot_download(repo_id="stabilityai/sd-vae-ft-mse", local_dir="./sd-vae-ft-mse")
snapshot_download(repo_id="zcxu-eric/MagicAnimate", local_dir="./MagicAnimate")

animator = MagicAnimate()


@spaces.GPU(duration=420, enable_queue=True)
def animate(reference_image, motion_sequence_state, seed=1, steps=25, guidance_scale=7.5):
    return animator(reference_image, motion_sequence_state, seed, steps, guidance_scale)


with gr.Blocks() as demo:
    gr.HTML(
        """
""") animation = gr.Video(format="mp4", label="Animation Results", autoplay=True) with gr.Row(): reference_image = gr.Image(label="Reference Image") motion_sequence = gr.Video(format="mp4", label="Motion Sequence",max_length=5) with gr.Column(): random_seed = gr.Textbox(label="Random seed", value=1, info="default: -1") sampling_steps = gr.Textbox(label="Sampling steps", value=25, info="default: 25") guidance_scale = gr.Textbox(label="Guidance scale", value=7.5, info="default: 7.5") submit = gr.Button("Animate") def read_video(video): reader = imageio.get_reader(video) fps = reader.get_meta_data()['fps'] return video def read_image(image, size=512): return np.array(Image.fromarray(image).resize((size, size))) # when user uploads a new video motion_sequence.upload( read_video, motion_sequence, motion_sequence, queue=False ) # when `first_frame` is updated reference_image.upload( read_image, reference_image, reference_image, queue=False ) # when the `submit` button is clicked submit.click( animate, [reference_image, motion_sequence, random_seed, sampling_steps, guidance_scale], animation ) cached_examples: dict[str, tuple[list[str], str]] = { "monalisa": (["inputs/applications/source_image/monalisa.png", "inputs/applications/driving/densepose/running.mp4"], "inputs/applications/output/monalisa.mp4"), "demo4": (["inputs/applications/source_image/demo4.png", "inputs/applications/driving/densepose/demo4.mp4"], "inputs/applications/output/demo4.mp4"), "dalle2": (["inputs/applications/source_image/dalle2.jpeg", "inputs/applications/driving/densepose/running2.mp4"], "inputs/applications/output/dalle2.mp4"), "dalle8": (["inputs/applications/source_image/dalle8.jpeg", "inputs/applications/driving/densepose/dancing2.mp4"], "inputs/applications/output/dalle8.mp4"), "multi1_source": (["inputs/applications/source_image/multi1_source.png", "inputs/applications/driving/densepose/multi_dancing.mp4"], "inputs/applications/output/multi1_source.mp4"), } # Examples gr.Markdown("## Examples") gr.Examples( fn=lambda image, video: cached_examples[image['path'].split('/')[-1].split('.')[0]][1], examples=[inputs for inputs, output in cached_examples.values()], inputs=[reference_image, motion_sequence], outputs=animation, cache_examples=True, preprocess=False, ) # demo.queue(max_size=15, api_open=False) demo.launch(share=True, show_api=False)