Upload 35 files
- extensions/sd-webui-animatediff/.github/FUNDING.yml +13 -0
- extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/bug_report.yml +91 -0
- extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/config.yml +1 -0
- extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/feature_request.yml +13 -0
- extensions/sd-webui-animatediff/.gitignore +4 -0
- extensions/sd-webui-animatediff/README.md +349 -0
- extensions/sd-webui-animatediff/__pycache__/motion_module.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/model/.gitkeep +0 -0
- extensions/sd-webui-animatediff/motion_module.py +657 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_cn.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_i2ibatch.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_infotext.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_infv2v.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_latent.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_lcm.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_logger.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_lora.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_mm.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_output.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_prompt.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_ui.cpython-310.pyc +0 -0
- extensions/sd-webui-animatediff/scripts/animatediff.py +279 -0
- extensions/sd-webui-animatediff/scripts/animatediff_cn.py +641 -0
- extensions/sd-webui-animatediff/scripts/animatediff_i2ibatch.py +309 -0
- extensions/sd-webui-animatediff/scripts/animatediff_infotext.py +35 -0
- extensions/sd-webui-animatediff/scripts/animatediff_infv2v.py +322 -0
- extensions/sd-webui-animatediff/scripts/animatediff_latent.py +84 -0
- extensions/sd-webui-animatediff/scripts/animatediff_lcm.py +137 -0
- extensions/sd-webui-animatediff/scripts/animatediff_logger.py +41 -0
- extensions/sd-webui-animatediff/scripts/animatediff_lora.py +84 -0
- extensions/sd-webui-animatediff/scripts/animatediff_mm.py +204 -0
- extensions/sd-webui-animatediff/scripts/animatediff_output.py +361 -0
- extensions/sd-webui-animatediff/scripts/animatediff_prompt.py +143 -0
- extensions/sd-webui-animatediff/scripts/animatediff_ui.py +349 -0
extensions/sd-webui-animatediff/.github/FUNDING.yml
ADDED
@@ -0,0 +1,13 @@
# These are supported funding model platforms

github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
patreon: conrevo # Replace with a single Patreon username
open_collective: # Replace with a single Open Collective username
ko_fi: conrevo # Replace with a single Ko-fi username
tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
liberapay: # Replace with a single Liberapay username
issuehunt: # Replace with a single IssueHunt username
otechie: # Replace with a single Otechie username
lfx_crowdfunding: # Replace with a single LFX Crowdfunding project-name e.g., cloud-foundry
custom: ['https://paypal.me/conrevo', 'https://afdian.net/a/conrevo'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/bug_report.yml
ADDED
@@ -0,0 +1,91 @@
name: Bug Report
description: Create a bug report
title: "[Bug]: "
labels: ["bug-report"]

body:
  - type: checkboxes
    attributes:
      label: Is there an existing issue for this?
      description: Please search both open issues and closed issues to see if an issue already exists for the bug you encountered, and that it hasn't been fixed in a recent build/commit.
      options:
        - label: I have searched the existing issues and checked the recent builds/commits of both this extension and the webui
          required: true
  - type: checkboxes
    attributes:
      label: Have you read FAQ on README?
      description: I have collected some common questions from AnimateDiff original repository.
      options:
        - label: I have updated WebUI and this extension to the latest version
          required: true
  - type: markdown
    attributes:
      value: |
        *Please fill this form with as much information as possible, don't forget to fill "What OS..." and "What browsers" and *provide screenshots if possible**
  - type: textarea
    id: what-did
    attributes:
      label: What happened?
      description: Tell us what happened in a very clear and simple way
    validations:
      required: true
  - type: textarea
    id: steps
    attributes:
      label: Steps to reproduce the problem
      description: Please provide us with precise step by step information on how to reproduce the bug
      value: |
        1. Go to ....
        2. Press ....
        3. ...
    validations:
      required: true
  - type: textarea
    id: what-should
    attributes:
      label: What should have happened?
      description: Tell what you think the normal behavior should be
    validations:
      required: true
  - type: textarea
    id: commits
    attributes:
      label: Commit where the problem happens
      description: Which commit of the extension are you running on? Please include the commit of both the extension and the webui (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.)
      value: |
        webui:
        extension:
    validations:
      required: true
  - type: dropdown
    id: browsers
    attributes:
      label: What browsers do you use to access the UI ?
      multiple: true
      options:
        - Mozilla Firefox
        - Google Chrome
        - Brave
        - Apple Safari
        - Microsoft Edge
  - type: textarea
    id: cmdargs
    attributes:
      label: Command Line Arguments
      description: Are you using any launching parameters/command line arguments (modified webui-user .bat/.sh) ? If yes, please write them below. Write "No" otherwise.
      render: Shell
    validations:
      required: true
  - type: textarea
    id: logs
    attributes:
      label: Console logs
      description: Please provide the errors printed on your console log of your browser (type F12 and go to console) and your terminal, after your bug happened.
      render: Shell
    validations:
      required: true
  - type: textarea
    id: misc
    attributes:
      label: Additional information
      description: Please provide us with any relevant additional info or context.
extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/config.yml
ADDED
@@ -0,0 +1 @@
blank_issues_enabled: true
extensions/sd-webui-animatediff/.github/ISSUE_TEMPLATE/feature_request.yml
ADDED
@@ -0,0 +1,13 @@
name: Feature Request
description: Create a feature request
title: "[Feature]: "
labels: ["feature-request"]

body:
  - type: textarea
    id: feature
    attributes:
      label: Expected behavior
      description: Please describe the feature you want.
    validations:
      required: true
extensions/sd-webui-animatediff/.gitignore
ADDED
@@ -0,0 +1,4 @@
__pycache__
model/*.*
model/*.*
TODO.md
extensions/sd-webui-animatediff/README.md
ADDED
@@ -0,0 +1,349 @@
# AnimateDiff for Stable Diffusion WebUI
This extension integrates [AnimateDiff](https://github.com/guoyww/AnimateDiff/) w/ [CLI](https://github.com/s9roll7/animatediff-cli-prompt-travel) into [AUTOMATIC1111 Stable Diffusion WebUI](https://github.com/AUTOMATIC1111/stable-diffusion-webui) w/ [ControlNet](https://github.com/Mikubill/sd-webui-controlnet). You can generate GIFs in exactly the same way as you generate images after enabling this extension.

This extension implements AnimateDiff in a different way. It does not require you to clone the whole SD1.5 repository. It also applies (probably) the least modification to `ldm`, so that you do not need to reload your model weights if you don't want to.

You might also be interested in another extension I created: [Segment Anything for Stable Diffusion WebUI](https://github.com/continue-revolution/sd-webui-segment-anything).


## Table of Contents
- [Update](#update)
- [How to Use](#how-to-use)
  - [WebUI](#webui)
  - [API](#api)
- [WebUI Parameters](#webui-parameters)
- [Img2GIF](#img2gif)
- [Prompt Travel](#prompt-travel)
- [ControlNet V2V](#controlnet-v2v)
- [Model Spec](#model-spec)
  - [Motion LoRA](#motion-lora)
  - [V3](#v3)
  - [SDXL](#sdxl)
- [Optimizations](#optimizations)
  - [Attention](#attention)
  - [FP8](#fp8)
  - [LCM](#lcm)
  - [Others](#others)
- [Model Zoo](#model-zoo)
- [VRAM](#vram)
- [Batch Size](#batch-size)
- [Demo](#demo)
  - [Basic Usage](#basic-usage)
  - [Motion LoRA](#motion-lora-1)
  - [Prompt Travel](#prompt-travel-1)
  - [AnimateDiff V3](#animatediff-v3)
  - [AnimateDiff SDXL](#animatediff-sdxl)
  - [ControlNet V2V](#controlnet-v2v-1)
- [Tutorial](#tutorial)
- [Thanks](#thanks)
- [Star History](#star-history)
- [Sponsor](#sponsor)


## Update
- `2023/07/20` [v1.1.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.1.0): Fix gif duration, add loop number, remove auto-download, remove xformers, remove instructions on gradio UI, refactor README, add [sponsor](#sponsor) QR code.
- `2023/07/24` [v1.2.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.2.0): Fix incorrect insertion of motion modules, add option to change path to motion modules in `Settings/AnimateDiff`, fix loading different motion modules.
- `2023/09/04` [v1.3.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.3.0): Support any community models with the same architecture; fix grey problem via [#63](https://github.com/continue-revolution/sd-webui-animatediff/issues/63)
- `2023/09/11` [v1.4.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.4.0): Support official v2 motion module (different architecture: GroupNorm not hacked, UNet middle layer has motion module).
- `2023/09/14`: [v1.4.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.4.1): Always change `beta`, `alpha_comprod` and `alpha_comprod_prev` to resolve grey problem in other samplers.
- `2023/09/16`: [v1.5.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.5.0): Randomize init latent to support [better img2gif](#img2gif); add other output formats and infotext output; add appending reversed frames; refactor code to ease maintaining.
- `2023/09/19`: [v1.5.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.5.1): Support xformers, sdp, sub-quadratic attention optimization - [VRAM](#vram) usage decreases to 5.60GB with default settings.
- `2023/09/22`: [v1.5.2](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.5.2): Option to disable xformers at `Settings/AnimateDiff` [due to a bug in xformers](https://github.com/facebookresearch/xformers/issues/845), [API support](#api), option to enable GIF palette optimization at `Settings/AnimateDiff`, gifsicle optimization moved to `Settings/AnimateDiff`.
- `2023/09/25`: [v1.6.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.6.0): [Motion LoRA](https://github.com/guoyww/AnimateDiff#features) supported. See [Motion LoRA](#motion-lora) for more information.
- `2023/09/27`: [v1.7.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.7.0): [ControlNet](https://github.com/Mikubill/sd-webui-controlnet) supported. See [ControlNet V2V](#controlnet-v2v) for more information. [Safetensors](#model-zoo) for some motion modules are also available now.
- `2023/09/29`: [v1.8.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.8.0): Infinite generation supported. See [WebUI Parameters](#webui-parameters) for more information.
- `2023/10/01`: [v1.8.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.8.1): Now you can uncheck `Batch cond/uncond` in `Settings/Optimization` if you want. This will reduce your [VRAM](#vram) (5.31GB -> 4.21GB for SDP) but take longer.
- `2023/10/08`: [v1.9.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.9.0): Prompt travel supported. You must have ControlNet installed (you do not need to enable ControlNet) to try it. See [Prompt Travel](#prompt-travel) for how to trigger this feature.
- `2023/10/11`: [v1.9.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.9.1): Use state_dict key to guess mm version, replace match case with if else to support python<3.10, option to save PNG to custom dir (see `Settings/AnimateDiff` for detail), move hints to js, install imageio\[ffmpeg\] automatically when MP4 save fails.
- `2023/10/16`: [v1.9.2](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.9.2): Add context generator to completely remove any closed loop, prompt travel supports closed loop, infotext fully supported including prompt travel, README refactor
- `2023/10/19`: [v1.9.3](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.9.3): Support webp output format. See [#233](https://github.com/continue-revolution/sd-webui-animatediff/pull/233) for more information.
- `2023/10/21`: [v1.9.4](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.9.4): Save prompt travel to output images, `Reverse` merged to `Closed loop` (See [WebUI Parameters](#webui-parameters)), remove `TimestepEmbedSequential` hijack, remove `hints.js`, better explanation of several context-related parameters.
- `2023/10/25`: [v1.10.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.10.0): Support img2img batch. You need ControlNet installed to make it work properly (you do not need to enable ControlNet). See [ControlNet V2V](#controlnet-v2v) for more information.
- `2023/10/29`: [v1.11.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.11.0): [HotShot-XL](https://github.com/hotshotco/Hotshot-XL) supported. See [SDXL](#sdxl) for more information.
- `2023/11/06`: [v1.11.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.11.1): Optimize VRAM for ControlNet V2V, patch [encode_pil_to_base64](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/api/api.py#L104-L133) so the API can return a video, save frames to `AnimateDiff/yy-mm-dd/`, recover from assertion error, optional [request id](#api) for API.
- `2023/11/10`: [v1.12.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.12.0): [AnimateDiff for SDXL](https://github.com/guoyww/AnimateDiff/tree/sdxl) supported. See [SDXL](#sdxl) for more information.
- `2023/11/16`: [v1.12.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.12.1): FP8 precision and LCM sampler supported. See [Optimizations](#optimizations) for more information. You can also optionally upload videos to AWS S3 storage by configuring appropriately via `Settings/AnimateDiff AWS`.
- `2023/12/19`: [v1.13.0](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.13.0): [AnimateDiff V3](https://github.com/guoyww/AnimateDiff?tab=readme-ov-file#202312-animatediff-v3-and-sparsectrl) supported. See [V3](#v3) for more information. Also: release all official models in fp16 & safetensors format [here](https://huggingface.co/conrevo/AnimateDiff-A1111/tree/main), add option to disable LCM sampler in `Settings/AnimateDiff`, remove patch [encode_pil_to_base64](https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/master/modules/api/api.py#L104-L133) because A1111 [v1.7.0](https://github.com/AUTOMATIC1111/stable-diffusion-webui/tree/v1.7.0) now supports video return for API.

For the future update plan, please refer to [#366](https://github.com/continue-revolution/sd-webui-animatediff/pull/366). `v1.13.x` is the last version update for `v1`. SparseCtrl, Magic Animate and other control methods will be supported in `v2` via updates to both this repo and sd-webui-controlnet.


## How to Use
1. Update your WebUI to v1.6.0 and ControlNet to v1.1.410, then install this extension via link. I do not plan to support older versions.
1. Download motion modules and put the model weights under `stable-diffusion-webui/extensions/sd-webui-animatediff/model/`. If you want to use another directory to save model weights, please go to `Settings/AnimateDiff`. See [model zoo](#model-zoo) for a list of available motion modules.
1. Enable `Pad prompt/negative prompt to be same length` in `Settings/Optimization` and click `Apply settings`. You must do this to prevent generating two separate unrelated GIFs. Checking `Batch cond/uncond` is optional: it can improve speed but increases VRAM usage.
1. DO NOT disable hash calculation, otherwise AnimateDiff will have trouble figuring out when you switch motion modules.

### WebUI
1. Go to txt2img if you want to try txt2gif and img2img if you want to try img2gif.
1. Choose an SD1.5 checkpoint, write prompts, set configurations such as image width/height. If you want to generate multiple GIFs at once, please [change batch number, instead of batch size](#batch-size).
1. Enable the AnimateDiff extension, set up [each parameter](#webui-parameters), then click `Generate`.
1. You should see the output GIF in the output gallery. You can access GIF output at `stable-diffusion-webui/outputs/{txt2img or img2img}-images/AnimateDiff/{yy-mm-dd}`. You can also access image frames at `stable-diffusion-webui/outputs/{txt2img or img2img}-images/{yy-mm-dd}`. You may choose to save frames for each generation into separate directories in `Settings/AnimateDiff`.

### API
It is quite similar to the way you use ControlNet. The API will return a video in base64 format. In `format`, `PNG` means saving frames to your file system without returning all the frames. If you want your API to return all frames, please add `Frame` to the `format` list. For the most up-to-date parameters, please read [here](https://github.com/continue-revolution/sd-webui-animatediff/blob/master/scripts/animatediff_ui.py#L26).
```
'alwayson_scripts': {
  'AnimateDiff': {
    'args': [{
      'model': 'mm_sd_v15_v2.ckpt',   # Motion module
      'format': ['GIF'],      # Save format, 'GIF' | 'MP4' | 'PNG' | 'WEBP' | 'WEBM' | 'TXT' | 'Frame'
      'enable': True,         # Enable AnimateDiff
      'video_length': 16,     # Number of frames
      'fps': 8,               # FPS
      'loop_number': 0,       # Display loop number
      'closed_loop': 'R+P',   # Closed loop, 'N' | 'R-P' | 'R+P' | 'A'
      'batch_size': 16,       # Context batch size
      'stride': 1,            # Stride
      'overlap': -1,          # Overlap
      'interp': 'Off',        # Frame interpolation, 'Off' | 'FILM'
      'interp_x': 10,         # Interp X
      'video_source': 'path/to/video.mp4',  # Video source
      'video_path': 'path/to/frames',       # Video path
      'latent_power': 1,      # Latent power
      'latent_scale': 32,     # Latent scale
      'last_frame': None,     # Optional last frame
      'latent_power_last': 1, # Optional latent power for last frame
      'latent_scale_last': 32,# Optional latent scale for last frame
      'request_id': ''        # Optional request id. If provided, outputs will have request id as filename suffix
      }
    ]
  }
},
```
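
Below is a minimal Python sketch of such an API call, assuming a local WebUI launched with `--api` on the default `http://127.0.0.1:7860` and the standard `/sdapi/v1/txt2img` endpoint; the prompt, image size and the subset of AnimateDiff arguments shown are illustrative placeholders.
```
# Minimal sketch, assuming a local WebUI launched with --api on the default port.
import base64
import requests

payload = {
    "prompt": "1girl, walking in a garden, masterpiece",  # placeholder prompt
    "steps": 20,
    "width": 512,
    "height": 512,
    "alwayson_scripts": {
        "AnimateDiff": {
            "args": [{
                "model": "mm_sd_v15_v2.ckpt",  # motion module file name
                "enable": True,
                "video_length": 16,
                "fps": 8,
                "format": ["GIF"],             # request a base64-encoded video back
            }]
        }
    },
}

resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload)
resp.raise_for_status()
# When a video format is requested, the first entry of "images" is assumed to
# hold the base64-encoded video, per the API description above.
with open("animatediff_output.gif", "wb") as f:
    f.write(base64.b64decode(resp.json()["images"][0]))
```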


## WebUI Parameters
1. **Save format** — Format of the output. Choose at least one of "GIF"|"MP4"|"WEBP"|"WEBM"|"PNG". Check "TXT" if you want infotext, which will live in the same directory as the output GIF. Infotext is also accessible via `stable-diffusion-webui/params.txt` and outputs in all formats.
    1. You can optimize GIF with `gifsicle` (`apt install gifsicle` required, read [#91](https://github.com/continue-revolution/sd-webui-animatediff/pull/91) for more information) and/or `palette` (read [#104](https://github.com/continue-revolution/sd-webui-animatediff/pull/104) for more information). Go to `Settings/AnimateDiff` to enable them.
    1. You can set quality and lossless for WEBP via `Settings/AnimateDiff`. Read [#233](https://github.com/continue-revolution/sd-webui-animatediff/pull/233) for more information.
    1. If you are using the API, by adding "PNG" to `format`, you can save all frames to your file system without returning all the frames. If you want your API to return all frames, please add `Frame` to the `format` list.
1. **Number of frames** — Choose whatever number you like.

    If you enter 0 (default):
    - If you submit a video via `Video source` / enter a video path via `Video path` / enable ANY batch ControlNet, the number of frames will be the number of frames in the video (use the shortest if more than one video is submitted).
    - Otherwise, the number of frames will be your `Context batch size` described below.

    If you enter something smaller than your `Context batch size` other than 0: you will get the first `Number of frames` frames as your output GIF from your whole generation. All following frames will not appear in your generated GIF, but will be saved as PNGs as usual. Do not set `Number of frames` to be something smaller than `Context batch size` other than 0 because of [#213](https://github.com/continue-revolution/sd-webui-animatediff/issues/213).
1. **FPS** — Frames per second, which is how many frames (images) are shown every second. If 16 frames are generated at 8 frames per second, your GIF's duration is 2 seconds. If you submit a source video, your FPS will be the same as the source video.
1. **Display loop number** — How many times the GIF is played. A value of `0` means the GIF never stops playing.
1. **Context batch size** — How many frames will be passed into the motion module at once. The SD1.5 motion modules are trained with 16 frames, so they give the best results when the number of frames is set to `16`. SDXL HotShotXL motion modules are trained with 8 frames instead. Choose [1, 24] for V1 / HotShotXL motion modules and [1, 32] for V2 / AnimateDiffXL motion modules.
1. **Closed loop** — Closed loop means that this extension will try to make the last frame the same as the first frame.
    1. When `Number of frames` > `Context batch size`, including when ControlNet is enabled and the source video frame number > `Context batch size` and `Number of frames` is 0, closed loop will be performed by the AnimateDiff infinite context generator.
    1. When `Number of frames` <= `Context batch size`, the AnimateDiff infinite context generator will not be effective. Only when you choose `A` will AnimateDiff append a reversed list of frames to the original list of frames to form a closed loop.

    See below for an explanation of each choice:

    - `N` means absolutely no closed loop - this is the only available option if `Number of frames` is nonzero and smaller than `Context batch size`.
    - `R-P` means that the extension will try to reduce the number of closed loop contexts. The prompt travel will not be interpolated to be a closed loop.
    - `R+P` means that the extension will try to reduce the number of closed loop contexts. The prompt travel will be interpolated to be a closed loop.
    - `A` means that the extension will aggressively try to make the last frame the same as the first frame. The prompt travel will be interpolated to be a closed loop.
1. **Stride** — Max motion stride as a power of 2 (default: 1). An illustrative sketch of the resulting frame groupings appears at the end of this section.
    1. Due to the limitation of the infinite context generator, this parameter is effective only when `Number of frames` > `Context batch size`, including when ControlNet is enabled and the source video frame number > `Context batch size` and `Number of frames` is 0.
    1. "Absolutely no closed loop" is only possible when `Stride` is 1.
    1. For each 1 <= $2^i$ <= `Stride`, the infinite context generator will try to make frames $2^i$ apart temporally consistent. For example, if `Stride` is 4 and `Number of frames` is 8, it will make the following frames temporally consistent:
        - `Stride` == 1: [0, 1, 2, 3, 4, 5, 6, 7]
        - `Stride` == 2: [0, 2, 4, 6], [1, 3, 5, 7]
        - `Stride` == 4: [0, 4], [1, 5], [2, 6], [3, 7]
1. **Overlap** — Number of frames to overlap in context. If overlap is -1 (default): your overlap will be `Context batch size` // 4.
    1. Due to the limitation of the infinite context generator, this parameter is effective only when `Number of frames` > `Context batch size`, including when ControlNet is enabled and the source video frame number > `Context batch size` and `Number of frames` is 0.
1. **Frame Interpolation** — Interpolate between frames with Deforum's FILM implementation. Requires the Deforum extension. [#128](https://github.com/continue-revolution/sd-webui-animatediff/pull/128)
1. **Interp X** — Replace each input frame with X interpolated output frames. [#128](https://github.com/continue-revolution/sd-webui-animatediff/pull/128).
1. **Video source** — [Optional] Video source file for [ControlNet V2V](#controlnet-v2v). You MUST enable ControlNet. It will be the source control for ALL ControlNet units that you enable without submitting a control image or a path to the ControlNet panel. You can of course submit one control image via the `Single Image` tab or an input directory via the `Batch` tab, which will override this video source input and work as usual.
1. **Video path** — [Optional] Folder of source frames for [ControlNet V2V](#controlnet-v2v), but lower priority than `Video source`. You MUST enable ControlNet. It will be the source control for ALL ControlNet units that you enable without submitting a control image or a path to ControlNet. You can of course submit one control image via the `Single Image` tab or an input directory via the `Batch` tab, which will override this video path input and work as usual.
    - For people who want to inpaint videos: enter a folder which contains two sub-folders `image` and `mask` on the ControlNet inpainting unit. These two sub-folders should contain the same number of images. This extension will match them according to the same sequence. Using my [Segment Anything](https://github.com/continue-revolution/sd-webui-segment-anything) extension can make your life much easier.

Please read
- [Img2GIF](#img2gif) for extra parameters on the img2gif panel.
- [Prompt Travel](#prompt-travel) for how to trigger prompt travel.
- [ControlNet V2V](#controlnet-v2v) for how to use ControlNet V2V.
- [Model Spec](#model-spec) for how to use Motion LoRA, V3 and SDXL.
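
The following illustrative sketch reproduces the frame groupings described for `Stride` above. It is not the extension's actual infinite context generator; it only shows which frames are grouped together for each power of 2 up to `Stride`.
```
# Illustrative sketch only: reproduces the Stride groupings listed above,
# not the extension's actual infinite context generator.
def stride_groups(num_frames: int, stride: int) -> dict[int, list[list[int]]]:
    groups = {}
    step = 1
    while step <= stride:
        # frames that are `step` apart fall into the same group
        groups[step] = [list(range(offset, num_frames, step)) for offset in range(step)]
        step *= 2
    return groups

# For Stride == 4 and 8 frames this yields:
# {1: [[0..7]], 2: [[0,2,4,6], [1,3,5,7]], 4: [[0,4], [1,5], [2,6], [3,7]]}
print(stride_groups(8, 4))
```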


## Img2GIF
You need to go to img2img and submit an init frame via the A1111 panel. You can optionally submit a last frame via the extension panel.

By default: your `init_latent` will be changed to
```
init_alpha = (1 - frame_number ^ latent_power / latent_scale)
init_latent = init_latent * init_alpha + random_tensor * (1 - init_alpha)
```
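
As a concrete illustration of the formula above, here is a hedged sketch that blends an init latent with random noise per frame. It is a simplified stand-in, not the actual implementation in `scripts/animatediff_latent.py`; the clamping of `init_alpha` to 0 is an assumption.
```
# Simplified sketch of the blending formula above; the real logic lives in
# scripts/animatediff_latent.py and also handles an optional last frame.
import torch

def blend_init_latent(init_latent: torch.Tensor, video_length: int,
                      latent_power: float = 1.0, latent_scale: float = 32.0) -> torch.Tensor:
    # init_latent: [1, C, H, W] latent of the uploaded init frame
    latents = init_latent.repeat(video_length, 1, 1, 1)
    for f in range(video_length):
        init_alpha = 1 - (f ** latent_power) / latent_scale
        init_alpha = max(init_alpha, 0.0)  # assumption: keep alpha non-negative
        latents[f] = latents[f] * init_alpha + torch.randn_like(latents[f]) * (1 - init_alpha)
    return latents
```
With the defaults, frame 0 keeps the init latent untouched (alpha = 1) and later frames drift toward pure noise.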

If you upload a last frame: your `init_latent` will be changed in a similar way. Read [this code](https://github.com/continue-revolution/sd-webui-animatediff/tree/v1.5.0/scripts/animatediff_latent.py#L28-L65) to understand how it works.


## Prompt Travel
Write your positive prompt following the example below.

The first line is the head prompt, which is optional. You can write no/single/multiple lines of head prompts.

The second and third lines are for prompt interpolation, in the format `frame number`: `prompt`. Your `frame number` should be in ascending order and smaller than the total `Number of frames`. The first frame has index 0.

The last line is the tail prompt, which is optional. You can write no/single/multiple lines of tail prompts. If you don't need this feature, just write prompts in the old way.
```
1girl, yoimiya (genshin impact), origen, line, comet, wink, Masterpiece, BestQuality. UltraDetailed, <lora:LineLine2D:0.7>, <lora:yoimiya:0.8>,
0: closed mouth
8: open mouth
smile
```
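
To make the format concrete, the hedged sketch below splits such a prompt into head prompt, per-frame prompts and tail prompt. It only mirrors the format described above and is not the extension's own parser (see `scripts/animatediff_prompt.py`).
```
# Hedged sketch of the prompt-travel format described above; not the
# extension's own parser (see scripts/animatediff_prompt.py).
import re

def parse_prompt_travel(prompt: str):
    head, frames, tail = [], [], []
    for line in prompt.splitlines():
        m = re.match(r"^\s*(\d+)\s*:\s*(.*)$", line)
        if m:
            frames.append((int(m.group(1)), m.group(2).strip()))
        elif not frames:
            head.append(line)   # lines before the first "frame: prompt" pair
        else:
            tail.append(line)   # lines after the last "frame: prompt" pair
    return "\n".join(head).strip(), frames, "\n".join(tail).strip()

head, frames, tail = parse_prompt_travel(
    "1girl, masterpiece,\n0: closed mouth\n8: open mouth\nsmile"
)
# head == "1girl, masterpiece,", frames == [(0, "closed mouth"), (8, "open mouth")], tail == "smile"
```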


## ControlNet V2V
You need to go to txt2img / img2img-batch and submit a source video or a path to frames. Each ControlNet will find control images according to this priority:
1. ControlNet `Single Image` tab or `Batch` tab. Simply uploading a control image or a directory of control frames is enough.
1. Img2img Batch tab `Input directory` if you are using img2img batch. If you upload a directory of control frames, it will be the source control for ALL ControlNet units that you enable without submitting a control image or a path to the ControlNet panel.
1. AnimateDiff `Video Source`. If you upload a video through `Video Source`, it will be the source control for ALL ControlNet units that you enable without submitting a control image or a path to the ControlNet panel.
1. AnimateDiff `Video Path`. If you upload a path to frames through `Video Path`, it will be the source control for ALL ControlNet units that you enable without submitting a control image or a path to the ControlNet panel.

`Number of frames` will be capped to the minimum number of images among all **folders** you provide. Each control image in each folder will be applied to one single frame. If you upload one single image for a ControlNet unit, that image will control **ALL** frames.

For people who want to inpaint videos: enter a folder which contains two sub-folders `image` and `mask` on the ControlNet inpainting unit. These two sub-folders should contain the same number of images. This extension will match them according to the same sequence. Using my [Segment Anything](https://github.com/continue-revolution/sd-webui-segment-anything) extension can make your life much easier.

AnimateDiff in img2img batch will be available in [v1.10.0](https://github.com/continue-revolution/sd-webui-animatediff/pull/224).


## Model Spec
### Motion LoRA
[Download](https://huggingface.co/conrevo/AnimateDiff-A1111/tree/main/lora) and use them like any other LoRA you use (example: download a motion LoRA to `stable-diffusion-webui/models/Lora` and add `<lora:mm_sd15_v2_lora_PanLeft:0.8>` to your positive prompt). **Motion LoRA only supports V2 motion modules**.

### V3
V3 has the same state dict keys as V1 but slightly different inference logic (GroupNorm is not hacked for V3). You may optionally use the [adapter](https://huggingface.co/conrevo/AnimateDiff-A1111/resolve/main/lora/mm_sd15_v3_adapter.safetensors?download=true) for V3, in the same way as you use LoRA. You MUST use [my link](https://huggingface.co/conrevo/AnimateDiff-A1111/resolve/main/lora/mm_sd15_v3_adapter.safetensors?download=true) instead of the [official link](https://huggingface.co/guoyww/animatediff/resolve/main/v3_sd15_adapter.ckpt?download=true). The official adapter won't work for A1111 due to state dict incompatibility.

### SDXL
[AnimateDiffXL](https://github.com/guoyww/AnimateDiff/tree/sdxl) and [HotShot-XL](https://github.com/hotshotco/Hotshot-XL) have an architecture identical to AnimateDiff-SD1.5. The main differences are
- HotShot-XL is trained with 8 frames instead of 16 frames. You are recommended to set `Context batch size` to 8 for HotShot-XL.
- AnimateDiffXL is still trained with 16 frames. You do not need to change `Context batch size` for AnimateDiffXL.
- AnimateDiffXL & HotShot-XL have fewer layers compared to AnimateDiff-SD1.5 because of SDXL.
- AnimateDiffXL is trained with higher resolution compared to HotShot-XL.

Although AnimateDiffXL & HotShot-XL have an identical structure to AnimateDiff-SD1.5, I strongly discourage you from using AnimateDiff-SD1.5 for SDXL, or using HotShot / AnimateDiffXL for SD1.5 - you will get severe artifacts if you do that. I have decided not to support that, despite the fact that it would not be hard to do.

Technically all features available for AnimateDiff + SD1.5 are also available for (AnimateDiff / HotShot) + SDXL. However, I have not tested all of them. I have tested infinite context generation and prompt travel; I have not tested ControlNet. If you find any bug, please report it to me.


## Optimizations
Optimizations can be significantly helpful if you want to improve speed and reduce VRAM usage. With [attention optimization](#attention), [FP8](#fp8) and unchecking `Batch cond/uncond` in `Settings/Optimization`, I am able to run 4 x ControlNet + AnimateDiff + Stable Diffusion to generate 36 frames of 1024 * 1024 images with 18GB VRAM.

### Attention
Adding `--xformers` / `--opt-sdp-attention` to your command line arguments can significantly reduce VRAM and improve speed. However, due to a bug in xformers, you may or may not get a CUDA error. If you get a CUDA error, please either completely switch to `--opt-sdp-attention`, or keep `--xformers` -> go to `Settings/AnimateDiff` -> choose "Optimize attention layers with sdp (torch >= 2.0.0 required)".

### FP8
FP8 requires torch >= 2.1.0 and the WebUI [test-fp8](https://github.com/AUTOMATIC1111/stable-diffusion-webui/tree/test-fp8) branch by [@KohakuBlueleaf](https://github.com/KohakuBlueleaf). Follow these steps to enable FP8:
1. Switch to the `test-fp8` branch via `git checkout test-fp8` in your `stable-diffusion-webui` directory.
1. Reinstall torch by adding `--reinstall-torch` ONCE to your command line arguments.
1. Go to Settings Tab > Optimizations > FP8 weight and change it to `Enable`.

### LCM
[Latent Consistency Model](https://github.com/luosiallen/latent-consistency-model) is a recent breakthrough in the Stable Diffusion community. I provide a "gift" to everyone who updates this extension to >= [v1.12.1](https://github.com/continue-revolution/sd-webui-animatediff/releases/tag/v1.12.1) - you will find the `LCM` sampler in the normal place where you select samplers in WebUI. You can generate images / videos within 6-8 steps if you
- select the `Euler A` / `Euler` / `LCM` sampler (other samplers may also work, subject to further experiments)
- use [LCM LoRA](https://civitai.com/models/195519/lcm-lora-weights-stable-diffusion-acceleration-module)
- use a low CFG scale (1-2 is recommended)

Note that the LCM sampler is still under experiment and subject to change, in line with [@luosiallen](https://github.com/luosiallen)'s wishes.

Benefits of using this extension instead of [sd-webui-lcm](https://github.com/0xbitches/sd-webui-lcm) are
- you do not need to install diffusers
- you can use the LCM sampler with any other extensions, such as ControlNet and AnimateDiff
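
As a hedged example of these settings, a txt2img API payload using LCM might look like the sketch below; the LoRA file name and weight are placeholders, and only the `LCM` sampler name, the 6-8 steps and the low CFG scale come from the text above.
```
# Hedged example of the LCM settings above; the LoRA name and weight are placeholders.
payload = {
    "prompt": "1girl, walking in a garden <lora:lcm-lora-sdv1-5:1>",  # placeholder LCM LoRA tag
    "sampler_name": "LCM",  # sampler added by this extension (>= v1.12.1)
    "steps": 8,             # 6-8 steps as suggested above
    "cfg_scale": 1.5,       # low CFG (1-2) as suggested above
}
```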

### Others
- Remove any VRAM-heavy arguments such as `--no-half`. These arguments can significantly increase VRAM usage and reduce speed.
- Check `Batch cond/uncond` in `Settings/Optimization` to improve speed; uncheck it to reduce VRAM usage.


## Model Zoo
I maintain a [huggingface repo](https://huggingface.co/conrevo/AnimateDiff-A1111/tree/main) to provide all official models in fp16 & safetensors format. You are highly recommended to use my link. You MUST use my link to download the adapter for V3. You may still use the old links if you want, for all models except the V3 adapter.

- "Official" models by [@guoyww](https://github.com/guoyww): [Google Drive](https://drive.google.com/drive/folders/1EqLC65eR1-W-sGD0Im7fkED6c8GkiNFI) | [HuggingFace](https://huggingface.co/guoyww/animatediff/tree/main) | [CivitAI](https://civitai.com/models/108836)
- "Stabilized" community models by [@manshoety](https://huggingface.co/manshoety): [HuggingFace](https://huggingface.co/manshoety/AD_Stabilized_Motion/tree/main)
- "TemporalDiff" models by [@CiaraRowles](https://huggingface.co/CiaraRowles): [HuggingFace](https://huggingface.co/CiaraRowles/TemporalDiff/tree/main)
- "HotShotXL" models by [@hotshotco](https://huggingface.co/hotshotco/): [HuggingFace](https://huggingface.co/hotshotco/Hotshot-XL/tree/main)


## VRAM
Actual VRAM usage depends on your image size and context batch size. You can try to reduce image size or context batch size to reduce VRAM usage.

The following data are for SD1.5 + AnimateDiff, tested on Ubuntu 22.04, NVIDIA 4090, torch 2.0.1+cu117, H=W=512, frame=16 (default setting). `w/`/`w/o` means `Batch cond/uncond` in `Settings/Optimization` is checked/unchecked.
| Optimization | VRAM w/ | VRAM w/o |
| --- | --- | --- |
| No optimization | 12.13GB | |
| xformers/sdp | 5.60GB | 4.21GB |
| sub-quadratic | 10.39GB | |

For SDXL + HotShot + SDP, tested on Ubuntu 22.04, NVIDIA 4090, torch 2.0.1+cu117, H=W=512, frame=8 (default setting), you need 8.66GB VRAM.

For SDXL + AnimateDiff + SDP, tested on Ubuntu 22.04, NVIDIA 4090, torch 2.0.1+cu117, H=1024, W=768, frame=16, you need 13.87GB VRAM.


## Batch Size
Batch size on WebUI will be replaced by the GIF frame number internally: 1 full GIF is generated in 1 batch. If you want to generate multiple GIFs at once, please change the batch number.

Batch number is NOT the same as batch size. In A1111 WebUI, batch number is above batch size. Batch number means the number of sequential steps, while batch size means the number of parallel steps. You do not have to worry too much when you increase the batch number, but you do need to worry about your VRAM when you increase your batch size (which, in this extension, is the video frame number). You do not need to change batch size at all when you are using this extension.

We are currently developing an approach to support batch size on WebUI in the near future.


## Demo

### Basic Usage
| AnimateDiff | Extension | img2img |
| --- | --- | --- |
| ![image](https://user-images.githubusercontent.com/63914308/255306527-5105afe8-d497-4ab1-b5c4-37540e9601f8.gif) |![00013-10788741199826055000](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/43b9cf34-dbd1-4120-b220-ea8cb7882272) | ![00018-727621716](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/d04bb573-c8ca-4ae6-a2d9-81f8012bec3a) |

### Motion LoRA
| No LoRA | PanDown | PanLeft |
| --- | --- | --- |
| ![00094-1401397431](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/d8d2b860-c781-4dd0-8c0a-0eb26970130b) | ![00095-3197605735](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/aed2243f-5494-4fe3-a10a-96c57f6f2906) | ![00093-2722547708](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/c32e9aaf-54f2-4f40-879b-e800c7c7848c) |

### Prompt Travel
![00201-2296305953](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/881f317c-f1d2-4635-b84b-b4c4881650f6)

The prompt is similar to [above](#prompt-travel).

### AnimateDiff V3
You should be able to read the infotext to understand how I generated this sample.
![00024-3973810345](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/5f3e3858-8033-4a16-94b0-4dbc0d0a67fc)


### AnimateDiff SDXL
You should be able to read the infotext to understand how I generated this sample.
![00025-1668075705](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/6d32daf9-51c6-490f-a942-db36f84f23cf)

### ControlNet V2V
TODO


## Tutorial
TODO


## Thanks
I thank researchers from [Shanghai AI Lab](https://www.shlab.org.cn/), especially [@guoyww](https://github.com/guoyww), for creating AnimateDiff. I also thank [@neggles](https://github.com/neggles) and [@s9roll7](https://github.com/s9roll7) for creating and improving [AnimateDiff CLI Prompt Travel](https://github.com/s9roll7/animatediff-cli-prompt-travel). This extension could not have been made possible without these creative works.

I also thank community developers, especially
- [@zappityzap](https://github.com/zappityzap) who developed the majority of the [output features](https://github.com/continue-revolution/sd-webui-animatediff/blob/master/scripts/animatediff_output.py)
- [@TDS4874](https://github.com/TDS4874) and [@opparco](https://github.com/opparco) for resolving the grey issue, which significantly improves performance
- [@talesofai](https://github.com/talesofai) who developed i2v in [this forked repo](https://github.com/talesofai/AnimateDiff)
- [@rkfg](https://github.com/rkfg) for developing GIF palette optimization

and many others who have contributed to this extension.

I also thank community users, especially [@streamline](https://twitter.com/kaizirod) who provided the dataset and workflow for ControlNet V2V. His workflow is extremely amazing and definitely worth checking out.


## Star History
<a href="https://star-history.com/#continue-revolution/sd-webui-animatediff&Date">
  <picture>
    <source media="(prefers-color-scheme: dark)" srcset="https://api.star-history.com/svg?repos=continue-revolution/sd-webui-animatediff&type=Date&theme=dark" />
    <source media="(prefers-color-scheme: light)" srcset="https://api.star-history.com/svg?repos=continue-revolution/sd-webui-animatediff&type=Date" />
    <img alt="Star History Chart" src="https://api.star-history.com/svg?repos=continue-revolution/sd-webui-animatediff&type=Date" />
  </picture>
</a>


## Sponsor
You can sponsor me via WeChat, AliPay or [PayPal](https://paypal.me/conrevo). You can also support me via [patreon](https://www.patreon.com/conrevo), [ko-fi](https://ko-fi.com/conrevo) or [afdian](https://afdian.net/a/conrevo).

| WeChat | AliPay | PayPal |
| --- | --- | --- |
| ![216aff0250c7fd2bb32eeb4f7aae623](https://user-images.githubusercontent.com/63914308/232824466-21051be9-76ce-4862-bb0d-a431c186fce1.jpg) | ![15fe95b4ada738acf3e44c1d45a1805](https://user-images.githubusercontent.com/63914308/232824545-fb108600-729d-4204-8bec-4fd5cc8a14ec.jpg) | ![IMG_1419_](https://github.com/continue-revolution/sd-webui-animatediff/assets/63914308/eaa7b114-a2e6-4ecc-a29f-253ace06d1ea) |
extensions/sd-webui-animatediff/__pycache__/motion_module.cpython-310.pyc
ADDED
Binary file (17.6 kB)
extensions/sd-webui-animatediff/model/.gitkeep
ADDED
File without changes
extensions/sd-webui-animatediff/motion_module.py
ADDED
@@ -0,0 +1,657 @@
1 |
+
from enum import Enum
|
2 |
+
from typing import Optional
|
3 |
+
|
4 |
+
import torch
|
5 |
+
import torch.nn.functional as F
|
6 |
+
from torch import nn
|
7 |
+
|
8 |
+
from modules import sd_hijack, shared
|
9 |
+
from ldm.modules.attention import FeedForward
|
10 |
+
|
11 |
+
from einops import rearrange, repeat
|
12 |
+
import math
|
13 |
+
|
14 |
+
|
15 |
+
class MotionModuleType(Enum):
|
16 |
+
AnimateDiffV1 = "AnimateDiff V1, Yuwei GUo, Shanghai AI Lab"
|
17 |
+
AnimateDiffV2 = "AnimateDiff V2, Yuwei Guo, Shanghai AI Lab"
|
18 |
+
AnimateDiffV3 = "AnimateDiff V3, Yuwei Guo, Shanghai AI Lab"
|
19 |
+
AnimateDiffXL = "AnimateDiff SDXL, Yuwei Guo, Shanghai AI Lab"
|
20 |
+
HotShotXL = "HotShot-XL, John Mullan, Natural Synthetics Inc"
|
21 |
+
|
22 |
+
|
23 |
+
@staticmethod
|
24 |
+
def get_mm_type(state_dict: dict[str, torch.Tensor]):
|
25 |
+
keys = list(state_dict.keys())
|
26 |
+
if any(["mid_block" in k for k in keys]):
|
27 |
+
return MotionModuleType.AnimateDiffV2
|
28 |
+
elif any(["temporal_attentions" in k for k in keys]):
|
29 |
+
return MotionModuleType.HotShotXL
|
30 |
+
elif any(["down_blocks.3" in k for k in keys]):
|
31 |
+
if 32 in next((state_dict[key] for key in state_dict if 'pe' in key), None).shape:
|
32 |
+
return MotionModuleType.AnimateDiffV3
|
33 |
+
else:
|
34 |
+
return MotionModuleType.AnimateDiffV1
|
35 |
+
else:
|
36 |
+
return MotionModuleType.AnimateDiffXL
|
37 |
+
|
38 |
+
|
39 |
+
def zero_module(module):
|
40 |
+
# Zero out the parameters of a module and return it.
|
41 |
+
for p in module.parameters():
|
42 |
+
p.detach().zero_()
|
43 |
+
return module
|
44 |
+
|
45 |
+
|
46 |
+
class MotionWrapper(nn.Module):
|
47 |
+
def __init__(self, mm_name: str, mm_hash: str, mm_type: MotionModuleType):
|
48 |
+
super().__init__()
|
49 |
+
self.is_v2 = mm_type == MotionModuleType.AnimateDiffV2
|
50 |
+
self.is_v3 = mm_type == MotionModuleType.AnimateDiffV3
|
51 |
+
self.is_hotshot = mm_type == MotionModuleType.HotShotXL
|
52 |
+
self.is_adxl = mm_type == MotionModuleType.AnimateDiffXL
|
53 |
+
self.is_xl = self.is_hotshot or self.is_adxl
|
54 |
+
max_len = 32 if (self.is_v2 or self.is_adxl or self.is_v3) else 24
|
55 |
+
in_channels = (320, 640, 1280) if (self.is_xl) else (320, 640, 1280, 1280)
|
56 |
+
self.down_blocks = nn.ModuleList([])
|
57 |
+
self.up_blocks = nn.ModuleList([])
|
58 |
+
for c in in_channels:
|
59 |
+
self.down_blocks.append(MotionModule(c, num_mm=2, max_len=max_len, is_hotshot=self.is_hotshot))
|
60 |
+
self.up_blocks.insert(0,MotionModule(c, num_mm=3, max_len=max_len, is_hotshot=self.is_hotshot))
|
61 |
+
if self.is_v2:
|
62 |
+
self.mid_block = MotionModule(1280, num_mm=1, max_len=max_len)
|
63 |
+
self.mm_name = mm_name
|
64 |
+
self.mm_type = mm_type
|
65 |
+
self.mm_hash = mm_hash
|
66 |
+
|
67 |
+
|
68 |
+
def enable_gn_hack(self):
|
69 |
+
return not (self.is_adxl or self.is_v3)
|
70 |
+
|
71 |
+
|
72 |
+
class MotionModule(nn.Module):
|
73 |
+
def __init__(self, in_channels, num_mm, max_len, is_hotshot=False):
|
74 |
+
super().__init__()
|
75 |
+
motion_modules = nn.ModuleList([get_motion_module(in_channels, max_len, is_hotshot) for _ in range(num_mm)])
|
76 |
+
if is_hotshot:
|
77 |
+
self.temporal_attentions = motion_modules
|
78 |
+
else:
|
79 |
+
self.motion_modules = motion_modules
|
80 |
+
|
81 |
+
|
82 |
+
|
83 |
+
def get_motion_module(in_channels, max_len, is_hotshot):
|
84 |
+
vtm = VanillaTemporalModule(in_channels=in_channels, temporal_position_encoding_max_len=max_len, is_hotshot=is_hotshot)
|
85 |
+
return vtm.temporal_transformer if is_hotshot else vtm
|
86 |
+
|
87 |
+
|
88 |
+
class VanillaTemporalModule(nn.Module):
|
89 |
+
def __init__(
|
90 |
+
self,
|
91 |
+
in_channels,
|
92 |
+
num_attention_heads = 8,
|
93 |
+
num_transformer_block = 1,
|
94 |
+
attention_block_types =( "Temporal_Self", "Temporal_Self" ),
|
95 |
+
cross_frame_attention_mode = None,
|
96 |
+
temporal_position_encoding = True,
|
97 |
+
temporal_position_encoding_max_len = 24,
|
98 |
+
temporal_attention_dim_div = 1,
|
99 |
+
zero_initialize = True,
|
100 |
+
is_hotshot = False,
|
101 |
+
):
|
102 |
+
super().__init__()
|
103 |
+
|
104 |
+
self.temporal_transformer = TemporalTransformer3DModel(
|
105 |
+
in_channels=in_channels,
|
106 |
+
num_attention_heads=num_attention_heads,
|
107 |
+
attention_head_dim=in_channels // num_attention_heads // temporal_attention_dim_div,
|
108 |
+
num_layers=num_transformer_block,
|
109 |
+
attention_block_types=attention_block_types,
|
110 |
+
cross_frame_attention_mode=cross_frame_attention_mode,
|
111 |
+
temporal_position_encoding=temporal_position_encoding,
|
112 |
+
temporal_position_encoding_max_len=temporal_position_encoding_max_len,
|
113 |
+
is_hotshot=is_hotshot,
|
114 |
+
)
|
115 |
+
|
116 |
+
if zero_initialize:
|
117 |
+
self.temporal_transformer.proj_out = zero_module(self.temporal_transformer.proj_out)
|
118 |
+
|
119 |
+
|
120 |
+
def forward(self, input_tensor, encoder_hidden_states=None, attention_mask=None): # TODO: encoder_hidden_states do seem to be always None
|
121 |
+
return self.temporal_transformer(input_tensor, encoder_hidden_states, attention_mask)
|
122 |
+
|
123 |
+
|
124 |
+
class TemporalTransformer3DModel(nn.Module):
|
125 |
+
def __init__(
|
126 |
+
self,
|
127 |
+
in_channels,
|
128 |
+
num_attention_heads,
|
129 |
+
attention_head_dim,
|
130 |
+
|
131 |
+
num_layers,
|
132 |
+
attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
|
133 |
+
dropout = 0.0,
|
134 |
+
norm_num_groups = 32,
|
135 |
+
cross_attention_dim = 768,
|
136 |
+
activation_fn = "geglu",
|
137 |
+
attention_bias = False,
|
138 |
+
upcast_attention = False,
|
139 |
+
|
140 |
+
cross_frame_attention_mode = None,
|
141 |
+
temporal_position_encoding = False,
|
142 |
+
temporal_position_encoding_max_len = 24,
|
143 |
+
is_hotshot = False,
|
144 |
+
):
|
145 |
+
super().__init__()
|
146 |
+
|
147 |
+
inner_dim = num_attention_heads * attention_head_dim
|
148 |
+
|
149 |
+
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
|
150 |
+
self.proj_in = nn.Linear(in_channels, inner_dim)
|
151 |
+
|
152 |
+
self.transformer_blocks = nn.ModuleList(
|
153 |
+
[
|
154 |
+
TemporalTransformerBlock(
|
155 |
+
dim=inner_dim,
|
156 |
+
num_attention_heads=num_attention_heads,
|
157 |
+
attention_head_dim=attention_head_dim,
|
158 |
+
attention_block_types=attention_block_types,
|
159 |
+
dropout=dropout,
|
160 |
+
norm_num_groups=norm_num_groups,
|
161 |
+
cross_attention_dim=cross_attention_dim,
|
162 |
+
activation_fn=activation_fn,
|
163 |
+
attention_bias=attention_bias,
|
164 |
+
upcast_attention=upcast_attention,
|
165 |
+
cross_frame_attention_mode=cross_frame_attention_mode,
|
166 |
+
temporal_position_encoding=temporal_position_encoding,
|
167 |
+
temporal_position_encoding_max_len=temporal_position_encoding_max_len,
|
168 |
+
is_hotshot=is_hotshot,
|
169 |
+
)
|
170 |
+
for d in range(num_layers)
|
171 |
+
]
|
172 |
+
)
|
173 |
+
self.proj_out = nn.Linear(inner_dim, in_channels)
|
174 |
+
|
175 |
+
def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None):
|
176 |
+
video_length = hidden_states.shape[0] // (2 if shared.opts.batch_cond_uncond else 1)
|
177 |
+
batch, channel, height, width = hidden_states.shape
|
178 |
+
residual = hidden_states
|
179 |
+
|
180 |
+
hidden_states = self.norm(hidden_states).type(hidden_states.dtype)
|
181 |
+
inner_dim = hidden_states.shape[1]
|
182 |
+
hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
|
183 |
+
hidden_states = self.proj_in(hidden_states)
|
184 |
+
|
185 |
+
# Transformer Blocks
|
186 |
+
for block in self.transformer_blocks:
|
187 |
+
hidden_states = block(hidden_states, encoder_hidden_states=encoder_hidden_states, video_length=video_length)
|
188 |
+
|
189 |
+
# output
|
190 |
+
hidden_states = self.proj_out(hidden_states)
|
191 |
+
hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
|
192 |
+
|
193 |
+
output = hidden_states + residual
|
194 |
+
return output
|
195 |
+
|
196 |
+
|
197 |
+
class TemporalTransformerBlock(nn.Module):
|
198 |
+
def __init__(
|
199 |
+
self,
|
200 |
+
dim,
|
201 |
+
num_attention_heads,
|
202 |
+
attention_head_dim,
|
203 |
+
attention_block_types = ( "Temporal_Self", "Temporal_Self", ),
|
204 |
+
dropout = 0.0,
|
205 |
+
norm_num_groups = 32,
|
206 |
+
cross_attention_dim = 768,
|
207 |
+
activation_fn = "geglu",
|
208 |
+
attention_bias = False,
|
209 |
+
upcast_attention = False,
|
210 |
+
cross_frame_attention_mode = None,
|
211 |
+
temporal_position_encoding = False,
|
212 |
+
temporal_position_encoding_max_len = 24,
|
213 |
+
is_hotshot = False,
|
214 |
+
):
|
215 |
+
super().__init__()
|
216 |
+
|
217 |
+
attention_blocks = []
|
218 |
+
norms = []
|
219 |
+
|
220 |
+
for block_name in attention_block_types:
|
221 |
+
attention_blocks.append(
|
222 |
+
VersatileAttention(
|
223 |
+
attention_mode=block_name.split("_")[0],
|
224 |
+
cross_attention_dim=cross_attention_dim if block_name.endswith("_Cross") else None,
|
225 |
+
|
226 |
+
query_dim=dim,
|
227 |
+
heads=num_attention_heads,
|
228 |
+
dim_head=attention_head_dim,
|
229 |
+
dropout=dropout,
|
230 |
+
bias=attention_bias,
|
231 |
+
upcast_attention=upcast_attention,
|
232 |
+
|
233 |
+
cross_frame_attention_mode=cross_frame_attention_mode,
|
234 |
+
temporal_position_encoding=temporal_position_encoding,
|
235 |
+
temporal_position_encoding_max_len=temporal_position_encoding_max_len,
|
236 |
+
is_hotshot=is_hotshot,
|
237 |
+
)
|
238 |
+
)
|
239 |
+
norms.append(nn.LayerNorm(dim))
|
240 |
+
|
241 |
+
self.attention_blocks = nn.ModuleList(attention_blocks)
|
242 |
+
self.norms = nn.ModuleList(norms)
|
243 |
+
|
244 |
+
self.ff = FeedForward(dim, dropout=dropout, glu=(activation_fn=='geglu'))
|
245 |
+
self.ff_norm = nn.LayerNorm(dim)
|
246 |
+
|
247 |
+
|
248 |
+
def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
|
249 |
+
for attention_block, norm in zip(self.attention_blocks, self.norms):
|
250 |
+
norm_hidden_states = norm(hidden_states).type(hidden_states.dtype)
|
251 |
+
hidden_states = attention_block(
|
252 |
+
norm_hidden_states,
|
253 |
+
encoder_hidden_states=encoder_hidden_states if attention_block.is_cross_attention else None,
|
254 |
+
video_length=video_length,
|
255 |
+
) + hidden_states
|
256 |
+
|
257 |
+
hidden_states = self.ff(self.ff_norm(hidden_states).type(hidden_states.dtype)) + hidden_states
|
258 |
+
|
259 |
+
output = hidden_states
|
260 |
+
return output
|
261 |
+
|
262 |
+
|
263 |
+
class PositionalEncoding(nn.Module):
|
264 |
+
def __init__(
|
265 |
+
self,
|
266 |
+
d_model,
|
267 |
+
dropout = 0.,
|
268 |
+
max_len = 24,
|
269 |
+
is_hotshot = False,
|
270 |
+
):
|
271 |
+
super().__init__()
|
272 |
+
self.dropout = nn.Dropout(p=dropout)
|
273 |
+
position = torch.arange(max_len).unsqueeze(1)
|
274 |
+
div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))
|
275 |
+
pe = torch.zeros(1, max_len, d_model)
|
276 |
+
pe[0, :, 0::2] = torch.sin(position * div_term)
|
277 |
+
pe[0, :, 1::2] = torch.cos(position * div_term)
|
278 |
+
self.register_buffer('positional_encoding' if is_hotshot else 'pe', pe)
|
279 |
+
self.is_hotshot = is_hotshot
|
280 |
+
|
281 |
+
def forward(self, x):
|
282 |
+
x = x + (self.positional_encoding[:, :x.size(1)] if self.is_hotshot else self.pe[:, :x.size(1)])
|
283 |
+
return self.dropout(x)
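# PositionalEncoding adds the standard sinusoidal position table along the frame axis:
# pe[pos, 2i] = sin(pos / 10000^(2i/d_model)), pe[pos, 2i+1] = cos(pos / 10000^(2i/d_model)),
# truncated to the first x.size(1) frames on each forward pass. The buffer is registered
# under 'positional_encoding' for HotShot-XL and 'pe' otherwise, presumably so the
# state_dict keys line up with the corresponding motion-module checkpoints.
# Minimal usage sketch (d_model and shapes below are illustrative, not taken from this file):
#   pos_enc = PositionalEncoding(d_model=320, max_len=24)
#   frames = torch.zeros(2, 16, 320)   # (batch * spatial tokens, frames, channels)
#   frames = pos_enc(frames)           # adds sin/cos positions for the 16 frames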
|
284 |
+
|
285 |
+
|
286 |
+
class CrossAttention(nn.Module):
|
287 |
+
r"""
|
288 |
+
A cross attention layer.
|
289 |
+
|
290 |
+
Parameters:
|
291 |
+
query_dim (`int`): The number of channels in the query.
|
292 |
+
cross_attention_dim (`int`, *optional*):
|
293 |
+
The number of channels in the encoder_hidden_states. If not given, defaults to `query_dim`.
|
294 |
+
heads (`int`, *optional*, defaults to 8): The number of heads to use for multi-head attention.
|
295 |
+
dim_head (`int`, *optional*, defaults to 64): The number of channels in each head.
|
296 |
+
dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
|
297 |
+
bias (`bool`, *optional*, defaults to False):
|
298 |
+
Set to `True` for the query, key, and value linear layers to contain a bias parameter.
|
299 |
+
"""
|
300 |
+
|
301 |
+
def __init__(
|
302 |
+
self,
|
303 |
+
query_dim: int,
|
304 |
+
cross_attention_dim: Optional[int] = None,
|
305 |
+
heads: int = 8,
|
306 |
+
dim_head: int = 64,
|
307 |
+
dropout: float = 0.0,
|
308 |
+
bias=False,
|
309 |
+
upcast_attention: bool = False,
|
310 |
+
upcast_softmax: bool = False,
|
311 |
+
added_kv_proj_dim: Optional[int] = None,
|
312 |
+
norm_num_groups: Optional[int] = None,
|
313 |
+
):
|
314 |
+
super().__init__()
|
315 |
+
inner_dim = dim_head * heads
|
316 |
+
cross_attention_dim = cross_attention_dim if cross_attention_dim is not None else query_dim
|
317 |
+
self.upcast_attention = upcast_attention
|
318 |
+
self.upcast_softmax = upcast_softmax
|
319 |
+
|
320 |
+
self.scale = dim_head**-0.5
|
321 |
+
|
322 |
+
self.heads = heads
|
323 |
+
# for slice_size > 0 the attention score computation
|
324 |
+
# is split across the batch axis to save memory
|
325 |
+
# You can set slice_size with `set_attention_slice`
|
326 |
+
self.sliceable_head_dim = heads
|
327 |
+
self._slice_size = None
|
328 |
+
|
329 |
+
self.added_kv_proj_dim = added_kv_proj_dim
|
330 |
+
|
331 |
+
if norm_num_groups is not None:
|
332 |
+
self.group_norm = nn.GroupNorm(num_channels=inner_dim, num_groups=norm_num_groups, eps=1e-5, affine=True)
|
333 |
+
else:
|
334 |
+
self.group_norm = None
|
335 |
+
|
336 |
+
self.to_q = nn.Linear(query_dim, inner_dim, bias=bias)
|
337 |
+
self.to_k = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
|
338 |
+
self.to_v = nn.Linear(cross_attention_dim, inner_dim, bias=bias)
|
339 |
+
|
340 |
+
if self.added_kv_proj_dim is not None:
|
341 |
+
self.add_k_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
|
342 |
+
self.add_v_proj = nn.Linear(added_kv_proj_dim, cross_attention_dim)
|
343 |
+
|
344 |
+
self.to_out = nn.ModuleList([])
|
345 |
+
self.to_out.append(nn.Linear(inner_dim, query_dim))
|
346 |
+
self.to_out.append(nn.Dropout(dropout))
|
347 |
+
|
348 |
+
def reshape_heads_to_batch_dim(self, tensor):
|
349 |
+
batch_size, seq_len, dim = tensor.shape
|
350 |
+
head_size = self.heads
|
351 |
+
tensor = tensor.reshape(batch_size, seq_len, head_size, dim // head_size)
|
352 |
+
tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size * head_size, seq_len, dim // head_size)
|
353 |
+
return tensor
|
354 |
+
|
355 |
+
def reshape_batch_dim_to_heads(self, tensor):
|
356 |
+
batch_size, seq_len, dim = tensor.shape
|
357 |
+
head_size = self.heads
|
358 |
+
tensor = tensor.reshape(batch_size // head_size, head_size, seq_len, dim)
|
359 |
+
tensor = tensor.permute(0, 2, 1, 3).reshape(batch_size // head_size, seq_len, dim * head_size)
|
360 |
+
return tensor
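# The two reshape helpers above fold the head dimension into the batch dimension and back:
# reshape_heads_to_batch_dim maps (B, S, H*Dh) -> (B*H, S, Dh) so every head becomes its own
# batched matmul; reshape_batch_dim_to_heads is the exact inverse, (B*H, S, Dh) -> (B, S, H*Dh).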
|
361 |
+
|
362 |
+
def set_attention_slice(self, slice_size):
|
363 |
+
if slice_size is not None and slice_size > self.sliceable_head_dim:
|
364 |
+
raise ValueError(f"slice_size {slice_size} has to be smaller or equal to {self.sliceable_head_dim}.")
|
365 |
+
|
366 |
+
self._slice_size = slice_size
|
367 |
+
|
368 |
+
def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None):
|
369 |
+
batch_size, sequence_length, _ = hidden_states.shape
|
370 |
+
|
371 |
+
encoder_hidden_states = encoder_hidden_states
|
372 |
+
|
373 |
+
if self.group_norm is not None:
|
374 |
+
hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2).type(hidden_states.dtype)
|
375 |
+
|
376 |
+
query = self.to_q(hidden_states)
|
377 |
+
dim = query.shape[-1]
|
378 |
+
query = self.reshape_heads_to_batch_dim(query)
|
379 |
+
|
380 |
+
if self.added_kv_proj_dim is not None:
|
381 |
+
key = self.to_k(hidden_states)
|
382 |
+
value = self.to_v(hidden_states)
|
383 |
+
encoder_hidden_states_key_proj = self.add_k_proj(encoder_hidden_states)
|
384 |
+
encoder_hidden_states_value_proj = self.add_v_proj(encoder_hidden_states)
|
385 |
+
|
386 |
+
key = self.reshape_heads_to_batch_dim(key)
|
387 |
+
value = self.reshape_heads_to_batch_dim(value)
|
388 |
+
encoder_hidden_states_key_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_key_proj)
|
389 |
+
encoder_hidden_states_value_proj = self.reshape_heads_to_batch_dim(encoder_hidden_states_value_proj)
|
390 |
+
|
391 |
+
key = torch.concat([encoder_hidden_states_key_proj, key], dim=1)
|
392 |
+
value = torch.concat([encoder_hidden_states_value_proj, value], dim=1)
|
393 |
+
else:
|
394 |
+
encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
|
395 |
+
key = self.to_k(encoder_hidden_states)
|
396 |
+
value = self.to_v(encoder_hidden_states)
|
397 |
+
|
398 |
+
key = self.reshape_heads_to_batch_dim(key)
|
399 |
+
value = self.reshape_heads_to_batch_dim(value)
|
400 |
+
|
401 |
+
if attention_mask is not None:
|
402 |
+
if attention_mask.shape[-1] != query.shape[1]:
|
403 |
+
target_length = query.shape[1]
|
404 |
+
attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
|
405 |
+
attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
|
406 |
+
|
407 |
+
# attention, what we cannot get enough of
|
408 |
+
if sd_hijack.current_optimizer is not None and sd_hijack.current_optimizer.name in ["xformers", "sdp", "sdp-no-mem", "sub-quadratic"]:
|
409 |
+
hidden_states = self._memory_efficient_attention(query, key, value, attention_mask, sd_hijack.current_optimizer.name)
|
410 |
+
# Some versions of xformers return output in fp32, cast it back to the dtype of the input
|
411 |
+
hidden_states = hidden_states.to(query.dtype)
|
412 |
+
else:
|
413 |
+
if self._slice_size is None or query.shape[0] // self._slice_size == 1:
|
414 |
+
hidden_states = self._attention(query, key, value, attention_mask)
|
415 |
+
else:
|
416 |
+
hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)
|
417 |
+
|
418 |
+
# linear proj
|
419 |
+
hidden_states = self.to_out[0](hidden_states)
|
420 |
+
|
421 |
+
# dropout
|
422 |
+
hidden_states = self.to_out[1](hidden_states)
|
423 |
+
return hidden_states
|
424 |
+
|
425 |
+
def _attention(self, query, key, value, attention_mask=None):
|
426 |
+
if self.upcast_attention:
|
427 |
+
query = query.float()
|
428 |
+
key = key.float()
|
429 |
+
|
430 |
+
attention_scores = torch.baddbmm(
|
431 |
+
torch.empty(query.shape[0], query.shape[1], key.shape[1], dtype=query.dtype, device=query.device),
|
432 |
+
query,
|
433 |
+
key.transpose(-1, -2),
|
434 |
+
beta=0,
|
435 |
+
alpha=self.scale,
|
436 |
+
)
|
437 |
+
|
438 |
+
if attention_mask is not None:
|
439 |
+
attention_scores = attention_scores + attention_mask
|
440 |
+
|
441 |
+
if self.upcast_softmax:
|
442 |
+
attention_scores = attention_scores.float()
|
443 |
+
|
444 |
+
attention_probs = attention_scores.softmax(dim=-1)
|
445 |
+
|
446 |
+
# cast back to the original dtype
|
447 |
+
attention_probs = attention_probs.to(value.dtype)
|
448 |
+
|
449 |
+
# compute attention output
|
450 |
+
hidden_states = torch.bmm(attention_probs, value)
|
451 |
+
|
452 |
+
# reshape hidden_states
|
453 |
+
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
|
454 |
+
return hidden_states
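# _attention is plain scaled dot-product attention: baddbmm with beta=0 ignores the
# uninitialized input tensor and computes scores = (Q @ K^T) * scale, an optional additive
# attention_mask is applied, the softmax is optionally upcast to fp32, the probabilities are
# multiplied into V, and the heads are finally unfolded from the batch axis.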
|
455 |
+
|
456 |
+
def _sliced_attention(self, query, key, value, sequence_length, dim, attention_mask):
|
457 |
+
batch_size_attention = query.shape[0]
|
458 |
+
hidden_states = torch.zeros(
|
459 |
+
(batch_size_attention, sequence_length, dim // self.heads), device=query.device, dtype=query.dtype
|
460 |
+
)
|
461 |
+
slice_size = self._slice_size if self._slice_size is not None else hidden_states.shape[0]
|
462 |
+
for i in range(hidden_states.shape[0] // slice_size):
|
463 |
+
start_idx = i * slice_size
|
464 |
+
end_idx = (i + 1) * slice_size
|
465 |
+
|
466 |
+
query_slice = query[start_idx:end_idx]
|
467 |
+
key_slice = key[start_idx:end_idx]
|
468 |
+
|
469 |
+
if self.upcast_attention:
|
470 |
+
query_slice = query_slice.float()
|
471 |
+
key_slice = key_slice.float()
|
472 |
+
|
473 |
+
attn_slice = torch.baddbmm(
|
474 |
+
torch.empty(slice_size, query.shape[1], key.shape[1], dtype=query_slice.dtype, device=query.device),
|
475 |
+
query_slice,
|
476 |
+
key_slice.transpose(-1, -2),
|
477 |
+
beta=0,
|
478 |
+
alpha=self.scale,
|
479 |
+
)
|
480 |
+
|
481 |
+
if attention_mask is not None:
|
482 |
+
attn_slice = attn_slice + attention_mask[start_idx:end_idx]
|
483 |
+
|
484 |
+
if self.upcast_softmax:
|
485 |
+
attn_slice = attn_slice.float()
|
486 |
+
|
487 |
+
attn_slice = attn_slice.softmax(dim=-1)
|
488 |
+
|
489 |
+
# cast back to the original dtype
|
490 |
+
attn_slice = attn_slice.to(value.dtype)
|
491 |
+
attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])
|
492 |
+
|
493 |
+
hidden_states[start_idx:end_idx] = attn_slice
|
494 |
+
|
495 |
+
# reshape hidden_states
|
496 |
+
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
|
497 |
+
return hidden_states
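# _sliced_attention computes the same result as _attention but iterates over the
# (batch * heads) axis in chunks of self._slice_size, so only one slice of the
# (seq_len x seq_len) score matrix has to be materialized at a time.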
|
498 |
+
|
499 |
+
def _memory_efficient_attention(self, q, k, v, mask, current_optimizer_name):
|
500 |
+
# TODO attention_mask
|
501 |
+
q = q.contiguous()
|
502 |
+
k = k.contiguous()
|
503 |
+
v = v.contiguous()
|
504 |
+
|
505 |
+
fallthrough = False
|
506 |
+
|
507 |
+
if current_optimizer_name == "xformers" or fallthrough:
|
508 |
+
fallthrough = False
|
509 |
+
try:
|
510 |
+
import xformers.ops
|
511 |
+
from modules.sd_hijack_optimizations import get_xformers_flash_attention_op
|
512 |
+
hidden_states = xformers.ops.memory_efficient_attention(
|
513 |
+
q, k, v, attn_bias=mask,
|
514 |
+
op=get_xformers_flash_attention_op(q, k, v))
|
515 |
+
except (ImportError, RuntimeError, AttributeError):
|
516 |
+
fallthrough = True
|
517 |
+
|
518 |
+
if current_optimizer_name == "sdp" or fallthrough:
|
519 |
+
fallthrough = False
|
520 |
+
try:
|
521 |
+
hidden_states = torch.nn.functional.scaled_dot_product_attention(
|
522 |
+
q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False
|
523 |
+
)
|
524 |
+
except (ImportError, RuntimeError, AttributeError):
|
525 |
+
fallthrough = True
|
526 |
+
|
527 |
+
if current_optimizer_name == "sdp-no-mem" or fallthrough:
|
528 |
+
fallthrough = False
|
529 |
+
try:
|
530 |
+
with torch.backends.cuda.sdp_kernel(enable_flash=True, enable_math=True, enable_mem_efficient=False):
|
531 |
+
hidden_states = torch.nn.functional.scaled_dot_product_attention(
|
532 |
+
q, k, v, attn_mask=mask, dropout_p=0.0, is_causal=False
|
533 |
+
)
|
534 |
+
except (ImportError, RuntimeError, AttributeError):
|
535 |
+
fallthrough = True
|
536 |
+
|
537 |
+
if current_optimizer_name == "sub-quadratic" or fallthrough:
|
538 |
+
fallthrough = False
|
539 |
+
try:
|
540 |
+
from modules.sd_hijack_optimizations import sub_quad_attention
|
541 |
+
from modules import shared
|
542 |
+
hidden_states = sub_quad_attention(
|
543 |
+
q, k, v,
|
544 |
+
q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size,
|
545 |
+
kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size,
|
546 |
+
chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold,
|
547 |
+
use_checkpoint=self.training
|
548 |
+
)
|
549 |
+
except (ImportError, RuntimeError, AttributeError):
|
550 |
+
fallthrough = True
|
551 |
+
|
552 |
+
if fallthrough:
fallthrough = False
# final fallback: plain (optionally sliced) attention on the already head-folded q/k/v
if self._slice_size is None or q.shape[0] // self._slice_size == 1:
hidden_states = self._attention(q, k, v, mask)
else:
hidden_states = self._sliced_attention(q, k, v, q.shape[1], q.shape[-1] * self.heads, mask)
return hidden_states
|
559 |
+
|
560 |
+
hidden_states = self.reshape_batch_dim_to_heads(hidden_states)
|
561 |
+
return hidden_states
|
562 |
+
|
563 |
+
|
564 |
+
class VersatileAttention(CrossAttention):
|
565 |
+
def __init__(
|
566 |
+
self,
|
567 |
+
attention_mode = None,
|
568 |
+
cross_frame_attention_mode = None,
|
569 |
+
temporal_position_encoding = False,
|
570 |
+
temporal_position_encoding_max_len = 24,
|
571 |
+
is_hotshot = False,
|
572 |
+
*args, **kwargs
|
573 |
+
):
|
574 |
+
super().__init__(*args, **kwargs)
|
575 |
+
assert attention_mode == "Temporal"
|
576 |
+
|
577 |
+
self.attention_mode = attention_mode
|
578 |
+
self.is_cross_attention = kwargs["cross_attention_dim"] is not None
|
579 |
+
|
580 |
+
self.pos_encoder = PositionalEncoding(
|
581 |
+
kwargs["query_dim"],
|
582 |
+
dropout=0.,
|
583 |
+
max_len=temporal_position_encoding_max_len,
|
584 |
+
is_hotshot=is_hotshot,
|
585 |
+
) if (temporal_position_encoding and attention_mode == "Temporal") else None
|
586 |
+
|
587 |
+
def extra_repr(self):
|
588 |
+
return f"(Module Info) Attention_Mode: {self.attention_mode}, Is_Cross_Attention: {self.is_cross_attention}"
|
589 |
+
|
590 |
+
def forward(self, hidden_states, encoder_hidden_states=None, attention_mask=None, video_length=None):
|
591 |
+
batch_size, sequence_length, _ = hidden_states.shape
|
592 |
+
|
593 |
+
if self.attention_mode == "Temporal":
|
594 |
+
d = hidden_states.shape[1]
|
595 |
+
hidden_states = rearrange(hidden_states, "(b f) d c -> (b d) f c", f=video_length)
|
596 |
+
|
597 |
+
if self.pos_encoder is not None:
|
598 |
+
hidden_states = self.pos_encoder(hidden_states)
|
599 |
+
|
600 |
+
encoder_hidden_states = repeat(encoder_hidden_states, "b n c -> (b d) n c", d=d) if encoder_hidden_states is not None else encoder_hidden_states
|
601 |
+
else:
|
602 |
+
raise NotImplementedError
|
603 |
+
|
604 |
+
encoder_hidden_states = encoder_hidden_states
|
605 |
+
|
606 |
+
if self.group_norm is not None:
|
607 |
+
hidden_states = self.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2).type(hidden_states.dtype)
|
608 |
+
|
609 |
+
query = self.to_q(hidden_states)
|
610 |
+
dim = query.shape[-1]
|
611 |
+
query = self.reshape_heads_to_batch_dim(query)
|
612 |
+
|
613 |
+
if self.added_kv_proj_dim is not None:
|
614 |
+
raise NotImplementedError
|
615 |
+
|
616 |
+
encoder_hidden_states = encoder_hidden_states if encoder_hidden_states is not None else hidden_states
|
617 |
+
key = self.to_k(encoder_hidden_states)
|
618 |
+
value = self.to_v(encoder_hidden_states)
|
619 |
+
|
620 |
+
key = self.reshape_heads_to_batch_dim(key)
|
621 |
+
value = self.reshape_heads_to_batch_dim(value)
|
622 |
+
|
623 |
+
if attention_mask is not None:
|
624 |
+
if attention_mask.shape[-1] != query.shape[1]:
|
625 |
+
target_length = query.shape[1]
|
626 |
+
attention_mask = F.pad(attention_mask, (0, target_length), value=0.0)
|
627 |
+
attention_mask = attention_mask.repeat_interleave(self.heads, dim=0)
|
628 |
+
|
629 |
+
xformers_option = shared.opts.data.get("animatediff_xformers", "Optimize attention layers with xformers")
|
630 |
+
optimizer_collections = ["xformers", "sdp", "sdp-no-mem", "sub-quadratic"]
|
631 |
+
if xformers_option == "Do not optimize attention layers": # "Do not optimize attention layers"
|
632 |
+
optimizer_collections = optimizer_collections[1:]
|
633 |
+
|
634 |
+
# attention, what we cannot get enough of
|
635 |
+
if sd_hijack.current_optimizer is not None and sd_hijack.current_optimizer.name in optimizer_collections:
|
636 |
+
optimizer_name = sd_hijack.current_optimizer.name
|
637 |
+
if xformers_option == "Optimize attention layers with sdp (torch >= 2.0.0 required)" and optimizer_name == "xformers":
|
638 |
+
optimizer_name = "sdp" # "Optimize attention layers with sdp (torch >= 2.0.0 required)"
|
639 |
+
hidden_states = self._memory_efficient_attention(query, key, value, attention_mask, optimizer_name)
|
640 |
+
# Some versions of xformers return output in fp32, cast it back to the dtype of the input
|
641 |
+
hidden_states = hidden_states.to(query.dtype)
|
642 |
+
else:
|
643 |
+
if self._slice_size is None or query.shape[0] // self._slice_size == 1:
|
644 |
+
hidden_states = self._attention(query, key, value, attention_mask)
|
645 |
+
else:
|
646 |
+
hidden_states = self._sliced_attention(query, key, value, sequence_length, dim, attention_mask)
|
647 |
+
|
648 |
+
# linear proj
|
649 |
+
hidden_states = self.to_out[0](hidden_states)
|
650 |
+
|
651 |
+
# dropout
|
652 |
+
hidden_states = self.to_out[1](hidden_states)
|
653 |
+
|
654 |
+
if self.attention_mode == "Temporal":
|
655 |
+
hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d)
|
656 |
+
|
657 |
+
return hidden_states
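# VersatileAttention runs self-attention along the frame axis: the einops rearrange
# "(b f) d c -> (b d) f c" folds every spatial token into the batch dimension so that
# attention sees the f frames at each location, and "(b d) f c -> (b f) d c" restores
# the original layout afterwards. Illustrative shapes only:
#   x = torch.randn(batch * frames, tokens, channels)
#   x = rearrange(x, "(b f) d c -> (b d) f c", f=frames)   # attend over frames
#   x = rearrange(x, "(b d) f c -> (b f) d c", d=tokens)   # back to per-frame tokens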
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff.cpython-310.pyc
ADDED
Binary file (7.49 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_cn.cpython-310.pyc
ADDED
Binary file (19.2 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_i2ibatch.cpython-310.pyc
ADDED
Binary file (9.34 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_infotext.cpython-310.pyc
ADDED
Binary file (1.53 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_infv2v.cpython-310.pyc
ADDED
Binary file (12.4 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_latent.cpython-310.pyc
ADDED
Binary file (2.66 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_lcm.cpython-310.pyc
ADDED
Binary file (5.9 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_logger.cpython-310.pyc
ADDED
Binary file (1.29 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_lora.cpython-310.pyc
ADDED
Binary file (2.77 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_mm.cpython-310.pyc
ADDED
Binary file (6.79 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_output.cpython-310.pyc
ADDED
Binary file (11.2 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_prompt.cpython-310.pyc
ADDED
Binary file (5.22 kB).
extensions/sd-webui-animatediff/scripts/__pycache__/animatediff_ui.cpython-310.pyc
ADDED
Binary file (9.55 kB).
extensions/sd-webui-animatediff/scripts/animatediff.py
ADDED
@@ -0,0 +1,279 @@
1 |
+
import gradio as gr
|
2 |
+
from modules import script_callbacks, scripts, shared
|
3 |
+
from modules.processing import (Processed, StableDiffusionProcessing,
|
4 |
+
StableDiffusionProcessingImg2Img)
|
5 |
+
from modules.scripts import PostprocessBatchListArgs, PostprocessImageArgs
|
6 |
+
|
7 |
+
from scripts.animatediff_cn import AnimateDiffControl
|
8 |
+
from scripts.animatediff_infv2v import AnimateDiffInfV2V
|
9 |
+
from scripts.animatediff_latent import AnimateDiffI2VLatent
|
10 |
+
from scripts.animatediff_logger import logger_animatediff as logger
|
11 |
+
from scripts.animatediff_lora import AnimateDiffLora
|
12 |
+
from scripts.animatediff_mm import mm_animatediff as motion_module
|
13 |
+
from scripts.animatediff_prompt import AnimateDiffPromptSchedule
|
14 |
+
from scripts.animatediff_output import AnimateDiffOutput
|
15 |
+
from scripts.animatediff_ui import AnimateDiffProcess, AnimateDiffUiGroup
|
16 |
+
from scripts.animatediff_infotext import update_infotext
|
17 |
+
|
18 |
+
script_dir = scripts.basedir()
|
19 |
+
motion_module.set_script_dir(script_dir)
|
20 |
+
|
21 |
+
|
22 |
+
class AnimateDiffScript(scripts.Script):
|
23 |
+
|
24 |
+
def __init__(self):
|
25 |
+
self.lora_hacker = None
|
26 |
+
self.cfg_hacker = None
|
27 |
+
self.cn_hacker = None
|
28 |
+
self.prompt_scheduler = None
|
29 |
+
self.hacked = False
|
30 |
+
|
31 |
+
|
32 |
+
def title(self):
|
33 |
+
return "AnimateDiff"
|
34 |
+
|
35 |
+
|
36 |
+
def show(self, is_img2img):
|
37 |
+
return scripts.AlwaysVisible
|
38 |
+
|
39 |
+
|
40 |
+
def ui(self, is_img2img):
|
41 |
+
return (AnimateDiffUiGroup().render(is_img2img, motion_module.get_model_dir()),)
|
42 |
+
|
43 |
+
|
44 |
+
def before_process(self, p: StableDiffusionProcessing, params: AnimateDiffProcess):
|
45 |
+
if p.is_api and isinstance(params, dict):
|
46 |
+
self.ad_params = AnimateDiffProcess(**params)
|
47 |
+
params = self.ad_params
|
48 |
+
if params.enable:
|
49 |
+
logger.info("AnimateDiff process start.")
|
50 |
+
params.set_p(p)
|
51 |
+
motion_module.inject(p.sd_model, params.model)
|
52 |
+
self.prompt_scheduler = AnimateDiffPromptSchedule()
|
53 |
+
self.lora_hacker = AnimateDiffLora(motion_module.mm.is_v2)
|
54 |
+
self.lora_hacker.hack()
|
55 |
+
self.cfg_hacker = AnimateDiffInfV2V(p, self.prompt_scheduler)
|
56 |
+
self.cfg_hacker.hack(params)
|
57 |
+
self.cn_hacker = AnimateDiffControl(p, self.prompt_scheduler)
|
58 |
+
self.cn_hacker.hack(params)
|
59 |
+
update_infotext(p, params)
|
60 |
+
self.hacked = True
|
61 |
+
elif self.hacked:
|
62 |
+
self.cn_hacker.restore()
|
63 |
+
self.cfg_hacker.restore()
|
64 |
+
self.lora_hacker.restore()
|
65 |
+
motion_module.restore(p.sd_model)
|
66 |
+
self.hacked = False
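# before_process toggles the AnimateDiff hooks: when enabled it injects the motion module
# into the loaded Stable Diffusion model and hacks LoRA loading, the infinite-context
# (InfV2V) CFG denoiser and ControlNet, then records the parameters into the infotext;
# when a previous run left the hooks in place and AnimateDiff is now disabled, everything
# is restored in reverse order.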
|
67 |
+
|
68 |
+
|
69 |
+
def before_process_batch(self, p: StableDiffusionProcessing, params: AnimateDiffProcess, **kwargs):
|
70 |
+
if p.is_api and isinstance(params, dict): params = self.ad_params
|
71 |
+
if params.enable and isinstance(p, StableDiffusionProcessingImg2Img) and not hasattr(p, '_animatediff_i2i_batch'):
|
72 |
+
AnimateDiffI2VLatent().randomize(p, params)
|
73 |
+
|
74 |
+
|
75 |
+
def postprocess_batch_list(self, p: StableDiffusionProcessing, pp: PostprocessBatchListArgs, params: AnimateDiffProcess, **kwargs):
|
76 |
+
if p.is_api and isinstance(params, dict): params = self.ad_params
|
77 |
+
if params.enable:
|
78 |
+
self.prompt_scheduler.save_infotext_img(p)
|
79 |
+
|
80 |
+
|
81 |
+
def postprocess_image(self, p: StableDiffusionProcessing, pp: PostprocessImageArgs, params: AnimateDiffProcess, *args):
|
82 |
+
if p.is_api and isinstance(params, dict): params = self.ad_params
|
83 |
+
if params.enable and isinstance(p, StableDiffusionProcessingImg2Img) and hasattr(p, '_animatediff_paste_to_full'):
|
84 |
+
p.paste_to = p._animatediff_paste_to_full[p.batch_index]
|
85 |
+
|
86 |
+
|
87 |
+
def postprocess(self, p: StableDiffusionProcessing, res: Processed, params: AnimateDiffProcess):
|
88 |
+
if p.is_api and isinstance(params, dict): params = self.ad_params
|
89 |
+
if params.enable:
|
90 |
+
self.prompt_scheduler.save_infotext_txt(res)
|
91 |
+
self.cn_hacker.restore()
|
92 |
+
self.cfg_hacker.restore()
|
93 |
+
self.lora_hacker.restore()
|
94 |
+
motion_module.restore(p.sd_model)
|
95 |
+
self.hacked = False
|
96 |
+
AnimateDiffOutput().output(p, res, params)
|
97 |
+
logger.info("AnimateDiff process end.")
|
98 |
+
|
99 |
+
|
100 |
+
def on_ui_settings():
|
101 |
+
section = ("animatediff", "AnimateDiff")
|
102 |
+
s3_selection = ("animatediff", "AnimateDiff AWS")
|
103 |
+
shared.opts.add_option(
|
104 |
+
"animatediff_model_path",
|
105 |
+
shared.OptionInfo(
|
106 |
+
None,
|
107 |
+
"Path to save AnimateDiff motion modules",
|
108 |
+
gr.Textbox,
|
109 |
+
section=section,
|
110 |
+
),
|
111 |
+
)
|
112 |
+
shared.opts.add_option(
|
113 |
+
"animatediff_optimize_gif_palette",
|
114 |
+
shared.OptionInfo(
|
115 |
+
False,
|
116 |
+
"Calculate the optimal GIF palette, improves quality significantly, removes banding",
|
117 |
+
gr.Checkbox,
|
118 |
+
section=section
|
119 |
+
)
|
120 |
+
)
|
121 |
+
shared.opts.add_option(
|
122 |
+
"animatediff_optimize_gif_gifsicle",
|
123 |
+
shared.OptionInfo(
|
124 |
+
False,
|
125 |
+
"Optimize GIFs with gifsicle, reduces file size",
|
126 |
+
gr.Checkbox,
|
127 |
+
section=section
|
128 |
+
)
|
129 |
+
)
|
130 |
+
shared.opts.add_option(
|
131 |
+
key="animatediff_mp4_crf",
|
132 |
+
info=shared.OptionInfo(
|
133 |
+
default=23,
|
134 |
+
label="MP4 Quality (CRF)",
|
135 |
+
component=gr.Slider,
|
136 |
+
component_args={
|
137 |
+
"minimum": 0,
|
138 |
+
"maximum": 51,
|
139 |
+
"step": 1},
|
140 |
+
section=section
|
141 |
+
)
|
142 |
+
.link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#crf")
|
143 |
+
.info("17 for best quality, up to 28 for smaller size")
|
144 |
+
)
|
145 |
+
shared.opts.add_option(
|
146 |
+
key="animatediff_mp4_preset",
|
147 |
+
info=shared.OptionInfo(
|
148 |
+
default="",
|
149 |
+
label="MP4 Encoding Preset",
|
150 |
+
component=gr.Dropdown,
|
151 |
+
component_args={"choices": ["", 'veryslow', 'slower', 'slow', 'medium', 'fast', 'faster', 'veryfast', 'superfast', 'ultrafast']},
|
152 |
+
section=section,
|
153 |
+
)
|
154 |
+
.link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#Preset")
|
155 |
+
.info("encoding speed, use the slowest you can tolerate")
|
156 |
+
)
|
157 |
+
shared.opts.add_option(
|
158 |
+
key="animatediff_mp4_tune",
|
159 |
+
info=shared.OptionInfo(
|
160 |
+
default="",
|
161 |
+
label="MP4 Tune encoding for content type",
|
162 |
+
component=gr.Dropdown,
|
163 |
+
component_args={"choices": ["", "film", "animation", "grain"]},
|
164 |
+
section=section
|
165 |
+
)
|
166 |
+
.link("docs", "https://trac.ffmpeg.org/wiki/Encode/H.264#Tune")
|
167 |
+
.info("optimize for specific content types")
|
168 |
+
)
|
169 |
+
shared.opts.add_option(
|
170 |
+
"animatediff_webp_quality",
|
171 |
+
shared.OptionInfo(
|
172 |
+
80,
|
173 |
+
"WebP Quality (if lossless=True, increases compression and CPU usage)",
|
174 |
+
gr.Slider,
|
175 |
+
{
|
176 |
+
"minimum": 1,
|
177 |
+
"maximum": 100,
|
178 |
+
"step": 1},
|
179 |
+
section=section
|
180 |
+
)
|
181 |
+
)
|
182 |
+
shared.opts.add_option(
|
183 |
+
"animatediff_webp_lossless",
|
184 |
+
shared.OptionInfo(
|
185 |
+
False,
|
186 |
+
"Save WebP in lossless format (highest quality, largest file size)",
|
187 |
+
gr.Checkbox,
|
188 |
+
section=section
|
189 |
+
)
|
190 |
+
)
|
191 |
+
shared.opts.add_option(
|
192 |
+
"animatediff_save_to_custom",
|
193 |
+
shared.OptionInfo(
|
194 |
+
False,
|
195 |
+
"Save frames to stable-diffusion-webui/outputs/{ txt|img }2img-images/AnimateDiff/{gif filename}/{date} "
|
196 |
+
"instead of stable-diffusion-webui/outputs/{ txt|img }2img-images/{date}/.",
|
197 |
+
gr.Checkbox,
|
198 |
+
section=section
|
199 |
+
)
|
200 |
+
)
|
201 |
+
shared.opts.add_option(
|
202 |
+
"animatediff_xformers",
|
203 |
+
shared.OptionInfo(
|
204 |
+
"Optimize attention layers with xformers",
|
205 |
+
"When you have --xformers in your command line args, you want AnimateDiff to ",
|
206 |
+
gr.Radio,
|
207 |
+
{"choices": ["Optimize attention layers with xformers",
|
208 |
+
"Optimize attention layers with sdp (torch >= 2.0.0 required)",
|
209 |
+
"Do not optimize attention layers"]},
|
210 |
+
section=section
|
211 |
+
)
|
212 |
+
)
|
213 |
+
shared.opts.add_option(
|
214 |
+
"animatediff_disable_lcm",
|
215 |
+
shared.OptionInfo(
|
216 |
+
False,
|
217 |
+
"Disable LCM",
|
218 |
+
gr.Checkbox,
|
219 |
+
section=section
|
220 |
+
)
|
221 |
+
)
|
222 |
+
shared.opts.add_option(
|
223 |
+
"animatediff_s3_enable",
|
224 |
+
shared.OptionInfo(
|
225 |
+
False,
|
226 |
+
"Enable to Store file in object storage that supports the s3 protocol",
|
227 |
+
gr.Checkbox,
|
228 |
+
section=s3_selection
|
229 |
+
)
|
230 |
+
)
|
231 |
+
shared.opts.add_option(
|
232 |
+
"animatediff_s3_host",
|
233 |
+
shared.OptionInfo(
|
234 |
+
None,
|
235 |
+
"S3 protocol host",
|
236 |
+
gr.Textbox,
|
237 |
+
section=s3_selection,
|
238 |
+
),
|
239 |
+
)
|
240 |
+
shared.opts.add_option(
|
241 |
+
"animatediff_s3_port",
|
242 |
+
shared.OptionInfo(
|
243 |
+
None,
|
244 |
+
"S3 protocol port",
|
245 |
+
gr.Textbox,
|
246 |
+
section=s3_selection,
|
247 |
+
),
|
248 |
+
)
|
249 |
+
shared.opts.add_option(
|
250 |
+
"animatediff_s3_access_key",
|
251 |
+
shared.OptionInfo(
|
252 |
+
None,
|
253 |
+
"S3 protocol access_key",
|
254 |
+
gr.Textbox,
|
255 |
+
section=s3_selection,
|
256 |
+
),
|
257 |
+
)
|
258 |
+
shared.opts.add_option(
|
259 |
+
"animatediff_s3_secret_key",
|
260 |
+
shared.OptionInfo(
|
261 |
+
None,
|
262 |
+
"S3 protocol secret_key",
|
263 |
+
gr.Textbox,
|
264 |
+
section=s3_selection,
|
265 |
+
),
|
266 |
+
)
|
267 |
+
shared.opts.add_option(
|
268 |
+
"animatediff_s3_storge_bucket",
|
269 |
+
shared.OptionInfo(
|
270 |
+
None,
|
271 |
+
"Bucket for file storage",
|
272 |
+
gr.Textbox,
|
273 |
+
section=s3_selection,
|
274 |
+
),
|
275 |
+
)
|
276 |
+
|
277 |
+
script_callbacks.on_ui_settings(on_ui_settings)
|
278 |
+
script_callbacks.on_after_component(AnimateDiffUiGroup.on_after_component)
|
279 |
+
script_callbacks.on_before_ui(AnimateDiffUiGroup.on_before_ui)
|
extensions/sd-webui-animatediff/scripts/animatediff_cn.py
ADDED
@@ -0,0 +1,641 @@
1 |
+
from pathlib import Path
|
2 |
+
from types import MethodType
|
3 |
+
from typing import Optional
|
4 |
+
|
5 |
+
import os
|
6 |
+
import shutil
|
7 |
+
import cv2
|
8 |
+
import numpy as np
|
9 |
+
import torch
|
10 |
+
from tqdm import tqdm
|
11 |
+
from PIL import Image, ImageFilter, ImageOps
|
12 |
+
from modules import processing, shared, masking, images, devices
|
13 |
+
from modules.paths import data_path
|
14 |
+
from modules.processing import (StableDiffusionProcessing,
|
15 |
+
StableDiffusionProcessingImg2Img,
|
16 |
+
StableDiffusionProcessingTxt2Img)
|
17 |
+
|
18 |
+
from scripts.animatediff_logger import logger_animatediff as logger
|
19 |
+
from scripts.animatediff_ui import AnimateDiffProcess
|
20 |
+
from scripts.animatediff_prompt import AnimateDiffPromptSchedule
|
21 |
+
from scripts.animatediff_infotext import update_infotext
|
22 |
+
from scripts.animatediff_i2ibatch import animatediff_i2ibatch
|
23 |
+
|
24 |
+
|
25 |
+
class AnimateDiffControl:
|
26 |
+
original_processing_process_images_hijack = None
|
27 |
+
original_controlnet_main_entry = None
|
28 |
+
original_postprocess_batch = None
|
29 |
+
|
30 |
+
def __init__(self, p: StableDiffusionProcessing, prompt_scheduler: AnimateDiffPromptSchedule):
|
31 |
+
try:
|
32 |
+
from scripts.external_code import find_cn_script
|
33 |
+
self.cn_script = find_cn_script(p.scripts)
|
34 |
+
except:
|
35 |
+
self.cn_script = None
|
36 |
+
self.prompt_scheduler = prompt_scheduler
|
37 |
+
|
38 |
+
|
39 |
+
def hack_batchhijack(self, params: AnimateDiffProcess):
|
40 |
+
cn_script = self.cn_script
|
41 |
+
prompt_scheduler = self.prompt_scheduler
|
42 |
+
|
43 |
+
def get_input_frames():
|
44 |
+
if params.video_source is not None and params.video_source != '':
|
45 |
+
cap = cv2.VideoCapture(params.video_source)
|
46 |
+
frame_count = 0
|
47 |
+
tmp_frame_dir = Path(f'{data_path}/tmp/animatediff-frames/')
|
48 |
+
tmp_frame_dir.mkdir(parents=True, exist_ok=True)
|
49 |
+
while cap.isOpened():
|
50 |
+
ret, frame = cap.read()
|
51 |
+
if not ret:
|
52 |
+
break
|
53 |
+
cv2.imwrite(f"{tmp_frame_dir}/{frame_count}.png", frame)
|
54 |
+
frame_count += 1
|
55 |
+
cap.release()
|
56 |
+
return str(tmp_frame_dir)
|
57 |
+
elif params.video_path is not None and params.video_path != '':
|
58 |
+
return params.video_path
|
59 |
+
return ''
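# get_input_frames: if a source video is set, decode it frame by frame with OpenCV into
# {data_path}/tmp/animatediff-frames/ and return that directory; otherwise fall back to
# the user-provided frame folder (video_path), or an empty string when neither is given.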
|
60 |
+
|
61 |
+
from scripts.batch_hijack import BatchHijack, instance
|
62 |
+
def hacked_processing_process_images_hijack(self, p: StableDiffusionProcessing, *args, **kwargs):
|
63 |
+
from scripts import external_code
|
64 |
+
from scripts.batch_hijack import InputMode
|
65 |
+
|
66 |
+
units = external_code.get_all_units_in_processing(p)
|
67 |
+
units = [unit for unit in units if getattr(unit, 'enabled', False)]
|
68 |
+
|
69 |
+
if len(units) > 0:
|
70 |
+
global_input_frames = get_input_frames()
|
71 |
+
for idx, unit in enumerate(units):
|
72 |
+
# i2i-batch mode
|
73 |
+
if getattr(p, '_animatediff_i2i_batch', None) and not unit.image:
|
74 |
+
unit.input_mode = InputMode.BATCH
|
75 |
+
# if no input given for this unit, use global input
|
76 |
+
if getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH:
|
77 |
+
if not unit.batch_images:
|
78 |
+
assert global_input_frames, 'No input images found for ControlNet module'
|
79 |
+
unit.batch_images = global_input_frames
|
80 |
+
elif not unit.image:
|
81 |
+
try:
|
82 |
+
cn_script.choose_input_image(p, unit, idx)
|
83 |
+
except:
|
84 |
+
assert global_input_frames != '', 'No input images found for ControlNet module'
|
85 |
+
unit.batch_images = global_input_frames
|
86 |
+
unit.input_mode = InputMode.BATCH
|
87 |
+
|
88 |
+
if getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH:
|
89 |
+
if 'inpaint' in unit.module:
|
90 |
+
images = shared.listfiles(f'{unit.batch_images}/image')
|
91 |
+
masks = shared.listfiles(f'{unit.batch_images}/mask')
|
92 |
+
assert len(images) == len(masks), 'Inpainting image mask count mismatch'
|
93 |
+
unit.batch_images = [{'image': images[i], 'mask': masks[i]} for i in range(len(images))]
|
94 |
+
else:
|
95 |
+
unit.batch_images = shared.listfiles(unit.batch_images)
|
96 |
+
|
97 |
+
unit_batch_list = [len(unit.batch_images) for unit in units
|
98 |
+
if getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH]
|
99 |
+
if getattr(p, '_animatediff_i2i_batch', None):
|
100 |
+
unit_batch_list.append(len(p.init_images))
|
101 |
+
|
102 |
+
if len(unit_batch_list) > 0:
|
103 |
+
video_length = min(unit_batch_list)
|
104 |
+
# ensure that params.video_length <= video_length and params.batch_size <= video_length
|
105 |
+
if params.video_length > video_length:
|
106 |
+
params.video_length = video_length
|
107 |
+
if params.batch_size > video_length:
|
108 |
+
params.batch_size = video_length
|
109 |
+
if params.video_default:
|
110 |
+
params.video_length = video_length
|
111 |
+
p.batch_size = video_length
|
112 |
+
for unit in units:
|
113 |
+
if getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH:
|
114 |
+
unit.batch_images = unit.batch_images[:params.video_length]
|
115 |
+
|
116 |
+
animatediff_i2ibatch.cap_init_image(p, params)
|
117 |
+
prompt_scheduler.parse_prompt(p)
|
118 |
+
update_infotext(p, params)
|
119 |
+
return getattr(processing, '__controlnet_original_process_images_inner')(p, *args, **kwargs)
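# The hijacked process_images_inner assigns the extracted frames to every enabled
# ControlNet unit that has no input of its own (switching it to batch mode), clamps
# params.video_length and params.batch_size to the smallest unit batch, parses the
# prompt-travel schedule and updates the infotext, then defers to ControlNet's original
# __controlnet_original_process_images_inner.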
|
120 |
+
|
121 |
+
if AnimateDiffControl.original_processing_process_images_hijack is not None:
|
122 |
+
logger.info('BatchHijack already hacked.')
|
123 |
+
return
|
124 |
+
|
125 |
+
AnimateDiffControl.original_processing_process_images_hijack = BatchHijack.processing_process_images_hijack
|
126 |
+
BatchHijack.processing_process_images_hijack = hacked_processing_process_images_hijack
|
127 |
+
processing.process_images_inner = instance.processing_process_images_hijack
|
128 |
+
|
129 |
+
|
130 |
+
def restore_batchhijack(self):
|
131 |
+
if AnimateDiffControl.original_processing_process_images_hijack is not None:
|
132 |
+
from scripts.batch_hijack import BatchHijack, instance
|
133 |
+
BatchHijack.processing_process_images_hijack = AnimateDiffControl.original_processing_process_images_hijack
|
134 |
+
AnimateDiffControl.original_processing_process_images_hijack = None
|
135 |
+
processing.process_images_inner = instance.processing_process_images_hijack
|
136 |
+
|
137 |
+
|
138 |
+
def hack_cn(self):
|
139 |
+
cn_script = self.cn_script
|
140 |
+
|
141 |
+
|
142 |
+
def hacked_main_entry(self, p: StableDiffusionProcessing):
|
143 |
+
from scripts import external_code, global_state, hook
|
144 |
+
from scripts.controlnet_lora import bind_control_lora
|
145 |
+
from scripts.adapter import Adapter, Adapter_light, StyleAdapter
|
146 |
+
from scripts.batch_hijack import InputMode
|
147 |
+
from scripts.controlnet_lllite import PlugableControlLLLite, clear_all_lllite
|
148 |
+
from scripts.controlmodel_ipadapter import (PlugableIPAdapter,
|
149 |
+
clear_all_ip_adapter)
|
150 |
+
from scripts.hook import ControlModelType, ControlParams, UnetHook
|
151 |
+
from scripts.logging import logger
|
152 |
+
from scripts.processor import model_free_preprocessors
|
153 |
+
|
154 |
+
# TODO: i2i-batch mode, what should I change?
|
155 |
+
def image_has_mask(input_image: np.ndarray) -> bool:
|
156 |
+
return (
|
157 |
+
input_image.ndim == 3 and
|
158 |
+
input_image.shape[2] == 4 and
|
159 |
+
np.max(input_image[:, :, 3]) > 127
|
160 |
+
)
|
161 |
+
|
162 |
+
|
163 |
+
def prepare_mask(
|
164 |
+
mask: Image.Image, p: processing.StableDiffusionProcessing
|
165 |
+
) -> Image.Image:
|
166 |
+
mask = mask.convert("L")
|
167 |
+
if getattr(p, "inpainting_mask_invert", False):
|
168 |
+
mask = ImageOps.invert(mask)
|
169 |
+
|
170 |
+
if hasattr(p, 'mask_blur_x'):
|
171 |
+
if getattr(p, "mask_blur_x", 0) > 0:
|
172 |
+
np_mask = np.array(mask)
|
173 |
+
kernel_size = 2 * int(2.5 * p.mask_blur_x + 0.5) + 1
|
174 |
+
np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), p.mask_blur_x)
|
175 |
+
mask = Image.fromarray(np_mask)
|
176 |
+
if getattr(p, "mask_blur_y", 0) > 0:
|
177 |
+
np_mask = np.array(mask)
|
178 |
+
kernel_size = 2 * int(2.5 * p.mask_blur_y + 0.5) + 1
|
179 |
+
np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), p.mask_blur_y)
|
180 |
+
mask = Image.fromarray(np_mask)
|
181 |
+
else:
|
182 |
+
if getattr(p, "mask_blur", 0) > 0:
|
183 |
+
mask = mask.filter(ImageFilter.GaussianBlur(p.mask_blur))
|
184 |
+
|
185 |
+
return mask
|
186 |
+
|
187 |
+
|
188 |
+
def set_numpy_seed(p: processing.StableDiffusionProcessing) -> Optional[int]:
|
189 |
+
try:
|
190 |
+
tmp_seed = int(p.all_seeds[0] if p.seed == -1 else max(int(p.seed), 0))
|
191 |
+
tmp_subseed = int(p.all_seeds[0] if p.subseed == -1 else max(int(p.subseed), 0))
|
192 |
+
seed = (tmp_seed + tmp_subseed) & 0xFFFFFFFF
|
193 |
+
np.random.seed(seed)
|
194 |
+
return seed
|
195 |
+
except Exception as e:
|
196 |
+
logger.warning(e)
|
197 |
+
logger.warning('Warning: Failed to use consistent random seed.')
|
198 |
+
return None
|
199 |
+
|
200 |
+
sd_ldm = p.sd_model
|
201 |
+
unet = sd_ldm.model.diffusion_model
|
202 |
+
self.noise_modifier = None
|
203 |
+
|
204 |
+
setattr(p, 'controlnet_control_loras', [])
|
205 |
+
|
206 |
+
if self.latest_network is not None:
|
207 |
+
# always restore (~0.05s)
|
208 |
+
self.latest_network.restore()
|
209 |
+
|
210 |
+
# always clear (~0.05s)
|
211 |
+
clear_all_lllite()
|
212 |
+
clear_all_ip_adapter()
|
213 |
+
|
214 |
+
self.enabled_units = cn_script.get_enabled_units(p)
|
215 |
+
|
216 |
+
if len(self.enabled_units) == 0:
|
217 |
+
self.latest_network = None
|
218 |
+
return
|
219 |
+
|
220 |
+
detected_maps = []
|
221 |
+
forward_params = []
|
222 |
+
post_processors = []
|
223 |
+
|
224 |
+
# cache stuff
|
225 |
+
if self.latest_model_hash != p.sd_model.sd_model_hash:
|
226 |
+
cn_script.clear_control_model_cache()
|
227 |
+
|
228 |
+
for idx, unit in enumerate(self.enabled_units):
|
229 |
+
unit.module = global_state.get_module_basename(unit.module)
|
230 |
+
|
231 |
+
# unload unused preproc
|
232 |
+
module_list = [unit.module for unit in self.enabled_units]
|
233 |
+
for key in self.unloadable:
|
234 |
+
if key not in module_list:
|
235 |
+
self.unloadable.get(key, lambda:None)()
|
236 |
+
|
237 |
+
self.latest_model_hash = p.sd_model.sd_model_hash
|
238 |
+
for idx, unit in enumerate(self.enabled_units):
|
239 |
+
cn_script.bound_check_params(unit)
|
240 |
+
|
241 |
+
resize_mode = external_code.resize_mode_from_value(unit.resize_mode)
|
242 |
+
control_mode = external_code.control_mode_from_value(unit.control_mode)
|
243 |
+
|
244 |
+
if unit.module in model_free_preprocessors:
|
245 |
+
model_net = None
|
246 |
+
else:
|
247 |
+
model_net = cn_script.load_control_model(p, unet, unit.model)
|
248 |
+
model_net.reset()
|
249 |
+
if model_net is not None and getattr(devices, "fp8", False) and not isinstance(model_net, PlugableIPAdapter):
|
250 |
+
for _module in model_net.modules():
|
251 |
+
if isinstance(_module, (torch.nn.Conv2d, torch.nn.Linear)):
|
252 |
+
_module.to(torch.float8_e4m3fn)
|
253 |
+
|
254 |
+
if getattr(model_net, 'is_control_lora', False):
|
255 |
+
control_lora = model_net.control_model
|
256 |
+
bind_control_lora(unet, control_lora)
|
257 |
+
p.controlnet_control_loras.append(control_lora)
|
258 |
+
|
259 |
+
if getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH:
|
260 |
+
input_images = []
|
261 |
+
for img in unit.batch_images:
|
262 |
+
unit.image = img
|
263 |
+
input_image, _ = cn_script.choose_input_image(p, unit, idx)
|
264 |
+
input_images.append(input_image)
|
265 |
+
else:
|
266 |
+
input_image, image_from_a1111 = cn_script.choose_input_image(p, unit, idx)
|
267 |
+
input_images = [input_image]
|
268 |
+
|
269 |
+
if image_from_a1111:
|
270 |
+
a1111_i2i_resize_mode = getattr(p, "resize_mode", None)
|
271 |
+
if a1111_i2i_resize_mode is not None:
|
272 |
+
resize_mode = external_code.resize_mode_from_value(a1111_i2i_resize_mode)
|
273 |
+
|
274 |
+
for idx, input_image in enumerate(input_images):
|
275 |
+
a1111_mask_image : Optional[Image.Image] = getattr(p, "image_mask", None)
|
276 |
+
if a1111_mask_image and isinstance(a1111_mask_image, list):
|
277 |
+
a1111_mask_image = a1111_mask_image[idx]
|
278 |
+
if 'inpaint' in unit.module and not image_has_mask(input_image) and a1111_mask_image is not None:
|
279 |
+
a1111_mask = np.array(prepare_mask(a1111_mask_image, p))
|
280 |
+
if a1111_mask.ndim == 2:
|
281 |
+
if a1111_mask.shape[0] == input_image.shape[0]:
|
282 |
+
if a1111_mask.shape[1] == input_image.shape[1]:
|
283 |
+
input_image = np.concatenate([input_image[:, :, 0:3], a1111_mask[:, :, None]], axis=2)
|
284 |
+
a1111_i2i_resize_mode = getattr(p, "resize_mode", None)
|
285 |
+
if a1111_i2i_resize_mode is not None:
|
286 |
+
resize_mode = external_code.resize_mode_from_value(a1111_i2i_resize_mode)
|
287 |
+
|
288 |
+
if 'reference' not in unit.module and issubclass(type(p), StableDiffusionProcessingImg2Img) \
|
289 |
+
and p.inpaint_full_res and a1111_mask_image is not None:
|
290 |
+
logger.debug("A1111 inpaint mask START")
|
291 |
+
input_image = [input_image[:, :, i] for i in range(input_image.shape[2])]
|
292 |
+
input_image = [Image.fromarray(x) for x in input_image]
|
293 |
+
|
294 |
+
mask = prepare_mask(a1111_mask_image, p)
|
295 |
+
|
296 |
+
crop_region = masking.get_crop_region(np.array(mask), p.inpaint_full_res_padding)
|
297 |
+
crop_region = masking.expand_crop_region(crop_region, p.width, p.height, mask.width, mask.height)
|
298 |
+
|
299 |
+
input_image = [
|
300 |
+
images.resize_image(resize_mode.int_value(), i, mask.width, mask.height)
|
301 |
+
for i in input_image
|
302 |
+
]
|
303 |
+
|
304 |
+
input_image = [x.crop(crop_region) for x in input_image]
|
305 |
+
input_image = [
|
306 |
+
images.resize_image(external_code.ResizeMode.OUTER_FIT.int_value(), x, p.width, p.height)
|
307 |
+
for x in input_image
|
308 |
+
]
|
309 |
+
|
310 |
+
input_image = [np.asarray(x)[:, :, 0] for x in input_image]
|
311 |
+
input_image = np.stack(input_image, axis=2)
|
312 |
+
logger.debug("A1111 inpaint mask END")
|
313 |
+
|
314 |
+
# safe numpy
|
315 |
+
logger.debug("Safe numpy convertion START")
|
316 |
+
input_image = np.ascontiguousarray(input_image.copy()).copy()
|
317 |
+
logger.debug("Safe numpy convertion END")
|
318 |
+
|
319 |
+
input_images[idx] = input_image
|
320 |
+
|
321 |
+
if 'inpaint_only' == unit.module and issubclass(type(p), StableDiffusionProcessingImg2Img) and p.image_mask is not None:
|
322 |
+
logger.warning('A1111 inpaint and ControlNet inpaint duplicated. ControlNet support enabled.')
|
323 |
+
unit.module = 'inpaint'
|
324 |
+
|
325 |
+
logger.info(f"Loading preprocessor: {unit.module}")
|
326 |
+
preprocessor = self.preprocessor[unit.module]
|
327 |
+
|
328 |
+
high_res_fix = isinstance(p, StableDiffusionProcessingTxt2Img) and getattr(p, 'enable_hr', False)
|
329 |
+
|
330 |
+
h = (p.height // 8) * 8
|
331 |
+
w = (p.width // 8) * 8
|
332 |
+
|
333 |
+
if high_res_fix:
|
334 |
+
if p.hr_resize_x == 0 and p.hr_resize_y == 0:
|
335 |
+
hr_y = int(p.height * p.hr_scale)
|
336 |
+
hr_x = int(p.width * p.hr_scale)
|
337 |
+
else:
|
338 |
+
hr_y, hr_x = p.hr_resize_y, p.hr_resize_x
|
339 |
+
hr_y = (hr_y // 8) * 8
|
340 |
+
hr_x = (hr_x // 8) * 8
|
341 |
+
else:
|
342 |
+
hr_y = h
|
343 |
+
hr_x = w
|
344 |
+
|
345 |
+
if unit.module == 'inpaint_only+lama' and resize_mode == external_code.ResizeMode.OUTER_FIT:
|
346 |
+
# inpaint_only+lama is special and required outpaint fix
|
347 |
+
for idx, input_image in enumerate(input_images):
|
348 |
+
_, input_image = cn_script.detectmap_proc(input_image, unit.module, resize_mode, hr_y, hr_x)
|
349 |
+
input_images[idx] = input_image
|
350 |
+
|
351 |
+
control_model_type = ControlModelType.ControlNet
|
352 |
+
global_average_pooling = False
|
353 |
+
|
354 |
+
if 'reference' in unit.module:
|
355 |
+
control_model_type = ControlModelType.AttentionInjection
|
356 |
+
elif 'revision' in unit.module:
|
357 |
+
control_model_type = ControlModelType.ReVision
|
358 |
+
elif hasattr(model_net, 'control_model') and (isinstance(model_net.control_model, Adapter) or isinstance(model_net.control_model, Adapter_light)):
|
359 |
+
control_model_type = ControlModelType.T2I_Adapter
|
360 |
+
elif hasattr(model_net, 'control_model') and isinstance(model_net.control_model, StyleAdapter):
|
361 |
+
control_model_type = ControlModelType.T2I_StyleAdapter
|
362 |
+
elif isinstance(model_net, PlugableIPAdapter):
|
363 |
+
control_model_type = ControlModelType.IPAdapter
|
364 |
+
elif isinstance(model_net, PlugableControlLLLite):
|
365 |
+
control_model_type = ControlModelType.Controlllite
|
366 |
+
|
367 |
+
if control_model_type is ControlModelType.ControlNet:
|
368 |
+
global_average_pooling = model_net.control_model.global_average_pooling
|
369 |
+
|
370 |
+
preprocessor_resolution = unit.processor_res
|
371 |
+
if unit.pixel_perfect:
|
372 |
+
preprocessor_resolution = external_code.pixel_perfect_resolution(
|
373 |
+
input_images[0],
|
374 |
+
target_H=h,
|
375 |
+
target_W=w,
|
376 |
+
resize_mode=resize_mode
|
377 |
+
)
|
378 |
+
|
379 |
+
logger.info(f'preprocessor resolution = {preprocessor_resolution}')
|
380 |
+
# Preprocessor result may depend on numpy random operations, use the
|
381 |
+
# random seed in `StableDiffusionProcessing` to make the
|
382 |
+
# preprocessor result reproducable.
|
383 |
+
# Currently following preprocessors use numpy random:
|
384 |
+
# - shuffle
|
385 |
+
seed = set_numpy_seed(p)
|
386 |
+
logger.debug(f"Use numpy seed {seed}.")
|
387 |
+
|
388 |
+
controls = []
|
389 |
+
hr_controls = []
|
390 |
+
controls_ipadapter = {'hidden_states': [], 'image_embeds': []}
|
391 |
+
hr_controls_ipadapter = {'hidden_states': [], 'image_embeds': []}
|
392 |
+
for idx, input_image in tqdm(enumerate(input_images), total=len(input_images)):
|
393 |
+
detected_map, is_image = preprocessor(
|
394 |
+
input_image,
|
395 |
+
res=preprocessor_resolution,
|
396 |
+
thr_a=unit.threshold_a,
|
397 |
+
thr_b=unit.threshold_b,
|
398 |
+
)
|
399 |
+
|
400 |
+
if high_res_fix:
|
401 |
+
if is_image:
|
402 |
+
hr_control, hr_detected_map = cn_script.detectmap_proc(detected_map, unit.module, resize_mode, hr_y, hr_x)
|
403 |
+
detected_maps.append((hr_detected_map, unit.module))
|
404 |
+
else:
|
405 |
+
hr_control = detected_map
|
406 |
+
else:
|
407 |
+
hr_control = None
|
408 |
+
|
409 |
+
if is_image:
|
410 |
+
control, detected_map = cn_script.detectmap_proc(detected_map, unit.module, resize_mode, h, w)
|
411 |
+
detected_maps.append((detected_map, unit.module))
|
412 |
+
else:
|
413 |
+
control = detected_map
|
414 |
+
detected_maps.append((input_image, unit.module))
|
415 |
+
|
416 |
+
if control_model_type == ControlModelType.T2I_StyleAdapter:
|
417 |
+
control = control['last_hidden_state']
|
418 |
+
|
419 |
+
if control_model_type == ControlModelType.ReVision:
|
420 |
+
control = control['image_embeds']
|
421 |
+
|
422 |
+
if control_model_type == ControlModelType.IPAdapter:
|
423 |
+
if model_net.is_plus:
|
424 |
+
controls_ipadapter['hidden_states'].append(control['hidden_states'][-2].cpu())
|
425 |
+
else:
|
426 |
+
controls_ipadapter['image_embeds'].append(control['image_embeds'].cpu())
|
427 |
+
if hr_control is not None:
|
428 |
+
if model_net.is_plus:
|
429 |
+
hr_controls_ipadapter['hidden_states'].append(hr_control['hidden_states'][-2].cpu())
|
430 |
+
else:
|
431 |
+
hr_controls_ipadapter['image_embeds'].append(hr_control['image_embeds'].cpu())
|
432 |
+
else:
|
433 |
+
hr_controls_ipadapter = None
|
434 |
+
hr_controls = None
|
435 |
+
else:
|
436 |
+
controls.append(control.cpu())
|
437 |
+
if hr_control is not None:
|
438 |
+
hr_controls.append(hr_control.cpu())
|
439 |
+
else:
|
440 |
+
hr_controls = None
|
441 |
+
|
442 |
+
if control_model_type == ControlModelType.IPAdapter:
|
443 |
+
ipadapter_key = 'hidden_states' if model_net.is_plus else 'image_embeds'
|
444 |
+
controls = {ipadapter_key: torch.cat(controls_ipadapter[ipadapter_key], dim=0)}
|
445 |
+
if controls[ipadapter_key].shape[0] > 1:
|
446 |
+
controls[ipadapter_key] = torch.cat([controls[ipadapter_key], controls[ipadapter_key]], dim=0)
|
447 |
+
if model_net.is_plus:
|
448 |
+
controls[ipadapter_key] = [controls[ipadapter_key], None]
|
449 |
+
if hr_controls_ipadapter is not None:
|
450 |
+
hr_controls = {ipadapter_key: torch.cat(hr_controls_ipadapter[ipadapter_key], dim=0)}
|
451 |
+
if hr_controls[ipadapter_key].shape[0] > 1:
|
452 |
+
hr_controls[ipadapter_key] = torch.cat([hr_controls[ipadapter_key], hr_controls[ipadapter_key]], dim=0)
|
453 |
+
if model_net.is_plus:
|
454 |
+
hr_controls[ipadapter_key] = [hr_controls[ipadapter_key], None]
|
455 |
+
else:
|
456 |
+
controls = torch.cat(controls, dim=0)
|
457 |
+
if controls.shape[0] > 1:
|
458 |
+
controls = torch.cat([controls, controls], dim=0)
|
459 |
+
if hr_controls is not None:
|
460 |
+
hr_controls = torch.cat(hr_controls, dim=0)
|
461 |
+
if hr_controls.shape[0] > 1:
|
462 |
+
hr_controls = torch.cat([hr_controls, hr_controls], dim=0)
|
463 |
+
|
464 |
+
preprocessor_dict = dict(
|
465 |
+
name=unit.module,
|
466 |
+
preprocessor_resolution=preprocessor_resolution,
|
467 |
+
threshold_a=unit.threshold_a,
|
468 |
+
threshold_b=unit.threshold_b
|
469 |
+
)
|
470 |
+
|
471 |
+
forward_param = ControlParams(
|
472 |
+
control_model=model_net,
|
473 |
+
preprocessor=preprocessor_dict,
|
474 |
+
hint_cond=controls,
|
475 |
+
weight=unit.weight,
|
476 |
+
guidance_stopped=False,
|
477 |
+
start_guidance_percent=unit.guidance_start,
|
478 |
+
stop_guidance_percent=unit.guidance_end,
|
479 |
+
advanced_weighting=None,
|
480 |
+
control_model_type=control_model_type,
|
481 |
+
global_average_pooling=global_average_pooling,
|
482 |
+
hr_hint_cond=hr_controls,
|
483 |
+
soft_injection=control_mode != external_code.ControlMode.BALANCED,
|
484 |
+
cfg_injection=control_mode == external_code.ControlMode.CONTROL,
|
485 |
+
)
|
486 |
+
forward_params.append(forward_param)
|
487 |
+
|
488 |
+
unit_is_batch = getattr(unit, 'input_mode', InputMode.SIMPLE) == InputMode.BATCH
|
489 |
+
if 'inpaint_only' in unit.module:
|
490 |
+
final_inpaint_raws = []
|
491 |
+
final_inpaint_masks = []
|
492 |
+
for i in range(len(controls)):
|
493 |
+
final_inpaint_feed = hr_controls[i] if hr_controls is not None else controls[i]
|
494 |
+
final_inpaint_feed = final_inpaint_feed.detach().cpu().numpy()
|
495 |
+
final_inpaint_feed = np.ascontiguousarray(final_inpaint_feed).copy()
|
496 |
+
final_inpaint_mask = final_inpaint_feed[0, 3, :, :].astype(np.float32)
|
497 |
+
final_inpaint_raw = final_inpaint_feed[0, :3].astype(np.float32)
|
498 |
+
sigma = shared.opts.data.get("control_net_inpaint_blur_sigma", 7)
|
499 |
+
final_inpaint_mask = cv2.dilate(final_inpaint_mask, np.ones((sigma, sigma), dtype=np.uint8))
|
500 |
+
final_inpaint_mask = cv2.blur(final_inpaint_mask, (sigma, sigma))[None]
|
501 |
+
_, Hmask, Wmask = final_inpaint_mask.shape
|
502 |
+
final_inpaint_raw = torch.from_numpy(np.ascontiguousarray(final_inpaint_raw).copy())
|
503 |
+
final_inpaint_mask = torch.from_numpy(np.ascontiguousarray(final_inpaint_mask).copy())
|
504 |
+
final_inpaint_raws.append(final_inpaint_raw)
|
505 |
+
final_inpaint_masks.append(final_inpaint_mask)
|
506 |
+
|
507 |
+
def inpaint_only_post_processing(x, i):
|
508 |
+
_, H, W = x.shape
|
509 |
+
if Hmask != H or Wmask != W:
|
510 |
+
logger.error('Error: ControlNet find post-processing resolution mismatch. This could be related to other extensions hacked processing.')
|
511 |
+
return x
|
512 |
+
idx = i if unit_is_batch else 0
|
513 |
+
r = final_inpaint_raw[idx].to(x.dtype).to(x.device)
|
514 |
+
m = final_inpaint_mask[idx].to(x.dtype).to(x.device)
|
515 |
+
y = m * x.clip(0, 1) + (1 - m) * r
|
516 |
+
y = y.clip(0, 1)
|
517 |
+
return y
|
518 |
+
|
519 |
+
post_processors.append(inpaint_only_post_processing)
|
520 |
+
|
521 |
+
if 'recolor' in unit.module:
|
522 |
+
final_feeds = []
|
523 |
+
for i in range(len(controls)):
|
524 |
+
final_feed = hr_control if hr_control is not None else control
|
525 |
+
final_feed = final_feed.detach().cpu().numpy()
|
526 |
+
final_feed = np.ascontiguousarray(final_feed).copy()
|
527 |
+
final_feed = final_feed[0, 0, :, :].astype(np.float32)
|
528 |
+
final_feed = (final_feed * 255).clip(0, 255).astype(np.uint8)
|
529 |
+
Hfeed, Wfeed = final_feed.shape
|
530 |
+
final_feeds.append(final_feed)
|
531 |
+
|
532 |
+
if 'luminance' in unit.module:
|
533 |
+
|
534 |
+
def recolor_luminance_post_processing(x, i):
|
535 |
+
C, H, W = x.shape
|
536 |
+
if Hfeed != H or Wfeed != W or C != 3:
|
537 |
+
logger.error('Error: ControlNet find post-processing resolution mismatch. This could be related to other extensions hacked processing.')
|
538 |
+
return x
|
539 |
+
h = x.detach().cpu().numpy().transpose((1, 2, 0))
|
540 |
+
h = (h * 255).clip(0, 255).astype(np.uint8)
|
541 |
+
h = cv2.cvtColor(h, cv2.COLOR_RGB2LAB)
|
542 |
+
h[:, :, 0] = final_feed[i if unit_is_batch else 0]
|
543 |
+
h = cv2.cvtColor(h, cv2.COLOR_LAB2RGB)
|
544 |
+
h = (h.astype(np.float32) / 255.0).transpose((2, 0, 1))
|
545 |
+
y = torch.from_numpy(h).clip(0, 1).to(x)
|
546 |
+
return y
|
547 |
+
|
548 |
+
post_processors.append(recolor_luminance_post_processing)
|
549 |
+
|
550 |
+
if 'intensity' in unit.module:
|
551 |
+
|
552 |
+
def recolor_intensity_post_processing(x, i):
|
553 |
+
C, H, W = x.shape
|
554 |
+
if Hfeed != H or Wfeed != W or C != 3:
|
555 |
+
logger.error('Error: ControlNet find post-processing resolution mismatch. This could be related to other extensions hacked processing.')
|
556 |
+
return x
|
557 |
+
h = x.detach().cpu().numpy().transpose((1, 2, 0))
|
558 |
+
h = (h * 255).clip(0, 255).astype(np.uint8)
|
559 |
+
h = cv2.cvtColor(h, cv2.COLOR_RGB2HSV)
|
560 |
+
h[:, :, 2] = final_feed[i if unit_is_batch else 0]
|
561 |
+
h = cv2.cvtColor(h, cv2.COLOR_HSV2RGB)
|
562 |
+
h = (h.astype(np.float32) / 255.0).transpose((2, 0, 1))
|
563 |
+
y = torch.from_numpy(h).clip(0, 1).to(x)
|
564 |
+
return y
|
565 |
+
|
566 |
+
post_processors.append(recolor_intensity_post_processing)
|
567 |
+
|
568 |
+
if '+lama' in unit.module:
|
569 |
+
forward_param.used_hint_cond_latent = hook.UnetHook.call_vae_using_process(p, control)
|
570 |
+
self.noise_modifier = forward_param.used_hint_cond_latent
|
571 |
+
|
572 |
+
del model_net
|
573 |
+
|
574 |
+
is_low_vram = any(unit.low_vram for unit in self.enabled_units)
|
575 |
+
|
576 |
+
self.latest_network = UnetHook(lowvram=is_low_vram)
|
577 |
+
self.latest_network.hook(model=unet, sd_ldm=sd_ldm, control_params=forward_params, process=p)
|
578 |
+
|
579 |
+
for param in forward_params:
|
580 |
+
if param.control_model_type == ControlModelType.IPAdapter:
|
581 |
+
param.control_model.hook(
|
582 |
+
model=unet,
|
583 |
+
clip_vision_output=param.hint_cond,
|
584 |
+
weight=param.weight,
|
585 |
+
dtype=torch.float32,
|
586 |
+
start=param.start_guidance_percent,
|
587 |
+
end=param.stop_guidance_percent
|
588 |
+
)
|
589 |
+
if param.control_model_type == ControlModelType.Controlllite:
|
590 |
+
param.control_model.hook(
|
591 |
+
model=unet,
|
592 |
+
cond=param.hint_cond,
|
593 |
+
weight=param.weight,
|
594 |
+
start=param.start_guidance_percent,
|
595 |
+
end=param.stop_guidance_percent
|
596 |
+
)
|
597 |
+
|
598 |
+
self.detected_map = detected_maps
|
599 |
+
self.post_processors = post_processors
|
600 |
+
|
601 |
+
if os.path.exists(f'{data_path}/tmp/animatediff-frames/'):
|
602 |
+
shutil.rmtree(f'{data_path}/tmp/animatediff-frames/')
|
603 |
+
|
604 |
+
def hacked_postprocess_batch(self, p, *args, **kwargs):
|
605 |
+
images = kwargs.get('images', [])
|
606 |
+
for post_processor in self.post_processors:
|
607 |
+
for i in range(len(images)):
|
608 |
+
images[i] = post_processor(images[i], i)
|
609 |
+
return
|
610 |
+
|
611 |
+
if AnimateDiffControl.original_controlnet_main_entry is not None:
|
612 |
+
logger.info('ControlNet Main Entry already hacked.')
|
613 |
+
return
|
614 |
+
|
615 |
+
AnimateDiffControl.original_controlnet_main_entry = self.cn_script.controlnet_main_entry
|
616 |
+
AnimateDiffControl.original_postprocess_batch = self.cn_script.postprocess_batch
|
617 |
+
self.cn_script.controlnet_main_entry = MethodType(hacked_main_entry, self.cn_script)
|
618 |
+
self.cn_script.postprocess_batch = MethodType(hacked_postprocess_batch, self.cn_script)
|
619 |
+
|
620 |
+
|
621 |
+
def restore_cn(self):
|
622 |
+
if AnimateDiffControl.original_controlnet_main_entry is not None:
|
623 |
+
self.cn_script.controlnet_main_entry = AnimateDiffControl.original_controlnet_main_entry
|
624 |
+
AnimateDiffControl.original_controlnet_main_entry = None
|
625 |
+
if AnimateDiffControl.original_postprocess_batch is not None:
|
626 |
+
self.cn_script.postprocess_batch = AnimateDiffControl.original_postprocess_batch
|
627 |
+
AnimateDiffControl.original_postprocess_batch = None
|
628 |
+
|
629 |
+
|
630 |
+
def hack(self, params: AnimateDiffProcess):
|
631 |
+
if self.cn_script is not None:
|
632 |
+
logger.info(f"Hacking ControlNet.")
|
633 |
+
self.hack_batchhijack(params)
|
634 |
+
self.hack_cn()
|
635 |
+
|
636 |
+
|
637 |
+
def restore(self):
|
638 |
+
if self.cn_script is not None:
|
639 |
+
logger.info(f"Restoring ControlNet.")
|
640 |
+
self.restore_batchhijack()
|
641 |
+
self.restore_cn()
|
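The `hack_cn` / `restore_cn` pair above works by stashing ControlNet's original `controlnet_main_entry` and `postprocess_batch` on the class and rebinding replacements onto the ControlNet script instance with `MethodType`. A minimal standalone sketch of that pattern (illustrative class and method names, not part of the extension):

# Sketch of the hack/restore monkey-patching pattern used above:
# stash the original bound method on the class, rebind a replacement
# with MethodType, and put the original back when finished.
from types import MethodType

class Target:
    def entry(self):
        return "original"

class Patcher:
    original_entry = None

    def hack(self, target: Target):
        if Patcher.original_entry is not None:
            return  # already hacked
        Patcher.original_entry = target.entry
        def hacked_entry(self):
            return "hacked"
        target.entry = MethodType(hacked_entry, target)

    def restore(self, target: Target):
        if Patcher.original_entry is not None:
            target.entry = Patcher.original_entry
            Patcher.original_entry = None

t = Target()
p = Patcher()
p.hack(t)
assert t.entry() == "hacked"
p.restore(t)
assert t.entry() == "original"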
extensions/sd-webui-animatediff/scripts/animatediff_i2ibatch.py
ADDED
@@ -0,0 +1,309 @@
1 |
+
from pathlib import Path
|
2 |
+
from types import MethodType
|
3 |
+
|
4 |
+
import os
|
5 |
+
import cv2
|
6 |
+
import numpy as np
|
7 |
+
import torch
|
8 |
+
import hashlib
|
9 |
+
from PIL import Image, ImageOps, UnidentifiedImageError
|
10 |
+
from modules import processing, shared, scripts, img2img, devices, masking, sd_samplers, images
|
11 |
+
from modules.processing import (StableDiffusionProcessingImg2Img,
|
12 |
+
process_images,
|
13 |
+
create_binary_mask,
|
14 |
+
create_random_tensors,
|
15 |
+
images_tensor_to_samples,
|
16 |
+
setup_color_correction,
|
17 |
+
opt_f)
|
18 |
+
from modules.shared import opts
|
19 |
+
from modules.sd_samplers_common import images_tensor_to_samples, approximation_indexes
|
20 |
+
|
21 |
+
from scripts.animatediff_logger import logger_animatediff as logger
|
22 |
+
|
23 |
+
|
24 |
+
class AnimateDiffI2IBatch:
|
25 |
+
original_img2img_process_batch = None
|
26 |
+
|
27 |
+
def hack(self):
|
28 |
+
# TODO: PR this hack to A1111
|
29 |
+
if AnimateDiffI2IBatch.original_img2img_process_batch is not None:
|
30 |
+
logger.info("Hacking i2i-batch is already done.")
|
31 |
+
return
|
32 |
+
|
33 |
+
logger.info("Hacking i2i-batch.")
|
34 |
+
AnimateDiffI2IBatch.original_img2img_process_batch = img2img.process_batch
|
35 |
+
original_img2img_process_batch = AnimateDiffI2IBatch.original_img2img_process_batch
|
36 |
+
|
37 |
+
def hacked_i2i_init(self, all_prompts, all_seeds, all_subseeds): # only hack this when i2i-batch with batch mask
|
38 |
+
self.image_cfg_scale: float = self.image_cfg_scale if shared.sd_model.cond_stage_key == "edit" else None
|
39 |
+
|
40 |
+
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
|
41 |
+
crop_regions = []
|
42 |
+
paste_to = []
|
43 |
+
masks_for_overlay = []
|
44 |
+
|
45 |
+
image_masks = self.image_mask
|
46 |
+
|
47 |
+
for idx, image_mask in enumerate(image_masks):
|
48 |
+
# image_mask is passed in as RGBA by Gradio to support alpha masks,
|
49 |
+
# but we still want to support binary masks.
|
50 |
+
image_mask = create_binary_mask(image_mask)
|
51 |
+
|
52 |
+
if self.inpainting_mask_invert:
|
53 |
+
image_mask = ImageOps.invert(image_mask)
|
54 |
+
|
55 |
+
if self.mask_blur_x > 0:
|
56 |
+
np_mask = np.array(image_mask)
|
57 |
+
kernel_size = 2 * int(2.5 * self.mask_blur_x + 0.5) + 1
|
58 |
+
np_mask = cv2.GaussianBlur(np_mask, (kernel_size, 1), self.mask_blur_x)
|
59 |
+
image_mask = Image.fromarray(np_mask)
|
60 |
+
|
61 |
+
if self.mask_blur_y > 0:
|
62 |
+
np_mask = np.array(image_mask)
|
63 |
+
kernel_size = 2 * int(2.5 * self.mask_blur_y + 0.5) + 1
|
64 |
+
np_mask = cv2.GaussianBlur(np_mask, (1, kernel_size), self.mask_blur_y)
|
65 |
+
image_mask = Image.fromarray(np_mask)
|
66 |
+
|
67 |
+
if self.inpaint_full_res:
|
68 |
+
masks_for_overlay.append(image_mask)
|
69 |
+
mask = image_mask.convert('L')
|
70 |
+
crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
|
71 |
+
crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
|
72 |
+
crop_regions.append(crop_region)
|
73 |
+
x1, y1, x2, y2 = crop_region
|
74 |
+
|
75 |
+
mask = mask.crop(crop_region)
|
76 |
+
image_mask = images.resize_image(2, mask, self.width, self.height)
|
77 |
+
paste_to.append((x1, y1, x2-x1, y2-y1))
|
78 |
+
else:
|
79 |
+
image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
|
80 |
+
np_mask = np.array(image_mask)
|
81 |
+
np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
|
82 |
+
masks_for_overlay.append(Image.fromarray(np_mask))
|
83 |
+
|
84 |
+
image_masks[idx] = image_mask
|
85 |
+
|
86 |
+
self.mask_for_overlay = masks_for_overlay[0] # only for saving purpose
|
87 |
+
if paste_to:
|
88 |
+
self.paste_to = paste_to[0]
|
89 |
+
self._animatediff_paste_to_full = paste_to
|
90 |
+
|
91 |
+
self.overlay_images = []
|
92 |
+
add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
|
93 |
+
if add_color_corrections:
|
94 |
+
self.color_corrections = []
|
95 |
+
imgs = []
|
96 |
+
for idx, img in enumerate(self.init_images):
|
97 |
+
latent_mask = (self.latent_mask[idx] if isinstance(self.latent_mask, list) else self.latent_mask) if self.latent_mask is not None else image_masks[idx]
|
98 |
+
# Save init image
|
99 |
+
if opts.save_init_img:
|
100 |
+
self.init_img_hash = hashlib.md5(img.tobytes()).hexdigest()
|
101 |
+
images.save_image(img, path=opts.outdir_init_images, basename=None, forced_filename=self.init_img_hash, save_to_dirs=False)
|
102 |
+
|
103 |
+
image = images.flatten(img, opts.img2img_background_color)
|
104 |
+
|
105 |
+
if not crop_regions and self.resize_mode != 3:
|
106 |
+
image = images.resize_image(self.resize_mode, image, self.width, self.height)
|
107 |
+
|
108 |
+
if image_masks:
|
109 |
+
image_masked = Image.new('RGBa', (image.width, image.height))
|
110 |
+
image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(masks_for_overlay[idx].convert('L')))
|
111 |
+
|
112 |
+
self.overlay_images.append(image_masked.convert('RGBA'))
|
113 |
+
|
114 |
+
# crop_region is not None if we are doing inpaint full res
|
115 |
+
if crop_regions:
|
116 |
+
image = image.crop(crop_regions[idx])
|
117 |
+
image = images.resize_image(2, image, self.width, self.height)
|
118 |
+
|
119 |
+
if image_masks:
|
120 |
+
if self.inpainting_fill != 1:
|
121 |
+
image = masking.fill(image, latent_mask)
|
122 |
+
|
123 |
+
if add_color_corrections:
|
124 |
+
self.color_corrections.append(setup_color_correction(image))
|
125 |
+
|
126 |
+
image = np.array(image).astype(np.float32) / 255.0
|
127 |
+
image = np.moveaxis(image, 2, 0)
|
128 |
+
|
129 |
+
imgs.append(image)
|
130 |
+
|
131 |
+
if len(imgs) == 1:
|
132 |
+
batch_images = np.expand_dims(imgs[0], axis=0).repeat(self.batch_size, axis=0)
|
133 |
+
if self.overlay_images is not None:
|
134 |
+
self.overlay_images = self.overlay_images * self.batch_size
|
135 |
+
|
136 |
+
if self.color_corrections is not None and len(self.color_corrections) == 1:
|
137 |
+
self.color_corrections = self.color_corrections * self.batch_size
|
138 |
+
|
139 |
+
elif len(imgs) <= self.batch_size:
|
140 |
+
self.batch_size = len(imgs)
|
141 |
+
batch_images = np.array(imgs)
|
142 |
+
else:
|
143 |
+
raise RuntimeError(f"bad number of images passed: {len(imgs)}; expecting {self.batch_size} or less")
|
144 |
+
|
145 |
+
image = torch.from_numpy(batch_images)
|
146 |
+
image = image.to(shared.device, dtype=devices.dtype_vae)
|
147 |
+
|
148 |
+
if opts.sd_vae_encode_method != 'Full':
|
149 |
+
self.extra_generation_params['VAE Encoder'] = opts.sd_vae_encode_method
|
150 |
+
|
151 |
+
self.init_latent = images_tensor_to_samples(image, approximation_indexes.get(opts.sd_vae_encode_method), self.sd_model)
|
152 |
+
devices.torch_gc()
|
153 |
+
|
154 |
+
if self.resize_mode == 3:
|
155 |
+
self.init_latent = torch.nn.functional.interpolate(self.init_latent, size=(self.height // opt_f, self.width // opt_f), mode="bilinear")
|
156 |
+
|
157 |
+
if image_masks is not None:
|
158 |
+
def process_letmask(init_mask):
|
159 |
+
# init_mask = latent_mask
|
160 |
+
latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
|
161 |
+
latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
|
162 |
+
latmask = latmask[0]
|
163 |
+
latmask = np.around(latmask)
|
164 |
+
return np.tile(latmask[None], (4, 1, 1))
|
165 |
+
|
166 |
+
if self.latent_mask is not None and not isinstance(self.latent_mask, list):
|
167 |
+
latmask = process_letmask(self.latent_mask)
|
168 |
+
else:
|
169 |
+
if isinstance(self.latent_mask, list):
|
170 |
+
latmask = [process_letmask(x) for x in self.latent_mask]
|
171 |
+
else:
|
172 |
+
latmask = [process_letmask(x) for x in image_masks]
|
173 |
+
latmask = np.stack(latmask, axis=0)
|
174 |
+
|
175 |
+
self.mask = torch.asarray(1.0 - latmask).to(shared.device).type(self.sd_model.dtype)
|
176 |
+
self.nmask = torch.asarray(latmask).to(shared.device).type(self.sd_model.dtype)
|
177 |
+
|
178 |
+
# this needs to be fixed to be done in sample() using actual seeds for batches
|
179 |
+
if self.inpainting_fill == 2:
|
180 |
+
self.init_latent = self.init_latent * self.mask + create_random_tensors(self.init_latent.shape[1:], all_seeds[0:self.init_latent.shape[0]]) * self.nmask
|
181 |
+
elif self.inpainting_fill == 3:
|
182 |
+
self.init_latent = self.init_latent * self.mask
|
183 |
+
|
184 |
+
self.image_conditioning = self.img2img_image_conditioning(image * 2 - 1, self.init_latent, image_masks) # let's ignore this image_masks which is related to inpaint model with different arch
|
185 |
+
|
186 |
+
def hacked_img2img_process_batch_hijack(
|
187 |
+
p: StableDiffusionProcessingImg2Img, input_dir: str, output_dir: str, inpaint_mask_dir: str,
|
188 |
+
args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
|
189 |
+
if p.scripts:
|
190 |
+
for script in p.scripts.alwayson_scripts:
|
191 |
+
if script.title().lower() == "animatediff":
|
192 |
+
ad_arg = p.script_args[script.args_from]
|
193 |
+
ad_enabled = ad_arg.get('enable', False) if isinstance(ad_arg, dict) else getattr(ad_arg, 'enable', False)
|
194 |
+
if ad_enabled:
|
195 |
+
p._animatediff_i2i_batch = 1 # i2i-batch mode, ordinary
|
196 |
+
|
197 |
+
if not hasattr(p, '_animatediff_i2i_batch'):
|
198 |
+
return original_img2img_process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale, scale_by, use_png_info, png_info_props, png_info_dir)
|
199 |
+
output_dir = output_dir.strip()
|
200 |
+
processing.fix_seed(p)
|
201 |
+
|
202 |
+
images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
|
203 |
+
|
204 |
+
is_inpaint_batch = False
|
205 |
+
if inpaint_mask_dir:
|
206 |
+
inpaint_masks = shared.listfiles(inpaint_mask_dir)
|
207 |
+
is_inpaint_batch = bool(inpaint_masks)
|
208 |
+
|
209 |
+
if is_inpaint_batch:
|
210 |
+
assert len(inpaint_masks) == 1 or len(inpaint_masks) == len(images), 'The number of masks must be 1 or equal to the number of images.'
|
211 |
+
logger.info(f"\n[i2i batch] Inpaint batch is enabled. {len(inpaint_masks)} masks found.")
|
212 |
+
if len(inpaint_masks) > 1: # batch mask
|
213 |
+
p.init = MethodType(hacked_i2i_init, p)
|
214 |
+
|
215 |
+
logger.info(f"[i2i batch] Will process {len(images)} images, creating {p.n_iter} new videos.")
|
216 |
+
|
217 |
+
# extract "default" params to use in case getting png info fails
|
218 |
+
prompt = p.prompt
|
219 |
+
negative_prompt = p.negative_prompt
|
220 |
+
seed = p.seed
|
221 |
+
cfg_scale = p.cfg_scale
|
222 |
+
sampler_name = p.sampler_name
|
223 |
+
steps = p.steps
|
224 |
+
frame_images = []
|
225 |
+
frame_masks = []
|
226 |
+
|
227 |
+
for i, image in enumerate(images):
|
228 |
+
|
229 |
+
try:
|
230 |
+
img = Image.open(image)
|
231 |
+
except UnidentifiedImageError as e:
|
232 |
+
print(e)
|
233 |
+
continue
|
234 |
+
# Use the EXIF orientation of photos taken by smartphones.
|
235 |
+
img = ImageOps.exif_transpose(img)
|
236 |
+
|
237 |
+
if to_scale:
|
238 |
+
p.width = int(img.width * scale_by)
|
239 |
+
p.height = int(img.height * scale_by)
|
240 |
+
|
241 |
+
frame_images.append(img)
|
242 |
+
|
243 |
+
image_path = Path(image)
|
244 |
+
if is_inpaint_batch:
|
245 |
+
if len(inpaint_masks) == 1:
|
246 |
+
mask_image_path = inpaint_masks[0]
|
247 |
+
p.image_mask = Image.open(mask_image_path)
|
248 |
+
else:
|
249 |
+
# try to find corresponding mask for an image using index matching
|
250 |
+
mask_image_path = inpaint_masks[i]
|
251 |
+
frame_masks.append(Image.open(mask_image_path))
|
252 |
+
|
253 |
+
mask_image = Image.open(mask_image_path)
|
254 |
+
p.image_mask = mask_image
|
255 |
+
|
256 |
+
if use_png_info:
|
257 |
+
try:
|
258 |
+
info_img = frame_images[0]
|
259 |
+
if png_info_dir:
|
260 |
+
info_img_path = os.path.join(png_info_dir, os.path.basename(image))
|
261 |
+
info_img = Image.open(info_img_path)
|
262 |
+
from modules import images as imgutil
|
263 |
+
from modules.generation_parameters_copypaste import parse_generation_parameters
|
264 |
+
geninfo, _ = imgutil.read_info_from_image(info_img)
|
265 |
+
parsed_parameters = parse_generation_parameters(geninfo)
|
266 |
+
parsed_parameters = {k: v for k, v in parsed_parameters.items() if k in (png_info_props or {})}
|
267 |
+
except Exception:
|
268 |
+
parsed_parameters = {}
|
269 |
+
|
270 |
+
p.prompt = prompt + (" " + parsed_parameters["Prompt"] if "Prompt" in parsed_parameters else "")
|
271 |
+
p.negative_prompt = negative_prompt + (" " + parsed_parameters["Negative prompt"] if "Negative prompt" in parsed_parameters else "")
|
272 |
+
p.seed = int(parsed_parameters.get("Seed", seed))
|
273 |
+
p.cfg_scale = float(parsed_parameters.get("CFG scale", cfg_scale))
|
274 |
+
p.sampler_name = parsed_parameters.get("Sampler", sampler_name)
|
275 |
+
p.steps = int(parsed_parameters.get("Steps", steps))
|
276 |
+
|
277 |
+
p.init_images = frame_images
|
278 |
+
if len(frame_masks) > 0:
|
279 |
+
p.image_mask = frame_masks
|
280 |
+
|
281 |
+
proc = scripts.scripts_img2img.run(p, *args) # we should not support this, but just leave it here
|
282 |
+
if proc is None:
|
283 |
+
if output_dir:
|
284 |
+
p.outpath_samples = output_dir
|
285 |
+
p.override_settings['save_to_dirs'] = False
|
286 |
+
if p.n_iter > 1 or p.batch_size > 1:
|
287 |
+
p.override_settings['samples_filename_pattern'] = f'{image_path.stem}-[generation_number]'
|
288 |
+
else:
|
289 |
+
p.override_settings['samples_filename_pattern'] = f'{image_path.stem}'
|
290 |
+
return process_images(p)
|
291 |
+
else:
|
292 |
+
logger.warn("Warning: you are using an unsupported external script. AnimateDiff may not work properly.")
|
293 |
+
|
294 |
+
img2img.process_batch = hacked_img2img_process_batch_hijack
|
295 |
+
|
296 |
+
|
297 |
+
def cap_init_image(self, p: StableDiffusionProcessingImg2Img, params):
|
298 |
+
if params.enable and isinstance(p, StableDiffusionProcessingImg2Img) and hasattr(p, '_animatediff_i2i_batch'):
|
299 |
+
if len(p.init_images) > params.video_length:
|
300 |
+
p.init_images = p.init_images[:params.video_length]
|
301 |
+
if p.image_mask and isinstance(p.image_mask, list) and len(p.image_mask) > params.video_length:
|
302 |
+
p.image_mask = p.image_mask[:params.video_length]
|
303 |
+
if len(p.init_images) < params.video_length:
|
304 |
+
params.video_length = len(p.init_images)
|
305 |
+
if len(p.init_images) < params.batch_size:
|
306 |
+
params.batch_size = len(p.init_images)
|
307 |
+
|
308 |
+
|
309 |
+
animatediff_i2ibatch = AnimateDiffI2IBatch()
|
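The batch-mask path in `hacked_i2i_init` above blurs each mask directionally with `cv2.GaussianBlur`, deriving an odd kernel size from the blur radius. A small self-contained sketch of that step, using a synthetic mask in place of files from the inpaint mask directory:

# Standalone sketch of the directional mask blur used in hacked_i2i_init
# (the mask here is synthetic; in the extension it comes from the batch mask dir).
import cv2
import numpy as np

mask_blur_x, mask_blur_y = 4, 4
np_mask = np.zeros((64, 64), dtype=np.uint8)
np_mask[16:48, 16:48] = 255  # a hard-edged square mask

# blur horizontally, then vertically, with the same kernel-size rule as above
kx = 2 * int(2.5 * mask_blur_x + 0.5) + 1
np_mask = cv2.GaussianBlur(np_mask, (kx, 1), mask_blur_x)
ky = 2 * int(2.5 * mask_blur_y + 0.5) + 1
np_mask = cv2.GaussianBlur(np_mask, (1, ky), mask_blur_y)
print(np_mask.shape, np_mask.dtype)  # (64, 64) uint8, now with soft edges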
extensions/sd-webui-animatediff/scripts/animatediff_infotext.py
ADDED
@@ -0,0 +1,35 @@
+import os
+
+from modules.paths import data_path
+from modules.processing import StableDiffusionProcessing, StableDiffusionProcessingImg2Img
+
+from scripts.animatediff_ui import AnimateDiffProcess
+from scripts.animatediff_logger import logger_animatediff as logger
+
+
+def update_infotext(p: StableDiffusionProcessing, params: AnimateDiffProcess):
+    if p.extra_generation_params is not None:
+        p.extra_generation_params["AnimateDiff"] = params.get_dict(isinstance(p, StableDiffusionProcessingImg2Img))
+
+
+def write_params_txt(info: str):
+    with open(os.path.join(data_path, "params.txt"), "w", encoding="utf8") as file:
+        file.write(info)
+
+
+
+def infotext_pasted(infotext, results):
+    for k, v in results.items():
+        if not k.startswith("AnimateDiff"):
+            continue
+
+        assert isinstance(v, str), f"Expect string but got {v}."
+        try:
+            for items in v.split(', '):
+                field, value = items.split(': ')
+                results[f"AnimateDiff {field}"] = value
+        except Exception:
+            logger.warn(
+                f"Failed to parse infotext, legacy format infotext is no longer supported:\n{v}"
+            )
+        break
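`infotext_pasted` expects the `AnimateDiff` infotext value to be a comma-separated list of `field: value` pairs, which it expands into individual `AnimateDiff <field>` entries. A quick illustration with made-up field names (the real names come from `AnimateDiffProcess.get_dict`):

# Illustrative round trip for the "AnimateDiff" infotext entry parsed above.
# The field names below are examples only, not the authoritative list.
results = {"AnimateDiff": "enable: True, video_length: 16, fps: 8"}

for items in results["AnimateDiff"].split(', '):
    field, value = items.split(': ')
    results[f"AnimateDiff {field}"] = value

print(results["AnimateDiff video_length"])  # "16"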
extensions/sd-webui-animatediff/scripts/animatediff_infv2v.py
ADDED
@@ -0,0 +1,322 @@
1 |
+
from typing import List
|
2 |
+
|
3 |
+
import numpy as np
|
4 |
+
import torch
|
5 |
+
|
6 |
+
from modules import prompt_parser, devices, sd_samplers_common, shared
|
7 |
+
from modules.shared import opts, state
|
8 |
+
from modules.script_callbacks import CFGDenoiserParams, cfg_denoiser_callback
|
9 |
+
from modules.script_callbacks import CFGDenoisedParams, cfg_denoised_callback
|
10 |
+
from modules.script_callbacks import AfterCFGCallbackParams, cfg_after_cfg_callback
|
11 |
+
from modules.sd_samplers_cfg_denoiser import CFGDenoiser, catenate_conds, subscript_cond, pad_cond
|
12 |
+
|
13 |
+
from scripts.animatediff_logger import logger_animatediff as logger
|
14 |
+
from scripts.animatediff_ui import AnimateDiffProcess
|
15 |
+
from scripts.animatediff_prompt import AnimateDiffPromptSchedule
|
16 |
+
|
17 |
+
|
18 |
+
class AnimateDiffInfV2V:
|
19 |
+
cfg_original_forward = None
|
20 |
+
|
21 |
+
def __init__(self, p, prompt_scheduler: AnimateDiffPromptSchedule):
|
22 |
+
try:
|
23 |
+
from scripts.external_code import find_cn_script
|
24 |
+
self.cn_script = find_cn_script(p.scripts)
|
25 |
+
except:
|
26 |
+
self.cn_script = None
|
27 |
+
self.prompt_scheduler = prompt_scheduler
|
28 |
+
|
29 |
+
|
30 |
+
# Returns fraction that has denominator that is a power of 2
|
31 |
+
@staticmethod
|
32 |
+
def ordered_halving(val):
|
33 |
+
# get binary value, padded with 0s for 64 bits
|
34 |
+
bin_str = f"{val:064b}"
|
35 |
+
# flip binary value, padding included
|
36 |
+
bin_flip = bin_str[::-1]
|
37 |
+
# convert binary to int
|
38 |
+
as_int = int(bin_flip, 2)
|
39 |
+
# divide by 1 << 64, equivalent to 2**64, or 18446744073709551616,
|
40 |
+
# or b10000000000000000000000000000000000000000000000000000000000000000 (1 with 64 zero's)
|
41 |
+
final = as_int / (1 << 64)
|
42 |
+
return final
|
43 |
+
|
44 |
+
|
45 |
+
# Generator that returns lists of latent indeces to diffuse on
|
46 |
+
@staticmethod
|
47 |
+
def uniform(
|
48 |
+
step: int = ...,
|
49 |
+
video_length: int = 0,
|
50 |
+
batch_size: int = 16,
|
51 |
+
stride: int = 1,
|
52 |
+
overlap: int = 4,
|
53 |
+
loop_setting: str = 'R-P',
|
54 |
+
):
|
55 |
+
if video_length <= batch_size:
|
56 |
+
yield list(range(batch_size))
|
57 |
+
return
|
58 |
+
|
59 |
+
closed_loop = (loop_setting == 'A')
|
60 |
+
stride = min(stride, int(np.ceil(np.log2(video_length / batch_size))) + 1)
|
61 |
+
|
62 |
+
for context_step in 1 << np.arange(stride):
|
63 |
+
pad = int(round(video_length * AnimateDiffInfV2V.ordered_halving(step)))
|
64 |
+
both_close_loop = False
|
65 |
+
for j in range(
|
66 |
+
int(AnimateDiffInfV2V.ordered_halving(step) * context_step) + pad,
|
67 |
+
video_length + pad + (0 if closed_loop else -overlap),
|
68 |
+
(batch_size * context_step - overlap),
|
69 |
+
):
|
70 |
+
if loop_setting == 'N' and context_step == 1:
|
71 |
+
current_context = [e % video_length for e in range(j, j + batch_size * context_step, context_step)]
|
72 |
+
first_context = [e % video_length for e in range(0, batch_size * context_step, context_step)]
|
73 |
+
last_context = [e % video_length for e in range(video_length - batch_size * context_step, video_length, context_step)]
|
74 |
+
def get_unsorted_index(lst):
|
75 |
+
for i in range(1, len(lst)):
|
76 |
+
if lst[i] < lst[i-1]:
|
77 |
+
return i
|
78 |
+
return None
|
79 |
+
unsorted_index = get_unsorted_index(current_context)
|
80 |
+
if unsorted_index is None:
|
81 |
+
yield current_context
|
82 |
+
elif both_close_loop: # last and this context are close loop
|
83 |
+
both_close_loop = False
|
84 |
+
yield first_context
|
85 |
+
elif unsorted_index < batch_size - overlap: # only this context is close loop
|
86 |
+
yield last_context
|
87 |
+
yield first_context
|
88 |
+
else: # this and next context are close loop
|
89 |
+
both_close_loop = True
|
90 |
+
yield last_context
|
91 |
+
else:
|
92 |
+
yield [e % video_length for e in range(j, j + batch_size * context_step, context_step)]
|
93 |
+
|
94 |
+
|
95 |
+
def hack(self, params: AnimateDiffProcess):
|
96 |
+
if AnimateDiffInfV2V.cfg_original_forward is not None:
|
97 |
+
logger.info("CFGDenoiser already hacked")
|
98 |
+
return
|
99 |
+
|
100 |
+
logger.info(f"Hacking CFGDenoiser forward function.")
|
101 |
+
AnimateDiffInfV2V.cfg_original_forward = CFGDenoiser.forward
|
102 |
+
cn_script = self.cn_script
|
103 |
+
prompt_scheduler = self.prompt_scheduler
|
104 |
+
|
105 |
+
def mm_cn_select(context: List[int]):
|
106 |
+
# take control images for current context.
|
107 |
+
if cn_script and cn_script.latest_network:
|
108 |
+
from scripts.hook import ControlModelType
|
109 |
+
for control in cn_script.latest_network.control_params:
|
110 |
+
if control.control_model_type not in [ControlModelType.IPAdapter, ControlModelType.Controlllite]:
|
111 |
+
if control.hint_cond.shape[0] > len(context):
|
112 |
+
control.hint_cond_backup = control.hint_cond
|
113 |
+
control.hint_cond = control.hint_cond[context]
|
114 |
+
control.hint_cond = control.hint_cond.to(device=devices.get_device_for("controlnet"))
|
115 |
+
if control.hr_hint_cond is not None:
|
116 |
+
if control.hr_hint_cond.shape[0] > len(context):
|
117 |
+
control.hr_hint_cond_backup = control.hr_hint_cond
|
118 |
+
control.hr_hint_cond = control.hr_hint_cond[context]
|
119 |
+
control.hr_hint_cond = control.hr_hint_cond.to(device=devices.get_device_for("controlnet"))
|
120 |
+
# IPAdapter and Controlllite are always on CPU.
|
121 |
+
elif control.control_model_type == ControlModelType.IPAdapter and control.control_model.image_emb.shape[0] > len(context):
|
122 |
+
control.control_model.image_emb_backup = control.control_model.image_emb
|
123 |
+
control.control_model.image_emb = control.control_model.image_emb[context]
|
124 |
+
control.control_model.uncond_image_emb_backup = control.control_model.uncond_image_emb
|
125 |
+
control.control_model.uncond_image_emb = control.control_model.uncond_image_emb[context]
|
126 |
+
elif control.control_model_type == ControlModelType.Controlllite:
|
127 |
+
for module in control.control_model.modules.values():
|
128 |
+
if module.cond_image.shape[0] > len(context):
|
129 |
+
module.cond_image_backup = module.cond_image
|
130 |
+
module.set_cond_image(module.cond_image[context])
|
131 |
+
|
132 |
+
def mm_cn_restore(context: List[int]):
|
133 |
+
# restore control images for next context
|
134 |
+
if cn_script and cn_script.latest_network:
|
135 |
+
from scripts.hook import ControlModelType
|
136 |
+
for control in cn_script.latest_network.control_params:
|
137 |
+
if control.control_model_type not in [ControlModelType.IPAdapter, ControlModelType.Controlllite]:
|
138 |
+
if getattr(control, "hint_cond_backup", None) is not None:
|
139 |
+
control.hint_cond_backup[context] = control.hint_cond.to(device="cpu")
|
140 |
+
control.hint_cond = control.hint_cond_backup
|
141 |
+
if control.hr_hint_cond is not None and getattr(control, "hr_hint_cond_backup", None) is not None:
|
142 |
+
control.hr_hint_cond_backup[context] = control.hr_hint_cond.to(device="cpu")
|
143 |
+
control.hr_hint_cond = control.hr_hint_cond_backup
|
144 |
+
elif control.control_model_type == ControlModelType.IPAdapter and getattr(control.control_model, "image_emb_backup", None) is not None:
|
145 |
+
control.control_model.image_emb = control.control_model.image_emb_backup
|
146 |
+
control.control_model.uncond_image_emb = control.control_model.uncond_image_emb_backup
|
147 |
+
elif control.control_model_type == ControlModelType.Controlllite:
|
148 |
+
for module in control.control_model.modules.values():
|
149 |
+
if getattr(module, "cond_image_backup", None) is not None:
|
150 |
+
module.set_cond_image(module.cond_image_backup)
|
151 |
+
|
152 |
+
def mm_sd_forward(self, x_in, sigma_in, cond_in, image_cond_in, make_condition_dict):
|
153 |
+
x_out = torch.zeros_like(x_in)
|
154 |
+
for context in AnimateDiffInfV2V.uniform(self.step, params.video_length, params.batch_size, params.stride, params.overlap, params.closed_loop):
|
155 |
+
if shared.opts.batch_cond_uncond:
|
156 |
+
_context = context + [c + params.video_length for c in context]
|
157 |
+
else:
|
158 |
+
_context = context
|
159 |
+
mm_cn_select(_context)
|
160 |
+
out = self.inner_model(
|
161 |
+
x_in[_context], sigma_in[_context],
|
162 |
+
cond=make_condition_dict(
|
163 |
+
cond_in[_context] if not isinstance(cond_in, dict) else {k: v[_context] for k, v in cond_in.items()},
|
164 |
+
image_cond_in[_context]))
|
165 |
+
x_out = x_out.to(dtype=out.dtype)
|
166 |
+
x_out[_context] = out
|
167 |
+
mm_cn_restore(_context)
|
168 |
+
return x_out
|
169 |
+
|
170 |
+
def mm_cfg_forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond):
|
171 |
+
if state.interrupted or state.skipped:
|
172 |
+
raise sd_samplers_common.InterruptedException
|
173 |
+
|
174 |
+
if sd_samplers_common.apply_refiner(self):
|
175 |
+
cond = self.sampler.sampler_extra_args['cond']
|
176 |
+
uncond = self.sampler.sampler_extra_args['uncond']
|
177 |
+
|
178 |
+
# at self.image_cfg_scale == 1.0 produced results for edit model are the same as with normal sampling,
|
179 |
+
# so is_edit_model is set to False to support AND composition.
|
180 |
+
is_edit_model = shared.sd_model.cond_stage_key == "edit" and self.image_cfg_scale is not None and self.image_cfg_scale != 1.0
|
181 |
+
|
182 |
+
conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
|
183 |
+
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
|
184 |
+
|
185 |
+
assert not is_edit_model or all(len(conds) == 1 for conds in conds_list), "AND is not supported for InstructPix2Pix checkpoint (unless using Image CFG scale = 1.0)"
|
186 |
+
|
187 |
+
if self.mask_before_denoising and self.mask is not None:
|
188 |
+
x = self.init_latent * self.mask + self.nmask * x
|
189 |
+
|
190 |
+
batch_size = len(conds_list)
|
191 |
+
repeats = [len(conds_list[i]) for i in range(batch_size)]
|
192 |
+
|
193 |
+
if shared.sd_model.model.conditioning_key == "crossattn-adm":
|
194 |
+
image_uncond = torch.zeros_like(image_cond) # this should not be supported.
|
195 |
+
make_condition_dict = lambda c_crossattn, c_adm: {"c_crossattn": [c_crossattn], "c_adm": c_adm}
|
196 |
+
else:
|
197 |
+
image_uncond = image_cond
|
198 |
+
if isinstance(uncond, dict):
|
199 |
+
make_condition_dict = lambda c_crossattn, c_concat: {**c_crossattn, "c_concat": [c_concat]}
|
200 |
+
else:
|
201 |
+
make_condition_dict = lambda c_crossattn, c_concat: {"c_crossattn": [c_crossattn], "c_concat": [c_concat]}
|
202 |
+
|
203 |
+
if not is_edit_model:
|
204 |
+
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
|
205 |
+
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
|
206 |
+
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond])
|
207 |
+
else:
|
208 |
+
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x] + [x])
|
209 |
+
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma] + [sigma])
|
210 |
+
image_cond_in = torch.cat([torch.stack([image_cond[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [image_uncond] + [torch.zeros_like(self.init_latent)])
|
211 |
+
|
212 |
+
denoiser_params = CFGDenoiserParams(x_in, image_cond_in, sigma_in, state.sampling_step, state.sampling_steps, tensor, uncond)
|
213 |
+
cfg_denoiser_callback(denoiser_params)
|
214 |
+
x_in = denoiser_params.x
|
215 |
+
image_cond_in = denoiser_params.image_cond
|
216 |
+
sigma_in = denoiser_params.sigma
|
217 |
+
tensor = denoiser_params.text_cond
|
218 |
+
uncond = denoiser_params.text_uncond
|
219 |
+
skip_uncond = False
|
220 |
+
|
221 |
+
# alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
|
222 |
+
if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
|
223 |
+
skip_uncond = True
|
224 |
+
x_in = x_in[:-batch_size]
|
225 |
+
sigma_in = sigma_in[:-batch_size]
|
226 |
+
|
227 |
+
self.padded_cond_uncond = False
|
228 |
+
if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]:
|
229 |
+
empty = shared.sd_model.cond_stage_model_empty_prompt
|
230 |
+
num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1]
|
231 |
+
|
232 |
+
if num_repeats < 0:
|
233 |
+
tensor = pad_cond(tensor, -num_repeats, empty)
|
234 |
+
self.padded_cond_uncond = True
|
235 |
+
elif num_repeats > 0:
|
236 |
+
uncond = pad_cond(uncond, num_repeats, empty)
|
237 |
+
self.padded_cond_uncond = True
|
238 |
+
|
239 |
+
if tensor.shape[1] == uncond.shape[1] or skip_uncond:
|
240 |
+
prompt_closed_loop = (params.video_length > params.batch_size) and (params.closed_loop in ['R+P', 'A']) # hook
|
241 |
+
tensor = prompt_scheduler.multi_cond(tensor, prompt_closed_loop) # hook
|
242 |
+
if is_edit_model:
|
243 |
+
cond_in = catenate_conds([tensor, uncond, uncond])
|
244 |
+
elif skip_uncond:
|
245 |
+
cond_in = tensor
|
246 |
+
else:
|
247 |
+
cond_in = catenate_conds([tensor, uncond])
|
248 |
+
|
249 |
+
if shared.opts.batch_cond_uncond:
|
250 |
+
x_out = mm_sd_forward(self, x_in, sigma_in, cond_in, image_cond_in, make_condition_dict) # hook
|
251 |
+
else:
|
252 |
+
x_out = torch.zeros_like(x_in)
|
253 |
+
for batch_offset in range(0, x_out.shape[0], batch_size):
|
254 |
+
a = batch_offset
|
255 |
+
b = a + batch_size
|
256 |
+
x_out[a:b] = mm_sd_forward(self, x_in[a:b], sigma_in[a:b], subscript_cond(cond_in, a, b), subscript_cond(image_cond_in, a, b), make_condition_dict) # hook
|
257 |
+
else:
|
258 |
+
x_out = torch.zeros_like(x_in)
|
259 |
+
batch_size = batch_size*2 if shared.opts.batch_cond_uncond else batch_size
|
260 |
+
for batch_offset in range(0, tensor.shape[0], batch_size):
|
261 |
+
a = batch_offset
|
262 |
+
b = min(a + batch_size, tensor.shape[0])
|
263 |
+
|
264 |
+
if not is_edit_model:
|
265 |
+
c_crossattn = subscript_cond(tensor, a, b)
|
266 |
+
else:
|
267 |
+
c_crossattn = torch.cat([tensor[a:b]], uncond)
|
268 |
+
|
269 |
+
x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=make_condition_dict(c_crossattn, image_cond_in[a:b]))
|
270 |
+
|
271 |
+
if not skip_uncond:
|
272 |
+
x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=make_condition_dict(uncond, image_cond_in[-uncond.shape[0]:]))
|
273 |
+
|
274 |
+
denoised_image_indexes = [x[0][0] for x in conds_list]
|
275 |
+
if skip_uncond:
|
276 |
+
fake_uncond = torch.cat([x_out[i:i+1] for i in denoised_image_indexes])
|
277 |
+
x_out = torch.cat([x_out, fake_uncond]) # we skipped uncond denoising, so we put cond-denoised image to where the uncond-denoised image should be
|
278 |
+
|
279 |
+
denoised_params = CFGDenoisedParams(x_out, state.sampling_step, state.sampling_steps, self.inner_model)
|
280 |
+
cfg_denoised_callback(denoised_params)
|
281 |
+
|
282 |
+
devices.test_for_nans(x_out, "unet")
|
283 |
+
|
284 |
+
if is_edit_model:
|
285 |
+
denoised = self.combine_denoised_for_edit_model(x_out, cond_scale)
|
286 |
+
elif skip_uncond:
|
287 |
+
denoised = self.combine_denoised(x_out, conds_list, uncond, 1.0)
|
288 |
+
else:
|
289 |
+
denoised = self.combine_denoised(x_out, conds_list, uncond, cond_scale)
|
290 |
+
|
291 |
+
if not self.mask_before_denoising and self.mask is not None:
|
292 |
+
denoised = self.init_latent * self.mask + self.nmask * denoised
|
293 |
+
|
294 |
+
self.sampler.last_latent = self.get_pred_x0(torch.cat([x_in[i:i + 1] for i in denoised_image_indexes]), torch.cat([x_out[i:i + 1] for i in denoised_image_indexes]), sigma)
|
295 |
+
|
296 |
+
if opts.live_preview_content == "Prompt":
|
297 |
+
preview = self.sampler.last_latent
|
298 |
+
elif opts.live_preview_content == "Negative prompt":
|
299 |
+
preview = self.get_pred_x0(x_in[-uncond.shape[0]:], x_out[-uncond.shape[0]:], sigma)
|
300 |
+
else:
|
301 |
+
preview = self.get_pred_x0(torch.cat([x_in[i:i+1] for i in denoised_image_indexes]), torch.cat([denoised[i:i+1] for i in denoised_image_indexes]), sigma)
|
302 |
+
|
303 |
+
sd_samplers_common.store_latent(preview)
|
304 |
+
|
305 |
+
after_cfg_callback_params = AfterCFGCallbackParams(denoised, state.sampling_step, state.sampling_steps)
|
306 |
+
cfg_after_cfg_callback(after_cfg_callback_params)
|
307 |
+
denoised = after_cfg_callback_params.x
|
308 |
+
|
309 |
+
self.step += 1
|
310 |
+
return denoised
|
311 |
+
|
312 |
+
CFGDenoiser.forward = mm_cfg_forward
|
313 |
+
|
314 |
+
|
315 |
+
def restore(self):
|
316 |
+
if AnimateDiffInfV2V.cfg_original_forward is None:
|
317 |
+
logger.info("CFGDenoiser already restored.")
|
318 |
+
return
|
319 |
+
|
320 |
+
logger.info(f"Restoring CFGDenoiser forward function.")
|
321 |
+
CFGDenoiser.forward = AnimateDiffInfV2V.cfg_original_forward
|
322 |
+
AnimateDiffInfV2V.cfg_original_forward = None
|
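The `uniform` context scheduler above jitters each pass's window offset with `ordered_halving`, a bit-reversal of the step index that yields a low-discrepancy fraction in [0, 1). Condensed here on its own, with a few concrete values for intuition:

# Condensed from ordered_halving above: reverse the 64-bit binary
# representation of the step index and map it back into [0, 1).
def ordered_halving(val: int) -> float:
    bin_flip = f"{val:064b}"[::-1]       # 64-bit binary string, reversed
    return int(bin_flip, 2) / (1 << 64)  # divide by 2**64

assert ordered_halving(0) == 0.0
assert ordered_halving(1) == 0.5
assert ordered_halving(2) == 0.25
assert ordered_halving(3) == 0.75
assert ordered_halving(4) == 0.125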
extensions/sd-webui-animatediff/scripts/animatediff_latent.py
ADDED
@@ -0,0 +1,84 @@
+import numpy as np
+import torch
+from modules import images, shared
+from modules.devices import device, dtype_vae, torch_gc
+from modules.processing import StableDiffusionProcessingImg2Img
+from modules.sd_samplers_common import (approximation_indexes,
+                                        images_tensor_to_samples)
+
+from scripts.animatediff_logger import logger_animatediff as logger
+from scripts.animatediff_ui import AnimateDiffProcess
+
+
+class AnimateDiffI2VLatent:
+    def randomize(
+        self, p: StableDiffusionProcessingImg2Img, params: AnimateDiffProcess
+    ):
+        # Get init_alpha
+        init_alpha = [
+            1 - pow(i, params.latent_power) / params.latent_scale
+            for i in range(params.video_length)
+        ]
+        logger.info(f"Randomizing init_latent according to {init_alpha}.")
+        init_alpha = torch.tensor(init_alpha, dtype=torch.float32, device=device)[
+            :, None, None, None
+        ]
+        init_alpha[init_alpha < 0] = 0
+
+        if params.last_frame is not None:
+            last_frame = params.last_frame
+            if type(last_frame) == str:
+                from modules.api.api import decode_base64_to_image
+                last_frame = decode_base64_to_image(last_frame)
+            # Get last_alpha
+            last_alpha = [
+                1 - pow(i, params.latent_power_last) / params.latent_scale_last
+                for i in range(params.video_length)
+            ]
+            last_alpha.reverse()
+            logger.info(f"Randomizing last_latent according to {last_alpha}.")
+            last_alpha = torch.tensor(last_alpha, dtype=torch.float32, device=device)[
+                :, None, None, None
+            ]
+            last_alpha[last_alpha < 0] = 0
+
+            # Normalize alpha
+            sum_alpha = init_alpha + last_alpha
+            mask_alpha = sum_alpha > 1
+            scaling_factor = 1 / sum_alpha[mask_alpha]
+            init_alpha[mask_alpha] *= scaling_factor
+            last_alpha[mask_alpha] *= scaling_factor
+            init_alpha[0] = 1
+            init_alpha[-1] = 0
+            last_alpha[0] = 0
+            last_alpha[-1] = 1
+
+            # Calculate last_latent
+            if p.resize_mode != 3:
+                last_frame = images.resize_image(
+                    p.resize_mode, last_frame, p.width, p.height
+                )
+            last_frame = np.array(last_frame).astype(np.float32) / 255.0
+            last_frame = np.moveaxis(last_frame, 2, 0)[None, ...]
+            last_frame = torch.from_numpy(last_frame).to(device).to(dtype_vae)
+            last_latent = images_tensor_to_samples(
+                last_frame,
+                approximation_indexes.get(shared.opts.sd_vae_encode_method),
+                p.sd_model,
+            )
+            torch_gc()
+            if p.resize_mode == 3:
+                opt_f = 8
+                last_latent = torch.nn.functional.interpolate(
+                    last_latent,
+                    size=(p.height // opt_f, p.width // opt_f),
+                    mode="bilinear",
+                )
+            # Modify init_latent
+            p.init_latent = (
+                p.init_latent * init_alpha
+                + last_latent * last_alpha
+                + p.rng.next() * (1 - init_alpha - last_alpha)
+            )
+        else:
+            p.init_latent = p.init_latent * init_alpha + p.rng.next() * (1 - init_alpha)
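`randomize` blends each frame's init latent with noise using the per-frame weight `1 - pow(i, latent_power) / latent_scale`, clipped at zero. A quick numeric sketch with example parameter values (these are illustrative, not the UI defaults):

# Per-frame blend weights as computed in randomize() above, for example values.
import torch

latent_power, latent_scale, video_length = 1, 32, 8
init_alpha = torch.tensor(
    [1 - pow(i, latent_power) / latent_scale for i in range(video_length)]
)
init_alpha[init_alpha < 0] = 0
print(init_alpha)  # starts at 1.0 for frame 0 and decays toward 0 for later frames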
extensions/sd-webui-animatediff/scripts/animatediff_lcm.py
ADDED
@@ -0,0 +1,137 @@
+
+# TODO: remove this file when LCM is merged to A1111
+import torch
+
+from k_diffusion import utils, sampling
+from k_diffusion.external import DiscreteEpsDDPMDenoiser
+from k_diffusion.sampling import default_noise_sampler, trange
+
+from modules import shared, sd_samplers_cfg_denoiser, sd_samplers_kdiffusion
+from scripts.animatediff_logger import logger_animatediff as logger
+
+
+class LCMCompVisDenoiser(DiscreteEpsDDPMDenoiser):
+    def __init__(self, model):
+        timesteps = 1000
+        beta_start = 0.00085
+        beta_end = 0.012
+
+        betas = torch.linspace(beta_start**0.5, beta_end**0.5, timesteps, dtype=torch.float32) ** 2
+        alphas = 1.0 - betas
+        alphas_cumprod = torch.cumprod(alphas, dim=0)
+
+        original_timesteps = 50  # LCM Original Timesteps (default=50, for current version of LCM)
+        self.skip_steps = timesteps // original_timesteps
+
+        alphas_cumprod_valid = torch.zeros((original_timesteps), dtype=torch.float32, device=model.device)
+        for x in range(original_timesteps):
+            alphas_cumprod_valid[original_timesteps - 1 - x] = alphas_cumprod[timesteps - 1 - x * self.skip_steps]
+
+        super().__init__(model, alphas_cumprod_valid, quantize=None)
+
+    def get_sigmas(self, n=None, sgm=False):
+        if n is None:
+            return sampling.append_zero(self.sigmas.flip(0))
+
+        start = self.sigma_to_t(self.sigma_max)
+        end = self.sigma_to_t(self.sigma_min)
+
+        if sgm:
+            t = torch.linspace(start, end, n + 1, device=shared.sd_model.device)[:-1]
+        else:
+            t = torch.linspace(start, end, n, device=shared.sd_model.device)
+
+        return sampling.append_zero(self.t_to_sigma(t))
+
+    def sigma_to_t(self, sigma, quantize=None):
+        log_sigma = sigma.log()
+        dists = log_sigma - self.log_sigmas[:, None]
+        return dists.abs().argmin(dim=0).view(sigma.shape) * self.skip_steps + (self.skip_steps - 1)
+
+    def t_to_sigma(self, timestep):
+        t = torch.clamp(((timestep - (self.skip_steps - 1)) / self.skip_steps).float(), min=0, max=(len(self.sigmas) - 1))
+        return super().t_to_sigma(t)
+
+    def get_eps(self, *args, **kwargs):
+        return self.inner_model.apply_model(*args, **kwargs)
+
+    def get_scaled_out(self, sigma, output, input):
+        sigma_data = 0.5
+        scaled_timestep = utils.append_dims(self.sigma_to_t(sigma), output.ndim) * 10.0
+
+        c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
+        c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
+
+        return c_out * output + c_skip * input
+
+    def forward(self, input, sigma, **kwargs):
+        c_out, c_in = [utils.append_dims(x, input.ndim) for x in self.get_scalings(sigma)]
+        eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
+        return self.get_scaled_out(sigma, input + eps * c_out, input)
+
+
+def sample_lcm(model, x, sigmas, extra_args=None, callback=None, disable=None, noise_sampler=None):
+    extra_args = {} if extra_args is None else extra_args
+    noise_sampler = default_noise_sampler(x) if noise_sampler is None else noise_sampler
+    s_in = x.new_ones([x.shape[0]])
+
+    for i in trange(len(sigmas) - 1, disable=disable):
+        denoised = model(x, sigmas[i] * s_in, **extra_args)
+
+        if callback is not None:
+            callback({'x': x, 'i': i, 'sigma': sigmas[i], 'sigma_hat': sigmas[i], 'denoised': denoised})
+
+        x = denoised
+        if sigmas[i + 1] > 0:
+            x += sigmas[i + 1] * noise_sampler(sigmas[i], sigmas[i + 1])
+    return x
+
+
+class CFGDenoiserLCM(sd_samplers_cfg_denoiser.CFGDenoiser):
+    @property
+    def inner_model(self):
+        if self.model_wrap is None:
+            denoiser = LCMCompVisDenoiser
+            self.model_wrap = denoiser(shared.sd_model)
+
+        return self.model_wrap
+
+
+class LCMSampler(sd_samplers_kdiffusion.KDiffusionSampler):
+    def __init__(self, funcname, sd_model, options=None):
+        super().__init__(funcname, sd_model, options)
+        self.model_wrap_cfg = CFGDenoiserLCM(self)
+        self.model_wrap = self.model_wrap_cfg.inner_model
+
+
+class AnimateDiffLCM:
+    lcm_ui_injected = False
+
+    @staticmethod
+    def hack_kdiff_ui():
+        if shared.opts.data.get("animatediff_disable_lcm", False):
+            return
+
+        if AnimateDiffLCM.lcm_ui_injected:
+            logger.info(f"LCM UI already injected.")
+            return
+
+        logger.info(f"Injecting LCM to UI.")
+        from modules import sd_samplers, sd_samplers_common
+        samplers_lcm = [('LCM', sample_lcm, ['k_lcm'], {})]
+        samplers_data_lcm = [
+            sd_samplers_common.SamplerData(label, lambda model, funcname=funcname: LCMSampler(funcname, model), aliases, options)
+            for label, funcname, aliases, options in samplers_lcm
+        ]
+        sd_samplers.all_samplers.extend(samplers_data_lcm)
+        sd_samplers.all_samplers_map = {x.name: x for x in sd_samplers.all_samplers}
+        sd_samplers.set_samplers()
+        AnimateDiffLCM.lcm_ui_injected = True
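`get_scaled_out` above applies the LCM boundary-condition scalings with `sigma_data = 0.5` and a scaled timestep of `10 * t`. A tiny sketch of those two coefficients for an example timestep (the timestep value here is arbitrary, chosen only for illustration):

# LCM boundary-condition scalings as computed in get_scaled_out above.
import torch

sigma_data = 0.5
scaled_timestep = torch.tensor(10.0 * 500)  # example: timestep t = 500
c_skip = sigma_data**2 / (scaled_timestep**2 + sigma_data**2)
c_out = scaled_timestep / (scaled_timestep**2 + sigma_data**2) ** 0.5
# the denoised prediction is then c_out * model_output + c_skip * model_input
print(float(c_skip), float(c_out))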
extensions/sd-webui-animatediff/scripts/animatediff_logger.py
ADDED
@@ -0,0 +1,41 @@
import copy
import logging
import sys

from modules import shared


class ColoredFormatter(logging.Formatter):
    COLORS = {
        "DEBUG": "\033[0;36m",  # CYAN
        "INFO": "\033[0;32m",  # GREEN
        "WARNING": "\033[0;33m",  # YELLOW
        "ERROR": "\033[0;31m",  # RED
        "CRITICAL": "\033[0;37;41m",  # WHITE ON RED
        "RESET": "\033[0m",  # RESET COLOR
    }

    def format(self, record):
        colored_record = copy.copy(record)
        levelname = colored_record.levelname
        seq = self.COLORS.get(levelname, self.COLORS["RESET"])
        colored_record.levelname = f"{seq}{levelname}{self.COLORS['RESET']}"
        return super().format(colored_record)


# Create a new logger
logger_animatediff = logging.getLogger("AnimateDiff")
logger_animatediff.propagate = False

# Add handler if we don't have one.
if not logger_animatediff.handlers:
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(
        ColoredFormatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger_animatediff.addHandler(handler)

# Configure logger
loglevel_string = getattr(shared.cmd_opts, "animatediff_loglevel", "INFO")
loglevel = getattr(logging, loglevel_string.upper(), None)
logger_animatediff.setLevel(loglevel)
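The other scripts in this extension import this shared logger rather than configuring their own handler; a minimal usage sketch (the messages are illustrative only) is:

from scripts.animatediff_logger import logger_animatediff as logger

logger.debug("verbose details, visible only when animatediff_loglevel is set to DEBUG")
logger.info("normal progress message")
logger.warning("recoverable problem")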
extensions/sd-webui-animatediff/scripts/animatediff_lora.py
ADDED
@@ -0,0 +1,84 @@
import os
import re
import sys

from modules import sd_models, shared
from modules.paths import extensions_builtin_dir

from scripts.animatediff_logger import logger_animatediff as logger

sys.path.append(f"{extensions_builtin_dir}/Lora")

class AnimateDiffLora:
    original_load_network = None

    def __init__(self, v2: bool):
        self.v2 = v2

    def hack(self):
        if not self.v2:
            return

        if AnimateDiffLora.original_load_network is not None:
            logger.info("AnimateDiff LoRA already hacked")
            return

        logger.info("Hacking LoRA module to support motion LoRA")
        import network
        import networks
        AnimateDiffLora.original_load_network = networks.load_network
        original_load_network = AnimateDiffLora.original_load_network

        def mm_load_network(name, network_on_disk):

            def convert_mm_name_to_compvis(key):
                sd_module_key, _, network_part = re.split(r'(_lora\.)', key)
                sd_module_key = sd_module_key.replace("processor.", "").replace("to_out", "to_out.0")
                return sd_module_key, 'lora_' + network_part

            net = network.Network(name, network_on_disk)
            net.mtime = os.path.getmtime(network_on_disk.filename)

            sd = sd_models.read_state_dict(network_on_disk.filename)

            if 'motion_modules' in list(sd.keys())[0]:
                logger.info(f"Loading motion LoRA {name} from {network_on_disk.filename}")
                matched_networks = {}

                for key_network, weight in sd.items():
                    key, network_part = convert_mm_name_to_compvis(key_network)
                    sd_module = shared.sd_model.network_layer_mapping.get(key, None)

                    assert sd_module is not None, f"Failed to find sd module for key {key}."

                    if key not in matched_networks:
                        matched_networks[key] = network.NetworkWeights(
                            network_key=key_network, sd_key=key, w={}, sd_module=sd_module)

                    matched_networks[key].w[network_part] = weight

                for key, weights in matched_networks.items():
                    net_module = networks.module_types[0].create_module(net, weights)
                    assert net_module is not None, "Failed to create motion module LoRA"
                    net.modules[key] = net_module

                return net
            else:
                del sd
                return original_load_network(name, network_on_disk)

        networks.load_network = mm_load_network


    def restore(self):
        if not self.v2:
            return

        if AnimateDiffLora.original_load_network is None:
            logger.info("AnimateDiff LoRA already restored")
            return

        logger.info("Restoring hacked LoRA")
        import networks
        networks.load_network = AnimateDiffLora.original_load_network
        AnimateDiffLora.original_load_network = None
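The key rewrite performed by convert_mm_name_to_compvis can be checked in isolation; the example key below is hypothetical, merely shaped like a motion-LoRA state_dict entry:

import re

def convert_mm_name_to_compvis(key):
    # same logic as the nested helper above
    sd_module_key, _, network_part = re.split(r'(_lora\.)', key)
    sd_module_key = sd_module_key.replace("processor.", "").replace("to_out", "to_out.0")
    return sd_module_key, 'lora_' + network_part

key = "down_blocks.0.motion_modules.0.attention_blocks.0.processor.to_out_lora.down.weight"  # hypothetical
print(convert_mm_name_to_compvis(key))
# ('down_blocks.0.motion_modules.0.attention_blocks.0.to_out.0', 'lora_down.weight')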
extensions/sd-webui-animatediff/scripts/animatediff_mm.py
ADDED
@@ -0,0 +1,204 @@
import gc
import os

import torch
from einops import rearrange
from modules import hashes, shared, sd_models, devices
from modules.devices import cpu, device, torch_gc

from motion_module import MotionWrapper, MotionModuleType
from scripts.animatediff_logger import logger_animatediff as logger


class AnimateDiffMM:
    mm_injected = False

    def __init__(self):
        self.mm: MotionWrapper = None
        self.script_dir = None
        self.prev_alpha_cumprod = None
        self.gn32_original_forward = None


    def set_script_dir(self, script_dir):
        self.script_dir = script_dir


    def get_model_dir(self):
        model_dir = shared.opts.data.get("animatediff_model_path", os.path.join(self.script_dir, "model"))
        if not model_dir:
            model_dir = os.path.join(self.script_dir, "model")
        return model_dir


    def _load(self, model_name):
        model_path = os.path.join(self.get_model_dir(), model_name)
        if not os.path.isfile(model_path):
            raise RuntimeError("Please download models manually.")
        if self.mm is None or self.mm.mm_name != model_name:
            logger.info(f"Loading motion module {model_name} from {model_path}")
            model_hash = hashes.sha256(model_path, f"AnimateDiff/{model_name}")
            mm_state_dict = sd_models.read_state_dict(model_path)
            model_type = MotionModuleType.get_mm_type(mm_state_dict)
            logger.info(f"Guessed {model_name} architecture: {model_type}")
            self.mm = MotionWrapper(model_name, model_hash, model_type)
            missed_keys = self.mm.load_state_dict(mm_state_dict)
            logger.warn(f"Missing keys {missed_keys}")
        self.mm.to(device).eval()
        if not shared.cmd_opts.no_half:
            self.mm.half()
        if getattr(devices, "fp8", False):
            for module in self.mm.modules():
                if isinstance(module, (torch.nn.Conv2d, torch.nn.Linear)):
                    module.to(torch.float8_e4m3fn)


    def inject(self, sd_model, model_name="mm_sd_v15.ckpt"):
        if AnimateDiffMM.mm_injected:
            logger.info("Motion module already injected. Trying to restore.")
            self.restore(sd_model)

        unet = sd_model.model.diffusion_model
        self._load(model_name)
        inject_sdxl = sd_model.is_sdxl or self.mm.is_xl
        sd_ver = "SDXL" if sd_model.is_sdxl else "SD1.5"
        assert sd_model.is_sdxl == self.mm.is_xl, f"Motion module incompatible with SD. You are using {sd_ver} with {self.mm.mm_type}."

        if self.mm.is_v2:
            logger.info(f"Injecting motion module {model_name} into {sd_ver} UNet middle block.")
            unet.middle_block.insert(-1, self.mm.mid_block.motion_modules[0])
        elif self.mm.enable_gn_hack():
            logger.info(f"Hacking {sd_ver} GroupNorm32 forward function.")
            if self.mm.is_hotshot:
                from sgm.modules.diffusionmodules.util import GroupNorm32
            else:
                from ldm.modules.diffusionmodules.util import GroupNorm32
            self.gn32_original_forward = GroupNorm32.forward
            gn32_original_forward = self.gn32_original_forward

            def groupnorm32_mm_forward(self, x):
                x = rearrange(x, "(b f) c h w -> b c f h w", b=2)
                x = gn32_original_forward(self, x)
                x = rearrange(x, "b c f h w -> (b f) c h w", b=2)
                return x

            GroupNorm32.forward = groupnorm32_mm_forward

        logger.info(f"Injecting motion module {model_name} into {sd_ver} UNet input blocks.")
        for mm_idx, unet_idx in enumerate([1, 2, 4, 5, 7, 8, 10, 11]):
            if inject_sdxl and mm_idx >= 6:
                break
            mm_idx0, mm_idx1 = mm_idx // 2, mm_idx % 2
            mm_inject = getattr(self.mm.down_blocks[mm_idx0], "temporal_attentions" if self.mm.is_hotshot else "motion_modules")[mm_idx1]
            unet.input_blocks[unet_idx].append(mm_inject)

        logger.info(f"Injecting motion module {model_name} into {sd_ver} UNet output blocks.")
        for unet_idx in range(12):
            if inject_sdxl and unet_idx >= 9:
                break
            mm_idx0, mm_idx1 = unet_idx // 3, unet_idx % 3
            mm_inject = getattr(self.mm.up_blocks[mm_idx0], "temporal_attentions" if self.mm.is_hotshot else "motion_modules")[mm_idx1]
            if unet_idx % 3 == 2 and unet_idx != (8 if self.mm.is_xl else 11):
                unet.output_blocks[unet_idx].insert(-1, mm_inject)
            else:
                unet.output_blocks[unet_idx].append(mm_inject)

        self._set_ddim_alpha(sd_model)
        self._set_layer_mapping(sd_model)
        AnimateDiffMM.mm_injected = True
        logger.info(f"Injection finished.")


    def restore(self, sd_model):
        if not AnimateDiffMM.mm_injected:
            logger.info("Motion module already removed.")
            return

        inject_sdxl = sd_model.is_sdxl or self.mm.is_xl
        sd_ver = "SDXL" if sd_model.is_sdxl else "SD1.5"
        self._restore_ddim_alpha(sd_model)
        unet = sd_model.model.diffusion_model

        logger.info(f"Removing motion module from {sd_ver} UNet input blocks.")
        for unet_idx in [1, 2, 4, 5, 7, 8, 10, 11]:
            if inject_sdxl and unet_idx >= 9:
                break
            unet.input_blocks[unet_idx].pop(-1)

        logger.info(f"Removing motion module from {sd_ver} UNet output blocks.")
        for unet_idx in range(12):
            if inject_sdxl and unet_idx >= 9:
                break
            if unet_idx % 3 == 2 and unet_idx != (8 if self.mm.is_xl else 11):
                unet.output_blocks[unet_idx].pop(-2)
            else:
                unet.output_blocks[unet_idx].pop(-1)

        if self.mm.is_v2:
            logger.info(f"Removing motion module from {sd_ver} UNet middle block.")
            unet.middle_block.pop(-2)
        elif self.mm.enable_gn_hack():
            logger.info(f"Restoring {sd_ver} GroupNorm32 forward function.")
            if self.mm.is_hotshot:
                from sgm.modules.diffusionmodules.util import GroupNorm32
            else:
                from ldm.modules.diffusionmodules.util import GroupNorm32
            GroupNorm32.forward = self.gn32_original_forward
            self.gn32_original_forward = None

        AnimateDiffMM.mm_injected = False
        logger.info(f"Removal finished.")
        if shared.cmd_opts.lowvram:
            self.unload()


    def _set_ddim_alpha(self, sd_model):
        logger.info(f"Setting DDIM alpha.")
        beta_start = 0.00085
        beta_end = 0.020 if self.mm.is_adxl else 0.012
        if self.mm.is_adxl:
            betas = torch.linspace(beta_start**0.5, beta_end**0.5, 1000, dtype=torch.float32, device=device) ** 2
        else:
            betas = torch.linspace(
                beta_start,
                beta_end,
                1000 if sd_model.is_sdxl else sd_model.num_timesteps,
                dtype=torch.float32,
                device=device,
            )
        alphas = 1.0 - betas
        alphas_cumprod = torch.cumprod(alphas, dim=0)
        self.prev_alpha_cumprod = sd_model.alphas_cumprod
        sd_model.alphas_cumprod = alphas_cumprod


    def _set_layer_mapping(self, sd_model):
        if hasattr(sd_model, 'network_layer_mapping'):
            for name, module in self.mm.named_modules():
                sd_model.network_layer_mapping[name] = module
                module.network_layer_name = name


    def _restore_ddim_alpha(self, sd_model):
        logger.info(f"Restoring DDIM alpha.")
        sd_model.alphas_cumprod = self.prev_alpha_cumprod
        self.prev_alpha_cumprod = None


    def unload(self):
        logger.info("Moving motion module to CPU")
        if self.mm is not None:
            self.mm.to(cpu)
        torch_gc()
        gc.collect()


    def remove(self):
        logger.info("Removing motion module from any memory")
        del self.mm
        self.mm = None
        torch_gc()
        gc.collect()


mm_animatediff = AnimateDiffMM()
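A plain-PyTorch sketch (assumptions: only torch and einops, shapes made up) of why the GroupNorm32 hack above reshapes activations: folding the frame axis next to the spatial axes makes group-norm statistics span the whole clip rather than each frame individually.

import torch
from einops import rearrange

frames, channels, h, w = 16, 8, 4, 4
x = torch.randn(2 * frames, channels, h, w)          # (b f) c h w with b=2 (cond/uncond)
gn = torch.nn.GroupNorm(4, channels)

x5d = rearrange(x, "(b f) c h w -> b c f h w", b=2)   # fold frames into an extra axis
y = gn(x5d)                                           # stats computed over c-group, f, h, w
y = rearrange(y, "b c f h w -> (b f) c h w", b=2)
print(y.shape)  # torch.Size([32, 8, 4, 4])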
extensions/sd-webui-animatediff/scripts/animatediff_output.py
ADDED
@@ -0,0 +1,361 @@
import base64
import datetime
from pathlib import Path

import imageio.v3 as imageio
import numpy as np
from PIL import Image, PngImagePlugin
import PIL.features
import piexif
from modules import images, shared
from modules.processing import Processed, StableDiffusionProcessing

from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_ui import AnimateDiffProcess


class AnimateDiffOutput:
    def output(self, p: StableDiffusionProcessing, res: Processed, params: AnimateDiffProcess):
        video_paths = []
        logger.info("Merging images into GIF.")
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        output_dir = Path(f"{p.outpath_samples}/AnimateDiff/{date}")
        output_dir.mkdir(parents=True, exist_ok=True)
        step = params.video_length if params.video_length > params.batch_size else params.batch_size
        for i in range(res.index_of_first_image, len(res.images), step):
            # frame interpolation replaces video_list with interpolated frames
            # so make a copy instead of a slice (reference), to avoid modifying res
            frame_list = [image.copy() for image in res.images[i : i + params.video_length]]

            seq = images.get_next_sequence_number(output_dir, "")
            filename_suffix = f"-{params.request_id}" if params.request_id else ""
            filename = f"{seq:05}-{res.all_seeds[(i-res.index_of_first_image)]}{filename_suffix}"

            video_path_prefix = output_dir / filename

            frame_list = self._add_reverse(params, frame_list)
            frame_list = self._interp(p, params, frame_list, filename)
            video_paths += self._save(params, frame_list, video_path_prefix, res, i)

        if len(video_paths) == 0:
            return

        res.images = video_paths if not p.is_api else (self._encode_video_to_b64(video_paths) + (frame_list if 'Frame' in params.format else []))


    def _add_reverse(self, params: AnimateDiffProcess, frame_list: list):
        if params.video_length <= params.batch_size and params.closed_loop in ['A']:
            frame_list_reverse = frame_list[::-1]
            if len(frame_list_reverse) > 0:
                frame_list_reverse.pop(0)
            if len(frame_list_reverse) > 0:
                frame_list_reverse.pop(-1)
            return frame_list + frame_list_reverse
        return frame_list


    def _interp(
        self,
        p: StableDiffusionProcessing,
        params: AnimateDiffProcess,
        frame_list: list,
        filename: str
    ):
        if params.interp not in ['FILM']:
            return frame_list

        try:
            from deforum_helpers.frame_interpolation import (
                calculate_frames_to_add, check_and_download_film_model)
            from film_interpolation.film_inference import run_film_interp_infer
        except ImportError:
            logger.error("Deforum not found. Please install: https://github.com/deforum-art/deforum-for-automatic1111-webui.git")
            return frame_list

        import glob
        import os
        import shutil

        import modules.paths as ph

        # load film model
        deforum_models_path = ph.models_path + '/Deforum'
        film_model_folder = os.path.join(deforum_models_path, 'film_interpolation')
        film_model_name = 'film_net_fp16.pt'
        film_model_path = os.path.join(film_model_folder, film_model_name)
        check_and_download_film_model('film_net_fp16.pt', film_model_folder)

        film_in_between_frames_count = calculate_frames_to_add(len(frame_list), params.interp_x)

        # save original frames to tmp folder for deforum input
        tmp_folder = f"{p.outpath_samples}/AnimateDiff/tmp"
        input_folder = f"{tmp_folder}/input"
        os.makedirs(input_folder, exist_ok=True)
        for tmp_seq, frame in enumerate(frame_list):
            imageio.imwrite(f"{input_folder}/{tmp_seq:05}.png", frame)

        # deforum saves output frames to tmp/{filename}
        save_folder = f"{tmp_folder}/{filename}"
        os.makedirs(save_folder, exist_ok=True)

        run_film_interp_infer(
            model_path = film_model_path,
            input_folder = input_folder,
            save_folder = save_folder,
            inter_frames = film_in_between_frames_count)

        # load deforum output frames and replace video_list
        interp_frame_paths = sorted(glob.glob(os.path.join(save_folder, '*.png')))
        frame_list = []
        for f in interp_frame_paths:
            with Image.open(f) as img:
                img.load()
                frame_list.append(img)

        # if saving PNG, enforce saving to custom folder
        if "PNG" in params.format:
            params.force_save_to_custom = True

        # remove tmp folder
        try: shutil.rmtree(tmp_folder)
        except OSError as e: print(f"Error: {e}")

        return frame_list


    def _save(
        self,
        params: AnimateDiffProcess,
        frame_list: list,
        video_path_prefix: Path,
        res: Processed,
        index: int,
    ):
        video_paths = []
        video_array = [np.array(v) for v in frame_list]
        infotext = res.infotexts[index]
        s3_enable = shared.opts.data.get("animatediff_s3_enable", False)
        use_infotext = shared.opts.enable_pnginfo and infotext is not None
        if "PNG" in params.format and (shared.opts.data.get("animatediff_save_to_custom", False) or getattr(params, "force_save_to_custom", False)):
            video_path_prefix.mkdir(exist_ok=True, parents=True)
            for i, frame in enumerate(frame_list):
                png_filename = video_path_prefix/f"{i:05}.png"
                png_info = PngImagePlugin.PngInfo()
                png_info.add_text('parameters', infotext)
                imageio.imwrite(png_filename, frame, pnginfo=png_info)

        if "GIF" in params.format:
            video_path_gif = str(video_path_prefix) + ".gif"
            video_paths.append(video_path_gif)
            if shared.opts.data.get("animatediff_optimize_gif_palette", False):
                try:
                    import av
                except ImportError:
                    from launch import run_pip
                    run_pip(
                        "install imageio[pyav]",
                        "sd-webui-animatediff GIF palette optimization requirement: imageio[pyav]",
                    )
                imageio.imwrite(
                    video_path_gif, video_array, plugin='pyav', fps=params.fps,
                    codec='gif', out_pixel_format='pal8',
                    filter_graph=(
                        {
                            "split": ("split", ""),
                            "palgen": ("palettegen", ""),
                            "paluse": ("paletteuse", ""),
                            "scale": ("scale", f"{frame_list[0].width}:{frame_list[0].height}")
                        },
                        [
                            ("video_in", "scale", 0, 0),
                            ("scale", "split", 0, 0),
                            ("split", "palgen", 1, 0),
                            ("split", "paluse", 0, 0),
                            ("palgen", "paluse", 0, 1),
                            ("paluse", "video_out", 0, 0),
                        ]
                    )
                )
                # imageio[pyav].imwrite doesn't support comment parameter
                if use_infotext:
                    try:
                        import exiftool
                    except ImportError:
                        from launch import run_pip
                        run_pip(
                            "install PyExifTool",
                            "sd-webui-animatediff GIF palette optimization requirement: PyExifTool",
                        )
                        import exiftool
                    finally:
                        try:
                            exif_tool = exiftool.ExifTool()
                            with exif_tool:
                                escaped_infotext = infotext.replace('\n', r'\n')
                                exif_tool.execute("-overwrite_original", f"-Comment={escaped_infotext}", video_path_gif)
                        except FileNotFoundError:
                            logger.warn(
                                "exiftool not found, required for infotext with optimized GIF palette, try: apt install libimage-exiftool-perl or https://exiftool.org/"
                            )
            else:
                imageio.imwrite(
                    video_path_gif,
                    video_array,
                    plugin='pillow',
                    duration=(1000 / params.fps),
                    loop=params.loop_number,
                    comment=(infotext if use_infotext else "")
                )
                if shared.opts.data.get("animatediff_optimize_gif_gifsicle", False):
                    self._optimize_gif(video_path_gif)

        if "MP4" in params.format:
            video_path_mp4 = str(video_path_prefix) + ".mp4"
            video_paths.append(video_path_mp4)
            try:
                import av
            except ImportError:
                from launch import run_pip
                run_pip(
                    "install pyav",
                    "sd-webui-animatediff MP4 save requirement: PyAV",
                )
                import av
            options = {
                "crf": str(shared.opts.data.get("animatediff_mp4_crf", 23))
            }
            preset = shared.opts.data.get("animatediff_mp4_preset", "")
            if preset != "": options["preset"] = preset
            tune = shared.opts.data.get("animatediff_mp4_tune", "")
            if tune != "": options["tune"] = tune
            output = av.open(video_path_mp4, "w")
            logger.info(f"Saving {video_path_mp4}")
            if use_infotext:
                output.metadata["Comment"] = infotext
            stream = output.add_stream('libx264', params.fps, options=options)
            stream.width = frame_list[0].width
            stream.height = frame_list[0].height
            for img in video_array:
                frame = av.VideoFrame.from_ndarray(img)
                packet = stream.encode(frame)
                output.mux(packet)
            packet = stream.encode(None)
            output.mux(packet)
            output.close()

        if "TXT" in params.format and res.images[index].info is not None:
            video_path_txt = str(video_path_prefix) + ".txt"
            with open(video_path_txt, "w", encoding="utf8") as file:
                file.write(f"{infotext}\n")

        if "WEBP" in params.format:
            if PIL.features.check('webp_anim'):
                video_path_webp = str(video_path_prefix) + ".webp"
                video_paths.append(video_path_webp)
                exif_bytes = b''
                if use_infotext:
                    exif_bytes = piexif.dump({
                        "Exif":{
                            piexif.ExifIFD.UserComment:piexif.helper.UserComment.dump(infotext, encoding="unicode")
                        }})
                lossless = shared.opts.data.get("animatediff_webp_lossless", False)
                quality = shared.opts.data.get("animatediff_webp_quality", 80)
                logger.info(f"Saving {video_path_webp} with lossless={lossless} and quality={quality}")
                imageio.imwrite(video_path_webp, video_array, plugin='pillow',
                    duration=int(1 / params.fps * 1000), loop=params.loop_number,
                    lossless=lossless, quality=quality, exif=exif_bytes
                )
                # see additional Pillow WebP options at https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html#webp
            else:
                logger.warn("WebP animation in Pillow requires system WebP library v0.5.0 or later")

        if "WEBM" in params.format:
            video_path_webm = str(video_path_prefix) + ".webm"
            video_paths.append(video_path_webm)
            logger.info(f"Saving {video_path_webm}")
            with imageio.imopen(video_path_webm, "w", plugin="pyav") as file:
                if use_infotext:
                    file.container_metadata["Title"] = infotext
                    file.container_metadata["Comment"] = infotext
                file.write(video_array, codec="vp9", fps=params.fps)

        if s3_enable:
            for video_path in video_paths: self._save_to_s3_stroge(video_path)
        return video_paths


    def _optimize_gif(self, video_path: str):
        try:
            import pygifsicle
        except ImportError:
            from launch import run_pip

            run_pip(
                "install pygifsicle",
                "sd-webui-animatediff GIF optimization requirement: pygifsicle",
            )
            import pygifsicle
        finally:
            try:
                pygifsicle.optimize(video_path)
            except FileNotFoundError:
                logger.warn("gifsicle not found, required for optimized GIFs, try: apt install gifsicle")


    def _encode_video_to_b64(self, paths):
        videos = []
        for v_path in paths:
            with open(v_path, "rb") as video_file:
                videos.append(base64.b64encode(video_file.read()).decode("utf-8"))
        return videos

    def _install_requirement_if_absent(self, lib):
        import launch
        if not launch.is_installed(lib):
            launch.run_pip(f"install {lib}", f"animatediff requirement: {lib}")

    def _exist_bucket(self, s3_client, bucketname):
        try:
            s3_client.head_bucket(Bucket=bucketname)
            return True
        except ClientError as e:
            if e.response['Error']['Code'] == '404':
                return False
            else:
                raise

    def _save_to_s3_stroge(self, file_path):
        """
        put object to object storge
        :type bucketname: string
        :param bucketname: will save to this 'bucket', access_key and secret_key must have permissions to save
        :type file : file
        :param file : the local file
        """
        self._install_requirement_if_absent('boto3')
        import boto3
        from botocore.exceptions import ClientError
        import os
        host = shared.opts.data.get("animatediff_s3_host", '127.0.0.1')
        port = shared.opts.data.get("animatediff_s3_port", '9001')
        access_key = shared.opts.data.get("animatediff_s3_access_key", '')
        secret_key = shared.opts.data.get("animatediff_s3_secret_key", '')
        bucket = shared.opts.data.get("animatediff_s3_storge_bucket", '')
        client = boto3.client(
            service_name='s3',
            aws_access_key_id = access_key,
            aws_secret_access_key = secret_key,
            endpoint_url=f'http://{host}:{port}',
        )

        if not os.path.exists(file_path): return
        date = datetime.datetime.now().strftime('%Y-%m-%d')
        if not self._exist_bucket(client, bucket):
            client.create_bucket(Bucket=bucket)

        filename = os.path.split(file_path)[1]
        targetpath = f"{date}/{filename}"
        client.upload_file(file_path, bucket, targetpath)
        logger.info(f"{file_path} saved to s3 in bucket: {bucket}")
        return f"http://{host}:{port}/{bucket}/{targetpath}"
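For reference, a standalone sketch of the non-optimized GIF branch above, with made-up frames and fps, shows the imageio.v3 Pillow call in isolation:

import numpy as np
import imageio.v3 as imageio

fps = 8
frames = [np.full((64, 64, 3), i * 16, dtype=np.uint8) for i in range(16)]  # dummy frames
imageio.imwrite(
    "demo.gif",            # hypothetical output path
    frames,
    plugin="pillow",
    duration=(1000 / fps),  # per-frame duration in milliseconds
    loop=0,                 # 0 = loop forever, matching loop_number=0
)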
extensions/sd-webui-animatediff/scripts/animatediff_prompt.py
ADDED
@@ -0,0 +1,143 @@
import re
import torch

from modules.processing import StableDiffusionProcessing, Processed

from scripts.animatediff_logger import logger_animatediff as logger
from scripts.animatediff_infotext import write_params_txt


class AnimateDiffPromptSchedule:

    def __init__(self):
        self.prompt_map = None
        self.original_prompt = None


    def save_infotext_img(self, p: StableDiffusionProcessing):
        if self.prompt_map is not None:
            p.prompts = [self.original_prompt for _ in range(p.batch_size)]


    def save_infotext_txt(self, res: Processed):
        if self.prompt_map is not None:
            parts = res.info.split('\nNegative prompt: ', 1)
            if len(parts) > 1:
                res.info = f"{self.original_prompt}\nNegative prompt: {parts[1]}"
            for i in range(len(res.infotexts)):
                parts = res.infotexts[i].split('\nNegative prompt: ', 1)
                if len(parts) > 1:
                    res.infotexts[i] = f"{self.original_prompt}\nNegative prompt: {parts[1]}"
            write_params_txt(res.info)


    def parse_prompt(self, p: StableDiffusionProcessing):
        if type(p.prompt) is not str:
            logger.warn("prompt is not str, cannot support prompt map")
            return

        lines = p.prompt.strip().split('\n')
        data = {
            'head_prompts': [],
            'mapp_prompts': {},
            'tail_prompts': []
        }

        mode = 'head'
        for line in lines:
            if mode == 'head':
                if re.match(r'^\d+:', line):
                    mode = 'mapp'
                else:
                    data['head_prompts'].append(line)

            if mode == 'mapp':
                match = re.match(r'^(\d+): (.+)$', line)
                if match:
                    frame, prompt = match.groups()
                    data['mapp_prompts'][int(frame)] = prompt
                else:
                    mode = 'tail'

            if mode == 'tail':
                data['tail_prompts'].append(line)

        if data['mapp_prompts']:
            logger.info("You are using prompt travel.")
            self.prompt_map = {}
            prompt_list = []
            last_frame = 0
            current_prompt = ''
            for frame, prompt in data['mapp_prompts'].items():
                prompt_list += [current_prompt for _ in range(last_frame, frame)]
                last_frame = frame
                current_prompt = f"{', '.join(data['head_prompts'])}, {prompt}, {', '.join(data['tail_prompts'])}"
                self.prompt_map[frame] = current_prompt
            prompt_list += [current_prompt for _ in range(last_frame, p.batch_size)]
            assert len(prompt_list) == p.batch_size, f"prompt_list length {len(prompt_list)} != batch_size {p.batch_size}"
            self.original_prompt = p.prompt
            p.prompt = prompt_list * p.n_iter


    def single_cond(self, center_frame, video_length: int, cond: torch.Tensor, closed_loop = False):
        if closed_loop:
            key_prev = list(self.prompt_map.keys())[-1]
            key_next = list(self.prompt_map.keys())[0]
        else:
            key_prev = list(self.prompt_map.keys())[0]
            key_next = list(self.prompt_map.keys())[-1]

        for p in self.prompt_map.keys():
            if p > center_frame:
                key_next = p
                break
            key_prev = p

        dist_prev = center_frame - key_prev
        if dist_prev < 0:
            dist_prev += video_length
        dist_next = key_next - center_frame
        if dist_next < 0:
            dist_next += video_length

        if key_prev == key_next or dist_prev + dist_next == 0:
            return cond[key_prev] if isinstance(cond, torch.Tensor) else {k: v[key_prev] for k, v in cond.items()}

        rate = dist_prev / (dist_prev + dist_next)
        if isinstance(cond, torch.Tensor):
            return AnimateDiffPromptSchedule.slerp(cond[key_prev], cond[key_next], rate)
        else: # isinstance(cond, dict)
            return {
                k: AnimateDiffPromptSchedule.slerp(v[key_prev], v[key_next], rate)
                for k, v in cond.items()
            }


    def multi_cond(self, cond: torch.Tensor, closed_loop = False):
        if self.prompt_map is None:
            return cond
        cond_list = [] if isinstance(cond, torch.Tensor) else {k: [] for k in cond.keys()}
        for i in range(cond.shape[0]):
            single_cond = self.single_cond(i, cond.shape[0], cond, closed_loop)
            if isinstance(cond, torch.Tensor):
                cond_list.append(single_cond)
            else:
                for k, v in single_cond.items():
                    cond_list[k].append(v)
        if isinstance(cond, torch.Tensor):
            return torch.stack(cond_list).to(cond.dtype).to(cond.device)
        else:
            return {k: torch.stack(v).to(cond[k].dtype).to(cond[k].device) for k, v in cond_list.items()}


    @staticmethod
    def slerp(
        v0: torch.Tensor, v1: torch.Tensor, t: float, DOT_THRESHOLD: float = 0.9995
    ) -> torch.Tensor:
        u0 = v0 / v0.norm()
        u1 = v1 / v1.norm()
        dot = (u0 * u1).sum()
        if dot.abs() > DOT_THRESHOLD:
            return (1.0 - t) * v0 + t * v1
        omega = dot.acos()
        return (((1.0 - t) * omega).sin() * v0 + (t * omega).sin() * v1) / omega.sin()
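A hypothetical prompt-travel input for parse_prompt above (frame indices and wording are made up) would look like this: head lines, then "frame: prompt" lines, then tail lines.

p_prompt = "\n".join([
    "masterpiece, best quality",   # head prompt, prepended to every frame
    "0: closed eyes",              # applies from frame 0
    "8: open eyes",                # applies from frame 8
    "16: smiling",                 # applies from frame 16
    "open mouth",                  # tail prompt, appended to every frame
])
# parse_prompt would build prompt_map = {0: "...closed eyes...", 8: "...open eyes...", 16: "...smiling..."}
# and expand it into one full prompt per frame up to p.batch_size.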
extensions/sd-webui-animatediff/scripts/animatediff_ui.py
ADDED
@@ -0,0 +1,349 @@
import os

import cv2
import gradio as gr

from modules import shared
from modules.processing import StableDiffusionProcessing

from scripts.animatediff_mm import mm_animatediff as motion_module
from scripts.animatediff_i2ibatch import animatediff_i2ibatch
from scripts.animatediff_lcm import AnimateDiffLCM


class ToolButton(gr.Button, gr.components.FormComponent):
    """Small button with single emoji as text, fits inside gradio forms"""

    def __init__(self, **kwargs):
        super().__init__(variant="tool", **kwargs)

    def get_block_name(self):
        return "button"


class AnimateDiffProcess:

    def __init__(
        self,
        model="mm_sd_v15_v2.ckpt",
        enable=False,
        video_length=0,
        fps=8,
        loop_number=0,
        closed_loop='R-P',
        batch_size=16,
        stride=1,
        overlap=-1,
        format=["GIF", "PNG"],
        interp='Off',
        interp_x=10,
        video_source=None,
        video_path='',
        latent_power=1,
        latent_scale=32,
        last_frame=None,
        latent_power_last=1,
        latent_scale_last=32,
        request_id = '',
    ):
        self.model = model
        self.enable = enable
        self.video_length = video_length
        self.fps = fps
        self.loop_number = loop_number
        self.closed_loop = closed_loop
        self.batch_size = batch_size
        self.stride = stride
        self.overlap = overlap
        self.format = format
        self.interp = interp
        self.interp_x = interp_x
        self.video_source = video_source
        self.video_path = video_path
        self.latent_power = latent_power
        self.latent_scale = latent_scale
        self.last_frame = last_frame
        self.latent_power_last = latent_power_last
        self.latent_scale_last = latent_scale_last
        self.request_id = request_id


    def get_list(self, is_img2img: bool):
        list_var = list(vars(self).values())[:-1]
        if is_img2img:
            animatediff_i2ibatch.hack()
        else:
            list_var = list_var[:-5]
        return list_var


    def get_dict(self, is_img2img: bool):
        infotext = {
            "enable": self.enable,
            "model": self.model,
            "video_length": self.video_length,
            "fps": self.fps,
            "loop_number": self.loop_number,
            "closed_loop": self.closed_loop,
            "batch_size": self.batch_size,
            "stride": self.stride,
            "overlap": self.overlap,
            "interp": self.interp,
            "interp_x": self.interp_x,
        }
        if self.request_id:
            infotext['request_id'] = self.request_id
        if motion_module.mm is not None and motion_module.mm.mm_hash is not None:
            infotext['mm_hash'] = motion_module.mm.mm_hash[:8]
        if is_img2img:
            infotext.update({
                "latent_power": self.latent_power,
                "latent_scale": self.latent_scale,
                "latent_power_last": self.latent_power_last,
                "latent_scale_last": self.latent_scale_last,
            })
        infotext_str = ', '.join(f"{k}: {v}" for k, v in infotext.items())
        return infotext_str


    def _check(self):
        assert (
            self.video_length >= 0 and self.fps > 0
        ), "Video length and FPS should be positive."
        assert not set(["GIF", "MP4", "PNG", "WEBP", "WEBM"]).isdisjoint(
            self.format
        ), "At least one saving format should be selected."


    def set_p(self, p: StableDiffusionProcessing):
        self._check()
        if self.video_length < self.batch_size:
            p.batch_size = self.batch_size
        else:
            p.batch_size = self.video_length
        if self.video_length == 0:
            self.video_length = p.batch_size
            self.video_default = True
        else:
            self.video_default = False
        if self.overlap == -1:
            self.overlap = self.batch_size // 4
        if "PNG" not in self.format or shared.opts.data.get("animatediff_save_to_custom", False):
            p.do_not_save_samples = True


class AnimateDiffUiGroup:
    txt2img_submit_button = None
    img2img_submit_button = None

    def __init__(self):
        self.params = AnimateDiffProcess()


    def render(self, is_img2img: bool, model_dir: str):
        if not os.path.isdir(model_dir):
            os.mkdir(model_dir)
        elemid_prefix = "img2img-ad-" if is_img2img else "txt2img-ad-"
        model_list = [f for f in os.listdir(model_dir) if f != ".gitkeep"]
        with gr.Accordion("AnimateDiff", open=False):
            gr.Markdown(value="Please click [this link](https://github.com/continue-revolution/sd-webui-animatediff#webui-parameters) to read the documentation of each parameter.")
            with gr.Row():

                def refresh_models(*inputs):
                    new_model_list = [
                        f for f in os.listdir(model_dir) if f != ".gitkeep"
                    ]
                    dd = inputs[0]
                    if dd in new_model_list:
                        selected = dd
                    elif len(new_model_list) > 0:
                        selected = new_model_list[0]
                    else:
                        selected = None
                    return gr.Dropdown.update(choices=new_model_list, value=selected)

                with gr.Row():
                    self.params.model = gr.Dropdown(
                        choices=model_list,
                        value=(self.params.model if self.params.model in model_list else None),
                        label="Motion module",
                        type="value",
                        elem_id=f"{elemid_prefix}motion-module",
                    )
                    refresh_model = ToolButton(value="\U0001f504")
                    refresh_model.click(refresh_models, self.params.model, self.params.model)

                self.params.format = gr.CheckboxGroup(
                    choices=["GIF", "MP4", "WEBP", "WEBM", "PNG", "TXT"],
                    label="Save format",
                    type="value",
                    elem_id=f"{elemid_prefix}save-format",
                    value=self.params.format,
                )
            with gr.Row():
                self.params.enable = gr.Checkbox(
                    value=self.params.enable, label="Enable AnimateDiff",
                    elem_id=f"{elemid_prefix}enable"
                )
                self.params.video_length = gr.Number(
                    minimum=0,
                    value=self.params.video_length,
                    label="Number of frames",
                    precision=0,
                    elem_id=f"{elemid_prefix}video-length",
                )
                self.params.fps = gr.Number(
                    value=self.params.fps, label="FPS", precision=0,
                    elem_id=f"{elemid_prefix}fps"
                )
                self.params.loop_number = gr.Number(
                    minimum=0,
                    value=self.params.loop_number,
                    label="Display loop number",
                    precision=0,
                    elem_id=f"{elemid_prefix}loop-number",
                )
            with gr.Row():
                self.params.closed_loop = gr.Radio(
                    choices=["N", "R-P", "R+P", "A"],
                    value=self.params.closed_loop,
                    label="Closed loop",
                    elem_id=f"{elemid_prefix}closed-loop",
                )
                self.params.batch_size = gr.Slider(
                    minimum=1,
                    maximum=32,
                    value=self.params.batch_size,
                    label="Context batch size",
                    step=1,
                    precision=0,
                    elem_id=f"{elemid_prefix}batch-size",
                )
                self.params.stride = gr.Number(
                    minimum=1,
                    value=self.params.stride,
                    label="Stride",
                    precision=0,
                    elem_id=f"{elemid_prefix}stride",
                )
                self.params.overlap = gr.Number(
                    minimum=-1,
                    value=self.params.overlap,
                    label="Overlap",
                    precision=0,
                    elem_id=f"{elemid_prefix}overlap",
                )
            with gr.Row():
                self.params.interp = gr.Radio(
                    choices=["Off", "FILM"],
                    label="Frame Interpolation",
                    elem_id=f"{elemid_prefix}interp-choice",
                    value=self.params.interp
                )
                self.params.interp_x = gr.Number(
                    value=self.params.interp_x, label="Interp X", precision=0,
                    elem_id=f"{elemid_prefix}interp-x"
                )
            self.params.video_source = gr.Video(
                value=self.params.video_source,
                label="Video source",
            )
            def update_fps(video_source):
                if video_source is not None and video_source != '':
                    cap = cv2.VideoCapture(video_source)
                    fps = int(cap.get(cv2.CAP_PROP_FPS))
                    cap.release()
                    return fps
                else:
                    return int(self.params.fps.value)
            self.params.video_source.change(update_fps, inputs=self.params.video_source, outputs=self.params.fps)
            def update_frames(video_source):
                if video_source is not None and video_source != '':
                    cap = cv2.VideoCapture(video_source)
                    frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    cap.release()
                    return frames
                else:
                    return int(self.params.video_length.value)
            self.params.video_source.change(update_frames, inputs=self.params.video_source, outputs=self.params.video_length)
            self.params.video_path = gr.Textbox(
                value=self.params.video_path,
                label="Video path",
                elem_id=f"{elemid_prefix}video-path"
            )
            if is_img2img:
                with gr.Row():
                    self.params.latent_power = gr.Slider(
                        minimum=0.1,
                        maximum=10,
                        value=self.params.latent_power,
                        step=0.1,
                        label="Latent power",
                        elem_id=f"{elemid_prefix}latent-power",
                    )
                    self.params.latent_scale = gr.Slider(
                        minimum=1,
                        maximum=128,
                        value=self.params.latent_scale,
                        label="Latent scale",
                        elem_id=f"{elemid_prefix}latent-scale"
                    )
                    self.params.latent_power_last = gr.Slider(
                        minimum=0.1,
                        maximum=10,
                        value=self.params.latent_power_last,
                        step=0.1,
                        label="Optional latent power for last frame",
                        elem_id=f"{elemid_prefix}latent-power-last",
                    )
                    self.params.latent_scale_last = gr.Slider(
                        minimum=1,
                        maximum=128,
                        value=self.params.latent_scale_last,
                        label="Optional latent scale for last frame",
                        elem_id=f"{elemid_prefix}latent-scale-last"
                    )
                self.params.last_frame = gr.Image(
                    label="Optional last frame. Leave it blank if you do not need one.",
                    type="pil",
                )
            with gr.Row():
                unload = gr.Button(value="Move motion module to CPU (default if lowvram)")
                remove = gr.Button(value="Remove motion module from any memory")
                unload.click(fn=motion_module.unload)
                remove.click(fn=motion_module.remove)
        return self.register_unit(is_img2img)


    def register_unit(self, is_img2img: bool):
        unit = gr.State(value=AnimateDiffProcess)
        (
            AnimateDiffUiGroup.img2img_submit_button
            if is_img2img
            else AnimateDiffUiGroup.txt2img_submit_button
        ).click(
            fn=AnimateDiffProcess,
            inputs=self.params.get_list(is_img2img),
            outputs=unit,
            queue=False,
        )
        return unit


    @staticmethod
    def on_after_component(component, **_kwargs):
        elem_id = getattr(component, "elem_id", None)

        if elem_id == "txt2img_generate":
            AnimateDiffUiGroup.txt2img_submit_button = component
            return

        if elem_id == "img2img_generate":
            AnimateDiffUiGroup.img2img_submit_button = component
            return


    @staticmethod
    def on_before_ui():
        AnimateDiffLCM.hack_kdiff_ui()
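How these static hooks get wired up lives elsewhere in the extension (presumably scripts/animatediff.py); a hedged sketch of the typical A1111 registration via modules.script_callbacks, which exposes on_after_component and on_before_ui, would be:

from modules import script_callbacks
from scripts.animatediff_ui import AnimateDiffUiGroup

# Sketch only: register the UI group's hooks with the webui callback system.
script_callbacks.on_after_component(AnimateDiffUiGroup.on_after_component)
script_callbacks.on_before_ui(AnimateDiffUiGroup.on_before_ui)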