Init
- README.md +5 -4
- app.py +74 -0
- audiodiffusion/__init__.py +141 -0
- audiodiffusion/image_encoder.py +21 -0
- audiodiffusion/mel.py +169 -0
- audiodiffusion/pipeline_audio_diffusion.py +257 -0
- audiodiffusion/utils.py +303 -0
- requirements.txt +7 -0
README.md
CHANGED
@@ -1,9 +1,10 @@
 ---
-title: Image
-emoji:
-colorFrom:
-colorTo:
+title: Image-based soundtrack generation
+emoji: 🎶
+colorFrom: purple
+colorTo: blue
 sdk: gradio
+python_version: 3.10.8
 sdk_version: 4.5.0
 app_file: app.py
 pinned: false
app.py
ADDED
@@ -0,0 +1,74 @@
import torch
import gradio as gr
from transformers import ViTImageProcessor, ViTModel
from audiodiffusion import AudioDiffusionPipeline, ImageEncoder

device = "cuda" if torch.cuda.is_available() else "cpu"
generator1 = torch.Generator(device)
generator2 = torch.Generator(device)

pipe = AudioDiffusionPipeline.from_pretrained('Woleek/clMusDiff').to(device)

processor = ViTImageProcessor.from_pretrained('google/vit-base-patch16-224-in21k')
extractor = ViTModel.from_pretrained('google/vit-base-patch16-224-in21k')
image_encoder = ImageEncoder(processor, extractor)

def _encode_image(image):
    return torch.unsqueeze(image_encoder.encode(image), axis=1).to(device)

def _generate_spectrogram(condition, steps, eta):
    images, (sample_rate, audios) = pipe(
        batch_size=1,
        steps=steps,
        generator=generator1,
        step_generator=generator2,
        encoding=condition,
        eta=eta,
        return_dict=False,
    )
    return images[0], (sample_rate, audios[0])

def run_generation(image, steps, eta):
    condition = _encode_image(image)
    spectrogram, (sr, audio) = _generate_spectrogram(condition, steps, eta)
    return spectrogram, (sr, audio)

with gr.Blocks(title="Image-based soundtrack generation") as demo:
    gr.Markdown('''
    # Image-based soundtrack generation
    ''')
    with gr.Row():
        with gr.Column():
            image = gr.Image(
                type="pil",
                label="Conditioning image"
            )
            steps = gr.Slider(
                minimum=1,
                maximum=1000,
                step=1,
                value=50,
                label="Denoising steps"
            )
            eta = gr.Slider(
                minimum=0.1,
                maximum=1.0,
                step=0.1,
                value=0.9,
                label="η"
            )
            gr.Markdown('''
            Eta (η) is a variable that controls the level of interpolation between a deterministic DDIM (η=0.0) and a stochastic DDPM (η=1.0).
            ''')
            btn = gr.Button("Generate")
            clear = gr.ClearButton(image)
        with gr.Column():
            spectrogram = gr.Image(
                label="Generated Mel spectrogram"
            )
            audio = gr.Audio(
                label="Resulting audio"
            )
    btn.click(run_generation, inputs=[image, steps, eta], outputs=[spectrogram, audio])

demo.launch()
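For context, a minimal sketch of exercising the same generation path outside the Gradio UI. It is not part of the commit; it reuses the model IDs hard-coded in app.py above, and the input image path and output filename are hypothetical.

```python
# Sketch: conditional generation as wired up in app.py, without the UI.
import torch
from PIL import Image
from transformers import ViTImageProcessor, ViTModel
from audiodiffusion import AudioDiffusionPipeline, ImageEncoder

device = "cuda" if torch.cuda.is_available() else "cpu"
pipe = AudioDiffusionPipeline.from_pretrained("Woleek/clMusDiff").to(device)
encoder = ImageEncoder(
    ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
    ViTModel.from_pretrained("google/vit-base-patch16-224-in21k"),
)

# (1, 768) CLS embedding -> (1, 1, 768) cross-attention conditioning
condition = encoder.encode(Image.open("cover.jpg")).unsqueeze(1).to(device)  # hypothetical image
images, (sample_rate, audios) = pipe(
    batch_size=1, steps=50, encoding=condition, eta=0.9, return_dict=False
)
images[0].save("mel.png")  # generated mel spectrogram (PIL image)
# audios[0] is a float waveform at `sample_rate`; write it out with e.g. soundfile
```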
audiodiffusion/__init__.py
ADDED
@@ -0,0 +1,141 @@
from typing import Iterable, Tuple

import numpy as np
import torch
from librosa.beat import beat_track
from PIL import Image
from tqdm.auto import tqdm

# from diffusers import AudioDiffusionPipeline
from .pipeline_audio_diffusion import AudioDiffusionPipeline
from .image_encoder import ImageEncoder

VERSION = "1.5.6"


class AudioDiffusion:
    def __init__(
        self,
        model_id: str = "teticio/audio-diffusion-256",
        cuda: bool = torch.cuda.is_available(),
        progress_bar: Iterable = tqdm,
    ):
        """Class for generating audio using De-noising Diffusion Probabilistic Models.

        Args:
            model_id (String): name of model (local directory or Hugging Face Hub)
            cuda (bool): use CUDA?
            progress_bar (iterable): iterable callback for progress updates or None
        """
        self.model_id = model_id
        self.pipe = AudioDiffusionPipeline.from_pretrained(self.model_id)
        if cuda:
            self.pipe.to("cuda")
        self.progress_bar = progress_bar or (lambda _: _)

    def generate_spectrogram_and_audio(
        self,
        steps: int = None,
        generator: torch.Generator = None,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
    ) -> Tuple[Image.Image, Tuple[int, np.ndarray]]:
        """Generate random mel spectrogram and convert to audio.

        Args:
            steps (int): number of de-noising steps (defaults to 50 for DDIM, 1000 for DDPM)
            generator (torch.Generator): random number generator or None
            step_generator (torch.Generator): random number generator used to de-noise or None
            eta (float): parameter between 0 and 1 used with DDIM scheduler
            noise (torch.Tensor): noisy image or None
            encoding (`torch.Tensor`): for UNet2DConditionModel shape (batch_size, seq_length, cross_attention_dim)

        Returns:
            PIL Image: mel spectrogram
            (float, np.ndarray): sample rate and raw audio
        """
        images, (sample_rate, audios) = self.pipe(
            batch_size=1,
            steps=steps,
            generator=generator,
            step_generator=step_generator,
            eta=eta,
            noise=noise,
            encoding=encoding,
            return_dict=False,
        )
        return images[0], (sample_rate, audios[0])

    def generate_spectrogram_and_audio_from_audio(
        self,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        encoding: torch.Tensor = None,
        noise: torch.Tensor = None,
    ) -> Tuple[Image.Image, Tuple[int, np.ndarray]]:
        """Generate random mel spectrogram from audio input and convert to audio.

        Args:
            audio_file (str): must be a file on disk due to Librosa limitation or
            raw_audio (np.ndarray): audio as numpy array
            slice (int): slice number of audio to convert
            start_step (int): step to start from
            steps (int): number of de-noising steps (defaults to 50 for DDIM, 1000 for DDPM)
            generator (torch.Generator): random number generator or None
            mask_start_secs (float): number of seconds of audio to mask (not generate) at start
            mask_end_secs (float): number of seconds of audio to mask (not generate) at end
            step_generator (torch.Generator): random number generator used to de-noise or None
            eta (float): parameter between 0 and 1 used with DDIM scheduler
            encoding (`torch.Tensor`): for UNet2DConditionModel shape (batch_size, seq_length, cross_attention_dim)
            noise (torch.Tensor): noisy image or None

        Returns:
            PIL Image: mel spectrogram
            (float, np.ndarray): sample rate and raw audio
        """

        images, (sample_rate, audios) = self.pipe(
            batch_size=1,
            audio_file=audio_file,
            raw_audio=raw_audio,
            slice=slice,
            start_step=start_step,
            steps=steps,
            generator=generator,
            mask_start_secs=mask_start_secs,
            mask_end_secs=mask_end_secs,
            step_generator=step_generator,
            eta=eta,
            noise=noise,
            encoding=encoding,
            return_dict=False,
        )
        return images[0], (sample_rate, audios[0])

    @staticmethod
    def loop_it(audio: np.ndarray, sample_rate: int, loops: int = 12) -> np.ndarray:
        """Loop audio

        Args:
            audio (np.ndarray): audio as numpy array
            sample_rate (int): sample rate of audio
            loops (int): number of times to loop

        Returns:
            (float, np.ndarray): sample rate and raw audio or None
        """
        _, beats = beat_track(y=audio, sr=sample_rate, units="samples")
        beats_in_bar = (len(beats) - 1) // 4 * 4
        if beats_in_bar > 0:
            return np.tile(audio[beats[0] : beats[beats_in_bar]], loops)
        return None
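A short usage sketch for the AudioDiffusion wrapper above (not part of this commit). It assumes the default unconditional checkpoint named in the constructor; loop_it returns None when no full bar of beats can be detected.

```python
from audiodiffusion import AudioDiffusion

ad = AudioDiffusion()  # defaults to "teticio/audio-diffusion-256"
image, (sr, audio) = ad.generate_spectrogram_and_audio(steps=50)
looped = AudioDiffusion.loop_it(audio, sr, loops=12)  # np.ndarray or None
```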
audiodiffusion/image_encoder.py
ADDED
@@ -0,0 +1,21 @@
import torch
from diffusers import ConfigMixin, Mel, ModelMixin

class ImageEncoder(ModelMixin, ConfigMixin):
    def __init__(self, image_processor, encoder_model):
        super().__init__()
        self.processor = image_processor
        self.encoder = encoder_model
        self.eval()

    def forward(self, x):
        x = self.encoder(x)
        return x

    @torch.no_grad()
    def encode(self, image):
        x = self.processor(image, return_tensors="pt")['pixel_values']
        y = self(x)
        y = y.last_hidden_state
        embeddings = y[:, 0, :]
        return embeddings
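A sketch of what ImageEncoder.encode returns, assuming the ViT-base weights used in app.py: the CLS-token slice of the last hidden state, shape (1, 768), which app.py unsqueezes to (1, 1, 768) before passing it as the UNet's cross-attention encoding.

```python
from PIL import Image
from transformers import ViTImageProcessor, ViTModel
from audiodiffusion.image_encoder import ImageEncoder

enc = ImageEncoder(
    ViTImageProcessor.from_pretrained("google/vit-base-patch16-224-in21k"),
    ViTModel.from_pretrained("google/vit-base-patch16-224-in21k"),
)
emb = enc.encode(Image.new("RGB", (224, 224)))  # dummy image; the processor resizes/normalizes
print(emb.shape)  # torch.Size([1, 768])
```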
audiodiffusion/mel.py
ADDED
@@ -0,0 +1,169 @@
# This code has been migrated to diffusers but can be run locally with
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256", custom_pipeline="audio-diffusion/audiodiffusion/pipeline_audio_diffusion.py")

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import warnings
from typing import Callable, Union

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.schedulers.scheduling_utils import SchedulerMixin

warnings.filterwarnings("ignore")

import numpy as np  # noqa: E402


try:
    import librosa  # noqa: E402

    _librosa_can_be_imported = True
    _import_error = ""
except Exception as e:
    _librosa_can_be_imported = False
    _import_error = (
        f"Cannot import librosa because {e}. Make sure to correctly install librosa to be able to install it."
    )


from PIL import Image  # noqa: E402


class Mel(ConfigMixin, SchedulerMixin):
    """
    Parameters:
        x_res (`int`): x resolution of spectrogram (time)
        y_res (`int`): y resolution of spectrogram (frequency bins)
        sample_rate (`int`): sample rate of audio
        n_fft (`int`): number of Fast Fourier Transforms
        hop_length (`int`): hop length (a higher number is recommended for lower than 256 y_res)
        top_db (`int`): loudest in decibels
        n_iter (`int`): number of iterations for Griffin-Lim mel inversion
    """

    config_name = "mel_config.json"

    @register_to_config
    def __init__(
        self,
        x_res: int = 256,
        y_res: int = 256,
        sample_rate: int = 22050,
        n_fft: int = 2048,
        hop_length: int = 512,
        top_db: int = 80,
        n_iter: int = 32,
    ):
        self.hop_length = hop_length
        self.sr = sample_rate
        self.n_fft = n_fft
        self.top_db = top_db
        self.n_iter = n_iter
        self.set_resolution(x_res, y_res)
        self.audio = None

        if not _librosa_can_be_imported:
            raise ValueError(_import_error)

    def set_resolution(self, x_res: int, y_res: int):
        """Set resolution.

        Args:
            x_res (`int`): x resolution of spectrogram (time)
            y_res (`int`): y resolution of spectrogram (frequency bins)
        """
        self.x_res = x_res
        self.y_res = y_res
        self.n_mels = self.y_res
        self.slice_size = self.x_res * self.hop_length - 1

    def load_audio(self, audio_file: str = None, raw_audio: np.ndarray = None):
        """Load audio.

        Args:
            audio_file (`str`): must be a file on disk due to Librosa limitation or
            raw_audio (`np.ndarray`): audio as numpy array
        """
        if audio_file is not None:
            self.audio, _ = librosa.load(audio_file, mono=True, sr=self.sr)
        else:
            self.audio = raw_audio

        # Pad with silence if necessary.
        if len(self.audio) < self.x_res * self.hop_length:
            self.audio = np.concatenate([self.audio, np.zeros((self.x_res * self.hop_length - len(self.audio),))])

    def get_number_of_slices(self) -> int:
        """Get number of slices in audio.

        Returns:
            `int`: number of spectrograms audio can be sliced into
        """
        return len(self.audio) // self.slice_size

    def get_audio_slice(self, slice: int = 0) -> np.ndarray:
        """Get slice of audio.

        Args:
            slice (`int`): slice number of audio (out of get_number_of_slices())

        Returns:
            `np.ndarray`: audio as numpy array
        """
        return self.audio[self.slice_size * slice : self.slice_size * (slice + 1)]

    def get_sample_rate(self) -> int:
        """Get sample rate.

        Returns:
            `int`: sample rate of audio
        """
        return self.sr

    def audio_slice_to_image(self, slice: int, ref: Union[float, Callable] = np.max) -> Image.Image:
        """Convert slice of audio to spectrogram.

        Args:
            slice (`int`): slice number of audio to convert (out of get_number_of_slices())
            ref (`Union[float, Callable]`): reference value for spectrogram

        Returns:
            `PIL Image`: grayscale image of x_res x y_res
        """
        S = librosa.feature.melspectrogram(
            y=self.get_audio_slice(slice), sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_mels=self.n_mels
        )
        log_S = librosa.power_to_db(S, ref=ref, top_db=self.top_db)
        bytedata = (((log_S + self.top_db) * 255 / self.top_db).clip(0, 255) + 0.5).astype(np.uint8)
        image = Image.fromarray(bytedata)
        return image

    def image_to_audio(self, image: Image.Image) -> np.ndarray:
        """Converts spectrogram to audio.

        Args:
            image (`PIL Image`): x_res x y_res grayscale image

        Returns:
            audio (`np.ndarray`): raw audio
        """
        bytedata = np.frombuffer(image.tobytes(), dtype="uint8").reshape((image.height, image.width))
        log_S = bytedata.astype("float") * self.top_db / 255 - self.top_db
        S = librosa.db_to_power(log_S)
        audio = librosa.feature.inverse.mel_to_audio(
            S, sr=self.sr, n_fft=self.n_fft, hop_length=self.hop_length, n_iter=self.n_iter
        )
        return audio
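A round-trip sketch for the Mel helper above, with a hypothetical WAV path. With the defaults, each slice of x_res * hop_length - 1 samples maps to a 256x256 grayscale spectrogram, and image_to_audio reconstructs a waveform with Griffin-Lim, so the round trip is lossy.

```python
from audiodiffusion.mel import Mel

mel = Mel()                          # 256x256 spectrograms at 22050 Hz by default
mel.load_audio("example.wav")        # hypothetical local file
img = mel.audio_slice_to_image(0)    # PIL grayscale image, 256x256
audio = mel.image_to_audio(img)      # np.ndarray at mel.get_sample_rate()
print(mel.get_number_of_slices(), img.size, audio.shape)
```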
audiodiffusion/pipeline_audio_diffusion.py
ADDED
@@ -0,0 +1,257 @@
# This code has been migrated to diffusers but can be run locally with
# pipe = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-256", custom_pipeline="audio-diffusion/audiodiffusion/pipeline_audio_diffusion.py")

# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from math import acos, sin
from typing import List, Tuple, Union

import numpy as np
import torch
from diffusers import (
    AudioPipelineOutput,
    AutoencoderKL,
    DDIMScheduler,
    DDPMScheduler,
    DiffusionPipeline,
    ImagePipelineOutput,
    UNet2DConditionModel,
)
from diffusers.utils import BaseOutput
from PIL import Image

from .mel import Mel

class AudioDiffusionPipeline(DiffusionPipeline):
    """
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    Parameters:
        vqvae ([`AutoencoderKL`]): Variational AutoEncoder for Latent Audio Diffusion or None
        unet ([`UNet2DConditionModel`]): UNET model
        mel ([`Mel`]): transform audio <-> spectrogram
        scheduler ([`DDIMScheduler` or `DDPMScheduler`]): de-noising scheduler
    """

    _optional_components = ["vqvae"]

    def __init__(
        self,
        vqvae: AutoencoderKL,
        unet: UNet2DConditionModel,
        mel: Mel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
    ):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler, mel=mel, vqvae=vqvae)

    def get_default_steps(self) -> int:
        """Returns default number of steps recommended for inference.

        Returns:
            `int`: number of steps
        """
        return 50 if isinstance(self.scheduler, DDIMScheduler) else 1000

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        audio_file: str = None,
        raw_audio: np.ndarray = None,
        slice: int = 0,
        start_step: int = 0,
        steps: int = None,
        generator: torch.Generator = None,
        mask_start_secs: float = 0,
        mask_end_secs: float = 0,
        step_generator: torch.Generator = None,
        eta: float = 0,
        noise: torch.Tensor = None,
        encoding: torch.Tensor = None,
        return_dict=True,
    ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        """Generate random mel spectrogram from audio input and convert to audio.

        Args:
            batch_size (`int`): number of samples to generate
            audio_file (`str`): must be a file on disk due to Librosa limitation or
            raw_audio (`np.ndarray`): audio as numpy array
            slice (`int`): slice number of audio to convert
            start_step (`int`): step to start from
            steps (`int`): number of de-noising steps (defaults to 50 for DDIM, 1000 for DDPM)
            generator (`torch.Generator`): random number generator or None
            mask_start_secs (`float`): number of seconds of audio to mask (not generate) at start
            mask_end_secs (`float`): number of seconds of audio to mask (not generate) at end
            step_generator (`torch.Generator`): random number generator used to de-noise or None
            eta (`float`): parameter between 0 and 1 used with DDIM scheduler
            noise (`torch.Tensor`): noise tensor of shape (batch_size, 1, height, width) or None
            encoding (`torch.Tensor`): for UNet2DConditionModel shape (batch_size, seq_length, cross_attention_dim)
            return_dict (`bool`): if True return AudioPipelineOutput, ImagePipelineOutput else Tuple

        Returns:
            `List[PIL Image]`: mel spectrograms (`float`, `List[np.ndarray]`): sample rate and raw audios
        """

        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.sample_size) == int:
            self.unet.sample_size = (self.unet.sample_size, self.unet.sample_size)
        if noise is None:
            noise = torch.randn(
                (
                    batch_size,
                    self.unet.in_channels,
                    self.unet.sample_size[0],
                    self.unet.sample_size[1],
                ),
                generator=generator,
                device=self.device,
            )
        images = noise
        mask = None

        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file, raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes(), dtype="uint8").reshape(
                (input_image.height, input_image.width)
            )
            input_image = (input_image / 255) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :], dtype=torch.float).to(self.device)

            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images, 0)).latent_dist.sample(
                    generator=generator
                )[0]
                input_images = 0.18215 * input_images

            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images, noise, self.scheduler.timesteps[start_step - 1])

            pixels_per_second = (
                self.unet.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images, noise, torch.tensor(self.scheduler.timesteps[start_step:]))

        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet, UNet2DConditionModel):
                model_output = self.unet(images, t, encoding)["sample"]
            else:
                model_output = self.unet(images, t)["sample"]

            if isinstance(self.scheduler, DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    eta=eta,
                    generator=step_generator,
                )["prev_sample"]
            else:
                images = self.scheduler.step(
                    model_output=model_output,
                    timestep=t,
                    sample=images,
                    generator=step_generator,
                )["prev_sample"]

            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]

        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / 0.18215 * images
            images = self.vqvae.decode(images)["sample"]

        images = (images / 2 + 0.5).clamp(0, 1)
        images = images.cpu().permute(0, 2, 3, 1).numpy()
        images = (images * 255).round().astype("uint8")
        images = list(
            map(lambda _: Image.fromarray(_[:, :, 0]), images)
            if images.shape[3] == 1
            else map(lambda _: Image.fromarray(_, mode="RGB").convert("L"), images)
        )

        audios = list(map(lambda _: self.mel.image_to_audio(_), images))
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)

        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]), **ImagePipelineOutput(images))

    @torch.no_grad()
    def encode(self, images: List[Image.Image], steps: int = 50) -> np.ndarray:
        """Reverse step process: recover noisy image from generated image.

        Args:
            images (`List[PIL Image]`): list of images to encode
            steps (`int`): number of encoding steps to perform (defaults to 50)

        Returns:
            `np.ndarray`: noise tensor of shape (batch_size, 1, height, width)
        """

        # Only works with DDIM as this method is deterministic
        assert isinstance(self.scheduler, DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes(), dtype="uint8").reshape((1, image.height, image.width)) for image in images]
        )
        sample = (sample / 255) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)

        for t in self.progress_bar(torch.flip(self.scheduler.timesteps, (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample, t)["sample"]
            pred_sample_direction = (1 - alpha_prod_t_prev) ** (0.5) * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** (0.5) + beta_prod_t ** (0.5) * model_output

        return sample

    @staticmethod
    def slerp(x0: torch.Tensor, x1: torch.Tensor, alpha: float) -> torch.Tensor:
        """Spherical Linear intERPolation.

        Args:
            x0 (`torch.Tensor`): first tensor to interpolate between
            x1 (`torch.Tensor`): second tensor to interpolate between
            alpha (`float`): interpolation between 0 and 1

        Returns:
            `torch.Tensor`: interpolated tensor
        """

        theta = acos(torch.dot(torch.flatten(x0), torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
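Because encode() runs the deterministic DDIM process in reverse and slerp() interpolates on the noise sphere, the two can be combined to blend generated clips. The sketch below is not part of this commit and assumes an unconditional DDIM checkpoint such as teticio/audio-diffusion-ddim-256; encode() calls the UNet without a conditioning encoding, so it does not apply to the conditional model used in app.py.

```python
import torch
from audiodiffusion import AudioDiffusionPipeline

pipe = AudioDiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256")
images, (sr, audios) = pipe(batch_size=2, return_dict=False)

noise = pipe.encode(images, steps=50)                        # (2, 1, H, W) tensor
mid = AudioDiffusionPipeline.slerp(noise[0], noise[1], 0.5)  # halfway between the two
blend, (sr, blend_audio) = pipe(noise=mid.unsqueeze(0), return_dict=False)
```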
audiodiffusion/utils.py
ADDED
@@ -0,0 +1,303 @@
# adapted from https://github.com/huggingface/diffusers/blob/main/scripts/convert_original_stable_diffusion_to_diffusers.py

import torch
from diffusers import AutoencoderKL


def shave_segments(path, n_shave_prefix_segments=1):
    """
    Removes segments. Positive values shave the first segments, negative shave the last segments.
    """
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_vae_resnet_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside resnets to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("nin_shortcut", "conv_shortcut")
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_vae_attention_paths(old_list, n_shave_prefix_segments=0):
    """
    Updates paths inside attentions to the new naming scheme (local renaming)
    """
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("q.weight", "query.weight")
        new_item = new_item.replace("q.bias", "query.bias")

        new_item = new_item.replace("k.weight", "key.weight")
        new_item = new_item.replace("k.bias", "key.bias")

        new_item = new_item.replace("v.weight", "value.weight")
        new_item = new_item.replace("v.bias", "value.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths,
    checkpoint,
    old_checkpoint,
    attention_paths_to_split=None,
    additional_replacements=None,
    config=None,
):
    """
    This does the final conversion step: take locally converted weights and apply a global renaming
    to them. It splits attention layers, and takes into account additional replacements
    that may arise.

    Assigns the weights to the new checkpoint.
    """
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def conv_attn_to_linear(checkpoint):
    keys = list(checkpoint.keys())
    attn_keys = ["query.weight", "key.weight", "value.weight"]
    for key in keys:
        if ".".join(key.split(".")[-2:]) in attn_keys:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0, 0]
        elif "proj_attn.weight" in key:
            if checkpoint[key].ndim > 2:
                checkpoint[key] = checkpoint[key][:, :, 0]


def create_vae_diffusers_config(original_config):
    """
    Creates a config for the diffusers based on the config of the LDM model.
    """
    vae_params = original_config.model.params.ddconfig
    _ = original_config.model.params.embed_dim

    block_out_channels = [vae_params.ch * mult for mult in vae_params.ch_mult]
    down_block_types = ["DownEncoderBlock2D"] * len(block_out_channels)
    up_block_types = ["UpDecoderBlock2D"] * len(block_out_channels)

    config = dict(
        sample_size=tuple(vae_params.resolution),
        in_channels=vae_params.in_channels,
        out_channels=vae_params.out_ch,
        down_block_types=tuple(down_block_types),
        up_block_types=tuple(up_block_types),
        block_out_channels=tuple(block_out_channels),
        latent_channels=vae_params.z_channels,
        layers_per_block=vae_params.num_res_blocks,
    )
    return config


def convert_ldm_vae_checkpoint(checkpoint, config):
    # extract state dict for VAE
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(
            paths,
            new_checkpoint,
            vae_state_dict,
            additional_replacements=[meta_path],
            config=config,
        )

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(
        paths,
        new_checkpoint,
        vae_state_dict,
        additional_replacements=[meta_path],
        config=config,
    )
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint


def convert_ldm_to_hf_vae(ldm_checkpoint, ldm_config, hf_checkpoint, sample_size):
    checkpoint = torch.load(ldm_checkpoint)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(ldm_config)
    converted_vae_checkpoint = convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(hf_checkpoint)
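A usage sketch for the conversion helper above, with hypothetical paths. create_vae_diffusers_config reads the LDM YAML via attribute access, so the config is assumed to be loaded with OmegaConf (not listed in requirements.txt); the sample_size argument is accepted but unused by convert_ldm_to_hf_vae.

```python
from omegaconf import OmegaConf
from audiodiffusion.utils import convert_ldm_to_hf_vae

ldm_config = OmegaConf.load("ldm_autoencoder_kl.yaml")  # hypothetical LDM config
convert_ldm_to_hf_vae(
    ldm_checkpoint="ldm_autoencoder_kl.ckpt",            # hypothetical weights
    ldm_config=ldm_config,
    hf_checkpoint="models/autoencoder-kl",               # output directory for the diffusers VAE
    sample_size=256,
)
```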
requirements.txt
ADDED
@@ -0,0 +1,7 @@
torch==2.0.1
gradio==4.5.0
transformers==4.35.2
numpy==1.23.5
Pillow==9.3.0
diffusers==0.23.1
librosa==0.10.1