# Adapted from https://github.com/Rudrabha/Wav2Lip/blob/master/audio.py
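# Audio utilities for loading/saving waveforms and computing (mel) spectrograms,
# with all hyperparameters read from a single OmegaConf YAML config.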
import librosa
import librosa.filters
import numpy as np
import soundfile as sf
import torch
from scipy import signal
from scipy.io import wavfile
from omegaconf import OmegaConf

audio_config_path = "configs/audio.yaml"
config = OmegaConf.load(audio_config_path)

def load_wav(path, sr):
    # Load audio as a float waveform, resampled to `sr`.
    return librosa.load(path, sr=sr)[0]

def save_wav(wav, path, sr):
    # Rescale to the int16 full-scale range before writing; the max() guards
    # against division by zero on silent inputs.
    wav *= 32767 / max(0.01, np.max(np.abs(wav)))
    # proposed by @dsmiller
    wavfile.write(path, sr, wav.astype(np.int16))

def save_wavenet_wav(wav, path, sr):
    # librosa.output.write_wav was removed in librosa 0.8, so write via soundfile.
    sf.write(path, wav, sr)

def preemphasis(wav, k, preemphasize=True):
    # High-pass FIR filter: y[t] = x[t] - k * x[t - 1]
    if preemphasize:
        return signal.lfilter([1, -k], [1], wav)
    return wav

def inv_preemphasis(wav, k, inv_preemphasize=True):
    # Inverse IIR filter of preemphasis: y[t] = x[t] + k * y[t - 1]
    if inv_preemphasize:
        return signal.lfilter([1], [1, -k], wav)
    return wav

def get_hop_size():
    hop_size = config.audio.hop_size
    if hop_size is None:
        assert config.audio.frame_shift_ms is not None
        hop_size = int(config.audio.frame_shift_ms / 1000 * config.audio.sample_rate)
    return hop_size
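
# Example (hypothetical config values): with frame_shift_ms = 12.5 and
# sample_rate = 16000, get_hop_size() returns int(0.0125 * 16000) = 200 samples.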

def linearspectrogram(wav):
    D = _stft(preemphasis(wav, config.audio.preemphasis, config.audio.preemphasize))
    S = _amp_to_db(np.abs(D)) - config.audio.ref_level_db
    if config.audio.signal_normalization:
        return _normalize(S)
    return S

def melspectrogram(wav):
    D = _stft(preemphasis(wav, config.audio.preemphasis, config.audio.preemphasize))
    S = _amp_to_db(_linear_to_mel(np.abs(D))) - config.audio.ref_level_db
    if config.audio.signal_normalization:
        return _normalize(S)
    return S

def _lws_processor():
    import lws
    return lws.lws(config.audio.n_fft, get_hop_size(), fftsize=config.audio.win_size, mode="speech")

def _stft(y):
    if config.audio.use_lws:
        # _lws_processor takes no arguments; it reads everything from `config`.
        return _lws_processor().stft(y).T
    else:
        return librosa.stft(y=y, n_fft=config.audio.n_fft, hop_length=get_hop_size(), win_length=config.audio.win_size)

##########################################################
# These are only correct when using lws!!! (This was messing with Wavenet quality for a long time!)


def num_frames(length, fsize, fshift):
    """Compute the number of time frames of a spectrogram."""
    pad = fsize - fshift
    if length % fshift == 0:
        M = (length + pad * 2 - fsize) // fshift + 1
    else:
        M = (length + pad * 2 - fsize) // fshift + 2
    return M

def pad_lr(x, fsize, fshift):
    """Compute left and right padding."""
    M = num_frames(len(x), fsize, fshift)
    pad = fsize - fshift
    T = len(x) + 2 * pad
    r = (M - 1) * fshift + fsize - T
    return pad, pad + r
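
# Worked example (hypothetical values): for len(x) = 16000, fsize = 800,
# fshift = 200, we get pad = 600 and, since 16000 % 200 == 0,
# M = (16000 + 1200 - 800) // 200 + 1 = 83. Then T = 17200 and
# r = 82 * 200 + 800 - 17200 = 0, so pad_lr returns (600, 600).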

##########################################################
# Librosa correct padding


def librosa_pad_lr(x, fsize, fshift):
    return 0, (x.shape[0] // fshift + 1) * fshift - x.shape[0]

# Conversions
_mel_basis = None


def _linear_to_mel(spectrogram):
    global _mel_basis
    if _mel_basis is None:
        # Build the mel filterbank once and cache it.
        _mel_basis = _build_mel_basis()
    return np.dot(_mel_basis, spectrogram)

def _build_mel_basis():
    # fmax cannot exceed the Nyquist frequency.
    assert config.audio.fmax <= config.audio.sample_rate // 2
    return librosa.filters.mel(
        sr=config.audio.sample_rate,
        n_fft=config.audio.n_fft,
        n_mels=config.audio.num_mels,
        fmin=config.audio.fmin,
        fmax=config.audio.fmax,
    )

def _amp_to_db(x):
    min_level = np.exp(config.audio.min_level_db / 20 * np.log(10))
    return 20 * np.log10(np.maximum(min_level, x))

def _db_to_amp(x):
    return np.power(10.0, x * 0.05)
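
# _normalize maps dB-scale spectrograms from [min_level_db, 0] to
# [0, max_abs_value] (or to [-max_abs_value, max_abs_value] when
# symmetric_mels is set); _denormalize inverts the mapping.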

def _normalize(S):
    if config.audio.allow_clipping_in_normalization:
        if config.audio.symmetric_mels:
            return np.clip(
                (2 * config.audio.max_abs_value) * ((S - config.audio.min_level_db) / (-config.audio.min_level_db))
                - config.audio.max_abs_value,
                -config.audio.max_abs_value,
                config.audio.max_abs_value,
            )
        else:
            return np.clip(
                config.audio.max_abs_value * ((S - config.audio.min_level_db) / (-config.audio.min_level_db)),
                0,
                config.audio.max_abs_value,
            )
    assert S.max() <= 0 and S.min() - config.audio.min_level_db >= 0
    if config.audio.symmetric_mels:
        return (2 * config.audio.max_abs_value) * (
            (S - config.audio.min_level_db) / (-config.audio.min_level_db)
        ) - config.audio.max_abs_value
    else:
        return config.audio.max_abs_value * ((S - config.audio.min_level_db) / (-config.audio.min_level_db))

def _denormalize(D):
    if config.audio.allow_clipping_in_normalization:
        if config.audio.symmetric_mels:
            return (
                (np.clip(D, -config.audio.max_abs_value, config.audio.max_abs_value) + config.audio.max_abs_value)
                * -config.audio.min_level_db
                / (2 * config.audio.max_abs_value)
            ) + config.audio.min_level_db
        else:
            return (
                np.clip(D, 0, config.audio.max_abs_value) * -config.audio.min_level_db / config.audio.max_abs_value
            ) + config.audio.min_level_db
    if config.audio.symmetric_mels:
        return (
            (D + config.audio.max_abs_value) * -config.audio.min_level_db / (2 * config.audio.max_abs_value)
        ) + config.audio.min_level_db
    else:
        return (D * -config.audio.min_level_db / config.audio.max_abs_value) + config.audio.min_level_db

def get_melspec_overlap(audio_samples, melspec_length=52):
    # Slice the mel spectrogram into overlapping windows of `melspec_length`
    # frames, advancing 3 frames at a time, and stack them into one tensor.
    mel_spec_overlap = melspectrogram(audio_samples.numpy())
    mel_spec_overlap = torch.from_numpy(mel_spec_overlap)
    i = 0
    mel_spec_overlap_list = []
    while i + melspec_length < mel_spec_overlap.shape[1] - 3:
        mel_spec_overlap_list.append(mel_spec_overlap[:, i : i + melspec_length].unsqueeze(0))
        i += 3
    mel_spec_overlap = torch.stack(mel_spec_overlap_list)
    return mel_spec_overlap
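
# Minimal usage sketch (assumptions: "sample.wav" is a hypothetical mono file,
# and configs/audio.yaml defines the fields referenced above, e.g. sample_rate,
# n_fft, hop_size, win_size, num_mels, fmin, fmax).
if __name__ == "__main__":
    wav = load_wav("sample.wav", config.audio.sample_rate)
    mel = melspectrogram(wav)  # shape: (num_mels, num_frames)
    print("mel spectrogram shape:", mel.shape)
    windows = get_melspec_overlap(torch.from_numpy(wav))
    print("overlapping windows:", windows.shape)  # (N, 1, num_mels, 52)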