AisakaMikoto committed
Commit 5b04e0d · verified · 1 Parent(s): 15eff8b

Delete CLAPSep
CLAPSep/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: CLAPSep
- emoji: 👁
- colorFrom: pink
- colorTo: green
- sdk: gradio
- sdk_version: 4.19.2
- app_file: app.py
- pinned: false
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
CLAPSep/app.py DELETED
@@ -1,57 +0,0 @@
- import gradio as gr
- from model.CLAPSep import CLAPSep
- import torch
- import librosa
-
- model_config = {"lan_embed_dim": 1024,
-                 "depths": [1, 1, 1, 1],
-                 "embed_dim": 128,
-                 "encoder_embed_dim": 128,
-                 "phase": False,
-                 "spec_factor": 8,
-                 "d_attn": 640,
-                 "n_masker_layer": 3,
-                 "conv": False}
- DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'
- CLAP_path = "model/music_audioset_epoch_15_esc_90.14.pt"
-
-
- model = CLAPSep(model_config, CLAP_path).to(DEVICE)
- ckpt = torch.load('model/best_model.ckpt', map_location=DEVICE)
- model.load_state_dict(ckpt, strict=False)
- model.eval()
-
-
-
- def inference(audio_file_path: str, text_p: str, text_n: str):
-     print(f"Separate audio from [{audio_file_path}] with textual query p: [{text_p}] and n: [{text_n}]")
-
-     mixture, _ = librosa.load(audio_file_path, sr=32000)
-     with torch.no_grad():
-         sep_segment = model.inference_from_data(torch.tensor(mixture).unsqueeze(0), [text_p], [text_n])
-
-     return 32000, sep_segment.squeeze().numpy()
-
-
- with gr.Blocks(title="AudioSep") as demo:
-     with gr.Row():
-         with gr.Column():
-             input_audio = gr.Audio(label="Mixture", type="filepath")
-             text_p = gr.Textbox(label="Positive Query")
-             text_n = gr.Textbox(label="Negative Query")
-         with gr.Column():
-             with gr.Column():
-                 output_audio = gr.Audio(label="Separation Result", scale=10)
-                 button = gr.Button(
-                     "Separate",
-                     variant="primary",
-                     scale=2,
-                     size="lg",
-                     interactive=True,
-                 )
-     button.click(
-         fn=inference, inputs=[input_audio, text_p, text_n], outputs=[output_audio]
-     )
-
-
- demo.queue().launch(share=True)
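
For reference, the demo's `inference` function can also be driven directly from Python without launching the Gradio UI. The sketch below is an illustrative assumption, not part of the original Space: it presumes the deleted `app.py` above is importable as `app`, and `mixture.wav` / `separated.wav` are placeholder paths.

```python
# Hedged sketch: reuse the inference() defined in the deleted app.py above.
# Assumes app.py is importable as `app`; "mixture.wav" and "separated.wav"
# are placeholder paths, not files shipped with this repository.
import soundfile as sf
from app import inference

sr, separated = inference(
    "mixture.wav",                                          # input mixture (any librosa-readable file)
    text_p="A vehicle engine revving then powering down.",  # positive (target) query
    text_n="",                                              # negative query left empty
)
sf.write("separated.wav", separated, sr)  # inference() returns (32000, waveform)
```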
CLAPSep/model/CLAPSep.py DELETED
@@ -1,126 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: UTF-8 -*-
- '''
- @Project :Waveformer-main
- @File :CLAPSep.py
- @IDE :PyCharm
- @Author :Aisaka/Hao Ma @SDU
- @Date :2024/2/28 下午1:12
- '''
-
- import torch
- from torch import nn
- import torchaudio
- import laion_clap
- from .CLAPSep_decoder import HTSAT_Decoder
- import copy
- import loralib as lora
- from torchlibrosa import ISTFT, STFT
- from torchlibrosa.stft import magphase
- import librosa
-
- def set_module(model, submodule_key, module):
-     tokens = submodule_key.split('.')
-     sub_tokens = tokens[:-1]
-     cur_mod = model
-     for s in sub_tokens:
-         cur_mod = getattr(cur_mod, s)
-     setattr(cur_mod, tokens[-1], module)
-
-
- def process_model(model, rank):
-     for n, module in model.named_modules():
-         if 'WindowAttention' in str(type(module)):
-             for n_, layer in module.named_modules():
-                 if isinstance(layer, torch.nn.Linear):
-                     lora_layer = lora.Linear(layer.in_features, layer.out_features, r=rank,
-                                              bias=hasattr(layer, 'bias'), merge_weights=True)
-                     lora_layer.weight = layer.weight
-                     if hasattr(layer, 'bias'):
-                         lora_layer.bias = layer.bias
-                     set_module(model, n+'.'+n_, lora_layer)
-     return model
-
-
- class CLAPSep(nn.Module):
-     def __init__(self, model_config, CLAP_path, use_lora=True, rank=16, nfft=1024):
-         super().__init__()
-         self.resampler = torchaudio.transforms.Resample(32000, 48000)
-         self.clap_model = laion_clap.CLAP_Module(enable_fusion=False, amodel='HTSAT-base', device='cpu')
-         self.clap_model.load_ckpt(CLAP_path)
-         for p in self.clap_model.parameters():
-             p.requires_grad = False
-         self.audio_branch = copy.deepcopy(self.clap_model.model.audio_branch)
-         if use_lora:
-             process_model(self.audio_branch, rank)
-         self.decoder_model = HTSAT_Decoder(**model_config)
-         self.stft = STFT(n_fft=nfft, hop_length=320,
-                          win_length=nfft, window='hann', center=True, pad_mode='reflect',
-                          freeze_parameters=True)
-         self.istft = ISTFT(n_fft=nfft, hop_length=320,
-                            win_length=nfft, window='hann', center=True, pad_mode='reflect',
-                            freeze_parameters=True)
-         self.features = self.install_forward_hooks()
-
-     def wav_reconstruct(self, mask, mag_x, cos_x, sin_x, length):
-         mag_y = torch.nn.functional.relu_(mag_x * mask)
-         cos_y = cos_x
-         sin_y = sin_x
-         pred = self.istft(mag_y * cos_y, mag_y * sin_y, length=length)
-         return pred
-
-     def inference_from_data(self, mixed, pos_prompt, neg_prompt):
-         self.eval()
-         real, imag = self.stft(mixed)
-         mag, cos, sin = magphase(real, imag)
-         self.features.append(mag)
-         with torch.no_grad():
-             embed_pos, embed_neg = torch.chunk(self.clap_model.get_text_embedding(pos_prompt + neg_prompt,
-                                                                                   use_tensor=True), dim=0, chunks=2)
-             embed_pos = torch.zeros_like(embed_pos) if pos_prompt == '' else embed_pos
-             embed_neg = torch.zeros_like(embed_neg) if neg_prompt == '' else embed_neg
-             embed = torch.concat([embed_pos, embed_neg], dim=-1)
-             self.audio_branch({"waveform": self.resampler(mixed)})
-             mask = self.decoder_model(hidden_state=self.features[-1], skip_features=self.features[:-1], embed=embed)
-             pred = self.wav_reconstruct(mask, mag, cos, sin, length=mixed.size(-1))
-         del self.features[:]
-         return pred
-
-     def install_forward_hooks(self):
-         features = []
-
-         def get_features_list(_, __, output):
-             features.append(output)
-
-         def get_features_list_basic_layer(_, __, output):
-             features.append(output[0])
-
-         def spectrogram_padding(_, __, out):
-             return torch.nn.functional.pad(out, (0, 0, 0, 1024 - out.size(2)))
-
-         self.audio_branch.spectrogram_extractor.register_forward_hook(spectrogram_padding)
-         self.audio_branch.patch_embed.register_forward_hook(get_features_list)
-         for module in self.audio_branch.layers:
-             module.register_forward_hook(get_features_list_basic_layer)
-         return features
-
- if __name__ == '__main__':
-     model_config = {"lan_embed_dim": 1024,
-                     "depths": [1, 1, 1, 1],
-                     "embed_dim": 128,
-                     "encoder_embed_dim": 128,
-                     "phase": False,
-                     "spec_factor": 8,
-                     "d_attn": 640,
-                     "n_masker_layer": 3,
-                     "conv": False}
-     CLAP_path = "./music_audioset_epoch_15_esc_90.14.pt"
-
-     model = CLAPSep(model_config, CLAP_path)
-     ckpt = torch.load('best_model.ckpt', map_location='cpu')
-     model.load_state_dict(ckpt, strict=False)
-     model.eval()
-     audio, fs = librosa.load("./510_25.221254348754883_mixture.wav", sr=32000)
-     pred = model.inference_from_data(torch.tensor(audio).unsqueeze(0), pos_prompt=[''], neg_prompt=['A vehicle engine revving then powering down.'])
-     import soundfile as sf
-     sf.write('./pred.wav', pred.squeeze().numpy(), 32000)
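
One detail worth calling out in `CLAPSep.py` is `install_forward_hooks()`: the CLAP audio branch is not modified structurally; instead, forward hooks append its intermediate outputs to a shared list, which the decoder later consumes as skip connections. The toy sketch below uses a stand-in `nn.Sequential` rather than the real HTSAT encoder and only illustrates that hook pattern.

```python
# Toy sketch of the forward-hook pattern from CLAPSep.install_forward_hooks():
# hooked modules append their outputs to a shared list during the forward pass,
# and the caller drains the list afterwards. The encoder here is a stand-in.
import torch
from torch import nn

encoder = nn.Sequential(nn.Linear(8, 16), nn.ReLU(), nn.Linear(16, 32))
features = []

def collect(_module, _inputs, output):
    features.append(output)

for layer in encoder:
    layer.register_forward_hook(collect)

with torch.no_grad():
    encoder(torch.randn(2, 8))

print([tuple(f.shape) for f in features])  # one entry per hooked layer
del features[:]  # CLAPSep clears the list after every inference call
```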
CLAPSep/model/CLAPSep_decoder.py DELETED
@@ -1,605 +0,0 @@
- #!/usr/bin/env python
- # -*- coding: UTF-8 -*-
- '''
- @Project :Waveformer-main
- @File :CLAPsep_decoder.py
- @IDE :PyCharm
- @Author :Aisaka/Hao Ma @SDU
- @Date :2023/10/31 下午8:34
- '''
-
- from laion_clap.clap_module.htsat import *
- from einops import rearrange
- import numpy as np
-
- class Transpose(nn.Module):
-
-     def __init__(self, dim0, dim1):
-         super(Transpose, self).__init__()
-         self.dim0 = dim0
-         self.dim1 = dim1
-
-     def forward(self, x):
-         return x.transpose(self.dim0, self.dim1)
-
-
- class Swish(nn.Module):
-
-     def __init__(self):
-         super(Swish, self).__init__()
-
-     def forward(self, x):
-         return x * x.sigmoid()
-
-
- class Glu(nn.Module):
-
-     def __init__(self, dim):
-         super(Glu, self).__init__()
-         self.dim = dim
-
-     def forward(self, x):
-         x_in, x_gate = x.chunk(2, dim=self.dim)
-         return x_in * x_gate.sigmoid()
-
-
- class FiLM(nn.Module):
-     def __init__(self, dim_in=1024, hidden_dim=768):
-         super(FiLM, self).__init__()
-         self.beta = nn.Linear(dim_in, hidden_dim)
-         self.gamma = nn.Linear(dim_in, hidden_dim)
-
-     def forward(self, hidden_state, embed):
-         embed = embed.unsqueeze(1)
-         return self.gamma(embed) * hidden_state + self.beta(embed)
-
-
- class SkipTrans(nn.Module):
-     def __init__(self, in_features, out_features, embed_dim=512, film=True):
-         super(SkipTrans, self).__init__()
-         self.film = film
-         if film:
-             self.skip_conv = FiLM(embed_dim, out_features)
-         self.feature_proj = nn.Linear(in_features, out_features)
-         self.norm = nn.LayerNorm(out_features)
-
-     def forward(self, skip, embed, x=None):
-         out = self.feature_proj(skip)
-         if self.film:
-             out = self.skip_conv(out, embed)
-         return self.norm(out) if x is None else self.norm(out + x)
-
- class Conv1d(nn.Conv1d):
-
-     def __init__(
-         self,
-         in_channels,
-         out_channels,
-         kernel_size,
-         stride = 1,
-         padding = "same",
-         dilation = 1,
-         groups = 1,
-         bias = True
-     ):
-         super(Conv1d, self).__init__(
-             in_channels=in_channels,
-             out_channels=out_channels,
-             kernel_size=kernel_size,
-             stride=stride,
-             padding=0,
-             dilation=dilation,
-             groups=groups,
-             bias=bias,
-             padding_mode="zeros")
-
-         # Assert
-         assert padding in ["valid", "same", "causal"]
-
-         # Padding
-         if padding == "valid":
-             self.pre_padding = None
-         elif padding == "same":
-             self.pre_padding = nn.ConstantPad1d(padding=((kernel_size - 1) // 2, (kernel_size - 1) // 2), value=0)
-         elif padding == "causal":
-             self.pre_padding = nn.ConstantPad1d(padding=(kernel_size - 1, 0), value=0)
-
-         # Variational Noise
-         self.noise = None
-         self.vn_std = None
-
-     def init_vn(self, vn_std):
-
-         # Variational Noise
-         self.vn_std = vn_std
-
-     def sample_synaptic_noise(self, distributed):
-
-         # Sample Noise
-         self.noise = torch.normal(mean=0.0, std=1.0, size=self.weight.size(), device=self.weight.device, dtype=self.weight.dtype)
-
-         # Broadcast Noise
-         if distributed:
-             torch.distributed.broadcast(self.noise, 0)
-
-     def forward(self, input):
-
-         # Weight
-         weight = self.weight
-
-         # Add Noise
-         if self.noise is not None and self.training:
-             weight = weight + self.vn_std * self.noise
-
-         # Padding
-         if self.pre_padding is not None:
-             input = self.pre_padding(input)
-
-         # Apply Weight
-         return F.conv1d(input, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
-
-
- class ConvolutionModule(nn.Module):
-     """Conformer Convolution Module
-
-     Args:
-         dim_model: input feature dimension
-         dim_expand: output feature dimension
-         kernel_size: 1D depthwise convolution kernel size
-         Pdrop: residual dropout probability
-         stride: 1D depthwise convolution stride
-         padding: "valid", "same" or "causal"
-
-     Input: (batch size, input length, dim_model)
-     Output: (batch size, output length, dim_expand)
-
-     """
-
-     def __init__(self, dim_model, dim_expand, kernel_size, Pdrop, stride, padding):
-         super(ConvolutionModule, self).__init__()
-
-         # Layers
-         self.layers = nn.Sequential(
-             nn.LayerNorm(dim_model, eps=1e-6),
-             Transpose(1, 2),
-             Conv1d(dim_model, 2 * dim_expand, kernel_size=1),
-             Glu(dim=1),
-             Conv1d(dim_expand, dim_expand, kernel_size, stride=stride, padding=padding, groups=dim_expand),
-             nn.BatchNorm1d(dim_expand),
-             Swish(),
-             Conv1d(dim_expand, dim_expand, kernel_size=1),
-             Transpose(1, 2),
-             nn.Dropout(p=Pdrop)
-         )
-         self.ln = nn.LayerNorm(dim_expand)
-
-     def forward(self, x):
-         return self.ln(self.layers(x)+x)
-
-
- class BasicLayerDec(nn.Module):
-     """ A basic Swin Transformer layer for one stage.
-     Args:
-         dim (int): Number of input channels.
-         input_resolution (tuple[int]): Input resolution.
-         depth (int): Number of blocks.
-         num_heads (int): Number of attention heads.
-         window_size (int): Local window size.
-         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
-         qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
-         drop (float, optional): Dropout rate. Default: 0.0
-         attn_drop (float, optional): Attention dropout rate. Default: 0.0
-         drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
-         norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-         downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
-         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-     """
-
-     def __init__(self, dim, input_resolution, depth, num_heads, window_size,
-                  mlp_ratio=4., qkv_bias=True, qk_scale=None, drop=0., attn_drop=0.,
-                  drop_path=0., norm_layer=nn.LayerNorm, downsample=None, use_checkpoint=False,
-                  norm_before_mlp='ln'):
-
-         super().__init__()
-         self.dim = dim
-         self.input_resolution = input_resolution
-         self.depth = depth
-         self.use_checkpoint = use_checkpoint
-
-         # build blocks
-         self.blocks = nn.ModuleList([
-             SwinTransformerBlock(dim=dim, input_resolution=input_resolution,
-                                  num_heads=num_heads, window_size=window_size,
-                                  shift_size=0 if (i % 2 == 0) else window_size // 2,
-                                  mlp_ratio=mlp_ratio,
-                                  qkv_bias=qkv_bias, qk_scale=qk_scale,
-                                  drop=drop, attn_drop=attn_drop,
-                                  drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
-                                  norm_layer=norm_layer, norm_before_mlp=norm_before_mlp)
-             for i in range(depth)])
-
-         # patch merging layer
-         if downsample is not None:
-             self.downsample = downsample((input_resolution[0]//2, input_resolution[1]//2), dim=dim * 2, norm_layer=norm_layer)
-         else:
-             self.downsample = None
-
-     def forward(self, x):
-         attns = []
-         if self.downsample is not None:
-             x = self.downsample(x)
-         for blk in self.blocks:
-             if self.use_checkpoint:
-                 x = checkpoint.checkpoint(blk, x)
-             else:
-                 x, attn = blk(x)
-                 if not self.training:
-                     attns.append(attn.unsqueeze(0))
-         if not self.training:
-             attn = torch.cat(attns, dim = 0)
-             attn = torch.mean(attn, dim = 0)
-         return x, attn
-
-     def extra_repr(self):
-         return f"dim={self.dim}, input_resolution={self.input_resolution}, depth={self.depth}"
-
-
- class PatchExpand(nn.Module):
-     def __init__(self, input_resolution, dim, dim_scale=2, norm_layer=nn.LayerNorm):
-         super().__init__()
-         self.input_resolution = input_resolution
-         self.dim = dim
-         self.expand = nn.Linear(dim, 2 * dim, bias=False) if dim_scale == 2 else nn.Identity()
-         self.norm = norm_layer(dim // dim_scale)
-
-     def forward(self, x):
-         """
-         x: B, H*W, C
-         """
-         H, W = self.input_resolution
-         x = self.expand(x)
-         B, L, C = x.shape
-         assert L == H * W, "input feature has wrong size"
-
-         x = x.view(B, H, W, C)
-         # This is the original implementation in SwinUnet
-         # x = rearrange(x, 'b h w (p1 p2 c)-> b (h p1) (w p2) c', p1=2, p2=2, c=C // 4)
-
-         # here is our implementation
-         # can reverse patch-emerging in Swin-Transformer encoder, seems helpful
-         x0, x2, x1, x3 = x.chunk(4, dim=-1)
-         x = torch.stack((x0, x1, x2, x3), dim=-1)
-         x = torch.chunk(x, C // 4, dim=-2)
-         x = torch.concat(x, dim=-1).squeeze(-2)
-         x = rearrange(x, 'b h w c -> b c h w')
-         x = torch.nn.functional.pixel_shuffle(x, 2)
-         x = rearrange(x, 'b c h w -> b h w c')
-         x = x.view(B, -1, C // 4)
-         x = self.norm(x)
-
-         return x
-
-
- class InversePatchEmbed(nn.Module):
-     """
-     Patch Embedding to 2D Image.
-     """
-
-     def __init__(self, img_size=224, patch_size=16, in_chans=3, embed_dim=768, norm_layer=None, flatten=True,
-                  patch_stride=16):
-         super().__init__()
-         img_size = to_2tuple(img_size)
-         patch_size = to_2tuple(patch_size)
-         patch_stride = to_2tuple(patch_stride)
-         self.img_size = img_size
-         self.patch_size = patch_size
-         self.patch_stride = patch_stride
-         self.grid_size = (img_size[0] // patch_stride[0], img_size[1] // patch_stride[1])
-         self.num_patches = self.grid_size[0] * self.grid_size[1]
-         self.flatten = flatten
-         self.in_chans = in_chans
-         self.embed_dim = embed_dim
-
-         padding = ((patch_size[0] - patch_stride[0]) // 2, (patch_size[1] - patch_stride[1]) // 2)
-
-         self.proj = nn.ConvTranspose2d(embed_dim, in_chans, kernel_size=patch_size, stride=patch_stride, padding=padding)
-         self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()
-
-     def forward(self, x):
-         # B, C, H, W = x.shape
-         # assert H == self.img_size[0] and W == self.img_size[1], \
-         #     f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
-         x = self.norm(x)
-         if self.flatten:
-             # x = x.flatten(2).transpose(1, 2)  # BCHW -> BNC
-             x = x.transpose(1, 2).unflatten(2, self.grid_size).contiguous()  # BNC -> BCHW
-         x = self.proj(x)
-
-         return x
-
-
- class HTSAT_Decoder(nn.Module):
-     r"""HTSAT_decoder based on the Swin Transformer
-     Args:
-         spec_size (int | tuple(int)): Input Spectrogram size. Default 256
-         patch_size (int | tuple(int)): Patch size. Default: 4
-         path_stride (iot | tuple(int)): Patch Stride for Frequency and Time Axis. Default: 4
-         in_chans (int): Number of input image channels. Default: 1 (mono)
-         num_classes (int): Number of classes for classification head. Default: 527
-         embed_dim (int): Patch embedding dimension. Default: 96
-         depths (tuple(int)): Depth of each HTSAT-Swin Transformer layer.
-         num_heads (tuple(int)): Number of attention heads in different layers.
-         window_size (int): Window size. Default: 8
-         mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
-         qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
-         qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
-         drop_rate (float): Dropout rate. Default: 0
-         attn_drop_rate (float): Attention dropout rate. Default: 0
-         drop_path_rate (float): Stochastic depth rate. Default: 0.1
-         norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
-         ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
-         patch_norm (bool): If True, add normalization after patch embedding. Default: True
-         use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
-     """
-
-     def __init__(self, lan_embed_dim=512, spec_size=256, patch_size=4, patch_stride=(4, 4),
-                  in_chans=1, num_classes=527,
-                  embed_dim=48, depths=[1, 1, 1, 1], num_heads=[4, 8, 16, 32],
-                  window_size=8, mlp_ratio=4., qkv_bias=True, qk_scale=None,
-                  drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1,
-                  norm_layer=nn.LayerNorm,
-                  ape=False, patch_norm=True,
-                  use_checkpoint=False, norm_before_mlp='ln', encoder_embed_dim=96, phase=False,
-                  spec_factor=8, d_attn=640, n_masker_layer=4, conv=False):
-         super(HTSAT_Decoder, self).__init__()
-         self.mel_bins = 64
-         self.spec_size = spec_size
-         self.phase = phase
-         self.patch_stride = patch_stride
-         self.patch_size = patch_size
-         self.window_size = window_size
-         self.embed_dim = embed_dim
-         self.depths = depths
-         self.ape = ape
-         self.in_chans = in_chans
-         self.num_classes = num_classes
-         self.num_heads = num_heads
-         self.num_layers = len(self.depths)
-         self.num_features = int(self.embed_dim * 2 ** (self.num_layers - 1))
-
-         self.drop_rate = drop_rate
-         self.attn_drop_rate = attn_drop_rate
-         self.drop_path_rate = drop_path_rate
-
-         self.qkv_bias = qkv_bias
-         self.qk_scale = None
-
-         self.patch_norm = patch_norm
-         self.norm_layer = norm_layer if self.patch_norm else None
-         self.norm_before_mlp = norm_before_mlp
-         self.mlp_ratio = mlp_ratio
-
-         self.use_checkpoint = use_checkpoint
-
-         # process mel-spec ; used only once
-         self.freq_ratio = self.spec_size // self.mel_bins
-
-
-         # split spctrogram into non-overlapping patches
-         self.inverse_patch_embed = InversePatchEmbed(
-             img_size=self.spec_size, patch_size=self.patch_size, in_chans=self.in_chans,
-             embed_dim=self.embed_dim, norm_layer=self.norm_layer, patch_stride=patch_stride)
-
-         patches_resolution = self.inverse_patch_embed.grid_size
-         self.patches_resolution = patches_resolution
-
-
-         # stochastic depth
-         dpr = [x.item() for x in
-                torch.linspace(0, self.drop_path_rate, sum(self.depths))]  # stochastic depth decay rule
-
-         # build layers
-         self.layers = nn.ModuleList()
-         self.skip = nn.ModuleList()
-         for i_layer in range(self.num_layers):
-             layer = BasicLayerDec(dim=int(self.embed_dim * 2 ** i_layer),
-                                   input_resolution=(patches_resolution[0] // (2 ** i_layer),
-                                                     patches_resolution[1] // (2 ** i_layer)),
-                                   depth=self.depths[i_layer],
-                                   num_heads=self.num_heads[i_layer],
-                                   window_size=self.window_size,
-                                   mlp_ratio=self.mlp_ratio,
-                                   qkv_bias=self.qkv_bias, qk_scale=self.qk_scale,
-                                   drop=self.drop_rate, attn_drop=self.attn_drop_rate,
-                                   drop_path=dpr[sum(self.depths[:i_layer]):sum(self.depths[:i_layer + 1])],
-                                   norm_layer=self.norm_layer,
-                                   downsample=PatchExpand if (i_layer < self.num_layers - 1) else None,
-                                   use_checkpoint=use_checkpoint,
-                                   norm_before_mlp=self.norm_before_mlp)
-             self.layers.append(layer)
-             self.skip.append(
-                 SkipTrans(embed_dim=lan_embed_dim, in_features=int(encoder_embed_dim * 2 ** i_layer), out_features=int(self.embed_dim * 2 ** i_layer)),
-             )
-         self.layers = self.layers[::-1]
-         self.skip = self.skip[::-1]
-         # self.skip.append(
-         #     SkipTrans(embed_dim=lan_embed_dim, in_features=self.mel_bins, out_features=self.mel_bins),
-         # )
-
-         d_spec = self.mel_bins * spec_factor + 1
-
-         self.spec_norm = nn.BatchNorm2d(d_spec, momentum=0.01)
-         self.conv = conv
-         if not conv:
-             encoder_layer = nn.TransformerEncoderLayer(d_model=d_attn, nhead=8,
-                                                        dim_feedforward=int(d_attn * self.mlp_ratio),
-                                                        batch_first=True, dropout=0)
-             transformer_encoder = nn.TransformerEncoder(encoder_layer, num_layers=n_masker_layer)
-
-             self.mask_net = nn.Sequential(
-                 nn.Linear(self.mel_bins + d_spec, d_attn),
-                 nn.LayerNorm(d_attn),
-                 transformer_encoder,
-                 nn.Linear(d_attn, d_spec)
-             )
-         else:
-             self.mask_net = nn.Sequential(
-                 nn.Linear(self.mel_bins + d_spec, d_spec),
-                 nn.LayerNorm(d_spec),
-                 *[ConvolutionModule(dim_model=d_spec, dim_expand=d_spec, kernel_size=9, padding='same',
-                                     Pdrop=0, stride=1) for i in range(n_masker_layer)]
-             )
-         if self.phase:
-             self.phase_net = nn.Sequential(
-                 nn.Linear(self.mel_bins + d_spec, d_spec * 2),
-                 nn.LayerNorm(d_spec * 2),
-                 *[ConvolutionModule(dim_model=d_spec * 2, dim_expand=d_spec * 2, kernel_size=9, padding='same',
-                                     Pdrop=0, stride=1) for i in range(n_masker_layer)]
-             )
-
-         self.film = SkipTrans(embed_dim=lan_embed_dim, in_features=encoder_embed_dim * 8, out_features=self.num_features)
-
-         self.apply(self._init_weights)
-
-     def _init_weights(self, m):
-         if isinstance(m, nn.Linear):
-             trunc_normal_(m.weight, std=.02)
-             if isinstance(m, nn.Linear) and m.bias is not None:
-                 nn.init.constant_(m.bias, 0)
-         elif isinstance(m, nn.LayerNorm):
-             nn.init.constant_(m.bias, 0)
-             nn.init.constant_(m.weight, 1.0)
-
-     # @torch.jit.ignore
-     # def no_weight_decay(self):
-     #     return {'absolute_pos_embed'}
-     #
-     # @torch.jit.ignore
-     # def no_weight_decay_keywords(self):
-     #     return {'relative_position_bias_table'}
-
-     def forward(self, hidden_state, skip_features, embed):
-         skip_features = skip_features[::-1]
-         # hidden_state = torch.randn(hidden_state.shape).type_as(hidden_state)
-
-         spec = skip_features[-1]
-
-         h = self.film(hidden_state, embed)
-
-         for i, (layer, f, skip) in enumerate(zip(self.layers, skip_features, self.skip)):
-             h = layer(h)[0]
-             h = skip(skip=f, embed=embed, x=h)
-
-         h = self.reshape_img2wav(self.inverse_patch_embed(h)).squeeze(1)
-
-         h = h[:, :spec.size(2), :]
-
-         spec = spec.transpose(1, 3)
-
-         spec = self.spec_norm(spec).transpose(1, 3).squeeze(1)
-
-         h = torch.concat([spec, h], dim=-1)
-
-         mask = self.mask_net(h).unsqueeze(1)
-
-         if self.phase:
-             mask_r, mask_i = torch.chunk(self.phase_net(h).unsqueeze(1), chunks=2, dim=-1)
-             return torch.sigmoid(mask), torch.tanh(mask_r), torch.tanh(mask_i)
-         else:
-             return torch.sigmoid(mask)
-
-     def reshape_img2wav(self, x):
-         # (B, 1, 256, 256)
-         x = x.reshape(x.shape[0], x.shape[1], self.freq_ratio, x.shape[2]//self.freq_ratio, x.shape[3])  # (B, 1, 4, 64, 256)
-         x = x.permute(0, 1, 3, 2, 4).contiguous()
-         x = x.reshape(x.shape[0], x.shape[1], x.shape[2], x.shape[3] * x.shape[4])
-         x = x.permute(0, 1, 3, 2).contiguous()
-         return x
-
-
- # if __name__ == "__main__":
- #     import torch
- #     from msclap import CLAP
- #     import os
- #     import torchaudio
- #     import torchaudio.transforms as T
- #     import numpy as np
- #     import random
- #     from torchlibrosa import Spectrogram, LogmelFilterBank
- #     clap_model = CLAP(model_fp="/home/user/202212661/clapsep/Waveformer-main/checkpoint_path/CLAP_weights_2023.pth",
- #                       version='2023', use_cuda=True)
- #     text_data = [
- #         "Acoustic_guitar", "Applause", "Bark", "Bass_drum", "Burping_or_eructation",
- #         "Bus", "Cello", "Chime", "Clarinet", "Computer_keyboard",
- #         "Cough", "Cowbell", "Double_bass", "Drawer_open_or_close", "Electric_piano",
- #         "Fart", "Finger_snapping", "Fireworks", "Flute", "Glockenspiel",
- #         "Gong", "Gunshot_or_gunfire", "Harmonica", "Hi-hat", "Keys_jangling",
- #         "Knock", "Laughter", "Meow", "Microwave_oven", "Oboe",
- #         "Saxophone", "Scissors", "Shatter", "Snare_drum", "Squeak",
- #         "Tambourine", "Tearing", "Telephone", "Trumpet", "Violin_or_fiddle",
- #         "Writing"]
- #     # Extract text embeddings
- #     text_embeddings = clap_model.get_text_embeddings(text_data)
- #     path = "/home/user/202212661/clapsep/Waveformer-main/data/FSDSoundScapes/FSDKaggle2018/train/Tearing/2232ce13.wav"
- #     # Extract audio embeddings
- #     audio_embeddings_ = clap_model.get_audio_embeddings([path])
- #
- #     window = 'hann'
- #     center = True
- #     pad_mode = 'reflect'
- #     ref = 1.0
- #     amin = 1e-10
- #     top_db = None
- #
- #     spectrogram_extractor = Spectrogram(n_fft=512, hop_length=160,
- #                                         win_length=512, window=window, center=center, pad_mode=pad_mode,
- #                                         freeze_parameters=True).cuda()
- #     # Logmel feature extractor
- #     logmel_extractor = LogmelFilterBank(sr=16000, n_fft=512,
- #                                         n_mels=64, fmin=0, fmax=8000, ref=ref, amin=amin,
- #                                         top_db=top_db,
- #                                         freeze_parameters=True).cuda()
- #
- #     clap_model.clap.audio_encoder.base.htsat.spectrogram_extractor = spectrogram_extractor
- #     clap_model.clap.audio_encoder.base.htsat.logmel_extractor = logmel_extractor
- #
- #     features = []
- #
- #
- #     def get_features_list(module, input, output):
- #         features.append(output)
- #
- #
- #     def get_features_list_basic_layer(module, input, output):
- #         features.append(output[0])
- #
- #
- #     clap_model.clap.audio_encoder.base.htsat.patch_embed.register_forward_hook(get_features_list)
- #     for module in clap_model.clap.audio_encoder.base.htsat.layers:
- #         module.register_forward_hook(get_features_list_basic_layer)
- #
- #     audio_time_series, sample_rate = torchaudio.load(path)
- #     resample_rate = 16000
- #     if resample_rate != sample_rate:
- #         resampler = T.Resample(sample_rate, resample_rate)
- #         audio_time_series = resampler(audio_time_series)
- #
- #     sample_rate = resample_rate
- #     audio_duration = 10
- #     audio_time_series = audio_time_series.reshape(-1)
- #     if audio_duration * sample_rate >= audio_time_series.shape[0]:
- #         repeat_factor = int(np.ceil((audio_duration * sample_rate) /
- #                                     audio_time_series.shape[0]))
- #         # Repeat audio_time_series by repeat_factor to match audio_duration
- #         audio_time_series = audio_time_series.repeat(repeat_factor)
- #         # remove excess part of audio_time_series
- #         audio_time_series = audio_time_series[0:audio_duration * sample_rate]
- #     else:
- #         # audio_time_series is longer than predefined audio duration,
- #         # so audio_time_series is trimmed
- #         start_index = random.randrange(
- #             audio_time_series.shape[0] - audio_duration * sample_rate)
- #         audio_time_series = audio_time_series[start_index:start_index +
- #                                               audio_duration * sample_rate]
- #
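
The decoder's text conditioning is plain FiLM (feature-wise linear modulation): two linear layers map the concatenated positive/negative CLAP text embedding to a per-channel scale and shift that modulate the hidden state, as in the `FiLM` and `SkipTrans` modules above. Below is a self-contained sketch of the same formula with toy tensor sizes (the sizes are illustrative, not the real configuration).

```python
# Minimal FiLM sketch mirroring the FiLM module in CLAPSep_decoder.py:
# gamma(embed) * hidden_state + beta(embed), broadcast over the token axis.
import torch
from torch import nn

class FiLM(nn.Module):
    def __init__(self, dim_in=1024, hidden_dim=768):
        super().__init__()
        self.beta = nn.Linear(dim_in, hidden_dim)
        self.gamma = nn.Linear(dim_in, hidden_dim)

    def forward(self, hidden_state, embed):
        embed = embed.unsqueeze(1)  # (B, 1, dim_in), broadcast over tokens
        return self.gamma(embed) * hidden_state + self.beta(embed)

film = FiLM(dim_in=1024, hidden_dim=768)
hidden = torch.randn(2, 64, 768)  # (batch, tokens, channels) -- toy sizes
embed = torch.randn(2, 1024)      # concatenated pos/neg text embedding (512 + 512)
print(film(hidden, embed).shape)  # torch.Size([2, 64, 768])
```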
CLAPSep/model/best_model.ckpt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:6fcc8dbcd7174af86266cf16b4105eced0802352762f81dbfefdce29af3dba04
- size 177818986
CLAPSep/model/music_audioset_epoch_15_esc_90.14.pt DELETED
@@ -1,3 +0,0 @@
- version https://git-lfs.github.com/spec/v1
- oid sha256:fae3e9c087f2909c28a09dc31c8dfcdacbc42ba44c70e972b58c1bd1caf6dedd
- size 2352471003
CLAPSep/requirements.txt DELETED
@@ -1,8 +0,0 @@
- torch
- librosa
- torchaudio
- torchlibrosa
- numpy
- einops
- loralib
- laion-clap
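
After a standard `pip install -r requirements.txt`, one optional way to confirm the environment matches this list is to import each package; note that the `laion-clap` distribution is imported as `laion_clap`, as in `CLAPSep.py`. This check is a convenience sketch, not part of the original repository.

```python
# Optional sanity check (assumes the dependencies above are installed):
# import every listed package and print a version string when one is exposed.
import importlib

for name in ["torch", "librosa", "torchaudio", "torchlibrosa",
             "numpy", "einops", "loralib", "laion_clap"]:
    module = importlib.import_module(name)
    print(name, getattr(module, "__version__", "(no __version__ attribute)"))
```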