Text-to-Audio · Transformers · music · text-to-music · Inference Endpoints
Commit 386c9b1
deepanway committed
1 Parent(s): 3bc249a

upload files

beats/microsoft-deberta-v3-large.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c1498d38c9cff8fb278b0d2e7a40a7b01c90e78caf9edba137258a35987849e9
+ size 1744651736
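
The large binaries in this commit are stored with Git LFS: each committed file is only a pointer whose oid is the SHA-256 of the real blob and whose size is its byte count; `git lfs pull` fetches the actual weights. A minimal sketch (local path assumed purely for illustration) of checking a downloaded file against its pointer:

import hashlib

# Hypothetical local path; the real file arrives via `git lfs pull`.
path = "beats/microsoft-deberta-v3-large.pt"
expected_oid = "c1498d38c9cff8fb278b0d2e7a40a7b01c90e78caf9edba137258a35987849e9"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
        h.update(chunk)

assert h.hexdigest() == expected_oid, "LFS checksum mismatch"
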
chords/flan-t5-large.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6d73492576022d13ac89e2c20c2e08c87a87f0edc164d5c883c3c7e32024a8e8
+ size 3132793669
config.json ADDED
@@ -0,0 +1 @@
+ {"text_encoder_name": "google/flan-t5-large", "scheduler_name": "stabilityai/stable-diffusion-2-1", "unet_model_name": null, "unet_model_config_path": "configs/music_diffusion_model_config.json", "snr_gamma": 5.0}
configs/main_config.json ADDED
@@ -0,0 +1 @@
+ {"text_encoder_name": "google/flan-t5-large", "scheduler_name": "stabilityai/stable-diffusion-2-1", "unet_model_name": null, "unet_model_config_path": "configs/music_diffusion_model_config.json", "snr_gamma": 5.0}
configs/music_diffusion_model_config.json ADDED
@@ -0,0 +1,47 @@
+ {
+ "_class_name": "UNet2DConditionModel",
+ "_diffusers_version": "0.10.0.dev0",
+ "act_fn": "silu",
+ "attention_head_dim": [
+ 5,
+ 10,
+ 20,
+ 20
+ ],
+ "block_out_channels": [
+ 320,
+ 640,
+ 1280,
+ 1280
+ ],
+ "center_input_sample": false,
+ "cross_attention_dim": 1024,
+ "down_block_types": [
+ "CrossAttnDownBlock2DMusic",
+ "CrossAttnDownBlock2DMusic",
+ "CrossAttnDownBlock2DMusic",
+ "DownBlock2D"
+ ],
+ "downsample_padding": 1,
+ "dual_cross_attention": false,
+ "flip_sin_to_cos": true,
+ "freq_shift": 0,
+ "in_channels": 8,
+ "layers_per_block": 2,
+ "mid_block_type": "UNetMidBlock2DCrossAttnMusic",
+ "mid_block_scale_factor": 1,
+ "norm_eps": 1e-05,
+ "norm_num_groups": 32,
+ "num_class_embeds": null,
+ "only_cross_attention": false,
+ "out_channels": 8,
+ "sample_size": [32, 2],
+ "up_block_types": [
+ "UpBlock2D",
+ "CrossAttnUpBlock2DMusic",
+ "CrossAttnUpBlock2DMusic",
+ "CrossAttnUpBlock2DMusic"
+ ],
+ "use_linear_projection": true,
+ "upcast_attention": true
+ }
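
This UNet config follows the diffusers UNet2DConditionModel schema, but the *Music block types (CrossAttnDownBlock2DMusic, UNetMidBlock2DCrossAttnMusic, CrossAttnUpBlock2DMusic) appear to be custom classes shipped with this model's code rather than stock diffusers blocks. The numbers tie the other components together: in_channels and out_channels of 8 match the VAE's embed_dim below, and cross_attention_dim of 1024 matches FLAN-T5-large's hidden size. A rough sketch of exercising the same hyperparameters against plain diffusers, with the custom block names swapped for their stock counterparts (so it is not the actual model, only a shape check):

import json
import torch
from diffusers import UNet2DConditionModel

with open("configs/music_diffusion_model_config.json") as f:
    unet_cfg = json.load(f)

def strip_music(name: str) -> str:
    # Replace the repo-specific "Music" blocks with stock diffusers blocks
    # (e.g. CrossAttnDownBlock2DMusic -> CrossAttnDownBlock2D).
    return name.replace("Music", "")

unet_cfg["down_block_types"] = [strip_music(b) for b in unet_cfg["down_block_types"]]
unet_cfg["mid_block_type"] = strip_music(unet_cfg["mid_block_type"])
unet_cfg["up_block_types"] = [strip_music(b) for b in unet_cfg["up_block_types"]]

unet = UNet2DConditionModel.from_config(unet_cfg)

# Dummy latent sized for clean /8 downsampling: 8 channels (the VAE's embed_dim),
# conditioned on 1024-dim FLAN-T5-large hidden states via cross-attention.
latents = torch.randn(1, 8, 256, 16)
timestep = torch.tensor([10])
text_states = torch.randn(1, 100, 1024)
noise_pred = unet(latents, timestep, encoder_hidden_states=text_states).sample
print(noise_pred.shape)  # torch.Size([1, 8, 256, 16])
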
configs/stft_config.json ADDED
@@ -0,0 +1 @@
+ {"filter_length": 1024, "hop_length": 160, "win_length": 1024, "n_mel_channels": 64, "sampling_rate": 16000, "mel_fmin": 0, "mel_fmax": 8000}
configs/vae_config.json ADDED
@@ -0,0 +1 @@
+ {"image_key": "fbank", "subband": 1, "embed_dim": 8, "time_shuffle": 1, "ddconfig": {"double_z": true, "z_channels": 8, "resolution": 256, "downsample_time": false, "in_channels": 1, "out_ch": 1, "ch": 128, "ch_mult": [1, 2, 4], "num_res_blocks": 2, "attn_resolutions": [], "dropout": 0.0}, "scale_factor": 0.9227914214134216}
ldm/pytorch_model_ldm.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7cad0ff3dd6b346898b12b2e3627e8262684646ff82f59d0a69891e42fcaa66
+ size 7051656406
stft/pytorch_model_stft.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8674a4cc9755fafa48350bfa3412cf9b9a0d357d18289dbfd86f0fb34e1ca4db
+ size 8537803
vae/pytorch_model_vae.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7d49e1881f38bd4f4fcaaf1c56686c02fb15f75e80dec5f773ae235b2cf1b61b
+ size 442713669