sino committed
Commit 99f673d · Parent: 8fb04ef

Update README.md

Files changed (1):
  1. README.md +6 -6
README.md CHANGED
@@ -40,8 +40,8 @@ device = model.device
 # sample rate: 16k
 music_path = '/path/to/music.wav'
 
-# extract logmel spectrogram
-# 1. parameters
+# 1. extract logmel spectrogram
+# 1.1. parameters
 class FFT_parameters:
     sample_rate = 16000
     window_size = 400
@@ -51,7 +51,7 @@ class FFT_parameters:
     f_min = 50
     f_max = 8000
 prms = FFT_parameters()
-# 2. extract
+# 1.2. extract
 import nnAudio.Spectrogram
 import librosa
 to_spec = nnAudio.Spectrogram.MelSpectrogram(
@@ -69,7 +69,7 @@ to_spec = nnAudio.Spectrogram.MelSpectrogram(
 wav, ori_sr = librosa.load(music_path, mono=True, sr=prms.sample_rate)
 lms = to_spec(torch.tensor(wav))
 lms = (lms + torch.finfo().eps).log().to(device)
-# 3. processing
+# 1.3. processing
 import os
 from torch.nn.utils.rnn import pad_sequence
 import random
@@ -83,7 +83,7 @@ lms = lms.numpy()
 for trans in transforms:
     lms = trans(lms)
 
-# template of input
+# 2. template of input
 input = dict()
 input['filenames'] = [music_path.split('/')[-1]]
 input['ans_crds'] = [0]
@@ -91,7 +91,7 @@ input['audio_crds'] = [0]
 input['attention_mask'] = torch.tensor([[1, 1, 1, 1, 1]]).to(device)
 input['input_ids'] = torch.tensor([[1, 694, 5777, 683, 13]]).to(device)
 input['spectrogram'] = torch.from_numpy(lms).unsqueeze(dim=0).to(device)
-# generation
+# 3. generation
 model.eval()
 gen_ids = model.forward_test(input)
 gen_text = model.neck.tokenizer.batch_decode(gen_ids.clip(0))
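
The hunks above elide the remaining FFT parameters and the MelSpectrogram arguments (README lines 48-50 and 58-68). For readers skimming the commit, here is a self-contained sketch of step 1 (log-mel extraction), assuming nnAudio's standard keyword names; the n_fft, hop_size, and n_mels values are illustrative placeholders, not the repository's actual settings:

# Sketch of step 1 end to end, assuming nnAudio's standard keyword names.
# n_fft, hop_size, and n_mels are hypothetical placeholders; the real values
# are in the README lines the diff does not show.
import librosa
import nnAudio.Spectrogram
import torch

class FFT_parameters:
    sample_rate = 16000
    window_size = 400
    n_fft = 400       # hypothetical
    hop_size = 160    # hypothetical
    n_mels = 80       # hypothetical
    f_min = 50
    f_max = 8000
prms = FFT_parameters()

to_spec = nnAudio.Spectrogram.MelSpectrogram(
    sr=prms.sample_rate,
    n_fft=prms.n_fft,
    win_length=prms.window_size,
    hop_length=prms.hop_size,
    n_mels=prms.n_mels,
    fmin=prms.f_min,
    fmax=prms.f_max,
)
music_path = '/path/to/music.wav'
wav, ori_sr = librosa.load(music_path, mono=True, sr=prms.sample_rate)
lms = to_spec(torch.tensor(wav))       # shape: (1, n_mels, n_frames)
lms = (lms + torch.finfo().eps).log()  # eps keeps log() away from -inf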
 
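Step 1.3 then applies a list of callables (transforms) to the spectrogram; the list itself sits in elided README lines. A minimal sketch of that pattern, with a hypothetical pad-or-crop transform standing in for whatever the repository actually uses:

# The transforms pipeline pattern from step 1.3: each entry is a callable
# taking and returning a numpy array. pad_or_crop is hypothetical; the real
# transforms are defined in README lines the diff does not show.
import numpy as np

def pad_or_crop(target_frames):
    def _apply(lms):
        # lms: (1, n_mels, n_frames)
        short = target_frames - lms.shape[-1]
        if short > 0:
            lms = np.pad(lms, ((0, 0), (0, 0), (0, short)))
        return lms[..., :target_frames]
    return _apply

transforms = [pad_or_crop(target_frames=2000)]  # hypothetical length
lms = lms.cpu().numpy()  # back to numpy before the numpy-based transforms
for trans in transforms:
    lms = trans(lms)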
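Steps 2 and 3 batch the spectrogram and decode; model and device come from the earlier README lines (device = model.device). The calls below are the same ones the diff shows, gathered into one block with torch.no_grad() added since this is pure inference:

# Steps 2-3: build the input template and generate (calls as in the README).
input = dict()
input['filenames'] = [music_path.split('/')[-1]]
input['ans_crds'] = [0]
input['audio_crds'] = [0]
input['attention_mask'] = torch.tensor([[1, 1, 1, 1, 1]]).to(device)
input['input_ids'] = torch.tensor([[1, 694, 5777, 683, 13]]).to(device)
input['spectrogram'] = torch.from_numpy(lms).unsqueeze(dim=0).to(device)

model.eval()
with torch.no_grad():  # inference only; no gradients needed
    gen_ids = model.forward_test(input)
gen_text = model.neck.tokenizer.batch_decode(gen_ids.clip(0))
print(gen_text)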