cahya committed on
Commit
6087af1
·
1 Parent(s): 878a84a

updated the readme

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -124,8 +124,8 @@ def load_dataset_sundanese():
124
 
125
  dfs = []
126
 
127
- dfs.append(pd.read_csv(filenames[0], sep='\\\\\\\\\\\\\\\\t\\\\\\\\\\\\\\\\t', names=["path", "sentence"]))
128
- dfs.append(pd.read_csv(filenames[1], sep='\\\\\\\\\\\\\\\\t\\\\\\\\\\\\\\\\t', names=["path", "sentence"]))
129
 
130
  for i, dir in enumerate(data_dirs):
131
  dfs[i]["path"] = dfs[i].apply(lambda row: str(data_dirs[i]) + "/" + row + ".wav", axis=1)
@@ -145,7 +145,7 @@ processor = Wav2Vec2Processor.from_pretrained("cahya/wav2vec2-large-xlsr-sundane
145
  model = Wav2Vec2ForCTC.from_pretrained("cahya/wav2vec2-large-xlsr-sundanese")
146
  model.to("cuda")
147
 
148
- chars_to_ignore_regex = '[\\\\,\\\\?\\\\.\\\\!\\\\-\\\\;\\\\:\\\\"\\\\“\\\\%\\\\‘\\\\'\\\\”_\\\\�]'
149
  resampler = torchaudio.transforms.Resample(48_000, 16_000)
150
 
151
  # Preprocessing the datasets.
 
124
 
125
  dfs = []
126
 
127
+ dfs.append(pd.read_csv(filenames[0], sep='\t\t', names=["path", "sentence"]))
128
+ dfs.append(pd.read_csv(filenames[1], sep='\t\t', names=["path", "sentence"]))
129
 
130
  for i, dir in enumerate(data_dirs):
131
  dfs[i]["path"] = dfs[i].apply(lambda row: str(data_dirs[i]) + "/" + row + ".wav", axis=1)
 
145
  model = Wav2Vec2ForCTC.from_pretrained("cahya/wav2vec2-large-xlsr-sundanese")
146
  model.to("cuda")
147
 
148
+ chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\%\‘\'\”_\�]'
149
  resampler = torchaudio.transforms.Resample(48_000, 16_000)
150
 
151
  # Preprocessing the datasets.