gorkemgoknar committed on
Commit
79987d1
•
1 Parent(s): 95d7862

Update README.md

Files changed (1)
  1. README.md +23 -4
README.md CHANGED
@@ -27,24 +27,43 @@ target_lang="tr" # change to your target lang
 
 from datasets import load_dataset
 # ted_multi is a multi-language translation dataset
-# it fits our case: not too big, and curated
+# it fits our case: not too big, and curated, but it needs a simple processing step
 
 dataset = load_dataset("ted_multi")
-
-# there is no Turkish language in europarl, so we will need to choose another dataset
 dataset.cleanup_cache_files()
 
-
+# original from Patrick's example:
 #chars_to_ignore_regex = '[,?.!\-\;\:\"“%‘”�—’…–]' # change to the ignored characters of your fine-tuned model
 
 # will use cahya/wav2vec2-base-turkish-artificial-cv
 # checked inside the model repository to find which chars were removed (no run.sh)
 chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\‘\”\'\`…\’»«]'
 
+
+import re
+
+def extract_target_lang_entries(batch):
+    # mapping specific to the ted_multi dataset:
+    # we need to find the index of the target language in each translation, as it can shift
+    try:
+        target_index_for_lang = batch["translations"]["language"].index(target_lang)
+    except ValueError:
+        # target language not in the list; set None so these entries can be filtered out later
+        batch["text"] = None
+        return batch
+
+    # index_translation_pairs = zip(batch, target_index_for_batch)
+    text = batch["translations"]["translation"][target_index_for_lang]
+    batch["text"] = re.sub(chars_to_ignore_regex, "", text.lower())
+    return batch
+
+
+# this dataset has additional columns, so we have to remove them explicitly
 cols_to_remove = ['translations', 'talk_name']
 dataset = dataset.map(extract_target_lang_entries, remove_columns=cols_to_remove)
 
 
+# during preprocessing we tagged entries without the target language as None
 dataset_cleaned = dataset.filter(lambda x: x['text'] is not None)
 dataset_cleaned
 
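For readers following the change, the new `extract_target_lang_entries` logic can be sanity-checked in isolation. Below is a minimal sketch using a made-up row in the shape the code above indexes into (parallel `language`/`translation` lists under `translations`); the sample values are illustrative, not taken from `ted_multi`:

```python
import re

chars_to_ignore_regex = '[\,\?\.\!\-\;\:\"\“\‘\”\'\`…\’»«]'
target_lang = "tr"

# Made-up row shaped like a ted_multi example: parallel lists of language
# codes and translations, where the target language's position can vary.
sample_row = {
    "translations": {
        "language": ["en", "tr", "de"],
        "translation": ["Hello, world!", "Merhaba, dünya!", "Hallo, Welt!"],
    },
    "talk_name": "example_talk",
}

# Same lookup-and-clean steps as in the mapping function above
idx = sample_row["translations"]["language"].index(target_lang)
text = sample_row["translations"]["translation"][idx]
print(re.sub(chars_to_ignore_regex, "", text.lower()))  # -> merhaba dünya
```

Rows whose `language` list does not contain `target_lang` raise `ValueError` on `.index()`, get `text = None`, and are dropped by the final `dataset.filter`.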
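As a quick check after mapping, comparing per-split row counts before and after the `None` filter shows how many talks actually carry a translation in the target language. A minimal sketch, assuming the `dataset` and `dataset_cleaned` objects from the snippet above:

```python
# Assumes `dataset` (mapped) and `dataset_cleaned` (filtered) from the README snippet.
for split in dataset_cleaned:
    kept = dataset_cleaned[split].num_rows
    total = dataset[split].num_rows
    print(f"{split}: {kept}/{total} rows have a '{target_lang}' translation")
```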