---
license: mit
---
# Counting bilingual and monolingual instances
In order to count bilingual and monolingual instances, we use the following code. We count an instance as bilingual when it contains exactly two languages, one of them English and the other German, French, Spanish, Italian, Portuguese, or Dutch. All other instances fall into the "other" category.
```python
from datasets import load_dataset
import json
from tqdm import tqdm


# Specify the dataset name
dataset_name = "RaiBP/openwebtext2-first-30-chunks-lang-detect-raw-output"

# Load the dataset
bilingual_dataset = load_dataset(dataset_name, data_dir='bilingual')

dataset = bilingual_dataset["train"]
n_examples = len(dataset)
keys_dict = {}
for document in tqdm(dataset, total=n_examples):

    instance_labels = document["instance_labels"]
    instance_languages = document["instance_languages"]

    for languages in instance_languages:
        unique_languages = list(set(languages))
        lang_key = "-".join(sorted(unique_languages))
        if lang_key not in keys_dict.keys():
            keys_dict[lang_key] = 1
        else:
            keys_dict[lang_key] += 1

english_keys_list = []  # keys where "en" is present
non_english_keys_list = []  # keys where "en" is not present
for key in keys_dict.keys():
    key_list = key.split('-')
    if "en" in key_list:
        english_keys_list.append(key_list)
    else:
        non_english_keys_list.append(key_list)

# more than two languages, none of them English
nen_multi_count = 0
# one language, one of the following: de, fr, es, pt, it, nl
lang_mono_count = {'de': 0, 'fr': 0, 'es': 0, 'pt': 0, 'it': 0, 'nl': 0}
# one language, not one of the following: de, fr, es, pt, it, nl
other_mono_count = 0
# two languages, none of them English
nen_bi_count = 0
for key in non_english_keys_list:
    if len(key) > 2:
        nen_multi_count += keys_dict['-'.join(key)]
    elif len(key) == 2:
        nen_bi_count += keys_dict['-'.join(key)]
    elif len(key) == 1:
        nen_lang = key[0]
        if nen_lang in lang_mono_count.keys():
            lang_mono_count[nen_lang] += keys_dict[nen_lang]
        else:
            other_mono_count += keys_dict[nen_lang]

# more than two languages, at least one of them English
english_multi_count = 0
# one language, English
english_mono_count = 0
for key in english_keys_list:
    if len(key) == 1 and key[0] == 'en':
        english_mono_count += keys_dict[key[0]]
    if len(key) > 2:
        english_multi_count += keys_dict['-'.join(key)]

# two languages, one of them English, the other one not one of the following: de, fr, es, pt, it, nl
other_bi_count = 0
# two languages, one of them English, the other one of the following: de, fr, es, pt, it, nl
lang_bi_count = {'de': 0, 'fr': 0, 'es': 0, 'pt': 0, 'it': 0, 'nl': 0}
for key in english_keys_list:
    if len(key) == 2:
        nen_lang = key[0] if key[1] == 'en' else key[1]
        if nen_lang in lang_bi_count.keys():
            lang_bi_count[nen_lang] += keys_dict['-'.join(key)]
        else:
            other_bi_count += keys_dict['-'.join(key)]

# Save the counts for monolingual
counts_dict_monolingual = {"en": english_mono_count}
for lang in lang_mono_count.keys():
    counts_dict_monolingual[lang] = lang_mono_count[lang]
counts_dict_monolingual["other"] = other_mono_count

with open('monolingual_counts.json', 'w') as json_file:
    json.dump(counts_dict_monolingual, json_file)

# Save the counts for bilingual
counts_dict_bilingual = {}
for lang in lang_bi_count.keys():
    counts_dict_bilingual[lang] = lang_bi_count[lang]
counts_dict_bilingual["other"] = other_bi_count + nen_bi_count + english_multi_count + nen_multi_count

with open('bilingual_counts.json', 'w') as json_file:
    json.dump(counts_dict_bilingual, json_file)
```
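The bucketing key built above is simply the sorted, de-duplicated list of detected language codes joined with hyphens. The following is a minimal sketch of that step using hypothetical language lists (in the real script they come from the `instance_languages` field of each document), plus how the saved counts can be loaded back once the script has run:

```python
import json

# Hypothetical per-instance language lists, standing in for values of the
# "instance_languages" field.
examples = [
    ["en", "en", "de"],  # two languages, one of them English -> key "de-en"
    ["en"],              # monolingual English                -> key "en"
    ["fr", "nl", "en"],  # three languages                    -> counted under "other"
]
for languages in examples:
    lang_key = "-".join(sorted(set(languages)))
    print(languages, "->", lang_key)

# After the counting script has run, the saved counts can be read back:
with open("monolingual_counts.json") as json_file:
    monolingual_counts = json.load(json_file)
print(monolingual_counts)  # keys: "en", "de", "fr", "es", "pt", "it", "nl", "other"
```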
# Counting translation instances
In order to count translation instances containing English paired with German, French, Spanish, Portuguese, Italian or Dutch, we use:
```python
from datasets import load_dataset
import json