File size: 6,134 Bytes
d8e99e8 c8a6ae5 d8e99e8 f86cb48 bd26805 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 |
---
license: mit
language:
- en
- de
- fr
- es
- it
- pt
- nl
---
# Counting bilingual and monolingual instances
To count bilingual and monolingual instances, we use the following code. An instance is counted as bilingual for a given language when it contains exactly two languages, one of which is English and the other German, French, Spanish, Italian, Portuguese, or Dutch. All other instances fall into the "other" category.
```python
from datasets import load_dataset
import json
from collections import Counter
from tqdm import tqdm

# Languages counted individually, either alone (monolingual) or
# paired with English (bilingual). Everything else goes to "other".
TARGET_LANGS = ('de', 'fr', 'es', 'pt', 'it', 'nl')

# Specify the dataset name
dataset_name = "RaiBP/openwebtext2-first-30-chunks-lang-detect-raw-output"

# Load the bilingual split of the dataset
bilingual_dataset = load_dataset(dataset_name, data_dir='bilingual')
dataset = bilingual_dataset["train"]
n_examples = len(dataset)

# Count instances per language-combination key. The key is the sorted,
# de-duplicated set of languages joined with '-', e.g. "de-en".
keys_dict = Counter()
for document in tqdm(dataset, total=n_examples):
    for languages in document["instance_languages"]:
        keys_dict["-".join(sorted(set(languages)))] += 1

english_keys_list = []      # keys where "en" is present
non_english_keys_list = []  # keys where "en" is not present
for key in keys_dict:
    key_list = key.split('-')
    if "en" in key_list:
        english_keys_list.append(key_list)
    else:
        non_english_keys_list.append(key_list)

# Counters for instances that do not involve English.
nen_multi_count = 0  # more than two languages, none of them English
nen_bi_count = 0     # exactly two languages, none of them English
lang_mono_count = {lang: 0 for lang in TARGET_LANGS}  # one target language
other_mono_count = 0  # one language, not a target language

for key in non_english_keys_list:
    count = keys_dict['-'.join(key)]
    if len(key) > 2:
        nen_multi_count += count
    elif len(key) == 2:
        nen_bi_count += count
    else:  # single language (split always yields at least one element)
        nen_lang = key[0]
        if nen_lang in lang_mono_count:
            lang_mono_count[nen_lang] += count
        else:
            other_mono_count += count

# Counters for instances that involve English.
english_multi_count = 0  # more than two languages, at least one English
english_mono_count = 0   # exactly one language: English
for key in english_keys_list:
    if len(key) == 1 and key[0] == 'en':
        english_mono_count += keys_dict[key[0]]
    if len(key) > 2:
        english_multi_count += keys_dict['-'.join(key)]

other_bi_count = 0  # two languages: English + a non-target language
lang_bi_count = {lang: 0 for lang in TARGET_LANGS}  # English + target language
for key in english_keys_list:
    if len(key) == 2:
        count = keys_dict['-'.join(key)]
        nen_lang = key[0] if key[1] == 'en' else key[1]
        if nen_lang in lang_bi_count:
            lang_bi_count[nen_lang] += count
        else:
            other_bi_count += count

# Save the counts for monolingual instances (key order: en, targets, other).
counts_dict_monolingual = {"en": english_mono_count, **lang_mono_count,
                           "other": other_mono_count}
with open('monolingual_counts.json', 'w') as json_file:
    json.dump(counts_dict_monolingual, json_file)

# Save the counts for bilingual instances. Every combination that is not
# exactly English + one target language is folded into "other".
counts_dict_bilingual = {**lang_bi_count,
                         "other": (other_bi_count + nen_bi_count
                                   + english_multi_count + nen_multi_count)}
with open('bilingual_counts.json', 'w') as json_file:
    json.dump(counts_dict_bilingual, json_file)
```
# Counting translation instances
In order to count translation instances containing English paired with German, French, Spanish, Portuguese, Italian or Dutch, we use:
```python
from datasets import load_dataset
import json
from collections import Counter, defaultdict
from tqdm import tqdm

# Specify the dataset name
dataset_name = "RaiBP/openwebtext2-first-30-chunks-lang-detect-raw-output"

# Load the translation split of the dataset
translation_dataset = load_dataset(dataset_name, data_dir="translation")
dataset = translation_dataset["train"]
n_examples = len(dataset)

# Per-language counts for English paired with a target language.
counts_dict = {"de": 0, "fr": 0, "es": 0, "pt": 0, "it": 0, "nl": 0}
others_count = 0

# Group the "embedded-primary" label pairs by (document, instance).
instances = defaultdict(list)
for document in tqdm(dataset, total=n_examples):
    instance_key = f'{document["document_index"]}-{document["instance_index"]}'
    instances[instance_key].append(
        f'{document["embedded_label"]}-{document["primary_label"]}')

for instance_key, labels in instances.items():
    # state 0: nothing classified yet;
    # state 1: found English + a target language (wins over state 2);
    # state 2: found only non-English or English + non-target pairs.
    state = 0
    found_langs = []
    for langs in labels:
        lang_pair = langs.split("-")
        if "en" in lang_pair:
            non_english = lang_pair[0] if lang_pair[1] == "en" else lang_pair[1]
            if non_english in counts_dict:
                state = 1  # English paired with a target language
                found_langs.append(non_english)
            elif state != 1:
                state = 2  # English paired with a non-target language
        elif state != 1:
            state = 2  # translation pair without English
    if state == 1:
        # Attribute the instance to its most frequent target language
        # (ties resolved deterministically by first appearance).
        majority_lang = Counter(found_langs).most_common(1)[0][0]
        counts_dict[majority_lang] += 1
    elif state == 2:
        others_count += 1
    else:
        print("Error: state is 0")

# Specify the file path where you want to save the JSON file
file_path = "translation_counts.json"
counts_dict["others"] = others_count
# Save the dictionary as a JSON file
with open(file_path, "w") as json_file:
    json.dump(
        counts_dict, json_file, indent=2
    )  # indent argument is optional, but it makes the file more human-readable
``` |