import json
from functools import lru_cache

import datasets
import pandas as pd

SUPPORTED_LANGUAGES = [
    "sl",
    "ur",
    "sw",
    "uz",
    "vi",
    "sq",
    "ms",
    "km",
    "hy",
    "da",
    "ky",
    "mg",
    "mn",
    "ja",
    "el",
    "it",
    "is",
    "ru",
    "tl",
    "so",
    "pt",
    "uk",
    "sr",
    "sn",
    "ht",
    "bs",
    "my",
    "ar",
    "hr",
    "nl",
    "bn",
    "ne",
    "hi",
    "ka",
    "az",
    "ko",
    "id",
    "fr",
    "es",
    "en",
    "fa",
    "lo",
    "iw",
    "th",
    "tr",
    "zht",
    "zhs",
    "ti",
    "tg",
    "control",
]

SYSTEMS = ["openai", "m3"]
MODES = ["qlang", "qlang_en", "en", "rel_langs"]
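
# Each (system, mode) pair below becomes one dataset split, named
# f"{system}.{mode}" (e.g. "openai.qlang").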

ROOT_DIR = "data"
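
# Sketch of the layout this loader expects under ROOT_DIR, reconstructed from
# the paths built in _split_generators (an inference from this script, not a
# documented contract):
#
#   data/
#       queries.tsv                                   # query_id <TAB> query_text
#       all_docs.json                                 # {doc_lang: {doc_id: doc_text}}
#       {lang}/{system}/{mode}/{lang}_query_hits.tsv  # ranked hits per query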


class BordIRlinesConfig(datasets.BuilderConfig):
    def __init__(self, language, n_hits=10, **kwargs):
        super().__init__(**kwargs)
        self.language = language
        self.n_hits = n_hits  # max hits kept per query in _generate_examples; falsy keeps all
        self.data_root_dir = ROOT_DIR


def load_json(path):
    with open(path, "r", encoding="utf-8") as f:
        return json.load(f)
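

# Map a per-language docs path to another language's file, e.g.
# ".../en/en_docs.json" -> ".../fr/fr_docs.json". Unused within this file;
# presumably kept for external callers.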
@lru_cache
def replace_lang_str(path, lang):
    parent = path.rsplit("/", 2)[0]
    return f"{parent}/{lang}/{lang}_docs.json"


class BordIRLinesDataset(datasets.GeneratorBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        BordIRlinesConfig(
            name=lang,
            language=lang,
            description=f"{lang.upper()} dataset",
        )
        for lang in SUPPORTED_LANGUAGES
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description="IR dataset for the BordIRLines paper.",
            features=datasets.Features(
                {
                    "query_id": datasets.Value("string"),
                    "query": datasets.Value("string"),
                    "territory": datasets.Value("string"),
                    "rank": datasets.Value("int32"),
                    "score": datasets.Value("float32"),
                    "doc_id": datasets.Value("string"),
                    "doc_text": datasets.Value("string"),
                    "doc_lang": datasets.Value("string"),
                }
            ),
        )
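
    # Builds one SplitGenerator per (system, mode) pair. all_docs.json is
    # fetched once and reused for every split; re-passing the already-local
    # docs_path through download_and_extract inside the loop is effectively
    # a pass-through.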
    def _split_generators(self, dl_manager):
        base_url = self.config.data_root_dir
        queries_path = f"{base_url}/queries.tsv"
        docs_path = dl_manager.download_and_extract(f"{base_url}/all_docs.json")

        lang = self.config.language

        splits = []
        downloaded_data = {}

        for system in SYSTEMS:
            for mode in MODES:
                source = f"{system}.{mode}"
                downloaded_data[source] = dl_manager.download_and_extract(
                    {
                        "hits": f"{base_url}/{lang}/{system}/{mode}/{lang}_query_hits.tsv",
                        "docs": docs_path,
                        "queries": queries_path,
                    }
                )

                split = datasets.SplitGenerator(
                    name=f"{system}.{mode}",
                    gen_kwargs={
                        "hits_path": downloaded_data[source]["hits"],
                        "docs_path": downloaded_data[source]["docs"],
                        "queries_path": downloaded_data[source]["queries"],
                    },
                )
                splits.append(split)

        return splits

    def _generate_examples(self, hits_path, docs_path, queries_path):
        n_hits = self.config.n_hits
        queries_df = pd.read_csv(queries_path, sep="\t")
        query_map = dict(zip(queries_df["query_id"], queries_df["query_text"]))
        counter = 0

        docs = load_json(docs_path)

        hits = pd.read_csv(hits_path, sep="\t")
        if n_hits:
            hits = hits.groupby("query_id").head(n_hits)
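
        # Sort by numeric query id, then rank. Assumes ids of the form "q<N>"
        # (a one-character prefix before an integer), which the slice strips.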
        hits["query_id_int"] = hits["query_id"].str[1:].astype(int)
        hits = hits.sort_values(by=["query_id_int", "rank"])
        hits = hits.drop(columns=["query_id_int"])

        for _, row in hits.iterrows():
            doc_id = row["doc_id"]
            doc_lang = row["doc_lang"]
            query_id = row["query_id"]
            query_text = query_map[query_id]
            yield (
                counter,
                {
                    "query_id": query_id,
                    "query": query_text,
                    "territory": row["territory"],
                    "rank": row["rank"],
                    "score": row["score"],
                    "doc_id": doc_id,
                    "doc_text": docs[doc_lang][doc_id],
                    "doc_lang": doc_lang,
                },
            )
            counter += 1
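

if __name__ == "__main__":
    # Usage sketch, not part of the loader. Assumes a `datasets` version that
    # still supports script-based builders; `trust_remote_code` exists only in
    # newer releases, so drop it on older ones. Split names follow the
    # f"{system}.{mode}" pattern from _split_generators.
    ds = datasets.load_dataset(__file__, name="en", trust_remote_code=True)
    sample = ds["openai.qlang"][0]
    print(sample["query_id"], sample["rank"], sample["doc_id"], sample["doc_lang"])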