add split in results export (#32)
* add split in results
* remove local path
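With this change, each exported result row also carries the dataset subset it came from via a new `hf_subset` field. A minimal sketch of one yielded record under the updated schema (field names are from the diff below; the values are illustrative, not from the repo):

    # Hypothetical example of one exported row; values are illustrative only.
    row = {
        "mteb_dataset_name": "STS22",   # dataset being evaluated
        "eval_language": "en",          # language key from the results file
        "metric": "cosine_spearman",    # metric name in the new MTEB format
        "score": 67.3,                  # raw score scaled by 100 in the export
        "split": "test",                # evaluation split (or e.g. "test_avg")
        "hf_subset": "en",              # new field added by this commit
    }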
- paths.json +0 -0
- results.py +23 -12
paths.json
CHANGED
The diff for this file is too large to render. See raw diff.
results.py
CHANGED
@@ -15,7 +15,7 @@ _CITATION = """@article{muennighoff2022mteb,
   author = {Muennighoff, Niklas and Tazi, Nouamane and Magne, Lo{\"\i}c and Reimers, Nils},
   title = {MTEB: Massive Text Embedding Benchmark},
   publisher = {arXiv},
-  journal={arXiv preprint arXiv:2210.07316},
+  journal={arXiv preprint arXiv:2210.07316},
   year = {2022}
 }
 """
@@ -55,8 +55,8 @@ MODELS = [
     "Cohere-embed-multilingual-v3.0",
     "DanskBERT",
     "FollowIR-7B",
-    "
-    "
+    "GritLM-7B",
+    "GritLM-7B-noinstruct",
     "LASER2",
     "LLM2Vec-Llama-2-supervised",
     "LLM2Vec-Llama-2-unsupervised",
@@ -313,6 +313,7 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
                     "metric": datasets.Value("string"),
                     "score": datasets.Value("float"),
                     "split": datasets.Value("string"),
+                    "hf_subset": datasets.Value("string"),
                 }
             ),
             supervised_keys=None,
@@ -365,27 +366,33 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
             split = "devtest"
         elif (ds_name in TEST_AVG_SPLIT):
             # Average splits
-            res_dict
+            res_dict = {}
             for split in TEST_AVG_SPLIT[ds_name]:
                 # Old MTEB format
                 if isinstance(res_dict.get(split), dict):
                     for k, v in res_dict.get(split, {}).items():
+                        if key in ["hf_subset", "languages"]:
+                            res_dict[k] = v
+
                         v /= len(TEST_AVG_SPLIT[ds_name])
-                        if k not in res_dict
-                            res_dict[
+                        if k not in res_dict:
+                            res_dict[k] = v
                         else:
-                            res_dict[
+                            res_dict[k] += v
                 # New MTEB format
                 elif isinstance(res_dict.get(split), list):
                     assert len(res_dict[split]) == 1, "Only single-lists supported for now"
                     for k, v in res_dict[split][0].items():
+                        if key in ["hf_subset", "languages"]:
+                            res_dict[k] = v
                         if not isinstance(v, float): continue
                         v /= len(TEST_AVG_SPLIT[ds_name])
-                        if k not in res_dict
-                            res_dict[
+                        if k not in res_dict:
+                            res_dict[k] = v
                         else:
-                            res_dict[
+                            res_dict[k] += v
             split = "test_avg"
+            res_dict = {split: [res_dict]}
         elif "test" not in res_dict:
             print(f"Skipping {ds_name} as split {split} not present.")
             continue
@@ -412,7 +419,7 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
                         # Legacy format with e.g. {cosine: {spearman: ...}}
                         # Now it is {cosine_spearman: ...}
                         for k, v in score.items():
-                            if not isinstance(v, float):
+                            if not isinstance(v, float):
                                 print(f'WARNING: Expected float, got {v} for {ds_name} {lang} {metric} {k}')
                                 continue
                             if metric in SKIP_KEYS: continue
@@ -421,9 +428,10 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
                                 "eval_language": lang,
                                 "metric": metric + "_" + k,
                                 "score": v * 100,
+                                "hf_subset": subset,
                             })
                     else:
-                        if not isinstance(score, float):
+                        if not isinstance(score, float):
                             print(f'WARNING: Expected float, got {score} for {ds_name} {lang} {metric}')
                             continue
                         out.append({
@@ -432,6 +440,7 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
                             "metric": metric,
                             "score": score * 100,
                             "split": split,
+                            "hf_subset": subset,
                         })

         ### Old MTEB format ###
@@ -441,6 +450,7 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
             for lang in langs:
                 if lang in SKIP_KEYS: continue
                 test_result_lang = res_dict.get(lang) if is_multilingual else res_dict
+                subset = test_result_lang.pop("hf_subset", "")
                 for metric, score in test_result_lang.items():
                     if not isinstance(score, dict):
                         score = {metric: score}
@@ -453,6 +463,7 @@ class MTEBResults(datasets.GeneratorBasedBuilder):
                             "metric": f"{metric}_{sub_metric}" if metric != sub_metric else metric,
                             "score": sub_score * 100,
                             "split": split,
+                            "hf_subset": subset,
                         })
         for idx, row in enumerate(sorted(out, key=lambda x: x["mteb_dataset_name"])):
             yield idx, row
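For readers skimming the `TEST_AVG_SPLIT` hunk: the loop divides every float metric by the number of averaged splits and accumulates the sums, carries metadata keys such as `hf_subset` through unchanged, and finally re-wraps the merged dict in the new single-element-list format under a `test_avg` split. A standalone sketch of that accumulation (splits and scores are hypothetical; the sketch also keys the metadata check on `k` and skips the division with `continue`, which the committed code does not show):

    avg_splits = ["validation", "test"]   # splits to average, per TEST_AVG_SPLIT
    res = {                               # new-format results: split -> [dict]
        "validation": [{"hf_subset": "default", "ndcg_at_10": 0.50}],
        "test":       [{"hf_subset": "default", "ndcg_at_10": 0.75}],
    }

    merged = {}
    for split in avg_splits:
        for k, v in res[split][0].items():
            if k in ["hf_subset", "languages"]:
                merged[k] = v             # metadata is copied, not averaged
                continue
            merged[k] = merged.get(k, 0.0) + v / len(avg_splits)

    res = {"test_avg": [merged]}          # re-wrap in the new MTEB format
    # res == {"test_avg": [{"hf_subset": "default", "ndcg_at_10": 0.625}]}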