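"""Merge per-shard retrieval results into a single run and evaluate it with BEIR.

Each shard is a pickle of {query_id: {passage_id: score}}. Passage ids are
collapsed to document ids (the text before "#"), keeping the highest passage
score per document; the top-N documents per query are then scored against
the supplied qrels.
"""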
import glob
import json
import pickle
import sys
from typing import Dict
import numpy as np
from beir.retrieval.evaluation import EvaluateRetrieval
def load_qrels(filename: str) -> Dict:
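    """Load qrels from a JSON file mapping {query_id: {doc_id: relevance}}."""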
    with open(filename, "r") as f:
        qrels = json.load(f)
    return qrels

def merge_retrieved_shards(
    suffix: str, output_file: str, top_n: int, qrels: dict, metric: str
) -> None:
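    """Merge pickled retrieval shards whose filenames end with *suffix*.

    Passage scores are collapsed to a max score per document, the top_n
    documents per query are kept, the per-query *metric* and overall BEIR
    metrics are printed against *qrels*, and the run is pickled to
    *output_file*.
    """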
    shard_files = glob.glob(f"*{suffix}")
    print(f"There are {len(shard_files)} shards found")
    merged_results = {}
    print("Loading all shards")
    for shard_file in shard_files:
        print(f"Loading shard {shard_file}")
        with open(shard_file, "rb") as f:
            shard_results = pickle.load(f)
        for query_id, doc_scores in shard_results.items():
            if query_id not in merged_results:
                merged_results[query_id] = []
            merged_results[query_id].extend(doc_scores.items())
print("Shards all loaded, merging results and sorting by score")
run = {}
per_query = []
for query_id, doc_scores in merged_results.items():
if query_id in qrels:
doc_score_dict = {}
for passage_id, score in doc_scores:
doc_id = passage_id.split("#")[
0
] # everything after # is the passage idenfitier withing a doc
if doc_id not in doc_score_dict:
doc_score_dict[doc_id] = (
-1
) # scores are in range -1 to 1 on similairty so starting at -1 is floor
if score > doc_score_dict[doc_id]:
doc_score_dict[doc_id] = score
top_docs = sorted(doc_score_dict.items(), key=lambda x: x[1], reverse=True)[
:top_n
]
run[query_id] = {
doc_id: round(score * 100, 2) for doc_id, score in top_docs
}
scores = EvaluateRetrieval.evaluate(
qrels, {query_id: run[query_id]}, k_values=[1, 3, 5, 10, 100, 1000]
)
scores = {k: v for d in scores for k, v in d.items()}
per_query.append(scores[metric])
print("Done merging and sorting results, Evaluating and saving run")
print(f"There are {len(run)} queries being evaled agaisnt qrels")
print(f"There were {len(shard_files)} shards found")
print(
f"Per Query Score average: {np.array(per_query).mean()} for {metric}. Individual scores{per_query}"
)
print("Overall Score Numbers:")
print(EvaluateRetrieval.evaluate(qrels, run, k_values=[1, 3, 5, 10, 100, 1000]))
with open(output_file, "wb") as w:
pickle.dump(run, w)
if __name__ == "__main__":
    suffix = sys.argv[1]  # shard filename suffix to glob for, e.g. ".pkl"
    output_file = sys.argv[2]  # path for the pickled merged run
    top_n = int(sys.argv[3])  # number of documents to keep per query
    qrel_filename = sys.argv[4]  # path to the qrels JSON file
    metric = sys.argv[5]  # per-query metric key, e.g. "NDCG@10"
    merge_retrieved_shards(
        suffix, output_file, top_n, load_qrels(qrel_filename), metric
    )
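# Example invocation (hypothetical file names; the metric key must match one
# produced by EvaluateRetrieval.evaluate, e.g. "NDCG@10" or "Recall@100"):
#   python merge_retrieved_shards.py .pkl merged_run.pkl 1000 qrels.json NDCG@10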