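"""Shard-level dense retrieval over parquet embedding files.

Loads one shard of document embeddings, builds either a FAISS GPU index or a
plain numpy similarity matrix, scores each query set by cosine similarity,
and pickles the top-scoring documents per query.
"""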
import pickle
import sys

import faiss
import numpy as np
import pyarrow.parquet as pq
import torch
import torch.nn.functional as F

def main(
    path: str, query_prefix: str, shard_num: int, retrieval_depth: int, num_dim: int, use_faiss_gpu: bool = False
) -> None:
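    """Run retrieval for every query set against one document shard.

    Args:
        path: Path prefix for the shard and query parquet files.
        query_prefix: Filename prefix of the query embedding files.
        shard_num: Shard number, zero-padded into the shard filename.
        retrieval_depth: Number of documents to retrieve per query.
        num_dim: Embedding dimensions to keep; vectors are truncated to this.
        use_faiss_gpu: If True, search a FAISS GPU index; otherwise use numpy.
    """
    # TREC DL, MS MARCO v2, and RAG24 topic sets to retrieve for.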
    query_filenames = [
        "topics.dl21.parquet",
        "topics.msmarco-v2-doc.dev2.parquet",
        "topics.dl22.parquet",
        "topics.rag24.raggy-dev.parquet",
        "topics.dl23.parquet",
        "topics.rag24.researchy-dev.parquet",
        "topics.msmarco-v2-doc.dev.parquet",
    ]
    shard_filename = f"{path}{shard_num:02}.parquet"
    print(f"Starting retrieval on Chunk {shard_num} for {shard_filename}")
    doc_embeddings = []
    idx2docid = {}
    print("Reading Document Embeddings File")
    table = pq.read_table(shard_filename)
    print(f"Chunk {shard_filename} loaded with {len(table)} documents")
    for idx in range(len(table)):
        # Column 0 holds the document id, column 1 the embedding vector.
        doc_id = str(table[0][idx])
        doc_embeddings.append(table[1][idx].as_py()[:num_dim])
        idx2docid[idx] = doc_id

    # Convert through a single numpy array; torch.tensor on a list of lists is slow.
    doc_embeddings = torch.tensor(np.array(doc_embeddings), dtype=torch.float32)
    print(f"Embeddings loaded. Size {doc_embeddings.shape}")
    # L2-normalize so that inner product equals cosine similarity.
    doc_embeddings = F.normalize(doc_embeddings, p=2, dim=1)
    print("Document Embeddings normalized")

    # Never request more results than the shard holds.
    retrieval_depth = min(retrieval_depth, len(idx2docid))

    if use_faiss_gpu:
        # Build an inner-product FAISS index on GPU. With L2-normalized
        # vectors, inner product is cosine similarity, so scores match the
        # numpy branch below (IndexFlatL2 would return distances instead).
        # Keep the resources object alive for the lifetime of the index.
        gpu_resources = faiss.StandardGpuResources()
        index = faiss.IndexFlatIP(num_dim)
        index = faiss.index_cpu_to_gpu(gpu_resources, 0, index)
        index.add(doc_embeddings.numpy())
        print("Document Embeddings loaded into FAISS index")
    else:
        # Brute-force scoring with numpy dot products.
        doc_embeddings_numpy = doc_embeddings.numpy()

    for query_filename in query_filenames:
        query_embeddings = []
        retrieved_results = {}
        idx2query_id = {}
        query_filename_full = f"{path}{query_prefix}{query_filename}"
        print(f"Retrieving from {shard_filename} for query set {query_filename_full}")
        print("Loading Query Embedding file")
        table = pq.read_table(query_filename_full)
        print("Done loading parquet query file")
        for idx in range(len(table)):
            # Column 0 holds the query id, column 2 the embedding vector.
            query_id = str(table[0][idx])
            query_embeddings.append(table[2][idx].as_py()[:num_dim])
            idx2query_id[idx] = query_id
        query_embeddings = torch.tensor(np.array(query_embeddings), dtype=torch.float32)
        query_embeddings = F.normalize(query_embeddings, p=2, dim=1)
        print(f"Query Embeddings loaded with size {query_embeddings.shape}")

        if use_faiss_gpu:
            # Search the FAISS index on GPU; scores are inner products (cosine).
            similarities, indices = index.search(query_embeddings.numpy(), retrieval_depth)
            for idx in range(query_embeddings.shape[0]):
                qid = idx2query_id[idx]
                retrieved_results[qid] = {}
                for jdx in range(retrieval_depth):
                    idx_doc = int(indices[idx, jdx])
                    doc_id = idx2docid[idx_doc]
                    retrieved_results[qid][doc_id] = float(similarities[idx, jdx])
        else:
            # One matrix product scores every query against every document;
            # with normalized vectors these are cosine similarities.
            all_similarities = query_embeddings.numpy() @ doc_embeddings_numpy.T
            for idx in range(query_embeddings.shape[0]):
                similarities = all_similarities[idx]
                top_n = np.argsort(-similarities)[:retrieval_depth]
                qid = idx2query_id[idx]
                retrieved_results[qid] = {}
                for jdx in range(retrieval_depth):
                    idx_doc = int(top_n[jdx])
                    doc_id = idx2docid[idx_doc]
                    retrieved_results[qid][doc_id] = float(similarities[idx_doc])

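        # Persist the run as {query_id: {doc_id: score}}.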
        with open(f"{shard_num}-{query_prefix}{num_dim}-{query_filename}", "wb") as w:
            pickle.dump(retrieved_results, w)


if __name__ == "__main__":
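    # Usage: python <script>.py PATH QUERY_PREFIX SHARD_NUM RETRIEVAL_DEPTH NUM_DIM USE_FAISS_GPU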
    path = sys.argv[1]
    query_prefix = sys.argv[2]
    shard_num = int(sys.argv[3])
    retrieval_depth = int(sys.argv[4])
    num_dim = int(sys.argv[5])
    use_faiss_gpu = bool(int(sys.argv[6]))  # 0 for numpy, 1 for FAISS GPU
    main(path, query_prefix, shard_num, retrieval_depth, num_dim, use_faiss_gpu)