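# Embed queries from TREC / MS MARCO v2 topic files with Snowflake Arctic
# embedding models and write one Parquet file of (id, text, embedding) rows
# per model/topic-file pair.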
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from transformers import AutoModel, AutoTokenizer

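# Arctic-embed models expect this prefix on queries (not on passages) at
# retrieval time.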
query_prefix = "Represent this sentence for searching relevant passages: "
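# TREC DL 2021-2023, MS MARCO v2 doc dev/dev2, and TREC RAG 2024 dev topics.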
topic_file_names = [
    "topics.dl21.txt",
    "topics.dl22.txt",
    "topics.dl23.txt",
    "topics.msmarco-v2-doc.dev.txt",
    "topics.msmarco-v2-doc.dev2.txt",
    "topics.rag24.raggy-dev.txt",
    "topics.rag24.researchy-dev.txt",
]
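# Arctic-embed checkpoints to embed with: large, and medium v1.5.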
model_names = [
    "Snowflake/snowflake-arctic-embed-l",
    "Snowflake/snowflake-arctic-embed-m-v1.5",
]

for model_name in model_names:
    print(f"Running query embeddings using {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
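    # add_pooling_layer=False drops the BERT pooler head; the CLS token
    # embedding is read from the last hidden state instead (see below).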
    model = AutoModel.from_pretrained(
        model_name,
        add_pooling_layer=False,
    )
    model.eval()
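    # Assumes a CUDA-capable GPU; the autocast context below also targets "cuda".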
    device = "cuda"
    model = model.to(device)
    for file_name in topic_file_names:
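        # Strip the ".txt" extension for use in the output file name.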
        short_file_name = ".".join(file_name.split(".")[:-1])
        data = []
        print(f"starting on {file_name}")
        with open(file_name, "r") as f:
            for line in f:
                # Topic files are TSV: query id <tab> query text.
                fields = line.strip().split("\t")
                qid, query_text = fields[0], fields[1]
                queries_with_prefix = [f"{query_prefix}{query_text}"]
                query_tokens = tokenizer(
                    queries_with_prefix,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                    max_length=512,
                )
                # Run the model under bf16 autocast; the query embedding is the
                # CLS token ([:, 0]) of the last hidden state, cast back to
                # fp32 on the CPU. (.detach() is unnecessary inside
                # torch.inference_mode().)
                with torch.autocast(
                    "cuda", dtype=torch.bfloat16
                ), torch.inference_mode():
                    query_embeddings = (
                        model(**query_tokens.to(device))[0][:, 0]
                        .cpu()
                        .to(torch.float32)
                        .numpy()[0]
                    )
                item = {"id": qid, "text": query_text, "embedding": query_embeddings}
                data.append(item)
        # Write one Parquet file per (model, topic file) pair, e.g.
        # snowflake-arctic-embed-l-topics.dl21.parquet.
        table = pa.Table.from_pylist(data)
        pq.write_table(
            table, f"{model_name.split('/')[1]}-{short_file_name}.parquet"
        )