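"""Embed the MS MARCO v2.1 segmented document corpus with Snowflake's
arctic-embed models, writing one Parquet file of (doc_id, embedding) rows
per corpus shard and per model."""
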
import json
import os

import pyarrow as pa
import pyarrow.parquet as pq
import torch
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer

file_name_prefix = "msmarco_v2.1_doc_segmented_"
path = "/home/mltraining/msmarco_v2.1_doc_segmented/"
model_names = [
    "Snowflake/snowflake-arctic-embed-l",
    "Snowflake/snowflake-arctic-embed-m-v1.5",
]
for model_name in model_names:
    print(f"Running doc embeddings using {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
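    # Load the encoder without its pooling head; pooling is done manually
    # below by taking the CLS token.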
    model = AutoModel.from_pretrained(
        model_name,
        add_pooling_layer=False,
    )
    model.eval()
    device = "cuda"
    model = model.to(device)
    # One output directory per model, e.g. .../snowflake-arctic-embed-l/
    dir_path = f"{path}{model_name.split('/')[1]}/"
    os.makedirs(dir_path, exist_ok=True)
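    # The segmented corpus is expected as 59 JSONL shards, named
    # msmarco_v2.1_doc_segmented_00.json through ..._58.json.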
    for i in range(0, 59):
        try:
            filename = f"{path}{file_name_prefix}{i:02}.json"
            filename_out = f"{dir_path}{i:02}.parquet"
            print(f"Starting doc embeddings on {filename}")
            data = []
            ids = []
            with open(filename, "r") as f:
                for line in tqdm(f, desc="Processing JSONL file"):
                    j = json.loads(line)
                    doc_id = j["docid"]
                    # Embed title + segment text; the "headings" field is not used.
                    doc_text = f"{j['title']} {j['segment']}"
                    data.append(doc_text)
                    ids.append(doc_id)

            print("Documents fully loaded")
            batch_size = 512
            # Split the documents into fixed-size batches for GPU inference.
            chunks = [data[b : b + batch_size] for b in range(0, len(data), batch_size)]
            embds = []
            for chunk in tqdm(chunks, desc="inference"):
                tokens = tokenizer(
                    chunk,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                    max_length=512,
                ).to(device)
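                # Forward pass under bfloat16 autocast; the CLS token ([:, 0])
                # is the document embedding, moved to CPU as float32.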
                with torch.autocast(
                    "cuda", dtype=torch.bfloat16
                ), torch.inference_mode():
                    embds.append(
                        model(**tokens)[0][:, 0]
                        .cpu()
                        .to(torch.float32)
                        .detach()
                        .numpy()
                    )
            del data, chunks
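            # Flatten the per-batch arrays into one embedding per document.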
            embds = [item for batch in embds for item in batch]
            out_data = []
            for emb, doc_id in zip(embds, ids):
                out_data.append({"doc_id": doc_id, "embedding": emb})
            del embds, ids
            table = pa.Table.from_pylist(out_data)
            del out_data
            pq.write_table(table, filename_out)
        except Exception as e:
            # Don't let one bad shard abort the whole run, but report the
            # failure instead of swallowing it silently.
            print(f"Failed to process shard {i:02}: {e}")
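
# The function below is a minimal illustrative sketch, not part of the
# pipeline above: it shows how one of the Parquet shards written by this
# script could be scored against a query with cosine similarity. The query
# prefix follows the arctic-embed model cards; the function name and top-k
# cutoff are hypothetical.
def example_cosine_search(query, parquet_path, model, tokenizer, device="cuda"):
    import numpy as np

    # arctic-embed applies a prefix to queries only; documents are embedded bare.
    prefix = "Represent this sentence for searching relevant passages: "
    tokens = tokenizer(
        prefix + query, return_tensors="pt", truncation=True, max_length=512
    ).to(device)
    with torch.inference_mode():
        q = model(**tokens)[0][:, 0].cpu().to(torch.float32).numpy()[0]
    table = pq.read_table(parquet_path)
    doc_ids = table.column("doc_id").to_pylist()
    embs = np.asarray(table.column("embedding").to_pylist(), dtype=np.float32)
    # Cosine similarity = dot product of L2-normalized vectors.
    q = q / np.linalg.norm(q)
    embs = embs / np.linalg.norm(embs, axis=1, keepdims=True)
    scores = embs @ q
    top = np.argsort(-scores)[:10]
    return [(doc_ids[k], float(scores[k])) for k in top]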