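"""Embed the MS MARCO v2.1 segmented document shards with the Snowflake
arctic-embed models, writing one Parquet file of (doc_id, embedding)
rows per input shard."""
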
import json
import os

import pyarrow as pa
import pyarrow.parquet as pq
import torch
from tqdm import tqdm
from transformers import AutoModel, AutoTokenizer

file_name_prefix = "msmarco_v2.1_doc_segmented_"
path = "/home/mltraining/msmarco_v2.1_doc_segmented/"
model_names = [
    "Snowflake/snowflake-arctic-embed-l",
    "Snowflake/snowflake-arctic-embed-m-v1.5",
]

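# Embed the full corpus once per model; each model writes to its own output directory.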
for model_name in model_names:
    print(f"Running doc embeddings using {model_name}")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # Skip the pooling head; the CLS-token hidden state is taken directly below.
    model = AutoModel.from_pretrained(
        model_name,
        add_pooling_layer=False,
    )
    model.eval()
    device = "cuda"
    model = model.to(device)
    dir_path = f"{path}{model_name.split('/')[1]}/"
    os.makedirs(dir_path, exist_ok=True)
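    # Shard files are named msmarco_v2.1_doc_segmented_00.json through _58.json.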
    for i in range(0, 59):
        try:
            filename = f"{path}{file_name_prefix}{i:02}.json"
            filename_out = f"{dir_path}{i:02}.parquet"
            print(f"Starting doc embeddings on {filename}")
            # Load the shard: one JSON object per line.
            data = []
            ids = []
            with open(filename, "r") as f:
                for line in tqdm(f, desc="Processing JSONL file"):
                    j = json.loads(line)
                    doc_id = j["docid"]
                    text = j["segment"]
                    title = j["title"]
                    heading = j["headings"]  # parsed but unused
                    doc_text = f"{title} {text}"  # title-prefixed segment text
                    data.append(doc_text)
                    ids.append(doc_id)
            print("Documents fully loaded")
            batch_size = 512
            chunks = [
                data[start : start + batch_size]
                for start in range(0, len(data), batch_size)
            ]
            embds = []
            for chunk in tqdm(chunks, desc="inference"):
                tokens = tokenizer(
                    chunk,
                    padding=True,
                    truncation=True,
                    return_tensors="pt",
                    max_length=512,
                ).to(device)
                # bf16 autocast plus inference_mode for fast, grad-free encoding;
                # take the CLS-token embedding and cast back to float32 on the CPU.
                with torch.autocast(
                    "cuda", dtype=torch.bfloat16
                ), torch.inference_mode():
                    embds.append(
                        model(**tokens)[0][:, 0]
                        .cpu()
                        .to(torch.float32)
                        .numpy()
                    )
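            # Free the raw text, flatten the per-batch arrays to one row per
            # document, and write the shard as Parquet.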
            del data, chunks
            embds = [item for batch in embds for item in batch]
            out_data = []
            for emb, doc_id in zip(embds, ids):
                out_data.append({"doc_id": doc_id, "embedding": emb})
            del embds, ids
            table = pa.Table.from_pylist(out_data)
            del out_data
            pq.write_table(table, filename_out)
        except Exception as e:
            # Report and skip shards that fail instead of silently swallowing errors.
            print(f"Skipping shard {i:02}: {e}")
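
# Example (illustrative, not part of the pipeline): read one output shard back
# with pyarrow to check the result.
#   import pyarrow.parquet as pq
#   t = pq.read_table(path + "snowflake-arctic-embed-l/00.parquet")
#   print(t.column_names)  # ['doc_id', 'embedding']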