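# Embed TREC / MS MARCO topic queries with Snowflake Arctic Embed models and write
# one parquet file of {id, text, embedding} rows per (model, topic file) pair.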
import pyarrow as pa
import pyarrow.parquet as pq
import torch
from transformers import AutoModel, AutoTokenizer
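# Arctic Embed models expect this prefix on queries at inference time
# (documents are embedded without it).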
query_prefix = "Represent this sentence for searching relevant passages: "
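# Topic files to embed: TREC DL 2021-2023, MS MARCO v2 doc dev sets, and TREC RAG 2024.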
topic_file_names = [
"topics.dl21.txt",
"topics.dl22.txt",
"topics.dl23.txt",
"topics.msmarco-v2-doc.dev.txt",
"topics.msmarco-v2-doc.dev2.txt",
"topics.rag24.raggy-dev.txt",
"topics.rag24.researchy-dev.txt",
]
model_names = [
"Snowflake/snowflake-arctic-embed-l",
"Snowflake/snowflake-arctic-embed-m-v1.5",
]
for model_name in model_names:
print(f"Running query embeddings using {model_name}")
tokenizer = AutoTokenizer.from_pretrained(model_name)
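    # add_pooling_layer=False loads the bare encoder; CLS pooling is done manually below.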
model = AutoModel.from_pretrained(
model_name,
add_pooling_layer=False,
)
model.eval()
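    # Move the model to GPU; the bf16 autocast in the embedding loop assumes CUDA.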
device = "cuda"
model = model.to(device)
for file_name in topic_file_names:
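        # Strip the ".txt" extension; it names the output parquet file below.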
short_file_name = ".".join(file_name.split(".")[:-1])
data = []
print(f"starting on {file_name}")
with open(file_name, "r") as f:
for line in f:
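                # Each line is "qid<TAB>query text".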
                qid, query_text = line.strip().split("\t")[:2]
                queries_with_prefix = [f"{query_prefix}{query_text}"]
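                # Tokenize the prefixed query, truncating to the model's 512-token limit.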
query_tokens = tokenizer(
queries_with_prefix,
padding=True,
truncation=True,
return_tensors="pt",
max_length=512,
)
                # Embed the query under bf16 autocast; pool by taking the CLS token ([:, 0])
with torch.autocast(
"cuda", dtype=torch.bfloat16
), torch.inference_mode():
                    # No .detach() needed: inference_mode already disables grad tracking
                    query_embeddings = (
                        model(**query_tokens.to(device))[0][:, 0]
                        .cpu()
                        .to(torch.float32)
                        .numpy()[0]
                    )
item = {"id": qid, "text": query_text, "embedding": query_embeddings}
data.append(item)
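        # All queries from this topic file are embedded; write them to
        # "<model>-<topic>.parquet", e.g. "snowflake-arctic-embed-l-topics.dl21.parquet".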
table = pa.Table.from_pylist(data)
pq.write_table(
table, f"{model_name.split('/')[1]}-{short_file_name}.parquet"
)
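
# Optional sanity check (a sketch, not part of the pipeline): read one output file
# back and report the number of queries and the embedding dimensionality
# (1024 for arctic-embed-l, 768 for arctic-embed-m-v1.5).
sample = pq.read_table("snowflake-arctic-embed-l-topics.dl21.parquet").to_pylist()
print(len(sample), len(sample[0]["embedding"]))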