harisarang committed
Commit e42d8a5
Parent(s): d8afbed

add: jsonl datasets
Browse files
- .gitattributes +3 -0
- item_embeds.jsonl +3 -0
- items.jsonl +3 -0
- test.jsonl +0 -0
- test_user_embeds.jsonl +0 -0
- train.jsonl +0 -0
- train_user_embeds.jsonl +3 -0
- trainer.py +303 -0
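
The four data splits are line-delimited JSON; the larger files are stored through Git LFS (see the .gitattributes change below). A minimal loading sketch, mirroring how trainer.py below reads these files; the "click" event value is a made-up placeholder:

import pandas as pd

# One JSON record per line in each split.
train = pd.read_json("train.jsonl", lines=True)
items = pd.read_json("items.jsonl", lines=True)

# trainer.py keeps only interactions of a single event type:
train = train[train["event"] == "click"].reset_index(drop=True)
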
.gitattributes
CHANGED
@@ -56,3 +56,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+item_embeds.jsonl filter=lfs diff=lfs merge=lfs -text
+items.jsonl filter=lfs diff=lfs merge=lfs -text
+train_user_embeds.jsonl filter=lfs diff=lfs merge=lfs -text
item_embeds.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:532fbf779ba0eb43e912ba2b71d5a365ef81338cfa15b21f48a73dd9a9ad299b
+size 131523872
items.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe287350120532df4faebc2800fcf3a12fd1b6c9c64aef297201505ca526cc02
+size 22533508
test.jsonl
ADDED
The diff for this file is too large to render. See raw diff.

test_user_embeds.jsonl
ADDED
The diff for this file is too large to render. See raw diff.

train.jsonl
ADDED
The diff for this file is too large to render. See raw diff.
train_user_embeds.jsonl
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3aba551cdd6686fbde99e38a7e5e6c15d4d39e29a2dd767e9de390e2e1da25d6
+size 29748871
trainer.py
ADDED
@@ -0,0 +1,303 @@
from os import environ, path
from transformers import BertTokenizerFast, FlaxAutoModel
import jax.numpy as jnp
import jax
from flax.training.train_state import TrainState
import pandas as pd
from tyrec.trainer import BaseTrainer, loss, HFConfig
from tyrec.recommendations.model import RecommendationModel
from tyrec.utils import compute_mean, logger, trange
from tyrec.evaluator import RetrivalEvaluator


class RecommendationsTrainer(BaseTrainer):
    def __init__(
        self,
        hf_config: HFConfig,
        data_dir,
        event_to_train,
        threshold=0,
        dimensions=0,
        other_features=False,
        *args,
        **kwargs,
    ):
        self.model_name = hf_config.model_name
        self.data_dir = data_dir
        self.query_prompt = hf_config.query_prompt
        self.doc_prompt = hf_config.doc_prompt
        self.event_to_train = event_to_train
        self.other_features = other_features
        self.text_encoder = FlaxAutoModel.from_pretrained(self.model_name)
        self.tokenizer = BertTokenizerFast.from_pretrained(self.model_name)
        # Resolve data files relative to Google Drive when running on Colab.
        self.base_path = (
            "/content/drive/MyDrive/"
            if "COLAB_ENV" in environ and environ["COLAB_ENV"] == "true"
            else "./"
        )
        self.train_file = path.join(self.base_path, data_dir, "train.jsonl")
        self.eval_file = path.join(self.base_path, data_dir, "test.jsonl")
        self.items_file = path.join(self.base_path, data_dir, "items.jsonl")
        self.item_embeddings_file = path.join(
            self.base_path, data_dir, "item_embeds.jsonl"
        )
        self.train_user_embeddings_file = path.join(
            self.base_path, data_dir, "train_user_embeds.jsonl"
        )
        self.test_user_embeddings_file = path.join(
            self.base_path, data_dir, "test_user_embeds.jsonl"
        )
        # Fall back to the encoder's hidden size when no projection size is given.
        self.dimensions = (
            dimensions if dimensions > 0 else self.text_encoder.config.hidden_size
        )
        model = RecommendationModel(self.dimensions)
        super().__init__(*args, model=model, **kwargs)
        # Keep only interactions of the event type being trained on.
        self.dataloader = pd.read_json(self.train_file, lines=True)
        self.dataloader = self.dataloader[
            self.dataloader["event"] == self.event_to_train
        ].reset_index(drop=True)
        self.test_dataset = pd.read_json(self.eval_file, lines=True)
        self.test_dataset = self.test_dataset[
            self.test_dataset["event"] == self.event_to_train
        ].reset_index(drop=True)
        # Restrict the item catalogue to items that occur in train or test.
        unique_did = (
            pd.concat([self.dataloader, self.test_dataset], ignore_index=True)["did"]
            .unique()
            .tolist()
        )
        self.items = pd.read_json(self.items_file, lines=True)
        self.items = self.items[self.items["did"].isin(unique_did)].reset_index(
            drop=True
        )
        self.threshold = threshold
        self.evaluator: RetrivalEvaluator | None = None
        self.item_embeds = []
        self.train_user_embeds = []
        self.train_label_embeds = []
        self.test_user_embeds = []
        self.test_label_embeds = []
        self.rng = jax.random.PRNGKey(0)

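    # For reference, a construction sketch. Only model_name, query_prompt, and
    # doc_prompt are read from HFConfig above; every concrete value below (the
    # checkpoint name, the event type, any remaining BaseTrainer kwargs) is a
    # hypothetical placeholder:
    #
    #   config = HFConfig(
    #       model_name="sentence-transformers/all-MiniLM-L6-v2",
    #       query_prompt="",
    #       doc_prompt="",
    #   )
    #   trainer = RecommendationsTrainer(config, data_dir="./", event_to_train="click")
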
    def embed_items(self, examples):
        # Encode item texts (prefixed with the document prompt) and mean-pool
        # the encoder output into one L2-normalized vector per item.
        texts = [self.doc_prompt + x for x in examples["text"].tolist()]
        tokens = self.tokenizer(
            texts,
            truncation=True,
            padding="max_length",
            return_tensors="jax",
            max_length=70,
        )
        embeddings = self.text_encoder(**tokens).last_hidden_state
        embeddings = self.mean_pooling(embeddings, tokens["attention_mask"])
        embeddings = embeddings / jnp.linalg.norm(embeddings, axis=-1, keepdims=True)
        if self.other_features:
            # Optionally append extra per-item features to the text embedding.
            embeddings = [embeddings[i] for i in range(embeddings.shape[0])]
            features = examples["features"].tolist()
            for i in range(len(embeddings)):
                embeddings[i] = jnp.concatenate([embeddings[i], jnp.array(features[i])])
            embeddings = jnp.array(embeddings)
        return [embeddings[i] for i in range(embeddings.shape[0])]

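    # Note: self.mean_pooling is not defined in this file (presumably provided
    # by BaseTrainer). A minimal sketch of the standard masked mean pooling it
    # appears to perform, under that assumption:
    #
    #   def mean_pooling(token_embeddings, attention_mask):
    #       mask = jnp.expand_dims(attention_mask, -1)         # (batch, seq, 1)
    #       summed = jnp.sum(token_embeddings * mask, axis=1)  # (batch, hidden)
    #       return summed / jnp.maximum(jnp.sum(mask, axis=1), 1e-9)
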
    def embed_events(self, df):
        user_vecs = []
        label_vecs = []
        for _, row in df.iterrows():
            # The label is the catalogue index of the held-out (last) item.
            label = self.items[self.items["did"] == row["label"]["did"]].index.tolist()[
                0
            ]
            history = [x["did"] for x in row["data"]]
            multi_hot = [0] * len(self.item_embeds)
            # Catalogue indexes of the history items, reordered to match the
            # order in which they appear in the session history.
            indexes = (
                self.items[self.items["did"].isin(history)]
                .index.reindex(
                    self.items[self.items["did"].isin(history)]["did"]
                    .map(dict(zip(history, range(len(history)))))
                    .sort_values()
                    .index
                )[0]
                .tolist()
            )
            for idx in indexes:
                multi_hot[idx] = 1
            multi_hot = jnp.array(multi_hot)
            # The user vector is the mean of the history items' embeddings.
            user_vecs.append(compute_mean(self.item_embeds, multi_hot))
            label_vecs.append(label)
        return jnp.array(user_vecs), jnp.array(label_vecs)

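    # Note: compute_mean is imported from tyrec.utils and not shown in this
    # commit; given the call above, it plausibly averages the item embeddings
    # selected by the multi-hot vector. A sketch under that assumption:
    #
    #   def compute_mean(item_embeds, multi_hot):
    #       selected = item_embeds * multi_hot[:, None]
    #       return jnp.sum(selected, axis=0) / jnp.maximum(jnp.sum(multi_hot), 1)
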
    def group_events(self, df):
        df = df.sort_values(["sid", "ts"])

        def group_to_dict_array(to_dict):
            return to_dict.drop(["sid", "event"], axis=1).to_dict("records")

        grouped_data = []
        for (sid,), group in df.groupby(["sid"]):
            data = group_to_dict_array(group)
            # Keep sessions with at least three events: all but the last form
            # the history, the last event becomes the prediction label.
            if len(data) > 2:
                grouped_data.append(
                    {
                        "sid": sid,
                        "data": data[:-1],
                        "label": data[-1],
                    }
                )
        grouped_data = pd.DataFrame(grouped_data)
        return grouped_data

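    # Concretely: for a session whose events sorted by ts are e1, e2, e3, the
    # emitted record is {"sid": sid, "data": [e1, e2], "label": e3}; sessions
    # with fewer than three events are dropped.
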
    @staticmethod
    def users_to_sessions(file_path, threshold):
        df = pd.read_json(file_path, lines=True)
        if threshold > 0:

            def create_intervals(group):
                # Start a new interval whenever the gap between consecutive
                # events exceeds the threshold.
                group = group.copy()
                group["time_diff"] = group["ts"].diff()
                group["interval"] = (group["time_diff"] > threshold).cumsum()
                return group.drop("time_diff", axis=1)

            df_list = [create_intervals(group) for _, group in df.groupby("sid")]
            df = pd.concat(df_list, ignore_index=True)
        else:
            df["interval"] = 0

        return df

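    # Toy illustration of the interval split (timestamps and the threshold of
    # 100 are made up):
    #
    #   g = pd.DataFrame({"ts": [0, 10, 500, 505]})
    #   (g["ts"].diff() > 100).cumsum()  # -> [0, 0, 1, 1]: the 490-unit gap
    #                                    #    starts a new interval
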
    def load_item_embeddings(self):
        # Reuse precomputed item embeddings, aligned to the catalogue by "did".
        item_embeds = pd.read_json(self.item_embeddings_file, lines=True)
        item_with_embeds = pd.merge(self.items, item_embeds, on="did", how="left")
        return [jnp.array(x) for x in item_with_embeds["embed"]]

    def load_user_embeddings(self, df, file_path):
        # Reuse precomputed user embeddings, aligned to the sessions by "sid";
        # labels are converted to catalogue indexes.
        user_embeds = pd.read_json(file_path, lines=True)
        user_with_embeds = pd.merge(df, user_embeds, on="sid", how="left")
        return jnp.array([jnp.array(x) for x in user_with_embeds["embed"]]), jnp.array(
            [
                self.items[self.items["did"] == x["did"]].index.tolist()[0]
                for x in user_with_embeds["label"]
            ]
        )

    def setup(self):
        corpus = {
            f"{self.items.loc[x]['did']}": self.items.loc[x]["text"]
            for x in range(len(self.items))
        }
        if path.exists(self.item_embeddings_file):
            logger.info("Found a saved item embedding file...")
            self.item_embeds = self.load_item_embeddings()
        else:
            # Embed the catalogue in batches of 128.
            for start in trange(0, len(self.items), 128, desc="Embedding items"):
                end = min(start + 128, len(self.items))
                e = self.embed_items(self.items.loc[start : end - 1])
                self.item_embeds.extend(e)
        self.item_embeds = jnp.array(self.item_embeds)
        self.dataloader = self.group_events(self.dataloader)
        self.test_dataset = self.group_events(self.test_dataset)
        self.dataset_len = len(self.dataloader)

        if path.exists(self.train_user_embeddings_file):
            logger.info("Found a saved train embedding file...")
            self.train_user_embeds, self.train_label_embeds = self.load_user_embeddings(
                self.dataloader, self.train_user_embeddings_file
            )
        else:
            self.train_user_embeds, self.train_label_embeds = self.embed_events(
                self.dataloader
            )

        if path.exists(self.test_user_embeddings_file):
            logger.info("Found a saved test embedding file...")
            self.test_user_embeds, self.test_label_embeds = self.load_user_embeddings(
                self.test_dataset, self.test_user_embeddings_file
            )
        else:
            self.test_user_embeds, self.test_label_embeds = self.embed_events(
                self.test_dataset
            )
        # The retrieval evaluator scores each test user's ranking of the corpus
        # against the single relevant (held-out) item.
        users = {
            f"{self.test_dataset.loc[x]['sid']}": self.test_dataset.loc[x]["sid"]
            for x in range(len(self.test_dataset))
        }
        relevant_docs = {
            f"{self.test_dataset.loc[x]['sid']}": [
                f"{self.test_dataset.loc[x]['label']['did']}"
            ]
            for x in range(len(self.test_dataset))
        }
        self.evaluator = RetrivalEvaluator(
            queries=users,
            corpus=corpus,
            relevant_docs=relevant_docs,
            corpus_chunk_size=40000,
            batch_size=512,
            show_progress_bar=True,
        )

    def get_initial_params(self):
        # Initialize model parameters with a dummy batch of encoder-sized zeros.
        batch = jnp.array([jnp.zeros(self.text_encoder.config.hidden_size)])
        params = self.model.init(jax.random.PRNGKey(0), batch, batch, training=False)
        return params["params"]

    def train_step(self, _batch, start, end):
        # Shuffle users and labels with the same permutation, then take a slice.
        self.rng, rng = jax.random.split(self.rng)
        batch = jax.random.permutation(rng, jnp.array(self.train_user_embeds))[
            start:end
        ]
        labels = jax.random.permutation(rng, jnp.array(self.train_label_embeds))[
            start:end
        ]
        user_vec = jnp.array(batch)
        items_vec = jnp.array(self.item_embeds)
        state, l = train_step(self.state, user_vec, items_vec, labels, rng)
        # Track the validation loss on the held-out users after each step.
        q, d = self.model.apply(
            {"params": self.state.params},
            jnp.array(self.test_user_embeds),
            jnp.array(self.item_embeds),
            training=False,
            rngs={"dropout": rng},
        )
        q = q / jnp.linalg.norm(q, axis=1, keepdims=True)
        d = d / jnp.linalg.norm(d, axis=1, keepdims=True)
        val_l = loss.sparse_categorical_cross_entropy(
            q,
            d,
            self.test_label_embeds,
        )
        self.val_loss.append(val_l)
        return state, l

    def eval_step(self):
        # Project users and items, L2-normalize, and score recall with the
        # retrieval evaluator.
        q, d = self.model.apply(
            {"params": self.state.params},
            jnp.array(self.test_user_embeds),
            jnp.array(self.item_embeds),
            training=False,
        )
        q = q / jnp.linalg.norm(q, axis=1, keepdims=True)
        d = d / jnp.linalg.norm(d, axis=1, keepdims=True)
        self.evaluator(query_embeddings=q, corpus_embeddings=d, metrics=["recall"])


@jax.jit
def train_step(state: TrainState, user_embeds, item_embeds, labels, rng):
    def loss_fn(params):
        # Normalize inputs, project them through the model (with dropout),
        # re-normalize, and score every user against every item.
        u = user_embeds / jnp.linalg.norm(user_embeds, axis=-1, keepdims=True)
        i = item_embeds / jnp.linalg.norm(item_embeds, axis=-1, keepdims=True)
        u, i = state.apply_fn(
            {"params": params},
            u,
            i,
            training=True,
            rngs={"dropout": rng},
        )
        u = u / jnp.linalg.norm(u, axis=-1, keepdims=True)
        i = i / jnp.linalg.norm(i, axis=-1, keepdims=True)
        l = loss.sparse_categorical_cross_entropy(u, i, labels)
        return l

    grad_fn = jax.value_and_grad(loss_fn)
    l, grads = grad_fn(state.params)
    state = state.apply_gradients(grads=grads)
    return state, l
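
loss.sparse_categorical_cross_entropy is imported from tyrec.trainer and is not shown in this commit. Given L2-normalized user and item matrices plus integer item indices, it plausibly treats the full user-item similarity matrix as logits; a minimal sketch under that assumption:

import jax
import jax.numpy as jnp

def sparse_categorical_cross_entropy(u, i, labels):
    # logits[b, j]: similarity of user b to item j (inputs are normalized).
    logits = u @ i.T
    log_probs = jax.nn.log_softmax(logits, axis=-1)
    # Mean negative log-likelihood of each user's true next item.
    return -jnp.mean(log_probs[jnp.arange(labels.shape[0]), labels])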