liuganghuggingface
committed on
Update graph_decoder/diffusion_utils.py
- graph_decoder/diffusion_utils.py  +388 -389
graph_decoder/diffusion_utils.py
CHANGED
@@ -1,10 +1,11 @@
+import os
+import json
+import yaml
+
 import torch
 import numpy as np
 from torch.nn import functional as F
 from torch_geometric.utils import to_dense_adj, to_dense_batch, remove_self_loops
-import os
-import json
-import yaml
 from types import SimpleNamespace

 def dict_to_namespace(d):
@@ -127,402 +128,400 @@ def encode_no_edge(E):
     return E


-#### diffusion utils
-class DistributionNodes:
-    ...
-class PredefinedNoiseScheduleDiscrete(torch.nn.Module):
-    ...
-class DiscreteUniformTransition:
-    ...
-class MarginalTransition:
-    ...
-def sum_except_batch(x):
-    ...
-def reverse_tensor(x):
-    return x[torch.arange(x.size(0) - 1, -1, -1)]
+# #### diffusion utils
+# class DistributionNodes:
+#     def __init__(self, histogram):
+#         """Compute the distribution of the number of nodes in the dataset, and sample from this distribution.
+#         histogram: dict. The keys are num_nodes, the values are counts
+#         """
+
+#         if type(histogram) == dict:
+#             max_n_nodes = max(histogram.keys())
+#             prob = torch.zeros(max_n_nodes + 1)
+#             for num_nodes, count in histogram.items():
+#                 prob[num_nodes] = count
+#         else:
+#             prob = histogram
+
+#         self.prob = prob / prob.sum()
+#         self.m = torch.distributions.Categorical(prob)
+
+#     def sample_n(self, n_samples, device):
+#         idx = self.m.sample((n_samples,))
+#         return idx.to(device)
+
+#     def log_prob(self, batch_n_nodes):
+#         assert len(batch_n_nodes.size()) == 1
+#         p = self.prob.to(batch_n_nodes.device)
+
+#         probas = p[batch_n_nodes]
+#         log_p = torch.log(probas + 1e-30)
+#         return log_p
+
+
+# class PredefinedNoiseScheduleDiscrete(torch.nn.Module):
+#     def __init__(self, noise_schedule, timesteps):
+#         super(PredefinedNoiseScheduleDiscrete, self).__init__()
+#         self.timesteps = timesteps
+
+#         betas = cosine_beta_schedule_discrete(timesteps)
+#         self.register_buffer("betas", torch.from_numpy(betas).float())
+
+#         # 0.9999
+#         self.alphas = 1 - torch.clamp(self.betas, min=0, max=1)
+
+#         log_alpha = torch.log(self.alphas)
+#         log_alpha_bar = torch.cumsum(log_alpha, dim=0)
+#         self.alphas_bar = torch.exp(log_alpha_bar)
+
+#     def forward(self, t_normalized=None, t_int=None):
+#         assert int(t_normalized is None) + int(t_int is None) == 1
+#         if t_int is None:
+#             t_int = torch.round(t_normalized * self.timesteps)
+#         self.betas = self.betas.type_as(t_int)
+#         return self.betas[t_int.long()]
+
+#     def get_alpha_bar(self, t_normalized=None, t_int=None):
+#         assert int(t_normalized is None) + int(t_int is None) == 1
+#         if t_int is None:
+#             t_int = torch.round(t_normalized * self.timesteps)
+#         self.alphas_bar = self.alphas_bar.type_as(t_int)
+#         return self.alphas_bar[t_int.long()]
+
+
+# # class DiscreteUniformTransition:
+# #     def __init__(self, x_classes: int, e_classes: int, y_classes: int):
+# #         self.X_classes = x_classes
+# #         self.E_classes = e_classes
+# #         self.y_classes = y_classes
+# #         self.u_x = torch.ones(1, self.X_classes, self.X_classes)
+# #         if self.X_classes > 0:
+# #             self.u_x = self.u_x / self.X_classes
+
+# #         self.u_e = torch.ones(1, self.E_classes, self.E_classes)
+# #         if self.E_classes > 0:
+# #             self.u_e = self.u_e / self.E_classes
+
+# #         self.u_y = torch.ones(1, self.y_classes, self.y_classes)
+# #         if self.y_classes > 0:
+# #             self.u_y = self.u_y / self.y_classes
+
+# #     def get_Qt(self, beta_t, device, X=None, flatten_e=None):
+# #         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
+# #         Qt = (1 - beta_t) * I + beta_t / K
+
+# #         beta_t: (bs) noise level between 0 and 1
+# #         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
+# #         """
+# #         beta_t = beta_t.unsqueeze(1)
+# #         beta_t = beta_t.to(device)
+# #         self.u_x = self.u_x.to(device)
+# #         self.u_e = self.u_e.to(device)
+# #         self.u_y = self.u_y.to(device)
+
+# #         q_x = beta_t * self.u_x + (1 - beta_t) * torch.eye(
+# #             self.X_classes, device=device
+# #         ).unsqueeze(0)
+# #         q_e = beta_t * self.u_e + (1 - beta_t) * torch.eye(
+# #             self.E_classes, device=device
+# #         ).unsqueeze(0)
+# #         q_y = beta_t * self.u_y + (1 - beta_t) * torch.eye(
+# #             self.y_classes, device=device
+# #         ).unsqueeze(0)
+
+# #         return PlaceHolder(X=q_x, E=q_e, y=q_y)
+
+# #     def get_Qt_bar(self, alpha_bar_t, device, X=None, flatten_e=None):
+# #         """Returns t-step transition matrices for X and E, from step 0 to step t.
+# #         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) / K
+
+# #         alpha_bar_t: (bs) Product of the (1 - beta_t) for each time step from 0 to t.
+# #         returns: qx (bs, dx, dx), qe (bs, de, de), qy (bs, dy, dy).
+# #         """
+# #         alpha_bar_t = alpha_bar_t.unsqueeze(1)
+# #         alpha_bar_t = alpha_bar_t.to(device)
+# #         self.u_x = self.u_x.to(device)
+# #         self.u_e = self.u_e.to(device)
+# #         self.u_y = self.u_y.to(device)
+
+# #         q_x = (
+# #             alpha_bar_t * torch.eye(self.X_classes, device=device).unsqueeze(0)
+# #             + (1 - alpha_bar_t) * self.u_x
+# #         )
+# #         q_e = (
+# #             alpha_bar_t * torch.eye(self.E_classes, device=device).unsqueeze(0)
+# #             + (1 - alpha_bar_t) * self.u_e
+# #         )
+# #         q_y = (
+# #             alpha_bar_t * torch.eye(self.y_classes, device=device).unsqueeze(0)
+# #             + (1 - alpha_bar_t) * self.u_y
+# #         )
+
+# #         return PlaceHolder(X=q_x, E=q_e, y=q_y)
+
+
+# class MarginalTransition:
+#     def __init__(
+#         self, x_marginals, e_marginals, xe_conditions, ex_conditions, y_classes, n_nodes
+#     ):
+#         self.X_classes = len(x_marginals)
+#         self.E_classes = len(e_marginals)
+#         self.y_classes = y_classes
+#         self.x_marginals = x_marginals  # Dx
+#         self.e_marginals = e_marginals  # Dx, De
+#         self.xe_conditions = xe_conditions
+#         # print('e_marginals.dtype', e_marginals.dtype)
+#         # print('x_marginals.dtype', x_marginals.dtype)
+#         # print('xe_conditions.dtype', xe_conditions.dtype)
+
+#         self.u_x = (
+#             x_marginals.unsqueeze(0).expand(self.X_classes, -1).unsqueeze(0)
+#         )  # 1, Dx, Dx
+#         self.u_e = (
+#             e_marginals.unsqueeze(0).expand(self.E_classes, -1).unsqueeze(0)
+#         )  # 1, De, De
+#         self.u_xe = xe_conditions.unsqueeze(0)  # 1, Dx, De
+#         self.u_ex = ex_conditions.unsqueeze(0)  # 1, De, Dx
+#         self.u = self.get_union_transition(
+#             self.u_x, self.u_e, self.u_xe, self.u_ex, n_nodes
+#         )  # 1, Dx + n*De, Dx + n*De
+
+#     def get_union_transition(self, u_x, u_e, u_xe, u_ex, n_nodes):
+#         u_e = u_e.repeat(1, n_nodes, n_nodes)  # (1, n*de, n*de)
+#         u_xe = u_xe.repeat(1, 1, n_nodes)  # (1, dx, n*de)
+#         u_ex = u_ex.repeat(1, n_nodes, 1)  # (1, n*de, dx)
+#         u0 = torch.cat([u_x, u_xe], dim=2)  # (1, dx, dx + n*de)
+#         u1 = torch.cat([u_ex, u_e], dim=2)  # (1, n*de, dx + n*de)
+#         u = torch.cat([u0, u1], dim=1)  # (1, dx + n*de, dx + n*de)
+#         return u
+
+#     def index_edge_margin(self, X, q_e, n_bond=5):
+#         # q_e: (bs, dx, de) --> (bs, n, de)
+#         bs, n, n_atom = X.shape
+#         node_indices = X.argmax(-1)  # (bs, n)
+#         ind = node_indices[:, :, None].expand(bs, n, n_bond)
+#         q_e = torch.gather(q_e, 1, ind)
+#         return q_e
+
+#     def get_Qt(self, beta_t, device):
+#         """Returns one-step transition matrices for X and E, from step t - 1 to step t.
+#         Qt = (1 - beta_t) * I + beta_t / K
+#         beta_t: (bs)
+#         returns: q (bs, d0, d0)
+#         """
+#         bs = beta_t.size(0)
+#         d0 = self.u.size(-1)
+#         self.u = self.u.to(device)
+#         u = self.u.expand(bs, d0, d0)
+
+#         beta_t = beta_t.to(device)
+#         beta_t = beta_t.view(bs, 1, 1)
+#         q = beta_t * u + (1 - beta_t) * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
+
+#         return PlaceHolder(X=q, E=None, y=None)
+
+#     def get_Qt_bar(self, alpha_bar_t, device):
+#         """Returns t-step transition matrices for X and E, from step 0 to step t.
+#         Qt = prod(1 - beta_t) * I + (1 - prod(1 - beta_t)) * K
+#         alpha_bar_t: (bs, 1) Product of the (1 - beta_t) for each time step from 0 to t.
+#         returns: q (bs, d0, d0)
+#         """
+#         bs = alpha_bar_t.size(0)
+#         d0 = self.u.size(-1)
+#         alpha_bar_t = alpha_bar_t.to(device)
+#         alpha_bar_t = alpha_bar_t.view(bs, 1, 1)
+#         self.u = self.u.to(device)
+#         q = (
+#             alpha_bar_t * torch.eye(d0, device=device, dtype=self.u.dtype).unsqueeze(0)
+#             + (1 - alpha_bar_t) * self.u
+#         )
+
+#         return PlaceHolder(X=q, E=None, y=None)
+
+
+# def sum_except_batch(x):
+#     return x.reshape(x.size(0), -1).sum(dim=-1)
+
+# def assert_correctly_masked(variable, node_mask):
+#     assert (
+#         variable * (1 - node_mask.long())
+#     ).abs().max().item() < 1e-4, "Variables not masked properly."
+
+# def cosine_beta_schedule_discrete(timesteps, s=0.008):
+#     """Cosine schedule as proposed in https://openreview.net/forum?id=-NEXDKk8gZ."""
+#     steps = timesteps + 2
+#     x = np.linspace(0, steps, steps)
+
+#     alphas_cumprod = np.cos(0.5 * np.pi * ((x / steps) + s) / (1 + s)) ** 2
+#     alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
+#     alphas = alphas_cumprod[1:] / alphas_cumprod[:-1]
+#     betas = 1 - alphas
+#     return betas.squeeze()
+
+
+# def sample_discrete_features(probX, probE, node_mask, step=None, add_nose=True):
+#     """Sample features from multinomial distribution with given probabilities (probX, probE, proby)
+#     :param probX: bs, n, dx_out    node features
+#     :param probE: bs, n, n, de_out    edge features
+#     :param proby: bs, dy_out    global features.
+#     """
+#     bs, n, _ = probX.shape
+
+#     # Noise X
+#     # The masked rows should define probability distributions as well
+#     probX[~node_mask] = 1 / probX.shape[-1]
+
+#     # Flatten the probability tensor to sample with multinomial
+#     probX = probX.reshape(bs * n, -1)  # (bs * n, dx_out)
+
+#     # Sample X
+#     probX = probX.clamp_min(1e-5)
+#     probX = probX / probX.sum(dim=-1, keepdim=True)
+#     X_t = probX.multinomial(1)  # (bs * n, 1)
+#     X_t = X_t.reshape(bs, n)  # (bs, n)
+
+#     # Noise E
+#     # The masked rows should define probability distributions as well
+#     inverse_edge_mask = ~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2))
+#     diag_mask = torch.eye(n).unsqueeze(0).expand(bs, -1, -1)
+
+#     probE[inverse_edge_mask] = 1 / probE.shape[-1]
+#     probE[diag_mask.bool()] = 1 / probE.shape[-1]
+#     probE = probE.reshape(bs * n * n, -1)  # (bs * n * n, de_out)
+#     probE = probE.clamp_min(1e-5)
+#     probE = probE / probE.sum(dim=-1, keepdim=True)
+
+#     # Sample E
+#     E_t = probE.multinomial(1).reshape(bs, n, n)  # (bs, n, n)
+#     E_t = torch.triu(E_t, diagonal=1)
+#     E_t = E_t + torch.transpose(E_t, 1, 2)
+
+#     return PlaceHolder(X=X_t, E=E_t, y=torch.zeros(bs, 0).type_as(X_t))
+
+
+# def mask_distributions(true_X, true_E, pred_X, pred_E, node_mask):
+#     # Add a small value everywhere to avoid nans
+#     pred_X = pred_X.clamp_min(1e-5)
+#     pred_X = pred_X / torch.sum(pred_X, dim=-1, keepdim=True)
+
+#     pred_E = pred_E.clamp_min(1e-5)
+#     pred_E = pred_E / torch.sum(pred_E, dim=-1, keepdim=True)
+
+#     # Set masked rows to arbitrary distributions, so it doesn't contribute to loss
+#     row_X = torch.ones(true_X.size(-1), dtype=true_X.dtype, device=true_X.device)
+#     row_E = torch.zeros(
+#         true_E.size(-1), dtype=true_E.dtype, device=true_E.device
+#     ).clamp_min(1e-5)
+#     row_E[0] = 1.0
+
+#     diag_mask = ~torch.eye(
+#         node_mask.size(1), device=node_mask.device, dtype=torch.bool
+#     ).unsqueeze(0)
+#     true_X[~node_mask] = row_X
+#     true_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = row_E
+#     pred_X[~node_mask] = row_X.type_as(pred_X)
+#     pred_E[~(node_mask.unsqueeze(1) * node_mask.unsqueeze(2) * diag_mask), :] = (
+#         row_E.type_as(pred_E)
+#     )
+
+#     return true_X, true_E, pred_X, pred_E
+
+
+# def forward_diffusion(X, X_t, Qt, Qsb, Qtb, X_dim):
+#     bs, n, d = X.shape
+
+#     Qt_X_T = torch.transpose(Qt.X, -2, -1)  # (bs, d, d)
+#     left_term = X_t @ Qt_X_T  # (bs, N, d)
+#     right_term = X @ Qsb.X  # (bs, N, d)
+
+#     numerator = left_term * right_term  # (bs, N, d)
+#     denominator = X @ Qtb.X  # (bs, N, d) @ (bs, d, d) = (bs, N, d)
+#     denominator = denominator * X_t
+
+#     num_X = numerator[:, :, :X_dim]
+#     num_E = numerator[:, :, X_dim:].reshape(bs, n * n, -1)
+
+#     deno_X = denominator[:, :, :X_dim]
+#     deno_E = denominator[:, :, X_dim:].reshape(bs, n * n, -1)
+
+#     denominator = denominator.unsqueeze(-1)  # (bs, N, 1)
+
+#     deno_X = deno_X.sum(dim=-1, keepdim=True)
+#     deno_E = deno_E.sum(dim=-1, keepdim=True)
+
+#     deno_X[deno_X == 0.0] = 1
+#     deno_E[deno_E == 0.0] = 1
+#     prob_X = num_X / deno_X
+#     prob_E = num_E / deno_E
+
+#     prob_E = prob_E / prob_E.sum(dim=-1, keepdim=True)
+#     prob_X = prob_X / prob_X.sum(dim=-1, keepdim=True)
+#     return PlaceHolder(X=prob_X, E=prob_E, y=None)
+
+
+# def reverse_diffusion(predX_0, X_t, Qt, Qsb, Qtb):
+#     """M: X or E
+#     Compute xt @ Qt.T * x0 @ Qsb / x0 @ Qtb @ xt.T for each possible value of x0
+#     X_t: bs, n, dt or bs, n, n, dt
+#     Qt: bs, d_t-1, dt
+#     Qsb: bs, d0, d_t-1
+#     Qtb: bs, d0, dt.
+#     """
+#     Qt_T = Qt.transpose(-1, -2)  # bs, N, dt
+#     assert Qt.dim() == 3
+#     left_term = X_t @ Qt_T  # bs, N, d_t-1
+#     right_term = predX_0 @ Qsb
+#     numerator = left_term * right_term  # bs, N, d_t-1
+
+#     denominator = Qtb @ X_t.transpose(-1, -2)  # bs, d0, N
+#     denominator = denominator.transpose(-1, -2)  # bs, N, d0
+#     return numerator / denominator.clamp_min(1e-5)
+
+# def reverse_tensor(x):
+#     return x[torch.arange(x.size(0) - 1, -1, -1)]

-def sample_discrete_feature_noise(limit_dist, node_mask):
-    """Sample from the limit distribution of the diffusion process"""
-    bs, n_max = node_mask.shape
-    x_limit = limit_dist.X[None, None, :].expand(bs, n_max, -1)
-    x_limit = x_limit.to(node_mask.device)
+# def sample_discrete_feature_noise(limit_dist, node_mask):
+#     """Sample from the limit distribution of the diffusion process"""
+#     bs, n_max = node_mask.shape
+#     x_limit = limit_dist.X[None, None, :].expand(bs, n_max, -1)
+#     x_limit = x_limit.to(node_mask.device)

-    U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)
-    U_X = F.one_hot(U_X.long(), num_classes=x_limit.shape[-1]).type_as(x_limit)
+#     U_X = x_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max)
+#     U_X = F.one_hot(U_X.long(), num_classes=x_limit.shape[-1]).type_as(x_limit)

-    e_limit = limit_dist.E[None, None, None, :].expand(bs, n_max, n_max, -1)
-    U_E = e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)
-    U_E = F.one_hot(U_E.long(), num_classes=e_limit.shape[-1]).type_as(x_limit)
+#     e_limit = limit_dist.E[None, None, None, :].expand(bs, n_max, n_max, -1)
+#     U_E = e_limit.flatten(end_dim=-2).multinomial(1).reshape(bs, n_max, n_max)
+#     U_E = F.one_hot(U_E.long(), num_classes=e_limit.shape[-1]).type_as(x_limit)

-    U_X = U_X.to(node_mask.device)
-    U_E = U_E.to(node_mask.device)
+#     U_X = U_X.to(node_mask.device)
+#     U_E = U_E.to(node_mask.device)

-    # Get upper triangular part of edge noise, without main diagonal
-    upper_triangular_mask = torch.zeros_like(U_E)
-    indices = torch.triu_indices(row=U_E.size(1), col=U_E.size(2), offset=1)
-    upper_triangular_mask[:, indices[0], indices[1], :] = 1
+#     # Get upper triangular part of edge noise, without main diagonal
+#     upper_triangular_mask = torch.zeros_like(U_E)
+#     indices = torch.triu_indices(row=U_E.size(1), col=U_E.size(2), offset=1)
+#     upper_triangular_mask[:, indices[0], indices[1], :] = 1

-    U_E = U_E * upper_triangular_mask
-    U_E = U_E + torch.transpose(U_E, 1, 2)
+#     U_E = U_E * upper_triangular_mask
+#     U_E = U_E + torch.transpose(U_E, 1, 2)

-    assert (U_E == torch.transpose(U_E, 1, 2)).all()
-    return PlaceHolder(X=U_X, E=U_E, y=None).mask(node_mask)
+#     assert (U_E == torch.transpose(U_E, 1, 2)).all()
+#     return PlaceHolder(X=U_X, E=U_E, y=None).mask(node_mask)


-def index_QE(X, q_e, n_bond=5):
-    bs, n, n_atom = X.shape
-    node_indices = X.argmax(-1)  # (bs, n)
+# def index_QE(X, q_e, n_bond=5):
+#     bs, n, n_atom = X.shape
+#     node_indices = X.argmax(-1)  # (bs, n)

-    exp_ind1 = node_indices[:, :, None, None, None].expand(
-        bs, n, n_atom, n_bond, n_bond
-    )
-    exp_ind2 = node_indices[:, :, None, None, None].expand(bs, n, n, n_bond, n_bond)
+#     exp_ind1 = node_indices[:, :, None, None, None].expand(
+#         bs, n, n_atom, n_bond, n_bond
+#     )
+#     exp_ind2 = node_indices[:, :, None, None, None].expand(bs, n, n, n_bond, n_bond)

-    q_e = torch.gather(q_e, 1, exp_ind1)
-    q_e = torch.gather(q_e, 2, exp_ind2)  # (bs, n, n, n_bond, n_bond)
+#     q_e = torch.gather(q_e, 1, exp_ind1)
+#     q_e = torch.gather(q_e, 2, exp_ind2)  # (bs, n, n, n_bond, n_bond)

-    node_mask = X.sum(-1) != 0
-    no_edge = (~node_mask)[:, :, None] & (~node_mask)[:, None, :]
-    q_e[no_edge] = torch.tensor([1, 0, 0, 0, 0]).type_as(q_e)
+#     node_mask = X.sum(-1) != 0
+#     no_edge = (~node_mask)[:, :, None] & (~node_mask)[:, None, :]
+#     q_e[no_edge] = torch.tensor([1, 0, 0, 0, 0]).type_as(q_e)

-    return q_e
+#     return q_e