antoinelouis committed
Commit 37ae2c0
Parent(s): 0b332c6

Update README.md

Files changed (1):
  1. README.md +5 -4
README.md CHANGED
@@ -56,6 +56,7 @@ Start by installing the [library](https://huggingface.co/docs/transformers): `pi
 
 ```python
 import torch
+from torch.nn.functional import relu, normalize
 from transformers import AutoTokenizer, AutoModel
 
 queries = ["Ceci est un exemple de requête.", "Voici un second exemple."]
@@ -71,11 +72,11 @@ with torch.no_grad():
     q_output = model(**q_input)
     p_output = model(**p_input)
 
-q_activations = torch.amax(torch.log1p(input=self.relu(q_output.logits * q_input['attention_mask'].unsqueeze(-1))), dim=1)
-p_activations = torch.amax(torch.log1p(input=self.relu(p_output.logits * p_input['attention_mask'].unsqueeze(-1))), dim=1)
+q_activations = torch.amax(torch.log1p(relu(q_output.logits * q_input['attention_mask'].unsqueeze(-1))), dim=1)
+p_activations = torch.amax(torch.log1p(relu(p_output.logits * p_input['attention_mask'].unsqueeze(-1))), dim=1)
 
-q_activations = torch.nn.functional.normalize(q_activations, p=2, dim=1)
-p_activations = torch.nn.functional.normalize(p_activations, p=2, dim=1)
+q_activations = normalize(q_activations, p=2, dim=1)
+p_activations = normalize(p_activations, p=2, dim=1)
 
 similarity = q_embeddings @ p_embeddings.T
 print(similarity)
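
For reference, below is a hypothetical end-to-end version of the snippet this commit patches. It is a sketch only, not the model card's exact code: the checkpoint ID and the `passages` list are placeholders not shown in this diff, `AutoModelForMaskedLM` is an assumption on our part because the code reads `.logits` (the README's unchanged context loads plain `AutoModel`), and the final line reuses the `*_activations` tensors, since the `q_embeddings`/`p_embeddings` names in the unchanged context line are never defined in the shown hunks.

```python
import torch
from torch.nn.functional import relu, normalize
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Placeholder: substitute the repository this model card documents.
model_id = "path/to/splade-style-checkpoint"

tokenizer = AutoTokenizer.from_pretrained(model_id)
# Assumption: an MLM head is needed so the forward pass exposes `.logits`;
# the README itself loads AutoModel in an unchanged line of this diff.
model = AutoModelForMaskedLM.from_pretrained(model_id)

queries = ["Ceci est un exemple de requête.", "Voici un second exemple."]
passages = ["Ceci est un exemple de passage.", "Et voici un second passage."]  # assumed

q_input = tokenizer(queries, padding=True, truncation=True, return_tensors="pt")
p_input = tokenizer(passages, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    q_output = model(**q_input)
    p_output = model(**p_input)

# SPLADE-style sparse activations: zero out padding positions with the
# attention mask, saturate token weights with log1p(relu(.)), then max-pool
# over the sequence dimension to get one vocabulary-sized vector per text.
q_activations = torch.amax(torch.log1p(relu(q_output.logits * q_input["attention_mask"].unsqueeze(-1))), dim=1)
p_activations = torch.amax(torch.log1p(relu(p_output.logits * p_input["attention_mask"].unsqueeze(-1))), dim=1)

# L2-normalize so the dot products below are cosine similarities.
q_activations = normalize(q_activations, p=2, dim=1)
p_activations = normalize(p_activations, p=2, dim=1)

# The README's context line computes `q_embeddings @ p_embeddings.T`; those
# names are not defined in the shown hunks, so the activations are used here.
similarity = q_activations @ p_activations.T
print(similarity)
```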