Update README.md
README.md (changed)
@@ -43,13 +43,23 @@ pip install -U sentence-transformers
 Then you can use the model like this:
 
 ```python
-from sentence_transformers import SentenceTransformer
-sentences = ["This is an example sentence", "Each sentence is converted"]
+from sentence_transformers import SentenceTransformer, util
+sentences = ["This is a Norwegian boy", "Dette er en norsk gutt"]
 
 model = SentenceTransformer('NbAiLab/nb-sbert')
 embeddings = model.encode(sentences)
 print(embeddings)
 
+# Compute cosine-similarities with sentence transformers
+cosine_scores = util.cos_sim(embeddings[0], embeddings[1])
+print(cosine_scores)
+
+# Compute cosine-similarities with SciPy
+from scipy import spatial
+scipy_cosine_scores = 1 - spatial.distance.cosine(embeddings[0], embeddings[1])
+print(scipy_cosine_scores)
+
+# Both should give 0.8250 in the example above.
 
 ```
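A note for readers trying the snippet above with more than a pair of sentences: `util.cos_sim` also accepts whole embedding matrices and returns the full pairwise similarity matrix. A minimal sketch, assuming `sentence-transformers` is installed (the third sentence is invented for illustration):

```python
from sentence_transformers import SentenceTransformer, util

# Two sentences from the card plus one invented example sentence.
sentences = ["This is a Norwegian boy", "Dette er en norsk gutt", "Dette er en norsk jente"]

model = SentenceTransformer('NbAiLab/nb-sbert')
embeddings = model.encode(sentences)

# With full matrices, util.cos_sim returns an NxN tensor of pairwise scores.
cosine_scores = util.cos_sim(embeddings, embeddings)
print(cosine_scores)  # cosine_scores[i][j] compares sentences[i] and sentences[j]
```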
@@ -71,7 +81,7 @@ def mean_pooling(model_output, attention_mask):
 
 
 # Sentences we want sentence embeddings for
-sentences = ['This is an example sentence', 'Each sentence is converted']
+sentences = ["This is a Norwegian boy", "Dette er en norsk gutt"]
 
 # Load model from HuggingFace Hub
 tokenizer = AutoTokenizer.from_pretrained('NbAiLab/nb-sbert')
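The hunk header above references the card's `mean_pooling` helper, which this commit leaves untouched. For orientation, here is a sketch of the definition as it usually appears in sentence-transformers model cards (assumed, since the diff does not show it):

```python
import torch

# Assumed definition of the `mean_pooling` helper named in the hunk header:
# average the token embeddings, weighting each token by its attention mask.
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # first element: per-token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    summed = torch.sum(token_embeddings * input_mask_expanded, 1)
    counts = torch.clamp(input_mask_expanded.sum(1), min=1e-9)
    return summed / counts
```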
@@ -85,10 +95,17 @@ with torch.no_grad():
     model_output = model(**encoded_input)
 
 # Perform pooling. In this case, mean pooling.
-sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
+
+print(embeddings)
+
+# Compute cosine-similarities with SciPy
+from scipy import spatial
+scipy_cosine_scores = 1 - spatial.distance.cosine(embeddings[0], embeddings[1])
+print(scipy_cosine_scores)
+
+# This should give 0.8250 in the example above.
 
-print("Sentence embeddings:")
-print(sentence_embeddings)
 ```
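One side note on the SciPy step in the last hunk: since `mean_pooling` returns torch tensors, the same score can also be computed without leaving PyTorch. A sketch under that assumption (the random tensors merely stand in for the pooled embeddings; the 768-dim width is assumed from the model's BERT-base backbone):

```python
import torch
import torch.nn.functional as F

# Stand-ins for the two mean-pooled embeddings from the last hunk;
# 768 dimensions is an assumption based on the BERT-base backbone.
embeddings = torch.randn(2, 768)

# F.cosine_similarity works directly on the two 1-D vectors.
score = F.cosine_similarity(embeddings[0], embeddings[1], dim=0)
print(score.item())  # matches 1 - spatial.distance.cosine(...) up to float precision
```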