Replace boilerplate example code with actual huggingface repo model paths
README.md CHANGED
````diff
@@ -25,7 +25,7 @@ Then you can use the model like this:
 
 ```python
 from sentence_transformers import SentenceTransformer
-sentences = ["
+sentences = ["Det här är en exempelmening", "Varje exempel blir konverterad"]
 
 model = SentenceTransformer('KBLab/sentence-bert-swedish-cased')
 embeddings = model.encode(sentences)
@@ -50,11 +50,11 @@ def mean_pooling(model_output, attention_mask):
 
 
 # Sentences we want sentence embeddings for
-sentences = ['
+sentences = ['Det här är en exempelmening', 'Varje exempel blir konverterad']
 
 # Load model from HuggingFace Hub
-tokenizer = AutoTokenizer.from_pretrained('
-model = AutoModel.from_pretrained('
+tokenizer = AutoTokenizer.from_pretrained('KBLab/sentence-bert-swedish-cased')
+model = AutoModel.from_pretrained('KBLab/sentence-bert-swedish-cased')
 
 # Tokenize sentences
 encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')
````
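For reference, a minimal runnable sketch of the Transformers-based usage that the second hunk edits, as it would read after this change. The diff truncates the surrounding README, so the `mean_pooling` body and the embedding computation below are assumed from the standard sentence-transformers model-card template rather than taken from this commit:

```python
# Sketch of the Transformers usage after this change (mean_pooling body assumed
# from the standard sentence-transformers model-card template, not shown in the diff).
import torch
from transformers import AutoTokenizer, AutoModel


def mean_pooling(model_output, attention_mask):
    # Average the token embeddings, masking out padding positions.
    token_embeddings = model_output[0]
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ['Det här är en exempelmening', 'Varje exempel blir konverterad']

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('KBLab/sentence-bert-swedish-cased')
model = AutoModel.from_pretrained('KBLab/sentence-bert-swedish-cased')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Mean pooling gives one fixed-size embedding per input sentence
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])
```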