Update README.md
Browse files
README.md
CHANGED
@@ -55,7 +55,26 @@ for t in text:
55 |
56 | #### NLI use-case
57 | ```python
58 |
59 | ```
60 |
61 | ## Training and evaluation data
55 |
56 | #### NLI use-case
57 | ```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

# Prefer the Apple-silicon GPU backend (MPS) when available, else CPU.
device = torch.device("mps" if torch.backends.mps.is_available() else "cpu")

model_name = "thkkvui/mDeBERTa-v3-base-finetuned-nli-jnli"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# FIX: the original example computed `device` but never used it; move the
# model (and, below, the inputs) onto it so the device selection matters.
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

premise = "NY Yankees is the professional baseball team in America."
hypothesis = "メジャーリーグのチームは、日本ではニューヨークヤンキースが有名だ。"

# BatchEncoding.to(device) moves every input tensor to the chosen device.
inputs = tokenizer(premise, hypothesis, truncation=True, return_tensors="pt").to(device)

# Inference only — no gradients needed.
with torch.no_grad():
    output = model(**inputs)

# Softmax over the 3 NLI logits -> per-label probabilities.
preds = torch.softmax(output["logits"][0], -1).tolist()
label_names = ["entailment", "neutral", "contradiction"]
# Report each label as a percentage rounded to one decimal place.
result = {name: round(float(pred) * 100, 1) for pred, name in zip(preds, label_names)}
print(result)
78 | ```
79 |
80 | ## Training and evaluation data