commit files to HF hub
README.md CHANGED
@@ -11,15 +11,15 @@ This is the [csarron/bert-base-uncased-squad-v1](https://huggingface.co/csarron/
 
 An example of how to do inference on this model:
 ```python
-from optimum.intel.openvino import
+from optimum.intel.openvino import OVModelForQuestionAnswering
 from transformers import AutoTokenizer, pipeline
 
 # model_id should be set to either a local directory or a model available on the HuggingFace hub.
-model_id = "
+model_id = "helenai/csarron-bert-base-uncased-squad-v1-ov-fp32"
 tokenizer = AutoTokenizer.from_pretrained(model_id)
-model =
-pipe = pipeline("
-result = pipe(
+model = OVModelForQuestionAnswering.from_pretrained(model_id)
+pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
+result = pipe("What is OpenVINO?", "OpenVINO is a framework that accelerates deep learning inferencing")
 print(result)
 ```
 
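For context, a minimal sketch of how an FP32 OpenVINO IR like this repository can be produced with optimum-intel. The source checkpoint name and output directory below are assumptions inferred from the model id in the README, not taken from this commit.

```python
# Sketch (assumption): export the original PyTorch checkpoint to OpenVINO IR with optimum-intel.
from optimum.intel.openvino import OVModelForQuestionAnswering
from transformers import AutoTokenizer

pytorch_model_id = "csarron/bert-base-uncased-squad-v1"          # assumed source checkpoint
output_dir = "csarron-bert-base-uncased-squad-v1-ov-fp32"        # hypothetical local output path

# export=True converts the transformers checkpoint to OpenVINO IR at load time
model = OVModelForQuestionAnswering.from_pretrained(pytorch_model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(pytorch_model_id)

# Save the IR (openvino_model.xml / openvino_model.bin) plus tokenizer files
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
```

A directory exported this way can then be pushed to the hub and loaded exactly as shown in the README snippet above.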