update-readme #3
by echarlaix - opened
README.md CHANGED

@@ -56,13 +56,10 @@ For example, you can optimize it with Intel OpenVINO and enjoy a 2x inference speedup
 
 ```
 from optimum.intel.openvino import OVModelForSequenceClassification
-from transformers import
-pipeline)
+from transformers import AutoTokenizer, pipeline
 
 model_id = "juliensimon/xlm-v-base-language-id"
-ov_model = OVModelForSequenceClassification.from_pretrained(
-    model_id, from_transformers=True
-)
+ov_model = OVModelForSequenceClassification.from_pretrained(model_id)
 tokenizer = AutoTokenizer.from_pretrained(model_id)
 p = pipeline("text-classification", model=ov_model, tokenizer=tokenizer)
 p("Hello world")

@@ -114,4 +111,4 @@ The following hyperparameters were used during training:
 - Transformers 4.26.0
 - Pytorch 1.13.1
 - Datasets 2.8.0
-- Tokenizers 0.13.2
+- Tokenizers 0.13.2
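For reference, the updated snippet should run as-is once `optimum[openvino]` and `transformers` are installed. Below is a minimal, self-contained sketch of the new usage; the `export=True` fallback mentioned in the comments is an assumption about newer optimum-intel releases and is not part of this PR.

```python
# Sketch of the updated README snippet
# (assumes `pip install optimum[openvino] transformers`).
from optimum.intel.openvino import OVModelForSequenceClassification
from transformers import AutoTokenizer, pipeline

model_id = "juliensimon/xlm-v-base-language-id"

# Load the model with OpenVINO as the inference backend. If the Hub repo only
# contains PyTorch weights, recent optimum-intel versions accept `export=True`
# to convert them on the fly (assumption, not shown in the diff above).
ov_model = OVModelForSequenceClassification.from_pretrained(model_id)
tokenizer = AutoTokenizer.from_pretrained(model_id)

# The OVModel plugs into a regular transformers pipeline.
p = pipeline("text-classification", model=ov_model, tokenizer=tokenizer)
print(p("Hello world"))
```

The diff itself only fixes the previously broken `from transformers import` line and drops the `from_transformers=True` argument, which newer optimum-intel releases have superseded with `export`.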