Update README.md
Browse files
README.md
CHANGED
@@ -65,8 +65,13 @@ The easiest way is to load the inference api from huggingface and second method
 ```python
 # Use a pipeline as a high-level helper
 from transformers import pipeline
-
 pipe = pipeline("token-classification", model="blaze999/deberta-med-ner-2", aggregation_strategy='simple')
+
+# Load model directly
+from transformers import AutoTokenizer, AutoModelForTokenClassification
+
+tokenizer = AutoTokenizer.from_pretrained("blaze999/deberta-med-ner-2")
+model = AutoModelForTokenClassification.from_pretrained("blaze999/deberta-med-ner-2")
 ```
 
 