arubenruben
committed on
Commit
•
59a0d5a
1
Parent(s):
438e391
Update deploy_pipeline.py
Browse files- deploy_pipeline.py +2 -3
deploy_pipeline.py
CHANGED
@@ -13,9 +13,7 @@ class TokenizeAndAlignLabelsStep():
|
|
13 |
|
14 |
# Adapted From : https://huggingface.co/docs/transformers/tasks/token_classification
|
15 |
def tokenize_and_align_labels(self, examples, tokenizer):
|
16 |
-
|
17 |
-
print(examples)
|
18 |
-
|
19 |
tokenized_inputs = tokenizer(examples, padding='max_length', truncation=True, max_length=128, is_split_into_words=True)
|
20 |
|
21 |
# Map tokens to their respective word.
|
@@ -41,6 +39,7 @@ class TokenizeAndAlignLabelsStep():
|
|
41 |
return tokenized_inputs
|
42 |
|
43 |
|
|
|
44 |
class BERT_CRF_Pipeline(Pipeline):
|
45 |
|
46 |
def _sanitize_parameters(self, **kwargs):
|
|
|
13 |
|
14 |
# Adapted From : https://huggingface.co/docs/transformers/tasks/token_classification
|
15 |
def tokenize_and_align_labels(self, examples, tokenizer):
|
16 |
+
|
|
|
|
|
17 |
tokenized_inputs = tokenizer(examples, padding='max_length', truncation=True, max_length=128, is_split_into_words=True)
|
18 |
|
19 |
# Map tokens to their respective word.
|
|
|
39 |
return tokenized_inputs
|
40 |
|
41 |
|
42 |
+
|
43 |
class BERT_CRF_Pipeline(Pipeline):
|
44 |
|
45 |
def _sanitize_parameters(self, **kwargs):
|