Update README.md
README.md (CHANGED)
The following hyperparameters were used during training:
- lr_scheduler_type: cosine
- training_steps: 1000
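
For orientation, here is a minimal sketch of how the two settings above could be passed to `transformers.TrainingArguments`; the remaining fields (output directory, learning rate, batch size) are illustrative assumptions, not values from this card:

```python
from transformers import TrainingArguments

# Sketch only: lr_scheduler_type and max_steps come from this card;
# every other value below is an assumed placeholder.
training_args = TrainingArguments(
    output_dir="phi-2-universal-NER",   # assumed output path
    lr_scheduler_type="cosine",         # from the card
    max_steps=1000,                     # "training_steps: 1000" above
    learning_rate=2e-4,                 # assumed, not from the card
    per_device_train_batch_size=4,      # assumed, not from the card
)
```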
### Inference Code
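
A minimal end-to-end example: it loads the base `microsoft/phi-2` weights, applies this LoRA adapter via PEFT, formats a ChatML-style conversation with the tokenizer's chat template, and stops generation once the model emits `<|im_end|>`.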

```python
from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteria
import torch

# Load the base model, apply the LoRA adapter, and grab the tokenizer
# that ships with the adapter repo.
config = PeftConfig.from_pretrained("Mit1208/phi-2-universal-NER")
base_model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2", device_map="auto", trust_remote_code=True)
model = PeftModel.from_pretrained(base_model, "Mit1208/phi-2-universal-NER", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("Mit1208/phi-2-universal-NER", trust_remote_code=True)

# Build a ChatML-style conversation, then append the generation prompt
# so the model answers as "gpt".
conversations = [
    {"from": "human", "value": "Text: Mit Patel here from India"},
    {"from": "gpt", "value": "I've read this text."},
    {"from": "human", "value": "what is a name of the person in the text?"},
]
inference_text = tokenizer.apply_chat_template(conversations, tokenize=False) + '<|im_start|>gpt:\n'
# Move inputs onto the model's device so this also runs on GPU.
inputs = tokenizer(inference_text, return_tensors="pt", return_attention_mask=False).to(model.device)

# Stop generating as soon as the model emits the <|im_end|> sequence.
class EosListStoppingCriteria(StoppingCriteria):
    def __init__(self, eos_sequence=tokenizer.encode("<|im_end|>")):
        self.eos_sequence = eos_sequence

    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        last_ids = input_ids[:, -len(self.eos_sequence):].tolist()
        return self.eos_sequence in last_ids

outputs = model.generate(**inputs, max_length=512, pad_token_id=tokenizer.eos_token_id,
                         stopping_criteria=[EosListStoppingCriteria()])

text = tokenizer.batch_decode(outputs)[0]
print(text)

# Output
'''
<|im_start|>human
Text: Mit Patel here from India<|im_end|>
<|im_start|>gpt
I've read this text.<|im_end|>
<|im_start|>human
what is a name of the person in the text?<|im_end|>
<|im_start|>gpt:
["Mit Patel"]<|im_end|>
'''
```
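
If you serve many requests, PEFT can also fold the adapter weights back into the base model so generation skips the adapter indirection; `merge_and_unload()` is standard PEFT API for LoRA adapters, and the rest simply reuses the objects defined above:

```python
# Optional: merge the LoRA weights into the base model. After this,
# merged_model behaves like a plain transformers model.
merged_model = model.merge_and_unload()
outputs = merged_model.generate(**inputs, max_length=512,
                                pad_token_id=tokenizer.eos_token_id,
                                stopping_criteria=[EosListStoppingCriteria()])
print(tokenizer.batch_decode(outputs)[0])
```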

### Framework versions