Update README.md
README.md
CHANGED
@@ -20,6 +20,8 @@ pipeline_tag: text-generation

This model uses task classification, and the conversation is between USER and Answer (or AI).

# Using the Model in Hugging Face Transformers

## EG

```text
…
```
@@ -60,6 +62,29 @@ with torch.no_grad():

```python
…
print(verify_text(output[0]['generated_text']))

```
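The diff shows only the tail of this example, so the code that produces `output` is not visible here. For context, a minimal sketch of a typical text-generation pipeline call that yields an `output[0]['generated_text']` field like the one printed above; the `gpt2` id is a placeholder rather than this repo's model, and `verify_text` is the repo's own helper, so it is not reproduced here:

```python
# Hypothetical reconstruction for context only; the original loading code
# is truncated in this diff, and the model id below is a placeholder.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")

output = generator("USER: hello", max_new_tokens=32)
print(output[0]["generated_text"])  # the pipeline returns a list of dicts with this key
```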

### Generate Method to Get the Response Text, Token by Token

```python
import torch
from IPython.display import clear_output  # notebook helper used to refresh the streamed printout


def generate(model_, input_ids_, tokeinzer_, max_length: int = 256,
             temperature: float = 1.0, eos_token_id: int = 2):
    with torch.no_grad():
        # Rough offset used to hide the prompt in the streamed printout
        # (note: this is a token count applied as a character index on the decoded string).
        before_start = len(input_ids_[0]) + 1
        for _ in range(max_length):
            out = model_(
                input_ids=input_ids_,
                return_dict=True,
            )
            # Temperature-scaled softmax over the logits at the last position.
            opa = torch.nn.functional.softmax(out.logits[:, -1, :] / temperature, dim=-1)
            # Sample the next token id from the distribution.
            Camila = torch.multinomial(opa, 1)
            input_ids_ = torch.cat([input_ids_, Camila], -1)
            clear_output(wait=True)
            print(f"\r{tokeinzer_.decode(input_ids_[0], skip_special_tokens=True)[before_start:]}", end='')
            if Camila[0].item() == eos_token_id:
                break
            # Stream the newly sampled token back to the caller.
            yield tokeinzer_.decode(Camila[0], skip_special_tokens=True)
        # In a generator, this value is carried on the final StopIteration.
        return f"{tokeinzer_.decode(input_ids_[0], skip_special_tokens=True)[before_start:]}"
```
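Because `generate` yields each sampled token as it goes, it can be consumed as an ordinary Python generator. A minimal sketch of a call, assuming a generic causal-LM checkpoint (`gpt2` here is a placeholder, not this repo's model id):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder checkpoint; substitute this repo's model id.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

input_ids = tokenizer("USER: hello", return_tensors="pt").input_ids

# Iterate the generator to stream tokens; the function also prints progress itself.
for token_text in generate(model, input_ids, tokenizer, max_length=64,
                           temperature=0.8, eos_token_id=tokenizer.eos_token_id):
    pass  # token_text holds each newly sampled piece of text
```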

### Result