Update README.md
Browse files
README.md
CHANGED
@@ -43,8 +43,8 @@ Here's a simple Python code snippet to use this model:
 
 ```
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
-tokenizer = AutoTokenizer.from_pretrained("rapidfhir-procedures")
-model = AutoModelForSeq2SeqLM.from_pretrained("rapidfhir-procedures")
+tokenizer = AutoTokenizer.from_pretrained("fhirfly/rapidfhir-procedures")
+model = AutoModelForSeq2SeqLM.from_pretrained("fhirfly/rapidfhir-procedures")
 prompt = "SQM9PZ2545XHC4TE9RS27V183DD9KPW6JOI53UU5NYY8XRGIW6NZ0227WOAAW6NDNO79SR2K75T6J104XSAKMITKD8B8GPHGLQY424SHKI8OKQXXQN8BG435OKAMLFEN"
 input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)
 # Generate text with a maximum length of 4096 tokens