gabrielmotablima committed
Commit e41e34f
1 Parent(s): c2b972b
Update README.md
README.md CHANGED
@@ -40,12 +40,12 @@ Use the code below to get started with the model.
 import requests
 from PIL import Image
 
-from transformers import AutoTokenizer,
+from transformers import AutoTokenizer, AutoImageProcessor, VisionEncoderDecoderModel
 
 # load a fine-tuned image captioning model and corresponding tokenizer and image processor
 model = VisionEncoderDecoderModel.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")
 tokenizer = AutoTokenizer.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")
-image_processor =
+image_processor = AutoImageProcessor.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")
 
 # perform inference on an image
 url = "http://images.cocodataset.org/val2017/000000039769.jpg"
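The hunk ends at the image URL, so the inference part of the README example is not shown in this diff. For context, a minimal sketch of how the updated snippet would typically continue is given below, following the standard VisionEncoderDecoderModel generation pattern; the preprocessing call, the generate arguments, and the decoding step are illustrative assumptions, not lines from this commit.

import requests
from PIL import Image

from transformers import AutoTokenizer, AutoImageProcessor, VisionEncoderDecoderModel

# load the fine-tuned image captioning model with its tokenizer and image processor
# (these lines match the updated README)
model = VisionEncoderDecoderModel.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")
tokenizer = AutoTokenizer.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")
image_processor = AutoImageProcessor.from_pretrained("laicsiifes/swin-gpt2-flickr30k-pt-br")

# perform inference on an image
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# assumed continuation (not part of the diff): preprocess the image into pixel
# values, generate caption token ids, and decode them into text
pixel_values = image_processor(images=image, return_tensors="pt").pixel_values
generated_ids = model.generate(pixel_values, max_new_tokens=25)
caption = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
print(caption)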