moranyanuka committed
Commit: be79c08
Parent(s): cdf1289

Update README.md

README.md CHANGED
@@ -23,7 +23,7 @@ import requests
 from PIL import Image
 from transformers import BlipProcessor, BlipForConditionalGeneration
 
-processor = BlipProcessor.from_pretrained("
+processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
 model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
 
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
@@ -56,7 +56,7 @@ import requests
 from PIL import Image
 from transformers import BlipProcessor, BlipForConditionalGeneration
 
-processor = BlipProcessor.from_pretrained("
+processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
 model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha").to("cuda")
 
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
@@ -88,7 +88,7 @@ import requests
 from PIL import Image
 from transformers import BlipProcessor, BlipForConditionalGeneration
 
-processor = BlipProcessor.from_pretrained("
+processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
 model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha", torch_dtype=torch.float16).to("cuda")
 
 img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
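For readers following the change: all three hunks point the processor at the MOCHA-finetuned BLIP captioning checkpoint; they differ only in device and precision. A minimal runnable sketch of the full-precision flow, assuming the standard BLIP captioning steps from the transformers model card (the generate/decode lines are outside the diff context shown above):

```python
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the processor and the MOCHA-finetuned BLIP captioning model (CPU, full precision).
processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")

# Demo image used in the README snippets.
img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

# Unconditional captioning: preprocess, generate, decode.
inputs = processor(raw_image, return_tensors="pt")
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```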
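The third hunk loads the same checkpoint in half precision on a GPU. A sketch of that variant, assuming a CUDA device is available and that inputs are cast the same way as in the upstream BLIP model card:

```python
import torch
import requests
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-base-mocha")
# Load the weights in float16 and move the model to the GPU.
model = BlipForConditionalGeneration.from_pretrained(
    "moranyanuka/blip-image-captioning-base-mocha", torch_dtype=torch.float16
).to("cuda")

img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
raw_image = Image.open(requests.get(img_url, stream=True).raw).convert('RGB')

# Inputs must be on the same device (and dtype) as the model.
inputs = processor(raw_image, return_tensors="pt").to("cuda", torch.float16)
out = model.generate(**inputs)
print(processor.decode(out[0], skip_special_tokens=True))
```

The second hunk (full-precision GPU) is the same flow with `.to("cuda")` on the model and inputs, without the `torch_dtype=torch.float16` argument.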