import requests
import torch
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration

# Load the BLIP captioning processor and model once at startup.
# Use the GPU when one is available; otherwise fall back to the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-large").to(device)


def caption_image(image_url):
    try:
        # Fetch the image over HTTP and convert it to RGB for the processor.
        raw_image = Image.open(requests.get(image_url, stream=True).raw).convert("RGB")

        # Conditional captioning: the model continues the supplied text prompt.
        text = "a photography of"
        inputs = processor(raw_image, text, return_tensors="pt").to(device)
        out = model.generate(**inputs)
        conditional_caption = processor.decode(out[0], skip_special_tokens=True)

        # Unconditional captioning: no prompt, the model describes the image freely.
        inputs = processor(raw_image, return_tensors="pt").to(device)
        out = model.generate(**inputs)
        unconditional_caption = processor.decode(out[0], skip_special_tokens=True)

        print("Conditional Caption:", conditional_caption)
        print("Unconditional Caption:", unconditional_caption)
    except Exception as e:
        print(f"Error occurred: {e}")


if __name__ == "__main__":
    image_url = input("Enter the image URL: ")
    caption_image(image_url)