kavyashankar20 commited on
Commit
973ce57
1 Parent(s): a6458ba

Upload 2 files

Browse files
Files changed (2) hide show
  1. app.py +39 -0
  2. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
from PIL import Image
from transformers import BlipProcessor, BlipForConditionalGeneration
import requests
from io import BytesIO


@st.cache_resource
def load_model():
    """Load the BLIP captioning processor and model once per server process.

    Streamlit re-executes the entire script on every user interaction, so
    loading at module level re-instantiates the (large) model on each rerun.
    ``st.cache_resource`` memoizes the pair for the lifetime of the server.

    Returns:
        tuple: (BlipProcessor, BlipForConditionalGeneration)
    """
    processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
    model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
    return processor, model


# Keep the original module-level names so the rest of the script is unchanged.
processor, model = load_model()
def generate_caption(image):
    """Return a text caption for *image* using the module-level BLIP model.

    The image is encoded to PyTorch tensors, run through the generation
    head, and the first (only) generated sequence is decoded to a string.
    """
    encoded = processor(image, return_tensors="pt")
    generated_ids = model.generate(**encoded)
    first_sequence = generated_ids[0]
    return processor.decode(first_sequence, skip_special_tokens=True)
st.title("Image Captioning")


def _caption_and_show(image, label):
    """Display *image* under *label* and write its generated caption."""
    st.image(image, caption=label, use_column_width=True)
    caption = generate_caption(image)
    st.write(f"Caption: {caption}")


# URL input
url = st.text_input("Enter image URL (optional):")
if url:
    try:
        # timeout= keeps the app from hanging indefinitely on a dead host;
        # raise_for_status() surfaces HTTP errors (e.g. a 404 page) instead
        # of handing non-image bytes to PIL.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        image = Image.open(BytesIO(response.content)).convert("RGB")
        _caption_and_show(image, "Image from URL")
    except Exception as e:
        st.error(f"Error fetching image from URL: {e}")

# File upload
uploaded_file = st.file_uploader("Upload an image file (optional):", type=["jpg", "jpeg", "png"])
if uploaded_file:
    try:
        # A truncated/corrupt upload raises in PIL; show an error instead of
        # crashing the script run.
        image = Image.open(uploaded_file).convert("RGB")
        _caption_and_show(image, "Uploaded Image")
    except Exception as e:
        st.error(f"Error reading uploaded image: {e}")

if not url and not uploaded_file:
    st.write("Please enter an image URL or upload an image file to get a caption.")
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ transformers
2
+ torch
3
+ pillow
4
+ streamlit