import os

import streamlit as st
import torch
from PIL import Image
from huggingface_hub import login

from conch.open_clip_custom import create_model_from_pretrained, get_tokenizer, tokenize

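# Authenticate with the Hugging Face Hub: the CONCH checkpoint is gated, so a
# valid access token is expected in the 'hf' environment variable.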
hf = os.getenv('hf')
login(token=hf)


@st.cache_resource
def load_model():
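    # Cache the model and its preprocessing transform across Streamlit reruns
    # so the checkpoint is downloaded and loaded only once per session.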
    model, preprocess = create_model_from_pretrained('conch_ViT-B-16', "hf_hub:MahmoodLab/conch", hf_auth_token=hf)
    return model, preprocess


model, preprocess = load_model()

st.title("CONCH - Image Captioning and Retrieval")

uploaded_file = st.file_uploader("Upload an image", type=["jpg", "png", "jpeg"])
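
# Track the image embedding so the similarity step at the end can check that an
# image was actually uploaded before comparing it against the text prompts.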
image_embs = None

if uploaded_file:
    # Convert to RGB so RGBA or grayscale uploads match the model's expected input.
    image = Image.open(uploaded_file).convert("RGB")
    st.image(image, caption='Uploaded Image', use_column_width=True)
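
    # Apply CONCH's preprocessing transform and add a batch dimension.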
    image_tensor = preprocess(image).unsqueeze(0)

    with torch.no_grad():
        image_embs = model.encode_image(image_tensor, proj_contrast=True, normalize=True)

    st.write("Image embeddings generated successfully.")

num_inputs = st.number_input("How many text inputs?", min_value=1, max_value=10, value=3)

input_list = []
for i in range(num_inputs):
    user_input = st.text_input(f"Input Text {i+1}")
    input_list.append(user_input)
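
# Keep only the prompts the user actually filled in; tokenizing empty strings
# would add meaningless rows to the similarity scores below.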
texts = [text for text in input_list if text.strip()]

if texts:
    tokenizer = get_tokenizer()
    text_tokens = tokenize(texts=texts, tokenizer=tokenizer)
    with torch.no_grad():
        text_embs = model.encode_text(text_tokens)

    st.write("Text embeddings generated successfully.")

    if image_embs is not None:
        # One cosine-similarity score per prompt, comparing the image embedding
        # against each text embedding.
        similarity = torch.cosine_similarity(image_embs, text_embs)
        st.write("Similarity check completed.")
        st.write(similarity)
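
        # Optional follow-up (a sketch, not part of the original app): CONCH's
        # zero-shot demo rescales image-text similarities by the model's learned
        # temperature and softmaxes over the prompts, giving a probability per
        # prompt. This assumes the checkpoint exposes a CoCa-style `logit_scale`;
        # verify that attribute exists before enabling.
        # probs = (image_embs @ text_embs.T * model.logit_scale.exp()).softmax(dim=-1)
        # st.write({t: float(p) for t, p in zip(texts, probs[0])})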