import gradio as gr
import open_clip
import torch
import requests
import numpy as np
from PIL import Image
# Sidebar content
sidebar_markdown = """
We have several resources related to our new fashion models.
## Documentation
πŸ“š [Blog Post](https://www.marqo.ai/blog/search-model-for-fashion)
πŸ“ [Use Case Blog Post](https://www.marqo.ai/blog/ecommerce-image-classification-with-marqo-fashionclip)
## Code
πŸ’» [GitHub Repo](https://github.com/marqo-ai/marqo-FashionCLIP)
🀝 [Google Colab](https://colab.research.google.com/drive/1nq978xFJjJcnyrJ2aE5l82GHAXOvTmfd?usp=sharing)
πŸ€— [Hugging Face Collection](https://huggingface.co/collections/Marqo/marqo-fashionclip-and-marqo-fashionsiglip-66b43f2d09a06ad2368d4af6)
## Citation
If you use Marqo-FashionSigLIP or Marqo-FashionCLIP, please cite us:
```
@software{Jung_Marqo-FashionCLIP_and_Marqo-FashionSigLIP_2024,
author = {Jung, Myong Chol and Clark, Jesse},
month = aug,
title = {{Marqo-FashionCLIP and Marqo-FashionSigLIP}},
url = {https://github.com/marqo-ai/marqo-FashionCLIP},
version = {1.0.0},
year = {2024}
}
```
"""
# List of fashion items
items = [
    "Leggings", "Jogger", "Palazzo", "Cargo", "Dresspants", "Chinos",
    "Dress", "Blouse", "T-Shirt", "Jeans", "Skirt", "Shorts",
    "Sweater", "Cardigan", "Tank Top", "Hoodie", "Coat",
    "Jacket", "Polo Shirt", "Crop Top", "Romper",
    "Overalls", "Blazer", "Sweatpants", "Vest",
    "Dungarees", "Poncho", "Bodysuit", "Maxi Dress",
    "Hat", "Sunglasses", "Glasses", "Shoes", "Sandals",
    "Heels", "Trainers", "Belt", "Tie", "Dress Shirt",
    "Boots", "Slippers", "Sneakers", "Insoles", "Socks",
    "Insulated Jacket", "Fleece", "Rain Jacket", "Running Jacket",
    "Windbreaker", "Shirt", "Graphic Top", "Sweatshirt",
    "Pant", "Legging", "Short", "Skort", "Brief",
    "Sports Bra", "Base Layer Top", "Base Layer Bottom",
    "Swimsuit", "Rashguard", "Water Shorts", "Cover Up",
    "Goggle", "Sun Hat", "Glove", "Mitten", "Leg Gaiter",
    "Bandana", "Neck Gaiter", "Balaclava", "Sunglass",
    "Watch", "Bag", "Boxer", "Swim Trunk", "Ring",
    "Necklace", "Earring", "Pocket Watch", "Smock",
    "Trouser", "Tuxedo", "Cufflinks", "Suspenders",
    "Handkerchief", "Scarf", "Wristband", "Beanie",
    "Fedora", "Beret", "Clutch Bag", "Crossbody Bag",
    "Duffel Bag", "Backpack", "Umbrella", "Flip Flops",
    "Espadrilles", "Loafers", "Moccasins", "Chelsea Boots",
    "Ankle Boots", "Bow Tie", "Tie Clip", "Hair Clip",
    "Headband", "Visor", "Baseball Cap", "Bucket Hat",
    "Fingerless Gloves", "Touchscreen Gloves", "Trench Coat",
    "Peacoat", "Parka", "Bomber Jacket", "Utility Vest",
    "Puffer Jacket", "Cape", "Shrug", "Kimono", "Sarong",
    "Apron", "Bikini", "Poncho", "Wristwatch", "Choker",
    "Brooch", "Anklet", "Toe Ring", "Waist Belt"
]
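# These labels define the zero-shot vocabulary: the uploaded image is scored against a text
# prompt built from each label (see generate_description below), so labels can be added or
# removed without retraining the model.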
# Initialize the model and tokenizer
model_name = 'hf-hub:Marqo/marqo-fashionSigLIP'
model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(model_name)
tokenizer = open_clip.get_tokenizer(model_name)
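# create_model_and_transforms downloads the checkpoint from the Hugging Face Hub and returns
# the model plus its train/eval image transforms; only preprocess_val is used here since the
# app runs inference only. The tokenizer turns prompt strings into token-id tensors.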
def generate_description(item):
    if "Pants" in item or item in ["Leggings", "Jogger", "Cargo", "Chinos", "Palazzo", "Dresspants", "Sweatpants", "Pant", "Legging", "Skort", "Trouser"]:
        return f"A pair of {item} pants"
    elif item in ["Dress", "Blouse", "T-Shirt", "Tank Top", "Sweater", "Cardigan", "Hoodie", "Coat", "Jacket", "Polo Shirt", "Crop Top", "Romper", "Blazer", "Vest", "Bodysuit", "Maxi Dress", "Graphic Top", "Shirt", "Base Layer Top", "Base Layer Bottom", "Swimsuit", "Rashguard", "Cover Up", "Smock", "Tuxedo"]:
        return f"A {item}"
    elif item in ["Hat", "Sunglasses", "Glasses", "Sun Hat", "Goggle", "Balaclava"]:
        return f"A {item} worn on the head or face"
    elif item in ["Shoes", "Sandals", "Heels", "Trainers", "Boots", "Slippers", "Sneakers", "Insoles", "Socks"]:
        return f"A pair of {item} worn on the feet"
    elif item in ["Jeans", "Skirt", "Shorts", "Dungarees", "Poncho", "Overalls", "Brief", "Boxer", "Swim Trunk", "Ring", "Necklace", "Earring", "Pocket Watch"]:
        return f"A {item} piece of clothing"
    elif item in ["Boxing Gloves", "Glove", "Mitten"]:
        return f"An item of {item} worn on the hands"
    else:
        return f"A fashion item called {item}"
items_desc = [generate_description(item) for item in items]
text = tokenizer(items_desc)
# Encode text features
with torch.no_grad(), torch.cuda.amp.autocast():
    text_features = model.encode_text(text)
    text_features /= text_features.norm(dim=-1, keepdim=True)
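# The prompt embeddings are computed once at startup and L2-normalized, so classification in
# predict() reduces to a cosine-similarity matrix multiply against a single image embedding.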
# Prediction function
def predict(inp):
    image = preprocess_val(inp).unsqueeze(0)
    with torch.no_grad(), torch.cuda.amp.autocast():
        image_features = model.encode_image(image)
        image_features /= image_features.norm(dim=-1, keepdim=True)
        text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)

    # Sort the confidences and get the top 10
    sorted_confidences = sorted(
        {items[i]: float(text_probs[0, i]) for i in range(len(items))}.items(),
        key=lambda x: x[1],
        reverse=True
    )
    top_10_confidences = dict(sorted_confidences[:10])
    return top_10_confidences
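# Quick sanity check when running locally (assumes the bundled example images are present):
#   print(predict(Image.open("images/dress.jpg")))
# This should print a dict mapping the ten most likely labels to their probabilities.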
# Gradio interface
title = "Fashion Item Classifier with Marqo-FashionSigLIP"
description = "Upload an image of a fashion item and classify it using [Marqo-FashionSigLIP](https://huggingface.co/Marqo/marqo-fashionSigLIP)!"
# Example image paths with thumbnails
examples = [
["images/dress.jpg", "Dress"],
["images/sweatpants.jpg", "Sweatpants"],
["images/t-shirt.jpg", "T-Shirt"],
["images/hat.jpg", "Hat"],
["images/blouse.jpg", "Blouse"],
["images/cargo.jpg", "Cargos"],
["images/sunglasses.jpg", "Sunglasses"],
["images/polo-shirt.jpg", "Polo Shirt"],
]
with gr.Blocks(css="""
.remove-btn {
    font-size: 24px !important; /* Increase the font size of the cross button */
    line-height: 24px !important;
    width: 30px !important; /* Increase the width */
    height: 30px !important; /* Increase the height */
}
""") as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(f"# {title}")
            gr.Markdown(description)
            gr.Markdown(sidebar_markdown)
            gr.Markdown(" ", elem_id="vertical-line")  # Add an empty Markdown with a custom ID

        with gr.Column(scale=2):
            input_image = gr.Image(type="pil", label="Upload Fashion Item Image", height=312)
            predict_button = gr.Button("Classify")
            gr.Markdown("Or click on one of the images below to classify it:")
            gr.Examples(examples=examples, inputs=input_image)

            # with gr.Column(scale=3):
            output_label = gr.Label(num_top_classes=6)

    predict_button.click(predict, inputs=input_image, outputs=output_label)
# Launch the interface
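# Note: share=True only matters when running locally (it creates a temporary public link);
# on Hugging Face Spaces the app is hosted either way.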
demo.launch(share=True)