# Gradio demo: zero-shot classification of fashion-item images with
# Marqo-FashionSigLIP via open_clip.
import gradio as gr
import open_clip
import torch
import requests
import numpy as np
from PIL import Image
# Sidebar content (links + citation) rendered as Markdown next to the classifier.
# Fixes vs. previous revision: mojibake bullet characters ("π", "π»", "π€")
# restored to emoji, and the BibTeX @software entry was missing its closing brace.
sidebar_markdown = """
We have several resources related to our new fashion models.
## Documentation
📖 [Blog Post](https://www.marqo.ai/blog/search-model-for-fashion)
📖 [Use Case Blog Post](https://www.marqo.ai/blog/ecommerce-image-classification-with-marqo-fashionclip)
## Code
💻 [GitHub Repo](https://github.com/marqo-ai/marqo-FashionCLIP)
🤗 [Google Colab](https://colab.research.google.com/drive/1nq978xFJjJcnyrJ2aE5l82GHAXOvTmfd?usp=sharing)
🤗 [Hugging Face Collection](https://huggingface.co/collections/Marqo/marqo-fashionclip-and-marqo-fashionsiglip-66b43f2d09a06ad2368d4af6)
## Citation
If you use Marqo-FashionSigLIP or Marqo-FashionCLIP, please cite us:
```
@software{Jung_Marqo-FashionCLIP_and_Marqo-FashionSigLIP_2024,
author = {Jung, Myong Chol and Clark, Jesse},
month = aug,
title = {{Marqo-FashionCLIP and Marqo-FashionSigLIP}},
url = {https://github.com/marqo-ai/marqo-FashionCLIP},
version = {1.0.0},
year = {2024}
}
```
"""
# List of fashion-item labels (class vocabulary for zero-shot classification).
# Fix: "Poncho" previously appeared twice, which produced a duplicate text
# prompt and a colliding key in predict()'s {label: prob} dict; the second
# occurrence has been removed. Spellings such as "Earing" / "Sunglass" are
# kept verbatim because generate_description() matches these exact strings.
items = [
    "Leggings", "Jogger", "Palazzo", "Cargo", "Dresspants", "Chinos",
    "Dress", "Blouse", "T-Shirt", "Jeans", "Skirt", "Shorts",
    "Sweater", "Cardigan", "Tank Top", "Hoodie", "Coat",
    "Jacket", "Polo Shirt", "Crop Top", "Romper",
    "Overalls", "Blazer", "Sweatpants", "Vest",
    "Dungarees", "Poncho", "Bodysuit", "Maxi Dress",
    "Hat", "Sunglasses", "Glasses", "Shoes", "Sandals",
    "Heels", "Trainers", "Belt", "Tie", "Dress Shirt",
    "Boots", "Slippers", "Sneakers", "Insoles", "Socks",
    "Insulated Jacket", "Fleece", "Rain Jacket", "Running Jacket",
    "Windbreaker", "Shirt", "Graphic Top", "Sweatshirt",
    "Pant", "Legging", "Short", "Skort", "Brief",
    "Sports Bra", "Base Layer Top", "Base Layer Bottom",
    "Swimsuit", "Rashguard", "Water Shorts", "Cover Up",
    "Goggle", "Sun Hat", "Glove", "Mitten", "Leg Gaiter",
    "Bandana", "Neck Gaiter", "Balaclava", "Sunglass",
    "Watch", "Bag", "Boxer", "Swim Trunk", "Ring",
    "Necklace", "Earing", "Pocket Watch", "Smock",
    "Trouser", "Tuxedo", "Cufflinks", "Suspenders",
    "Handkerchief", "Scarf", "Wristband", "Beanie",
    "Fedora", "Beret", "Clutch Bag", "Crossbody Bag",
    "Duffel Bag", "Backpack", "Umbrella", "Flip Flops",
    "Espadrilles", "Loafers", "Moccasins", "Chelsea Boots",
    "Ankle Boots", "Bow Tie", "Tie Clip", "Hair Clip",
    "Headband", "Visor", "Baseball Cap", "Bucket Hat",
    "Fingerless Gloves", "Touchscreen Gloves", "Trench Coat",
    "Peacoat", "Parka", "Bomber Jacket", "Utility Vest",
    "Puffer Jacket", "Cape", "Shrug", "Kimono", "Sarong",
    "Apron", "Bikini", "Wristwatch", "Choker",
    "Brooch", "Anklet", "Toe Ring", "Waist Belt"
]
# Initialize the model and tokenizer once at module import.
# NOTE(review): create_model_and_transforms pulls the checkpoint from the
# Hugging Face Hub on first run (network side effect at import time).
# preprocess_train is unused below; only preprocess_val is applied in predict().
model_name = 'hf-hub:Marqo/marqo-fashionSigLIP'
model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms(model_name)
tokenizer = open_clip.get_tokenizer(model_name)
# Membership tables for generate_description(), hoisted to module level so the
# long literals are built once and checked in O(1) instead of scanning lists
# on every call. The category an item falls into is checked in the same order
# as before, so behavior is unchanged.
_PANTS_ITEMS = frozenset([
    "Leggings", "Jogger", "Cargo", "Chinos", "Palazzo", "Dresspants",
    "Sweatpants", "Pant", "Legging", "Skort", "Trouser",
])
_TOP_ITEMS = frozenset([
    "Dress", "Blouse", "T-Shirt", "Tank Top", "Sweater", "Cardigan", "Hoodie",
    "Coat", "Jacket", "Polo Shirt", "Crop Top", "Romper", "Blazer", "Vest",
    "Bodysuit", "Maxi Dress", "Graphic Top", "Shirt", "Base Layer Top",
    "Base Layer Bottom", "Swimsuit", "Rashguard", "Cover Up", "Smock", "Tuxedo",
])
_HEAD_ITEMS = frozenset(["Hat", "Sunglasses", "Glasses", "Sun Hat", "Goggle", "Balaclava"])
_FOOT_ITEMS = frozenset([
    "Shoes", "Sandals", "Heels", "Trainers", "Boots", "Slippers", "Sneakers",
    "Insoles", "Socks",
])
_PIECE_ITEMS = frozenset([
    "Jeans", "Skirt", "Shorts", "Dungarees", "Poncho", "Overalls", "Brief",
    "Boxer", "Swim Trunk", "Ring", "Necklace", "Earing", "Pocket Watch",
])
# "Boxing Gloves" is kept even though it never appears in `items`, to preserve
# the original predicate exactly.
_HAND_ITEMS = frozenset(["Boxing Gloves", "Glove", "Mitten"])


def generate_description(item):
    """Return a short natural-language prompt describing *item*.

    The prompt phrasing depends on the item's category (pants, tops,
    headwear, footwear, generic clothing piece, handwear) with a generic
    fallback for anything else; these prompts are what gets embedded by
    the text encoder.
    """
    # The substring test mirrors the original code; no current label
    # contains the capitalized word "Pants", so the frozenset carries
    # the real matching.
    if "Pants" in item or item in _PANTS_ITEMS:
        return f"A pair of {item} pants"
    if item in _TOP_ITEMS:
        return f"A {item}"
    if item in _HEAD_ITEMS:
        return f"A {item} worn on the head or face"
    if item in _FOOT_ITEMS:
        return f"A pair of {item} worn on the feet"
    if item in _PIECE_ITEMS:
        return f"A {item} piece of clothing"
    if item in _HAND_ITEMS:
        return f"An item of {item} worn on the hands"
    return f"A fashion item called {item}"
# Pre-compute one tokenized prompt and one normalized text embedding per label
# at startup, so predict() only has to embed the image.
items_desc = [generate_description(item) for item in items]
text = tokenizer(items_desc)

# Encode text features. torch.cuda.amp.autocast() is the deprecated spelling
# (FutureWarning since torch 2.4); torch.amp.autocast("cuda") is equivalent
# and, like the old form, degrades to a no-op with a warning on CPU-only hosts.
with torch.no_grad(), torch.amp.autocast("cuda"):
    text_features = model.encode_text(text)
    # L2-normalize so image @ text.T below yields cosine similarities.
    text_features /= text_features.norm(dim=-1, keepdim=True)
# Prediction function
def predict(inp):
    """Classify a PIL image against the precomputed label embeddings.

    Args:
        inp: PIL image supplied by the Gradio Image component.

    Returns:
        dict mapping the 10 best label strings to their softmax
        probability (float), highest first — the format gr.Label expects.
    """
    image = preprocess_val(inp).unsqueeze(0)
    # torch.amp.autocast("cuda") replaces the deprecated
    # torch.cuda.amp.autocast() — same behavior, no FutureWarning.
    with torch.no_grad(), torch.amp.autocast("cuda"):
        image_features = model.encode_image(image)
        image_features /= image_features.norm(dim=-1, keepdim=True)
        # Cosine similarity scaled by 100, softmaxed over all labels.
        text_probs = (100 * image_features @ text_features.T).softmax(dim=-1)
    # topk avoids materializing and sorting the full {label: prob} dict.
    top_probs, top_idx = text_probs[0].topk(min(10, len(items)))
    return {items[i]: float(p) for p, i in zip(top_probs, top_idx)}
# Gradio interface: page title, description, and clickable example images.
title = "Fashion Item Classifier with Marqo-FashionSigLIP"
description = "Upload an image of a fashion item and classify it using [Marqo-FashionSigLIP](https://huggingface.co/Marqo/marqo-fashionSigLIP)!"
# Example image paths with thumbnails.
# NOTE(review): each row has two values (path, caption) but gr.Examples below
# is given a single input component — confirm this Gradio version accepts the
# extra column, or drop the captions / pass example_labels instead.
# NOTE(review): the "Cargos" caption does not match the "Cargo" label in
# `items`; presumably cosmetic only, since captions are never classified.
examples = [
    ["images/dress.jpg", "Dress"],
    ["images/sweatpants.jpg", "Sweatpants"],
    ["images/t-shirt.jpg", "T-Shirt"],
    ["images/hat.jpg", "Hat"],
    ["images/blouse.jpg", "Blouse"],
    ["images/cargo.jpg", "Cargos"],
    ["images/sunglasses.jpg", "Sunglasses"],
    ["images/polo-shirt.jpg", "Polo Shirt"],
]
# Two-column Blocks layout: docs/links sidebar on the left, the classifier
# (image input, button, examples, label output) on the right.
with gr.Blocks(css="""
    .remove-btn {
        font-size: 24px !important; /* Increase the font size of the cross button */
        line-height: 24px !important;
        width: 30px !important; /* Increase the width */
        height: 30px !important; /* Increase the height */
    }
    """) as demo:
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown(f"# {title}")
            gr.Markdown(description)
            gr.Markdown(sidebar_markdown)
            # Placeholder targeted by CSS elsewhere; renders nothing by itself.
            gr.Markdown(" ", elem_id="vertical-line")
        with gr.Column(scale=2):
            input_image = gr.Image(type="pil", label="Upload Fashion Item Image", height=312)
            predict_button = gr.Button("Classify")
            gr.Markdown("Or click on one of the images below to classify it:")
            # Clicking an example loads its image into input_image.
            gr.Examples(examples=examples, inputs=input_image)
            # Shows the top predictions; predict() returns 10, but only the
            # best 6 are displayed here.
            output_label = gr.Label(num_top_classes=6)
            predict_button.click(predict, inputs=input_image, outputs=output_label)
# Launch the interface.
# NOTE(review): share=True opens a public tunnel when run locally; on
# Hugging Face Spaces it is ignored with a warning.
demo.launch(share=True)
|