import torch
from transformers import pipeline, AutoTokenizer
import gradio as gr

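# Tokenizer with tokenization-space cleanup disabled (not referenced by the zero-shot pipeline below).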
tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
tokenizer.clean_up_tokenization_spaces = False

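# Zero-shot image classification pipeline loaded from the checkpoint below.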
clip_checkpoint = "DrChamyoung/Powerviewwtiten"
clip_detector = pipeline(model=clip_checkpoint, task="zero-shot-image-classification")

def postprocess(output):
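    # Convert the pipeline's list of {"label", "score"} dicts into the {label: score} mapping gr.Label expects.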
    return {out["label"]: float(out["score"]) for out in output}

def infer(image, candidate_labels):
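    # Split the comma-separated label string and trim whitespace around each label.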
    candidate_labels = [label.strip() for label in candidate_labels.split(",")]
    clip_out = clip_detector(image, candidate_labels=candidate_labels)
    return postprocess(clip_out)

with gr.Blocks() as app:
    gr.Markdown("# Custom Classification")
    with gr.Row():
        with gr.Column():
            image_input = gr.Image(type="pil")
            text_input = gr.Textbox(label="Input a comma-separated list of labels")
            run_button = gr.Button("Run")

        with gr.Column():
            clip_output = gr.Label(label="Output", num_top_classes=3)

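    # Example rows; with cache_examples=True, Gradio runs infer on each row at startup, so image_8.webp must be available locally.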
    examples = [
        ["image_8.webp", "girl, boy, lgbtq"],
        ["image_8.webp", "seo jun park, dr chamyoung , dr stone"],
        ["image_8.webp", "human , dog, god"],
        ["image_8.webp", "asian , russian , american, indian , european"],
    ]
    gr.Examples(
        examples=examples,
        inputs=[image_input, text_input],
        outputs=[clip_output],
        fn=infer,
        cache_examples=True,
    )

    run_button.click(
        fn=infer,
        inputs=[image_input, text_input],
        outputs=[clip_output],
    )

app.launch()