# Gradio demo: image classification with DeepMind's Perceiver
# (deepmind/vision-perceiver-conv) via the Hugging Face Transformers pipeline.
import gradio as gr
from transformers import ImageClassificationPipeline, PerceiverForImageClassificationConvProcessing, PerceiverFeatureExtractor
import torch

# Download two example images (a COCO cat photo and a dalmatian) to use as demo examples.
torch.hub.download_url_to_file('http://images.cocodataset.org/val2017/000000039769.jpg', 'cats.jpg')
torch.hub.download_url_to_file('https://storage.googleapis.com/perceiver_io/dalmation.jpg', 'dog.jpg')
 
# Load the Perceiver image-classification checkpoint and its feature extractor from the Hub.
feature_extractor = PerceiverFeatureExtractor.from_pretrained("deepmind/vision-perceiver-conv")
model = PerceiverForImageClassificationConvProcessing.from_pretrained("deepmind/vision-perceiver-conv")

# Wrap model and feature extractor in a Transformers image-classification pipeline.
image_pipe = ImageClassificationPipeline(model=model, feature_extractor=feature_extractor)

def classify_image(image):
    results = image_pipe(image)
    # Convert the pipeline's list of {label, score} dicts into the
    # {label: score} mapping that gr.outputs.Label expects.
    output = {}
    for prediction in results:
        output[prediction['label']] = prediction['score']
    return output

# Gradio UI: a PIL image goes in, the top-5 predicted labels come out;
# the two downloaded images are offered as clickable examples.
image = gr.inputs.Image(type="pil")
label = gr.outputs.Label(num_top_classes=5)
examples = [["cats.jpg"], ["dog.jpg"]]

gr.Interface(fn=classify_image, inputs=image, outputs=label, examples=examples, enable_queue=True).launch(debug=True)
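# Optional sanity check (a sketch, not part of the original app): classify_image can be
# exercised directly on one of the downloaded images before the UI is launched, e.g.:
#
#   from PIL import Image
#   preds = classify_image(Image.open("cats.jpg"))   # {label: score, ...}
#   print(max(preds, key=preds.get))                 # most likely label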