Upload app.py
app.py
ADDED
@@ -0,0 +1,59 @@
from datasets import load_dataset
import gradio as gr
import torch
from transformers import AutoFeatureExtractor, AutoModelForImageClassification

# Load the beans dataset; it is only needed here to recover the label names.
dataset = load_dataset("beans")

# Load the fine-tuned feature extractor and classification model from the local checkpoint.
extractor = AutoFeatureExtractor.from_pretrained("saved_model_files")
model = AutoModelForImageClassification.from_pretrained("saved_model_files")

labels = dataset['train'].features['labels'].names

def classify(im):
    # Preprocess the input image and run it through the fine-tuned classifier.
    features = extractor(im, return_tensors='pt')
    logits = model(features["pixel_values"]).logits
    probability = torch.nn.functional.softmax(logits, dim=-1)
    probs = probability[0].detach().numpy()
    # Map each class name to its predicted probability for the Label output.
    confidences = {label: float(probs[i]) for i, label in enumerate(labels)}
    return confidences

title = """<h1 id="title">Bean plant health prediction from leaf images with a ViT image classifier</h1>"""

description = """
Use case: a farming company is having issues with diseases affecting its bean plants. The farmers have to monitor the leaves constantly so that any plant showing signs of disease can be treated immediately.
We are asked to build a machine-learning app that can be deployed on a drone to quickly identify diseased plants.

Solution: a leaf classification app that uses image classification to quickly identify diseased plants.

- The dataset used for fine-tuning the model: [Beans](https://huggingface.co/datasets/beans).
- The model used for classifying the images: [Vision Transformer (base-sized model)](https://huggingface.co/google/vit-base-patch16-224).
"""

css = '''
h1#title {
  text-align: center;
}
'''
theme = gr.themes.Soft()
demo = gr.Blocks(css=css, theme=theme)

with demo:
    gr.Markdown(title)
    gr.Markdown(description)

    interface = gr.Interface(fn=classify, inputs="image", outputs="label")

    gr.Markdown("## Example Images")
    gr.Examples(["images/1.png", "images/2.png", "images/3.png"],
                inputs="image",
                outputs="label",
                fn=classify,
                cache_examples=True
                )

demo.launch()
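The app loads a fine-tuned checkpoint from `saved_model_files`, which is not part of this commit. Below is a minimal sketch of how such a checkpoint could be produced from the beans dataset and google/vit-base-patch16-224 named in the description; the training workflow and hyperparameters here are illustrative assumptions, not taken from this repo.

# Assumed fine-tuning sketch (not part of this commit): fine-tunes
# google/vit-base-patch16-224 on the beans dataset and writes the
# "saved_model_files" checkpoint that app.py loads.
import torch
from datasets import load_dataset
from transformers import (AutoFeatureExtractor, AutoModelForImageClassification,
                          Trainer, TrainingArguments)

ds = load_dataset("beans")
label_names = ds["train"].features["labels"].names

extractor = AutoFeatureExtractor.from_pretrained("google/vit-base-patch16-224")
model = AutoModelForImageClassification.from_pretrained(
    "google/vit-base-patch16-224",
    num_labels=len(label_names),
    ignore_mismatched_sizes=True,  # replace the 1000-class ImageNet head with a 3-class head
)

def preprocess(batch):
    # Turn the PIL images into the pixel_values tensors the ViT model expects.
    inputs = extractor([img for img in batch["image"]], return_tensors="pt")
    inputs["labels"] = batch["labels"]
    return inputs

train_ds = ds["train"].with_transform(preprocess)
eval_ds = ds["validation"].with_transform(preprocess)

def collate(examples):
    return {
        "pixel_values": torch.stack([ex["pixel_values"] for ex in examples]),
        "labels": torch.tensor([ex["labels"] for ex in examples]),
    }

args = TrainingArguments(
    output_dir="vit-beans",
    remove_unused_columns=False,  # keep the "image" column so the transform can see it
    per_device_train_batch_size=16,
    learning_rate=2e-4,
    num_train_epochs=3,
)

trainer = Trainer(model=model, args=args, data_collator=collate,
                  train_dataset=train_ds, eval_dataset=eval_ds)
trainer.train()

# Save the model and feature extractor where app.py expects to find them.
model.save_pretrained("saved_model_files")
extractor.save_pretrained("saved_model_files")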
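With the checkpoint in place, the prediction function can be sanity-checked outside the UI; for example, assuming the objects defined in app.py are in scope:

# Assumed quick check: classify() returns a dict mapping each beans label
# (angular_leaf_spot, bean_rust, healthy) to its predicted probability.
sample = dataset["validation"][0]["image"]  # a PIL image from the beans dataset
print(classify(sample))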