import gradio as gr
from transformers import AutoImageProcessor, AutoModelForImageClassification
import torch
from PIL import Image
# Load the image processor and model
processor = AutoImageProcessor.from_pretrained("linkanjarad/mobilenet_v2_1.0_224-plant-disease-identification")
model = AutoModelForImageClassification.from_pretrained("linkanjarad/mobilenet_v2_1.0_224-plant-disease-identification")
# Define the function to process the image and make predictions
def classify_leaf_disease(image):
    # Preprocess the image
    inputs = processor(images=image, return_tensors="pt")
    # Run the model on the image
    with torch.no_grad():
        outputs = model(**inputs)
    # Get the predicted label and confidence score
    probs = torch.softmax(outputs.logits, dim=1)
    predicted_class_idx = probs.argmax(dim=1).item()
    predicted_label = model.config.id2label[predicted_class_idx]
    confidence_score = probs[0][predicted_class_idx].item()
    # Format the output
    return predicted_label, f"{confidence_score:.2f}", image
# Create Gradio Interface
interface = gr.Interface(
    fn=classify_leaf_disease,
    inputs=gr.Image(type="pil"),
    outputs=[
        gr.Textbox(label="Disease Name"),
        gr.Textbox(label="Confidence Score"),
        gr.Image(type="pil", label="Uploaded Image")
    ],
    title="Leaf Disease Identification",
    description="Upload an image of a plant leaf, and the model will identify the disease and report a confidence score."
)
# Launch the app
if __name__ == "__main__":
    interface.launch()
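
# Optional local sanity check (a minimal sketch, not part of the Space's UI).
# "test_leaf.jpg" is a hypothetical local image path used only for illustration:
#
#   from PIL import Image
#   label, score, _ = classify_leaf_disease(Image.open("test_leaf.jpg"))
#   print(label, score)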