amirkhanbloch committed on
Commit f15e691 • 1 Parent(s): 2ac4465

Update app.py

Files changed (1)
  1. app.py +54 -111
app.py CHANGED
@@ -1,115 +1,58 @@
-import gradio as gr
-import numpy as np
 from PIL import Image
-from tensorflow.keras import models
-from tensorflow.keras.preprocessing.image import img_to_array
-import matplotlib.pyplot as plt
-import io
-import base64
-
-
-def inference(image, model_choice):
-    label_map = {'cassava-healthy': 0, 'cassava-not-healthy:bacteria blight': 1}
-    inverse_map = {v: k for k, v in label_map.items()}
-
-    image = Image.fromarray(image.astype('uint8'), 'RGB')
-    image = image.resize((64, 64))
-    image_arr = img_to_array(image)
-    image_arr /= 255
-    image_arr = image_arr[np.newaxis, :]
-
-    if model_choice == "Cassava Model 🍃":
-        model = models.load_model("cassava_model.keras")
-    else:
-        model = models.load_model("/content/maize_model.keras")
-    proba = model.predict(image_arr)
-    label = (proba > 0.5).squeeze().astype(int)
-
-    result = {
-        "label": inverse_map.get(int(label)),
-        "probability": float(proba.squeeze())
-    }
-
-    # Create visualization
-    fig, ax = plt.subplots(figsize=(8, 6))
-    ax.bar(['Healthy 🌿', 'Not Healthy 🍂'], [1 - result['probability'], result['probability']], color=['#2ecc71', '#e74c3c'])
-    ax.set_ylim(0, 1)
-    ax.set_ylabel('Probability')
-    ax.set_title('Plant Health Prediction 🔍', fontsize=16, fontweight='bold')
-    ax.spines['top'].set_visible(False)
-    ax.spines['right'].set_visible(False)
-    plt.tight_layout()
-
-    # Convert plot to image
-    buf = io.BytesIO()
-    plt.savefig(buf, format='png')
-    buf.seek(0)
-    plot_image = Image.open(buf)
-
-    return result["label"], f"{result['probability']:.2%} of illness(bacteria blight)", plot_image
 
-# Custom CSS for styling
-custom_css = """
-#component-0 {
-    max-width: 730px;
-    margin: auto;
-    padding: 1.5rem;
-    border-radius: 10px;
-    background: linear-gradient(135deg, #f6d365 0%, #fda085 100%);
-    box-shadow: 0 10px 20px rgba(0,0,0,0.19), 0 6px 6px rgba(0,0,0,0.23);
-}
-#component-1 {
-    border-radius: 10px;
-    overflow: hidden;
-}
-#component-5 {
-    border-radius: 10px;
-    overflow: hidden;
-    box-shadow: 0 4px 6px rgba(0,0,0,0.1);
-}
-.label {
-    font-size: 18px !important;
-    color: #2c3e50;
-    font-weight: bold;
-}
-.output-class {
-    font-size: 24px !important;
-    color: #2980b9;
-    font-weight: bold;
-}
-.output-prob {
-    font-size: 20px !important;
-    color: #16a085;
-}
 """
 
-# Gradio interface
-with gr.Blocks(css=custom_css) as demo:
-    gr.Markdown("# 🌱 Crop Diseases Detector 🕵️‍♂️")
-    gr.Markdown("Upload an image of a cassava plant and let's check its health!")
-
-    with gr.Row():
-        input_image = gr.Image(type="numpy", label="📸 Upload or Capture Image")
-        output_image = gr.Image(type="pil", label="🖼️ Health Prediction Visualization")
-
-    model_choice = gr.Dropdown(["Cassava Model 🍃"], label="🤖 Select Model", value="Cassava Model 🍃")
-
-    with gr.Row():
-        detect_btn = gr.Button("🔍 Detect Plant Health", variant="primary")
-
-    output_label = gr.Textbox(label="🏷️ Diagnosis")
-    output_confidence = gr.Textbox(label="📊 Confidence")
-
-    detect_btn.click(
-        inference,
-        inputs=[input_image, model_choice],
-        outputs=[output_label, output_confidence, output_image]
-    )
-
-    gr.Markdown("## How to use:")
-    gr.Markdown("1. 📤 Upload an image or 📸 take a picture of a cassava plant")
-    gr.Markdown("2. 🤖 Select the model you want to use")
-    gr.Markdown("3. 🔍 Click 'Detect Plant Health' to get the results")
-    gr.Markdown("4. 📊 View the diagnosis, confidence score, and health prediction chart")
-
-demo.launch(debug=True)
 
+from dotenv import load_dotenv
+import os
+import google.generativeai as genai
 from PIL import Image
+import gradio as gr
 
+# Load all the environment variables
+load_dotenv()
+genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
+
+# Function to load Google Gemini Pro Vision API and get a response
+def get_gemini_response(input_prompt, uploaded_image):
+    model = genai.GenerativeModel('gemini-1.5-flash')
+
+    # Convert the uploaded image to bytes
+    bytes_data = uploaded_image.read()
+    image_parts = [
+        {
+            "mime_type": uploaded_image.type,
+            "data": bytes_data
+        }
+    ]
+
+    # Generate the content
+    response = model.generate_content([input_prompt, image_parts[0], ""])
+    return response.text
+
+# Input prompt for the model
+input_prompt = """
+"You are an expert in computer vision and agriculture who can easily predict the disease of the plant. "
+"Analyze the following image and provide 6 outputs in a structured table format: "
+"1. Crop in the image, "
+"2. Whether it is infected or healthy, "
+"3. Type of disease (if any), "
+"4. How confident out of 100% whether image is healthy or infected, "
+"5. Reason for the disease such as whether it is happening due to fungus, bacteria, insect bite, poor nutrition, etc., "
+"6. Precautions for it."
 """
 
+# Define the Gradio interface
+def predict_crop_health(uploaded_image):
+    if uploaded_image is None:
+        return "No image uploaded."
+
+    response = get_gemini_response(input_prompt, uploaded_image)
+    return response
+
+# Create a Gradio interface
+iface = gr.Interface(
+    fn=predict_crop_health,
+    inputs=gr.Image(type="file", label="Upload Crop Image"),
+    outputs="text",
+    title="Gemini Crop Disease Detection App",
+    description="Upload an image of a crop to predict its health and identify any diseases."
+)
+
+# Launch the Gradio app
+iface.launch()
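A note on the added app.py: recent Gradio releases no longer accept gr.Image(type="file") (the supported values are "numpy", "pil", and "filepath"), and the handler calls uploaded_image.read() and uploaded_image.type, which assumes a file-like upload object rather than the image value Gradio passes to the function. Below is a minimal sketch of one way the same flow could be wired up with a PIL input, which generate_content in the google-generativeai SDK accepts directly. It reuses the committed names (get_gemini_response, predict_crop_health, input_prompt) but abridges the prompt text; it is an illustrative adaptation, not part of this commit.

# Hypothetical adaptation (not part of the commit): pass a PIL image straight
# to Gemini instead of a file-like object, and use an image type that current
# Gradio releases accept. Assumes google-generativeai, gradio, and python-dotenv
# are installed and GOOGLE_API_KEY is set in the environment or a .env file.
import os

from dotenv import load_dotenv
import google.generativeai as genai
import gradio as gr

load_dotenv()
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))

# Abridged stand-in for the committed input_prompt text.
input_prompt = (
    "You are an expert in computer vision and agriculture. Analyze the image and "
    "return a structured table covering: crop, infected or healthy, disease type, "
    "confidence out of 100%, likely cause, and precautions."
)

def get_gemini_response(prompt, image):
    # generate_content accepts a mixed list of text parts and PIL images.
    model = genai.GenerativeModel("gemini-1.5-flash")
    response = model.generate_content([prompt, image])
    return response.text

def predict_crop_health(image):
    # With type="pil", Gradio passes a PIL.Image.Image (or None if nothing was uploaded).
    if image is None:
        return "No image uploaded."
    return get_gemini_response(input_prompt, image)

iface = gr.Interface(
    fn=predict_crop_health,
    inputs=gr.Image(type="pil", label="Upload Crop Image"),  # "file" is not a supported type here
    outputs="text",
    title="Gemini Crop Disease Detection App",
    description="Upload an image of a crop to predict its health and identify any diseases.",
)

if __name__ == "__main__":
    iface.launch()

Passing the PIL image directly also removes the need to build the mime_type/data parts dict by hand.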