"""Garbage classification demo.

Downloads the Kaggle "garbage-classification" dataset, loads a ResNet50
checkpoint fine-tuned for 12 waste categories, and serves predictions
through a Gradio web interface.
"""

import os
import shutil
import subprocess
import zipfile

import torch
import torch.nn as nn
from torchvision import transforms, models
from PIL import Image
import gradio as gr

# --- Kaggle API credentials ---
# The Kaggle CLI reads its API key from ~/.kaggle/kaggle.json.
kaggle_dir = os.path.expanduser("~/.kaggle")
if not os.path.exists(kaggle_dir):
    os.makedirs(kaggle_dir)

kaggle_json_path = "kaggle.json"
kaggle_dest_path = os.path.join(kaggle_dir, "kaggle.json")

if not os.path.exists(kaggle_dest_path):
    shutil.copy(kaggle_json_path, kaggle_dest_path)
    os.chmod(kaggle_dest_path, 0o600)  # the CLI requires restrictive permissions
    print("Kaggle API key copied and permissions set.")
else:
    print("Kaggle API key already exists.")

# --- Download the dataset via the Kaggle CLI ---
dataset_name = "mostafaabla/garbage-classification"
print(f"Downloading the dataset: {dataset_name}")

download_command = f"kaggle datasets download -d {dataset_name}"
subprocess.run(download_command, shell=True)

# --- Unzip the dataset ---
dataset_zip = "garbage-classification.zip"
extracted_folder = "./garbage-classification"

if os.path.exists(dataset_zip):
    if not os.path.exists(extracted_folder):
        with zipfile.ZipFile(dataset_zip, 'r') as zip_ref:
            zip_ref.extractall(extracted_folder)
        print("Dataset unzipped successfully!")
    else:
        print("Dataset already unzipped.")
else:
    print(f"Dataset zip file '{dataset_zip}' not found.")


# --- Model setup ---
def load_model():
    """Build ResNet50 with a 12-class head and load the trained weights."""
    model = models.resnet50(weights='DEFAULT')
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, 12)  # one output per waste category

    # Trained checkpoint, expected in the working directory; map to CPU so the
    # app also runs on machines without a GPU.
    model.load_state_dict(torch.load('resnet50_garbage_classification.pth',
                                     map_location=torch.device('cpu')))
    model.eval()
    return model


model = load_model()

# Preprocessing to match the ResNet50 ImageNet input format.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Class labels; the order must match the one used when the model was trained.
class_names = ['battery', 'biological', 'brown-glass', 'cardboard',
               'clothes', 'green-glass', 'metal', 'paper',
               'plastic', 'shoes', 'trash', 'white-glass']

bin_colors = {
    'battery': 'Merah (Red)',
    'biological': 'Cokelat (Brown)',
    'brown-glass': 'Hijau (Green)',
    'cardboard': 'Kuning (Yellow)',
    'clothes': 'Biru (Blue)',
    'green-glass': 'Hijau (Green)',
    'metal': 'Kuning (Yellow)',
    'paper': 'Kuning (Yellow)',
    'plastic': 'Kuning (Yellow)',
    'shoes': 'Biru (Blue)',
    'trash': 'Hitam (Black)',
    'white-glass': 'Putih (White)'
}


def predict(image):
    """Classify a garbage image and return (class name, recommended bin color)."""
    # Gradio delivers the image as a numpy array; ensure 3 RGB channels.
    image = Image.fromarray(image).convert('RGB')
    image = transform(image)
    image = image.unsqueeze(0)  # add batch dimension

    with torch.no_grad():
        outputs = model(image)
        _, predicted = torch.max(outputs, 1)

    class_name = class_names[predicted.item()]
    bin_color = bin_colors[class_name]
    return class_name, bin_color


# Gradio UI. Labels and description are in Indonesian:
# "Unggah Gambar" = "Upload Image", "Jenis Sampah" = "Waste Type",
# "Tong Sampah yang Sesuai" = "Matching Waste Bin".
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy", label="Unggah Gambar"),
    outputs=[
        gr.Textbox(label="Jenis Sampah"),
        gr.Textbox(label="Tong Sampah yang Sesuai")
    ],
    title="Klasifikasi Sampah dengan ResNet50",
    description=(
        "Unggah gambar sampah, dan model akan mengklasifikasikannya ke dalam "
        "salah satu dari 12 kategori bersama dengan warna tempat sampah yang sesuai."
    )
)

iface.launch(share=True)