|
import os |
|
import shutil |
|
import subprocess |
|
import zipfile |
|
import time |
|
import torch |
|
import torch.nn as nn |
|
import torch.optim as optim |
|
from torchvision import datasets, transforms, models |
|
from torch.optim import lr_scheduler |
|
import subprocess |
|
import zipfile |
|
from PIL import Image |
|
import gradio as gr |
|
|
|
|
|
|
|
# --- Kaggle API credential setup -------------------------------------------
# The Kaggle CLI reads its key from ~/.kaggle/kaggle.json; copy a local
# kaggle.json there once so the download below can authenticate.
kaggle_dir = os.path.expanduser("~/.kaggle")
# exist_ok=True is idempotent and avoids the exists()/makedirs() race.
os.makedirs(kaggle_dir, exist_ok=True)

kaggle_json_path = "kaggle.json"
kaggle_dest_path = os.path.join(kaggle_dir, "kaggle.json")

if not os.path.exists(kaggle_dest_path):
    shutil.copy(kaggle_json_path, kaggle_dest_path)
    # The Kaggle CLI rejects world-readable keys; restrict to owner rw.
    os.chmod(kaggle_dest_path, 0o600)
    print("Kaggle API key copied and permissions set.")
else:
    print("Kaggle API key already exists.")
|
|
|
|
|
# --- Dataset download -------------------------------------------------------
dataset_name = "mostafaabla/garbage-classification"
print(f"Downloading the dataset: {dataset_name}")

# Pass argv as a list (shell=False): no shell-injection risk and no quoting
# problems if the dataset slug ever contains shell metacharacters.
download_command = ["kaggle", "datasets", "download", "-d", dataset_name]

# check=False mirrors the original behaviour: a failed download is reported
# by the CLI but does not raise; the zip-existence check below handles it.
subprocess.run(download_command, check=False)
|
|
|
|
|
# --- Archive extraction ------------------------------------------------------
# Unpack the downloaded archive, skipping work that is already done.
dataset_zip = "garbage-classification.zip"
extracted_folder = "./garbage-classification"

if not os.path.exists(dataset_zip):
    # Nothing to do without the archive (download likely failed).
    print(f"Dataset zip file '{dataset_zip}' not found.")
elif os.path.exists(extracted_folder):
    # Target folder present from a previous run; do not re-extract.
    print("Dataset already unzipped.")
else:
    with zipfile.ZipFile(dataset_zip, 'r') as zip_ref:
        zip_ref.extractall(extracted_folder)
    print("Dataset unzipped successfully!")
|
|
|
|
|
|
|
|
|
import pickle

# Hard-coded per-epoch metrics from a completed 20-epoch training run,
# persisted to disk so plots can be regenerated without retraining.
_train_loss = [
    0.9568, 0.6937, 0.5917, 0.5718, 0.5109,
    0.4824, 0.4697, 0.3318, 0.2785, 0.2680,
    0.2371, 0.2333, 0.2198, 0.2060, 0.1962,
    0.1951, 0.1880, 0.1912, 0.1811, 0.1810,
]
_train_acc = [
    0.7011, 0.7774, 0.8094, 0.8146, 0.8331,
    0.8452, 0.8447, 0.8899, 0.9068, 0.9114,
    0.9216, 0.9203, 0.9254, 0.9306, 0.9352,
    0.9346, 0.9368, 0.9353, 0.9396, 0.9409,
]
_val_loss = [
    0.4934, 0.3939, 0.4377, 0.3412, 0.2614,
    0.2966, 0.2439, 0.1065, 0.0926, 0.0797,
    0.0738, 0.0639, 0.0555, 0.0560, 0.0490,
    0.0479, 0.0455, 0.0454, 0.0438, 0.0427,
]
_val_acc = [
    0.8481, 0.8734, 0.8663, 0.8915, 0.9172,
    0.9011, 0.9221, 0.9649, 0.9714, 0.9759,
    0.9762, 0.9791, 0.9827, 0.9812, 0.9843,
    0.9850, 0.9852, 0.9854, 0.9854, 0.9866,
]

# Same keys and values as before, assembled from the named series above.
history = {
    'train_loss': _train_loss,
    'train_acc': _train_acc,
    'val_loss': _val_loss,
    'val_acc': _val_acc,
}

with open('training_history.pkl', 'wb') as f:
    pickle.dump(history, f)

print('Training history saved as training_history.pkl')
|
|
|
|
|
|
|
|
|
import torch |
|
import torch.nn as nn |
|
from torchvision import models, transforms |
|
from PIL import Image |
|
import gradio as gr |
|
|
|
|
|
def load_model():
    """Build a 12-class ResNet-50 and load fine-tuned weights from disk.

    Returns:
        torch.nn.Module: the model on CPU, switched to eval mode.
    """
    # weights=None: skip downloading the ImageNet checkpoint, since every
    # parameter is overwritten by load_state_dict() below anyway.
    model = models.resnet50(weights=None)
    num_ftrs = model.fc.in_features
    # Replace the 1000-way ImageNet head with a 12-way garbage classifier
    # so the architecture matches the saved fine-tuned state dict.
    model.fc = nn.Linear(num_ftrs, 12)

    model.load_state_dict(
        torch.load('resnet50_garbage_classification.pth',
                   map_location=torch.device('cpu'))
    )

    model.eval()  # disable dropout / batch-norm updates for inference
    return model


model = load_model()
|
|
|
|
|
# Inference preprocessing — the standard ImageNet pipeline used by ResNet-50.
_IMAGENET_MEAN = [0.485, 0.456, 0.406]
_IMAGENET_STD = [0.229, 0.224, 0.225]

transform = transforms.Compose([
    transforms.Resize(256),        # shorter side -> 256 px
    transforms.CenterCrop(224),    # 224x224 centre crop
    transforms.ToTensor(),         # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(_IMAGENET_MEAN, _IMAGENET_STD),
])
|
|
|
|
|
# Model output index -> class label. The order must match the label order
# used during training (alphabetical here — verify against the dataset).
class_names = [
    'battery', 'biological', 'brown-glass', 'cardboard', 'clothes',
    'green-glass', 'metal', 'paper', 'plastic', 'shoes', 'trash',
    'white-glass',
]
|
|
|
|
|
# Map each predicted class to the (Indonesian/English) waste-bin colour it
# belongs in. Repeated colour strings are shared via module-level constants.
_YELLOW = 'Kuning (Yellow)'
_YELLOW_OR_BANK = 'Kuning atau Bank Sampah (Yellow or trash banks / recycling centers)'
_YELLOW_OR_CENTER = 'Kuning (Yellow or trash banks / recycling centers)'
_BLUE = 'Biru (Blue)'

bin_colors = {
    'battery': 'Merah (Red)',
    'biological': 'Hijau (Green)',
    'brown-glass': _YELLOW_OR_CENTER,
    'cardboard': _BLUE,
    'clothes': _YELLOW_OR_BANK,
    'green-glass': _YELLOW,
    'metal': _YELLOW,
    'paper': _BLUE,
    'plastic': _YELLOW,
    'shoes': _YELLOW_OR_BANK,
    'trash': 'Abu-abu (Gray)',
    'white-glass': _YELLOW_OR_CENTER,
}
|
|
|
|
|
def predict(image):
    """Classify a garbage image and look up its disposal-bin colour.

    Args:
        image: H x W (x C) uint8 numpy array from the Gradio image widget.

    Returns:
        tuple[str, str]: (predicted class name, matching bin colour).
    """
    # Gradio can hand over RGBA (PNG with alpha) or grayscale frames;
    # normalise to 3-channel RGB so the tensor matches the model input.
    pil_image = Image.fromarray(image).convert('RGB')
    batch = transform(pil_image).unsqueeze(0)  # add batch dim: (1, 3, 224, 224)

    with torch.no_grad():  # inference only — skip autograd bookkeeping
        outputs = model(batch)
        _, predicted = torch.max(outputs, 1)

    class_name = class_names[predicted.item()]
    bin_color = bin_colors[class_name]
    return class_name, bin_color
|
|
|
|
|
# --- Gradio UI ---------------------------------------------------------------
# Build the interface from named parts; predict() returns (class, bin colour),
# which maps onto the two output textboxes in order.
_description = (
    "Unggah gambar sampah, dan model kami akan mengklasifikasikannya ke dalam salah satu dari 12 kategori bersama dengan warna tempat sampah yang sesuai. "
    "<strong>Model ini bisa memprediksi jenis sampah dari ke-12 jenis berikut:</strong> Baterai, Sampah organik, Gelas Kaca Coklat, "
    "Kardus, Pakaian, Gelas Kaca Hijau, Metal, Kertas, Plastik, Sepatu/sandal, Popok/pampers, Gelas Kaca bening."
)

_image_input = gr.Image(type="numpy", label="Unggah Gambar")
_text_outputs = [
    gr.Textbox(label="Jenis Sampah"),
    gr.Textbox(label="Tong Sampah yang Sesuai"),
]

iface = gr.Interface(
    fn=predict,
    inputs=_image_input,
    outputs=_text_outputs,
    title="Klasifikasi Sampah dengan ResNet50 v1",
    description=_description,
)

# share=True additionally publishes a temporary public URL.
iface.launch(share=True)
|
|