"""End-to-end garbage-classification demo.

Downloads the Kaggle "garbage-classification" dataset, saves a hard-coded
training history to disk, loads a fine-tuned ResNet50 checkpoint, and serves
a Gradio interface that classifies a waste photo into one of 12 categories
and reports the matching disposal-bin color.
"""

import os
import pickle
import shutil
import subprocess
import time
import zipfile

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
from PIL import Image
import gradio as gr

# Step 1: Setup Kaggle API — ensure the ~/.kaggle directory exists.
kaggle_dir = os.path.expanduser("~/.kaggle")
if not os.path.exists(kaggle_dir):
    os.makedirs(kaggle_dir)

# Step 2: Copy the kaggle.json credentials into ~/.kaggle and restrict
# permissions to owner-only (the Kaggle CLI rejects world-readable keys).
# NOTE: raises FileNotFoundError if ./kaggle.json is absent — intentional,
# the rest of the script cannot proceed without credentials.
kaggle_json_path = "kaggle.json"
kaggle_dest_path = os.path.join(kaggle_dir, "kaggle.json")
if not os.path.exists(kaggle_dest_path):
    shutil.copy(kaggle_json_path, kaggle_dest_path)
    os.chmod(kaggle_dest_path, 0o600)
    print("Kaggle API key copied and permissions set.")
else:
    print("Kaggle API key already exists.")

# Step 3: Download the dataset from Kaggle using the Kaggle CLI.
# An argument list with shell=False avoids shell-injection and quoting
# issues that a shell=True command string is exposed to.
dataset_name = "mostafaabla/garbage-classification"
print(f"Downloading the dataset: {dataset_name}")
result = subprocess.run(["kaggle", "datasets", "download", "-d", dataset_name])
if result.returncode != 0:
    # Surface failures instead of silently continuing to the unzip step.
    print(f"Warning: Kaggle download exited with status {result.returncode}.")

# Step 4: Unzip the downloaded dataset (idempotent: skips work already done).
dataset_zip = "garbage-classification.zip"
extracted_folder = "./garbage-classification"
if os.path.exists(dataset_zip):
    if not os.path.exists(extracted_folder):
        with zipfile.ZipFile(dataset_zip, 'r') as zip_ref:
            zip_ref.extractall(extracted_folder)
        print("Dataset unzipped successfully!")
    else:
        print("Dataset already unzipped.")
else:
    print(f"Dataset zip file '{dataset_zip}' not found.")

# Latest train/validation metrics (20 epochs), recorded from a prior run.
history = {
    'train_loss': [
        0.9568, 0.6937, 0.5917, 0.5718, 0.5109, 0.4824, 0.4697, 0.3318,
        0.2785, 0.2680, 0.2371, 0.2333, 0.2198, 0.2060, 0.1962, 0.1951,
        0.1880, 0.1912, 0.1811, 0.1810
    ],
    'train_acc': [
        0.7011, 0.7774, 0.8094, 0.8146, 0.8331, 0.8452, 0.8447, 0.8899,
        0.9068, 0.9114, 0.9216, 0.9203, 0.9254, 0.9306, 0.9352, 0.9346,
        0.9368, 0.9353, 0.9396, 0.9409
    ],
    'val_loss': [
        0.4934, 0.3939, 0.4377, 0.3412, 0.2614, 0.2966, 0.2439, 0.1065,
        0.0926, 0.0797, 0.0738, 0.0639, 0.0555, 0.0560, 0.0490, 0.0479,
        0.0455, 0.0454, 0.0438, 0.0427
    ],
    'val_acc': [
        0.8481, 0.8734, 0.8663, 0.8915, 0.9172, 0.9011, 0.9221, 0.9649,
        0.9714, 0.9759, 0.9762, 0.9791, 0.9827, 0.9812, 0.9843, 0.9850,
        0.9852, 0.9854, 0.9854, 0.9866
    ]
}

# Persist the history as a pickle file for later plotting/analysis.
with open('training_history.pkl', 'wb') as f:
    pickle.dump(history, f)

print('Training history saved as training_history.pkl')


def load_model():
    """Build a 12-class ResNet50 and load the fine-tuned checkpoint.

    Returns the model in eval mode on CPU; expects the checkpoint file
    'resnet50_garbage_classification.pth' in the working directory.
    """
    model = models.resnet50(weights='DEFAULT')  # Default weights for initialization
    num_ftrs = model.fc.in_features
    model.fc = nn.Linear(num_ftrs, 12)  # Replace the head with 12 output classes
    # Load the fine-tuned state dict onto CPU so no GPU is required to serve.
    model.load_state_dict(torch.load('resnet50_garbage_classification.pth',
                                     map_location=torch.device('cpu')))
    model.eval()  # Inference mode: disables dropout / batch-norm updates
    return model


model = load_model()

# Standard ImageNet preprocessing: resize, center-crop, normalize.
transform = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])

# Class names in the order the model's output logits were trained.
class_names = ['battery', 'biological', 'brown-glass', 'cardboard', 'clothes',
               'green-glass', 'metal', 'paper', 'plastic', 'shoes', 'trash',
               'white-glass']

# Disposal-bin color per class (values are user-facing strings, mixed
# Indonesian/English by design).
bin_colors = {
    'battery': 'Merah (Red)',  # Hazardous waste (B3)
    'biological': 'Hijau (Green)',  # Organic waste
    'brown-glass': 'Kuning (Yellow or trash banks / recycling centers)',  # Brown glass (inorganic/recyclable)
    'cardboard': 'Biru (Blue)',  # Paper (recyclable)
    'clothes': 'Kuning atau Bank Sampah (Yellow or trash banks / recycling centers)',  # Clothing (treated as recyclable)
    'green-glass': 'Kuning (Yellow)',  # Green glass (inorganic/recyclable)
    'metal': 'Kuning (Yellow)',  # Metal (inorganic/recyclable)
    'paper': 'Biru (Blue)',  # Paper (recyclable)
    'plastic': 'Kuning (Yellow)',  # Plastic (inorganic/recyclable)
    'shoes': 'Kuning atau Bank Sampah (Yellow or trash banks / recycling centers)',  # Shoes (treated as recyclable)
    'trash': 'Abu-abu (Gray)',  # General waste
    'white-glass': 'Kuning (Yellow or trash banks / recycling centers)'  # White glass (inorganic/recyclable)
}


def predict(image):
    """Classify one image (H×W×C numpy array from Gradio).

    Returns a (class_name, bin_color) tuple of user-facing strings.
    """
    image = Image.fromarray(image)  # numpy array -> PIL Image
    image = transform(image)        # Resize/crop/normalize to a tensor
    image = image.unsqueeze(0)      # Add batch dimension: (1, C, H, W)
    with torch.no_grad():           # No gradients needed for inference
        outputs = model(image)
        _, predicted = torch.max(outputs, 1)
    class_name = class_names[predicted.item()]  # Index -> label
    bin_color = bin_colors[class_name]          # Label -> bin color
    return class_name, bin_color


# Build the Gradio interface (labels/description are user-facing Indonesian).
iface = gr.Interface(
    fn=predict,
    inputs=gr.Image(type="numpy", label="Unggah Gambar"),
    outputs=[
        gr.Textbox(label="Jenis Sampah"),
        gr.Textbox(label="Tong Sampah yang Sesuai")  # 2 outputs with labels
    ],
    title="Klasifikasi Sampah dengan ResNet50 v1",
    description="Unggah gambar sampah, dan model kami akan mengklasifikasikannya ke dalam salah satu dari 12 kategori bersama dengan warna tempat sampah yang sesuai. "
                "Model ini bisa memprediksi jenis sampah dari ke-12 jenis berikut: Baterai, Sampah organik, Gelas Kaca Coklat, "
                "Kardus, Pakaian, Gelas Kaca Hijau, Metal, Kertas, Plastik, Sepatu/sandal, Popok/pampers, Gelas Kaca bening."
)

iface.launch(share=True)