youssefabdelmottaleb committed on
Commit 9b01bfc
1 Parent(s): 2c1c5ab

Add: SWIN-Transformer-Model-Deployment

Dockerfile ADDED
@@ -0,0 +1,16 @@
+ # read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+ # you will also find guides on how best to write your Dockerfile
+
+ FROM python:3.9
+
+ RUN useradd -m -u 1000 user
+
+ WORKDIR /app
+
+ COPY --chown=user ./requirements.txt requirements.txt
+
+ RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
+ COPY --chown=user . /app
+
+ CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "youssefabdelmottaleb-Garbage-Classification.hf.space"]
__init__.py ADDED
File without changes
__pycache__/app.cpython-310.pyc ADDED
Binary file (1.35 kB)
__pycache__/models.cpython-310.pyc ADDED
Binary file (1.56 kB)
app.py ADDED
@@ -0,0 +1,41 @@
+ from fastapi import FastAPI, UploadFile, File, HTTPException
+ from PIL import Image
+ import torch
+ import logging
+ from models import GarbageClassifier
+
+ app = FastAPI()
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ # Load model and processor
+ model_dir = "youssefabdelmottaleb/Garbage-Classification-SWIN-Transformer"
+ classifier = GarbageClassifier(model_dir)
+
+
+ # Endpoint to receive images and return predictions
+ @app.post("/predict")
+ async def predict_endpoint(file: UploadFile = File(...)):
+     try:
+         image = Image.open(file.file).convert("RGB")
+         inputs = classifier.processor(images=image, return_tensors="pt")
+
+         with torch.no_grad():
+             outputs = classifier.model(**inputs)
+
+         logits = outputs.logits
+         predicted_class_idx = logits.argmax(-1).item()
+         predicted_class = classifier.labels[predicted_class_idx]
+
+         return {"class": predicted_class}
+
+     except Exception as e:
+         logger.error(f"Error processing image: {e}")
+         raise HTTPException(status_code=500, detail="Error processing image")
+
+ # Local smoke test: guarded so it does not run when uvicorn imports the app
+ if __name__ == "__main__":
+     image_path = "C:/Users/youss/Downloads/paperr.jpg"
+     classifier.evaluate_image(image_path)
+ # To run the app: uvicorn app:app --host 0.0.0.0 --port 8000
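Once the server is running, the /predict endpoint accepts a multipart upload under the field name "file" and returns the predicted class as JSON. A minimal client sketch, assuming the requests package is installed and a local image named sample.jpg exists (both are assumptions, not part of this commit):

import requests

# Send an image to the running server (port 7860 in the Docker setup, 8000 when run locally as above)
with open("sample.jpg", "rb") as f:
    response = requests.post("http://localhost:7860/predict", files={"file": f})

print(response.json())  # e.g. {"class": "paper"}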
models.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+ from transformers import SwinForImageClassification, AutoImageProcessor
+ from PIL import Image
+ import matplotlib.pyplot as plt
+
+
+ class GarbageClassifier:
+     def __init__(self, model_dir, num_labels=4):
+         self.labels = ['glass', 'metal', 'paper', 'plastic']
+         self.model, self.processor = self.load_model_and_processor(model_dir, num_labels)
+
+     def load_model_and_processor(self, model_dir, num_labels):
+         model = SwinForImageClassification.from_pretrained(model_dir, num_labels=num_labels)
+         processor = AutoImageProcessor.from_pretrained(model_dir)
+         return model, processor
+
+
+     def evaluate_image(self, image_path):
+         image = Image.open(image_path)
+         inputs = self.processor(images=image, return_tensors="pt")
+
+         with torch.no_grad():
+             outputs = self.model(**inputs)
+             logits = outputs.logits
+             predicted_class = logits.argmax(-1).item()
+
+         plt.imshow(image)
+         plt.title(f"Predicted Class: {self.labels[predicted_class]}")
+         plt.axis('off')
+         plt.show()
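GarbageClassifier wraps both the Swin model and its image processor, so it can also be used directly outside the API. A minimal sketch, assuming a local image path (the filename is illustrative):

from models import GarbageClassifier

# Downloads the fine-tuned checkpoint from the Hugging Face Hub on first use
classifier = GarbageClassifier("youssefabdelmottaleb/Garbage-Classification-SWIN-Transformer")

# Runs inference and displays the image with its predicted label via matplotlib
classifier.evaluate_image("some_local_image.jpg")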
predict.py ADDED
@@ -0,0 +1,89 @@
+ from PIL import Image
+ import torch
+ import os
+ import threading
+ import time
+ from dotenv import load_dotenv
+ from azure.storage.blob import BlobServiceClient
+ from transformers import SwinForImageClassification, AutoImageProcessor
+ from models import GarbageClassifier
+
+ CURRENT_DIR = os.getcwd()
+ ROOT_DIR = os.path.abspath(os.path.join(CURRENT_DIR, ".."))
+
+ load_dotenv()
+
+ # Define Azure Blob Storage settings and model path
+ AZURE_CONNECTION_STRING = os.environ.get("AZURE_CONNECTION_STRING")
+ AZURE_CONTAINER_NAME = os.environ.get("AZURE_CONTAINER_NAME")
+ MODEL_PATH = os.environ.get("MODEL_PATH", "youssefabdelmottaleb/Garbage-Classification-SWIN-Transformer")
+
+ # Initialize the Swin Transformer model and processor
+ model = SwinForImageClassification.from_pretrained(MODEL_PATH)
+ processor = AutoImageProcessor.from_pretrained(MODEL_PATH)
+
+ # Function to save image to Azure Blob Storage
+ def save_image_to_azure(image, result_class):
+     # Convert PIL image to bytes
+     from io import BytesIO
+     image_bytes = BytesIO()
+     image.save(image_bytes, format='JPEG')
+     image_bytes = image_bytes.getvalue()
+
+     # Create the BlobServiceClient object
+     blob_service_client = BlobServiceClient.from_connection_string(AZURE_CONNECTION_STRING)
+
+     # Create a unique blob name
+     blob_name = f"predicted_images/{result_class}/{result_class}_{int(time.time())}.jpg"
+
+     # Create a blob client using the local file name as the name for the blob
+     blob_client = blob_service_client.get_blob_client(container=AZURE_CONTAINER_NAME, blob=blob_name)
+
+     # Upload the created file
+     blob_client.upload_blob(image_bytes)
+     print(f"Image saved to Azure Blob Storage: {blob_name}")
+
+ # Function to save image to local path
+ def save_image_to_local_path(image, result_class):
+     # Define the directory based on the class
+     directory = os.path.join('./predicted_images', result_class)
+     os.makedirs(directory, exist_ok=True)
+
+     # Define the image path
+     image_path = os.path.join(directory, f"{result_class}_{int(time.time())}.jpg")
+
+     # Save the image
+     image.save(image_path)
+     print(f"Image saved to {image_path}")
+
+ # Function to predict using Swin Transformer model
+ def predict(image_file):
+     image = Image.open(image_file).convert('RGB')  # Ensure image is RGB
+
+     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+     # Preprocess the image
+     inputs = processor(images=image, return_tensors="pt")
+     inputs = {k: v.to(device) for k, v in inputs.items()}  # processor output is already batched
+
+     # Perform inference
+     model.to(device)
+     model.eval()
+     with torch.no_grad():
+         outputs = model(**inputs)
+
+     # Post-process the outputs to get the predicted class
+     logits = outputs.logits
+     predicted_class_idx = logits.argmax(-1).item()
+     predicted_class = model.config.id2label[predicted_class_idx]  # label names come from the model config
+
+     # Start a thread to save the image based on the result
+     thread = threading.Thread(target=save_image_to_azure, args=(image, predicted_class))
+     thread.start()
+
+     return {"class": predicted_class}
+
+ if __name__ == "__main__":
+     image_file = os.path.join(ROOT_DIR, "input/Ecomate_Dataset/metal/ecomate_metal_41.jpg")
+     result = predict(image_file)
+     print("Prediction result:", result)
requirements.txt ADDED
@@ -0,0 +1,12 @@
+ fastapi
+ uvicorn[standard]==0.30.1
+
+ numpy==1.26.3
+ torch==2.2.0
+ torchvision
+ transformers
+ pillow
+ python-multipart
+ python-dotenv
+ matplotlib
+ azure-storage-blob