Cherryblade29 committed
Commit 3f25510
1 Parent(s): 4c2c165

Upload 8 files

Files changed (8)
  1. .gitattributes +36 -35
  2. Deplaoy torch model.ipynb +0 -0
  3. Foodvision +30 -0
  4. README.md +13 -13
  5. app.py +65 -0
  6. effnetb2.pth +3 -0
  7. model.py +31 -0
  8. requirements.txt +3 -0
.gitattributes CHANGED
@@ -1,35 +1,36 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
- *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.npy filter=lfs diff=lfs merge=lfs -text
+ *.npz filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pickle filter=lfs diff=lfs merge=lfs -text
+ *.pkl filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tar filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.wasm filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zst filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.file_extension filter=lfs diff=lfs merge=lfs -text
Deplaoy torch model.ipynb ADDED
The diff for this file is too large to render.
 
Foodvision ADDED
@@ -0,0 +1,30 @@
+ -----BEGIN RSA PRIVATE KEY-----
+ Proc-Type: 4,ENCRYPTED
+ DEK-Info: AES-128-CBC,EAF747408140DF2CEFF35BCCB681CA94
+
+ 2SzWmN4xTh2nd8PM/ts3Wx+y6imJptr+gm9YnxTtKCsa/ULKdGIo5fvQlAk6qULt
+ jnA6EB3oMHhRgINO/qqMCV6nKjifAE/YCeeRQMOlbacgmPUM+Sn8Twa35fR0mGfY
+ a/2KD4hjZe8ETMUcm2r6DVZ21mdRYwwFIL+9UL30OebDAR/XsQtXJKIr7oveN/H2
+ HQrNN0SDxqQ4K8yryNq83wdcZstsS2+7Uu03BihbzBwJCA157CoA6yYy+1gVHfLJ
+ GsCf2ZjaCUeJRhqMm9XWz/5tRtCCgTHpFpeyHCshqb1KhS6xTgLBjkFxqio6BSbf
+ 660P75dsdimTOo8+Sr8bZLByIL4Rju8OsaJoJnb/633XvkukVLUXuuMKB00LGLWL
+ 2SY3bEIZzqo8AJz4Gq2t3bcM1k5e1HWDsFfWbbnwrH/eGJWgBPuOFrmuIUaOzreo
+ yeVzUYzTRhrTUmKSHAgx37xVBc3FRx7jSIG+TmLOEen/D/+v/VROT4L/iSMv7tD0
+ 3ymfeK4R59uttprqfJ3HPhD90H2tERePXTxNi6dePRmxnmDptl0nj9aQLXzunhto
+ I+hTgZ1m3cANPT6DHLb+kNUaqfr74wbq7GlJK7nedw0LWsVgt0qYK+XyXCl/25yb
+ SCLpDAhfoh6gu6hDRmAonEI7izLjDMfo7ESdmK69g6NqAS8Tl8+gjonNHqHrfnlR
+ rmkPXcxK+J29VR6vQ99nmbasQGixi/EMlAu/sB086yhL6D2k/Cd5cmih+7gSb0EJ
+ Pork41vPStmc2Tlq9yxdTv/KgGVj1YgvRKPXDCDjX4uftjVneopFeXjB9/JVUycq
+ BECDYvConbvS5mYt5c0iNFkdJp4rDggAIIXPQlIePQ+qEdGm6aJQkJhNtO7GWysa
+ iIARyihw6CivAgpKizfW4IixY9bJCNNEvNnQE7FUdqLv6BJ6DJNxtJg6/KTSWU5L
+ jb/gSlV2exwEWyi8ZjUW5AN9PQ2D9nZGVpUKRsJuwXQb9l6m9qRj6kipPdZsGrN1
+ gZ25bk+eL11/Hh0NFELujzLihOezcFA8V/L3dAFIhriK6q3S7CX+V1PNu4gMWcnT
+ joKXtj2t9EpO4VOzQxBxl12mpSiqN2EDBQd0YYgtTH3cLb1WzpltcZ6PosBbqi6K
+ dmy9soNAdTfTFoFlT/Xoagb0tDONteGuwOhlHZp2SMvmN3ZtUuwr6p/irXwqg1PG
+ 5BqfMnUKVMH12IV0Bk6G/Hv24GiCjb8LANwHBfLspqgl0fEi/k/bY34LuRPwQV+y
+ gOapL1+OFCuKvHanPVefm/ILpyzxJ2hF/W4ntgDC3xo/Myix7huJxL4fAHtvV5gv
+ PeIHAZvtzoobDWLZUKgopakDsAvav23/vPt6LObNIY0jVme0Absgk0M5qobmBR5F
+ NpTvyMG4xFNdx4+8WLfRURDNyH/dW7INBq9ic+Av/XrpwQ81aGd/V3NGKh6Cbm3E
+ dv1gmN2Dw6v7eLOKz0+P/zdCtbZH7JX2qERhy2jbi5TkXCKWTpB6UHqSZ6wwfYFj
+ tcSjOIzGrUBBsKXGAEnlC035no/y27xpmQFCCxtiuiySrtlg6JqGEUR6A2GlCXt0
+ -----END RSA PRIVATE KEY-----
README.md CHANGED
@@ -1,13 +1,13 @@
- ---
- title: FoodVision Mini
- emoji: 📈
- colorFrom: red
- colorTo: yellow
- sdk: gradio
- sdk_version: 4.38.1
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ ---
+ title: FoodVision Mini
+ emoji: 📈
+ colorFrom: red
+ colorTo: yellow
+ sdk: gradio
+ sdk_version: 4.38.1
+ app_file: app.py
+ pinned: false
+ license: mit
+ ---
+
+ Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,65 @@
+ # 1. Imports and class names setup
+ import gradio as gr
+ import os
+ import torch
+
+ from model import create_effnetb2_model
+ from timeit import default_timer as timer
+ from typing import Tuple, Dict
+
+ # Setup class names
+ class_names = ['pizza', 'steak', 'sushi']
+
+ # Model and transforms preparation
+ # Create EffNetB2 model
+ effnetb2, effnetb2_transforms = create_effnetb2_model(num_classes=len(class_names))
+
+ # Load saved weights (on CPU, since the Space has no GPU)
+ effnetb2.load_state_dict(torch.load("effnetb2.pth", map_location=torch.device('cpu')))
+
+ # Predict function
+ def predict(img) -> Tuple[Dict, float]:
+     """
+     Transforms and performs a prediction on img and returns the prediction and time taken.
+     """
+     # Start timer
+     start_time = timer()
+
+     # Transform the target image and add a batch dimension
+     img = effnetb2_transforms(img).unsqueeze(0)
+
+     # Put model into evaluation mode and turn on inference mode
+     effnetb2.eval()
+     with torch.inference_mode():
+         # Pass the transformed image through the model and turn the prediction logits into prediction probabilities
+         pred_probs = torch.softmax(effnetb2(img), dim=1)
+
+     # Create a prediction label and prediction probability dictionary
+     pred_labels_and_probs = {class_names[i]: float(pred_probs[0][i]) for i in range(len(class_names))}
+
+     # Calculate prediction time
+     pred_time = round(timer() - start_time, 5)
+
+     # Return the prediction dictionary and prediction time
+     return pred_labels_and_probs, pred_time
+
+ ## Gradio app
+
+ # Create title, description and article strings
+ title = "FoodVision Mini 🍕🥩🍣"
+ description = "An EfficientNetB2 feature extractor computer vision model to classify images of food as pizza, steak or sushi."
+ article = "Created "
+
+ # Create examples list from the "examples/" directory
+ example_list = [["examples/" + example] for example in os.listdir("examples")]
+
+ # Create the Gradio demo
+ demo = gr.Interface(fn=predict,  # mapping function from input to output
+                     inputs=gr.Image(type="pil"),  # input: a PIL image
+                     outputs=[gr.Label(num_top_classes=3, label="Predictions"),  # fn returns two values,
+                              gr.Number(label="Prediction time (s)")],           # so the demo has two outputs
+                     examples=example_list,
+                     title=title,
+                     description=description,
+                     article=article)
+
+ # Launch the demo!
+ demo.launch()
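Note that `os.listdir("examples")` assumes an `examples/` directory exists next to app.py, but no such directory appears in this commit's file list, so a fresh clone would raise `FileNotFoundError` at import time. A minimal defensive sketch of that one step (the guard is an addition, not part of the committed code):

```python
import os

# Build the Gradio examples list only if the directory is actually present;
# gr.Interface's examples parameter defaults to None, so None is a safe fallback
example_list = ([["examples/" + name] for name in os.listdir("examples")]
                if os.path.isdir("examples")
                else None)
```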
effnetb2.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:56ae45551ee9305afa43726d6802c0d97701a32641b6a8adab6c3e9e023018ae
+ size 31274298
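The weights themselves live in Git LFS; the repository stores only this pointer (the `size` field is the real file's byte count, roughly 31 MB). As a hedged sketch of how such a checkpoint is typically produced and consumed (the training step is not part of this commit; app.py performs the reload exactly as shown):

```python
import torch
from model import create_effnetb2_model

# Build the architecture, train the classifier head (not shown),
# then save only the state dict -- this is what effnetb2.pth contains
model, _ = create_effnetb2_model(num_classes=3)
torch.save(model.state_dict(), "effnetb2.pth")

# Reload later on CPU, as app.py does
model.load_state_dict(torch.load("effnetb2.pth", map_location=torch.device("cpu")))
```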
model.py ADDED
@@ -0,0 +1,31 @@
+ import torch
+ import torchvision
+ from torch import nn
+
+ def create_effnetb2_model(num_classes: int,
+                           seed: int = 42):
+     """
+     Create an EffNetB2 feature extractor model and its matching image transforms.
+
+     Args:
+         num_classes (int): number of classes in the classifier head.
+         seed (int, optional): random seed value. Defaults to 42.
+
+     Returns:
+         model (torch.nn.Module): EffNetB2 feature extractor model.
+         transforms (torchvision.transforms): EffNetB2 image transforms.
+     """
+     # Create EffNetB2 pretrained weights, transforms and model
+     weights = torchvision.models.EfficientNet_B2_Weights.DEFAULT
+     transforms = weights.transforms()
+     model = torchvision.models.efficientnet_b2(weights=weights)
+
+     # Freeze all layers in the base model
+     for param in model.parameters():
+         param.requires_grad = False
+
+     # Change the classifier head (with a random seed for reproducibility)
+     torch.manual_seed(seed)
+     model.classifier = nn.Sequential(
+         nn.Dropout(p=0.2, inplace=True),
+         nn.Linear(in_features=1408, out_features=num_classes, bias=True)
+     )
+     return model, transforms
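A minimal sanity check for `create_effnetb2_model`, assuming the pinned torchvision 0.14.1: the frozen EffNetB2 backbone plus the replaced head should map an image batch to one logit per class.

```python
import torch
from model import create_effnetb2_model

model, transforms = create_effnetb2_model(num_classes=3)
model.eval()

# EffNetB2's default weight transforms produce 288x288 inputs, so a random
# tensor of that shape stands in for a preprocessed image
with torch.inference_mode():
    logits = model(torch.randn(1, 3, 288, 288))

print(logits.shape)  # torch.Size([1, 3]) -- one logit per class
```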
requirements.txt ADDED
@@ -0,0 +1,3 @@
+ gradio==3.21.0
+ torch==1.13.1
+ torchvision==0.14.1