MikkoLipsanen committed
Commit 93a3c63
1 Parent(s): 9377114

Update test.py

Files changed (1)
  1. test.py +14 -26
test.py CHANGED
@@ -21,26 +21,23 @@ print("Torchvision Version: ",torchvision.__version__)
 
 parser = argparse.ArgumentParser('arguments for testing the model')
 
-parser.add_argument('--ts_empty_folder', type=str, default="/data/taulukot/solukuvat/empty/test/",
+parser.add_argument('--ts_empty_folder', type=str, default="/path/to/empty/test/data/",
                     help='path to test data')
-parser.add_argument('--ts_ok_folder', type=str, default="/data/taulukot/solukuvat/ok/test/",
+parser.add_argument('--ts_ok_folder', type=str, default="/path/to/non-empty/test/data/",
                     help='path to test data')
-parser.add_argument('--results_folder', type=str, default="./results/aug_28022024/",
+parser.add_argument('--results_folder', type=str, default="./results/",
                     help='Folder for saving results')
-parser.add_argument('--model_path', type=str, default="/koodit/table_segmentation/empty_cell_detection/train/models/aug_b32_lr0001_28022024.onnx",
+parser.add_argument('--model_path', type=str, default="/path/to/model.onnx",
                     help='path to load model file from')
 parser.add_argument('--batch_size', type=int, default=16,
                     help='batch_size')
 parser.add_argument('--num_classes', type=int, default=2,
                     help='number of classes for classification')
-parser.add_argument('--name', type=str, default='empty_cell_augment_28022024',
+parser.add_argument('--name', type=str, default='test',
                     help='name given to result files')
 
 start = time.time()
 
-# nohup python test.py > logs/aug_test_28022024.txt 2>&1 &
-# echo $! > output/save_pid.txt
-
 torch.manual_seed(67)
 random.seed(67)
 
@@ -49,10 +46,8 @@ args = parser.parse_args()
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 Image.MAX_IMAGE_PIXELS = None
 
-# https://pytorch.org/tutorials/beginner/finetuning_torchvision_models_tutorial.html
-
-
 def get_data():
+    """Combines test data paths and labels"""
     empty_path = Path(args.ts_empty_folder)
     ok_path = Path(args.ts_ok_folder)
 
@@ -62,11 +57,6 @@ def get_data():
     empty_labels = np.zeros(len(empty_files))
     ok_labels = np.ones(len(ok_files))
 
-    #ts_data_files = ts_data_files[:20]
-    #ts_data_labels = ts_data_labels[:20]
-    #ts_ok_files = ts_ok_files[:20]
-    #ts_ok_labels = ts_ok_labels[:20]
-
     ts_files = empty_files + ok_files
     ts_labels = np.concatenate((empty_labels, ok_labels))
 
@@ -77,12 +67,14 @@ def get_data():
 
 
 def initialize_model():
+    """Initializes .onnx model."""
     model = onnxruntime.InferenceSession(args.model_path)
     input_size = 224
     return model, input_size
 
-# Function for getting precision, recall and F-score metrics
+
 def get_precision_recall(y_true, y_pred):
+    """Calculates precision, recall and F-score metrics."""
     precision_recall_fscore = precision_recall_fscore_support(y_true, y_pred, average=None)
 
     prec_0 = precision_recall_fscore[0][0]
@@ -103,6 +95,7 @@ def get_precision_recall(y_true, y_pred):
 
 
 def createConfusionMatrix(y_true, y_pred):
+    """Creates confusion matrix based on the predicted and true labels."""
     classes = np.array(['empty', 'ok'])
 
     # Build confusion matrix
@@ -114,6 +107,7 @@ def createConfusionMatrix(y_true, y_pred):
     return sn.heatmap(df_cm, annot=True).get_figure()
 
 def save_preds(y_true, y_pred, paths):
+    """Saves file names and labels of incorrectly classified images."""
     # Identifies images that were not classified correctly
     incorrect_indices = np.where(y_true != y_pred)
     incorrectly_predicted_images = paths[incorrect_indices]
@@ -122,15 +116,12 @@ def save_preds(y_true, y_pred, paths):
 
     print(f'{len(incorrect_preds)} incorrect predictions')
 
-    # Save file names and labels of incorrectly classified images
     with open(args.results_folder + args.name + '_incorrect_preds', "w") as fp:
         json.dump(incorrect_preds, fp)
 
 # Initialize the model for this run
 model, input_size = initialize_model()
 
-# Print the model we just instantiated
-#print(model_ft)
 
 data_transforms = transforms.Compose([
     transforms.Resize((input_size, input_size)),
@@ -141,8 +132,8 @@ print("Initializing Datasets and Dataloaders...")
 
 ts_files, ts_labels = get_data()
 
-# Function for getting model predictions on test data
 def test_model(model, ts_files, ts_labels):
+    """Get model predictions on test data."""
     since = time.time()
     label_preds = []
     true_labels = []
@@ -175,14 +166,11 @@ ts_labels = np.array(ts_labels)
 
 # Test model
 y_pred, y_true, paths = test_model(model, ts_files, ts_labels)
-# Saves information of incorrect predictions
+# Save information of incorrect predictions
 save_preds(y_true, y_pred, paths)
-# Calculates and prints precision, recall and F-score metrics
+# Calculate and print precision, recall and F-score metrics
 get_precision_recall(y_true, y_pred)
 
-# Save confusion matrix to Tensorboard
-#cm = createConfusionMatrix(y_true, y_pred)
-#writer.add_figure("Confusion matrix", cm)
 # Create and save confusion matrix of the predictions and true labels
 conf_matrix = ConfusionMatrixDisplay.from_predictions(y_true, y_pred, normalize='true', display_labels=np.array(['empty', 'ok']))
 plt.savefig(args.results_folder + args.name + '_conf_matrix.jpg', bbox_inches='tight')
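For readers unfamiliar with onnxruntime, here is a minimal sketch of how the InferenceSession returned by initialize_model() could score a single image. It is not the committed test_model body (the diff does not show that function's internals), the preprocessing assumes only the Resize and ToTensor transforms visible in the hunk above, and the paths and variable names (session, img, batch) are placeholders.

# Minimal sketch, not the committed test_model implementation.
import numpy as np
import onnxruntime
from PIL import Image
from torchvision import transforms

input_size = 224
preprocess = transforms.Compose([
    transforms.Resize((input_size, input_size)),
    transforms.ToTensor(),                          # assumes no extra normalization
])

session = onnxruntime.InferenceSession("/path/to/model.onnx")
img = Image.open("/path/to/image.jpg").convert("RGB")
batch = preprocess(img).unsqueeze(0).numpy()        # shape (1, 3, 224, 224), float32

input_name = session.get_inputs()[0].name
outputs = session.run(None, {input_name: batch})    # assumes first output holds class scores
pred = int(np.argmax(outputs[0], axis=1)[0])        # 0 = 'empty', 1 = 'ok'
print(pred)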
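The indexing in get_precision_recall() relies on sklearn's precision_recall_fscore_support returning, with average=None, per-class arrays in the order (precision, recall, fscore, support); so precision_recall_fscore[0][0] is the precision of class 0 ('empty') and [0][1] that of class 1 ('ok'). A short self-contained illustration with made-up labels:

import numpy as np
from sklearn.metrics import precision_recall_fscore_support

# Toy labels for illustration only (0 = 'empty', 1 = 'ok')
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0, 1, 0, 1, 1, 0])

precision, recall, fscore, support = precision_recall_fscore_support(
    y_true, y_pred, average=None)

print(precision[0], recall[0], fscore[0])   # metrics for the 'empty' class
print(precision[1], recall[1], fscore[1])   # metrics for the 'ok' class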