Dean committed
Commit • 818ec2e
1 Parent(s): ad95853

Finalized evaluation step, which now works. Ready to merge into master
Files changed:
- dvc.lock +33 -7
- logs/test_metrics.csv +10 -0
- logs/train_metrics.csv +0 -0
- logs/train_params.yml +1 -1
- requirements.txt +2 -1
- src/.gitignore +1 -0
- src/code/custom_data_loading.py +2 -9
- src/code/eval.py +13 -14
- src/code/eval_metric_calculation.py +24 -16
dvc.lock
CHANGED
@@ -20,8 +20,8 @@ train:
   cmd: python3 src/code/training.py src/data/processed/train
   deps:
   - path: src/code/custom_data_loading.py
-    md5:
-    size:
+    md5: c94ea029ed76ca94bb1ad4c1655e5e68
+    size: 1916
   - path: src/code/params.yml
     md5: 2263ca2167c1bb4b0f53a9aedb5f238e
     size: 217
@@ -34,12 +34,38 @@ train:
     nfiles: 1590
   outs:
   - path: logs/train_metrics.csv
-    md5:
-    size:
+    md5: 437a06e6c6c5b4f6eec5e546c1ce6930
+    size: 103916
   - path: logs/train_params.yml
-    md5:
+    md5: e06e92ac0f3ac1d367c22a10c28cccf9
     size: 886
   - path: src/models/
-    md5:
-    size:
+    md5: fab42526c433987e0e6370db31a1869d.dir
+    size: 494927196
     nfiles: 1
+eval:
+  cmd: python3 src/code/eval.py src/data/processed/test
+  deps:
+  - path: src/code/custom_data_loading.py
+    md5: c94ea029ed76ca94bb1ad4c1655e5e68
+    size: 1916
+  - path: src/code/eval.py
+    md5: fcc66ed80bb4466ab0438f556acd125c
+    size: 1775
+  - path: src/code/eval_metric_calculation.py
+    md5: 2fc866e1107042a996087d5716d44bf0
+    size: 2999
+  - path: src/code/params.yml
+    md5: 2263ca2167c1bb4b0f53a9aedb5f238e
+    size: 217
+  - path: src/data/processed/test
+    md5: bcccd66f3f561b53ba97c89a558c08a0.dir
+    size: 88596370
+    nfiles: 1308
+  - path: src/models/model.pth
+    md5: 2fd77305fd779eefd11e307ee3f201d7
+    size: 494927196
+  outs:
+  - path: logs/test_metrics.csv
+    md5: 0add355c58eb4dfa1ae7e28e47750d33
+    size: 340
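The previously empty md5 and size fields of the train stage are now filled in, and a new eval stage is locked with its own deps and outs. For file entries these are plain MD5 content hashes and byte counts (directory entries, marked with a .dir suffix, are hashed by DVC over a manifest of their contents). A minimal sketch, using only the standard library rather than DVC itself, of how the per-file values could be checked in a local checkout:

import hashlib
from pathlib import Path

def md5_and_size(path):
    # Stream the file and return (md5 hexdigest, size in bytes),
    # the two per-file values DVC records in dvc.lock.
    digest = hashlib.md5()
    p = Path(path)
    with p.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest(), p.stat().st_size

# In a checkout of this commit one would expect, for example:
# md5_and_size("src/code/eval.py") == ("fcc66ed80bb4466ab0438f556acd125c", 1775)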
logs/test_metrics.csv
ADDED
@@ -0,0 +1,10 @@
+Name,Value,Timestamp,Step
+"a1",0.056999333,1613824849186,1
+"a2",0.118539445,1613824849186,1
+"a3",0.19929159,1613824849186,1
+"abs_rel",2.5860002,1613824849186,1
+"sq_rel",15.912783,1613824849186,1
+"rmse",5.257741,1613824849186,1
+"rmse_log",1.2291939,1613824849186,1
+"log10",0.49469143,1613824849186,1
+"silog",43.5198,1613824849186,1
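The new metrics file uses the Name,Value,Timestamp,Step layout written by dagshub_logger in eval.py below (Timestamp is in epoch milliseconds). A minimal, standard-library-only sketch of reading it back into a plain dict:

import csv

def load_metrics(path="logs/test_metrics.csv"):
    # Map each metric Name to its float Value, ignoring Timestamp and Step.
    metrics = {}
    with open(path, newline="") as f:
        for row in csv.DictReader(f):
            metrics[row["Name"]] = float(row["Value"])
    return metrics

# e.g. load_metrics()["rmse"] -> 5.257741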
logs/train_metrics.csv
CHANGED
The diff for this file is too large to render.
logs/train_params.yml
CHANGED
@@ -1,5 +1,5 @@
 DAGsHubLogger: true
-Learner: <fastai.learner.Learner object at
+Learner: <fastai.learner.Learner object at 0x7f051ecfcac8>
 ParamScheduler: true
 ProgressCallback: true
 Recorder: {add_time: true, train_metrics: false, valid_metrics: true}
requirements.txt
CHANGED
@@ -6,4 +6,5 @@ opencv-python==4.4.0.42
 tqdm==4.52.0
 numpy==1.19.4
 scikit-learn==0.23.2
-dagshub==0.1.5
+dagshub==0.1.5
+tables==3.6.1
src/.gitignore
CHANGED
@@ -1 +1,2 @@
 /models
+/eval
src/code/custom_data_loading.py
CHANGED
@@ -8,8 +8,7 @@ from fastai.vision.all import \
     PILImageBW, \
     RandomSplitter, \
     Path, \
-    get_files, \
-    L
+    get_files


 class ImageImageDataLoaders(DataLoaders):
@@ -35,7 +34,7 @@ def get_y_fn(x):
     return y


-def create_data(data_path, is_test=False):
+def create_data(data_path):
     with open(r"./src/code/params.yml") as f:
         params = yaml.safe_load(f)

@@ -49,10 +48,4 @@ def create_data(data_path, is_test=False):
         filenames=filenames,
         label_func=get_y_fn)

-    if is_test:
-        filenames = get_files(Path(data_path), extensions='.jpg')
-        test_files = L([Path(i) for i in filenames])
-        test_dl = dataset.test_dl(test_files, with_labels=True)
-        return dataset, test_dl
-
     return dataset
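With the is_test branch gone, create_data has a single code path: it reads params.yml and returns the DataLoaders, while listing the test .jpg files moved into eval.py below. A minimal usage sketch under that assumption:

from custom_data_loading import create_data

# Training stage (see dvc.lock above): build loaders from the train split.
data = create_data("src/data/processed/train")

# The eval stage builds loaders the same way from the test split and
# discovers the test files itself (see eval.py below).
test_data = create_data("src/data/processed/test")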
src/code/eval.py
CHANGED
@@ -1,9 +1,12 @@
 import sys
 import yaml
-
+import torch
+from torchvision import transforms
+from fastai.vision.all import unet_learner, Path, resnet34, MSELossFlat, get_files, L, tuplify
 from custom_data_loading import create_data
 from eval_metric_calculation import compute_eval_metrics
 from dagshub import dagshub_logger
+from tqdm import tqdm


 if __name__ == "__main__":
@@ -15,7 +18,7 @@ if __name__ == "__main__":
         params = yaml.safe_load(f)

     data_path = Path(sys.argv[1])
-    data
+    data = create_data(data_path)

     arch = {'resnet34': resnet34}
     loss = {'MSELossFlat': MSELossFlat()}
@@ -28,20 +31,16 @@ if __name__ == "__main__":
                           model_dir='models')
     learner = learner.load('model')

-
-
-
-
-
-
-
-    print("Decoding test data...")
-    inputs = (inputs,)
-    decoded_predictions = learner.dls.decode(inputs + tuplify(decoded))[1]
-    decoded_targets = learner.dls.decode(inputs + tuplify(targets))[1]
+    filenames = get_files(Path(data_path), extensions='.jpg')
+    test_files = L([Path(i) for i in filenames])
+
+    for sample in tqdm(test_files.items, desc="Predicting on test images", total=len(test_files.items)):
+        pred = learner.predict(sample)[0]
+        pred = transforms.ToPILImage()(pred[:, :, :].type(torch.FloatTensor)).convert('L')
+        pred.save("src/eval/" + str(sample.stem) + "_pred.png")

     print("Calculating metrics...")
-    metrics = compute_eval_metrics(
+    metrics = compute_eval_metrics(test_files)

     with dagshub_logger(
         metrics_path="logs/test_metrics.csv",
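The decode-based flow was replaced by per-image learner.predict calls that save each output as an 8-bit grayscale PNG named <stem>_pred.png under src/eval/ (which is why /eval was added to src/.gitignore above). The loop as committed assumes that directory already exists; a guard along these lines, an assumption on my part rather than part of this commit, would cover a fresh checkout:

from pathlib import Path

# Hypothetical safeguard: make sure eval.py's prediction output
# directory exists before the PNGs are saved.
Path("src/eval").mkdir(parents=True, exist_ok=True)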
src/code/eval_metric_calculation.py
CHANGED
@@ -1,5 +1,6 @@
 import numpy as np
-
+from PIL import Image
+from tqdm import tqdm

 def compute_errors(target, prediction):
     thresh = np.maximum((target / prediction), (prediction / target))
@@ -24,14 +25,11 @@ def compute_errors(target, prediction):
     return a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log_10


-def compute_eval_metrics(targets, predictions):
-    targets = targets / 25.0
-    predictions = predictions / 25.0
-
+def compute_eval_metrics(test_files):
     min_depth_eval = 1e-3
     max_depth_eval = 10

-    num_samples =
+    num_samples = len(test_files)

     a1 = np.zeros(num_samples, np.float32)
     a2 = np.zeros(num_samples, np.float32)
@@ -43,21 +41,31 @@ def compute_eval_metrics(targets, predictions):
     silog = np.zeros(num_samples, np.float32)
     log10 = np.zeros(num_samples, np.float32)

-    for i in range(num_samples):
-
-
+    for i in tqdm(range(num_samples), desc="Calculating metrics for test data", total=num_samples):
+        sample_path = test_files[i]
+        target_path = str(sample_path.parent/(sample_path.stem + "_depth.png"))
+        pred_path = "src/eval/" + str(sample_path.stem) + "_pred.png"
+
+        target_image = Image.open(target_path)
+        pred_image = Image.open(pred_path)
+
+        target = np.asarray(target_image)
+        pred = np.asarray(pred_image)
+
+        target = target / 25.0
+        pred = pred / 25.0

-
-
-
+        pred[pred < min_depth_eval] = min_depth_eval
+        pred[pred > max_depth_eval] = max_depth_eval
+        pred[np.isinf(pred)] = max_depth_eval

-
-
+        target[np.isinf(target)] = 0
+        target[np.isnan(target)] = 0

-        valid_mask = np.logical_and(
+        valid_mask = np.logical_and(target > min_depth_eval, target < max_depth_eval)

         a1[i], a2[i], a3[i], abs_rel[i], sq_rel[i], rmse[i], rmse_log[i], silog[i], log10[i] = \
-            compute_errors(
+            compute_errors(target[valid_mask], pred[valid_mask])

     print("{:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}, {:>7}".format(
         'd1', 'd2', 'd3', 'AbsRel', 'SqRel', 'RMSE', 'RMSElog', 'SILog', 'log10'))
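compute_eval_metrics now works from the saved prediction PNGs: for each test image it loads <stem>_depth.png as ground truth and src/eval/<stem>_pred.png as the prediction, converts both to arrays, and divides by 25.0, so the 8-bit range 0 to 255 maps to roughly 0 to 10.2, consistent with max_depth_eval = 10. The per-image numbers come from compute_errors, whose body sits outside this diff; its visible first line and return statement match the standard monocular-depth error definitions, so a sketch along those lines (a reconstruction from the usual conventions, not the file's exact code) would be:

import numpy as np

def compute_errors(target, prediction):
    # Threshold accuracies: fraction of pixels whose ratio to ground
    # truth is within 1.25, 1.25^2 and 1.25^3.
    thresh = np.maximum((target / prediction), (prediction / target))
    a1 = (thresh < 1.25).mean()
    a2 = (thresh < 1.25 ** 2).mean()
    a3 = (thresh < 1.25 ** 3).mean()

    # Relative and squared-relative error.
    abs_rel = np.mean(np.abs(target - prediction) / target)
    sq_rel = np.mean(((target - prediction) ** 2) / target)

    # RMSE in linear and log space.
    rmse = np.sqrt(np.mean((target - prediction) ** 2))
    rmse_log = np.sqrt(np.mean((np.log(target) - np.log(prediction)) ** 2))

    # Scale-invariant log error and mean log10 error.
    err = np.log(prediction) - np.log(target)
    silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
    log_10 = np.mean(np.abs(np.log10(target) - np.log10(prediction)))

    return a1, a2, a3, abs_rel, sq_rel, rmse, rmse_log, silog, log_10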