ahnobari committed on
Commit
d7c069b
1 Parent(s): 6f203b4

updated load scripts

Browse files
Files changed (3) hide show
  1. __init__.py +2 -17
  2. bikefusion/__init__.py +1 -1
  3. bikefusion/data_utils.py +21 -1
__init__.py CHANGED
@@ -1,23 +1,8 @@
1
  from bikefusion import *
2
- from torch.utils.data import TensorDataset, random_split
3
- import torch
4
  import os
5
 
6
  current_dir = os.path.dirname(os.path.abspath(__file__))
7
 
8
- def load_bikefusion_and_data():
9
- partial_images, masks, parametric, description, targets = load_data(os.path.join(current_dir, 'data/'))
10
-
11
- training_images = preprocess(targets)
12
-
13
- dataset = TensorDataset(training_images)
14
 
15
- # split to training and validation
16
- train_size = int(0.8 * len(dataset))
17
- val_size = len(dataset) - train_size
18
- train_dataset, val_dataset = random_split(dataset, [train_size, val_size], generator=torch.Generator().manual_seed(42))
19
-
20
- Diffuser = InpaintingDenoisingDiffusion(train_dataset, val_dataset, image_size=128)
21
- Diffuser.load_checkpoint(os.path.join(current_dir, 'chekpoint/bikefusion.pt'))
22
-
23
- return Diffuser
 
1
  from bikefusion import *
 
 
2
  import os
3
 
4
  current_dir = os.path.dirname(os.path.abspath(__file__))
5
 
6
def load_bikefusion():
    """Build and return the checkpoint-loaded BikeFusion diffuser.

    Convenience wrapper around :func:`load_bikefusion_and_data`, bound to
    this package's own directory so callers need no arguments.

    Returns
    -------
    InpaintingDenoisingDiffusion
        Diffuser with weights restored from the bundled checkpoint.
    """
    # `def` instead of a lambda assignment (PEP 8 E731): a named function
    # gets a proper __name__, docstring, and readable tracebacks.
    return load_bikefusion_and_data(current_dir)
 
 
 
 
 
7
 
8
# Public API for `from <package> import *`: every name defined so far that
# is neither private (underscore-prefixed) nor the `os` module import.
__all__ = [exported for exported in dir()
           if not (exported.startswith("_") or exported in {"os"})]
 
 
 
 
 
 
 
 
bikefusion/__init__.py CHANGED
@@ -1,4 +1,4 @@
1
- from .data_utils import load_data, preprocess, postprocess, to_mask_map
2
  from .visualizers import visualize_imagesets, visualize_image_evolution
3
  from .pipline import InpaintingDenoisingPipeline
4
  from .diffusion import InpaintingDenoisingDiffusion
 
1
+ from .data_utils import load_data, preprocess, postprocess, to_mask_map, load_bikefusion_and_data
2
  from .visualizers import visualize_imagesets, visualize_image_evolution
3
  from .pipline import InpaintingDenoisingPipeline
4
  from .diffusion import InpaintingDenoisingDiffusion
bikefusion/data_utils.py CHANGED
@@ -2,6 +2,9 @@ import torchvision.transforms.functional as F
2
  import torch
3
  import numpy as np
4
  import pickle
 
 
 
5
 
6
  def to_mask_map(maks, image_size=(80,128)):
7
  bs = maks.shape[0]
@@ -96,4 +99,21 @@ def load_data(split="train", path="data/"):
96
  masks = np.load(f"{path}mask_{split}.npy")
97
 
98
 
99
- return masked_images, masks, parametric, description, images
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import torch
3
  import numpy as np
4
  import pickle
5
+ from torch.utils.data import TensorDataset, random_split
6
+ from .diffusion import InpaintingDenoisingDiffusion
7
+ import os
8
 
9
  def to_mask_map(maks, image_size=(80,128)):
10
  bs = maks.shape[0]
 
99
  masks = np.load(f"{path}mask_{split}.npy")
100
 
101
 
102
+ return masked_images, masks, parametric, description, images
103
+
104
def load_bikefusion_and_data(current_dir):
    """Load the bike dataset, build train/val splits, and return a
    checkpoint-loaded ``InpaintingDenoisingDiffusion`` model.

    Parameters
    ----------
    current_dir : str
        Directory containing the ``data/`` folder and the checkpoint file.

    Returns
    -------
    InpaintingDenoisingDiffusion
        Diffuser with weights restored from ``chekpoint/bikefusion.pt``.
    """
    # BUG FIX: load_data's signature is load_data(split="train", path="data/").
    # Passing the directory positionally bound it to `split`, producing wrong
    # file paths like "<dir>/data/masked_images_<dir>/data/.npy". Pass it as
    # the `path` keyword argument instead.
    partial_images, masks, parametric, description, targets = load_data(
        path=os.path.join(current_dir, 'data/')
    )

    # Only the full target images are used to train the diffusion model;
    # the partial images / masks / parametric / description outputs are unused here.
    training_images = preprocess(targets)

    dataset = TensorDataset(training_images)

    # 80/20 train/validation split; the fixed seed keeps the split reproducible
    # so the loaded checkpoint always pairs with the same validation set.
    train_size = int(0.8 * len(dataset))
    val_size = len(dataset) - train_size
    train_dataset, val_dataset = random_split(
        dataset, [train_size, val_size],
        generator=torch.Generator().manual_seed(42)
    )

    Diffuser = InpaintingDenoisingDiffusion(train_dataset, val_dataset, image_size=128)
    # NOTE(review): 'chekpoint' looks like a typo but must match the on-disk
    # directory name — do not "fix" the string without renaming the directory.
    Diffuser.load_checkpoint(os.path.join(current_dir, 'chekpoint/bikefusion.pt'))

    return Diffuser