Ayush Chaurasia authored and glenn-jocher committed
Commit 7316b78
1 parent: d1182c4

W&B: Refactor the wandb_utils.py file (#4496)

* Improve docstrings and run names

* default wandb login prompt with timeout (see the login sketch after this list)

* return key

* Update api_key check logic

* Properly support zipped dataset feature

* update docstring

* Revert tutorial change

* extend changes to log_dataset

* add run name

* bug fix

* bug fix

* Update comment

* fix import check

* remove unused import

* Hardcode .yaml file extension

* reduce code

* Reformat using pycharm

* Remove redundant try catch

* More refactoring and bug fixes

* retry

* Reformat using pycharm

* respect LOGGERS include list

* Fix

* fix

* refactor constructor

* refactor

* refactor

* refactor

* PyCharm reformat

Co-authored-by: Glenn Jocher <[email protected]>
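
Of the items above, "default wandb login prompt with timeout" is the one with a reusable pattern behind it: prompt for an API key, but never hang a headless job. A minimal sketch, assuming the wandb client of this era (wandb.login() accepts a `timeout` keyword and raises wandb.errors.UsageError in non-interactive terminals); the helper name is ours, not part of this commit:

    import wandb

    def login_with_timeout(timeout_s=30):
        # wandb.login() returns True once an API key is configured; `timeout`
        # bounds how long the interactive key prompt may block.
        try:
            return wandb.login(timeout=timeout_s)
        except wandb.errors.UsageError:
            # No TTY available (e.g. CI), or the prompt timed out.
            return False

    if not login_with_timeout():
        print('W&B logging disabled: no API key provided within the timeout')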

Files changed (1): utils/loggers/wandb/wandb_utils.py (+43, -34)
--- a/utils/loggers/wandb/wandb_utils.py
+++ b/utils/loggers/wandb/wandb_utils.py
@@ -38,6 +38,19 @@ def check_wandb_config_file(data_config_file):
     return data_config_file
 
 
+def check_wandb_dataset(data_file):
+    is_wandb_artifact = False
+    if check_file(data_file) and data_file.endswith('.yaml'):
+        with open(data_file, errors='ignore') as f:
+            data_dict = yaml.safe_load(f)
+        is_wandb_artifact = (data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX) or
+                             data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX))
+    if is_wandb_artifact:
+        return data_dict
+    else:
+        return check_dataset(data_file)
+
+
 def get_run_info(run_path):
     run_path = Path(remove_prefix(run_path, WANDB_ARTIFACT_PREFIX))
     run_id = run_path.stem
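
The new check_wandb_dataset() above becomes the single entry point for resolving a data config: a .yaml whose train/val entries carry the wandb-artifact:// prefix is returned as a parsed dict untouched (the artifacts are downloaded later), while anything else, including the zipped datasets this PR adds support for, falls through to check_dataset(). A hedged usage sketch (file names are illustrative, not from the repo):

    data_dict = check_wandb_dataset('data/coco128_wandb.yaml')  # artifact-linked yaml -> dict, no download yet
    data_dict = check_wandb_dataset('data/coco128.yaml')        # plain local yaml -> validated by check_dataset()
    data_dict = check_wandb_dataset('data/coco128.zip')         # .zip (not .yaml) -> handled by check_dataset()
    train_path, val_path = data_dict['train'], data_dict['val'] # keys both branches are expected to provide
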
@@ -104,7 +117,7 @@ class WandbLogger():
         - Initialize WandbLogger instance
         - Upload dataset if opt.upload_dataset is True
         - Setup training processes if job_type is 'Training'
-
+
         arguments:
         opt (namespace) -- Commandline arguments for this run
         run_id (str) -- Run ID of W&B run to be resumed
@@ -147,26 +160,24 @@ class WandbLogger():
                                         allow_val_change=True) if not wandb.run else wandb.run
         if self.wandb_run:
             if self.job_type == 'Training':
-                if not opt.resume:
-                    if opt.upload_dataset:
+                if opt.upload_dataset:
+                    if not opt.resume:
                         self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)
 
-                    elif opt.data.endswith('_wandb.yaml'):  # When dataset is W&B artifact
-                        with open(opt.data, errors='ignore') as f:
-                            data_dict = yaml.safe_load(f)
-                        self.data_dict = data_dict
-                    else:  # Local .yaml dataset file or .zip file
-                        self.data_dict = check_dataset(opt.data)
+                if opt.resume:
+                    # resume from artifact
+                    if isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
+                        self.data_dict = dict(self.wandb_run.config.data_dict)
+                    else:  # local resume
+                        self.data_dict = check_wandb_dataset(opt.data)
                 else:
-                    self.data_dict = check_dataset(opt.data)
-
-                self.setup_training(opt)
-                if not self.wandb_artifact_data_dict:
-                    self.wandb_artifact_data_dict = self.data_dict
-                # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
-                if not opt.resume:
+                    self.data_dict = check_wandb_dataset(opt.data)
+                    self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
+
+                    # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming.
                     self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict},
                                                  allow_val_change=True)
+                self.setup_training(opt)
 
         if self.job_type == 'Dataset Creation':
             self.data_dict = self.check_and_upload_dataset(opt)
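
Read top to bottom, the rewritten constructor separates "should we upload" from "where does data_dict come from". A condensed restatement of the Training-job flow above, for reading convenience only (the hunk is authoritative):

    # 1. Upload the dataset once, on fresh runs only.
    if opt.upload_dataset and not opt.resume:
        self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt)

    # 2. Resolve data_dict: an artifact resume trusts the run config,
    #    every other path (fresh run or local resume) uses check_wandb_dataset().
    if opt.resume and isinstance(opt.resume, str) and opt.resume.startswith(WANDB_ARTIFACT_PREFIX):
        self.data_dict = dict(self.wandb_run.config.data_dict)
    else:
        self.data_dict = check_wandb_dataset(opt.data)

    # 3. On fresh runs, persist data_dict to the run config so a later
    #    artifact resume can recover it (`or` keeps an uploaded dict).
    if not opt.resume:
        self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict
        self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, allow_val_change=True)

    self.setup_training(opt)  # always reads the pre-resolved self.data_dict
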
@@ -174,10 +185,10 @@ class WandbLogger():
     def check_and_upload_dataset(self, opt):
         """
         Check if the dataset format is compatible and upload it as W&B artifact
-
+
         arguments:
         opt (namespace) -- Commandline arguments for current run
-
+
         returns:
         Updated dataset info dictionary where local dataset paths are replaced by WANDB_ARTIFACT_PREFIX links.
         """
@@ -196,10 +207,10 @@ class WandbLogger():
         - Attempt to download model checkpoint and dataset artifacts if opt.resume starts with WANDB_ARTIFACT_PREFIX
         - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded
         - Setup log_dict, initialize bbox_interval
-
+
         arguments:
         opt (namespace) -- commandline arguments for this run
-
+
         """
         self.log_dict, self.current_epoch = {}, 0
         self.bbox_interval = opt.bbox_interval
@@ -211,9 +222,7 @@ class WandbLogger():
                 opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str(
                     self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \
                     config.hyp
-            data_dict = dict(self.wandb_run.config.data_dict)  # eliminates the need for config file to resume
-        else:
-            data_dict = self.data_dict
+        data_dict = self.data_dict
         if self.val_artifact is None:  # If --upload_dataset is set, use the existing artifact, don't download
             self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'),
                                                                                            opt.artifact_alias)
@@ -243,11 +252,11 @@ class WandbLogger():
     def download_dataset_artifact(self, path, alias):
         """
         download the dataset artifact if the path starts with WANDB_ARTIFACT_PREFIX
-
+
         arguments:
         path -- path of the dataset to be used for training
         alias (str) -- alias of the artifact to be downloaded/used for training
-
+
         returns:
         (str, wandb.Artifact) -- path of the downloaded dataset and its corresponding artifact object if dataset
         is found otherwise returns (None, None)
@@ -263,7 +272,7 @@ class WandbLogger():
     def download_model_artifact(self, opt):
         """
         download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX
-
+
         arguments:
         opt (namespace) -- Commandline arguments for this run
         """
@@ -281,7 +290,7 @@ class WandbLogger():
     def log_model(self, path, opt, epoch, fitness_score, best_model=False):
         """
         Log the model checkpoint as W&B artifact
-
+
         arguments:
         path (Path) -- Path of directory containing the checkpoints
         opt (namespace) -- Command line arguments for this run
@@ -305,14 +314,14 @@ class WandbLogger():
     def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False):
         """
         Log the dataset as W&B artifact and return the new data file with W&B links
-
+
         arguments:
         data_file (str) -- the .yaml file with information about the dataset like - path, classes etc.
         single_cls (boolean) -- train multi-class data as single-class
         project (str) -- project name. Used to construct the artifact path
         overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new
         file with _wandb postfix. Eg -> data_wandb.yaml
-
+
         returns:
         the new .yaml file with artifact links. it can be used to start training directly from artifacts
         """
@@ -359,12 +368,12 @@ class WandbLogger():
     def create_dataset_table(self, dataset, class_to_id, name='dataset'):
         """
         Create and return W&B artifact containing W&B Table of the dataset.
-
+
         arguments:
         dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
         class_to_id (dict(int, str)) -- hash map that maps class ids to labels
         name (str) -- name of the artifact
-
+
         returns:
         dataset artifact to be logged or used
         """
@@ -401,7 +410,7 @@ class WandbLogger():
     def log_training_progress(self, predn, path, names):
         """
         Build evaluation Table. Uses reference from validation dataset table.
-
+
         arguments:
         predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
         path (str): local path of the current evaluation image
@@ -431,7 +440,7 @@ class WandbLogger():
     def val_one_image(self, pred, predn, path, names, im):
         """
         Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the bbox media panel
-
+
         arguments:
         pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
         predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
@@ -453,7 +462,7 @@ class WandbLogger():
     def log(self, log_dict):
         """
         save the metrics to the logging dictionary
-
+
         arguments:
         log_dict (Dict) -- metrics/media to be logged in current step
         """
@@ -464,7 +473,7 @@ class WandbLogger():
     def end_epoch(self, best_result=False):
         """
         commit the log_dict, model artifacts and Tables to W&B and flush the log_dict.
-
+
         arguments:
         best_result (boolean): Boolean representing if the result of this evaluation is best or not
         """
 