glenn-jocher committed
Commit 62d77a1
1 Parent(s): 84a8099

Created using Colaboratory

Files changed (1)
  1. tutorial.ipynb +199 -128
tutorial.ipynb CHANGED
@@ -15,7 +15,7 @@
  "accelerator": "GPU",
  "widgets": {
  "application/vnd.jupyter.widget-state+json": {
- "484511f272e64eab8b42e68dac5f7a66": {
+ "eb95db7cae194218b3fcefb439b6352f": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HBoxModel",
  "model_module_version": "1.5.0",
@@ -28,16 +28,16 @@
  "_view_count": null,
  "_view_module_version": "1.5.0",
  "box_style": "",
- "layout": "IPY_MODEL_78cceec059784f2bb36988d3336e4d56",
+ "layout": "IPY_MODEL_769ecde6f2e64bacb596ce972f8d3d2d",
  "_model_module": "@jupyter-widgets/controls",
  "children": [
- "IPY_MODEL_ab93d8b65c134605934ff9ec5efb1bb6",
- "IPY_MODEL_30df865ded4c434191bce772c9a82f3a",
- "IPY_MODEL_20cdc61eb3404f42a12b37901b0d85fb"
+ "IPY_MODEL_384a001876054c93b0af45cd1e960bfe",
+ "IPY_MODEL_dded0aeae74440f7ba2ffa0beb8dd612",
+ "IPY_MODEL_5296d28be75740b2892ae421bbec3657"
  ]
  }
  },
- "78cceec059784f2bb36988d3336e4d56": {
+ "769ecde6f2e64bacb596ce972f8d3d2d": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
  "model_module_version": "1.2.0",
@@ -89,13 +89,13 @@
  "left": null
  }
  },
- "ab93d8b65c134605934ff9ec5efb1bb6": {
+ "384a001876054c93b0af45cd1e960bfe": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HTMLModel",
  "model_module_version": "1.5.0",
  "state": {
  "_view_name": "HTMLView",
- "style": "IPY_MODEL_2d7239993a9645b09b221405ac682743",
+ "style": "IPY_MODEL_9f09facb2a6c4a7096810d327c8b551c",
  "_dom_classes": [],
  "description": "",
  "_model_name": "HTMLModel",
@@ -107,16 +107,16 @@
  "_view_module_version": "1.5.0",
  "description_tooltip": null,
  "_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_17b5a87f92104ec7ab96bf507637d0d2"
+ "layout": "IPY_MODEL_25621cff5d16448cb7260e839fd0f543"
  }
  },
- "30df865ded4c434191bce772c9a82f3a": {
+ "dded0aeae74440f7ba2ffa0beb8dd612": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "FloatProgressModel",
  "model_module_version": "1.5.0",
  "state": {
  "_view_name": "ProgressView",
- "style": "IPY_MODEL_2358bfb2270247359e94b066b3cc3d1f",
+ "style": "IPY_MODEL_0ce7164fc0c74bb9a2b5c7037375a727",
  "_dom_classes": [],
  "description": "",
  "_model_name": "FloatProgressModel",
@@ -131,31 +131,31 @@
  "min": 0,
  "description_tooltip": null,
  "_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_3e984405db654b0b83b88b2db08baffd"
+ "layout": "IPY_MODEL_c4c4593c10904cb5b8a5724d60c7e181"
  }
  },
- "20cdc61eb3404f42a12b37901b0d85fb": {
+ "5296d28be75740b2892ae421bbec3657": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "HTMLModel",
  "model_module_version": "1.5.0",
  "state": {
  "_view_name": "HTMLView",
- "style": "IPY_MODEL_654d8a19b9f949c6bbdaf8b0875c931e",
+ "style": "IPY_MODEL_473371611126476c88d5d42ec7031ed6",
  "_dom_classes": [],
  "description": "",
  "_model_name": "HTMLModel",
  "placeholder": "​",
  "_view_module": "@jupyter-widgets/controls",
  "_model_module_version": "1.5.0",
- "value": " 780M/780M [00:33<00:00, 24.4MB/s]",
+ "value": " 780M/780M [00:11<00:00, 91.9MB/s]",
  "_view_count": null,
  "_view_module_version": "1.5.0",
  "description_tooltip": null,
  "_model_module": "@jupyter-widgets/controls",
- "layout": "IPY_MODEL_896030c5d13b415aaa05032818d81a6e"
+ "layout": "IPY_MODEL_65efdfd0d26c46e79c8c5ff3b77126cc"
  }
  },
- "2d7239993a9645b09b221405ac682743": {
+ "9f09facb2a6c4a7096810d327c8b551c": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "DescriptionStyleModel",
  "model_module_version": "1.5.0",
@@ -170,7 +170,7 @@
  "_model_module": "@jupyter-widgets/controls"
  }
  },
- "17b5a87f92104ec7ab96bf507637d0d2": {
+ "25621cff5d16448cb7260e839fd0f543": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
  "model_module_version": "1.2.0",
@@ -222,7 +222,7 @@
  "left": null
  }
  },
- "2358bfb2270247359e94b066b3cc3d1f": {
+ "0ce7164fc0c74bb9a2b5c7037375a727": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "ProgressStyleModel",
  "model_module_version": "1.5.0",
@@ -238,7 +238,7 @@
  "_model_module": "@jupyter-widgets/controls"
  }
  },
- "3e984405db654b0b83b88b2db08baffd": {
+ "c4c4593c10904cb5b8a5724d60c7e181": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
  "model_module_version": "1.2.0",
@@ -290,7 +290,7 @@
  "left": null
  }
  },
- "654d8a19b9f949c6bbdaf8b0875c931e": {
+ "473371611126476c88d5d42ec7031ed6": {
  "model_module": "@jupyter-widgets/controls",
  "model_name": "DescriptionStyleModel",
  "model_module_version": "1.5.0",
@@ -305,7 +305,7 @@
  "_model_module": "@jupyter-widgets/controls"
  }
  },
- "896030c5d13b415aaa05032818d81a6e": {
+ "65efdfd0d26c46e79c8c5ff3b77126cc": {
  "model_module": "@jupyter-widgets/base",
  "model_name": "LayoutModel",
  "model_module_version": "1.2.0",
@@ -402,7 +402,7 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "4d67116a-43e9-4d84-d19e-1edd83f23a04"
+ "outputId": "e2e839d5-d6fc-409c-e44c-0b0b6aa9319d"
  },
  "source": [
  "!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
@@ -415,14 +415,14 @@
  "clear_output()\n",
  "print(f\"Setup complete. Using torch {torch.__version__} ({torch.cuda.get_device_properties(0).name if torch.cuda.is_available() else 'CPU'})\")"
  ],
- "execution_count": null,
+ "execution_count": 11,
  "outputs": [
  {
  "output_type": "stream",
+ "name": "stdout",
  "text": [
- "Setup complete. Using torch 1.9.0+cu102 (Tesla V100-SXM2-16GB)\n"
- ],
- "name": "stdout"
+ "Setup complete. Using torch 1.10.0+cu102 (Tesla V100-SXM2-16GB)\n"
+ ]
  }
  ]
  },
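For readers running outside Colab, the setup cell's final check can be reproduced with plain PyTorch; a minimal sketch using only documented torch calls:

import torch

# Mirror the notebook's environment report: torch build plus active device name
gpu = torch.cuda.is_available()
name = torch.cuda.get_device_properties(0).name if gpu else 'CPU'
print(f'Setup complete. Using torch {torch.__version__} ({name})')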
@@ -454,28 +454,28 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "8b728908-81ab-4861-edb0-4d0c46c439fb"
+ "outputId": "8f7e6588-215d-4ebd-93af-88b871e770a7"
  },
  "source": [
- "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images/\n",
+ "!python detect.py --weights yolov5s.pt --img 640 --conf 0.25 --source data/images\n",
  "Image(filename='runs/detect/exp/zidane.jpg', width=600)"
  ],
- "execution_count": null,
+ "execution_count": 17,
  "outputs": [
  {
  "output_type": "stream",
+ "name": "stdout",
  "text": [
- "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images/, imgsz=640, conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False\n",
- "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
+ "\u001b[34m\u001b[1mdetect: \u001b[0mweights=['yolov5s.pt'], source=data/images, imgsz=[640, 640], conf_thres=0.25, iou_thres=0.45, max_det=1000, device=, view_img=False, save_txt=False, save_conf=False, save_crop=False, nosave=False, classes=None, agnostic_nms=False, augment=False, visualize=False, update=False, project=runs/detect, name=exp, exist_ok=False, line_thickness=3, hide_labels=False, hide_conf=False, half=False, dnn=False\n",
+ "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
  "Fusing layers... \n",
- "Model Summary: 224 layers, 7266973 parameters, 0 gradients\n",
- "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, 1 fire hydrant, Done. (0.007s)\n",
- "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 2 ties, Done. (0.007s)\n",
- "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n",
- "Done. (0.091s)\n"
- ],
- "name": "stdout"
+ "Model Summary: 213 layers, 7225885 parameters, 0 gradients\n",
+ "image 1/2 /content/yolov5/data/images/bus.jpg: 640x480 4 persons, 1 bus, Done. (0.007s)\n",
+ "image 2/2 /content/yolov5/data/images/zidane.jpg: 384x640 2 persons, 1 tie, Done. (0.007s)\n",
+ "Speed: 0.5ms pre-process, 6.9ms inference, 1.3ms NMS per image at shape (1, 3, 640, 640)\n",
+ "Results saved to \u001b[1mruns/detect/exp\u001b[0m\n"
+ ]
  }
  ]
  },
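The detect.py invocation above also has a pure-Python counterpart via PyTorch Hub, which YOLOv5 supports; a minimal sketch (the sample image URL is illustrative):

import torch

# Load pretrained YOLOv5s from PyTorch Hub (weights download on first use)
model = torch.hub.load('ultralytics/yolov5', 'yolov5s')

# Run inference and inspect/save annotated results
results = model('https://ultralytics.com/images/zidane.jpg')
results.print()  # per-image detection summary
results.save()   # writes annotated copies under runs/detect/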
@@ -517,33 +517,33 @@
  "base_uri": "https://localhost:8080/",
  "height": 48,
  "referenced_widgets": [
- "484511f272e64eab8b42e68dac5f7a66",
- "78cceec059784f2bb36988d3336e4d56",
- "ab93d8b65c134605934ff9ec5efb1bb6",
- "30df865ded4c434191bce772c9a82f3a",
- "20cdc61eb3404f42a12b37901b0d85fb",
- "2d7239993a9645b09b221405ac682743",
- "17b5a87f92104ec7ab96bf507637d0d2",
- "2358bfb2270247359e94b066b3cc3d1f",
- "3e984405db654b0b83b88b2db08baffd",
- "654d8a19b9f949c6bbdaf8b0875c931e",
- "896030c5d13b415aaa05032818d81a6e"
+ "eb95db7cae194218b3fcefb439b6352f",
+ "769ecde6f2e64bacb596ce972f8d3d2d",
+ "384a001876054c93b0af45cd1e960bfe",
+ "dded0aeae74440f7ba2ffa0beb8dd612",
+ "5296d28be75740b2892ae421bbec3657",
+ "9f09facb2a6c4a7096810d327c8b551c",
+ "25621cff5d16448cb7260e839fd0f543",
+ "0ce7164fc0c74bb9a2b5c7037375a727",
+ "c4c4593c10904cb5b8a5724d60c7e181",
+ "473371611126476c88d5d42ec7031ed6",
+ "65efdfd0d26c46e79c8c5ff3b77126cc"
  ]
  },
- "outputId": "7e6f5c96-c819-43e1-cd03-d3b9878cf8de"
+ "outputId": "bcf9a448-1f9b-4a41-ad49-12f181faf05a"
  },
  "source": [
  "# Download COCO val\n",
  "torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')\n",
  "!unzip -q tmp.zip -d ../datasets && rm tmp.zip"
  ],
- "execution_count": null,
+ "execution_count": 18,
  "outputs": [
  {
  "output_type": "display_data",
  "data": {
  "application/vnd.jupyter.widget-view+json": {
- "model_id": "484511f272e64eab8b42e68dac5f7a66",
+ "model_id": "eb95db7cae194218b3fcefb439b6352f",
  "version_minor": 0,
  "version_major": 2
  },
@@ -551,9 +551,7 @@
  " 0%| | 0.00/780M [00:00<?, ?B/s]"
  ]
  },
- "metadata": {
- "tags": []
- }
+ "metadata": {}
  }
  ]
  },
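The cell pairs torch.hub.download_url_to_file with a shell unzip; an equivalent standard-library sketch, assuming the same ../datasets target used throughout the notebook:

import os, zipfile
import torch

# Download the COCO val2017 archive, extract beside the repo, then tidy up
torch.hub.download_url_to_file('https://ultralytics.com/assets/coco2017val.zip', 'tmp.zip')
with zipfile.ZipFile('tmp.zip') as zf:
    zf.extractall('../datasets')
os.remove('tmp.zip')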
@@ -564,30 +562,31 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "3dd0e2fc-aecf-4108-91b1-6392da1863cb"
+ "outputId": "74f1dfa9-6b6d-4b36-f67e-bbae243869f9"
  },
  "source": [
  "# Run YOLOv5x on COCO val\n",
  "!python val.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65 --half"
  ],
- "execution_count": null,
+ "execution_count": 19,
  "outputs": [
  {
  "output_type": "stream",
+ "name": "stdout",
  "text": [
- "\u001b[34m\u001b[1mval: \u001b[0mdata=./data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n",
- "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mdata=/content/yolov5/data/coco.yaml, weights=['yolov5x.pt'], batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.65, task=val, device=, single_cls=False, augment=False, verbose=False, save_txt=False, save_hybrid=False, save_conf=False, save_json=True, project=runs/val, name=exp, exist_ok=False, half=True\n",
+ "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
- "Downloading https://github.com/ultralytics/yolov5/releases/download/v5.0/yolov5x.pt to yolov5x.pt...\n",
- "100% 168M/168M [00:08<00:00, 20.6MB/s]\n",
+ "Downloading https://github.com/ultralytics/yolov5/releases/download/v6.0/yolov5x.pt to yolov5x.pt...\n",
+ "100% 166M/166M [00:03<00:00, 54.1MB/s]\n",
  "\n",
  "Fusing layers... \n",
- "Model Summary: 476 layers, 87730285 parameters, 0 gradients\n",
- "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2749.96it/s]\n",
+ "Model Summary: 444 layers, 86705005 parameters, 0 gradients\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco/val2017' images and labels...4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2636.64it/s]\n",
  "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../datasets/coco/val2017.cache\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:08<00:00, 2.28it/s]\n",
- " all 5000 36335 0.746 0.626 0.68 0.49\n",
- "Speed: 0.1ms pre-process, 5.1ms inference, 1.6ms NMS per image at shape (32, 3, 640, 640)\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 157/157 [01:12<00:00, 2.17it/s]\n",
+ " all 5000 36335 0.729 0.63 0.683 0.496\n",
+ "Speed: 0.1ms pre-process, 4.9ms inference, 1.9ms NMS per image at shape (32, 3, 640, 640)\n",
  "\n",
  "Evaluating pycocotools mAP... saving runs/val/exp/yolov5x_predictions.json...\n",
  "loading annotations into memory...\n",
@@ -595,29 +594,28 @@
  "creating index...\n",
  "index created!\n",
  "Loading and preparing results...\n",
- "DONE (t=4.94s)\n",
+ "DONE (t=5.15s)\n",
  "creating index...\n",
  "index created!\n",
  "Running per image evaluation...\n",
  "Evaluate annotation type *bbox*\n",
- "DONE (t=83.60s).\n",
+ "DONE (t=90.39s).\n",
  "Accumulating evaluation results...\n",
- "DONE (t=13.22s).\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.504\n",
- " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.688\n",
- " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.546\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.351\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.551\n",
- " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.644\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.382\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.629\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.681\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.524\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.735\n",
- " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.827\n",
+ "DONE (t=14.54s).\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.507\n",
+ " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.689\n",
+ " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.552\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.345\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.559\n",
+ " Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.652\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.381\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.630\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.682\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.526\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.732\n",
+ " Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.829\n",
  "Results saved to \u001b[1mruns/val/exp\u001b[0m\n"
- ],
- "name": "stdout"
+ ]
  }
  ]
  },
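The --iou 0.65 flag above is the IoU threshold applied during NMS, and the AP/AR table is reported over IoU thresholds 0.50:0.95. For intuition, box IoU is simply intersection area over union area; a standalone sketch, not YOLOv5's own vectorized implementation:

def box_iou(a, b):
    # a, b are (x1, y1, x2, y2) corner boxes; returns IoU in [0, 1]
    iw = max(0.0, min(a[2], b[2]) - max(a[0], b[0]))
    ih = max(0.0, min(a[3], b[3]) - max(a[1], b[1]))
    inter = iw * ih
    union = (a[2] - a[0]) * (a[3] - a[1]) + (b[2] - b[0]) * (b[3] - b[1]) - inter
    return inter / (union + 1e-9)

print(box_iou((0, 0, 10, 10), (5, 5, 15, 15)))  # 25 / 175 ≈ 0.143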
@@ -722,37 +720,37 @@
  "colab": {
  "base_uri": "https://localhost:8080/"
  },
- "outputId": "00ea4b14-a75c-44a2-a913-03b431b69de5"
+ "outputId": "8724d13d-6711-4a12-d96a-1c655e5c3549"
  },
  "source": [
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --cache"
  ],
- "execution_count": null,
+ "execution_count": 24,
  "outputs": [
  {
  "output_type": "stream",
+ "name": "stdout",
  "text": [
- "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, entity=None, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, upload_dataset=False, bbox_interval=-1, save_period=-1, artifact_alias=latest, local_rank=-1, freeze=0\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mweights=yolov5s.pt, cfg=, data=coco128.yaml, hyp=data/hyps/hyp.scratch.yaml, epochs=3, batch_size=16, imgsz=640, rect=False, resume=False, nosave=False, noval=False, noautoanchor=False, evolve=None, bucket=, cache=ram, image_weights=False, device=, multi_scale=False, single_cls=False, adam=False, sync_bn=False, workers=8, project=runs/train, name=exp, exist_ok=False, quad=False, linear_lr=False, label_smoothing=0.0, patience=100, freeze=0, save_period=-1, local_rank=-1, entity=None, upload_dataset=False, bbox_interval=-1, artifact_alias=latest\n",
  "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 ✅\n",
- "YOLOv5 🚀 v5.0-367-g01cdb76 torch 1.9.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
+ "YOLOv5 🚀 v6.0-48-g84a8099 torch 1.10.0+cu102 CUDA:0 (Tesla V100-SXM2-16GB, 16160MiB)\n",
  "\n",
- "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
+ "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.1, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0, copy_paste=0.0\n",
  "\u001b[34m\u001b[1mWeights & Biases: \u001b[0mrun 'pip install wandb' to automatically track and visualize YOLOv5 🚀 runs (RECOMMENDED)\n",
  "\u001b[34m\u001b[1mTensorBoard: \u001b[0mStart with 'tensorboard --logdir runs/train', view at http://localhost:6006/\n",
- "2021-08-15 14:40:43.449642: I tensorflow/stream_executor/platform/default/dso_loader.cc:53] Successfully opened dynamic library libcudart.so.11.0\n",
  "\n",
  " from n params module arguments \n",
- " 0 -1 1 3520 models.common.Focus [3, 32, 3] \n",
+ " 0 -1 1 3520 models.common.Conv [3, 32, 6, 2, 2] \n",
  " 1 -1 1 18560 models.common.Conv [32, 64, 3, 2] \n",
  " 2 -1 1 18816 models.common.C3 [64, 64, 1] \n",
  " 3 -1 1 73984 models.common.Conv [64, 128, 3, 2] \n",
- " 4 -1 3 156928 models.common.C3 [128, 128, 3] \n",
+ " 4 -1 2 115712 models.common.C3 [128, 128, 2] \n",
  " 5 -1 1 295424 models.common.Conv [128, 256, 3, 2] \n",
  " 6 -1 3 625152 models.common.C3 [256, 256, 3] \n",
  " 7 -1 1 1180672 models.common.Conv [256, 512, 3, 2] \n",
- " 8 -1 1 656896 models.common.SPP [512, 512, [5, 9, 13]] \n",
- " 9 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
+ " 8 -1 1 1182720 models.common.C3 [512, 512, 1] \n",
+ " 9 -1 1 656896 models.common.SPPF [512, 512, 5] \n",
  " 10 -1 1 131584 models.common.Conv [512, 256, 1, 1] \n",
  " 11 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
  " 12 [-1, 6] 1 0 models.common.Concat [1] \n",
@@ -768,48 +766,121 @@
  " 22 [-1, 10] 1 0 models.common.Concat [1] \n",
  " 23 -1 1 1182720 models.common.C3 [512, 512, 1, False] \n",
  " 24 [17, 20, 23] 1 229245 models.yolo.Detect [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]\n",
- "Model Summary: 283 layers, 7276605 parameters, 7276605 gradients, 17.1 GFLOPs\n",
+ "Model Summary: 270 layers, 7235389 parameters, 7235389 gradients, 16.5 GFLOPs\n",
  "\n",
- "Transferred 362/362 items from yolov5s.pt\n",
+ "Transferred 349/349 items from yolov5s.pt\n",
  "Scaled weight_decay = 0.0005\n",
- "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 59 weight, 62 weight (no decay), 62 bias\n",
+ "\u001b[34m\u001b[1moptimizer:\u001b[0m SGD with parameter groups 57 weight, 60 weight (no decay), 60 bias\n",
  "\u001b[34m\u001b[1malbumentations: \u001b[0mversion 1.0.3 required by YOLOv5, but version 0.1.12 is currently installed\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017' images and labels...128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2440.28it/s]\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../datasets/coco128/labels/train2017.cache\n",
- "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 302.61it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<?, ?it/s]\n",
+ "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 296.04it/s]\n",
  "\u001b[34m\u001b[1mval: \u001b[0mScanning '../datasets/coco128/labels/train2017.cache' images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<?, ?it/s]\n",
- "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:00<00:00, 142.55it/s]\n",
- "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
- "[W pthreadpool-cpp.cc:90] Warning: Leaking Caffe2 thread-pool after fork. (function pthreadpool)\n",
+ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB ram): 100% 128/128 [00:01<00:00, 121.58it/s]\n",
  "Plotting labels... \n",
  "\n",
  "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.27, Best Possible Recall (BPR) = 0.9935\n",
  "Image sizes 640 train, 640 val\n",
  "Using 2 dataloader workers\n",
- "Logging results to runs/train/exp\n",
+ "Logging results to \u001b[1mruns/train/exp\u001b[0m\n",
  "Starting training for 3 epochs...\n",
  "\n",
  " Epoch gpu_mem box obj cls labels img_size\n",
- " 0/2 3.64G 0.04492 0.0674 0.02213 298 640: 100% 8/8 [00:03<00:00, 2.05it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.70it/s]\n",
- " all 128 929 0.686 0.565 0.642 0.421\n",
+ " 0/2 3.62G 0.04621 0.0711 0.02112 203 640: 100% 8/8 [00:04<00:00, 1.99it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.37it/s]\n",
+ " all 128 929 0.655 0.547 0.622 0.41\n",
  "\n",
  " Epoch gpu_mem box obj cls labels img_size\n",
- " 1/2 5.04G 0.04403 0.0611 0.01986 232 640: 100% 8/8 [00:01<00:00, 5.59it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.46it/s]\n",
- " all 128 929 0.694 0.563 0.654 0.425\n",
+ " 1/2 5.31G 0.04564 0.06898 0.02116 143 640: 100% 8/8 [00:01<00:00, 4.77it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.27it/s]\n",
+ " all 128 929 0.68 0.554 0.632 0.419\n",
  "\n",
  " Epoch gpu_mem box obj cls labels img_size\n",
- " 2/2 5.04G 0.04616 0.07056 0.02071 214 640: 100% 8/8 [00:01<00:00, 5.94it/s]\n",
- " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:02<00:00, 1.52it/s]\n",
- " all 128 929 0.711 0.562 0.66 0.431\n",
+ " 2/2 5.31G 0.04487 0.06883 0.01998 253 640: 100% 8/8 [00:01<00:00, 4.91it/s]\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:00<00:00, 4.30it/s]\n",
+ " all 128 929 0.71 0.544 0.629 0.423\n",
  "\n",
- "3 epochs completed in 0.005 hours.\n",
- "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
- "Optimizer stripped from runs/train/exp/weights/best.pt, 14.8MB\n",
+ "3 epochs completed in 0.003 hours.\n",
+ "Optimizer stripped from runs/train/exp/weights/last.pt, 14.9MB\n",
+ "Optimizer stripped from runs/train/exp/weights/best.pt, 14.9MB\n",
+ "\n",
+ "Validating runs/train/exp/weights/best.pt...\n",
+ "Fusing layers... \n",
+ "Model Summary: 213 layers, 7225885 parameters, 0 gradients, 16.5 GFLOPs\n",
+ " Class Images Labels P R mAP@.5 mAP@.5:.95: 100% 4/4 [00:03<00:00, 1.04it/s]\n",
+ " all 128 929 0.71 0.544 0.63 0.423\n",
+ " person 128 254 0.816 0.669 0.774 0.507\n",
+ " bicycle 128 6 0.799 0.667 0.614 0.371\n",
+ " car 128 46 0.803 0.355 0.486 0.209\n",
+ " motorcycle 128 5 0.704 0.6 0.791 0.583\n",
+ " airplane 128 6 1 0.795 0.995 0.717\n",
+ " bus 128 7 0.656 0.714 0.72 0.606\n",
+ " train 128 3 0.852 1 0.995 0.682\n",
+ " truck 128 12 0.521 0.25 0.395 0.215\n",
+ " boat 128 6 0.795 0.333 0.445 0.137\n",
+ " traffic light 128 14 0.576 0.143 0.24 0.161\n",
+ " stop sign 128 2 0.636 0.5 0.828 0.713\n",
+ " bench 128 9 0.972 0.444 0.575 0.25\n",
+ " bird 128 16 0.939 0.968 0.988 0.645\n",
+ " cat 128 4 0.984 0.75 0.822 0.694\n",
+ " dog 128 9 0.888 0.667 0.903 0.54\n",
+ " horse 128 2 0.689 1 0.995 0.697\n",
+ " elephant 128 17 0.96 0.882 0.943 0.681\n",
+ " bear 128 1 0.549 1 0.995 0.995\n",
+ " zebra 128 4 0.86 1 0.995 0.952\n",
+ " giraffe 128 9 0.822 0.778 0.905 0.57\n",
+ " backpack 128 6 1 0.309 0.457 0.195\n",
+ " umbrella 128 18 0.775 0.576 0.74 0.418\n",
+ " handbag 128 19 0.628 0.105 0.167 0.111\n",
+ " tie 128 7 0.96 0.571 0.701 0.441\n",
+ " suitcase 128 4 1 0.895 0.995 0.621\n",
+ " frisbee 128 5 0.641 0.8 0.798 0.664\n",
+ " skis 128 1 0.627 1 0.995 0.497\n",
+ " snowboard 128 7 0.988 0.714 0.768 0.556\n",
+ " sports ball 128 6 0.671 0.5 0.579 0.339\n",
+ " kite 128 10 0.631 0.515 0.598 0.221\n",
+ " baseball bat 128 4 0.47 0.456 0.277 0.137\n",
+ " baseball glove 128 7 0.459 0.429 0.334 0.182\n",
+ " skateboard 128 5 0.7 0.48 0.736 0.548\n",
+ " tennis racket 128 7 0.559 0.571 0.538 0.315\n",
+ " bottle 128 18 0.607 0.389 0.484 0.282\n",
+ " wine glass 128 16 0.722 0.812 0.82 0.385\n",
+ " cup 128 36 0.881 0.361 0.532 0.312\n",
+ " fork 128 6 0.384 0.167 0.239 0.191\n",
+ " knife 128 16 0.908 0.616 0.681 0.443\n",
+ " spoon 128 22 0.836 0.364 0.536 0.264\n",
+ " bowl 128 28 0.793 0.536 0.633 0.471\n",
+ " banana 128 1 0 0 0.142 0.0995\n",
+ " sandwich 128 2 0 0 0.0951 0.0717\n",
+ " orange 128 4 1 0 0.67 0.317\n",
+ " broccoli 128 11 0.345 0.182 0.283 0.243\n",
+ " carrot 128 24 0.688 0.459 0.612 0.402\n",
+ " hot dog 128 2 0.424 0.771 0.497 0.473\n",
+ " pizza 128 5 0.622 1 0.824 0.551\n",
+ " donut 128 14 0.703 1 0.952 0.853\n",
+ " cake 128 4 0.733 1 0.945 0.777\n",
+ " chair 128 35 0.512 0.486 0.488 0.222\n",
+ " couch 128 6 0.68 0.36 0.746 0.406\n",
+ " potted plant 128 14 0.797 0.714 0.808 0.482\n",
+ " bed 128 3 1 0 0.474 0.318\n",
+ " dining table 128 13 0.852 0.445 0.478 0.315\n",
+ " toilet 128 2 0.512 0.5 0.554 0.487\n",
+ " tv 128 2 0.754 1 0.995 0.895\n",
+ " laptop 128 3 1 0 0.39 0.147\n",
+ " mouse 128 2 1 0 0.0283 0.0226\n",
+ " remote 128 8 0.747 0.625 0.636 0.488\n",
+ " cell phone 128 8 0.555 0.166 0.417 0.222\n",
+ " microwave 128 3 0.417 1 0.995 0.732\n",
+ " oven 128 5 0.37 0.4 0.432 0.249\n",
+ " sink 128 6 0.356 0.167 0.269 0.149\n",
+ " refrigerator 128 5 0.705 0.8 0.814 0.45\n",
+ " book 128 29 0.628 0.138 0.298 0.136\n",
+ " clock 128 9 0.857 0.778 0.893 0.574\n",
+ " vase 128 2 0.242 1 0.663 0.622\n",
+ " scissors 128 1 1 0 0.0207 0.00207\n",
+ " teddy bear 128 21 0.847 0.381 0.622 0.345\n",
+ " toothbrush 128 5 0.99 0.6 0.662 0.45\n",
  "Results saved to \u001b[1mruns/train/exp\u001b[0m\n"
- ],
- "name": "stdout"
+ ]
  }
  ]
  },
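The stripped best.pt written above can be loaded back for inference through PyTorch Hub's custom entry point, which YOLOv5 provides; a minimal sketch assuming the default runs/train/exp output directory:

import torch

# Reload the fine-tuned checkpoint produced by the training cell
model = torch.hub.load('ultralytics/yolov5', 'custom', path='runs/train/exp/weights/best.pt')
results = model('data/images/zidane.jpg')
results.print()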
@@ -953,19 +1024,19 @@
  "%%shell\n",
  "export PYTHONPATH=\"$PWD\" # to run *.py. files in subdirectories\n",
  "rm -rf runs # remove runs/\n",
- "for m in yolov5s; do # models\n",
- " python train.py --weights $m.pt --epochs 3 --img 320 --device 0 # train pretrained\n",
- " python train.py --weights '' --cfg $m.yaml --epochs 3 --img 320 --device 0 # train scratch\n",
+ "for m in yolov5n; do # models\n",
+ " python train.py --img 64 --batch 32 --weights $m.pt --epochs 1 --device 0 # train pretrained\n",
+ " python train.py --img 64 --batch 32 --weights '' --cfg $m.yaml --epochs 1 --device 0 # train scratch\n",
  " for d in 0 cpu; do # devices\n",
- " python detect.py --weights $m.pt --device $d # detect official\n",
- " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n",
  " python val.py --weights $m.pt --device $d # val official\n",
  " python val.py --weights runs/train/exp/weights/best.pt --device $d # val custom\n",
+ " python detect.py --weights $m.pt --device $d # detect official\n",
+ " python detect.py --weights runs/train/exp/weights/best.pt --device $d # detect custom\n",
  " done\n",
- "python hubconf.py # hub\n",
- "python models/yolo.py --cfg $m.yaml # build PyTorch model\n",
- "python models/tf.py --weights $m.pt # build TensorFlow model\n",
- "python export.py --img 128 --batch 1 --weights $m.pt --include torchscript onnx # export\n",
+ " python hubconf.py # hub\n",
+ " python models/yolo.py --cfg $m.yaml # build PyTorch model\n",
+ " python models/tf.py --weights $m.pt # build TensorFlow model\n",
+ " python export.py --img 64 --batch 1 --weights $m.pt --include torchscript onnx # export\n",
  "done"
  ],
  "execution_count": null,
@@ -1014,4 +1085,4 @@
  "outputs": []
  }
  ]
- }
+ }