glenn-jocher committed on
Commit
17ac94b
•
1 Parent(s): bdd88e1

Created using Colaboratory

Files changed (1)
  1. tutorial.ipynb +90 -92
tutorial.ipynb CHANGED
@@ -16,7 +16,7 @@
16
  "accelerator": "GPU",
17
  "widgets": {
18
  "application/vnd.jupyter.widget-state+json": {
19
- "811fd52fef65422c8267bafcde8a2c3d": {
20
  "model_module": "@jupyter-widgets/controls",
21
  "model_name": "HBoxModel",
22
  "state": {
@@ -28,15 +28,15 @@
28
  "_view_count": null,
29
  "_view_module_version": "1.5.0",
30
  "box_style": "",
31
- "layout": "IPY_MODEL_8f41b90117224eef9133a9c3a103dbba",
32
  "_model_module": "@jupyter-widgets/controls",
33
  "children": [
34
- "IPY_MODEL_ca2fb37af6ed43d4a74cdc9f2ac5c4a5",
35
- "IPY_MODEL_29419ae5ebb9403ea73f7e5a68037bdd"
36
  ]
37
  }
38
  },
39
- "8f41b90117224eef9133a9c3a103dbba": {
40
  "model_module": "@jupyter-widgets/base",
41
  "model_name": "LayoutModel",
42
  "state": {
@@ -87,12 +87,12 @@
87
  "left": null
88
  }
89
  },
90
- "ca2fb37af6ed43d4a74cdc9f2ac5c4a5": {
91
  "model_module": "@jupyter-widgets/controls",
92
  "model_name": "FloatProgressModel",
93
  "state": {
94
  "_view_name": "ProgressView",
95
- "style": "IPY_MODEL_6511b4dfb10b48d1bc98bcfb3987bfa0",
96
  "_dom_classes": [],
97
  "description": "100%",
98
  "_model_name": "FloatProgressModel",
@@ -107,30 +107,30 @@
107
  "min": 0,
108
  "description_tooltip": null,
109
  "_model_module": "@jupyter-widgets/controls",
110
- "layout": "IPY_MODEL_64f0badf1a8f489885aa984dd62d37dc"
111
  }
112
  },
113
- "29419ae5ebb9403ea73f7e5a68037bdd": {
114
  "model_module": "@jupyter-widgets/controls",
115
  "model_name": "HTMLModel",
116
  "state": {
117
  "_view_name": "HTMLView",
118
- "style": "IPY_MODEL_f569911c5cfc4d81bb1bdfa83447afc8",
119
  "_dom_classes": [],
120
  "description": "",
121
  "_model_name": "HTMLModel",
122
  "placeholder": "​",
123
  "_view_module": "@jupyter-widgets/controls",
124
  "_model_module_version": "1.5.0",
125
- "value": " 781M/781M [00:23<00:00, 34.2MB/s]",
126
  "_view_count": null,
127
  "_view_module_version": "1.5.0",
128
  "description_tooltip": null,
129
  "_model_module": "@jupyter-widgets/controls",
130
- "layout": "IPY_MODEL_84943ade566440aaa2dcf3b3b27e7074"
131
  }
132
  },
133
- "6511b4dfb10b48d1bc98bcfb3987bfa0": {
134
  "model_module": "@jupyter-widgets/controls",
135
  "model_name": "ProgressStyleModel",
136
  "state": {
@@ -145,7 +145,7 @@
145
  "_model_module": "@jupyter-widgets/controls"
146
  }
147
  },
148
- "64f0badf1a8f489885aa984dd62d37dc": {
149
  "model_module": "@jupyter-widgets/base",
150
  "model_name": "LayoutModel",
151
  "state": {
@@ -196,7 +196,7 @@
196
  "left": null
197
  }
198
  },
199
- "f569911c5cfc4d81bb1bdfa83447afc8": {
200
  "model_module": "@jupyter-widgets/controls",
201
  "model_name": "DescriptionStyleModel",
202
  "state": {
@@ -210,7 +210,7 @@
210
  "_model_module": "@jupyter-widgets/controls"
211
  }
212
  },
213
- "84943ade566440aaa2dcf3b3b27e7074": {
214
  "model_module": "@jupyter-widgets/base",
215
  "model_name": "LayoutModel",
216
  "state": {
@@ -261,7 +261,7 @@
261
  "left": null
262
  }
263
  },
264
- "8501ed1563e4452eac9df6b7a66e8f8c": {
265
  "model_module": "@jupyter-widgets/controls",
266
  "model_name": "HBoxModel",
267
  "state": {
@@ -273,15 +273,15 @@
273
  "_view_count": null,
274
  "_view_module_version": "1.5.0",
275
  "box_style": "",
276
- "layout": "IPY_MODEL_d2bb96801e1f46f4a58e02534f7026ff",
277
  "_model_module": "@jupyter-widgets/controls",
278
  "children": [
279
- "IPY_MODEL_468a796ef06b4a24bcba6fbd4a0a8db5",
280
- "IPY_MODEL_42ad5c1ea7be4835bffebf90642178f1"
281
  ]
282
  }
283
  },
284
- "d2bb96801e1f46f4a58e02534f7026ff": {
285
  "model_module": "@jupyter-widgets/base",
286
  "model_name": "LayoutModel",
287
  "state": {
@@ -332,12 +332,12 @@
332
  "left": null
333
  }
334
  },
335
- "468a796ef06b4a24bcba6fbd4a0a8db5": {
336
  "model_module": "@jupyter-widgets/controls",
337
  "model_name": "FloatProgressModel",
338
  "state": {
339
  "_view_name": "ProgressView",
340
- "style": "IPY_MODEL_c58b5536d98f4814831934e9c30c4d78",
341
  "_dom_classes": [],
342
  "description": "100%",
343
  "_model_name": "FloatProgressModel",
@@ -352,30 +352,30 @@
352
  "min": 0,
353
  "description_tooltip": null,
354
  "_model_module": "@jupyter-widgets/controls",
355
- "layout": "IPY_MODEL_505597101151486ea29e9ab754544d27"
356
  }
357
  },
358
- "42ad5c1ea7be4835bffebf90642178f1": {
359
  "model_module": "@jupyter-widgets/controls",
360
  "model_name": "HTMLModel",
361
  "state": {
362
  "_view_name": "HTMLView",
363
- "style": "IPY_MODEL_de6e7b4b4a1c408c9f89d89b07a13bcd",
364
  "_dom_classes": [],
365
  "description": "",
366
  "_model_name": "HTMLModel",
367
  "placeholder": "​",
368
  "_view_module": "@jupyter-widgets/controls",
369
  "_model_module_version": "1.5.0",
370
- "value": " 21.1M/21.1M [00:01<00:00, 18.2MB/s]",
371
  "_view_count": null,
372
  "_view_module_version": "1.5.0",
373
  "description_tooltip": null,
374
  "_model_module": "@jupyter-widgets/controls",
375
- "layout": "IPY_MODEL_f5cc9c7d4c274b2d81327ba3163c43fd"
376
  }
377
  },
378
- "c58b5536d98f4814831934e9c30c4d78": {
379
  "model_module": "@jupyter-widgets/controls",
380
  "model_name": "ProgressStyleModel",
381
  "state": {
@@ -390,7 +390,7 @@
390
  "_model_module": "@jupyter-widgets/controls"
391
  }
392
  },
393
- "505597101151486ea29e9ab754544d27": {
394
  "model_module": "@jupyter-widgets/base",
395
  "model_name": "LayoutModel",
396
  "state": {
@@ -441,7 +441,7 @@
441
  "left": null
442
  }
443
  },
444
- "de6e7b4b4a1c408c9f89d89b07a13bcd": {
445
  "model_module": "@jupyter-widgets/controls",
446
  "model_name": "DescriptionStyleModel",
447
  "state": {
@@ -455,7 +455,7 @@
455
  "_model_module": "@jupyter-widgets/controls"
456
  }
457
  },
458
- "f5cc9c7d4c274b2d81327ba3163c43fd": {
459
  "model_module": "@jupyter-widgets/base",
460
  "model_name": "LayoutModel",
461
  "state": {
@@ -550,7 +550,7 @@
550
  "colab": {
551
  "base_uri": "https://localhost:8080/"
552
  },
553
- "outputId": "c6ad57c2-40b7-4764-b07d-19ee2ceaabaf"
554
  },
555
  "source": [
556
  "!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
@@ -563,12 +563,12 @@
563
  "clear_output()\n",
564
  "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
565
  ],
566
- "execution_count": null,
567
  "outputs": [
568
  {
569
  "output_type": "stream",
570
  "text": [
571
- "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16130MB, multi_processor_count=80)\n"
572
  ],
573
  "name": "stdout"
574
  }
@@ -672,30 +672,30 @@
672
  "base_uri": "https://localhost:8080/",
673
  "height": 65,
674
  "referenced_widgets": [
675
- "811fd52fef65422c8267bafcde8a2c3d",
676
- "8f41b90117224eef9133a9c3a103dbba",
677
- "ca2fb37af6ed43d4a74cdc9f2ac5c4a5",
678
- "29419ae5ebb9403ea73f7e5a68037bdd",
679
- "6511b4dfb10b48d1bc98bcfb3987bfa0",
680
- "64f0badf1a8f489885aa984dd62d37dc",
681
- "f569911c5cfc4d81bb1bdfa83447afc8",
682
- "84943ade566440aaa2dcf3b3b27e7074"
683
  ]
684
  },
685
- "outputId": "59a7a546-8492-492e-861d-70a2c85a6794"
686
  },
687
  "source": [
688
  "# Download COCO val2017\n",
689
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
690
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
691
  ],
692
- "execution_count": null,
693
  "outputs": [
694
  {
695
  "output_type": "display_data",
696
  "data": {
697
  "application/vnd.jupyter.widget-view+json": {
698
- "model_id": "811fd52fef65422c8267bafcde8a2c3d",
699
  "version_minor": 0,
700
  "version_major": 2
701
  },
@@ -723,46 +723,45 @@
723
  "colab": {
724
  "base_uri": "https://localhost:8080/"
725
  },
726
- "outputId": "427c211e-e283-4e87-f7b3-7b8dfb11a4a5"
727
  },
728
  "source": [
729
  "# Run YOLOv5x on COCO val2017\n",
730
  "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
731
  ],
732
- "execution_count": null,
733
  "outputs": [
734
  {
735
  "output_type": "stream",
736
  "text": [
737
  "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
738
- "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n",
739
  "\n",
740
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n",
741
- "100% 168M/168M [00:05<00:00, 31.9MB/s]\n",
742
  "\n",
743
  "Fusing layers... \n",
744
  "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
745
- "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2791.81it/s]\n",
746
- "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/labels/val2017.cache\n",
747
- "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/labels/val2017.cache' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:00<00:00, 13332180.55it/s]\n",
748
- " Class Images Targets P R [email protected] [email protected]:.95: 100% 157/157 [01:30<00:00, 1.73it/s]\n",
749
- " all 5e+03 3.63e+04 0.419 0.765 0.68 0.486\n",
750
- "Speed: 5.2/2.0/7.2 ms inference/NMS/total per 640x640 image at batch-size 32\n",
751
  "\n",
752
  "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
753
  "loading annotations into memory...\n",
754
- "Done (t=0.41s)\n",
755
  "creating index...\n",
756
  "index created!\n",
757
  "Loading and preparing results...\n",
758
- "DONE (t=5.26s)\n",
759
  "creating index...\n",
760
  "index created!\n",
761
  "Running per image evaluation...\n",
762
  "Evaluate annotation type *bbox*\n",
763
- "DONE (t=93.97s).\n",
764
  "Accumulating evaluation results...\n",
765
- "DONE (t=15.06s).\n",
766
  " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n",
767
  " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n",
768
  " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n",
@@ -837,30 +836,30 @@
837
  "base_uri": "https://localhost:8080/",
838
  "height": 65,
839
  "referenced_widgets": [
840
- "8501ed1563e4452eac9df6b7a66e8f8c",
841
- "d2bb96801e1f46f4a58e02534f7026ff",
842
- "468a796ef06b4a24bcba6fbd4a0a8db5",
843
- "42ad5c1ea7be4835bffebf90642178f1",
844
- "c58b5536d98f4814831934e9c30c4d78",
845
- "505597101151486ea29e9ab754544d27",
846
- "de6e7b4b4a1c408c9f89d89b07a13bcd",
847
- "f5cc9c7d4c274b2d81327ba3163c43fd"
848
  ]
849
  },
850
- "outputId": "c68a3db4-1314-46b4-9e52-83532eb65749"
851
  },
852
  "source": [
853
  "# Download COCO128\n",
854
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
855
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
856
  ],
857
- "execution_count": null,
858
  "outputs": [
859
  {
860
  "output_type": "display_data",
861
  "data": {
862
  "application/vnd.jupyter.widget-view+json": {
863
- "model_id": "8501ed1563e4452eac9df6b7a66e8f8c",
864
  "version_minor": 0,
865
  "version_major": 2
866
  },
@@ -925,27 +924,27 @@
925
  "colab": {
926
  "base_uri": "https://localhost:8080/"
927
  },
928
- "outputId": "6af7116a-01ab-4b94-e5d7-b37c17dc95de"
929
  },
930
  "source": [
931
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
932
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
933
  ],
934
- "execution_count": null,
935
  "outputs": [
936
  {
937
  "output_type": "stream",
938
  "text": [
939
  "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 βœ…\n",
940
- "YOLOv5 v4.0-21-gb26a2f6 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16130.5MB)\n",
941
  "\n",
942
- "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
943
  "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
944
  "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
945
- "2021-01-17 19:56:03.945851: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
946
  "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
947
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n",
948
- "100% 14.1M/14.1M [00:00<00:00, 15.8MB/s]\n",
949
  "\n",
950
  "\n",
951
  " from n params module arguments \n",
@@ -979,12 +978,11 @@
979
  "Transferred 362/362 items from yolov5s.pt\n",
980
  "Scaled weight_decay = 0.0005\n",
981
  "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
982
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2647.74it/s]\n",
983
  "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n",
984
- "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 1503840.09it/s]\n",
985
- "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 176.03it/s]\n",
986
- "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 24200.82it/s]\n",
987
- "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:01<00:00, 123.25it/s]\n",
988
  "Plotting labels... \n",
989
  "\n",
990
  "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
@@ -994,19 +992,19 @@
994
  "Starting training for 3 epochs...\n",
995
  "\n",
996
  " Epoch gpu_mem box obj cls total targets img_size\n",
997
- " 0/2 3.27G 0.04357 0.06779 0.01869 0.1301 207 640: 100% 8/8 [00:04<00:00, 1.95it/s]\n",
998
- " Class Images Targets P R [email protected] [email protected]:.95: 100% 8/8 [00:05<00:00, 1.36it/s]\n",
999
- " all 128 929 0.392 0.732 0.657 0.428\n",
1000
  "\n",
1001
  " Epoch gpu_mem box obj cls total targets img_size\n",
1002
- " 1/2 7.47G 0.04308 0.06636 0.02083 0.1303 227 640: 100% 8/8 [00:02<00:00, 3.88it/s]\n",
1003
- " Class Images Targets P R [email protected] [email protected]:.95: 100% 8/8 [00:01<00:00, 5.07it/s]\n",
1004
- " all 128 929 0.387 0.737 0.657 0.432\n",
1005
  "\n",
1006
  " Epoch gpu_mem box obj cls total targets img_size\n",
1007
- " 2/2 7.48G 0.04461 0.06864 0.01866 0.1319 191 640: 100% 8/8 [00:02<00:00, 3.57it/s]\n",
1008
- " Class Images Targets P R [email protected] [email protected]:.95: 100% 8/8 [00:02<00:00, 2.82it/s]\n",
1009
- " all 128 929 0.385 0.742 0.658 0.431\n",
1010
  "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
1011
  "3 epochs completed in 0.007 hours.\n",
1012
  "\n"
@@ -1238,4 +1236,4 @@
1238
  "outputs": []
1239
  }
1240
  ]
1241
- }
 
16
  "accelerator": "GPU",
17
  "widgets": {
18
  "application/vnd.jupyter.widget-state+json": {
19
+ "1f8e9b8ebded4175b2eaa9f75c3ceb00": {
20
  "model_module": "@jupyter-widgets/controls",
21
  "model_name": "HBoxModel",
22
  "state": {
 
28
  "_view_count": null,
29
  "_view_module_version": "1.5.0",
30
  "box_style": "",
31
+ "layout": "IPY_MODEL_0a1246a73077468ab80e979cc0576cd2",
32
  "_model_module": "@jupyter-widgets/controls",
33
  "children": [
34
+ "IPY_MODEL_d327cde5a85a4a51bb8b1b3e9cf06c97",
35
+ "IPY_MODEL_d5ef1cb2cbed4b87b3c5d292ff2b0da6"
36
  ]
37
  }
38
  },
39
+ "0a1246a73077468ab80e979cc0576cd2": {
40
  "model_module": "@jupyter-widgets/base",
41
  "model_name": "LayoutModel",
42
  "state": {
 
87
  "left": null
88
  }
89
  },
90
+ "d327cde5a85a4a51bb8b1b3e9cf06c97": {
91
  "model_module": "@jupyter-widgets/controls",
92
  "model_name": "FloatProgressModel",
93
  "state": {
94
  "_view_name": "ProgressView",
95
+ "style": "IPY_MODEL_8d5dff8bca14435a88fa1814533acd85",
96
  "_dom_classes": [],
97
  "description": "100%",
98
  "_model_name": "FloatProgressModel",
 
107
  "min": 0,
108
  "description_tooltip": null,
109
  "_model_module": "@jupyter-widgets/controls",
110
+ "layout": "IPY_MODEL_3d5136c19e7645ca9bc8f51ceffb2be1"
111
  }
112
  },
113
+ "d5ef1cb2cbed4b87b3c5d292ff2b0da6": {
114
  "model_module": "@jupyter-widgets/controls",
115
  "model_name": "HTMLModel",
116
  "state": {
117
  "_view_name": "HTMLView",
118
+ "style": "IPY_MODEL_2919396dbd4b4c8e821d12bd28665d8a",
119
  "_dom_classes": [],
120
  "description": "",
121
  "_model_name": "HTMLModel",
122
  "placeholder": "​",
123
  "_view_module": "@jupyter-widgets/controls",
124
  "_model_module_version": "1.5.0",
125
+ "value": " 781M/781M [00:12&lt;00:00, 65.5MB/s]",
126
  "_view_count": null,
127
  "_view_module_version": "1.5.0",
128
  "description_tooltip": null,
129
  "_model_module": "@jupyter-widgets/controls",
130
+ "layout": "IPY_MODEL_6feb16f2b2fa4021b1a271e1dd442d04"
131
  }
132
  },
133
+ "8d5dff8bca14435a88fa1814533acd85": {
134
  "model_module": "@jupyter-widgets/controls",
135
  "model_name": "ProgressStyleModel",
136
  "state": {
 
145
  "_model_module": "@jupyter-widgets/controls"
146
  }
147
  },
148
+ "3d5136c19e7645ca9bc8f51ceffb2be1": {
149
  "model_module": "@jupyter-widgets/base",
150
  "model_name": "LayoutModel",
151
  "state": {
 
196
  "left": null
197
  }
198
  },
199
+ "2919396dbd4b4c8e821d12bd28665d8a": {
200
  "model_module": "@jupyter-widgets/controls",
201
  "model_name": "DescriptionStyleModel",
202
  "state": {
 
210
  "_model_module": "@jupyter-widgets/controls"
211
  }
212
  },
213
+ "6feb16f2b2fa4021b1a271e1dd442d04": {
214
  "model_module": "@jupyter-widgets/base",
215
  "model_name": "LayoutModel",
216
  "state": {
 
261
  "left": null
262
  }
263
  },
264
+ "e6459e0bcee449b090fc9807672725bc": {
265
  "model_module": "@jupyter-widgets/controls",
266
  "model_name": "HBoxModel",
267
  "state": {
 
273
  "_view_count": null,
274
  "_view_module_version": "1.5.0",
275
  "box_style": "",
276
+ "layout": "IPY_MODEL_c341e1d3bf3b40d1821ce392eb966c68",
277
  "_model_module": "@jupyter-widgets/controls",
278
  "children": [
279
+ "IPY_MODEL_660afee173694231a6dce3cd94df6cae",
280
+ "IPY_MODEL_261218485cef48df961519dde5edfcbe"
281
  ]
282
  }
283
  },
284
+ "c341e1d3bf3b40d1821ce392eb966c68": {
285
  "model_module": "@jupyter-widgets/base",
286
  "model_name": "LayoutModel",
287
  "state": {
 
332
  "left": null
333
  }
334
  },
335
+ "660afee173694231a6dce3cd94df6cae": {
336
  "model_module": "@jupyter-widgets/controls",
337
  "model_name": "FloatProgressModel",
338
  "state": {
339
  "_view_name": "ProgressView",
340
+ "style": "IPY_MODEL_32736d503c06497abfae8c0421918255",
341
  "_dom_classes": [],
342
  "description": "100%",
343
  "_model_name": "FloatProgressModel",
 
352
  "min": 0,
353
  "description_tooltip": null,
354
  "_model_module": "@jupyter-widgets/controls",
355
+ "layout": "IPY_MODEL_e257738711f54d5280c8393d9d3dce1c"
356
  }
357
  },
358
+ "261218485cef48df961519dde5edfcbe": {
359
  "model_module": "@jupyter-widgets/controls",
360
  "model_name": "HTMLModel",
361
  "state": {
362
  "_view_name": "HTMLView",
363
+ "style": "IPY_MODEL_beb7a6fe34b840899bb79c062681696f",
364
  "_dom_classes": [],
365
  "description": "",
366
  "_model_name": "HTMLModel",
367
  "placeholder": "​",
368
  "_view_module": "@jupyter-widgets/controls",
369
  "_model_module_version": "1.5.0",
370
+ "value": " 21.1M/21.1M [00:00&lt;00:00, 33.5MB/s]",
371
  "_view_count": null,
372
  "_view_module_version": "1.5.0",
373
  "description_tooltip": null,
374
  "_model_module": "@jupyter-widgets/controls",
375
+ "layout": "IPY_MODEL_e639132395d64d70b99d8b72c32f8fbb"
376
  }
377
  },
378
+ "32736d503c06497abfae8c0421918255": {
379
  "model_module": "@jupyter-widgets/controls",
380
  "model_name": "ProgressStyleModel",
381
  "state": {
 
390
  "_model_module": "@jupyter-widgets/controls"
391
  }
392
  },
393
+ "e257738711f54d5280c8393d9d3dce1c": {
394
  "model_module": "@jupyter-widgets/base",
395
  "model_name": "LayoutModel",
396
  "state": {
 
441
  "left": null
442
  }
443
  },
444
+ "beb7a6fe34b840899bb79c062681696f": {
445
  "model_module": "@jupyter-widgets/controls",
446
  "model_name": "DescriptionStyleModel",
447
  "state": {
 
455
  "_model_module": "@jupyter-widgets/controls"
456
  }
457
  },
458
+ "e639132395d64d70b99d8b72c32f8fbb": {
459
  "model_module": "@jupyter-widgets/base",
460
  "model_name": "LayoutModel",
461
  "state": {
 
550
  "colab": {
551
  "base_uri": "https://localhost:8080/"
552
  },
553
+ "outputId": "ae8805a9-ce15-4e1c-f6b4-baa1c1033f56"
554
  },
555
  "source": [
556
  "!git clone https://github.com/ultralytics/yolov5 # clone repo\n",
 
563
  "clear_output()\n",
564
  "print('Setup complete. Using torch %s %s' % (torch.__version__, torch.cuda.get_device_properties(0) if torch.cuda.is_available() else 'CPU'))"
565
  ],
566
+ "execution_count": 1,
567
  "outputs": [
568
  {
569
  "output_type": "stream",
570
  "text": [
571
+ "Setup complete. Using torch 1.7.0+cu101 _CudaDeviceProperties(name='Tesla V100-SXM2-16GB', major=7, minor=0, total_memory=16160MB, multi_processor_count=80)\n"
572
  ],
573
  "name": "stdout"
574
  }
 
672
  "base_uri": "https://localhost:8080/",
673
  "height": 65,
674
  "referenced_widgets": [
675
+ "1f8e9b8ebded4175b2eaa9f75c3ceb00",
676
+ "0a1246a73077468ab80e979cc0576cd2",
677
+ "d327cde5a85a4a51bb8b1b3e9cf06c97",
678
+ "d5ef1cb2cbed4b87b3c5d292ff2b0da6",
679
+ "8d5dff8bca14435a88fa1814533acd85",
680
+ "3d5136c19e7645ca9bc8f51ceffb2be1",
681
+ "2919396dbd4b4c8e821d12bd28665d8a",
682
+ "6feb16f2b2fa4021b1a271e1dd442d04"
683
  ]
684
  },
685
+ "outputId": "d6ace7c6-1be5-41ff-d607-1c716b88d298"
686
  },
687
  "source": [
688
  "# Download COCO val2017\n",
689
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco2017val.zip', 'tmp.zip')\n",
690
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
691
  ],
692
+ "execution_count": 2,
693
  "outputs": [
694
  {
695
  "output_type": "display_data",
696
  "data": {
697
  "application/vnd.jupyter.widget-view+json": {
698
+ "model_id": "1f8e9b8ebded4175b2eaa9f75c3ceb00",
699
  "version_minor": 0,
700
  "version_major": 2
701
  },
 
723
  "colab": {
724
  "base_uri": "https://localhost:8080/"
725
  },
726
+ "outputId": "cc25f70c-0a11-44f6-cc44-e92c5083488c"
727
  },
728
  "source": [
729
  "# Run YOLOv5x on COCO val2017\n",
730
  "!python test.py --weights yolov5x.pt --data coco.yaml --img 640 --iou 0.65"
731
  ],
732
+ "execution_count": 3,
733
  "outputs": [
734
  {
735
  "output_type": "stream",
736
  "text": [
737
  "Namespace(augment=False, batch_size=32, conf_thres=0.001, data='./data/coco.yaml', device='', exist_ok=False, img_size=640, iou_thres=0.65, name='exp', project='runs/test', save_conf=False, save_hybrid=False, save_json=True, save_txt=False, single_cls=False, task='val', verbose=False, weights=['yolov5x.pt'])\n",
738
+ "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
739
  "\n",
740
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5x.pt to yolov5x.pt...\n",
741
+ "100% 168M/168M [00:04<00:00, 39.7MB/s]\n",
742
  "\n",
743
  "Fusing layers... \n",
744
  "Model Summary: 476 layers, 87730285 parameters, 0 gradients, 218.8 GFLOPS\n",
745
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco/val2017' for images and labels... 4952 found, 48 missing, 0 empty, 0 corrupted: 100% 5000/5000 [00:01<00:00, 2824.78it/s]\n",
746
+ "\u001b[34m\u001b[1mval: \u001b[0mNew cache created: ../coco/val2017.cache\n",
747
+ " Class Images Targets P R [email protected] [email protected]:.95: 100% 157/157 [01:33<00:00, 1.68it/s]\n",
748
+ " all 5e+03 3.63e+04 0.749 0.619 0.68 0.486\n",
749
+ "Speed: 5.2/2.0/7.3 ms inference/NMS/total per 640x640 image at batch-size 32\n",
 
750
  "\n",
751
  "Evaluating pycocotools mAP... saving runs/test/exp/yolov5x_predictions.json...\n",
752
  "loading annotations into memory...\n",
753
+ "Done (t=0.44s)\n",
754
  "creating index...\n",
755
  "index created!\n",
756
  "Loading and preparing results...\n",
757
+ "DONE (t=4.47s)\n",
758
  "creating index...\n",
759
  "index created!\n",
760
  "Running per image evaluation...\n",
761
  "Evaluate annotation type *bbox*\n",
762
+ "DONE (t=94.87s).\n",
763
  "Accumulating evaluation results...\n",
764
+ "DONE (t=15.96s).\n",
765
  " Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.501\n",
766
  " Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.687\n",
767
  " Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.544\n",
 
836
  "base_uri": "https://localhost:8080/",
837
  "height": 65,
838
  "referenced_widgets": [
839
+ "e6459e0bcee449b090fc9807672725bc",
840
+ "c341e1d3bf3b40d1821ce392eb966c68",
841
+ "660afee173694231a6dce3cd94df6cae",
842
+ "261218485cef48df961519dde5edfcbe",
843
+ "32736d503c06497abfae8c0421918255",
844
+ "e257738711f54d5280c8393d9d3dce1c",
845
+ "beb7a6fe34b840899bb79c062681696f",
846
+ "e639132395d64d70b99d8b72c32f8fbb"
847
  ]
848
  },
849
+ "outputId": "e8b7d5b3-a71e-4446-eec2-ad13419cf700"
850
  },
851
  "source": [
852
  "# Download COCO128\n",
853
  "torch.hub.download_url_to_file('https://github.com/ultralytics/yolov5/releases/download/v1.0/coco128.zip', 'tmp.zip')\n",
854
  "!unzip -q tmp.zip -d ../ && rm tmp.zip"
855
  ],
856
+ "execution_count": 4,
857
  "outputs": [
858
  {
859
  "output_type": "display_data",
860
  "data": {
861
  "application/vnd.jupyter.widget-view+json": {
862
+ "model_id": "e6459e0bcee449b090fc9807672725bc",
863
  "version_minor": 0,
864
  "version_major": 2
865
  },
 
924
  "colab": {
925
  "base_uri": "https://localhost:8080/"
926
  },
927
+ "outputId": "38e51b29-2df4-4f00-cde8-5f6e4a34da9e"
928
  },
929
  "source": [
930
  "# Train YOLOv5s on COCO128 for 3 epochs\n",
931
  "!python train.py --img 640 --batch 16 --epochs 3 --data coco128.yaml --weights yolov5s.pt --nosave --cache"
932
  ],
933
+ "execution_count": 5,
934
  "outputs": [
935
  {
936
  "output_type": "stream",
937
  "text": [
938
  "\u001b[34m\u001b[1mgithub: \u001b[0mup to date with https://github.com/ultralytics/yolov5 βœ…\n",
939
+ "YOLOv5 v4.0-75-gbdd88e1 torch 1.7.0+cu101 CUDA:0 (Tesla V100-SXM2-16GB, 16160.5MB)\n",
940
  "\n",
941
+ "Namespace(adam=False, batch_size=16, bucket='', cache_images=True, cfg='', data='./data/coco128.yaml', device='', epochs=3, evolve=False, exist_ok=False, global_rank=-1, hyp='data/hyp.scratch.yaml', image_weights=False, img_size=[640, 640], linear_lr=False, local_rank=-1, log_artifacts=False, log_imgs=16, multi_scale=False, name='exp', noautoanchor=False, nosave=True, notest=False, project='runs/train', quad=False, rect=False, resume=False, save_dir='runs/train/exp', single_cls=False, sync_bn=False, total_batch_size=16, weights='yolov5s.pt', workers=8, world_size=1)\n",
942
  "\u001b[34m\u001b[1mwandb: \u001b[0mInstall Weights & Biases for YOLOv5 logging with 'pip install wandb' (recommended)\n",
943
  "Start Tensorboard with \"tensorboard --logdir runs/train\", view at http://localhost:6006/\n",
944
+ "2021-02-12 06:38:28.027271: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.10.1\n",
945
  "\u001b[34m\u001b[1mhyperparameters: \u001b[0mlr0=0.01, lrf=0.2, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=0.05, cls=0.5, cls_pw=1.0, obj=1.0, obj_pw=1.0, iou_t=0.2, anchor_t=4.0, fl_gamma=0.0, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, mosaic=1.0, mixup=0.0\n",
946
  "Downloading https://github.com/ultralytics/yolov5/releases/download/v4.0/yolov5s.pt to yolov5s.pt...\n",
947
+ "100% 14.1M/14.1M [00:01<00:00, 13.2MB/s]\n",
948
  "\n",
949
  "\n",
950
  " from n params module arguments \n",
 
978
  "Transferred 362/362 items from yolov5s.pt\n",
979
  "Scaled weight_decay = 0.0005\n",
980
  "Optimizer groups: 62 .bias, 62 conv.weight, 59 other\n",
981
+ "\u001b[34m\u001b[1mtrain: \u001b[0mScanning '../coco128/labels/train2017' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 2566.00it/s]\n",
982
  "\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: ../coco128/labels/train2017.cache\n",
983
+ "\u001b[34m\u001b[1mtrain: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 175.07it/s]\n",
984
+ "\u001b[34m\u001b[1mval: \u001b[0mScanning '../coco128/labels/train2017.cache' for images and labels... 128 found, 0 missing, 2 empty, 0 corrupted: 100% 128/128 [00:00<00:00, 764773.38it/s]\n",
985
+ "\u001b[34m\u001b[1mval: \u001b[0mCaching images (0.1GB): 100% 128/128 [00:00<00:00, 128.17it/s]\n",
 
986
  "Plotting labels... \n",
987
  "\n",
988
  "\u001b[34m\u001b[1mautoanchor: \u001b[0mAnalyzing anchors... anchors/target = 4.26, Best Possible Recall (BPR) = 0.9946\n",
 
992
  "Starting training for 3 epochs...\n",
993
  "\n",
994
  " Epoch gpu_mem box obj cls total targets img_size\n",
995
+ " 0/2 3.27G 0.04357 0.06781 0.01869 0.1301 207 640: 100% 8/8 [00:03<00:00, 2.03it/s]\n",
996
+ " Class Images Targets P R [email protected] [email protected]:.95: 100% 4/4 [00:04<00:00, 1.14s/it]\n",
997
+ " all 128 929 0.646 0.627 0.659 0.431\n",
998
  "\n",
999
  " Epoch gpu_mem box obj cls total targets img_size\n",
1000
+ " 1/2 7.75G 0.04308 0.06654 0.02083 0.1304 227 640: 100% 8/8 [00:01<00:00, 4.11it/s]\n",
1001
+ " Class Images Targets P R [email protected] [email protected]:.95: 100% 4/4 [00:01<00:00, 2.94it/s]\n",
1002
+ " all 128 929 0.681 0.607 0.663 0.434\n",
1003
  "\n",
1004
  " Epoch gpu_mem box obj cls total targets img_size\n",
1005
+ " 2/2 7.75G 0.04461 0.06896 0.01866 0.1322 191 640: 100% 8/8 [00:02<00:00, 3.94it/s]\n",
1006
+ " Class Images Targets P R [email protected] [email protected]:.95: 100% 4/4 [00:03<00:00, 1.22it/s]\n",
1007
+ " all 128 929 0.642 0.632 0.662 0.432\n",
1008
  "Optimizer stripped from runs/train/exp/weights/last.pt, 14.8MB\n",
1009
  "3 epochs completed in 0.007 hours.\n",
1010
  "\n"
 
1236
  "outputs": []
1237
  }
1238
  ]
1239
+ }