1019 lines
73 KiB
Plaintext
1019 lines
73 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"id": "1e3446fa",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Training menggunakan: 0\n",
|
|
"\u001b[KDownloading https://github.com/ultralytics/assets/releases/download/v8.4.0/yolov8n.pt to 'yolov8n.pt': 100% ━━━━━━━━━━━━ 6.2MB 1.0MB/s 6.1s6.1s<0.0ss1s8s\n",
|
|
"Ultralytics 8.4.14 Python-3.12.12 torch-2.10.0+cu130 CUDA:0 (NVIDIA GeForce RTX 3050 Ti Laptop GPU, 4096MiB)\n",
|
|
"\u001b[34m\u001b[1mengine\\trainer: \u001b[0magnostic_nms=False, amp=True, angle=1.0, augment=False, auto_augment=randaugment, batch=16, bgr=0.0, box=7.5, cache=False, cfg=None, classes=None, close_mosaic=10, cls=0.5, compile=False, conf=None, copy_paste=0.0, copy_paste_mode=flip, cos_lr=False, cutmix=0.0, data=C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\data.yaml, degrees=0.0, deterministic=True, device=0, dfl=1.5, dnn=False, dropout=0.0, dynamic=False, embed=None, end2end=None, epochs=50, erasing=0.4, exist_ok=False, fliplr=0.5, flipud=0.0, format=torchscript, fraction=1.0, freeze=None, half=False, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, imgsz=256, int8=False, iou=0.7, keras=False, kobj=1.0, line_width=None, lr0=0.01, lrf=0.01, mask_ratio=4, max_det=300, mixup=0.0, mode=train, model=yolov8n.pt, momentum=0.937, mosaic=1.0, multi_scale=0.0, name=train, nbs=64, nms=False, opset=None, optimize=False, optimizer=auto, overlap_mask=True, patience=100, perspective=0.0, plots=True, pose=12.0, pretrained=True, profile=False, project=None, rect=False, resume=False, retina_masks=False, rle=1.0, save=True, save_conf=False, save_crop=False, save_dir=C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train, save_frames=False, save_json=False, save_period=-1, save_txt=False, scale=0.5, seed=0, shear=0.0, show=False, show_boxes=True, show_conf=True, show_labels=True, simplify=True, single_cls=False, source=None, split=val, stream_buffer=False, task=detect, time=None, tracker=botsort.yaml, translate=0.1, val=True, verbose=True, vid_stride=1, visualize=False, warmup_bias_lr=0.1, warmup_epochs=3.0, warmup_momentum=0.8, weight_decay=0.0005, workers=2, workspace=None\n",
|
|
"Overriding model.yaml nc=80 with nc=3\n",
|
|
"\n",
|
|
" from n params module arguments \n",
|
|
" 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
|
|
" 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
|
|
" 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
|
|
" 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
|
|
" 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
|
|
" 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
|
|
" 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
|
|
" 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
|
|
" 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
|
|
" 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
|
|
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
|
|
" 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
|
|
" 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
|
|
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
|
|
" 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
|
|
" 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
|
|
" 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
|
|
" 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
|
|
" 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
|
|
" 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
|
|
" 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
|
|
" 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
|
|
" 22 [15, 18, 21] 1 751897 ultralytics.nn.modules.head.Detect [3, 16, None, [64, 128, 256]] \n",
|
|
"Model summary: 130 layers, 3,011,433 parameters, 3,011,417 gradients, 8.2 GFLOPs\n",
|
|
"\n",
|
|
"Transferred 319/355 items from pretrained weights\n",
|
|
"Freezing layer 'model.22.dfl.conv.weight'\n",
|
|
"\u001b[34m\u001b[1mAMP: \u001b[0mrunning Automatic Mixed Precision (AMP) checks...\n",
|
|
"\u001b[KDownloading https://github.com/ultralytics/assets/releases/download/v8.4.0/yolo26n.pt to 'yolo26n.pt': 100% ━━━━━━━━━━━━ 5.3MB 1.5MB/s 3.4s.3s<0.1s2s6s8s\n",
|
|
"\u001b[34m\u001b[1mAMP: \u001b[0mchecks passed \n",
|
|
"\u001b[34m\u001b[1mtrain: \u001b[0mFast image access (ping: 0.10.0 ms, read: 1.51.2 MB/s, size: 12.5 KB)\n",
|
|
"\u001b[K\u001b[34m\u001b[1mtrain: \u001b[0mScanning C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\train\\labels... 1130 images, 0 backgrounds, 0 corrupt: 100% ━━━━━━━━━━━━ 1130/1130 619.1it/s 1.8s0.1s\n",
|
|
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\train\\labels.cache\n",
|
|
"\u001b[34m\u001b[1mval: \u001b[0mFast image access (ping: 0.10.0 ms, read: 0.60.3 MB/s, size: 9.1 KB)\n",
|
|
"\u001b[K\u001b[34m\u001b[1mval: \u001b[0mScanning C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\valid\\labels... 161 images, 0 backgrounds, 0 corrupt: 100% ━━━━━━━━━━━━ 161/161 382.7it/s 0.4s0.0s\n",
|
|
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\valid\\labels.cache\n",
|
|
"\u001b[34m\u001b[1moptimizer:\u001b[0m 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically... \n",
|
|
"\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.001429, momentum=0.9) with parameter groups 57 weight(decay=0.0), 64 weight(decay=0.0005), 63 bias(decay=0.0)\n",
|
|
"Plotting labels to C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\labels.jpg... \n",
|
|
"Image sizes 256 train, 256 val\n",
|
|
"Using 2 dataloader workers\n",
|
|
"Logging results to \u001b[1mC:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\u001b[0m\n",
|
|
"Starting training for 50 epochs...\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 1/50 0.391G 1.723 2.883 1.561 54 256: 100% ━━━━━━━━━━━━ 71/71 6.9it/s 10.2s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 6.6it/s 0.9s0.2s\n",
|
|
" all 161 335 0.392 0.321 0.322 0.165\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 2/50 0.436G 1.669 1.966 1.517 41 256: 100% ━━━━━━━━━━━━ 71/71 11.0it/s 6.4s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.9it/s 0.7s0.2s\n",
|
|
" all 161 335 0.407 0.436 0.392 0.181\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 3/50 0.436G 1.641 1.838 1.543 36 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.1s<0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.2s\n",
|
|
" all 161 335 0.379 0.495 0.389 0.183\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 4/50 0.436G 1.646 1.751 1.543 38 256: 100% ━━━━━━━━━━━━ 71/71 11.2it/s 6.3s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.8it/s 0.6s0.1s\n",
|
|
" all 161 335 0.599 0.581 0.613 0.314\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 5/50 0.436G 1.563 1.614 1.504 30 256: 100% ━━━━━━━━━━━━ 71/71 11.4it/s 6.2s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.1it/s 0.7s0.1s\n",
|
|
" all 161 335 0.576 0.591 0.583 0.302\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 6/50 0.436G 1.568 1.57 1.503 41 256: 100% ━━━━━━━━━━━━ 71/71 10.9it/s 6.5s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.7it/s 0.7s0.2s\n",
|
|
" all 161 335 0.658 0.662 0.696 0.35\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 7/50 0.436G 1.559 1.507 1.474 24 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.2s0.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.8it/s 0.8s0.2s\n",
|
|
" all 161 335 0.76 0.654 0.759 0.405\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 8/50 0.436G 1.523 1.426 1.451 53 256: 100% ━━━━━━━━━━━━ 71/71 9.7it/s 7.3s<0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.3it/s 0.7s0.2s\n",
|
|
" all 161 335 0.74 0.667 0.715 0.389\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 9/50 0.436G 1.501 1.403 1.435 42 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.2s<0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.1it/s 0.7s0.1s\n",
|
|
" all 161 335 0.67 0.707 0.734 0.423\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 10/50 0.436G 1.489 1.33 1.425 44 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.2s0.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.6it/s 0.7s0.2s\n",
|
|
" all 161 335 0.717 0.716 0.771 0.454\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 11/50 0.436G 1.459 1.311 1.407 38 256: 100% ━━━━━━━━━━━━ 71/71 10.5it/s 6.7s.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.6it/s 0.8s0.2s\n",
|
|
" all 161 335 0.753 0.684 0.773 0.437\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 12/50 0.436G 1.414 1.238 1.379 47 256: 100% ━━━━━━━━━━━━ 71/71 8.3it/s 8.6s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 6.7it/s 0.9s0.2s\n",
|
|
" all 161 335 0.773 0.747 0.818 0.478\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 13/50 0.436G 1.442 1.27 1.393 33 256: 100% ━━━━━━━━━━━━ 71/71 9.0it/s 7.9s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.9it/s 0.8s0.2s\n",
|
|
" all 161 335 0.701 0.684 0.716 0.372\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 14/50 0.436G 1.426 1.225 1.386 39 256: 100% ━━━━━━━━━━━━ 71/71 9.4it/s 7.5s<0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.2s\n",
|
|
" all 161 335 0.845 0.69 0.841 0.47\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 15/50 0.436G 1.392 1.172 1.376 49 256: 100% ━━━━━━━━━━━━ 71/71 8.9it/s 7.9s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.1it/s 0.8s0.2s\n",
|
|
" all 161 335 0.818 0.831 0.881 0.51\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 16/50 0.436G 1.372 1.146 1.354 43 256: 100% ━━━━━━━━━━━━ 71/71 10.2it/s 6.9s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.1s\n",
|
|
" all 161 335 0.852 0.747 0.848 0.503\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 17/50 0.436G 1.365 1.14 1.352 39 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.2it/s 0.7s0.2s\n",
|
|
" all 161 335 0.846 0.783 0.868 0.498\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 18/50 0.436G 1.348 1.104 1.337 32 256: 100% ━━━━━━━━━━━━ 71/71 10.2it/s 7.0s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.6it/s 0.6s0.1s\n",
|
|
" all 161 335 0.746 0.776 0.807 0.459\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 19/50 0.436G 1.354 1.103 1.344 57 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.2s0.2ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.8it/s 0.7s0.2s\n",
|
|
" all 161 335 0.839 0.788 0.848 0.489\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 20/50 0.436G 1.34 1.096 1.343 43 256: 100% ━━━━━━━━━━━━ 71/71 10.8it/s 6.6s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.0it/s 0.7s0.2s\n",
|
|
" all 161 335 0.822 0.754 0.845 0.495\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 21/50 0.436G 1.34 1.06 1.318 36 256: 100% ━━━━━━━━━━━━ 71/71 10.8it/s 6.6s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.7it/s 0.7s0.2s\n",
|
|
" all 161 335 0.87 0.802 0.91 0.544\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 22/50 0.436G 1.314 1.02 1.317 44 256: 100% ━━━━━━━━━━━━ 71/71 10.3it/s 6.9s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.3it/s 0.7s0.2s\n",
|
|
" all 161 335 0.87 0.809 0.892 0.533\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 23/50 0.436G 1.292 1 1.3 48 256: 100% ━━━━━━━━━━━━ 71/71 10.7it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.1s\n",
|
|
" all 161 335 0.835 0.822 0.887 0.526\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 24/50 0.436G 1.31 1.004 1.304 51 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.1s<0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.1it/s 0.7s0.2s\n",
|
|
" all 161 335 0.805 0.812 0.859 0.53\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 25/50 0.436G 1.292 0.9828 1.297 36 256: 100% ━━━━━━━━━━━━ 71/71 10.3it/s 6.9s.2ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.8it/s 0.7s0.2s\n",
|
|
" all 161 335 0.841 0.817 0.878 0.537\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 26/50 0.436G 1.256 0.9505 1.278 39 256: 100% ━━━━━━━━━━━━ 71/71 10.5it/s 6.7s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.0it/s 0.7s0.2s\n",
|
|
" all 161 335 0.893 0.837 0.916 0.552\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 27/50 0.436G 1.245 0.9397 1.277 38 256: 100% ━━━━━━━━━━━━ 71/71 10.7it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.1s\n",
|
|
" all 161 335 0.848 0.856 0.901 0.546\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 28/50 0.436G 1.254 0.9277 1.277 50 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.9it/s 0.7s0.2s\n",
|
|
" all 161 335 0.86 0.833 0.893 0.537\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 29/50 0.436G 1.242 0.9021 1.268 34 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s.2ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.5it/s 0.7s0.2s\n",
|
|
" all 161 335 0.923 0.847 0.922 0.563\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 30/50 0.436G 1.236 0.9032 1.276 42 256: 100% ━━━━━━━━━━━━ 71/71 8.6it/s 8.3s0.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 6.5it/s 0.9s0.2s\n",
|
|
" all 161 335 0.88 0.841 0.891 0.554\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 31/50 0.436G 1.228 0.8729 1.26 27 256: 100% ━━━━━━━━━━━━ 71/71 9.5it/s 7.4s0.2ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.8it/s 0.8s0.2s\n",
|
|
" all 161 335 0.877 0.832 0.904 0.562\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 32/50 0.436G 1.198 0.8616 1.249 37 256: 100% ━━━━━━━━━━━━ 71/71 7.0it/s 10.1s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.5it/s 0.8s0.2s\n",
|
|
" all 161 335 0.913 0.804 0.909 0.55\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 33/50 0.436G 1.193 0.8423 1.241 34 256: 100% ━━━━━━━━━━━━ 71/71 10.5it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.7it/s 0.7s0.2s\n",
|
|
" all 161 335 0.853 0.866 0.908 0.568\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 34/50 0.436G 1.215 0.8694 1.253 24 256: 100% ━━━━━━━━━━━━ 71/71 10.8it/s 6.6s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.3it/s 0.6s0.1s\n",
|
|
" all 161 335 0.88 0.838 0.897 0.552\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 35/50 0.436G 1.166 0.8396 1.234 32 256: 100% ━━━━━━━━━━━━ 71/71 10.6it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.5it/s 0.7s0.2s\n",
|
|
" all 161 335 0.893 0.806 0.907 0.562\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 36/50 0.436G 1.18 0.8359 1.241 32 256: 100% ━━━━━━━━━━━━ 71/71 10.6it/s 6.7s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.5it/s 0.7s0.1s\n",
|
|
" all 161 335 0.851 0.849 0.907 0.565\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 37/50 0.436G 1.152 0.808 1.23 43 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.6it/s 0.7s0.2s\n",
|
|
" all 161 335 0.893 0.874 0.927 0.566\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 38/50 0.436G 1.159 0.8119 1.219 36 256: 100% ━━━━━━━━━━━━ 71/71 9.2it/s 7.7s0.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.8it/s 0.7s0.2s\n",
|
|
" all 161 335 0.911 0.823 0.921 0.556\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 39/50 0.436G 1.148 0.7934 1.213 37 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.8it/s 0.7s0.2s\n",
|
|
" all 161 335 0.919 0.845 0.92 0.574\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 40/50 0.436G 1.142 0.7889 1.217 28 256: 100% ━━━━━━━━━━━━ 71/71 10.3it/s 6.9s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.7it/s 0.8s0.2s\n",
|
|
" all 161 335 0.88 0.861 0.91 0.567\n",
|
|
"Closing dataloader mosaic\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 41/50 0.436G 1.048 0.6422 1.179 22 256: 100% ━━━━━━━━━━━━ 71/71 8.6it/s 8.2s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.0it/s 0.9s0.2s\n",
|
|
" all 161 335 0.937 0.807 0.917 0.559\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 42/50 0.436G 1.02 0.6125 1.15 27 256: 100% ━━━━━━━━━━━━ 71/71 8.6it/s 8.2s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 7.3it/s 0.8s0.2s\n",
|
|
" all 161 335 0.905 0.839 0.916 0.563\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 43/50 0.436G 1 0.5929 1.143 21 256: 100% ━━━━━━━━━━━━ 71/71 7.8it/s 9.1s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 6.5it/s 0.9s0.2s\n",
|
|
" all 161 335 0.928 0.81 0.908 0.563\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 44/50 0.436G 0.9971 0.5882 1.134 17 256: 100% ━━━━━━━━━━━━ 71/71 9.7it/s 7.3s0.2ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.3it/s 0.7s0.2s\n",
|
|
" all 161 335 0.918 0.825 0.914 0.568\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 45/50 0.436G 0.9828 0.5665 1.134 22 256: 100% ━━━━━━━━━━━━ 71/71 10.4it/s 6.8s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.8it/s 0.7s0.2s\n",
|
|
" all 161 335 0.922 0.827 0.915 0.574\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 46/50 0.436G 0.9606 0.5562 1.126 23 256: 100% ━━━━━━━━━━━━ 71/71 9.9it/s 7.2s0.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 9.2it/s 0.7s0.1s\n",
|
|
" all 161 335 0.938 0.815 0.92 0.577\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 47/50 0.436G 0.9371 0.535 1.104 21 256: 100% ━━━━━━━━━━━━ 71/71 10.6it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.6it/s 0.7s0.2s\n",
|
|
" all 161 335 0.903 0.841 0.925 0.575\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 48/50 0.436G 0.9295 0.5333 1.11 19 256: 100% ━━━━━━━━━━━━ 71/71 10.7it/s 6.6s0.1s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.6it/s 0.7s0.2s\n",
|
|
" all 161 335 0.954 0.818 0.928 0.587\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 49/50 0.436G 0.9272 0.5225 1.097 26 256: 100% ━━━━━━━━━━━━ 71/71 10.5it/s 6.7s0.2s\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.4it/s 0.7s0.2s\n",
|
|
" all 161 335 0.952 0.82 0.923 0.58\n",
|
|
"\n",
|
|
" Epoch GPU_mem box_loss cls_loss dfl_loss Instances Size\n",
|
|
"\u001b[K 50/50 0.436G 0.9043 0.5125 1.093 26 256: 100% ━━━━━━━━━━━━ 71/71 10.1it/s 7.0s.1ss\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 8.4it/s 0.7s0.2s\n",
|
|
" all 161 335 0.953 0.808 0.925 0.582\n",
|
|
"\n",
|
|
"50 epochs completed in 0.118 hours.\n",
|
|
"Optimizer stripped from C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\\last.pt, 6.2MB\n",
|
|
"Optimizer stripped from C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\\best.pt, 6.2MB\n",
|
|
"\n",
|
|
"Validating C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\\best.pt...\n",
|
|
"Ultralytics 8.4.14 Python-3.12.12 torch-2.10.0+cu130 CUDA:0 (NVIDIA GeForce RTX 3050 Ti Laptop GPU, 4096MiB)\n",
|
|
"Model summary (fused): 73 layers, 3,006,233 parameters, 0 gradients, 8.1 GFLOPs\n",
|
|
"\u001b[K Class Images Instances Box(P R mAP50 mAP50-95): 100% ━━━━━━━━━━━━ 6/6 5.6it/s 1.1s0.2s\n",
|
|
" all 161 335 0.959 0.817 0.927 0.586\n",
|
|
" Fitur_Clear 80 126 0.934 0.785 0.889 0.545\n",
|
|
" Fitur_Colored 81 100 0.955 0.83 0.947 0.628\n",
|
|
" Label_botol 108 109 0.989 0.835 0.946 0.583\n",
|
|
"Speed: 0.1ms preprocess, 1.8ms inference, 0.0ms loss, 1.6ms postprocess per image\n",
|
|
"Results saved to \u001b[1mC:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\u001b[0m\n",
|
|
"--------------------------------------------------\n",
|
|
"TRAINING SELESAI!\n",
|
|
"Model PyTorch (.pt) kamu tersimpan otomatis di folder:\n",
|
|
"C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\\best.pt\n",
|
|
"--------------------------------------------------\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"from ultralytics import YOLO\n",
|
|
"import torch\n",
|
|
"\n",
|
|
"# Cek apakah pakai GPU atau CPU\n",
|
|
"device = 0 if torch.cuda.is_available() else 'cpu'\n",
|
|
"print(f\"Training menggunakan: {device}\")\n",
|
|
"\n",
|
|
"def main():\n",
|
|
" # 1. Load model Nano (paling ringan)\n",
|
|
" model = YOLO('yolov8n.pt') \n",
|
|
"\n",
|
|
" # 2. Mulai Training\n",
|
|
" # ganti 'data.yaml' dengan path lengkap file yaml kamu tadi\n",
|
|
" # Saya rapikan path-nya agar konsisten pakai backslash (\\) untuk Windows\n",
|
|
" results = model.train(\n",
|
|
" data=r'C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\data.yaml', \n",
|
|
" epochs=50, # Coba 50 dulu\n",
|
|
" imgsz=256, # Saran: 320 atau 416 (256 takut terlalu kecil)\n",
|
|
" batch=16, # Kalau out of memory, turunkan ke 8\n",
|
|
" device=device,\n",
|
|
" workers=2 # Kurangi workers kalau CPU usage 100%\n",
|
|
" )\n",
|
|
"\n",
|
|
" # 3. SELESAI (Tidak perlu export manual)\n",
|
|
" # File .pt otomatis tersimpan. Kita print saja lokasinya.\n",
|
|
" print(\"-\" * 50)\n",
|
|
" print(\"TRAINING SELESAI!\")\n",
|
|
" print(f\"Model PyTorch (.pt) kamu tersimpan otomatis di folder:\")\n",
|
|
" # results.save_dir memberitahu di folder 'runs' mana file itu disimpan\n",
|
|
" print(f\"{results.save_dir}\\\\weights\\\\best.pt\") \n",
|
|
" print(\"-\" * 50)\n",
|
|
"\n",
|
|
"if __name__ == '__main__':\n",
|
|
" main()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 13,
|
|
"id": "95bf6bb5",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Memuat Model AI...\n",
|
|
"✅ ROI Captured: roi_151308.jpg\n",
|
|
"✅ ROI Captured: roi_151321.jpg\n",
|
|
"✅ ROI Captured: roi_151333.jpg\n",
|
|
"✅ ROI Captured: roi_151345.jpg\n",
|
|
"✅ ROI Captured: roi_151400.jpg\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"import cv2\n",
|
|
"import numpy as np\n",
|
|
"import time\n",
|
|
"import os\n",
|
|
"from ultralytics import YOLO\n",
|
|
"\n",
|
|
"# ================= KONFIGURASI =================\n",
|
|
"MODEL_PATH = 'runs/detect/train/weights/best.pt' \n",
|
|
"CONF_THRESHOLD = 0.5\n",
|
|
"ROI_BOX = [200, 150, 250, 250] # [x, y, w, h]\n",
|
|
"SENSITIVITY = 1500 \n",
|
|
"STABLE_TIME_NEEDED = 1.5 \n",
|
|
"MAX_FILES = 10 \n",
|
|
"\n",
|
|
"def main():\n",
|
|
" print(\"Memuat Model AI...\")\n",
|
|
" try:\n",
|
|
" model = YOLO(MODEL_PATH)\n",
|
|
" except Exception as e:\n",
|
|
" print(f\"Error: {e}\")\n",
|
|
" return\n",
|
|
"\n",
|
|
" cap = cv2.VideoCapture(0)\n",
|
|
" fgbg = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=40)\n",
|
|
" \n",
|
|
" if not os.path.exists('captures'): os.makedirs('captures')\n",
|
|
"\n",
|
|
" is_moving = False\n",
|
|
" stable_start_time = None\n",
|
|
" prev_time = time.time()\n",
|
|
" \n",
|
|
" last_detections = [] \n",
|
|
" last_detection_time = 0\n",
|
|
" show_label_duration = 4.0 \n",
|
|
"\n",
|
|
" while True:\n",
|
|
" success, frame = cap.read()\n",
|
|
" if not success: break\n",
|
|
" frame = cv2.flip(frame, 1)\n",
|
|
" display_frame = frame.copy()\n",
|
|
" \n",
|
|
" # 1. POTONG AREA ROI UNTUK ANALISIS\n",
|
|
" x, y, w, h = ROI_BOX\n",
|
|
" roi = frame[y:y+h, x:x+w]\n",
|
|
" \n",
|
|
" # Deteksi Gerakan\n",
|
|
" gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n",
|
|
" fgmask = fgbg.apply(cv2.GaussianBlur(gray, (21, 21), 0))\n",
|
|
" motion_score = np.sum(fgmask > 0)\n",
|
|
"\n",
|
|
" status = \"Menunggu Objek...\"\n",
|
|
" color = (0, 255, 0)\n",
|
|
"\n",
|
|
" # 2. LOGIKA TRIGGER & AI\n",
|
|
" if motion_score > SENSITIVITY:\n",
|
|
" status = \"Objek Bergerak...\"\n",
|
|
" color = (0, 255, 255)\n",
|
|
" is_moving = True\n",
|
|
" stable_start_time = None\n",
|
|
" \n",
|
|
" elif is_moving and motion_score < 200:\n",
|
|
" if stable_start_time is None: stable_start_time = time.time()\n",
|
|
" elapsed = time.time() - stable_start_time\n",
|
|
" status = f\"Menganalisa... ({elapsed:.1f}s)\"\n",
|
|
" color = (255, 165, 0)\n",
|
|
"\n",
|
|
" if elapsed >= STABLE_TIME_NEEDED:\n",
|
|
" # JALANKAN AI HANYA PADA ROI\n",
|
|
" results = model(roi, conf=CONF_THRESHOLD, verbose=False)\n",
|
|
" \n",
|
|
" detected_list = []\n",
|
|
" # Salinan ROI untuk disimpan (agar tidak ada coretan UI)\n",
|
|
" roi_to_save = roi.copy()\n",
|
|
" \n",
|
|
" for r in results:\n",
|
|
" for box in r.boxes:\n",
|
|
" cls_id = int(box.cls[0])\n",
|
|
" cls_name = model.names[cls_id]\n",
|
|
" conf = float(box.conf[0])\n",
|
|
" coords = map(int, box.xyxy[0])\n",
|
|
" \n",
|
|
" detected_list.append({\n",
|
|
" 'name': cls_name,\n",
|
|
" 'conf': conf,\n",
|
|
" 'box': list(coords)\n",
|
|
" })\n",
|
|
"\n",
|
|
" # Urutkan & Ambil Top 2\n",
|
|
" detected_list.sort(key=lambda x: x['conf'], reverse=True)\n",
|
|
" last_detections = detected_list[:2]\n",
|
|
" \n",
|
|
" if last_detections:\n",
|
|
" last_detection_time = time.time()\n",
|
|
" for item in last_detections:\n",
|
|
" # Gambar kotak pada hasil crop ROI\n",
|
|
" bx1, by1, bx2, by2 = item['box']\n",
|
|
" cv2.rectangle(roi_to_save, (bx1, by1), (bx2, by2), (0, 255, 0), 2)\n",
|
|
" cv2.putText(roi_to_save, f\"{item['name']}\", (bx1, by1-5),\n",
|
|
" cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)\n",
|
|
" \n",
|
|
" # SIMPAN HASIL CROP ROI SAJA\n",
|
|
" ts = time.strftime(\"%H%M%S\")\n",
|
|
" cv2.imwrite(f\"captures/roi_{ts}.jpg\", roi_to_save)\n",
|
|
" print(f\"✅ ROI Captured: roi_{ts}.jpg\")\n",
|
|
" else:\n",
|
|
" last_detections = [{\"name\": \"OBJEK TIDAK DIKENALI\", \"conf\": 0}]\n",
|
|
" last_detection_time = time.time()\n",
|
|
"\n",
|
|
" is_moving = False\n",
|
|
" stable_start_time = None\n",
|
|
"\n",
|
|
" # 3. UI DISPLAY\n",
|
|
" # Gambar kotak ROI di layar utama\n",
|
|
" cv2.rectangle(display_frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n",
|
|
" \n",
|
|
" # Tampilkan Label Top 2\n",
|
|
" if time.time() - last_detection_time < show_label_duration:\n",
|
|
" for i, item in enumerate(last_detections):\n",
|
|
" text = f\"{item['name']} ({item['conf']*100:.1f}%)\" if item['conf'] > 0 else item['name']\n",
|
|
" cv2.putText(display_frame, text, (x, y + h + 30 + (i * 25)), \n",
|
|
" cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0) if item['conf'] > 0 else (0,0,255), 2)\n",
|
|
"\n",
|
|
" # Bar Status & FPS\n",
|
|
" cv2.rectangle(display_frame, (0, 0), (display_frame.shape[1], 40), (0, 0, 0), -1)\n",
|
|
" cv2.putText(display_frame, status, (15, 28), cv2.FONT_HERSHEY_SIMPLEX, 0.7, color, 2)\n",
|
|
" \n",
|
|
" fps = 1 / (time.time() - prev_time)\n",
|
|
" prev_time = time.time()\n",
|
|
" cv2.putText(display_frame, f\"FPS: {int(fps)}\", (display_frame.shape[1]-100, 28), \n",
|
|
" cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)\n",
|
|
"\n",
|
|
" cv2.imshow(\"ROI-Only Capture System\", display_frame)\n",
|
|
" if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
|
|
"\n",
|
|
" cap.release()\n",
|
|
" cv2.destroyAllWindows()\n",
|
|
"\n",
|
|
"if __name__ == \"__main__\":\n",
|
|
" main()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "3cf7a822",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import cv2\n",
|
|
"import time\n",
|
|
"from ultralytics import YOLO\n",
|
|
"\n",
|
|
"VIDEO_PATH = 'test_botol.mp4' \n",
|
|
"MODEL_PATH = 'runs/detect/train/weights/best.pt'\n",
|
|
" \n",
|
|
"# Pilih ukuran target: (320, 320) atau (256, 256)\n",
|
|
"# Saran: 320 lebih akurat, 256 lebih ngebut\n",
|
|
"TARGET_SIZE = (320, 320) \n",
|
|
"\n",
|
|
"def main():\n",
|
|
" model = YOLO(MODEL_PATH)\n",
|
|
" cap = cv2.VideoCapture(VIDEO_PATH)\n",
|
|
"\n",
|
|
" if not cap.isOpened():\n",
|
|
" print(\"Error membuka video.\")\n",
|
|
" return\n",
|
|
"\n",
|
|
" prev_time = 0\n",
|
|
"\n",
|
|
" while True:\n",
|
|
" success, frame = cap.read()\n",
|
|
"\n",
|
|
" # --- LOGIKA LOOPING ---\n",
|
|
" if not success:\n",
|
|
" cap.set(cv2.CAP_PROP_POS_FRAMES, 0)\n",
|
|
" continue\n",
|
|
"\n",
|
|
" # --- 1. RESIZE PAKSA (RAHASIA NGEBUT) ---\n",
|
|
" # Kita kecilkan gambar SEBELUM diproses AI dan ditampilkan.\n",
|
|
" # Ini meringankan beban CPU Raspberry Pi secara signifikan.\n",
|
|
" frame = cv2.resize(frame, TARGET_SIZE)\n",
|
|
"\n",
|
|
" # --- 2. DETEKSI ---\n",
|
|
" # Karena gambar sudah 320, AI tidak perlu kerja keras resize lagi\n",
|
|
" results = model(frame, verbose=False, conf=0.5)\n",
|
|
" \n",
|
|
" # --- 3. GAMBAR KOTAK ---\n",
|
|
" annotated_frame = results[0].plot()\n",
|
|
"\n",
|
|
" # --- 4. HITUNG FPS ---\n",
|
|
" curr_time = time.time()\n",
|
|
" fps = 1 / (curr_time - prev_time) if (curr_time - prev_time) > 0 else 0\n",
|
|
" prev_time = curr_time\n",
|
|
"\n",
|
|
" # Tampilkan FPS & Resolusi\n",
|
|
" info_text = f\"FPS: {int(fps)} | Res: {TARGET_SIZE}\"\n",
|
|
" cv2.putText(annotated_frame, info_text, (10, 30), \n",
|
|
" cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)\n",
|
|
"\n",
|
|
" # Tampilkan\n",
|
|
" cv2.imshow(\"Preview Ringan (Resized)\", annotated_frame)\n",
|
|
"\n",
|
|
" if cv2.waitKey(1) & 0xFF == ord('q'):\n",
|
|
" break\n",
|
|
"\n",
|
|
" cap.release()\n",
|
|
" cv2.destroyAllWindows()\n",
|
|
"\n",
|
|
"if __name__ == \"__main__\":\n",
|
|
" main()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "50c437fd",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Ultralytics 8.4.14 Python-3.12.12 torch-2.10.0+cu130 CPU (11th Gen Intel Core i5-11400H @ 2.70GHz)\n",
|
|
"Model summary (fused): 73 layers, 3,006,233 parameters, 0 gradients, 8.1 GFLOPs\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mPyTorch:\u001b[0m starting from 'runs\\detect\\train\\weights\\best.pt' with input shape (1, 3, 256, 256) BCHW and output shape(s) (1, 7, 1344) (5.9 MB)\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m starting export with onnx 1.20.1 opset 22...\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m slimming with onnxslim 0.1.85...\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m converting to FP16...\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m export success 1.3s, saved as 'runs\\detect\\train\\weights\\best.onnx' (5.8 MB)\n",
|
|
"\n",
|
|
"Export complete (1.4s)\n",
|
|
"Results saved to \u001b[1mC:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\u001b[0m\n",
|
|
"Predict: yolo predict task=detect model=runs\\detect\\train\\weights\\best.onnx imgsz=256 half\n",
|
|
"Validate: yolo val task=detect model=runs\\detect\\train\\weights\\best.onnx imgsz=256 data=C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\data.yaml half \n",
|
|
"Visualize: https://netron.app\n",
|
|
"Ultralytics 8.4.14 Python-3.12.12 torch-2.10.0+cu130 CPU (11th Gen Intel Core i5-11400H @ 2.70GHz)\n",
|
|
"WARNING INT8 export requires a missing 'data' arg for calibration. Using default 'data=coco8.yaml'.\n",
|
|
"Model summary (fused): 73 layers, 3,006,233 parameters, 0 gradients, 8.1 GFLOPs\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mPyTorch:\u001b[0m starting from 'runs\\detect\\train\\weights\\best.pt' with input shape (1, 3, 256, 256) BCHW and output shape(s) (1, 7, 1344) (5.9 MB)\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mTensorFlow SavedModel:\u001b[0m starting export with tensorflow 2.19.1...\n",
|
|
"\u001b[34m\u001b[1mTensorFlow SavedModel:\u001b[0m collecting INT8 calibration images from 'data=coco8.yaml'\n",
|
|
"\n",
|
|
"WARNING Dataset 'coco8.yaml' images not found, missing path 'C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8\\images\\val'\n",
|
|
"\u001b[KDownloading https://ultralytics.com/assets/coco8.zip to 'C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8.zip': 100% ━━━━━━━━━━━━ 432.8KB 4.8MB/s 0.1s\n",
|
|
"\u001b[KUnzipping C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8.zip to C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8...: 100% ━━━━━━━━━━━━ 25/25 2.1Kfiles/s 0.0s\n",
|
|
"Dataset download success (0.7s), saved to \u001b[1mC:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\u001b[0m\n",
|
|
"\n",
|
|
"Fast image access (ping: 0.10.0 ms, read: 6.93.9 MB/s, size: 54.0 KB)\n",
|
|
"\u001b[KScanning C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8\\labels\\val... 4 images, 0 backgrounds, 0 corrupt: 100% ━━━━━━━━━━━━ 4/4 145.1it/s 0.0s\n",
|
|
"New cache created: C:\\Users\\MSI-PC\\Downloads\\My First Project.v3i.yolov8\\datasets\\coco8\\labels\\val.cache\n",
|
|
"WARNING \u001b[34m\u001b[1mTensorFlow SavedModel:\u001b[0m >300 images recommended for INT8 calibration, found 4 images.\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m starting export with onnx 1.20.1 opset 22...\n"
|
|
]
|
|
},
|
|
{
|
|
"name": "stderr",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"c:\\Users\\MSI-PC\\miniconda3\\envs\\skripsi\\Lib\\site-packages\\torch\\onnx\\_internal\\torchscript_exporter\\utils.py:552: OnnxExporterWarning: Exporting to ONNX opset version 22 is not supported. by 'torch.onnx.export()'. The highest opset version supported is 20. To use a newer opset version, consider 'torch.onnx.export(..., dynamo=True)'. \n",
|
|
" _export(\n"
|
|
]
|
|
},
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m slimming with onnxslim 0.1.85...\n",
|
|
"\u001b[34m\u001b[1mONNX:\u001b[0m export success 1.3s, saved as 'runs\\detect\\train\\weights\\best.onnx' (11.6 MB)\n",
|
|
"\u001b[KDownloading https://github.com/ultralytics/assets/releases/download/v8.4.0/calibration_image_sample_data_20x128x128x3_float32.npy.zip to 'calibration_image_sample_data_20x128x128x3_float32.npy.zip': 100% ━━━━━━━━━━━━ 1.1MB 7.5MB/s 0.1s/s 0.1s<0.1s\n",
|
|
"\u001b[KUnzipping calibration_image_sample_data_20x128x128x3_float32.npy.zip to C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\calibration_image_sample_data_20x128x128x3_float32.npy...: 100% ━━━━━━━━━━━━ 1/1 91.8files/s 0.0s\n",
|
|
"WARNING:tensorflow:From c:\\Users\\MSI-PC\\miniconda3\\envs\\skripsi\\Lib\\site-packages\\tf_keras\\src\\losses.py:2976: The name tf.losses.sparse_softmax_cross_entropy is deprecated. Please use tf.compat.v1.losses.sparse_softmax_cross_entropy instead.\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mTensorFlow SavedModel:\u001b[0m starting TFLite export with onnx2tf 1.28.8...\n",
|
|
"Saved artifact at 'runs\\detect\\train\\weights\\best_saved_model'. The following endpoints are available:\n",
|
|
"\n",
|
|
"* Endpoint 'serving_default'\n",
|
|
" inputs_0 (POSITIONAL_ONLY): TensorSpec(shape=(1, 256, 256, 3), dtype=tf.float32, name='images')\n",
|
|
"Output Type:\n",
|
|
" TensorSpec(shape=(1, 7, 1344), dtype=tf.float32, name=None)\n",
|
|
"Captures:\n",
|
|
" 1741884918096: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741884917712: TensorSpec(shape=(3, 3, 3, 16), dtype=tf.float32, name=None)\n",
|
|
" 1741884918480: TensorSpec(shape=(16,), dtype=tf.float32, name=None)\n",
|
|
" 1741884921360: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741884921936: TensorSpec(shape=(3, 3, 16, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884920208: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884920976: TensorSpec(shape=(1, 1, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884922128: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884922896: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884922704: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741880486672: TensorSpec(shape=(3, 3, 16, 16), dtype=tf.float32, name=None)\n",
|
|
" 1741880486288: TensorSpec(shape=(16,), dtype=tf.float32, name=None)\n",
|
|
" 1741884924240: TensorSpec(shape=(3, 3, 16, 16), dtype=tf.float32, name=None)\n",
|
|
" 1741884924048: TensorSpec(shape=(16,), dtype=tf.float32, name=None)\n",
|
|
" 1741884922320: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884923088: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884923472: TensorSpec(shape=(1, 1, 48, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884923664: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884924432: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741884924816: TensorSpec(shape=(3, 3, 32, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741884925200: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884925584: TensorSpec(shape=(1, 1, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741884925776: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884926352: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884926160: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884928272: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884927696: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884929040: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884928464: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884929424: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884925392: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884928848: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741884929808: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741884926544: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884926736: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884929616: TensorSpec(shape=(1, 1, 128, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741884929232: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884928656: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741884930000: TensorSpec(shape=(3, 3, 64, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741884925968: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741884927312: TensorSpec(shape=(1, 1, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741884927888: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741884930192: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884930576: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884930960: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741884931728: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884930384: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741884931344: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884931536: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984744656: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984743696: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984744464: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741884930768: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741884931152: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984745232: TensorSpec(shape=(1, 1, 256, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984744272: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984745424: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741984744848: TensorSpec(shape=(3, 3, 128, 256), dtype=tf.float32, name=None)\n",
|
|
" 1741984745040: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1741984745808: TensorSpec(shape=(1, 1, 256, 256), dtype=tf.float32, name=None)\n",
|
|
" 1741984746000: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1741984746576: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984746384: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984748496: TensorSpec(shape=(3, 3, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984748688: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984746192: TensorSpec(shape=(3, 3, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984745616: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984746768: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984746960: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984748880: TensorSpec(shape=(1, 1, 384, 256), dtype=tf.float32, name=None)\n",
|
|
" 1741984747536: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1741984747920: TensorSpec(shape=(1, 1, 256, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984749264: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984748112: TensorSpec(shape=(1, 1, 512, 256), dtype=tf.float32, name=None)\n",
|
|
" 1741984749072: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1741984750032: TensorSpec(shape=(1, 1, 384, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984749840: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984750416: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984750224: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984752336: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984752528: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984749456: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984749648: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984750608: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984750800: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984752912: TensorSpec(shape=(1, 1, 192, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984752720: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1741984751952: TensorSpec(shape=(1, 1, 192, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984751760: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984753488: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984753296: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984755408: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741984754832: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741984755984: TensorSpec(shape=(3, 3, 32, 32), dtype=tf.float32, name=None)\n",
|
|
" 1741984755600: TensorSpec(shape=(32,), dtype=tf.float32, name=None)\n",
|
|
" 1741984753680: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984753872: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984756560: TensorSpec(shape=(1, 1, 96, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984751376: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984754448: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1741984756944: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984756752: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984758096: TensorSpec(shape=(1, 1, 192, 128), dtype=tf.float32, name=None)\n",
|
|
" 1741984755792: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1742002193040: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002193424: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1741984758672: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984759440: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984759056: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984759248: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984759632: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002192464: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002194192: TensorSpec(shape=(1, 1, 192, 128), dtype=tf.float32, name=None)\n",
|
|
" 1742002194960: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1742002194576: TensorSpec(shape=(4, 2), dtype=tf.int32, name=None)\n",
|
|
" 1742002194384: TensorSpec(shape=(3, 3, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1742002193616: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1742002196688: TensorSpec(shape=(1, 1, 384, 256), dtype=tf.float32, name=None)\n",
|
|
" 1742002196112: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1742002197648: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002197264: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002200912: TensorSpec(shape=(3, 3, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1742002199952: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1742002200336: TensorSpec(shape=(3, 3, 128, 128), dtype=tf.float32, name=None)\n",
|
|
" 1742002201104: TensorSpec(shape=(128,), dtype=tf.float32, name=None)\n",
|
|
" 1742002198224: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002198416: TensorSpec(shape=(4,), dtype=tf.int64, name=None)\n",
|
|
" 1742002199568: TensorSpec(shape=(1, 1, 384, 256), dtype=tf.float32, name=None)\n",
|
|
" 1742002197840: TensorSpec(shape=(256,), dtype=tf.float32, name=None)\n",
|
|
" 1742002198992: TensorSpec(shape=(3, 3, 256, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002194000: TensorSpec(shape=(3, 3, 128, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984757136: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002200144: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002195152: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984756368: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002199376: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002195344: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984757328: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002201488: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002195728: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984757520: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002201872: TensorSpec(shape=(1, 1, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002197072: TensorSpec(shape=(1, 1, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984758480: TensorSpec(shape=(1, 1, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002202256: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002196496: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984757904: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002200720: TensorSpec(shape=(3, 3, 256, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002195536: TensorSpec(shape=(3, 3, 128, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984753104: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002200528: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002194768: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984755024: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002201680: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002196304: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1741984757712: TensorSpec(shape=(3, 3, 64, 64), dtype=tf.float32, name=None)\n",
|
|
" 1742002201296: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002195920: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1741984756176: TensorSpec(shape=(64,), dtype=tf.float32, name=None)\n",
|
|
" 1742002204560: TensorSpec(shape=(1, 1, 16, 1), dtype=tf.float32, name=None)\n",
|
|
" 1742002202448: TensorSpec(shape=(1, 1, 64, 3), dtype=tf.float32, name=None)\n",
|
|
" 1742002197456: TensorSpec(shape=(1, 1, 64, 3), dtype=tf.float32, name=None)\n",
|
|
" 1741984758864: TensorSpec(shape=(1, 1, 64, 3), dtype=tf.float32, name=None)\n",
|
|
" 1742002202064: TensorSpec(shape=(3,), dtype=tf.float32, name=None)\n",
|
|
" 1742002196880: TensorSpec(shape=(3,), dtype=tf.float32, name=None)\n",
|
|
" 1741984758288: TensorSpec(shape=(3,), dtype=tf.float32, name=None)\n",
|
|
" 1742002203024: TensorSpec(shape=(3,), dtype=tf.int64, name=None)\n",
|
|
" 1742002203408: TensorSpec(shape=(3,), dtype=tf.int64, name=None)\n",
|
|
" 1742002203984: TensorSpec(shape=(3,), dtype=tf.int64, name=None)\n",
|
|
" 1742002198032: TensorSpec(shape=(3,), dtype=tf.int64, name=None)\n",
|
|
" 1742002205136: TensorSpec(shape=(1, 2, 1344), dtype=tf.float32, name=None)\n",
|
|
" 1742002203792: TensorSpec(shape=(1, 2, 1344), dtype=tf.float32, name=None)\n",
|
|
"\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mTensorFlow SavedModel:\u001b[0m export success 36.0s, saved as 'runs\\detect\\train\\weights\\best_saved_model' (38.2 MB)\n",
|
|
"\n",
|
|
"\u001b[34m\u001b[1mTensorFlow Lite:\u001b[0m starting export with tensorflow 2.19.1...\n",
|
|
"\u001b[34m\u001b[1mTensorFlow Lite:\u001b[0m export success 0.0s, saved as 'runs\\detect\\train\\weights\\best_saved_model\\best_int8.tflite' (3.0 MB)\n",
|
|
"\n",
|
|
"Export complete (36.1s)\n",
|
|
"Results saved to \u001b[1mC:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\runs\\detect\\train\\weights\u001b[0m\n",
|
|
"Predict: yolo predict task=detect model=runs\\detect\\train\\weights\\best_saved_model\\best_int8.tflite imgsz=256 int8\n",
|
|
"Validate: yolo val task=detect model=runs\\detect\\train\\weights\\best_saved_model\\best_int8.tflite imgsz=256 data=C:\\Users\\MSI-PC\\Downloads\\My First Project.v4-bening-dan-bewarna-.yolov8\\data.yaml int8 \n",
|
|
"Visualize: https://netron.app\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"'runs\\\\detect\\\\train\\\\weights\\\\best_saved_model\\\\best_int8.tflite'"
|
|
]
|
|
},
|
|
"execution_count": 5,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"from ultralytics import YOLO\n",
|
|
"model = YOLO(\"runs/detect/train/weights/best.pt\")\n",
|
|
"# Di PC/Laptop\n",
|
|
"model.export(format=\"onnx\", half=True) # FP16 (Lebih ringan)\n",
|
|
"# atau\n",
|
|
"model.export(format=\"tflite\", int8=True) # Jauh lebih cepat di Pi"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"id": "8777dd07",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Sistem Siap. Masukkan benda ke kotak dan diamkan.\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094707.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094711.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094716.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094723.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094727.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094731.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094735.jpg\n",
|
|
"BERHASIL! Foto disimpan: captures/benda_20260218-094740.jpg\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"import cv2\n",
|
|
"import numpy as np\n",
|
|
"import time\n",
|
|
"import os\n",
|
|
"\n",
|
|
"# --- KONFIGURASI ---\n",
|
|
"ROI_BOX = [200, 150, 250, 250] # [x, y, lebar, tinggi]\n",
|
|
"SENSITIVITY = 1000 # Ambang batas gerakan\n",
|
|
"STABLE_THRESHOLD = 200 # Skor gerakan di bawah ini dianggap \"diam\"\n",
|
|
"STABLE_TIME_NEEDED = 1.5 # Harus diam selama 1.5 detik baru capture\n",
|
|
"\n",
|
|
"def main():\n",
|
|
" cap = cv2.VideoCapture(0)\n",
|
|
" fgbg = cv2.createBackgroundSubtractorMOG2(history=100, varThreshold=40)\n",
|
|
"\n",
|
|
" # Buat folder penyimpanan jika belum ada\n",
|
|
" if not os.path.exists('captures'):\n",
|
|
" os.makedirs('captures')\n",
|
|
"\n",
|
|
" is_moving = False\n",
|
|
" stable_start_time = None\n",
|
|
" photo_taken = False\n",
|
|
"\n",
|
|
" print(\"Sistem Siap. Masukkan benda ke kotak dan diamkan.\")\n",
|
|
"\n",
|
|
" while True:\n",
|
|
" success, frame = cap.read()\n",
|
|
" if not success: break\n",
|
|
" frame = cv2.flip(frame, 1)\n",
|
|
" display_frame = frame.copy()\n",
|
|
"\n",
|
|
" # 1. POTONG ROI\n",
|
|
" x, y, w, h = ROI_BOX\n",
|
|
" roi = frame[y:y+h, x:x+w]\n",
|
|
" \n",
|
|
" # 2. HITUNG GERAKAN\n",
|
|
" gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)\n",
|
|
" gray = cv2.GaussianBlur(gray, (21, 21), 0)\n",
|
|
" fgmask = fgbg.apply(gray)\n",
|
|
" motion_score = np.sum(fgmask > 0)\n",
|
|
"\n",
|
|
" # 3. LOGIKA CAPTURE OTOMATIS\n",
|
|
" status = \"Menunggu Objek...\"\n",
|
|
" color = (0, 255, 0)\n",
|
|
"\n",
|
|
" if motion_score > SENSITIVITY:\n",
|
|
" status = \"Objek Bergerak...\"\n",
|
|
" color = (0, 255, 255) # Kuning\n",
|
|
" is_moving = True\n",
|
|
" stable_start_time = None\n",
|
|
" photo_taken = False\n",
|
|
" \n",
|
|
" elif is_moving and motion_score < STABLE_THRESHOLD:\n",
|
|
" # Objek tadinya bergerak, sekarang mulai diam\n",
|
|
" if stable_start_time is None:\n",
|
|
" stable_start_time = time.time()\n",
|
|
" \n",
|
|
" elapsed = time.time() - stable_start_time\n",
|
|
" status = f\"Menganalisa... {elapsed:.1f}s\"\n",
|
|
" color = (255, 165, 0) # Oranye\n",
|
|
"\n",
|
|
" if elapsed >= STABLE_TIME_NEEDED and not photo_taken:\n",
|
|
" # --- TANGKAP FOTO ---\n",
|
|
" timestamp = time.strftime(\"%Y%m%d-%H%M%S\")\n",
|
|
" filename = f\"captures/benda_{timestamp}.jpg\"\n",
|
|
" cv2.imwrite(filename, frame)\n",
|
|
" \n",
|
|
"            print(f\"BERHASIL! Foto disimpan: {filename}\")\n",
|
|
" photo_taken = True\n",
|
|
" is_moving = False # Reset setelah tangkap\n",
|
|
" status = \"FOTO TERTANGKAP!\"\n",
|
|
" color = (0, 0, 255) # Merah\n",
|
|
"\n",
|
|
" # --- UI DISPLAY ---\n",
|
|
" cv2.rectangle(display_frame, (x, y), (x+w, y+h), (255, 0, 0), 2)\n",
|
|
" cv2.rectangle(display_frame, (0, 0), (display_frame.shape[1], 50), (0, 0, 0), -1)\n",
|
|
" cv2.putText(display_frame, status, (20, 35), cv2.FONT_HERSHEY_SIMPLEX, 0.8, color, 2)\n",
|
|
" \n",
|
|
" cv2.imshow(\"Auto Capture System\", display_frame)\n",
|
|
" if cv2.waitKey(1) & 0xFF == ord('q'): break\n",
|
|
"\n",
|
|
" cap.release()\n",
|
|
" cv2.destroyAllWindows()\n",
|
|
"\n",
|
|
"if __name__ == \"__main__\":\n",
|
|
" main()"
|
|
]
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "skripsi",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.12.12"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|