from ultralytics import YOLO
import cv2

# Load an official or custom model
model = YOLO('yolov8s.pt')  # official Detect model
#model = YOLO('yolov8n-seg.pt')  # official Segment model
#model = YOLO('yolov8n-pose.pt')  # official Pose model
#model = YOLO('path/to/best.pt')  # custom trained model

# Alternative ways to run inference or tracking directly on a source:
#results = model.predict(source=0, show=True)  # inference on a webcam
#results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True)  # tracking with the default tracker
#results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # tracking with ByteTrack

# Open the input video
cap = cv2.VideoCapture('track2.mp4')
if not cap.isOpened():
    raise Exception("Error: Could not open video.")

# Input video frame rate and dimensions (needed if you want to write an output video)
#fps = int(cap.get(cv2.CAP_PROP_FPS))
#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

class_names = model.names    # class id -> class name mapping
total_class_ids = {}         # class id -> list of unique track ids seen so far

while cap.isOpened():
    # Read a frame from the input video
    success, frame = cap.read()
    if success:
        # Run YOLOv8 tracking on the frame, persisting tracks between frames
        # results = model.predict(frame, iou=0.65, conf=0.50, imgsz=640, verbose=True)
        results = model.track(frame, iou=0.85, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=False, persist=True)

        # Built-in visualization, if you prefer it over the manual drawing below:
        #annotated_frame = results[0].plot()
        #annotated_frame = cv2.resize(annotated_frame, (annotated_frame.shape[1]//2, annotated_frame.shape[0]//2))
        #cv2.imshow("YOLOv8 Tracking", annotated_frame)

        # Process the results list
        for result in results:
            if result.boxes.id is not None:  # tracks exist only when id is set
                boxes = result.boxes.xyxy.cpu().numpy().astype(int)
                ids = result.boxes.id.cpu().numpy().astype(int)
                classes = result.boxes.cls.cpu().numpy().astype(int)

                for box, track_id, class_id in zip(boxes, ids, classes):
                    # Draw the bounding box and its "<class> id: <track id>" label
                    color = (0, 0, 255)
                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
                    class_name = class_names[class_id]
                    cv2.putText(
                        frame,
                        f"{class_name} id: {track_id}",
                        (box[0], box[1] - 5),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        1,
                        (0, 0, 255),
                        2,
                    )

                    # Statistics: count each track id once per class; if a track
                    # changes class, move its id to the new class
                    if class_id not in total_class_ids:
                        total_class_ids[class_id] = []
                    if track_id not in total_class_ids[class_id]:
                        for cid in total_class_ids:
                            if track_id in total_class_ids[cid]:
                                total_class_ids[cid].remove(track_id)
                        total_class_ids[class_id].append(track_id)

        # Per-class totals of unique track ids
        totals = {}
        for cid in total_class_ids:
            totals[class_names[cid]] = len(total_class_ids[cid])

        # Overlay the running totals in the top-left corner
        x = 0
        y = 20
        for cname in totals:
            cv2.putText(frame, f"{cname}: {totals[cname]}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
            y += 20

        cv2.imshow("YOLOv8 Tracking", frame)

        # Press 'q' to exit
        if cv2.waitKey(10) & 0xFF == ord('q'):
            break
    else:
        break

# Release the input video capture
cap.release()

# Close all OpenCV windows
cv2.destroyAllWindows()
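The script only displays the annotated frames; the commented-out fps/frame-size lines suggest writing an output video was also intended. A minimal sketch of that, assuming the same cap and frame variables and a hypothetical 'output.mp4' path:

# Sketch: save annotated frames with cv2.VideoWriter (output path is an assumption)
import cv2

cap = cv2.VideoCapture('track2.mp4')
fps = int(cap.get(cv2.CAP_PROP_FPS))
frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Frame size must match the frames passed to out.write()
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('output.mp4', fourcc, fps, (frame_width, frame_height))

# Inside the read loop, after drawing on the frame:
#     out.write(frame)

# After the loop, alongside cap.release():
#     out.release()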
 
 