# track2.py — YOLOv8 object tracking over a video file with per-class unique-track-ID counting.
  1. from ultralytics import YOLO
  2. import cv2
  3. # Load an official or custom model
  4. model = YOLO('yolov8s.pt') # Load an official Detect model
  5. #model = YOLO('yolov8n-seg.pt') # Load an official Segment model
  6. #model = YOLO('yolov8n-pose.pt') # Load an official Pose model
  7. #model = YOLO('path/to/best.pt') # Load a custom trained model
  8. # Perform tracking with the model
  9. #results = model.predict(source=0, show=True) #
  10. #results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True) # Tracking with default tracker
  11. #results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml") # Tracking with ByteTrack tracker
  12. # Open the input video
  13. cap = cv2.VideoCapture('track2.mp4')
  14. if not cap.isOpened():
  15. raise Exception("Error: Could not open video.")
  16. # Get input video frame rate and dimensions
  17. #fps = int(cap.get(cv2.CAP_PROP_FPS))
  18. #frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
  19. #frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
  20. totall_class_ids = {}
  21. while cap.isOpened():
  22. # Read a frame from the input video
  23. success, frame = cap.read()
  24. if success:
  25. # Run YOLOv8 tracking on the frame, persisting tracks between frames
  26. # results = model.predict(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=True)
  27. results = model.track(frame, iou=0.85, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=False, persist=True)
  28. ## Visualize the results on the frame
  29. #annotated_frame = results[0].plot()
  30. ##annotated_frame = cv2.resize(annotated_frame, (annotated_frame.shape[1]//2, annotated_frame.shape[0]//2))
  31. ## Display the annotated frame
  32. #cv2.imshow("YOLOv8 Tracking", annotated_frame)
  33. # Process results list
  34. for result in results:
  35. if result.boxes.id != None: # this will ensure that id is not None -> exist tracks
  36. boxes = result.boxes.xyxy.cpu().numpy().astype(int)
  37. ids = result.boxes.id.cpu().numpy().astype(int)
  38. classes = result.boxes.cls.cpu().numpy()
  39. class_names = result.names
  40. for box, id, class_id in zip(boxes, ids, classes):
  41. color = (0, 0, 255)
  42. cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3],), color, 2)
  43. class_name = class_names[class_id]
  44. cv2.putText(
  45. frame,
  46. f"{class_name} id: {id}",
  47. (box[0], box[1]-5),
  48. cv2.FONT_HERSHEY_SIMPLEX,
  49. 1,
  50. (0, 0, 255),
  51. 2,
  52. )
  53. #Statistics
  54. if not class_id in totall_class_ids:
  55. totall_class_ids[class_id]=[]
  56. if not id in totall_class_ids[class_id]:
  57. for cid in totall_class_ids:
  58. if id in totall_class_ids[cid]:
  59. totall_class_ids[cid].remove(id)
  60. totall_class_ids[class_id].append(id)
  61. totall = {}
  62. for cid in totall_class_ids:
  63. totall[class_names[cid]] = len(totall_class_ids[cid])
  64. x = 0
  65. y = 20
  66. for cname in totall:
  67. cv2.putText(frame, f"{cname}: {totall[cname]}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
  68. y += 20
  69. cv2.imshow("YOLOv8 Tracking", frame)
  70. # Check for the 'q' key to exit
  71. if cv2.waitKey(10) & 0xFF == ord('q'):
  72. break
  73. else:
  74. break
  75. # Release the input video capture and output video writerй
  76. cap.release()
  77. # Close all OpenCV windows
  78. cv2.destroyAllWindows()