# track0.py — real-time YOLOv8 object tracking on a webcam feed.
  1. from ultralytics import YOLO
  2. import cv2
  3. # Load an official or custom model
  4. model = YOLO('yolov8s.pt') # Load an official Detect model
  5. #model = YOLO('yolov8n-seg.pt') # Load an official Segment model
  6. #model = YOLO('yolov8n-pose.pt') # Load an official Pose model
  7. #model = YOLO('path/to/best.pt') # Load a custom trained model
  8. # Perform tracking with the model
  9. #results = model.predict(source=0, show=True) #
  10. #results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True) # Tracking with default tracker
  11. #results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml") # Tracking with ByteTrack tracker
  12. # Open the input video
  13. cap = cv2.VideoCapture(0)
  14. if not cap.isOpened():
  15. raise Exception("Error: Could not open video.")
  16. # Get input video frame rate and dimensions
  17. #fps = int(cap.get(cv2.CAP_PROP_FPS))
  18. #frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
  19. #frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
  20. while cap.isOpened():
  21. # Read a frame from the input video
  22. success, frame = cap.read()
  23. if success:
  24. # Run YOLOv8 tracking on the frame, persisting tracks between frames
  25. # results = model.predict(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=True)
  26. results = model.track(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=False)
  27. ## Visualize the results on the frame
  28. #annotated_frame = results[0].plot()
  29. ##annotated_frame = cv2.resize(annotated_frame, (annotated_frame.shape[1]//2, annotated_frame.shape[0]//2))
  30. ## Display the annotated frame
  31. #cv2.imshow("YOLOv8 Tracking", annotated_frame)
  32. # Process results list
  33. for result in results:
  34. if result.boxes.id != None: # this will ensure that id is not None -> exist tracks
  35. boxes = result.boxes.xyxy.cpu().numpy().astype(int)
  36. ids = result.boxes.id.cpu().numpy().astype(int)
  37. classes = result.boxes.cls.cpu().numpy()
  38. class_names = result.names
  39. for box, id, class_id in zip(boxes, ids, classes):
  40. color = (0, 0, 255)
  41. cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3],), color, 2)
  42. class_name = class_names[class_id]
  43. cv2.putText(
  44. frame,
  45. f"{class_name} id: {id}",
  46. (box[0], box[1]-5),
  47. cv2.FONT_HERSHEY_SIMPLEX,
  48. 1,
  49. (0, 0, 255),
  50. 2,
  51. )
  52. cv2.imshow("YOLOv8 Tracking", frame)
  53. # Check for the 'q' key to exit
  54. if cv2.waitKey(10) & 0xFF == ord('q'):
  55. break
  56. else:
  57. break
  58. # Release the input video capture and output video writerй
  59. cap.release()
  60. # Close all OpenCV windows
  61. cv2.destroyAllWindows()