from ultralytics import YOLO
import cv2
import numpy as np

# Load an official or custom segmentation model
model = YOLO('yolov8s-seg.pt')  # official YOLOv8 Segment model

# Open the video source (0 = default webcam)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    raise Exception("Error: Could not open video.")

while cap.isOpened():
    # Read a frame from the video source
    success, frame = cap.read()
    if not success:
        break

    # Run YOLOv8 tracking on the frame, persisting tracks between frames
    results = model.track(frame, iou=0.65, conf=0.40, persist=True,
                          imgsz=640, verbose=False, tracker="botsort.yaml")

    # Process the results list
    for result in results:
        # Skip frames that have no confirmed tracks or masks yet
        if result.boxes.id is None or result.masks is None:
            continue

        boxes = result.boxes.xyxy.cpu().numpy().astype(int)
        masks = result.masks.data.cpu().numpy().astype(np.uint8)
        ids = result.boxes.id.cpu().numpy().astype(int)
        classes = result.boxes.cls.cpu().numpy().astype(int)
        class_names = result.names

        for box, mask, track_id, class_id in zip(boxes, masks, ids, classes):
            color = (0, 0, 255)

            # Overlay the segmentation mask on the frame
            mask = cv2.resize(mask, (frame.shape[1], frame.shape[0]),
                              interpolation=cv2.INTER_NEAREST)
            color_mask = np.zeros_like(frame)
            color_mask[mask > 0] = color
            alpha = 0.3
            frame = cv2.addWeighted(frame, 1, color_mask, alpha, 0)

            # Draw the mask border
            mask_contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                                cv2.CHAIN_APPROX_SIMPLE)
            frame = cv2.drawContours(frame, mask_contours, -1, color, 2)

            # Put the class name at the center of the bounding box
            text = f"{class_names[class_id]}"
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_scale = 1.2
            font_color = (0, 0, 255)
            thickness = 3
            text_width, text_height = cv2.getTextSize(text, font, font_scale, thickness)[0]
            center_x = (box[0] + box[2]) // 2 - text_width // 2
            center_y = (box[1] + box[3]) // 2 + text_height // 2
            frame = cv2.putText(frame, text, (center_x, center_y),
                                font, font_scale, font_color, thickness)

    cv2.imshow("YOLOv8 Segmentation", frame)

    # Press 'q' to exit
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# Release the video capture
cap.release()
# Close all OpenCV windows
cv2.destroyAllWindows()
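The comments in the listing refer to an input video and an output video writer, but this trimmed version reads from a webcam and never saves a file. Below is a minimal sketch of how a writer could be wired in with `cv2.VideoWriter`; the `out` variable, the `output.mp4` path, and the `mp4v` codec are assumptions, not part of the original script.

```python
# Hypothetical addition (not in the original script): save the annotated frames.
# Create the writer once, right after opening the capture.
fps = cap.get(cv2.CAP_PROP_FPS) or 30          # webcams often report 0, so fall back to 30
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
out = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*"mp4v"), fps, (width, height))

# Inside the loop, after all drawing is done:
#     out.write(frame)

# At the end, next to cap.release():
#     out.release()
```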