"""Blur detected faces in a live webcam feed using a YOLOv8 face model.

Runs per-frame detection with ``yolov8m-face.pt``, blurs each detected
face region, draws a labelled box around it, and shows the result in an
OpenCV window. Press 'q' in the window to quit.
"""
import cv2
from ultralytics import YOLO
from ultralytics.utils.plotting import Annotator, colors

# Kernel size passed to cv2.blur; larger => stronger blur.
BLUR_RATIO = 50


def main() -> None:
    """Capture/detect/blur/display loop; runs until 'q' or stream end."""
    model = YOLO('yolov8m-face.pt')
    names = model.names

    # Index 0 = default webcam.
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        raise RuntimeError("Error: Could not open video.")

    try:
        while cap.isOpened():
            success, frame = cap.read()
            if not success:
                break  # end of stream or camera error

            # Per-frame detection only — no tracking state is kept between
            # frames (model.predict, not model.track).
            results = model.predict(frame, iou=0.65, conf=0.40, verbose=False)

            for result in results:
                boxes = result.boxes.xyxy.cpu().tolist()
                clss = result.boxes.cls.cpu().tolist()
                annotator = Annotator(frame, line_width=2, example=names)

                frame_h, frame_w = frame.shape[:2]
                for box, cls in zip(boxes, clss):
                    # Clamp coordinates so the slice stays inside the frame;
                    # detectors can emit slightly out-of-range boxes.
                    x1 = max(int(box[0]), 0)
                    y1 = max(int(box[1]), 0)
                    x2 = min(int(box[2]), frame_w)
                    y2 = min(int(box[3]), frame_h)
                    if x2 <= x1 or y2 <= y1:
                        continue  # degenerate box; cv2.blur would fail on an empty ROI

                    # Blur first, then draw the label so the box outline and
                    # text stay sharp instead of being blurred away.
                    roi = frame[y1:y2, x1:x2]
                    frame[y1:y2, x1:x2] = cv2.blur(roi, (BLUR_RATIO, BLUR_RATIO))
                    annotator.box_label(
                        box, color=colors(int(cls), True), label=names[int(cls)]
                    )

            cv2.imshow("YOLOv8 Face blur", frame)
            # Check for the 'q' key to exit.
            if cv2.waitKey(10) & 0xFF == ord('q'):
                break
    finally:
        # Release the capture and close all OpenCV windows even if the
        # loop exits via an exception.
        cap.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()