# face-blur.py — detect faces in a live webcam feed with YOLOv8 and blur them.
  1. from ultralytics import YOLO
  2. from ultralytics.utils.plotting import Annotator, colors
  3. import cv2
  4. import numpy as np
  5. # Load model
  6. model = YOLO('yolov8m-face.pt')
  7. names = model.names
  8. blur_ratio = 50
  9. # Open the input video
  10. cap = cv2.VideoCapture(0)
  11. if not cap.isOpened():
  12. raise Exception("Error: Could not open video.")
  13. while cap.isOpened():
  14. # Read a frame from the input video
  15. success, frame = cap.read()
  16. if success:
  17. # Run YOLOv8 tracking on the frame, persisting tracks between frames
  18. results = model.predict(frame, iou=0.65, conf=0.40, verbose=False)
  19. # Process results list
  20. for result in results:
  21. boxes = result.boxes.xyxy.cpu().tolist()
  22. clss = result.boxes.cls.cpu().tolist()
  23. annotator = Annotator(frame, line_width=2, example=names)
  24. if boxes is not None:
  25. for box, cls in zip(boxes, clss):
  26. annotator.box_label(box, color=colors(int(cls), True), label=names[int(cls)])
  27. obj = frame[int(box[1]):int(box[3]), int(box[0]):int(box[2])]
  28. blur_obj = cv2.blur(obj, (blur_ratio, blur_ratio))
  29. frame[int(box[1]):int(box[3]), int(box[0]):int(box[2])] = blur_obj
  30. cv2.imshow("YOLOv8 Face blur", frame)
  31. # Check for the 'q' key to exit
  32. if cv2.waitKey(10) & 0xFF == ord('q'):
  33. break
  34. else:
  35. break
  36. # Release the input video capture and output video writerй
  37. cap.release()
  38. # Close all OpenCV windows
  39. cv2.destroyAllWindows()