computer vision examples

Andrey Koryagin 6 months ago
parent commit
2d8922f457
4 changed files with 286 additions and 0 deletions
  1. sot/sot.py (+93, -0)
  2. track/track0.py (+78, -0)
  3. track/track1.py (+14, -0)
  4. track/track2.py (+101, -0)

+ 93 - 0
sot/sot.py

@@ -0,0 +1,93 @@
+import cv2
+
+mark_color = (150, 250, 250)
+mark_thick = 2
+
+bbox_color = (0, 0, 255)
+bbox_thick = 2
+
+track_mode = False
+height = 0
+width = 0
+channels = 0
+
+def createTracker():
+    return cv2.TrackerMIL_create()  # Multiple Instance Learning method
+    #return cv2.legacy.TrackerMOSSE_create()  # Minimum Output Sum of Squared Error
+    #return cv2.TrackerGOTURN_create()  # Neural network
+
+def drawMark():
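+    # Draw corner ticks of the fixed capture box at the frame center;
+    # this box is used to (re)initialize the tracker when tracking is toggled on.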
+    cv2.line(img, (bbox_x, bbox_y), (int(bbox_x+bbox_w/4), bbox_y), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y), (int(bbox_x+bbox_w-bbox_w/4), bbox_y), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y+bbox_h), (int(bbox_x+bbox_w/4), bbox_y+bbox_h), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y+bbox_h), (int(bbox_x+bbox_w-bbox_w/4), bbox_y+bbox_h), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y), (bbox_x, int(bbox_y+bbox_h/4)), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y), (bbox_x+bbox_w, int(bbox_y+bbox_h/4)), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y+bbox_h), (bbox_x, int(bbox_y+bbox_h-bbox_h/4)), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y+bbox_h), (bbox_x+bbox_w, int(bbox_y+bbox_h-bbox_h/4)), mark_color, mark_thick)
+
+def drawBox():
+    cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])), bbox_color, bbox_thick)
+
+def doAction():
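+    # Example action: draw the horizontal and vertical offset from the frame
+    # center to the tracked object's center (e.g. as a steering signal).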
+    objX = int(bbox[0] + bbox[2]/2)
+    objY = int(bbox[1] + bbox[3]/2)
+    startX = int(width/2)
+    startY = int(height/2)
+    cv2.line(img, (startX, startY), (objX, startY), (0,255,0), 2)
+    cv2.line(img, (startX, startY), (startX, objY), (0,255,0), 2)
+
+
+tracker = createTracker()
+
+cap = cv2.VideoCapture(0)
+
+#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+success, img = cap.read()
+if not success:
+    raise Exception("Error: Could not read from camera.")
+
+height, width, channels = img.shape
+bbox_w = int(width/5)
+bbox_x = int(width/2 - bbox_w/2)
+bbox_h = int(height/5)
+bbox_y = int(height/2 - bbox_h/2)
+bbox = (bbox_x, bbox_y, bbox_w, bbox_h)
+
+while True:
+    timer = cv2.getTickCount()
+
+    success, img = cap.read()
+    if not success:
+        break
+
+    drawMark()
+
+    if track_mode:
+        success, bbox = tracker.update(img)
+        if success:
+            drawBox()
+            doAction()
+        else:
+            cv2.putText(img, "Lost", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+
+    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
+    cv2.putText(img, str(int(fps)), (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+
+    cv2.imshow("Tracking", img)
+
+    key = cv2.waitKey(1) & 0xFF
+    if key == ord(' '):
+        track_mode = not track_mode
+        if track_mode:
+            bbox = (bbox_x, bbox_y, bbox_w, bbox_h)
+            tracker = createTracker()
+            tracker.init(img, bbox)
+    if key == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()
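
Note: sot.py always initializes the tracker from the fixed box at the frame center. A hand-picked target is also possible with OpenCV's built-in ROI selector; a minimal sketch (window name and structure are illustrative, not part of this commit):

    import cv2

    cap = cv2.VideoCapture(0)
    success, img = cap.read()
    if not success:
        raise Exception("Error: Could not read from camera.")
    # selectROI blocks until the user drags a box and confirms with ENTER/SPACE
    bbox = cv2.selectROI("Select target", img, showCrosshair=True, fromCenter=False)
    cv2.destroyWindow("Select target")
    tracker = cv2.TrackerMIL_create()
    tracker.init(img, bbox)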

+ 78 - 0
track/track0.py

@@ -0,0 +1,78 @@
+from ultralytics import YOLO
+import cv2
+
+# Load an official or custom model
+model = YOLO('yolov8s.pt')  # Load an official Detect model
+#model = YOLO('yolov8n-seg.pt')  # Load an official Segment model
+#model = YOLO('yolov8n-pose.pt')  # Load an official Pose model
+#model = YOLO('path/to/best.pt')  # Load a custom trained model
+
+# Perform tracking with the model
+#results = model.predict(source=0, show=True)
+#results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True)  # Tracking with default tracker
+#results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # Tracking with ByteTrack tracker
+
+
+# Open the input video
+cap = cv2.VideoCapture(0)
+
+if not cap.isOpened():
+    raise Exception("Error: Could not open video.")
+
+# Get input video frame rate and dimensions
+#fps = int(cap.get(cv2.CAP_PROP_FPS))
+#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+while cap.isOpened():
+    # Read a frame from the input video
+    success, frame = cap.read()
+
+    if success:
+        # Run YOLOv8 tracking on the frame, persisting tracks between frames
+
+        # results = model.predict(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=True)
+        results = model.track(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=False, persist=True)
+
+        ## Visualize the results on the frame
+        #annotated_frame = results[0].plot()
+        ##annotated_frame = cv2.resize(annotated_frame, (annotated_frame.shape[1]//2, annotated_frame.shape[0]//2))
+        ## Display the annotated frame
+        #cv2.imshow("YOLOv8 Tracking", annotated_frame)
+
+        # Process results list
+        for result in results:
+            if result.boxes.id is not None:  # tracks exist only when IDs have been assigned
+                boxes = result.boxes.xyxy.cpu().numpy().astype(int)
+                ids = result.boxes.id.cpu().numpy().astype(int)
+                classes = result.boxes.cls.cpu().numpy().astype(int)
+                class_names = result.names
+
+                for box, track_id, class_id in zip(boxes, ids, classes):
+                    color = (0, 0, 255)
+                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
+                    class_name = class_names[class_id]
+                    cv2.putText(
+                        frame,
+                        f"{class_name} id: {track_id}",
+                        (box[0], box[1]-5),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        1,
+                        (0, 0, 255),
+                        2,
+                    )
+
+        cv2.imshow("YOLOv8 Tracking", frame)
+
+        # Check for the 'q' key to exit
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            break
+    else:
+        break
+
+# Release the video capture
+cap.release()
+
+# Close all OpenCV windows
+cv2.destroyAllWindows()
+
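
Note: track0.py only displays the annotated frames; nothing is written to disk. If saving the output is wanted, cv2.VideoWriter can be added around the loop — a minimal sketch (output filename and codec are assumptions):

    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30   # webcams may report 0; fall back to 30
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    writer = cv2.VideoWriter('out.mp4', cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h))
    # inside the loop, after drawing:  writer.write(frame)
    # after the loop:                  writer.release()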

+ 14 - 0
track/track1.py

@@ -0,0 +1,14 @@
+from ultralytics import YOLO
+
+# Load an official or custom model
+model = YOLO('yolov8m.pt')  # Load an official Detect model
+#model = YOLO('yolov8n-seg.pt')  # Load an official Segment model
+#model = YOLO('yolov8n-pose.pt')  # Load an official Pose model
+#model = YOLO('path/to/best.pt')  # Load a custom trained model
+
+# Run the model on the clip (prediction only; the commented variants below do tracking)
+results = model.predict(source='track1.mp4', show=True)
+#results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True)  # Tracking with default tracker
+#results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # Tracking with ByteTrack tracker
+
+
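
Note: the active line in track1.py runs plain detection (model.predict), so no track IDs are assigned; the commented model.track() variants produce per-object IDs. A minimal sketch of consuming tracked results frame by frame (stream=True yields results lazily instead of collecting them all):

    from ultralytics import YOLO

    model = YOLO('yolov8m.pt')
    for result in model.track(source='track1.mp4', stream=True, show=True):
        if result.boxes.id is not None:
            print(result.boxes.id.cpu().numpy().astype(int))  # track IDs in this frame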

+ 101 - 0
track/track2.py

@@ -0,0 +1,101 @@
+from ultralytics import YOLO
+import cv2
+
+# Load an official or custom model
+model = YOLO('yolov8s.pt')  # Load an official Detect model
+#model = YOLO('yolov8n-seg.pt')  # Load an official Segment model
+#model = YOLO('yolov8n-pose.pt')  # Load an official Pose model
+#model = YOLO('path/to/best.pt')  # Load a custom trained model
+
+# Perform tracking with the model
+#results = model.predict(source=0, show=True)
+#results = model.track(source="https://www.youtube.com/watch?v=6n5d1C1Alh4", show=True)  # Tracking with default tracker
+#results = model.track(source="https://youtu.be/LNwODJXcvt4", show=True, tracker="bytetrack.yaml")  # Tracking with ByteTrack tracker
+
+
+# Open the input video
+cap = cv2.VideoCapture('track2.mp4')
+
+if not cap.isOpened():
+    raise Exception("Error: Could not open video.")
+
+# Get input video frame rate and dimensions
+#fps = int(cap.get(cv2.CAP_PROP_FPS))
+#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
+total_class_ids = {}  # class_id -> list of unique track IDs seen so far
+
+while cap.isOpened():
+    # Read a frame from the input video
+    success, frame = cap.read()
+
+    if success:
+        # Run YOLOv8 tracking on the frame, persisting tracks between frames
+
+        # results = model.predict(frame, iou=0.65, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=True)
+        results = model.track(frame, iou=0.85, conf=0.50, tracker="botsort.yaml", imgsz=640, verbose=False, persist=True)
+
+        ## Visualize the results on the frame
+        #annotated_frame = results[0].plot()
+        ##annotated_frame = cv2.resize(annotated_frame, (annotated_frame.shape[1]//2, annotated_frame.shape[0]//2))
+        ## Display the annotated frame
+        #cv2.imshow("YOLOv8 Tracking", annotated_frame)
+
+        # Process results list
+        for result in results:
+            if result.boxes.id is not None:  # tracks exist only when IDs have been assigned
+                boxes = result.boxes.xyxy.cpu().numpy().astype(int)
+                ids = result.boxes.id.cpu().numpy().astype(int)
+                classes = result.boxes.cls.cpu().numpy().astype(int)
+                class_names = result.names
+
+                for box, track_id, class_id in zip(boxes, ids, classes):
+                    color = (0, 0, 255)
+                    cv2.rectangle(frame, (box[0], box[1]), (box[2], box[3]), color, 2)
+                    class_name = class_names[class_id]
+                    cv2.putText(
+                        frame,
+                        f"{class_name} id: {track_id}",
+                        (box[0], box[1]-5),
+                        cv2.FONT_HERSHEY_SIMPLEX,
+                        1,
+                        (0, 0, 255),
+                        2,
+                    )
+
+                    # Statistics: register this track ID under its current class;
+                    # if the tracker re-classified it, move the ID from the old class.
+                    if class_id not in total_class_ids:
+                        total_class_ids[class_id] = []
+
+                    if track_id not in total_class_ids[class_id]:
+                        for cid in total_class_ids:
+                            if track_id in total_class_ids[cid]:
+                                total_class_ids[cid].remove(track_id)
+
+                        total_class_ids[class_id].append(track_id)
+
+        total = {}
+        for cid in total_class_ids:
+            total[model.names[cid]] = len(total_class_ids[cid])
+
+        x = 0
+        y = 20
+        for cname in total:
+            cv2.putText(frame, f"{cname}: {total[cname]}", (x, y), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 255, 0), 2)
+            y += 20
+
+        cv2.imshow("YOLOv8 Tracking", frame)
+        
+        # Check for the 'q' key to exit
+        if cv2.waitKey(10) & 0xFF == ord('q'):
+            break
+    else:
+        break
+
+# Release the video capture
+cap.release()
+
+# Close all OpenCV windows
+cv2.destroyAllWindows()
+
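
Note: track2.py only overlays the running per-class counts on the video. A minimal sketch of printing the final totals once the loop has finished (names as in the script above):

    # after cap.release() / cv2.destroyAllWindows():
    for cid, ids in total_class_ids.items():
        print(f"{model.names[cid]}: {len(ids)} unique tracks")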