@@ -0,0 +1,109 @@
+import cv2
+
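+# BGR colour and line thickness of the aiming marks around the capture box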
+mark_color = (150, 250, 250)
+mark_thick = 2
+
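+# BGR colour and line thickness of the tracker bounding box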
+bbox_color = (0, 0, 255)
+bbox_thick = 2
+
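+# tracking state and frame dimensions, filled in after the first captured frame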
+track_mode = False
+height = 0
+width = 0
+channels = 0
+
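+# factory for the tracker object; alternative OpenCV trackers are left commented out below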
+def createTreker():
+    return cv2.TrackerMIL_create()  # Multiple Instance Learning method
+    #return cv2.legacy.TrackerMOSSE_create()  # Minimum Output Sum of Squared Error
+    #return cv2.TrackerGOTURN_create()  # neural-network tracker (requires the GOTURN model files)
+
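+# draw corner marks outlining the capture region in the centre of the frame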
+def drawMark():
+    cv2.line(img, (bbox_x, bbox_y), (int(bbox_x+bbox_w/4), bbox_y), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y), (int(bbox_x+bbox_w-bbox_w/4), bbox_y), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y+bbox_h), (int(bbox_x+bbox_w/4), bbox_y+bbox_h), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y+bbox_h), (int(bbox_x+bbox_w-bbox_w/4), bbox_y+bbox_h), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y), (bbox_x, int(bbox_y+bbox_h/4)), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y), (bbox_x+bbox_w, int(bbox_y+bbox_h/4)), mark_color, mark_thick)
+
+    cv2.line(img, (bbox_x, bbox_y+bbox_h), (bbox_x, int(bbox_y+bbox_h-bbox_h/4)), mark_color, mark_thick)
+    cv2.line(img, (bbox_x+bbox_w, bbox_y+bbox_h), (bbox_x+bbox_w, int(bbox_y+bbox_h-bbox_h/4)), mark_color, mark_thick)
+
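+# draw the bounding box reported by the tracker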
+def drawBox():
+    cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3])), bbox_color, bbox_thick, 1)
+
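+# draw the horizontal and vertical offset of the tracked object's centre from the frame centre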
+def doAction():
+    objX = int(bbox[0] + bbox[2]/2)
+    objY = int(bbox[1] + bbox[3]/2)
+    startX = int(width/2)
+    startY = int(height/2)
+    cv2.line(img, (startX, startY), (objX, startY), (0,255,0), 2)
+    cv2.line(img, (startX, startY), (startX, objY), (0,255,0), 2)
+
+
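+# the tracker is created once here and re-created every time tracking is switched on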
+tracker = createTreker()
+
+cap = cv2.VideoCapture(0)
+
+#frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
+#frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
+
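+# read one frame to get the frame size and centre the initial capture box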
+success, img = cap.read()
+if success:
+    height, width, channels = img.shape
+    bbox_w = int(width/5)
+    bbox_x = int(width/2 - bbox_w/2)
+    bbox_h = int(height/5)
+    bbox_y = int(height/2 - bbox_h/2)
+    bbox = (bbox_x, bbox_y, bbox_w, bbox_h)
+else:
+    # without an initial frame there is no frame size to build the capture box from
+    print("Cannot read a frame from the camera")
+    exit()
+
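+# main loop: grab a frame, draw the overlays, update the tracker when enabled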
+while True:
+    timer = cv2.getTickCount()
+
+    success, img = cap.read()
+    if not success:
+        break
+
+    drawMark()
+
+    if track_mode:
+        success, bbox = tracker.update(img)
+        if success:
+            drawBox()
+            doAction()
+        else:
+            cv2.putText(img, "Lost", (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+
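+    # frames per second measured over the processing of this frame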
+    fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
+    cv2.putText(img, str(int(fps)), (5, 25), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)
+
+    cv2.imshow("Tracking", img)
+
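+    # space toggles tracking and re-initialises the tracker on the capture box, q quits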
+    key = cv2.waitKey(1)
+    if key & 0xff == ord(' '):
+        track_mode = not track_mode
+        if track_mode:
+            bbox = (bbox_x, bbox_y, bbox_w, bbox_h)
+            tracker = createTreker()
+            tracker.init(img, bbox)
+    if key & 0xff == ord('q'):
+        break
+
+cap.release()
+cv2.destroyAllWindows()