```python
from ultralytics import YOLO
import cv2
import easyocr

# Load the license plate detection model
model = YOLO('license_plate_detector.pt')  # custom-trained plate detector

# Open the input video
cap = cv2.VideoCapture('trafik.mp4')

# Create the OCR reader; this needs to run only once to load the model into memory
reader = easyocr.Reader(['ch_sim', 'en'])

# Watchlist of plate strings to flag (includes common OCR misreadings of the same plates)
bad_boys = ['GXISOGJ', 'GXIS OGJ', 'GXI5OGJ', 'GXI5 OGJ', 'EYGINBG']

if not cap.isOpened():
    raise Exception("Error: Could not open video.")

while cap.isOpened():
    # Read a frame from the input video
    success, frame = cap.read()
    if not success:
        break

    # Detect license plates in the frame
    results = model.predict(source=frame, conf=0.30, verbose=False)
    for result in results:
        boxes = result.boxes.xyxy.cpu().numpy().astype(int)
        if len(boxes) > 0:
            x1, y1, x2, y2 = boxes[0]

            # Crop the plate region and run OCR on it
            new_img = frame[y1:y2, x1:x2]
            # new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2GRAY)
            # new_img = cv2.convertScaleAbs(new_img, alpha=1.5, beta=30.0)
            ocr_result = reader.readtext(new_img)

            # Concatenate the recognized text fragments into a single plate string
            plate_text = ''
            for (bbox, text, prob) in ocr_result:
                plate_text += text
            print(plate_text)

            # Flag plates on the watchlist in red, everything else in green
            if plate_text in bad_boys:
                color = (0, 0, 255)
                frame = cv2.putText(frame, 'BAD BOY', (x1, y1 - 50),
                                    cv2.FONT_HERSHEY_SIMPLEX, 3.0, color, 10)
            else:
                color = (0, 255, 0)

            # Draw the bounding box and the recognized text on the frame
            frame = cv2.rectangle(frame, (x1, y1), (x2, y2), color, 4)
            frame = cv2.putText(frame, plate_text, (x1, y1 - 3),
                                cv2.FONT_HERSHEY_SIMPLEX, 1.5, color, 3)
            # cv2.imshow("Plate", new_img)

    # Scale the frame down for display and show it
    frame = cv2.resize(frame, (0, 0), fx=0.3, fy=0.3)
    cv2.imshow("License plate detector", frame)

    # Press 'q' to quit
    key = cv2.waitKey(1)
    if key & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
```
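The watchlist above has to list several spellings of the same plate because the raw EasyOCR string varies from frame to frame (extra spaces, `5` read instead of `S`, and so on). A minimal sketch of a normalization step that could replace those manual variants is shown below; the `normalize_plate` helper and the look-alike character map are assumptions for illustration, not part of the original script.

```python
import re

# Assumed map of common OCR look-alike confusions (digits read in place of letters)
OCR_CONFUSIONS = str.maketrans({'5': 'S', '0': 'O', '1': 'I'})

def normalize_plate(text: str) -> str:
    # Uppercase, drop anything that is not a letter or digit, then map look-alike characters
    cleaned = re.sub(r'[^A-Za-z0-9]', '', text).upper()
    return cleaned.translate(OCR_CONFUSIONS)

# With a helper like this, the watchlist could hold one canonical entry per plate:
bad_boys = {'GXISOGJ', 'EYGINBG'}
print(normalize_plate('GXI5 OGJ') in bad_boys)  # True
```

Note that mapping every `5`, `0`, and `1` to a letter would also rewrite genuine digits, so a real matcher would likely apply the mapping only to positions expected to hold letters in the local plate format.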