ZED Camera, Single ArUco Marker Pose Detection - Carleton-SRCL/SPOT GitHub Wiki

stereovision.py

Click here for raw code
      # Code for pose detection in pre-recorded video, starting from the pose-detection-of-image code; in progress
  # reference https://pyimagesearch.com/2020/12/21/detecting-aruco-markers-with-opencv-and-python/
  
  # import the necessary packages
  from imutils.video import VideoStream
  import argparse
  import imutils
  import time
  import cv2
  import sys
  import numpy as np
  
  # --- Hard-coded configuration ---------------------------------------------
  # Dictionary name used for detection; must be a key of ARUCO_DICT below.
  DICT_USED = "DICT_4X4_1000"
  # NOTE(review): the three constants below are not referenced anywhere in the
  # visible script (the loop uses the R_/L_ *_direc paths defined later) —
  # they appear to be leftovers; confirm before removing.
  OUTPUT_IMAGE_WIDTH = 600
  calibration_data_direc = r'/calibration_matrix.npy'
  distortion_data_direc = r'/distortion_coefficients.npy'
  
  
  # define names of each possible ArUco tag OpenCV supports
  # (maps a human-readable name to the cv2.aruco predefined-dictionary enum)
  ARUCO_DICT = {
      "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
      "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
      "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
      "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
      "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
      "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
      "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
      "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
      "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
      "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
      "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
      "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
      "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
      "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
      "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
      "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
      "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
      "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
      "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
      "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
      "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
  }
  
  def pose_esitmation(image, corners, ids, matrix_coefficients_path,
                      distortion_coefficients_path, marker_length=0.02,
                      axis_length=0.01):
      """Superimpose detected ArUco markers and their estimated poses on image.
  
      Note: the misspelled name ("esitmation") is kept so existing callers
      keep working.
  
      Parameters
      ----------
      image : ndarray
          Frame to draw on (modified in place and also returned).
      corners, ids : output of cv2.aruco.detectMarkers
          Detected marker corners and their ids (ids is None when nothing
          was detected).
      matrix_coefficients_path : str
          Path to a .npy file holding the camera (intrinsic) matrix.
      distortion_coefficients_path : str
          Path to a .npy file holding the distortion coefficients.
      marker_length : float, optional
          Physical marker side length passed to the pose estimator
          (same unit as the calibration; 0.02 preserves the old behavior).
      axis_length : float, optional
          Length of the drawn pose axes (0.01 preserves the old behavior).
  
      Returns
      -------
      ndarray
          The input image with marker outlines and pose axes drawn.
      """
      k = np.load(matrix_coefficients_path)   # camera matrix
      d = np.load(distortion_coefficients_path)  # distortion coefficients
      if ids is not None and len(corners) > 0:
          for marker_corners in corners:
              # estimate pose of each marker; rvec/tvec are the rotation and
              # translation of the marker relative to the camera
              rvec, tvec, _ = cv2.aruco.estimatePoseSingleMarkers(
                  marker_corners, marker_length, k, d)
              cv2.drawFrameAxes(image, k, d, rvec, tvec, axis_length)
          # drawing the outlines once covers all detected markers; the
          # original redrew them on every loop iteration
          cv2.aruco.drawDetectedMarkers(image, corners)
      return image
  
  # verify that the supplied ArUCo tag exists and is supported by
  # OpenCV
  if ARUCO_DICT.get(DICT_USED) is None:
      print("[INFO] ArUCo tag of '{}' is not supported".format(DICT_USED))
      sys.exit(0)
  # load the ArUCo dictionary and grab the ArUCo parameters
  print("[INFO] detecting '{}' tags...".format(DICT_USED))
  arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT.get(DICT_USED))
  # BUGFIX: detection parameters are DetectorParameters, not
  # EstimateParameters (which configures pose estimation, not detection)
  arucoParams = cv2.aruco.DetectorParameters()
  
  
  # initialize the video stream and allow the camera sensor to warm up
  print("[INFO] starting video stream...")
  # vs = VideoStream(src=0).start()
  time.sleep(2.0)
  
  # Camera parameters to undistort and rectify images
  # (stereoMap.xml is produced by a separate stereo-calibration step)
  cv_file = cv2.FileStorage()
  cv_file.open('stereoMap.xml', cv2.FileStorage_READ)
  stereoMapL_x = cv_file.getNode('stereoMapL_x').mat()
  stereoMapL_y = cv_file.getNode('stereoMapL_y').mat()
  stereoMapR_x = cv_file.getNode('stereoMapR_x').mat()
  stereoMapR_y = cv_file.getNode('stereoMapR_y').mat()
  
  # Import other distortion matrices (per-eye intrinsics/distortion paths,
  # loaded lazily inside pose_esitmation)
  R_calibration_data_direc = "R_calibration_matrix.npy"
  L_calibration_data_direc = "L_calibration_matrix.npy"
  R_distortion_data_direc  = "R_distortion_matrix.npy"
  L_distortion_data_direc  = "L_distortion_matrix.npy"
  
  # Open Stereo camera (-1 = first available device; CAP_DSHOW is
  # Windows DirectShow)
  # cap_Ster = VideoStream(src=0).start()
  cap_Ster = cv2.VideoCapture(-1, cv2.CAP_DSHOW)
  cap_Ster.set(cv2.CAP_PROP_FPS, 60)
  cfps = int(cap_Ster.get(cv2.CAP_PROP_FPS))
  time.sleep(2.0)
  if not cap_Ster.isOpened():
      print("Cannot open camera")
      sys.exit(1)  # sys.exit instead of bare exit(): safe outside the REPL
  
  # detector parameters are loop-invariant: build them once, not twice per frame
  detectorParams = cv2.aruco.DetectorParameters()
  
  # loop over the frames from the video stream
  while True:
      # grab one side-by-side frame from the stereo camera
      ok, frameFull = cap_Ster.read()
      if not ok or frameFull is None:
          # BUGFIX: the original ignored the read status and would crash on
          # .shape when the camera returned no frame
          print("[INFO] no frame received, stopping...")
          break
      # the stereo camera delivers left|right side by side: split at the middle
      width_cutoff = frameFull.shape[1] // 2
      frame_right = frameFull[:, width_cutoff:]
      frame_left = frameFull[:, :width_cutoff]
      # Undistort and rectify images
      frame_right = cv2.remap(frame_right, stereoMapR_x, stereoMapR_y,
                              cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
      frame_left = cv2.remap(frame_left, stereoMapL_x, stereoMapL_y,
                             cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
  
      # detect ArUco markers in each input frame
      (corners_R, ids_R, rejected_R) = cv2.aruco.detectMarkers(
          frame_right, arucoDict, parameters=detectorParams)
      (corners_L, ids_L, rejected_L) = cv2.aruco.detectMarkers(
          frame_left, arucoDict, parameters=detectorParams)
      # superimpose the detected pose on each frame
      frame_right = pose_esitmation(frame_right, corners_R, ids_R,
                                    R_calibration_data_direc,
                                    R_distortion_data_direc)
      # BUGFIX: the original passed frame_right here, so the left feed was
      # never annotated or shown
      frame_left = pose_esitmation(frame_left, corners_L, ids_L,
                                   L_calibration_data_direc,
                                   L_distortion_data_direc)
      # show the output frames in two distinct windows
      # (BUGFIX: both imshow calls used the window name "Frame", so the
      # second call overwrote the first and only one feed was ever visible)
      cv2.imshow("Frame Right", frame_right)
      cv2.imshow("Frame Left", frame_left)
      key = cv2.waitKey(1) & 0xFF
      # if the `q` key was pressed, break from the loop
      if key == ord("q"):
          break
  
  # do a bit of cleanup
  # (BUGFIX: removed cap_Ster.stop() — cv2.VideoCapture has no stop();
  # that line raised AttributeError after release)
  cap_Ster.release()
  cv2.destroyAllWindows()
  
  
  # # Camera parameters to undistort and rectify images
  # cv_file = cv2.FileStorage()
  # cv_file.open('stereoMap.xml', cv2.FileStorage_READ)
  
  # stereoMapL_x = cv_file.getNode('stereoMapL_x').mat()
  # stereoMapL_y = cv_file.getNode('stereoMapL_y').mat()
  # stereoMapR_x = cv_file.getNode('stereoMapR_x').mat()
  # stereoMapR_y = cv_file.getNode('stereoMapR_y').mat()
  
  
  # # Open both cameras
  # cap_right = cv2.VideoCapture(2, cv2.CAP_DSHOW)
  # cap_left = cv2.VideoCapture(0, cv2.CAP_DSHOW)
  
  
  # while(cap_right.isOpened() and cap_left.isOpened()):
  
  #     succes_right, frame_right = cap_right.read()
  #     succes_left, frame_left = cap_left.read()
  
  #     # Undistort and rectify images
  #     frame_right = cv2.remap(frame_right, stereoMapR_x, stereoMapR_y, cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
  #     frame_left = cv2.remap(frame_left, stereoMapL_x, stereoMapL_y, cv2.INTER_LANCZOS4, cv2.BORDER_CONSTANT, 0)
  
  #     # Show the frames
  #     cv2.imshow("frame right", frame_right)
  #     cv2.imshow("frame left", frame_left)
  
  
  #     # Hit "q" to close the window
  #     if cv2.waitKey(1) & 0xFF == ord('q'):
  #         break
  
  
  # # Release and destroy all windows before termination
  # cap_right.release()
  # cap_left.release()
  
  # cv2.destroyAllWindows()

Purpose

  • This function superimposes the detected marker and marker pose on the input stereo camera feed.

Inputs

  • R_calibration_data_direc = "R_calibration_matrix.npy"

  • L_calibration_data_direc = "L_calibration_matrix.npy"

  • R_distortion_data_direc = "R_distortion_matrix.npy"

  • L_distortion_data_direc = "L_distortion_matrix.npy"

  • DICT_USED = "DICT_4X4_1000"

    • The dictionary used for ArUco marker generation (must match a key of ARUCO_DICT in the script).

Outputs

  • A live feed with the superimposed detected pose and marker outline for both the left and right camera frames.

Click here to go BACK

⚠️ **GitHub.com Fallback** ⚠️