ZED Camera Finalized ArUco Pose Estimation

MULTI_AR_VIS_BASE_WO.py

Click here for raw code
  import cv2 as cv
  from cv2 import aruco
  import numpy as np
  import math
  import struct
  import socket
  import ODC_SS
  
  
  """
  This version (Version 6) showcases the OPTIMIZED complete detection and pose determination of aruco markers of different sizes
  
      - Austin Dahlseide
  """
  
  sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
  server_address = ('192.168.1.110',30172)
  
  
  calib_data_path = (r"Big")
  L_cam_mat = np.load(calib_data_path + '/Left_calibration_matrix.npy')
  R_cam_mat = np.load(calib_data_path + '/Right_calibration_matrix.npy')
  L_dist_coef = np.load(calib_data_path + '/Left_distortion_coefficients.npy')
  R_dist_coef = np.load(calib_data_path + '/Right_distortion_coefficients.npy')
  r_vectors = np.load(calib_data_path +'/RotationVector_b.npy')
  t_vectors = np.load(calib_data_path +'/TranslationVector_b.npy')
  
  marker_dict = aruco.getPredefinedDictionary(aruco.DICT_4X4_50)
  
  param_markers = aruco.EstimateParameters()  # Pose-estimation parameters (not referenced later in this script)
  
  # Dictionary to Store Local Position Vectors For Each Chosen Marker ID
  Local_marker_positions = {
      1: {"R_local": np.array([-9.0125, -7.25, -15.0], dtype=np.float32), "Y_4": 7.9, "Face": 4},
      2: {"R_local": np.array([-9.1625, 7.55, -15.0], dtype=np.float32), "Y_4": 22.7, "Face": 4},
      3: {"R_local": np.array([9.1375, -7.20, -15.0], dtype=np.float32), "Y_4": 7.95, "Face": 4},
      4: {"R_local": np.array([8.5875, 7.45, -15.0], dtype=np.float32), "Y_4": 22.6, "Face": 4},
      5: {"R_local": np.array([8.4375, -2.95, -15.0], dtype=np.float32), "Y_4": 4.2, "Face": 1},
      6: {"R_local": np.array([-1.6625, 11.35, -15.0], dtype=np.float32), "Y_4": 26.5, "Face": 1},
      7: {"R_local": np.array([8.1375, -10.55, -15.0], dtype=np.float32), "Y_4": 4.6, "Face": 1},
      8: {"R_local": np.array([-8.9125, -10.35, -15.0], dtype=np.float32), "Y_4": 4.8, "Face": 2},
      9: {"R_local": np.array([-8.8125, 7.95, -15.0], dtype=np.float32), "Y_4": 23.1, "Face": 2},
      10: {"R_local": np.array([7.7275, -10.45, -15.0], dtype=np.float32), "Y_4": 4.7, "Face": 2},
      11: {"R_local": np.array([9.2275, 7.45, -15.0], dtype=np.float32), "Y_4": 22.6, "Face": 2},
      13: {"R_local": np.array([6.0375, -7.5, -15.0], dtype=np.float32), "Y_4": 7.65, "Face": 3},
      14: {"R_local": np.array([-9.1025, 7.45, -15.0], dtype=np.float32), "Y_4": 22.6, "Face": 3},
      15: {"R_local": np.array([-7.5125, -7.45, -15.0], dtype=np.float32), "Y_4": 7.7, "Face": 3},
      21: {"R_local": np.array([-9.8625, 12.8, -15.0], dtype=np.float32), "Y_4": 2.35, "Face": 1},
      22: {"R_local": np.array([-5.9625, 12.75, -15.0], dtype=np.float32), "Y_4": 2.4, "Face": 1},
      23: {"R_local": np.array([-2.0625, 12.75, -15.0], dtype=np.float32), "Y_4": 2.4, "Face": 1},
      24: {"R_local": np.array([0.7375, 4.75, -15.0], dtype=np.float32), "Y_4": 10.4, "Face": 1},
      25: {"R_local": np.array([0.7875, 0.9, -15.0], dtype=np.float32), "Y_4": 14.25, "Face": 1},}  # Format for variables ----> ID#: {"R_local": np.array([rLx, rLy, rLz], dtype=np.float32), "Y_4": 5.0},
  
  # ID 12 is NOT used.
  
  # Define a Dictionary With Marker Sizes Corresponding to Marker IDs
  marker_sizes = {
      1: {"Marker_Size": 6.8},
      2: {"Marker_Size": 6.8},
      3: {"Marker_Size": 6.8},
      4: {"Marker_Size": 6.8},
      5: {"Marker_Size": 6.8},
      6: {"Marker_Size": 6.8},
      7: {"Marker_Size": 6.8},
      8: {"Marker_Size": 6.8},
      9: {"Marker_Size": 6.8},
      10: {"Marker_Size": 6.8},
      11: {"Marker_Size": 6.8},
      13: {"Marker_Size": 6.8},
      14: {"Marker_Size": 6.8},
      15: {"Marker_Size": 6.8},
      21: {"Marker_Size": 3.5},
      22: {"Marker_Size": 3.5},
      23: {"Marker_Size": 3.5},
      24: {"Marker_Size": 3.5},
      25: {"Marker_Size": 3.5},}
  
  """
  # ***IMPORTANT*** Confirm position of ZED camera RELATIVE to Centre of Mass of Chaser Platform
  """
  Z_1 = 14.6   # Translation Distance in the Z-direction From Centre of Chaser Platform to Centre of ZED Camera (in [cm])
  Y_0 = 3.5    # Distance From Top Surface of Chaser Platform to Centre of ZED Camera (in [cm])
  
  """
  THE DISTANCE THESE ARE APART ARE CORRECT. THEIR POSITION IS ****NOT****
  """
  X_1_R = 9.2125  # Translation Distance in the X-direction From Centre of Chaser Platform to Right Lens of ZED Camera (in [cm])
  X_1_L = -2.7375 # Translation Distance in the X-direction From Centre of Chaser Platform to Left Lens of ZED Camera (in [cm])
  
  def calculate_rotation_matrix(pitch_marker):
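      # C_2(theta): direction-cosine matrix for a rotation about the 2-axis (y).
      # Used below to rotate each marker's local position vector into the global
      # frame (R_global = C_2_theta . R_local)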
      C_2_theta = np.array([
          [math.cos(pitch_marker), 0, -1 * math.sin(pitch_marker)],
          [0, 1, 0],
          [math.sin(pitch_marker), 0, math.cos(pitch_marker)]
      ], dtype=np.float32)
      return C_2_theta
  
  def calculate_marker_position(marker_id, R_flip, rVec, tVec, i, is_left_frame):
      # Skip processing for markers not in the Local_marker_positions dictionary;
      # rVec[0]/tVec[0] must hold the pose of the single marker being processed
      if marker_id not in Local_marker_positions:
          return (np.nan,) * 16
      
      R_ct = np.matrix(cv.Rodrigues(rVec[0])[0])
      R_tc = R_ct.T
  
      roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(R_flip * R_tc)
  
      roll_deg = math.degrees(roll_marker)
      pitch_deg = math.degrees(pitch_marker)
      yaw_deg = math.degrees(yaw_marker)
  
      # Calculating the distance
      distance = np.linalg.norm(tVec[0])
  
      # Retrieve the data dictionary from the main dictionary
      marker_data = Local_marker_positions.get(marker_id, {"R_local": np.zeros(3), "Y_4": 0.0, "Face": 0})  # Calls Marker Position Vector from Dictionary
      R_local = marker_data["R_local"] # Position of the Marker in the Local Coordinate System (Origin at the center of the target platform)
      Y_4 = marker_data["Y_4"] # Distance From Top Surface of the Target Platform to Centre of the ArUco Marker (in [cm])
      Face = marker_data["Face"]
  
      # Calculate rotation matrix based on the pitch angle
      C_2_theta = calculate_rotation_matrix(pitch_marker) # Euler angle (in this case Pitch) is given in [rad]
  
      # Additional calculations
      Y_2 = Y_4 - Y_0 # Distance From the Centre of the ZED Camera to the Centre of the ArUco Marker (in [cm])
      Z_y = math.sqrt((distance * distance) - (Y_2 * Y_2)) # Projection of Marker Distance to ZED Camera From the Side View (y-z) to the Top View (x-z)
  
      X_2 = tVec[0][0][0] # Translation Distance in the X-direction of the Marker Relative to the Centre of the ZED Camera (in [cm])
      Z_2 = math.sqrt((Z_y * Z_y) - (X_2 * X_2)) # Translation Distance in the Z-direction of the Marker Relative to the Centre of the ZED Camera, in the Top View (x-z) (in [cm])
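      # Geometry: distance^2 = Y_2^2 + Z_y^2 (side view) and Z_y^2 = X_2^2 + Z_2^2
      # (top view), so Z_2 is the marker's depth component in the camera's x-z plane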
  
      if is_left_frame:
          X_1 = X_1_L
      else:
          X_1 = X_1_R
  
      R_global = np.dot(C_2_theta, R_local) # Converts From Local Reference Frame to Global Reference Frame
      X_3 = -R_global[0] # Translation Distance in X-direction of the Marker Relative to the Centre of the Target Platform in the Global Frame (in [cm])
      Z_3 = -R_global[2] # Translation Distance in Z-direction of the Marker Relative to the Centre of the Target Platform in the Global Frame (in [cm])
  
      Z_abs = (Z_1 + Z_2 + Z_3) # Displacement in the Z-direction of the Centre of the Target Platform Relative to the Centre of the Chaser Platform (in [cm])
      X_abs = (X_1 + X_2 + X_3) # Displacement in the X-direction of the Centre of the Target Platform Relative to the Centre of the Chaser Platform (in [cm])
  
      R_abs = math.sqrt((Z_abs * Z_abs) + (X_abs * X_abs)) # Distance Between the Centre of the Target Platform and the Centre of the Chaser Platform (in [cm])
  
      # Get the Global Orientation of the Target Platform: the marker's pitch about
      # the camera's vertical axis corresponds to the platform's yaw on the table
      Yaw = pitch_deg
      if Face == 1:
          Yaw += Face_1
      elif Face == 2:
          Yaw += Face_2
      elif Face == 3:
          Yaw += Face_3
      elif Face == 4:
          Yaw += Face_4
  
      # Ignore data from the 3.5 cm markers when R_abs is greater than or equal to 100 cm
      if marker_sizes.get(marker_id, {"Marker_Size": 0.0})["Marker_Size"] == 3.5 and R_abs >= 100:
          return (np.nan,) * 16
  
      return roll_deg, pitch_deg, yaw_deg, distance, R_local, C_2_theta, Z_y, X_2, Z_2, R_global, X_3, Z_3, Z_abs, X_abs, R_abs, Yaw
  
  def isRotationMatrix(R):
      Rt = np.transpose(R)
      shouldBeIdentity = np.dot(Rt, R)
      I = np.identity(3, dtype=R.dtype)
      n = np.linalg.norm(I - shouldBeIdentity)
      return n < 1e-6
  
  # Calculates rotation matrix to euler angles (x and z are swapped).
  def rotationMatrixToEulerAngles(R):
      assert (isRotationMatrix(R))
  
      sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0])
  
      singular = sy < 1e-6
  
      if not singular:
          x = math.atan2(R[2, 1], R[2, 2])
          y = math.atan2(-R[2, 0], sy)
          z = math.atan2(R[1, 0], R[0, 0])
      else:
          x = math.atan2(-R[1, 2], R[1, 1])
          y = math.atan2(-R[2, 0], sy)
          z = 0
  
      return np.array([x, y, z])
  
  def DnD(marker_sizes, marker_corners, marker_IDs, frame, cam_mat, dist_coef, rVec, tVec, roll_dict, pitch_dict, yaw_dict, is_left_frame=False):
      
      for i, ids in enumerate(marker_IDs):
          # Detect and Draw
          marker_size_data = marker_sizes.get(ids[0], {"Marker_Size": 0.0})
          MARKER_SIZE = marker_size_data["Marker_Size"]
  
          if MARKER_SIZE <= 0.0:
              continue  # Skips this iteration
  
          rVec, tVec, _ = aruco.estimatePoseSingleMarkers(marker_corners[i], MARKER_SIZE, cam_mat, dist_coef)
  
          # Append the rotation vector of each marker to the list
          rvecs_list.extend(rVec)      
  
          #-- Obtain the rotation matrix tag->camera
          R_ct = np.matrix(cv.Rodrigues(rVec[0])[0])
          R_tc = R_ct.T
  
          #-- Get the attitude in terms of euler 321 (Needs to be flipped first)
          roll_marker, pitch_marker, yaw_marker = rotationMatrixToEulerAngles(R_flip * R_tc)
  
          # Compute the platform pose for this marker, using the lens offset that
          # matches the frame being processed (left or right)
          _, _, _, _, _, _, _, _, _, _, _, _, Z_abs, X_abs, R_abs, Yaw = calculate_marker_position(
              ids[0], R_flip, rVec, tVec, i, is_left_frame=is_left_frame)
  
          # Store Euler angles in dictionaries based on marker ID
          roll_dict[ids[0]] = math.degrees(roll_marker)
          pitch_dict[ids[0]] = math.degrees(pitch_marker)
          yaw_dict[ids[0]] = math.degrees(yaw_marker)
  
          print("Marker ID:",MARKER_SIZE ,"C-C: ", R_abs, "Yaw: ", Yaw)
  
      return frame, roll_dict, pitch_dict, yaw_dict, rVec, tVec
  
  #--- 180 deg rotation matrix around the x axis
  R_flip  = np.zeros((3,3), dtype=np.float32)
  R_flip[0,0] = 1.0
  R_flip[1,1] =-1.0
  R_flip[2,2] =-1.0
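  # Equivalently R_flip = np.diag([1.0, -1.0, -1.0]); it is applied as R_flip * R_tc
  # before the Euler angles are extracted in DnD() and calculate_marker_position()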
  
  # Initialize an Empty List to Store the Path Planning Required Data
  PPL_list = []
  
  # Initialize empty dictionaries for Euler Angles & Centre-to-Centre Distance
  roll_dict_R = {}
  pitch_dict_R = {}
  yaw_dict_R = {}
  R_abs_dict_R = {}
  
  roll_dict_L = {}
  pitch_dict_L = {}
  yaw_dict_L = {}
  R_abs_dict_L = {}
  
  # Initialize an empty list to store the rotation vectors 
  rvecs_list =[]
  
  # Angles to Add to Each Face
  Face_1 = 180
  Face_2 = 90
  Face_3 = 0
  Face_4 = 270
  
  
  
  
  
  
  
  cap = cv.VideoCapture(0)
  cap.set(cv.CAP_PROP_FRAME_WIDTH, 4416)
  cap.set(cv.CAP_PROP_FRAME_HEIGHT, 1242)
  cap.set(cv.CAP_PROP_FPS, 15)           # Set FPS Rate
  
  show_images = False
  show_final_images = False
  save_image = False
  
  index = 0
  
  while True:
      ret, Fullframe = cap.read()
      if not ret:
          break  # Exit if a frame could not be captured
  
      x_shae = np.nan
      z_shae = np.nan
      index = index + 1
      print(index)
      if index == 10:  # Run the ODC_SS obstacle-detection pipeline every 10th frame
          imgL, imgR = ODC_SS.split_image(Fullframe)
          img_und_R = cv.undistort(imgR, R_cam_mat, R_dist_coef)
          img_und_L = cv.undistort(imgL, L_cam_mat, L_dist_coef)
          Fullframe = np.concatenate((img_und_L, img_und_R), axis=1)
  
          position_of_obstacle = ODC_SS.run_code(Fullframe, calib_data_path, show_images, show_final_images,
                                                        save_image)
          print("Position of Obstacle",position_of_obstacle)
          x_shae, z_shae = position_of_obstacle
          index = 0
  
  
      # Initialize Empty Lists for Distances in the Left and Right Frames
      distances_left = []
      distances_right = []
  
      # Initialize Empty Lists to Store Global Yaw Values for Both Left and Right Frames
      yaw_values_left = []
      yaw_values_right = []
  
      # Initialize Empty Lists to Store Global X-Y Values for Both Left and Right Frames
      X_left = []
      X_right = []
      Y_left = []
      Y_right = []
  
      h,w,_ = Fullframe.shape # Split inputs into left and right
      w_c = w // 2
      frame_left = Fullframe[0:h, 0:w_c]
      frame_right = Fullframe[0:h, w_c:w]
      R_gray_frame = cv.cvtColor(frame_right, cv.COLOR_BGR2GRAY)
      L_gray_frame = cv.cvtColor(frame_left, cv.COLOR_BGR2GRAY)
      L_marker_corners, L_marker_IDs, reject = aruco.detectMarkers(L_gray_frame, marker_dict, parameters=aruco.DetectorParameters())
      R_marker_corners, R_marker_IDs, reject = aruco.detectMarkers(R_gray_frame, marker_dict, parameters=aruco.DetectorParameters())
  
      # Ensures that rVec and tVec are Defined Outside the Loop
      rVec_L = None
      tVec_L = None
      rVec_R = None
      tVec_R = None
  
      # Reset Euler angles for all marker IDs to None at the beginning of each iteration
      for marker_id in roll_dict_L.keys():
          roll_dict_L[marker_id] = None
          pitch_dict_L[marker_id] = None
          yaw_dict_L[marker_id] = None
          R_abs_dict_L[marker_id] = None
      for marker_id in roll_dict_R.keys():
          roll_dict_R[marker_id] = None
          pitch_dict_R[marker_id] = None
          yaw_dict_R[marker_id] = None
          R_abs_dict_R[marker_id] = None
  
      # Split into left and right frames and process: detect markers in each frame,
      # then combine them into a single frame for display
      if L_marker_corners:
          L_img, roll_dict_L, pitch_dict_L, yaw_dict_L, rVec_L, tVec_L = DnD(marker_sizes, L_marker_corners, L_marker_IDs, frame_left, L_cam_mat, L_dist_coef, r_vectors, t_vectors, roll_dict_L, pitch_dict_L, yaw_dict_L, is_left_frame=True)
  
          # Calculate and Append the Calculated Centre to Centre Distance and Global Yaw Angle to the List for Left Frame
          for i, ids in enumerate(L_marker_IDs):
              # Re-estimate the pose of THIS marker so each detection uses its own
              # rotation/translation vectors (DnD only returns the last marker's pose)
              size_data = marker_sizes.get(ids[0], {"Marker_Size": 0.0})
              if size_data["Marker_Size"] <= 0.0:
                  continue
              rVec_i, tVec_i, _ = aruco.estimatePoseSingleMarkers(
                  L_marker_corners[i], size_data["Marker_Size"], L_cam_mat, L_dist_coef)
              roll_deg, pitch_deg, yaw_deg, distance, _, _, _, _, _, _, _, _, Z_abs, X_abs, R_abs, Yaw = calculate_marker_position(
                  ids[0], R_flip, rVec_i, tVec_i, i, is_left_frame=True)
              distances_left.append(R_abs)
              yaw_values_left.append(Yaw)
              X_left.append(X_abs)
              Y_left.append(Z_abs)  # Actually "Z", but relabelled "Y" for the path-planning data
  
      else:
          L_img = frame_left
  
      if R_marker_corners:
          R_img, roll_dict_R, pitch_dict_R, yaw_dict_R, rVec_R, tVec_R = DnD(marker_sizes, R_marker_corners, R_marker_IDs, frame_right, R_cam_mat, R_dist_coef, r_vectors, t_vectors, roll_dict_R, pitch_dict_R, yaw_dict_R, is_left_frame=False)
  
          # Calculate and Append the Calculated Centre to Centre Distance and Global Yaw Angle to the List for Right Frame
          for i, ids in enumerate(R_marker_IDs):
              # Re-estimate the pose of THIS marker so each detection uses its own
              # rotation/translation vectors (DnD only returns the last marker's pose)
              size_data = marker_sizes.get(ids[0], {"Marker_Size": 0.0})
              if size_data["Marker_Size"] <= 0.0:
                  continue
              rVec_i, tVec_i, _ = aruco.estimatePoseSingleMarkers(
                  R_marker_corners[i], size_data["Marker_Size"], R_cam_mat, R_dist_coef)
              roll_deg, pitch_deg, yaw_deg, distance, _, _, _, _, _, _, _, _, Z_abs, X_abs, R_abs, Yaw = calculate_marker_position(
                  ids[0], R_flip, rVec_i, tVec_i, i, is_left_frame=False)
              distances_right.append(R_abs)
              yaw_values_right.append(Yaw)
              X_right.append(X_abs)
              Y_right.append(Z_abs)  # Actually "Z", but relabelled "Y" for the path-planning data
  
      else:
          R_img = frame_right
  
      # Average the Yaw Values of All Detected Markers For Each ZED Camera Frame
      average_yaw_left = np.nanmean(yaw_values_left)
      average_yaw_right = np.nanmean(yaw_values_right)
  
      # Average the X and Z Distance Values of All Detected Markers For Each ZED Camera Frame
      avg_X_left = np.nanmean(X_left)
      avg_X_right = np.nanmean(X_right)
      avg_Y_left = np.nanmean(Y_left)
      avg_Y_right = np.nanmean(Y_right)
  
      # Average the Centre to Centre Distance and Global Yaw Angle Between the Two Frames
      Yaw_Global = np.nanmean([average_yaw_left, average_yaw_right])
      x = np.nanmean([avg_X_left, avg_X_right])
      y = np.nanmean([avg_Y_left, avg_Y_right])
  
      print("X: ", x, "Y: ", y, "Yaw: ", Yaw_Global)
  
      # Append data to the list
      PPL_list.append([x, y, Yaw_Global])
  
      try:
          yaw = Yaw_Global
          x_dist = x
          y_dist = y
          # Pack five 32-bit floats: target X, target Y, global yaw, obstacle X, obstacle Z
          data = bytearray(struct.pack("fffff", x_dist, y_dist, yaw, x_shae, z_shae))

          sock.sendto(data, server_address)
          print("great success")

      except Exception:
          print("no bueno")
  
      # stack side by side
      Hori = np.concatenate((L_img, R_img), axis=1)
      #cv.imshow('HORIZONTAL', Hori)
      # Note: cv.waitKey only delivers key events while an OpenCV window is open,
      # so with imshow commented out the "q" check below will not trigger
      key = cv.waitKey(1)
      if key == ord("q") or key == ord('Q'):
          break
  cap.release()
  cv.destroyAllWindows()
      
  
  # show_images = True
  # show_final_images = True
  #
  # img_path = r'C:\Users\shaes\Desktop\MAAE_4907\ryan measurments\Measure_11.jpg'
  #
  # Fullframe = cv.imread(img_path)
  #
  # cv.imshow("",ODC_SS.resize_for_display(Fullframe, 34))
  # cv.waitKey(0)
  # cv.destroyAllWindows()
  #
  # x_shae = np.nan
  # z_shae = np.nan
  # # index = index + 1
  # # index = 10
  # # print(index)
  # # if index == 10:
  #
  # # print("index is ", index)
  # # cv.waitKey(1)
  # # print(x_shae, z_shae)
  #
  # l_cam_mat = np.load(calib_data_path + '/Left_calibration_matrix.npy')
  # l_dist_coef = np.load(calib_data_path + '/Left_distortion_coefficients.npy')
  # r_cam_mat = np.load(calib_data_path + '/Right_calibration_matrix.npy')
  # r_dist_coef = np.load(calib_data_path + '/Right_distortion_coefficients.npy')
  #
  # imgL, imgR = ODC_SS.split_image(Fullframe)
  # # imgR = cv.resize(imgR, (1280, 720))
  # img_und_R = cv.undistort(imgR, r_cam_mat, r_dist_coef)
  # # imgL = cv.resize(imgL, (1280, 720))
  # img_und_L = cv.undistort(imgL, l_cam_mat, l_dist_coef)
  # Fullframe = np.concatenate((img_und_L, img_und_R), axis=1)
  #
  # position_of_obstacle = ODC_SS.run_code_no_cal(Fullframe, show_images, show_final_images)
  # print("Position of Obstacle", position_of_obstacle)
  # x_shae, z_shae = position_of_obstacle
  # # index = 0
  #
  # # Initialize Empty Lists for Distances in the Left and Right Frames
  # distances_left = []
  # distances_right = []
  #
  # # Initialize Empty Lists to Store Global Yaw Values for Both Left and Right Frames
  # yaw_values_left = []
  # yaw_values_right = []
  #
  # # Initialize Empty Lists to Store Global X-Y Values for Both Left and Right Frames
  # X_left = []
  # X_right = []
  # Y_left = []
  # Y_right = []
  #
  # h, w, _ = Fullframe.shape  # Split inputs into left and right
  # w_c = w // 2
  # frame_left = Fullframe[0:h, 0:w_c]
  # frame_right = Fullframe[0:h, w_c:w]
  # R_gray_frame = cv.cvtColor(frame_right, cv.COLOR_BGR2GRAY)
  # L_gray_frame = cv.cvtColor(frame_left, cv.COLOR_BGR2GRAY)
  # L_marker_corners, L_marker_IDs, reject = aruco.detectMarkers(L_gray_frame, marker_dict, parameters=aruco.DetectorParameters())
  # R_marker_corners, R_marker_IDs, reject = aruco.detectMarkers(R_gray_frame, marker_dict, parameters=aruco.DetectorParameters())
  #
  # # Ensures that rVec and tVec are Defined Outside the Loop
  # rVec_L = None
  # tVec_L = None
  # rVec_R = None
  # tVec_R = None
  #
  # for marker_id in roll_dict_L.keys():
  #     roll_dict_L[marker_id] = None
  #     pitch_dict_L[marker_id] = None
  #     yaw_dict_L[marker_id] = None
  #     R_abs_dict_L[marker_id] = None
  # for marker_id in roll_dict_R.keys():
  #     roll_dict_R[marker_id] = None
  #     pitch_dict_R[marker_id] = None
  #     yaw_dict_R[marker_id] = None
  #     R_abs_dict_R[marker_id] = None
  #
  # # Split into left and right frames and process. Detecting markers in each frame then combining them into a single
  # # Reset Euler angles for all marker IDs to None at the beginning of each iteration
  #
  # # frame for display
  # if L_marker_corners:
  #     L_img, roll_dict_L, pitch_dict_L, yaw_dict_L, rVec_L, tVec_L = DnD(marker_sizes, L_marker_corners, L_marker_IDs, frame_left, L_cam_mat, L_dist_coef, r_vectors, t_vectors, roll_dict_L, pitch_dict_L, yaw_dict_L)
  #
  #     # Calculate and Append the Calculated Centre to Centre Distance and Global Yaw Angle to the List for Left Frame
  #     for i, ids in enumerate(L_marker_IDs):
  #         roll_deg, pitch_deg, yaw_deg, distance, _, _, _, _, _, _, _, _, Z_abs, X_abs, R_abs, Yaw = calculate_marker_position(
  #             ids[0], R_flip, rVec_L, tVec_L, i, is_left_frame=True)
  #         distances_left.append(R_abs)
  #         yaw_values_left.append(Yaw)
  #         X_left.append(X_abs)
  #         Y_left.append(
  #             Z_abs)  # Actually "Z," but for purposes of path planning data labelling it has been changed to Y
  #
  # else:
  #     L_img = frame_left
  #
  # if R_marker_corners:
  #     R_img, roll_dict_R, pitch_dict_R, yaw_dict_R, rVec_R, tVec_R = DnD(marker_sizes, R_marker_corners, R_marker_IDs, frame_right, R_cam_mat, R_dist_coef, r_vectors, t_vectors, roll_dict_R, pitch_dict_R, yaw_dict_R)
  #
  #     # Calculate and Append the Calculated Centre to Centre Distance and Global Yaw Angle to the List for Right Frame
  #     for i, ids in enumerate(R_marker_IDs):
  #         roll_deg, pitch_deg, yaw_deg, distance, _, _, _, _, _, _, _, _, Z_abs, X_abs, R_abs, Yaw = calculate_marker_position(
  #             ids[0], R_flip, rVec_R, tVec_R, i, is_left_frame=False)
  #         distances_right.append(R_abs)
  #         yaw_values_right.append(Yaw)
  #         X_right.append(X_abs)
  #         Y_right.append(
  #             Z_abs)  # Actually "Z," but for purposes of path planning data labelling it has been changed to Y
  #
  # else:
  #     R_img = frame_right
  #
  # # Average the Yaw Values of All Detected Markers For Each ZED Camera Frame
  # average_yaw_left = np.nanmean(yaw_values_left)
  # average_yaw_right = np.nanmean(yaw_values_right)
  #
  # # Average the X and Z Distance Values of All Detected Markers For Each ZED Camera Frame
  # avg_X_left = np.nanmean(X_left)
  # avg_X_right = np.nanmean(X_right)
  # avg_Y_left = np.nanmean(Y_left)
  # avg_Y_right = np.nanmean(Y_right)
  #
  # # Average the Centre to Centre Distance and Global Yaw Angle Between the Two Frames
  # Yaw_Global = np.nanmean([average_yaw_left, average_yaw_right])
  # x = np.nanmean([avg_X_left, avg_X_right])
  # y = np.nanmean([avg_Y_left, avg_Y_right])
  #
  # print("X: ", x, "Y: ", y, "Yaw: ", Yaw_Global)
  #
  # # Append data to the list
  # PPL_list.append([x, y, Yaw_Global])
  #
  # try:
  #     yaw = Yaw_Global
  #     x_dist = x
  #     y_dist = y
  #     data = bytearray(struct.pack("fffff", x_dist, y_dist, yaw, x_shae, z_shae))
  #
  #     sock.sendto(data, server_address)
  #     print("great success")
  #
  # except:
  #     print("no bueno")
  #
  # # stack side by side
  # Hori = np.concatenate((L_img, R_img), axis=1)
  # # cv.imshow('HORIZONTAL', Hori)
  # # key = cv.waitKey(1)
  # # if key == ord("q") or key == ord('Q'):
  # #     break
  # # cap.release()
  # # cv.destroyAllWindows()

Purpose

  • This script uses the stereo camera calibration data to detect the target platform via its ArUco markers, estimate the markers' 3D pose, transform their positions into global coordinates, calculate the platform's yaw angle, accumulate path-planning data, and stream the results to MATLAB over UDP, all in real time so markers can be detected and tracked dynamically. The coordinate bookkeeping is summarized below.
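
  The chain of calculations in calculate_marker_position (all symbols as defined in the code; d is the marker-to-camera distance from np.linalg.norm(tVec)):

    Y_2 = Y_4 - Y_0                    (camera height to marker height)
    Z_y = sqrt(d^2 - Y_2^2)            (project d from the side view into the top view)
    Z_2 = sqrt(Z_y^2 - X_2^2)          (depth component in the camera's x-z plane)
    R_global = C_2(pitch) . R_local    (marker position rotated into the global frame)
    X_abs = X_1 + X_2 - R_global[x]
    Z_abs = Z_1 + Z_2 - R_global[z]
    R_abs = sqrt(X_abs^2 + Z_abs^2)    (chaser-to-target centre-to-centre distance)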

Inputs

  • Calibration Data Paths
    • Paths to the calibration data files for the left and right cameras (a sketch of how these files might be produced follows this list).
  • Camera Matrices and Distortion Coefficients
    • The left and right camera matrices (L_cam_mat, R_cam_mat) and the left and right camera distortion coefficients (L_dist_coef, R_dist_coef).
  • Rotation and Translation Vectors
    • The rotation and translation vectors (r_vectors, t_vectors) used when calculating the pose of the markers.
  • Marker Dictionary and Estimation Parameters
    • The ArUco marker dictionary (marker_dict) and the parameters for estimating marker pose (param_markers).
  • Marker Positions and Sizes
    • The local position of each marker ID (Local_marker_positions) and the marker size for each marker ID (marker_sizes).
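
  A minimal sketch (not the project's actual calibration script) of how the camera-matrix and distortion .npy files loaded at the top of the script could be produced for one lens; obj_points and img_points are assumed to come from standard chessboard detections:

    import cv2 as cv
    import numpy as np

    def save_left_calibration(obj_points, img_points, image_size):
        # Calibrate a single lens and save the results in the file layout
        # that MULTI_AR_VIS_BASE_WO.py expects to find in the "Big" folder
        ret, cam_mat, dist_coef, rvecs, tvecs = cv.calibrateCamera(
            obj_points, img_points, image_size, None, None)
        np.save("Big/Left_calibration_matrix.npy", cam_mat)
        np.save("Big/Left_distortion_coefficients.npy", dist_coef)
        return ret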

Outputs

  • Detected Target Platform
    • The pose of the target platform estimated from the ArUco markers detected in the camera frames, including the marker IDs, the centre-to-centre distance (R_abs), and the global yaw angle (Yaw_Global).
  • UDP Data Stream
    • A five-float packet (target X, target Y, global yaw, obstacle X, obstacle Z) sent to the server at 192.168.1.110:30172 on every iteration (see the receiver sketch below).
  • Visualization
    • A visual representation of the detected markers overlaid on the camera frames (the cv.imshow call is present but commented out in this version).
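
  A minimal sketch of a receiver for that UDP stream, assuming only the packing format ("fffff") and port (30172) used in the script; in the lab this role is played by the MATLAB side:

    import socket
    import struct

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("0.0.0.0", 30172))  # listen on the port the script sends to

    while True:
        packet, _ = sock.recvfrom(1024)
        # Five 32-bit floats (native byte order), in the order packed by the sender:
        # target X, target Y, global yaw, obstacle X, obstacle Z
        x_dist, y_dist, yaw, x_shae, z_shae = struct.unpack("fffff", packet)
        print(x_dist, y_dist, yaw, x_shae, z_shae)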
