Single Camera Single ArUco Marker Pose Detection - Carleton-SRCL/SPOT GitHub Wiki
Click here for raw code
# Code for pose detection in pre-recorded video, starting from the image pose detection code, in progress
#reference https://pyimagesearch.com/2020/12/21/detecting-aruco-markers-with-opencv-and-python/
import numpy as np
import utils
import argparse
import cv2
import sys
r"""
(needs to be run inside) (venv) PS C:\Directory
Dictionary type needs to match the ARUCO code size, adjust in both your command and variable DICT_USED below
Sample Command:-
python Pose_Detection.py --image C:\Directory\image.jpg --type DICT_6X6_1000
python Live_Pose_Det.py --type DICT_4X4_50
"""
# import the remaining packages needed for the live video stream
from imutils.video import VideoStream
import imutils
import time
#variables that are hardcoded
DICT_USED = cv2.aruco.DICT_4X4_1000
OUTPUT_IMAGE_WIDTH = 600
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-t", "--type", type=str,
                default="DICT_ARUCO_ORIGINAL",
                help="type of ArUCo tag to detect")
args = vars(ap.parse_args())
#Code used for image detection
#ap = argparse.ArgumentParser()
#ap.add_argument("-i", "--image", required=True, help="path to input image containing ArUCo tag")
#ap.add_argument("-t", "--type", type=str, default="DICT_ARUCO_ORIGINAL", help="type of ArUCo tag to detect")
#args = vars(ap.parse_args())
# define names of each possible ArUco tag OpenCV supports
ARUCO_DICT = {
    "DICT_4X4_50": cv2.aruco.DICT_4X4_50,
    "DICT_4X4_100": cv2.aruco.DICT_4X4_100,
    "DICT_4X4_250": cv2.aruco.DICT_4X4_250,
    "DICT_4X4_1000": cv2.aruco.DICT_4X4_1000,
    "DICT_5X5_50": cv2.aruco.DICT_5X5_50,
    "DICT_5X5_100": cv2.aruco.DICT_5X5_100,
    "DICT_5X5_250": cv2.aruco.DICT_5X5_250,
    "DICT_5X5_1000": cv2.aruco.DICT_5X5_1000,
    "DICT_6X6_50": cv2.aruco.DICT_6X6_50,
    "DICT_6X6_100": cv2.aruco.DICT_6X6_100,
    "DICT_6X6_250": cv2.aruco.DICT_6X6_250,
    "DICT_6X6_1000": cv2.aruco.DICT_6X6_1000,
    "DICT_7X7_50": cv2.aruco.DICT_7X7_50,
    "DICT_7X7_100": cv2.aruco.DICT_7X7_100,
    "DICT_7X7_250": cv2.aruco.DICT_7X7_250,
    "DICT_7X7_1000": cv2.aruco.DICT_7X7_1000,
    "DICT_ARUCO_ORIGINAL": cv2.aruco.DICT_ARUCO_ORIGINAL,
    "DICT_APRILTAG_16h5": cv2.aruco.DICT_APRILTAG_16h5,
    "DICT_APRILTAG_25h9": cv2.aruco.DICT_APRILTAG_25h9,
    "DICT_APRILTAG_36h10": cv2.aruco.DICT_APRILTAG_36h10,
    "DICT_APRILTAG_36h11": cv2.aruco.DICT_APRILTAG_36h11
}
def aruco_display(corners, ids, rejected, image):
    if len(corners) > 0:
        # flatten the ArUco IDs list
        ids = ids.flatten()
        # loop over the detected ArUCo corners
        for (markerCorner, markerID) in zip(corners, ids):
            # extract the marker corners (which are always returned in
            # top-left, top-right, bottom-right, and bottom-left order)
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            # convert each of the (x, y)-coordinate pairs to integers
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            # draw the bounding box of the ArUCo detection
            cv2.line(image, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(image, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(image, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(image, bottomLeft, topLeft, (0, 255, 0), 2)
            # compute and draw the center (x, y)-coordinates of the ArUco
            # marker
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(image, (cX, cY), 4, (0, 0, 255), -1)
            # draw the ArUco marker ID on the image
            cv2.putText(image, str(markerID),
                        (topLeft[0], topLeft[1] - 15), cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
            print("[INFO] ArUco marker ID: {}".format(markerID))
    return image
# verify that the supplied ArUCo tag exists and is supported by
# OpenCV
if ARUCO_DICT.get(args["type"], None) is None:
    print("[INFO] ArUCo tag of '{}' is not supported".format(
        args["type"]))
    sys.exit(0)
# load the ArUCo dictionary and grab the ArUCo detector parameters
print("[INFO] detecting '{}' tags...".format(args["type"]))
arucoDict = cv2.aruco.getPredefinedDictionary(ARUCO_DICT[args["type"]])
arucoParams = cv2.aruco.DetectorParameters()
# initialize the video stream and allow the camera sensor to warm up
print("[INFO] starting video stream...")
vs = VideoStream(src=0).start()
time.sleep(2.0)
# loop over the frames from the video stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to have a maximum width of 1000 pixels
    frame = vs.read()
    frame = imutils.resize(frame, width=1000)
    # detect ArUco markers in the input frame
    (corners, ids, rejected) = cv2.aruco.detectMarkers(frame, arucoDict, parameters=arucoParams)
    # verify *at least* one ArUco marker was detected
    if len(corners) > 0:
        # flatten the ArUco IDs list
        ids = ids.flatten()
        # loop over the detected ArUCo corners
        for (markerCorner, markerID) in zip(corners, ids):
            # extract the marker corners (which are always returned
            # in top-left, top-right, bottom-right, and bottom-left
            # order)
            corners = markerCorner.reshape((4, 2))
            (topLeft, topRight, bottomRight, bottomLeft) = corners
            # convert each of the (x, y)-coordinate pairs to integers
            topRight = (int(topRight[0]), int(topRight[1]))
            bottomRight = (int(bottomRight[0]), int(bottomRight[1]))
            bottomLeft = (int(bottomLeft[0]), int(bottomLeft[1]))
            topLeft = (int(topLeft[0]), int(topLeft[1]))
            # draw the bounding box of the ArUCo detection
            cv2.line(frame, topLeft, topRight, (0, 255, 0), 2)
            cv2.line(frame, topRight, bottomRight, (0, 255, 0), 2)
            cv2.line(frame, bottomRight, bottomLeft, (0, 255, 0), 2)
            cv2.line(frame, bottomLeft, topLeft, (0, 255, 0), 2)
            # compute and draw the center (x, y)-coordinates of the
            # ArUco marker
            cX = int((topLeft[0] + bottomRight[0]) / 2.0)
            cY = int((topLeft[1] + bottomRight[1]) / 2.0)
            cv2.circle(frame, (cX, cY), 4, (0, 0, 255), -1)
            # draw the ArUco marker ID on the frame
            cv2.putText(frame, str(markerID),
                        (topLeft[0], topLeft[1] - 15),
                        cv2.FONT_HERSHEY_SIMPLEX,
                        0.5, (0, 255, 0), 2)
    # show the output frame
    cv2.imshow("Frame", frame)
    key = cv2.waitKey(1) & 0xFF
    # if the `q` key was pressed, break from the loop
    if key == ord("q"):
        break
# do a bit of cleanup
cv2.destroyAllWindows()
vs.stop()
#Code Used by image marker detection
'''
print("Loading image...")
image = cv2.imread(args["image"])
h, w, _ = image.shape
width = OUTPUT_IMAGE_WIDTH
height = int(width * (h / w))
image = cv2.resize(image, (width, height), interpolation=cv2.INTER_CUBIC)
# verify that the supplied ArUCo tag exists and is supported by OpenCV
if ARUCO_DICT.get(args["type"], None) is None:
    print(f"ArUCo tag type '{args['type']}' is not supported")
    sys.exit(0)
# load the ArUCo dictionary, grab the ArUCo parameters, and detect
# the markers
print("Detecting '{}' tags....".format(args["type"]))
arucoDict = cv2.aruco.getPredefinedDictionary(DICT_USED)  # hard-coded dictionary type associated with the image
arucoParams = cv2.aruco.DetectorParameters()
corners, ids, rejected = cv2.aruco.detectMarkers(image, arucoDict, parameters=arucoParams)
detected_markers = aruco_display(corners, ids, rejected, image)
cv2.imshow("Image", detected_markers)
'''
# # Uncomment to save
# cv2.imwrite("output_sample.png",detected_markers)
#cv2.waitKey(0)
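
Before running the detector you need a printed marker from the same dictionary; the short sketch below is only an illustration of how one might be generated. The marker ID and pixel size are arbitrary choices, and the call assumes an OpenCV build that provides cv2.aruco.generateImageMarker (older builds expose cv2.aruco.drawMarker with the same arguments instead).

```python
import cv2

# Generate a 4x4 marker with ID 0 from the dictionary used above and save it
# as a 400x400 pixel image for printing (ID and size are arbitrary choices).
arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)
marker_image = cv2.aruco.generateImageMarker(arucoDict, 0, 400)
cv2.imwrite("marker_id0.png", marker_image)
```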
Purpose

- This script superimposes the detected ArUco marker outline and the marker pose on the input camera feed (single camera feed).

Input Variables

- DICT_USED
  - The dictionary used for ArUco marker generation and detection. It is hard-coded as cv2.aruco.DICT_4X4_1000 in the listing above and is used by the (commented-out) still-image path; the live feed uses the dictionary selected with the --type argument.
- OUTPUT_IMAGE_WIDTH = 600
  - The width, in pixels, to which the (commented-out) still-image path resizes its input; the live video feed is resized to a width of 1000 pixels inside the frame loop.
- calibration_data_direc
  - The directory of the saved camera calibration matrix generated by CalibrationProcedure.py (a sketch of loading it follows this list).
- distortion_data_direc
  - The directory of the saved camera distortion matrix generated by CalibrationProcedure.py.
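
The calibration and distortion matrices are not loaded anywhere in the listing above, so the snippet below is only a minimal sketch of how they might be read in, assuming CalibrationProcedure.py saved them as NumPy .npy files; the file names and the C:\Directory path are placeholders, not the actual output names.

```python
import numpy as np

# Hypothetical paths: point these at the files actually written by
# CalibrationProcedure.py for your camera.
calibration_data_direc = r"C:\Directory\calibration_matrix.npy"      # assumed file name
distortion_data_direc = r"C:\Directory\distortion_coefficients.npy"  # assumed file name

camera_matrix = np.load(calibration_data_direc)  # 3x3 intrinsic matrix
dist_coeffs = np.load(distortion_data_direc)     # lens distortion coefficients

print("Camera matrix:\n", camera_matrix)
print("Distortion coefficients:\n", dist_coeffs)
```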
Outputs

- The superimposed marker outline and detected pose are drawn on the live camera feed (a minimal pose-overlay sketch follows Figure 4).

Figure 4: Superimposed pose determination on a live feed of an ArUco marker.
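
The listing above only draws the marker outline and ID; the pose overlay described under Purpose also needs the calibration data. Below is a minimal, self-contained sketch of that step, assuming the .npy files from the previous sketch, a 5 cm marker (marker_size_m is an assumption), and an OpenCV build that still provides cv2.aruco.estimatePoseSingleMarkers (it is deprecated in recent releases, where cv2.solvePnP with the marker's object points is the recommended replacement).

```python
import cv2
import numpy as np

# Assumed inputs: intrinsics and distortion coefficients from CalibrationProcedure.py,
# plus the physical side length of the printed marker in metres (assumption: 5 cm).
camera_matrix = np.load(r"C:\Directory\calibration_matrix.npy")
dist_coeffs = np.load(r"C:\Directory\distortion_coefficients.npy")
marker_size_m = 0.05

arucoDict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_1000)
arucoParams = cv2.aruco.DetectorParameters()

cap = cv2.VideoCapture(0)
while True:
    ok, frame = cap.read()
    if not ok:
        break
    corners, ids, rejected = cv2.aruco.detectMarkers(frame, arucoDict, parameters=arucoParams)
    if ids is not None:
        # one rotation/translation vector pair per detected marker
        rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
            corners, marker_size_m, camera_matrix, dist_coeffs)
        for rvec, tvec in zip(rvecs, tvecs):
            # superimpose the marker's coordinate axes on the live frame
            cv2.drawFrameAxes(frame, camera_matrix, dist_coeffs,
                              rvec, tvec, 0.5 * marker_size_m)
    cv2.imshow("Pose", frame)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

cap.release()
cv2.destroyAllWindows()
```

The axis length passed to cv2.drawFrameAxes is arbitrary; half the marker side length keeps the drawn axes inside the frame at typical working distances.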