|
3 | 3 | import sys
|
4 | 4 | import time
|
5 | 5 |
|
6 |
| - |
# Names of every ArUco / AprilTag dictionary this script supports.  Each name
# matches the corresponding cv2.aruco enum attribute exactly, so the lookup
# table below can be built by attribute access instead of spelling each pair out.
_ARUCO_DICT_NAMES = (
    "DICT_4X4_50",
    "DICT_4X4_100",
    "DICT_4X4_250",
    "DICT_4X4_1000",
    "DICT_5X5_50",
    "DICT_5X5_100",
    "DICT_5X5_250",
    "DICT_5X5_1000",
    "DICT_6X6_50",
    "DICT_6X6_100",
    "DICT_6X6_250",
    "DICT_6X6_1000",
    "DICT_7X7_50",
    "DICT_7X7_100",
    "DICT_7X7_250",
    "DICT_7X7_1000",
    "DICT_ARUCO_ORIGINAL",
    "DICT_APRILTAG_16h5",
    "DICT_APRILTAG_25h9",
    "DICT_APRILTAG_36h10",
    "DICT_APRILTAG_36h11",
)

# Dictionary name -> cv2.aruco predefined-dictionary enum value.
ARUCO_DICT = {name: getattr(cv2.aruco, name) for name in _ARUCO_DICT_NAMES}
|
30 | 29 |
|
31 |
def draw_axis(frame, rvec, tvec, matrix_coefficients, distortion_coefficients):
    """Draw the marker's 3D coordinate axes onto ``frame``.

    Projects the marker origin and the tips of three 0.1 m axes from the
    marker's object frame into image coordinates, then draws one line per
    axis from the projected origin.

    Args:
        frame: BGR image (modified in place).
        rvec: Rodrigues rotation vector of the marker pose, shape (3, 1).
        tvec: translation vector of the marker pose, shape (3, 1).
        matrix_coefficients: 3x3 camera intrinsic matrix.
        distortion_coefficients: lens distortion coefficients.

    Returns:
        None. Drawing is done in place on ``frame``.
    """
    # Object-space points: the marker origin plus the tip of each axis.
    # BUG FIX: the origin must be projected through the camera model like the
    # axis tips.  The previous code used (int(tvec[0]), int(tvec[1])), which
    # treats metric camera-frame coordinates as pixel coordinates.
    axis = np.float32([
        [0, 0, 0],      # origin
        [0.1, 0, 0],    # X axis tip
        [0, 0.1, 0],    # Y axis tip
        [0, 0, 0.1],    # Z axis tip
    ]).reshape(-1, 3)
    imgpts, _ = cv2.projectPoints(axis, rvec, tvec,
                                  matrix_coefficients, distortion_coefficients)
    imgpts = np.int32(imgpts).reshape(-1, 2)

    # Defensive check kept from the original: bail out rather than crash if
    # projectPoints ever returns an unexpected number of points.
    if imgpts.shape[0] != 4:
        print("Error: imgpts does not contain 4 points. Current shape:", imgpts.shape)
        return

    origin = tuple(imgpts[0].ravel())
    cv2.line(frame, origin, tuple(imgpts[1].ravel()), (255, 0, 0), 5)  # X axis
    cv2.line(frame, origin, tuple(imgpts[2].ravel()), (0, 255, 0), 5)  # Y axis
    cv2.line(frame, origin, tuple(imgpts[3].ravel()), (0, 0, 255), 5)  # Z axis
63 | 48 |
|
def pose_estimation(frame, aruco_dict_type, matrix_coefficients, distortion_coefficients):
    """Detect ArUco markers in ``frame`` and draw their outlines and pose axes.

    Args:
        frame: BGR image from the camera (annotated in place).
        aruco_dict_type: a cv2.aruco predefined-dictionary enum value
            (one of the values in ARUCO_DICT).
        matrix_coefficients: 3x3 camera intrinsic matrix.
        distortion_coefficients: lens distortion coefficients.

    Returns:
        The annotated frame (same object that was passed in).
    """
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    aruco_dict = cv2.aruco.Dictionary_get(aruco_dict_type)
    parameters = cv2.aruco.DetectorParameters_create()

    corners, ids, rejected_img_points = cv2.aruco.detectMarkers(
        gray, aruco_dict, parameters=parameters)

    if len(corners) > 0:
        # Draw all marker outlines once; the original called this inside the
        # per-marker loop with the full corner list, redrawing every outline
        # for every detected marker.
        cv2.aruco.drawDetectedMarkers(frame, corners)

        for i in range(len(ids)):
            # Marker side length is assumed to be 0.02 m (2 cm).
            rvec, tvec, markerPoints = cv2.aruco.estimatePoseSingleMarkers(
                corners[i], 0.02, matrix_coefficients, distortion_coefficients)

            # estimatePoseSingleMarkers returns (1, 1, 3) arrays; flatten to
            # the (3, 1) column vectors that draw_axis / projectPoints expect.
            rvec = rvec.reshape((3, 1))
            tvec = tvec.reshape((3, 1))

            draw_axis(frame, rvec, tvec, matrix_coefficients, distortion_coefficients)

    return frame
90 | 69 |
|
# ---------------------------------------------------------------------------
# Script entry: open the default camera and run live pose estimation.
# ---------------------------------------------------------------------------
aruco_type = "DICT_5X5_100"

# NOTE(review): these two objects are never used below — pose_estimation
# builds its own dictionary/parameters — but they are kept for backward
# compatibility in case other code imports them from this module.
arucoDict = cv2.aruco.Dictionary_get(ARUCO_DICT[aruco_type])
arucoParams = cv2.aruco.DetectorParameters_create()

# Camera intrinsics and distortion coefficients (presumably from a prior
# calibration of this specific camera — verify before reuse).
intrinsic_camera = np.array(((933.15867, 0, 657.59), (0, 933.1586, 400.36993), (0, 0, 1)))
distortion = np.array((-0.43948, 0.18514, 0, 0))

cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)

# BUG FIX: the capture device was never released and the window never
# destroyed; try/finally guarantees cleanup even if the loop raises.
try:
    while cap.isOpened():
        ret, img = cap.read()

        if not ret:
            print("Failed to grab frame")
            break

        output = pose_estimation(img, ARUCO_DICT[aruco_type], intrinsic_camera, distortion)

        cv2.imshow('Estimated Pose', output)

        key = cv2.waitKey(1) & 0xFF
        if key == ord('q'):
            break
finally:
    cap.release()
    cv2.destroyAllWindows()
|
|
0 commit comments