
hand_eye_calibration: different methods have the same result #27625

@saruca1784

System Information

Hello, can anyone help?
I'm using Ubuntu 22.04 and OpenCV 4.5.5 with Python 3.10.12.
When I call cv2.calibrateHandEye(gripper2base_r, gripper2base_t, target2cam_r, target2cam_t, method)
to solve for the hand-eye matrix with different methods, the results for the different methods are exactly the same.
I'm quite confused; what's wrong with the code?
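
For context, this is roughly what I mean by trying the different methods. The loop over the five method flags and the np.allclose comparison are only an illustrative sketch (they are not part of my actual script, which is in "Steps to reproduce" below); the input arrays are assumed to be the same ones loaded there.

```python
import cv2
import numpy as np

# Illustrative sketch only: run calibrateHandEye with each available method
# flag and report whether any two methods return the same rotation.
METHODS = {
    "TSAI": cv2.CALIB_HAND_EYE_TSAI,
    "PARK": cv2.CALIB_HAND_EYE_PARK,
    "HORAUD": cv2.CALIB_HAND_EYE_HORAUD,
    "ANDREFF": cv2.CALIB_HAND_EYE_ANDREFF,
    "DANIILIDIS": cv2.CALIB_HAND_EYE_DANIILIDIS,
}

def compare_hand_eye_methods(gripper2base_r, gripper2base_t, target2cam_r, target2cam_t):
    results = {}
    for name, flag in METHODS.items():
        # Pass the flag explicitly via the `method` keyword argument.
        R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
            gripper2base_r, gripper2base_t, target2cam_r, target2cam_t, method=flag)
        results[name] = (R_cam2gripper, t_cam2gripper)
        print(name, "\nR:\n", R_cam2gripper, "\nt:", t_cam2gripper.ravel())
    # Compare every pair of methods
    names = list(results)
    for i in range(len(names)):
        for j in range(i + 1, len(names)):
            same = np.allclose(results[names[i]][0], results[names[j]][0])
            print(names[i], "vs", names[j], "-> identical rotation:", same)
    return results
```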

Detailed description

The test data: base2end_r.zip
The output on my machine:

```
calib_R1:
[[-0.781830618826 -0.223800002067 -0.581940239664]
 [ 0.595770738195  0.007051521454 -0.803123591707]
 [ 0.183842625569 -0.974609580868  0.127820397058]]
calib_t1:
[[0.807598805294]
 [0.86350968266 ]
 [0.562076898478]]
calib_R2:
[[-0.781830618826 -0.223800002067 -0.581940239664]
 [ 0.595770738195  0.007051521454 -0.803123591707]
 [ 0.183842625569 -0.974609580868  0.127820397058]]
calib_t2:
[[0.807598805294]
 [0.86350968266 ]
 [0.562076898478]]
```
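
A quick check like the following (illustrative only, not in my script) could confirm whether the two results are numerically identical; the two (R, t) pairs would be the calib_R1/calib_t1 and calib_R2/calib_t2 produced by the reproducer below.

```python
import numpy as np

def report_identical(R1, t1, R2, t2):
    # Illustrative helper: pass the two (R, t) pairs returned by
    # cv2.calibrateHandEye, e.g. report_identical(calib_R1, calib_t1, calib_R2, calib_t2).
    print("rotations equal:   ", np.array_equal(R1, R2))
    print("translations equal:", np.array_equal(t1, t2))
    print("max |dR|:", np.max(np.abs(R1 - R2)))
    print("max |dt|:", np.max(np.abs(t1 - t2)))
```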

Steps to reproduce

My test code:

```python
import cv2
import yaml
import numpy as np
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt

np.set_printoptions(precision=12, suppress=False)


# Utility function to load a YAML file as a numpy array
def load_yaml_to_numpy(path):
    with open(path, 'r') as f:
        data = yaml.safe_load(f)
    return np.array(data, dtype=np.float64)


def alternative_validate(calib_R, calib_t, poses_R, poses_T, image_r, image_t):
    T_result_estimate = []
    T_calib = np.eye(4)
    T_calib[:3, :3] = calib_R
    T_calib[:3, 3] = calib_t
    count = 1
    hand_eye_mode = 0
    for Rg, Tg, rc, tc in zip(poses_R, poses_T, image_r, image_t):
        Rc = rc
        Tc = tc

        if hand_eye_mode == 0:  # eye_to_hand: T_oe (object to end-effector) is constant
            T_oc = np.eye(4)
            T_oc[:3, :3] = Rc
            T_oc[:3, 3] = Tc
            T_ob_measure = T_calib @ T_oc
            T_be = np.eye(4)
            T_be[:3, :3] = Rg
            T_be[:3, 3] = Tg
            T_oe = T_be @ T_ob_measure
            print("count {} T_oe:\n".format(count), T_oe)
        else:  # eye_in_hand: T_ob (object to base) is constant
            T_oc = np.eye(4)
            T_oc[:3, :3] = Rc
            T_oc[:3, 3] = Tc
            T_ce = T_calib
            T_eb = np.eye(4)
            T_eb[:3, :3] = Rg
            T_eb[:3, 3] = Tg
            T_ob = T_eb @ T_ce @ T_oc
            print("count {} T_ob:\n".format(count), T_ob)
        count += 1
        if hand_eye_mode == 0:
            T_result_estimate.append(T_oe)
        else:
            T_result_estimate.append(T_ob)

    # Convert list to numpy array for slicing
    T_result_estimate = np.array(T_result_estimate)  # shape: (N, 4, 4)

    # Compute mean, variance, and standard deviation for the translation part
    translations = T_result_estimate[:, :3, 3]  # shape: (N, 3)
    mean_translation = np.mean(translations, axis=0)
    var_translation = np.var(translations, axis=0)
    std_translation = np.std(translations, axis=0)
    diff_translation = np.linalg.norm(translations - mean_translation, axis=1)
    max_diff_idx = np.argmax(diff_translation)
    max_diff = diff_translation[max_diff_idx]
    print("Mean translation:", mean_translation)
    print("Variance of translation:", var_translation)
    print("Standard deviation of translation:", std_translation)
    print("Maximum difference between translations and mean_translation:", max_diff)
    print("Data point with maximum difference: ", translations[max_diff_idx])
    print("Data point with maximum difference index: ", max_diff_idx + 1)  # index starts from 1
    over_threshold_indices = np.where(diff_translation > 0.006)[0]
    print("Indices with translation difference over 0.006:", over_threshold_indices + 1)

    # Rotation part (as rotation matrices)
    rotations = T_result_estimate[:, :3, :3]  # shape: (N, 3, 3)

    # Analyze rotation as quaternions
    quats = R.from_matrix(rotations).as_quat()  # shape: (N, 4)
    mean_quat = np.mean(quats, axis=0)
    var_quat = np.var(quats, axis=0)
    std_quat = np.std(quats, axis=0)
    mean_rotation = R.from_quat(mean_quat).as_matrix()  # convert mean quaternion back to a rotation matrix

    print("Mean quaternion:", mean_quat)
    print("Variance of quaternion:", var_quat)
    print("Standard deviation of quaternion:", std_quat)

    # Plot all translation points and their mean in 3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(translations[:, 0], translations[:, 1], translations[:, 2], c='b', label='Translation')
    ax.scatter(mean_translation[0], mean_translation[1], mean_translation[2],
               c='r', s=100, label='Mean Translation', marker='*')

    # Annotate each translation point with its index label
    for i in range(translations.shape[0]):
        ax.text(translations[i, 0], translations[i, 1], translations[i, 2], f'{i+1}', color='black', fontsize=8)

    # Plot rotation axes (origin + direction) for each rotation matrix
    for i in range(rotations.shape[0]):
        origin = translations[i]
        # X axis (red)
        ax.quiver(origin[0], origin[1], origin[2],
                  rotations[i, 0, 0], rotations[i, 1, 0], rotations[i, 2, 0],
                  length=0.005, color='r', alpha=0.5)
        # Y axis (green)
        ax.quiver(origin[0], origin[1], origin[2],
                  rotations[i, 0, 1], rotations[i, 1, 1], rotations[i, 2, 1],
                  length=0.005, color='g', alpha=0.5)
        # Z axis (blue)
        ax.quiver(origin[0], origin[1], origin[2],
                  rotations[i, 0, 2], rotations[i, 1, 2], rotations[i, 2, 2],
                  length=0.005, color='b', alpha=0.5)

    # Plot the mean rotation at the mean translation
    ax.quiver(mean_translation[0], mean_translation[1], mean_translation[2],
              mean_rotation[0, 0], mean_rotation[1, 0], mean_rotation[2, 0],
              length=0.005, color='r', alpha=1.0)
    ax.quiver(mean_translation[0], mean_translation[1], mean_translation[2],
              mean_rotation[0, 1], mean_rotation[1, 1], mean_rotation[2, 1],
              length=0.005, color='g', alpha=1.0)
    ax.quiver(mean_translation[0], mean_translation[1], mean_translation[2],
              mean_rotation[0, 2], mean_rotation[1, 2], mean_rotation[2, 2],
              length=0.005, color='b', alpha=1.0)

    ax.set_xlabel('X')
    ax.set_ylabel('Y')
    ax.set_zlabel('Z')
    ax.set_title('Translations and Rotations in 3D')
    ax.legend()
    plt.show()


# Load calibration data from four YAML files
board2cam_t = load_yaml_to_numpy('/home/work/ROS2WS/calib_data2/board2cam_t.yaml')
board2cam_r = load_yaml_to_numpy('/home/work/ROS2WS/calib_data2/board2cam_r.yaml')
base2end_t = load_yaml_to_numpy('/home/work/ROS2WS/calib_data2/base2end_t.yaml')
base2end_r = load_yaml_to_numpy('/home/work/ROS2WS/calib_data2/base2end_r.yaml')

# Print shapes and the first 40 elements for verification
print("board2cam_t shape:", board2cam_t.shape)
print("board2cam_r shape:", board2cam_r.shape)
print("base2end_t shape:", base2end_t.shape)
print("base2end_r shape:", base2end_r.shape)

print("base2end_r: \n{}".format(base2end_r[:40]))
print("base2end_t: \n{}".format(base2end_t[:40]))
print("board2cam_r: \n{}".format(board2cam_r[:40]))
print("board2cam_t: \n{}".format(board2cam_t[:40]))

calib_R1, calib_t1 = cv2.calibrateHandEye(base2end_r[:40], base2end_t[:40],
                                          board2cam_r[:40], board2cam_t[:40],
                                          cv2.CALIB_HAND_EYE_TSAI)
calib_R2, calib_t2 = cv2.calibrateHandEye(base2end_r[:40], base2end_t[:40],
                                          board2cam_r[:40], board2cam_t[:40],
                                          cv2.CALIB_HAND_EYE_TSAI)

print("calib_R1:\n", calib_R1)
print("calib_t1:\n", calib_t1)
print("calib_R2:\n", calib_R2)
print("calib_t2:\n", calib_t2)
```

Issue submission checklist

  • I report the issue, it's not a question
  • I checked the problem with documentation, FAQ, open issues, forum.opencv.org, Stack Overflow, etc and have not found any solution
  • I updated to the latest OpenCV version and the issue is still there
  • There is reproducer code and related data files (videos, images, onnx, etc)
