Skip to content

Calibration

Camera calibration engine using ChArUco boards

CameraCalibrator

Calibrates cameras using ChArUco boards

Source code in raiden/calibration/core.py
class CameraCalibrator:
    """Calibrates cameras using ChArUco boards.

    Wraps OpenCV's ChArUco-based intrinsic calibration, hand-eye
    calibration, and fixed scene-camera pose estimation. All public
    methods return plain, JSON-serializable dictionaries carrying a
    "success" flag rather than raising, so results can be persisted
    directly.
    """

    def __init__(self, board_config: ChArUcoBoardConfig):
        self.board_config = board_config
        # The detector owns the cv2.aruco board object used by the
        # calibration calls below.
        self.detector = ChArUcoDetector(board_config)

    def calibrate_intrinsics(
        self,
        all_corners: List[np.ndarray],
        all_ids: List[np.ndarray],
        image_size: Tuple[int, int],
    ) -> Dict:
        """Calibrate camera intrinsics from multiple views

        Args:
            all_corners: List of detected corners from each view
            all_ids: List of detected IDs from each view
            image_size: Image size (width, height)

        Returns:
            Dictionary with calibration results:
            - camera_matrix: 3x3 intrinsic matrix
            - distortion_coeffs: Distortion coefficients
            - reprojection_error: RMS reprojection error
            - success: Whether calibration succeeded
        """
        if len(all_corners) < 3:
            return {"success": False, "error": "Need at least 3 views for calibration"}

        # Corner/ID lists must be paired per view; checking here mirrors
        # the pairing check in calibrate_hand_eye and avoids an opaque
        # OpenCV error downstream.
        if len(all_corners) != len(all_ids):
            return {
                "success": False,
                "error": "Corner and ID lists must have same length",
            }

        # Calibrate camera. OpenCV raises cv2.error on degenerate input
        # (e.g. too few corners in a view); surface that as a failure
        # dict to match this class's error-reporting convention.
        try:
            ret, camera_matrix, dist_coeffs, rvecs, tvecs = (
                cv2.aruco.calibrateCameraCharuco(
                    charucoCorners=all_corners,
                    charucoIds=all_ids,
                    board=self.detector.board,
                    imageSize=image_size,
                    cameraMatrix=None,
                    distCoeffs=None,
                )
            )
        except cv2.error as exc:
            return {"success": False, "error": f"Calibration failed: {exc}"}

        return {
            "success": True,
            "camera_matrix": camera_matrix.tolist(),
            "distortion_coeffs": dist_coeffs.flatten().tolist(),
            # Cast to a built-in float so the dict is JSON-serializable.
            "reprojection_error": float(ret),
            "rvecs": [r.tolist() for r in rvecs],
            "tvecs": [t.tolist() for t in tvecs],
        }

    def calibrate_hand_eye(
        self,
        robot_poses: List[np.ndarray],
        camera_poses: List[Tuple[np.ndarray, np.ndarray]],
        method: int = cv2.CALIB_HAND_EYE_TSAI,
    ) -> Dict:
        """Perform hand-eye calibration (eye-in-hand configuration)

        Solves the AX=XB problem where:
        - A: Transformation between robot gripper poses
        - B: Transformation between camera poses
        - X: Transformation from camera to gripper (what we're solving for)

        Args:
            robot_poses: List of 4x4 robot gripper-to-base transformation matrices
            camera_poses: List of (rvec, tvec) tuples for board-to-camera transforms
            method: OpenCV calibration method (default: Tsai)

        Returns:
            Dictionary with calibration results:
            - rotation_matrix: 3x3 rotation matrix (camera to gripper)
            - translation_vector: 3x1 translation vector (camera to gripper)
            - rotation_vector: 3x1 rotation vector (axis-angle)
            - success: Whether calibration succeeded
        """
        if len(robot_poses) < 3 or len(camera_poses) < 3:
            return {"success": False, "error": "Need at least 3 pose pairs"}

        if len(robot_poses) != len(camera_poses):
            return {
                "success": False,
                "error": "Robot poses and camera poses must have same length",
            }

        # robot_poses are FK results = T^base_ee (transforms gripper frame to base frame)
        # OpenCV calibrateHandEye expects R_gripper2base = T^base_ee, which is exactly what FK gives
        R_gripper2base = []
        t_gripper2base = []
        for pose in robot_poses:
            R_gripper2base.append(pose[:3, :3])
            # 3:4 (not 3) keeps the translation as a 3x1 column vector.
            t_gripper2base.append(pose[:3, 3:4])

        # ChArUco detection gives us T_board_to_camera (rvec/tvec), which
        # is exactly the target-to-camera transform OpenCV expects.
        R_target2cam = []
        t_target2cam = []
        for rvec, tvec in camera_poses:
            R, _ = cv2.Rodrigues(rvec)
            R_target2cam.append(R)
            t_target2cam.append(tvec)

        # Perform hand-eye calibration
        R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
            R_gripper2base=R_gripper2base,
            t_gripper2base=t_gripper2base,
            R_target2cam=R_target2cam,
            t_target2cam=t_target2cam,
            method=method,
        )

        # Convert rotation matrix to axis-angle rotation vector.
        rvec_cam2gripper, _ = cv2.Rodrigues(R_cam2gripper)

        return {
            "success": True,
            "rotation_matrix": R_cam2gripper.tolist(),
            "translation_vector": t_cam2gripper.flatten().tolist(),
            "rotation_vector": rvec_cam2gripper.flatten().tolist(),
            "method": self._get_method_name(method),
        }

    def calibrate_scene_camera(
        self, camera_poses: List[Tuple[np.ndarray, np.ndarray]]
    ) -> Dict:
        """Calibrate a fixed scene camera

        For a fixed scene camera viewing a fixed calibration board,
        we compute the camera's pose relative to the board frame (world frame).
        We average across multiple observations for robustness.

        Args:
            camera_poses: List of (rvec, tvec) tuples for board-to-camera transforms

        Returns:
            Dictionary with calibration results:
            - rotation_matrix: 3x3 rotation matrix (camera to board/world frame)
            - translation_vector: 3x1 translation vector (camera position in board/world frame)
            - rotation_vector: 3x1 rotation vector
            - reference_frame: "board" (the board frame is used as world frame)
            - success: Whether calibration succeeded
        """
        if len(camera_poses) < 1:
            return {"success": False, "error": "Need at least 1 pose"}

        # Collect rotations (as scipy Rotation objects) and flat
        # translation vectors from each observation.
        rotations = []
        translations = []

        for rvec, tvec in camera_poses:
            R, _ = cv2.Rodrigues(rvec)
            rotations.append(Rotation.from_matrix(R))
            translations.append(tvec.flatten())

        # Average rotations with scipy's Rotation.mean (a proper rotation
        # average, not a naive matrix/element-wise mean).
        if len(rotations) > 1:
            avg_rotation = Rotation.concatenate(rotations).mean()
        else:
            avg_rotation = rotations[0]

        avg_R_board_to_cam = avg_rotation.as_matrix()
        avg_t_board_to_cam = np.mean(translations, axis=0)

        # Invert to get camera-to-board transform
        # This is what we want: the camera's pose relative to the board (world frame)
        avg_R_cam_to_board = avg_R_board_to_cam.T
        avg_t_cam_to_board = -avg_R_cam_to_board @ avg_t_board_to_cam

        # Convert to rotation vector
        avg_rvec, _ = cv2.Rodrigues(avg_R_cam_to_board)

        return {
            "success": True,
            "rotation_matrix": avg_R_cam_to_board.tolist(),
            "translation_vector": avg_t_cam_to_board.tolist(),
            "rotation_vector": avg_rvec.flatten().tolist(),
            "reference_frame": "board",
        }

    @staticmethod
    def _get_method_name(method: int) -> str:
        """Get human-readable name for calibration method"""
        method_names = {
            cv2.CALIB_HAND_EYE_TSAI: "CALIB_HAND_EYE_TSAI",
            cv2.CALIB_HAND_EYE_PARK: "CALIB_HAND_EYE_PARK",
            cv2.CALIB_HAND_EYE_HORAUD: "CALIB_HAND_EYE_HORAUD",
            cv2.CALIB_HAND_EYE_ANDREFF: "CALIB_HAND_EYE_ANDREFF",
            cv2.CALIB_HAND_EYE_DANIILIDIS: "CALIB_HAND_EYE_DANIILIDIS",
        }
        return method_names.get(method, f"UNKNOWN_{method}")

    def compute_reprojection_error(
        self,
        corners: np.ndarray,
        ids: np.ndarray,
        rvec: np.ndarray,
        tvec: np.ndarray,
        camera_matrix: np.ndarray,
        dist_coeffs: np.ndarray,
    ) -> float:
        """Compute reprojection error for a single view

        Args:
            corners: Detected corner positions
            ids: Detected corner IDs
            rvec: Rotation vector
            tvec: Translation vector
            camera_matrix: Camera intrinsic matrix
            dist_coeffs: Distortion coefficients

        Returns:
            RMS reprojection error in pixels
        """
        # Select the board's 3D corner coordinates for the detected IDs only.
        obj_points = self.detector.board.getChessboardCorners()
        obj_points_detected = obj_points[ids.flatten()]

        # Project 3D points to 2D through the camera model.
        projected, _ = cv2.projectPoints(
            obj_points_detected, rvec, tvec, camera_matrix, dist_coeffs
        )

        # Per-corner Euclidean pixel error, then RMS over all corners.
        projected = projected.reshape(-1, 2)
        corners = corners.reshape(-1, 2)
        errors = np.linalg.norm(projected - corners, axis=1)

        # Cast to a built-in float so the annotated return type holds
        # (np.sqrt yields np.float64) and the value serializes to JSON.
        return float(np.sqrt(np.mean(errors**2)))

calibrate_hand_eye(robot_poses, camera_poses, method=cv2.CALIB_HAND_EYE_TSAI)

Perform hand-eye calibration (eye-in-hand configuration)

Solves the AX=XB problem where:
  • A: Transformation between robot gripper poses
  • B: Transformation between camera poses
  • X: Transformation from camera to gripper (what we're solving for)

Parameters:

Name Type Description Default
robot_poses List[ndarray]

List of 4x4 robot gripper-to-base transformation matrices

required
camera_poses List[Tuple[ndarray, ndarray]]

List of (rvec, tvec) tuples for board-to-camera transforms

required
method int

OpenCV calibration method (default: Tsai)

CALIB_HAND_EYE_TSAI

Returns:

Dict — dictionary with calibration results:
  • rotation_matrix: 3x3 rotation matrix (camera to gripper)
  • translation_vector: 3x1 translation vector (camera to gripper)
  • rotation_vector: 3x1 rotation vector (axis-angle)
  • success: Whether calibration succeeded
Source code in raiden/calibration/core.py
def calibrate_hand_eye(
    self,
    robot_poses: List[np.ndarray],
    camera_poses: List[Tuple[np.ndarray, np.ndarray]],
    method: int = cv2.CALIB_HAND_EYE_TSAI,
) -> Dict:
    """Perform hand-eye calibration (eye-in-hand configuration).

    Solves the AX=XB problem where A relates successive robot gripper
    poses, B relates successive camera poses, and X is the unknown
    camera-to-gripper transform.

    Args:
        robot_poses: List of 4x4 robot gripper-to-base transformation matrices
        camera_poses: List of (rvec, tvec) tuples for board-to-camera transforms
        method: OpenCV calibration method (default: Tsai)

    Returns:
        Dictionary with calibration results:
        - rotation_matrix: 3x3 rotation matrix (camera to gripper)
        - translation_vector: 3x1 translation vector (camera to gripper)
        - rotation_vector: 3x1 rotation vector (axis-angle)
        - success: Whether calibration succeeded
    """
    if len(robot_poses) < 3 or len(camera_poses) < 3:
        return {"success": False, "error": "Need at least 3 pose pairs"}

    if len(robot_poses) != len(camera_poses):
        return {
            "success": False,
            "error": "Robot poses and camera poses must have same length",
        }

    # robot_poses are FK results (T^base_ee), which is exactly the
    # gripper-to-base transform cv2.calibrateHandEye expects. The 3:4
    # slice keeps each translation as a 3x1 column vector.
    R_gripper2base = [T[:3, :3] for T in robot_poses]
    t_gripper2base = [T[:3, 3:4] for T in robot_poses]

    # ChArUco detection yields board-to-camera (rvec, tvec) pairs, i.e.
    # the target-to-camera transforms OpenCV expects.
    R_target2cam = [cv2.Rodrigues(rvec)[0] for rvec, _ in camera_poses]
    t_target2cam = [tvec for _, tvec in camera_poses]

    # Solve for the camera-to-gripper transform.
    R_cam2gripper, t_cam2gripper = cv2.calibrateHandEye(
        R_gripper2base=R_gripper2base,
        t_gripper2base=t_gripper2base,
        R_target2cam=R_target2cam,
        t_target2cam=t_target2cam,
        method=method,
    )

    # Also report the rotation in axis-angle form.
    rvec_cam2gripper, _ = cv2.Rodrigues(R_cam2gripper)

    return {
        "success": True,
        "rotation_matrix": R_cam2gripper.tolist(),
        "translation_vector": t_cam2gripper.flatten().tolist(),
        "rotation_vector": rvec_cam2gripper.flatten().tolist(),
        "method": self._get_method_name(method),
    }

calibrate_intrinsics(all_corners, all_ids, image_size)

Calibrate camera intrinsics from multiple views

Parameters:

Name Type Description Default
all_corners List[ndarray]

List of detected corners from each view

required
all_ids List[ndarray]

List of detected IDs from each view

required
image_size Tuple[int, int]

Image size (width, height)

required

Returns:

Dict — dictionary with calibration results:
  • camera_matrix: 3x3 intrinsic matrix
  • distortion_coeffs: Distortion coefficients
  • reprojection_error: RMS reprojection error
  • success: Whether calibration succeeded
Source code in raiden/calibration/core.py
def calibrate_intrinsics(
    self,
    all_corners: List[np.ndarray],
    all_ids: List[np.ndarray],
    image_size: Tuple[int, int],
) -> Dict:
    """Calibrate camera intrinsics from multiple views

    Args:
        all_corners: List of detected corners from each view
        all_ids: List of detected IDs from each view
        image_size: Image size (width, height)

    Returns:
        Dictionary with calibration results:
        - camera_matrix: 3x3 intrinsic matrix
        - distortion_coeffs: Distortion coefficients
        - reprojection_error: RMS reprojection error
        - success: Whether calibration succeeded
    """
    if len(all_corners) < 3:
        return {"success": False, "error": "Need at least 3 views for calibration"}

    # Corner/ID lists must be paired per view; checking here mirrors the
    # pairing check in calibrate_hand_eye and avoids an opaque OpenCV
    # error downstream.
    if len(all_corners) != len(all_ids):
        return {
            "success": False,
            "error": "Corner and ID lists must have same length",
        }

    # Calibrate camera. OpenCV raises cv2.error on degenerate input
    # (e.g. too few corners in a view); surface that as a failure dict
    # to match this module's error-reporting convention.
    try:
        ret, camera_matrix, dist_coeffs, rvecs, tvecs = (
            cv2.aruco.calibrateCameraCharuco(
                charucoCorners=all_corners,
                charucoIds=all_ids,
                board=self.detector.board,
                imageSize=image_size,
                cameraMatrix=None,
                distCoeffs=None,
            )
        )
    except cv2.error as exc:
        return {"success": False, "error": f"Calibration failed: {exc}"}

    return {
        "success": True,
        "camera_matrix": camera_matrix.tolist(),
        "distortion_coeffs": dist_coeffs.flatten().tolist(),
        # Cast to a built-in float so the dict is JSON-serializable.
        "reprojection_error": float(ret),
        "rvecs": [r.tolist() for r in rvecs],
        "tvecs": [t.tolist() for t in tvecs],
    }

calibrate_scene_camera(camera_poses)

Calibrate a fixed scene camera

For a fixed scene camera viewing a fixed calibration board, we compute the camera's pose relative to the board frame (world frame). We average across multiple observations for robustness.

Parameters:

Name Type Description Default
camera_poses List[Tuple[ndarray, ndarray]]

List of (rvec, tvec) tuples for board-to-camera transforms

required

Returns:

Dict — dictionary with calibration results:
  • rotation_matrix: 3x3 rotation matrix (camera to board/world frame)
  • translation_vector: 3x1 translation vector (camera position in board/world frame)
  • rotation_vector: 3x1 rotation vector
  • reference_frame: "board" (the board frame is used as world frame)
  • success: Whether calibration succeeded
Source code in raiden/calibration/core.py
def calibrate_scene_camera(
    self, camera_poses: List[Tuple[np.ndarray, np.ndarray]]
) -> Dict:
    """Calibrate a fixed scene camera.

    For a fixed scene camera viewing a fixed calibration board, compute
    the camera's pose relative to the board frame (used as the world
    frame), averaging over multiple observations for robustness.

    Args:
        camera_poses: List of (rvec, tvec) tuples for board-to-camera transforms

    Returns:
        Dictionary with calibration results:
        - rotation_matrix: 3x3 rotation matrix (camera to board/world frame)
        - translation_vector: 3x1 translation vector (camera position in board/world frame)
        - rotation_vector: 3x1 rotation vector
        - reference_frame: "board" (the board frame is used as world frame)
        - success: Whether calibration succeeded
    """
    if len(camera_poses) < 1:
        return {"success": False, "error": "Need at least 1 pose"}

    # Gather each observation's rotation (as a scipy Rotation) and its
    # flattened translation vector.
    observed_rotations = [
        Rotation.from_matrix(cv2.Rodrigues(rvec)[0]) for rvec, _ in camera_poses
    ]
    observed_translations = [tvec.flatten() for _, tvec in camera_poses]

    # Rotation.mean is a proper rotation average (not an element-wise
    # matrix mean); a single observation is used as-is.
    if len(observed_rotations) > 1:
        mean_rotation = Rotation.concatenate(observed_rotations).mean()
    else:
        mean_rotation = observed_rotations[0]

    R_board_to_cam = mean_rotation.as_matrix()
    t_board_to_cam = np.mean(observed_translations, axis=0)

    # Invert the averaged board-to-camera transform to obtain the
    # camera's pose in the board (world) frame.
    R_cam_to_board = R_board_to_cam.T
    t_cam_to_board = -R_cam_to_board @ t_board_to_cam

    # Axis-angle form of the averaged rotation.
    rvec_cam_to_board, _ = cv2.Rodrigues(R_cam_to_board)

    return {
        "success": True,
        "rotation_matrix": R_cam_to_board.tolist(),
        "translation_vector": t_cam_to_board.tolist(),
        "rotation_vector": rvec_cam_to_board.flatten().tolist(),
        "reference_frame": "board",
    }

compute_reprojection_error(corners, ids, rvec, tvec, camera_matrix, dist_coeffs)

Compute reprojection error for a single view

Parameters:

Name Type Description Default
corners ndarray

Detected corner positions

required
ids ndarray

Detected corner IDs

required
rvec ndarray

Rotation vector

required
tvec ndarray

Translation vector

required
camera_matrix ndarray

Camera intrinsic matrix

required
dist_coeffs ndarray

Distortion coefficients

required

Returns:

Type Description
float

RMS reprojection error in pixels

Source code in raiden/calibration/core.py
def compute_reprojection_error(
    self,
    corners: np.ndarray,
    ids: np.ndarray,
    rvec: np.ndarray,
    tvec: np.ndarray,
    camera_matrix: np.ndarray,
    dist_coeffs: np.ndarray,
) -> float:
    """Compute reprojection error for a single view

    Args:
        corners: Detected corner positions
        ids: Detected corner IDs
        rvec: Rotation vector
        tvec: Translation vector
        camera_matrix: Camera intrinsic matrix
        dist_coeffs: Distortion coefficients

    Returns:
        RMS reprojection error in pixels
    """
    # Select the board's 3D corner coordinates for the detected IDs only.
    obj_points = self.detector.board.getChessboardCorners()
    obj_points_detected = obj_points[ids.flatten()]

    # Project 3D points to 2D through the camera model.
    projected, _ = cv2.projectPoints(
        obj_points_detected, rvec, tvec, camera_matrix, dist_coeffs
    )

    # Per-corner Euclidean pixel error, then RMS over all corners.
    projected = projected.reshape(-1, 2)
    corners = corners.reshape(-1, 2)
    errors = np.linalg.norm(projected - corners, axis=1)

    # Cast to a built-in float so the annotated return type holds
    # (np.sqrt yields np.float64) and the value serializes to JSON.
    return float(np.sqrt(np.mean(errors**2)))

ChArUcoBoardConfig dataclass

Configuration for ChArUco calibration board

Source code in raiden/calibration/core.py
@dataclass
class ChArUcoBoardConfig:
    """Configuration for a ChArUco calibration board."""

    squares_x: int = 9
    squares_y: int = 9
    square_length: float = 0.03  # meters (checker size: 30mm)
    marker_length: float = 0.023  # meters (marker size: 23mm)
    dictionary: str = "DICT_6X6_250"  # ArUco dictionary

    @staticmethod
    def from_dict(data: dict) -> "ChArUcoBoardConfig":
        """Build a config from a plain dict; all five keys are required."""
        required = (
            "squares_x",
            "squares_y",
            "square_length",
            "marker_length",
            "dictionary",
        )
        return ChArUcoBoardConfig(**{key: data[key] for key in required})

ChArUcoDetector

Detects ChArUco boards in images

Source code in raiden/calibration/core.py
class ChArUcoDetector:
    """Detects ChArUco boards in images"""

    def __init__(self, board_config: ChArUcoBoardConfig):
        """Build the ArUco dictionary, board model, and marker detector.

        Raises:
            ValueError: If the configured dictionary name is not an
                attribute of cv2.aruco.
        """
        self.board_config = board_config

        # Resolve the configured dictionary name (e.g. 'DICT_6X6_250')
        # to the corresponding cv2.aruco constant.
        dict_name = board_config.dictionary

        if hasattr(cv2.aruco, dict_name):
            aruco_dict = getattr(cv2.aruco, dict_name)
            self.dictionary = cv2.aruco.getPredefinedDictionary(aruco_dict)
        else:
            raise ValueError(f"Unknown ArUco dictionary: {dict_name}")

        # Board model matching the physical board's geometry.
        # NOTE(review): CharucoBoard(size=...) is the OpenCV >= 4.7
        # constructor, while interpolateCornersCharuco /
        # estimatePoseCharucoBoard below come from the legacy aruco API —
        # confirm the targeted OpenCV version exposes both.
        self.board = cv2.aruco.CharucoBoard(
            size=(board_config.squares_x, board_config.squares_y),
            squareLength=board_config.square_length,
            markerLength=board_config.marker_length,
            dictionary=self.dictionary,
        )

        # Marker detector with default detection parameters.
        self.detector_params = cv2.aruco.DetectorParameters()
        self.detector = cv2.aruco.ArucoDetector(self.dictionary, self.detector_params)

    def detect(
        self, image: np.ndarray
    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
        """Detect ChArUco board in image

        Args:
            image: Input image (grayscale or color)

        Returns:
            Tuple of (corners, ids) or (None, None) if detection failed
            - corners: Nx2 array of corner positions
            - ids: Nx1 array of corner IDs
        """
        # Convert to grayscale if needed (3 dims => color, assumed BGR).
        if len(image.shape) == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image

        # Detect individual ArUco markers first; ChArUco corners are
        # interpolated from them.
        marker_corners, marker_ids, _ = self.detector.detectMarkers(gray)

        if marker_ids is None or len(marker_ids) == 0:
            return None, None

        # Interpolate chessboard-corner positions from the detected markers.
        num_corners, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
            markerCorners=marker_corners,
            markerIds=marker_ids,
            image=gray,
            board=self.board,
        )

        # Markers were seen but no chessboard corner could be interpolated.
        if num_corners == 0:
            return None, None

        return charuco_corners, charuco_ids

    def detect_with_markers(
        self, image: np.ndarray
    ) -> Tuple[
        Optional[np.ndarray], Optional[np.ndarray], Optional[list], Optional[np.ndarray]
    ]:
        """Detect ChArUco board and return ArUco marker info for debugging

        Args:
            image: Input image (grayscale or color)

        Returns:
            Tuple of (charuco_corners, charuco_ids, marker_corners, marker_ids)
            - charuco_corners: Nx2 array of ChArUco corner positions (or None)
            - charuco_ids: Nx1 array of ChArUco corner IDs (or None)
            - marker_corners: List of detected ArUco marker corners (or None)
            - marker_ids: Array of detected ArUco marker IDs (or None)
        """
        # Convert to grayscale if needed (3 dims => color, assumed BGR).
        if len(image.shape) == 3:
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        else:
            gray = image

        # Detect individual ArUco markers first.
        marker_corners, marker_ids, _ = self.detector.detectMarkers(gray)

        if marker_ids is None or len(marker_ids) == 0:
            return None, None, None, None

        # Interpolate chessboard-corner positions from the detected markers.
        num_corners, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
            markerCorners=marker_corners,
            markerIds=marker_ids,
            image=gray,
            board=self.board,
        )

        if num_corners == 0:
            # Return marker info even if ChArUco detection failed
            return None, None, marker_corners, marker_ids

        return charuco_corners, charuco_ids, marker_corners, marker_ids

    def estimate_pose(
        self,
        corners: np.ndarray,
        ids: np.ndarray,
        camera_matrix: np.ndarray,
        dist_coeffs: np.ndarray,
    ) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
        """Estimate board pose from detected corners

        Args:
            corners: Detected corner positions
            ids: Detected corner IDs
            camera_matrix: Camera intrinsic matrix
            dist_coeffs: Camera distortion coefficients

        Returns:
            Tuple of (rvec, tvec) or (None, None) if estimation failed
            - rvec: Rotation vector (3x1)
            - tvec: Translation vector (3x1)
        """
        # Solve for the board-to-camera transform; rvec/tvec None means
        # no initial guess is supplied.
        success, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
            charucoCorners=corners,
            charucoIds=ids,
            board=self.board,
            cameraMatrix=camera_matrix,
            distCoeffs=dist_coeffs,
            rvec=None,
            tvec=None,
        )

        if not success:
            return None, None

        return rvec, tvec

detect(image)

Detect ChArUco board in image

Parameters:

Name Type Description Default
image ndarray

Input image (grayscale or color)

required

Returns:

Tuple[Optional[ndarray], Optional[ndarray]] — (corners, ids), or (None, None) if detection failed:
  • corners: Nx2 array of corner positions
  • ids: Nx1 array of corner IDs
Source code in raiden/calibration/core.py
def detect(
    self, image: np.ndarray
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
    """Detect a ChArUco board in an image.

    Args:
        image: Input image (grayscale or color)

    Returns:
        Tuple of (corners, ids) or (None, None) if detection failed
        - corners: Nx2 array of corner positions
        - ids: Nx1 array of corner IDs
    """
    # Work on a grayscale view; 3-dimensional input is treated as BGR.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image

    # Stage 1: find individual ArUco markers.
    marker_corners, marker_ids, _ = self.detector.detectMarkers(gray)
    if marker_ids is None or len(marker_ids) == 0:
        return None, None

    # Stage 2: interpolate chessboard corners from the marker detections.
    num_corners, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
        markerCorners=marker_corners,
        markerIds=marker_ids,
        image=gray,
        board=self.board,
    )

    # Markers were visible but no chessboard corner could be interpolated.
    if num_corners == 0:
        return None, None

    return charuco_corners, charuco_ids

detect_with_markers(image)

Detect ChArUco board and return ArUco marker info for debugging

Parameters:

Name Type Description Default
image ndarray

Input image (grayscale or color)

required

Returns:

Tuple[Optional[ndarray], Optional[ndarray], Optional[list], Optional[ndarray]] — (charuco_corners, charuco_ids, marker_corners, marker_ids):
  • charuco_corners: Nx2 array of ChArUco corner positions (or None)
  • charuco_ids: Nx1 array of ChArUco corner IDs (or None)
  • marker_corners: List of detected ArUco marker corners (or None)
  • marker_ids: Array of detected ArUco marker IDs (or None)
Source code in raiden/calibration/core.py
def detect_with_markers(
    self, image: np.ndarray
) -> Tuple[
    Optional[np.ndarray], Optional[np.ndarray], Optional[list], Optional[np.ndarray]
]:
    """Detect a ChArUco board and also return raw ArUco marker info for debugging.

    Args:
        image: Input image (grayscale or color)

    Returns:
        Tuple of (charuco_corners, charuco_ids, marker_corners, marker_ids)
        - charuco_corners: Nx2 array of ChArUco corner positions (or None)
        - charuco_ids: Nx1 array of ChArUco corner IDs (or None)
        - marker_corners: List of detected ArUco marker corners (or None)
        - marker_ids: Array of detected ArUco marker IDs (or None)
    """
    # Work on a grayscale view; 3-dimensional input is treated as BGR.
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) if len(image.shape) == 3 else image

    # Stage 1: find individual ArUco markers.
    marker_corners, marker_ids, _ = self.detector.detectMarkers(gray)
    if marker_ids is None or len(marker_ids) == 0:
        return None, None, None, None

    # Stage 2: interpolate chessboard corners from the marker detections.
    num_corners, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco(
        markerCorners=marker_corners,
        markerIds=marker_ids,
        image=gray,
        board=self.board,
    )

    # Keep the marker-level results even when corner interpolation fails,
    # so callers can still visualize what was detected.
    if num_corners == 0:
        return None, None, marker_corners, marker_ids

    return charuco_corners, charuco_ids, marker_corners, marker_ids

estimate_pose(corners, ids, camera_matrix, dist_coeffs)

Estimate board pose from detected corners

Parameters:

Name Type Description Default
corners ndarray

Detected corner positions

required
ids ndarray

Detected corner IDs

required
camera_matrix ndarray

Camera intrinsic matrix

required
dist_coeffs ndarray

Camera distortion coefficients

required

Returns:

Tuple[Optional[ndarray], Optional[ndarray]] — (rvec, tvec), or (None, None) if estimation failed:
  • rvec: Rotation vector (3x1)
  • tvec: Translation vector (3x1)
Source code in raiden/calibration/core.py
def estimate_pose(
    self,
    corners: np.ndarray,
    ids: np.ndarray,
    camera_matrix: np.ndarray,
    dist_coeffs: np.ndarray,
) -> Tuple[Optional[np.ndarray], Optional[np.ndarray]]:
    """Estimate the board's pose from detected ChArUco corners.

    Args:
        corners: Detected corner positions
        ids: Detected corner IDs
        camera_matrix: Camera intrinsic matrix
        dist_coeffs: Camera distortion coefficients

    Returns:
        Tuple of (rvec, tvec) or (None, None) if estimation failed
        - rvec: Rotation vector (3x1)
        - tvec: Translation vector (3x1)
    """
    # Solve for the board-to-camera transform; passing rvec/tvec as None
    # means no initial guess is supplied.
    ok, rvec, tvec = cv2.aruco.estimatePoseCharucoBoard(
        charucoCorners=corners,
        charucoIds=ids,
        board=self.board,
        cameraMatrix=camera_matrix,
        distCoeffs=dist_coeffs,
        rvec=None,
        tvec=None,
    )

    return (rvec, tvec) if ok else (None, None)

load_calibration_poses(poses_file)

Load calibration poses from JSON file

Parameters:

Name Type Description Default
poses_file str

Path to calibration poses JSON file

required

Returns:

Type Description
Dict

Dictionary with poses data

Source code in raiden/calibration/core.py
def load_calibration_poses(poses_file: str) -> Dict:
    """Load calibration poses from a JSON file.

    Args:
        poses_file: Path to calibration poses JSON file

    Returns:
        Dictionary with poses data
    """
    with open(poses_file, "r") as handle:
        contents = handle.read()
    return json.loads(contents)

save_calibration_results(results, output_file)

Save calibration results to JSON file

Parameters:

Name Type Description Default
results Dict

Calibration results dictionary

required
output_file str

Path to output JSON file

required
Source code in raiden/calibration/core.py
def save_calibration_results(results: Dict, output_file: str):
    """Save calibration results to a JSON file, creating parent directories.

    Args:
        results: Calibration results dictionary
        output_file: Path to output JSON file
    """
    destination = Path(output_file)
    # Make sure the target directory exists before writing.
    destination.parent.mkdir(parents=True, exist_ok=True)
    destination.write_text(json.dumps(results, indent=2))

Interactive recording of robot poses for calibration

CalibrationPose dataclass

A single calibration pose with robot joint positions

Source code in raiden/calibration/recorder.py
@dataclass
class CalibrationPose:
    """A single calibration pose with robot joint positions."""

    id: int
    name: str
    follower_r: Optional[List[float]] = None  # optional right-follower joint values
    follower_l: Optional[List[float]] = None  # optional left-follower joint values
    timestamp: str = ""
    notes: str = ""

    def to_dict(self):
        """Serialize to a plain dict, omitting absent follower entries."""
        payload = {
            "id": self.id,
            "name": self.name,
            "timestamp": self.timestamp,
            "notes": self.notes,
        }
        # Only include follower arrays that were actually recorded.
        for field_name in ("follower_r", "follower_l"):
            value = getattr(self, field_name)
            if value is not None:
                payload[field_name] = value
        return payload

CalibrationPoseRecorder

Records robot poses for camera calibration

Source code in raiden/calibration/recorder.py
class CalibrationPoseRecorder:
    """Records robot poses for camera calibration

    Drives an interactive teleop session: the operator moves the leader
    arms, and each recorded pose snapshots the follower joint positions
    for later hand-eye / extrinsic calibration against a ChArUco board.
    """

    def __init__(
        self,
        output_file: str = CALIBRATION_POSES_FILE,
        camera_config_file: str = CAMERA_CONFIG,
        charuco_config: Optional[ChArUcoBoardConfig] = None,
    ):
        # Destination for the recorded-poses JSON; create the parent
        # directory up front so save_poses() cannot fail on a missing dir.
        self.output_file = Path(output_file)
        self.output_file.parent.mkdir(parents=True, exist_ok=True)

        self.camera_config_file = camera_config_file
        # Fall back to the default board geometry when none is supplied.
        self.charuco_config = charuco_config or ChArUcoBoardConfig()
        self.poses: List[CalibrationPose] = []

        # Both populated by initialize_robots().
        self.robot_controller: Optional[RobotController] = None
        self.calibration_targets: List[str] = []

    def initialize_robots(self):
        """Initialize robots based on which wrist cameras are in camera.json."""
        cfg = CameraConfig(self.camera_config_file)

        # A wrist-camera entry in the config implies that arm is present.
        has_right = cfg.get_camera_by_role("right_wrist") is not None
        has_left = cfg.get_camera_by_role("left_wrist") is not None

        # Collect wrist camera names for calibration targets
        if has_right:
            cam = cfg.get_camera_by_role("right_wrist")
            self.calibration_targets.append(cam)
        if has_left:
            cam = cfg.get_camera_by_role("left_wrist")
            self.calibration_targets.append(cam)

        # Always include all scene cameras
        for cam in cfg.get_cameras_by_role("scene"):
            if cam not in self.calibration_targets:
                self.calibration_targets.append(cam)

        self.robot_controller = RobotController(
            use_right_leader=has_right,
            use_left_leader=has_left,
            use_right_follower=has_right,
            use_left_follower=has_left,
        )

        # Complete setup: check CAN -> init -> home -> grav comp -> start teleop
        self.robot_controller.setup_for_teleop_recording()

    def record_current_pose(self, notes: str = "") -> CalibrationPose:
        """Record the current robot pose

        Args:
            notes: Optional free-text annotation stored with the pose.

        Returns:
            The newly created CalibrationPose (also appended to self.poses).
        """
        # Pose id doubles as its index in self.poses.
        pose_id = len(self.poses)
        timestamp = datetime.now().isoformat()

        pose = CalibrationPose(
            id=pose_id,
            name=f"pose_{pose_id}",
            timestamp=timestamp,
            notes=notes,
        )

        # Get joint positions from robot controller
        joint_positions = self.robot_controller.get_joint_positions()

        # Only the arms actually connected appear in the dict;
        # values are numpy arrays, converted to lists for JSON.
        if "follower_r" in joint_positions:
            pose.follower_r = joint_positions["follower_r"].tolist()
        if "follower_l" in joint_positions:
            pose.follower_l = joint_positions["follower_l"].tolist()

        self.poses.append(pose)
        return pose

    def delete_last_pose(self) -> bool:
        """Delete the most recently recorded pose

        Returns:
            True if a pose was removed, False if none were recorded.
        """
        if self.poses:
            deleted = self.poses.pop()
            print(f"✓ Deleted pose {deleted.id}: {deleted.name}")
            return True
        else:
            print("No poses to delete")
            return False

    def list_poses(self):
        """Print all recorded poses"""
        if not self.poses:
            print("No poses recorded yet")
            return

        print(f"\nRecorded {len(self.poses)} pose(s):")
        for pose in self.poses:
            print(f"  {pose.id}: {pose.name} - {pose.timestamp}")
            if pose.notes:
                print(f"     Notes: {pose.notes}")

    def check_button_press(self) -> bool:
        """Check if any leader button was pressed

        Returns:
            True if a button was pressed (rising edge), False otherwise
        """
        # Controller returns which leader fired, or None when idle.
        pressed_leader = self.robot_controller.check_button_press()
        return pressed_leader is not None

    def save_poses(self):
        """Save recorded poses to JSON file"""
        # Board config is embedded so the calibration runner can rebuild
        # the exact same ChArUco geometry later.
        data = {
            "version": "1.0",
            "charuco_config": self.charuco_config.to_dict(),
            "poses": [pose.to_dict() for pose in self.poses],
            "calibration_targets": self.calibration_targets,
            "board_mount": "fixed",
            "created": datetime.now().isoformat(),
            "last_modified": datetime.now().isoformat(),
        }

        with open(self.output_file, "w") as f:
            json.dump(data, f, indent=2)

        print(f"\n✓ Saved {len(self.poses)} poses to {self.output_file}")

    def run_interactive_recording(self, min_poses: int = 5):
        """Run interactive pose recording session

        Blocks until the operator quits ('q' / Ctrl+C). Input is accepted
        from leader-arm buttons and from single-key terminal commands read
        in raw mode; terminal settings are restored around each read.

        Args:
            min_poses: Minimum recommended number of poses before quitting.
        """
        # Imported lazily: termios/tty are POSIX-only and only needed here.
        import select
        import signal
        import termios
        import tty

        print("\n" + "=" * 70)
        print("  Raiden - Camera Calibration Pose Recording")
        print("=" * 70)

        # Initialize robots
        self.initialize_robots()

        # Setup signal handlers for emergency stop (SIGTERM)
        # Note: SIGINT (Ctrl+C) is handled via KeyboardInterrupt for graceful save
        def signal_handler(signum, frame):
            self.robot_controller.emergency_stop()

        signal.signal(signal.SIGTERM, signal_handler)

        print("\nTarget Cameras:")
        for target in self.calibration_targets:
            print(f"  - {target}")

        print("\nChArUco Board Configuration:")
        print(
            f"  - Grid size: {self.charuco_config.squares_x}x{self.charuco_config.squares_y} squares"
        )
        print(f"  - Checker size: {self.charuco_config.square_length * 1000:.1f} mm")
        print(f"  - Marker size: {self.charuco_config.marker_length * 1000:.1f} mm")
        print(f"  - Dictionary: {self.charuco_config.dictionary}")

        print("\nInstructions:")
        print("  1. Position the ChArUco board in a fixed location")
        print("  2. Move the LEADER arms - followers will automatically follow")
        print("  3. Position so the board is clearly visible from the camera(s)")
        print("  4. Press button on any leader arm OR type 'r' to record pose")
        print(
            f"  5. Record at least {min_poses} diverse poses (vary distance and angle)"
        )

        print("\nCommands:")
        print("  r - Record current pose")
        print("  d - Delete last pose")
        print("  l - List recorded poses")
        print("  q - Quit and save")
        print("  h - Show help")

        has_leaders = self.robot_controller.leader_r or self.robot_controller.leader_l
        if has_leaders:
            print("\nButton Input:")
            print("  - Press button on any leader arm to record current pose")

        print("\n" + "=" * 70)
        print("\nReady to record poses!")

        # Save terminal settings for raw mode
        old_settings = None
        if sys.stdin.isatty():
            old_settings = termios.tcgetattr(sys.stdin)

        # Interactive loop
        try:
            while True:
                # Status line
                poses_count = len(self.poses)
                status = f"\nPoses recorded: {poses_count}"
                if poses_count < min_poses:
                    status += f" / {min_poses} (minimum)"
                else:
                    status += " ✓ (minimum reached)"
                print(status)

                print(
                    "\nWaiting for input (button press or command)...",
                    end="",
                    flush=True,
                )

                # Monitor button presses in a loop
                command = None
                while command is None:
                    # Check for button press
                    if has_leaders and self.check_button_press():
                        print("\n\n🔘 Button pressed! Recording pose...")
                        time.sleep(0.5)  # Debounce
                        command = "r"
                        break

                    # Check for keyboard input (non-blocking)
                    if sys.stdin.isatty() and old_settings:
                        if select.select([sys.stdin], [], [], 0.05)[0]:
                            # Switch to raw mode to read single character
                            tty.setraw(sys.stdin.fileno())
                            char = sys.stdin.read(1)
                            termios.tcsetattr(
                                sys.stdin, termios.TCSADRAIN, old_settings
                            )

                            if char == "\x03":  # Ctrl+C
                                raise KeyboardInterrupt
                            elif char == "\n" or char == "\r":
                                # Enter pressed, get full command
                                print("\r\nCommand [r/d/l/q/h]: ", end="", flush=True)
                                # Restore normal mode for input
                                command = input().strip().lower()
                                break
                            elif char in ["r", "d", "l", "q", "h"]:
                                # Direct command without Enter
                                command = char
                                print(f"\r\nCommand: {command}")
                                break
                    else:
                        # Fallback for non-TTY or if we can't set raw mode
                        time.sleep(0.1)

                if command == "r":
                    pose = self.record_current_pose()
                    print(f"✓ Recorded pose {pose.id}: {pose.name}")

                    if self.robot_controller.follower_r and pose.follower_r:
                        pos = pose.follower_r[:3]
                        print(
                            f"  Right follower position (first 3 joints): {[f'{p:.3f}' for p in pos]}"
                        )

                    if self.robot_controller.follower_l and pose.follower_l:
                        pos = pose.follower_l[:3]
                        print(
                            f"  Left follower position (first 3 joints): {[f'{p:.3f}' for p in pos]}"
                        )

                elif command == "d":
                    self.delete_last_pose()

                elif command == "l":
                    self.list_poses()

                elif command == "q":
                    # Warn (but allow) quitting below the recommended minimum.
                    if poses_count < min_poses:
                        print(
                            f"\nWarning: Only {poses_count} poses recorded (minimum: {min_poses})."
                        )
                        print("Continue anyway? [y/N]: ", end="", flush=True)
                        # Restore terminal for input
                        if old_settings:
                            termios.tcsetattr(
                                sys.stdin, termios.TCSADRAIN, old_settings
                            )
                        confirm = input().strip().lower()
                        if confirm != "y":
                            print("Continuing recording...")
                            continue

                    self.save_poses()
                    print("\nCalibration pose recording complete!")

                    # Shutdown: stop teleop -> restore control -> go home
                    self.robot_controller.shutdown()

                    break

                elif command == "h":
                    print("\nCommands:")
                    print("  r - Record current pose")
                    print("  d - Delete last pose")
                    print("  l - List recorded poses")
                    print("  q - Quit and save")
                    print("  h - Show help")
                    if has_leaders:
                        print("\nButton Input:")
                        print("  - Press button on any leader arm to record")

                else:
                    print(f"Unknown command: {command}")
                    print("Type 'h' for help")

        except KeyboardInterrupt:
            # Graceful exit: offer to save, then orderly shutdown.
            print("\n\nInterrupted by user")
            if old_settings:
                termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
            if self.poses:
                print("Save recorded poses before exiting? [Y/n]: ", end="", flush=True)
                save = input().strip().lower()
                if save != "n":
                    self.save_poses()

            # Shutdown: stop teleop -> restore control -> go home
            self.robot_controller.shutdown()
        except Exception as e:
            print(f"\nError: {e}")
            import traceback

            traceback.print_exc()

            # Perform emergency stop on error
            if self.robot_controller and self.robot_controller.has_robots():
                self.robot_controller.emergency_stop()
        finally:
            # Cleanup robots
            if self.robot_controller:
                self.robot_controller.cleanup()

check_button_press()

Check if any leader button was pressed

Returns:

Type Description
bool

True if a button was pressed (rising edge), False otherwise

Source code in raiden/calibration/recorder.py
def check_button_press(self) -> bool:
    """Check if any leader button was pressed

    Returns:
        True if a button was pressed (rising edge), False otherwise
    """
    # The controller reports which leader fired (or None when idle);
    # collapse that into a simple boolean for the caller.
    return self.robot_controller.check_button_press() is not None

delete_last_pose()

Delete the most recently recorded pose

Source code in raiden/calibration/recorder.py
def delete_last_pose(self) -> bool:
    """Delete the most recently recorded pose"""
    # Guard clause: nothing recorded yet.
    if not self.poses:
        print("No poses to delete")
        return False
    deleted = self.poses.pop()
    print(f"✓ Deleted pose {deleted.id}: {deleted.name}")
    return True

initialize_robots()

Initialize robots based on which wrist cameras are in camera.json.

Source code in raiden/calibration/recorder.py
def initialize_robots(self):
    """Initialize robots based on which wrist cameras are in camera.json."""
    cfg = CameraConfig(self.camera_config_file)

    # Presence of a wrist camera in the config implies that arm exists.
    has_right = cfg.get_camera_by_role("right_wrist") is not None
    has_left = cfg.get_camera_by_role("left_wrist") is not None

    # Wrist cameras go first in the calibration target list.
    if has_right:
        self.calibration_targets.append(cfg.get_camera_by_role("right_wrist"))
    if has_left:
        self.calibration_targets.append(cfg.get_camera_by_role("left_wrist"))

    # Scene cameras are always calibrated; skip any duplicates.
    for scene_cam in cfg.get_cameras_by_role("scene"):
        if scene_cam not in self.calibration_targets:
            self.calibration_targets.append(scene_cam)

    self.robot_controller = RobotController(
        use_right_leader=has_right,
        use_left_leader=has_left,
        use_right_follower=has_right,
        use_left_follower=has_left,
    )

    # Complete setup: check CAN -> init -> home -> grav comp -> start teleop
    self.robot_controller.setup_for_teleop_recording()

list_poses()

Print all recorded poses

Source code in raiden/calibration/recorder.py
def list_poses(self):
    """Print all recorded poses"""
    # Guard clause: nothing to show yet.
    if not self.poses:
        print("No poses recorded yet")
        return

    print(f"\nRecorded {len(self.poses)} pose(s):")
    for entry in self.poses:
        print(f"  {entry.id}: {entry.name} - {entry.timestamp}")
        if entry.notes:
            print(f"     Notes: {entry.notes}")

record_current_pose(notes='')

Record the current robot pose

Source code in raiden/calibration/recorder.py
def record_current_pose(self, notes: str = "") -> CalibrationPose:
    """Record the current robot pose"""
    pose_id = len(self.poses)
    timestamp = datetime.now().isoformat()

    pose = CalibrationPose(
        id=pose_id,
        name=f"pose_{pose_id}",
        timestamp=timestamp,
        notes=notes,
    )

    # Get joint positions from robot controller
    joint_positions = self.robot_controller.get_joint_positions()

    if "follower_r" in joint_positions:
        pose.follower_r = joint_positions["follower_r"].tolist()
    if "follower_l" in joint_positions:
        pose.follower_l = joint_positions["follower_l"].tolist()

    self.poses.append(pose)
    return pose

run_interactive_recording(min_poses=5)

Run interactive pose recording session

Source code in raiden/calibration/recorder.py
def run_interactive_recording(self, min_poses: int = 5):
    """Run interactive pose recording session"""
    import select
    import signal
    import termios
    import tty

    print("\n" + "=" * 70)
    print("  Raiden - Camera Calibration Pose Recording")
    print("=" * 70)

    # Initialize robots
    self.initialize_robots()

    # Setup signal handlers for emergency stop (SIGTERM)
    # Note: SIGINT (Ctrl+C) is handled via KeyboardInterrupt for graceful save
    def signal_handler(signum, frame):
        self.robot_controller.emergency_stop()

    signal.signal(signal.SIGTERM, signal_handler)

    print("\nTarget Cameras:")
    for target in self.calibration_targets:
        print(f"  - {target}")

    print("\nChArUco Board Configuration:")
    print(
        f"  - Grid size: {self.charuco_config.squares_x}x{self.charuco_config.squares_y} squares"
    )
    print(f"  - Checker size: {self.charuco_config.square_length * 1000:.1f} mm")
    print(f"  - Marker size: {self.charuco_config.marker_length * 1000:.1f} mm")
    print(f"  - Dictionary: {self.charuco_config.dictionary}")

    print("\nInstructions:")
    print("  1. Position the ChArUco board in a fixed location")
    print("  2. Move the LEADER arms - followers will automatically follow")
    print("  3. Position so the board is clearly visible from the camera(s)")
    print("  4. Press button on any leader arm OR type 'r' to record pose")
    print(
        f"  5. Record at least {min_poses} diverse poses (vary distance and angle)"
    )

    print("\nCommands:")
    print("  r - Record current pose")
    print("  d - Delete last pose")
    print("  l - List recorded poses")
    print("  q - Quit and save")
    print("  h - Show help")

    has_leaders = self.robot_controller.leader_r or self.robot_controller.leader_l
    if has_leaders:
        print("\nButton Input:")
        print("  - Press button on any leader arm to record current pose")

    print("\n" + "=" * 70)
    print("\nReady to record poses!")

    # Save terminal settings for raw mode
    old_settings = None
    if sys.stdin.isatty():
        old_settings = termios.tcgetattr(sys.stdin)

    # Interactive loop
    try:
        while True:
            # Status line
            poses_count = len(self.poses)
            status = f"\nPoses recorded: {poses_count}"
            if poses_count < min_poses:
                status += f" / {min_poses} (minimum)"
            else:
                status += " ✓ (minimum reached)"
            print(status)

            print(
                "\nWaiting for input (button press or command)...",
                end="",
                flush=True,
            )

            # Monitor button presses in a loop
            command = None
            while command is None:
                # Check for button press
                if has_leaders and self.check_button_press():
                    print("\n\n🔘 Button pressed! Recording pose...")
                    time.sleep(0.5)  # Debounce
                    command = "r"
                    break

                # Check for keyboard input (non-blocking)
                if sys.stdin.isatty() and old_settings:
                    if select.select([sys.stdin], [], [], 0.05)[0]:
                        # Switch to raw mode to read single character
                        tty.setraw(sys.stdin.fileno())
                        char = sys.stdin.read(1)
                        termios.tcsetattr(
                            sys.stdin, termios.TCSADRAIN, old_settings
                        )

                        if char == "\x03":  # Ctrl+C
                            raise KeyboardInterrupt
                        elif char == "\n" or char == "\r":
                            # Enter pressed, get full command
                            print("\r\nCommand [r/d/l/q/h]: ", end="", flush=True)
                            # Restore normal mode for input
                            command = input().strip().lower()
                            break
                        elif char in ["r", "d", "l", "q", "h"]:
                            # Direct command without Enter
                            command = char
                            print(f"\r\nCommand: {command}")
                            break
                else:
                    # Fallback for non-TTY or if we can't set raw mode
                    time.sleep(0.1)

            if command == "r":
                pose = self.record_current_pose()
                print(f"✓ Recorded pose {pose.id}: {pose.name}")

                if self.robot_controller.follower_r and pose.follower_r:
                    pos = pose.follower_r[:3]
                    print(
                        f"  Right follower position (first 3 joints): {[f'{p:.3f}' for p in pos]}"
                    )

                if self.robot_controller.follower_l and pose.follower_l:
                    pos = pose.follower_l[:3]
                    print(
                        f"  Left follower position (first 3 joints): {[f'{p:.3f}' for p in pos]}"
                    )

            elif command == "d":
                self.delete_last_pose()

            elif command == "l":
                self.list_poses()

            elif command == "q":
                if poses_count < min_poses:
                    print(
                        f"\nWarning: Only {poses_count} poses recorded (minimum: {min_poses})."
                    )
                    print("Continue anyway? [y/N]: ", end="", flush=True)
                    # Restore terminal for input
                    if old_settings:
                        termios.tcsetattr(
                            sys.stdin, termios.TCSADRAIN, old_settings
                        )
                    confirm = input().strip().lower()
                    if confirm != "y":
                        print("Continuing recording...")
                        continue

                self.save_poses()
                print("\nCalibration pose recording complete!")

                # Shutdown: stop teleop -> restore control -> go home
                self.robot_controller.shutdown()

                break

            elif command == "h":
                print("\nCommands:")
                print("  r - Record current pose")
                print("  d - Delete last pose")
                print("  l - List recorded poses")
                print("  q - Quit and save")
                print("  h - Show help")
                if has_leaders:
                    print("\nButton Input:")
                    print("  - Press button on any leader arm to record")

            else:
                print(f"Unknown command: {command}")
                print("Type 'h' for help")

    except KeyboardInterrupt:
        print("\n\nInterrupted by user")
        if old_settings:
            termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
        if self.poses:
            print("Save recorded poses before exiting? [Y/n]: ", end="", flush=True)
            save = input().strip().lower()
            if save != "n":
                self.save_poses()

        # Shutdown: stop teleop -> restore control -> go home
        self.robot_controller.shutdown()
    except Exception as e:
        print(f"\nError: {e}")
        import traceback

        traceback.print_exc()

        # Perform emergency stop on error
        if self.robot_controller and self.robot_controller.has_robots():
            self.robot_controller.emergency_stop()
    finally:
        # Cleanup robots
        if self.robot_controller:
            self.robot_controller.cleanup()

save_poses()

Save recorded poses to JSON file

Source code in raiden/calibration/recorder.py
def save_poses(self):
    """Save recorded poses to JSON file"""
    # Embed the board config so the calibration runner can rebuild the
    # exact same ChArUco geometry later.
    payload = {
        "version": "1.0",
        "charuco_config": self.charuco_config.to_dict(),
        "poses": [entry.to_dict() for entry in self.poses],
        "calibration_targets": self.calibration_targets,
        "board_mount": "fixed",
        "created": datetime.now().isoformat(),
        "last_modified": datetime.now().isoformat(),
    }

    with open(self.output_file, "w") as handle:
        json.dump(payload, handle, indent=2)

    print(f"\n✓ Saved {len(self.poses)} poses to {self.output_file}")

ChArUcoBoardConfig dataclass

Configuration for ChArUco calibration board

Source code in raiden/calibration/recorder.py
@dataclass
class ChArUcoBoardConfig:
    """Configuration for ChArUco calibration board

    Defaults describe a 9x9 board with 30 mm checkers and 23 mm markers.
    """

    squares_x: int = 9
    squares_y: int = 9
    square_length: float = 0.03  # meters (checker size: 30mm)
    marker_length: float = 0.023  # meters (marker size: 23mm)
    dictionary: str = "DICT_6X6_250"  # ArUco dictionary

    def to_dict(self):
        """Return the board parameters as a plain (JSON-serializable) dict."""
        return asdict(self)

    @classmethod
    def from_dict(cls, data):
        """Build a config from a dict produced by ``to_dict``.

        CalibrationRunner.load_configuration calls this to restore the
        board geometry saved in the poses file; it was missing from the
        class. Unknown keys are ignored so older/newer pose files load.
        """
        known = cls.__dataclass_fields__
        return cls(**{k: v for k, v in data.items() if k in known})

run_calibration_pose_recording(min_poses=5, output_file=CALIBRATION_POSES_FILE, camera_config_file=CAMERA_CONFIG, charuco_config=None)

Main entry point for calibration pose recording

Source code in raiden/calibration/recorder.py
def run_calibration_pose_recording(
    min_poses: int = 5,
    output_file: str = CALIBRATION_POSES_FILE,
    camera_config_file: str = CAMERA_CONFIG,
    charuco_config: Optional[ChArUcoBoardConfig] = None,
):
    """Main entry point for calibration pose recording"""
    # Construct the recorder, then hand control to its interactive loop.
    session = CalibrationPoseRecorder(
        output_file=output_file,
        camera_config_file=camera_config_file,
        charuco_config=charuco_config,
    )
    session.run_interactive_recording(min_poses=min_poses)

Orchestrates the full camera calibration workflow

CalibrationRunner

Runs the complete calibration workflow

Source code in raiden/calibration/runner.py
 210
 211
 212
 213
 214
 215
 216
 217
 218
 219
 220
 221
 222
 223
 224
 225
 226
 227
 228
 229
 230
 231
 232
 233
 234
 235
 236
 237
 238
 239
 240
 241
 242
 243
 244
 245
 246
 247
 248
 249
 250
 251
 252
 253
 254
 255
 256
 257
 258
 259
 260
 261
 262
 263
 264
 265
 266
 267
 268
 269
 270
 271
 272
 273
 274
 275
 276
 277
 278
 279
 280
 281
 282
 283
 284
 285
 286
 287
 288
 289
 290
 291
 292
 293
 294
 295
 296
 297
 298
 299
 300
 301
 302
 303
 304
 305
 306
 307
 308
 309
 310
 311
 312
 313
 314
 315
 316
 317
 318
 319
 320
 321
 322
 323
 324
 325
 326
 327
 328
 329
 330
 331
 332
 333
 334
 335
 336
 337
 338
 339
 340
 341
 342
 343
 344
 345
 346
 347
 348
 349
 350
 351
 352
 353
 354
 355
 356
 357
 358
 359
 360
 361
 362
 363
 364
 365
 366
 367
 368
 369
 370
 371
 372
 373
 374
 375
 376
 377
 378
 379
 380
 381
 382
 383
 384
 385
 386
 387
 388
 389
 390
 391
 392
 393
 394
 395
 396
 397
 398
 399
 400
 401
 402
 403
 404
 405
 406
 407
 408
 409
 410
 411
 412
 413
 414
 415
 416
 417
 418
 419
 420
 421
 422
 423
 424
 425
 426
 427
 428
 429
 430
 431
 432
 433
 434
 435
 436
 437
 438
 439
 440
 441
 442
 443
 444
 445
 446
 447
 448
 449
 450
 451
 452
 453
 454
 455
 456
 457
 458
 459
 460
 461
 462
 463
 464
 465
 466
 467
 468
 469
 470
 471
 472
 473
 474
 475
 476
 477
 478
 479
 480
 481
 482
 483
 484
 485
 486
 487
 488
 489
 490
 491
 492
 493
 494
 495
 496
 497
 498
 499
 500
 501
 502
 503
 504
 505
 506
 507
 508
 509
 510
 511
 512
 513
 514
 515
 516
 517
 518
 519
 520
 521
 522
 523
 524
 525
 526
 527
 528
 529
 530
 531
 532
 533
 534
 535
 536
 537
 538
 539
 540
 541
 542
 543
 544
 545
 546
 547
 548
 549
 550
 551
 552
 553
 554
 555
 556
 557
 558
 559
 560
 561
 562
 563
 564
 565
 566
 567
 568
 569
 570
 571
 572
 573
 574
 575
 576
 577
 578
 579
 580
 581
 582
 583
 584
 585
 586
 587
 588
 589
 590
 591
 592
 593
 594
 595
 596
 597
 598
 599
 600
 601
 602
 603
 604
 605
 606
 607
 608
 609
 610
 611
 612
 613
 614
 615
 616
 617
 618
 619
 620
 621
 622
 623
 624
 625
 626
 627
 628
 629
 630
 631
 632
 633
 634
 635
 636
 637
 638
 639
 640
 641
 642
 643
 644
 645
 646
 647
 648
 649
 650
 651
 652
 653
 654
 655
 656
 657
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
class CalibrationRunner:
    """Runs the complete calibration workflow"""

    def __init__(
        self,
        camera_config_file: str = CAMERA_CONFIG,
        poses_file: str = CALIBRATION_POSES_FILE,
        output_file: str = CALIBRATION_FILE,
        charuco_config: Optional[ChArUcoBoardConfig] = None,
    ):
        """Record file locations and reset all runtime state to unloaded.

        Args:
            camera_config_file: Path to the camera configuration file.
            poses_file: Path to the calibration poses file.
            output_file: Path where calibration results are written.
            charuco_config: Optional board config that, when given, overrides
                the one embedded in the poses file.
        """
        # Input/output file locations for the calibration workflow.
        self.camera_config_file = camera_config_file
        self.poses_file = poses_file
        self.output_file = output_file
        # CLI-supplied board override; None means "use the poses file's config".
        self._charuco_config_override = charuco_config

        # Filled in by load_configuration().
        self.board_config: Optional[ChArUcoBoardConfig] = None
        self.calibrator: Optional[CameraCalibrator] = None
        self.poses_data: Optional[Dict] = None
        self.camera_config: Optional[CameraConfig] = None

        # Filled in by initialize_robots() / initialize_cameras().
        self.cameras: Dict[str, Camera] = {}
        self.robot_controller: Optional[RobotController] = None
        # Event used to cancel in-flight smooth arm moves.
        self._move_stop_event = threading.Event()

    def load_configuration(self):
        """Load camera config, poses, and board config, then build the calibrator."""
        print("\nLoading configuration...")

        # Camera definitions (names, serials, types).
        self.camera_config = CameraConfig(self.camera_config_file)
        num_cameras = len(self.camera_config.list_cameras())
        print(f"✓ Camera config loaded: {num_cameras} camera(s) configured")

        # Capture pose sequence (may also embed a ChArUco board config).
        self.poses_data = load_calibration_poses(self.poses_file)
        num_poses = len(self.poses_data["poses"])
        print(f"✓ Calibration poses loaded: {num_poses} pose(s)")

        # Board config: an explicit override (e.g. from the CLI) beats the one
        # stored alongside the poses.
        if self._charuco_config_override is None:
            self.board_config = ChArUcoBoardConfig.from_dict(
                self.poses_data["charuco_config"]
            )
        else:
            self.board_config = self._charuco_config_override
        print("✓ ChArUco board config loaded")

        # Calibrator bound to the selected board geometry.
        self.calibrator = CameraCalibrator(self.board_config)

    def initialize_robots(self, left_wrist: bool, right_wrist: bool):
        """Bring up the follower arms needed for the selected wrist cameras.

        Args:
            left_wrist: Whether the left follower arm is required.
            right_wrist: Whether the right follower arm is required.
        """
        # Calibration only moves follower arms; leaders are never needed.
        self.robot_controller = RobotController(
            use_left_leader=False,
            use_right_leader=False,
            use_left_follower=left_wrist,
            use_right_follower=right_wrist,
        )

        print("\nChecking CAN interfaces...")
        self.robot_controller.check_can_interfaces()

        print("\nInitializing robots...")
        # Position control, not gravity compensation, during calibration moves.
        self.robot_controller.initialize_robots(gravity_comp_mode=False)

        # Start the sequence from the home configuration, both arms at once.
        self.robot_controller.move_to_home_positions(simultaneous=True)

    def initialize_cameras(self, camera_names: List[str], warmup_frames: int = 10):
        """Open the named cameras via the factory, then warm them all up.

        Args:
            camera_names: Camera names to open; each must exist in the config.
            warmup_frames: Frames grabbed from every camera before use.

        Raises:
            ValueError: If a requested camera name is missing from the config.
        """
        print("\nInitializing cameras...")

        for name in camera_names:
            cam_type = self.camera_config.get_camera_type(name)
            serial = self.camera_config.get_serial_by_name(name)
            if serial is None:
                raise ValueError(f"Camera '{name}' not found in config")

            print(f"  - Initializing {name} ({cam_type}, serial: {serial})...")
            camera = self.camera_config.create_camera(name)
            camera.open()
            self.cameras[name] = camera

        # Warm-up runs only after every camera is open, so each device gets
        # the same settling time regardless of how many are attached.
        print(f"  Warming up cameras ({warmup_frames} frames)...")
        for _ in range(warmup_frames):
            for cam in self.cameras.values():
                cam.grab()

        print("✓ Cameras initialized")

    def _compute_and_apply_bimanual_transform(
        self, camera_data: Dict, left_camera_id: str, right_camera_id: str
    ) -> bool:
        """Compute bimanual transform from hand-eye calibration results

        Strategy:
        1. Compute left hand-eye calibration (in left arm base frame)
        2. Compute right hand-eye calibration (in right arm base frame)
        3. Use both hand-eye results to compute bimanual base transform
        4. Update right arm poses to left base frame

        Side effects:
            Sets the module-level ``T_RIGHT_BASE_TO_LEFT_BASE`` and rewrites
            ``camera_data[right_camera_id]["robot_poses"]`` in place.

        Args:
            camera_data: Dictionary with calibration data for all cameras
            left_camera_id: Name of left wrist camera
            right_camera_id: Name of right wrist camera

        Returns:
            True if transform was successfully computed, False otherwise
        """
        # The computed transform is stored at module level so later steps
        # (e.g. run_calibration) can read it.
        global T_RIGHT_BASE_TO_LEFT_BASE

        print("\nComputing bimanual base transform from hand-eye calibrations...")

        left_data = camera_data.get(left_camera_id)
        right_data = camera_data.get(right_camera_id)

        # Validate we have data from both cameras
        if not left_data or not right_data:
            print("  ✗ Missing camera data")
            return False

        if not left_data["corners"] or not right_data["corners"]:
            print("  ✗ No board detections found")
            return False

        # Step 1: Get camera intrinsics from SDK (factory calibrated)
        print("  Step 1: Getting camera intrinsics from SDK...")
        left_camera = self.cameras.get(left_camera_id)
        right_camera = self.cameras.get(right_camera_id)

        if not left_camera or not right_camera:
            print("  ✗ Camera objects not found")
            return False

        left_cam_matrix, left_dist_coeffs, left_img_size = left_camera.get_intrinsics()
        right_cam_matrix, right_dist_coeffs, right_img_size = (
            right_camera.get_intrinsics()
        )

        print(
            f"    ✓ Left camera: fx={left_cam_matrix[0, 0]:.1f}, fy={left_cam_matrix[1, 1]:.1f}"
        )
        print(
            f"    ✓ Right camera: fx={right_cam_matrix[0, 0]:.1f}, fy={right_cam_matrix[1, 1]:.1f}"
        )

        # Step 2: Estimate board poses from both cameras
        # NOTE(review): _estimate_board_poses drops views where pose estimation
        # fails, so these lists may be shorter than the robot-pose lists they
        # are later index-matched against — confirm alignment in practice.
        left_board_poses = self._estimate_board_poses(
            left_data,
            left_cam_matrix.tolist(),
            left_dist_coeffs.tolist(),
        )
        right_board_poses = self._estimate_board_poses(
            right_data,
            right_cam_matrix.tolist(),
            right_dist_coeffs.tolist(),
        )

        # Match valid poses
        valid_poses = self._match_valid_poses(
            left_data["robot_poses"],
            right_data["robot_poses"],
            left_board_poses,
            right_board_poses,
        )

        if not valid_poses:
            print("  ✗ No valid pose pairs found")
            return False

        print(
            f"  Step 2: Computing hand-eye calibrations ({len(valid_poses['left_robot'])} pose pairs)..."
        )

        # Step 3: Compute left hand-eye calibration (in left arm base frame)
        print("    Computing left wrist hand-eye calibration...")
        left_hand_eye_result = self.calibrator.calibrate_hand_eye(
            valid_poses["left_robot"], valid_poses["left_board"]
        )

        if not left_hand_eye_result["success"]:
            print(
                f"      ✗ Left hand-eye calibration failed: {left_hand_eye_result.get('error', 'Unknown error')}"
            )
            return False

        print(
            f"      ✓ Left hand-eye calibration complete (method: {left_hand_eye_result['method']})"
        )

        # Step 4: Compute right hand-eye calibration (in right arm base frame)
        print("    Computing right wrist hand-eye calibration...")
        right_hand_eye_result = self.calibrator.calibrate_hand_eye(
            valid_poses["right_robot"], valid_poses["right_board"]
        )

        if not right_hand_eye_result["success"]:
            print(
                f"      ✗ Right hand-eye calibration failed: {right_hand_eye_result.get('error', 'Unknown error')}"
            )
            return False

        print(
            f"      ✓ Right hand-eye calibration complete (method: {right_hand_eye_result['method']})"
        )

        # Step 5: Compute bimanual base transform using both hand-eye results
        print("  Step 3: Computing bimanual base transform from hand-eye results...")

        T_RIGHT_BASE_TO_LEFT_BASE = compute_bimanual_base_transform_from_calibration(
            valid_poses["left_robot"],
            valid_poses["right_robot"],
            valid_poses["left_board"],
            valid_poses["right_board"],
            left_hand_eye_result,
            right_hand_eye_result,
        )

        print(
            f"    ✓ Bimanual transform computed from {len(valid_poses['left_robot'])} pose pair(s)"
        )

        # Step 6: Transform right arm poses to left base frame
        # NOTE(review): the INVERSE of T_RIGHT_BASE_TO_LEFT_BASE is applied to
        # the right-arm FK poses here. Whether that is the correct direction
        # depends on the frame-mapping convention used by
        # compute_bimanual_base_transform_from_calibration's return value —
        # confirm against that helper's definition.
        T_left_base_to_right_base = np.linalg.inv(T_RIGHT_BASE_TO_LEFT_BASE)
        right_data["robot_poses"] = [
            T_left_base_to_right_base @ pose if pose is not None else None
            for pose in right_data["robot_poses"]
        ]
        print("  ✓ Right arm poses transformed to left base frame")

        return True

    def _estimate_board_poses(
        self, camera_data: Dict, camera_matrix: list, distortion_coeffs: list
    ) -> List:
        """Estimate a board pose (rvec, tvec) for each view that yields one.

        NOTE(review): views where pose estimation fails are silently skipped
        rather than recorded as placeholders, so the returned list can be
        shorter than ``camera_data["corners"]`` and lose index alignment with
        ``camera_data["robot_poses"]`` — confirm callers tolerate this.

        Args:
            camera_data: Per-camera data dict holding "corners" and "ids".
            camera_matrix: 3x3 intrinsic matrix as nested lists.
            distortion_coeffs: Distortion coefficients as a list.

        Returns:
            List of (rvec, tvec) tuples for the successfully-estimated views.
        """
        K = np.array(camera_matrix)
        dist = np.array(distortion_coeffs)

        poses = []
        for corners, ids in zip(camera_data["corners"], camera_data["ids"]):
            rvec, tvec = self.calibrator.detector.estimate_pose(corners, ids, K, dist)
            if rvec is None:
                continue
            poses.append((rvec, tvec))

        return poses

    def _match_valid_poses(
        self,
        left_robot_poses: List,
        right_robot_poses: List,
        left_board_poses: List,
        right_board_poses: List,
    ) -> Optional[Dict]:
        """Match valid pose pairs from both arms"""
        valid_left_robot = []
        valid_right_robot = []
        valid_left_board = []
        valid_right_board = []

        min_len = min(
            len(left_robot_poses),
            len(right_robot_poses),
            len(left_board_poses),
            len(right_board_poses),
        )

        for i in range(min_len):
            if left_robot_poses[i] is not None and right_robot_poses[i] is not None:
                valid_left_robot.append(left_robot_poses[i])
                valid_right_robot.append(right_robot_poses[i])
                valid_left_board.append(left_board_poses[i])
                valid_right_board.append(right_board_poses[i])

        if not valid_left_robot:
            return None

        return {
            "left_robot": valid_left_robot,
            "right_robot": valid_right_robot,
            "left_board": valid_left_board,
            "right_board": valid_right_board,
        }

    def collect_calibration_data(
        self,
    ) -> Dict[str, Dict[str, List]]:
        """Move through poses and collect calibration data.

        For each configured pose: move both follower arms simultaneously (if
        robots are in use), wait for the arms to settle, then grab one frame
        per camera and attempt ChArUco detection. For wrist cameras with a
        detection, the robot's actual joint positions are read back and FK is
        computed so the view can later be used for hand-eye calibration.

        Returns:
            Dictionary mapping camera names to their calibration data.
            Each value is a dict with keys ``images``, ``corners``, ``ids``,
            and ``robot_poses`` (for hand-eye calibration).
        """
        print("\nStarting calibration sequence...")
        print("=" * 70)
        print(
            "Coordinate frame: All transforms will be expressed relative to left arm base"
        )
        print()

        # Initialize data storage
        # arm_type is inferred from the camera NAME containing "left"/"right";
        # cameras named otherwise get None.
        camera_data = {
            name: {
                "images": [],
                "corners": [],
                "ids": [],
                "robot_poses": [],
                "joint_positions": [],  # Store joint positions for bimanual transform computation
                "arm_type": "left"
                if "left" in name
                else ("right" if "right" in name else None),
            }
            for name in self.cameras.keys()
        }

        poses = self.poses_data["poses"]

        for i, pose in enumerate(poses):
            print(f"\nPose {i + 1}/{len(poses)}: {pose['name']}")

            # Move robots to pose (only if we have robots)
            # Each present arm gets its own thread so both move at once; the
            # shared stop event lets an outside caller abort mid-move.
            if self.robot_controller:
                threads = []
                if self.robot_controller.follower_r and "follower_r" in pose:
                    target_r = np.array(pose["follower_r"])
                    threads.append(
                        threading.Thread(
                            target=smooth_move_joints,
                            args=(self.robot_controller.follower_r, target_r),
                            kwargs={"stop_event": self._move_stop_event},
                            daemon=True,
                        )
                    )
                if self.robot_controller.follower_l and "follower_l" in pose:
                    target_l = np.array(pose["follower_l"])
                    threads.append(
                        threading.Thread(
                            target=smooth_move_joints,
                            args=(self.robot_controller.follower_l, target_l),
                            kwargs={"stop_event": self._move_stop_event},
                            daemon=True,
                        )
                    )

                if threads:
                    print(
                        f"  Moving {'both arms' if len(threads) > 1 else 'arm'} simultaneously..."
                    )
                    for t in threads:
                        t.start()
                    # Block until both arms finish their move.
                    for t in threads:
                        t.join()

                # Wait for stabilization
                print("  Waiting for stabilization...")
                time.sleep(1.0)
            else:
                # No robots - just a short delay for scene camera
                time.sleep(0.5)

            # Capture images and detect board
            print("  Capturing images...")
            for camera_name, camera in self.cameras.items():
                try:
                    # Capture frame
                    if not camera.grab():
                        raise RuntimeError(f"grab() failed for {camera_name}")
                    image = camera.get_frame().color

                    # Ensure image is contiguous and has correct dtype
                    image = np.ascontiguousarray(image, dtype=np.uint8)

                    camera_data[camera_name]["images"].append(image)

                    # Detect ChArUco board
                    corners, ids = self.calibrator.detector.detect(image)

                    if corners is not None and len(corners) > 0:
                        camera_data[camera_name]["corners"].append(corners)
                        camera_data[camera_name]["ids"].append(ids)
                        print(
                            f"    ✓ {camera_name}: ChArUco board detected ({len(corners)} corners)"
                        )

                        # For wrist cameras, store robot pose for hand-eye calibration
                        if "wrist" in camera_name and self.robot_controller:
                            # Get corresponding robot and actual joint positions
                            robot = None
                            if (
                                "right" in camera_name
                                and self.robot_controller.follower_r
                            ):
                                robot = self.robot_controller.follower_r
                            elif (
                                "left" in camera_name
                                and self.robot_controller.follower_l
                            ):
                                robot = self.robot_controller.follower_l
                            else:
                                # NOTE(review): this `continue` fires after the
                                # corners/ids were already appended, so
                                # "robot_poses" ends up shorter than "corners"
                                # for such a camera — confirm downstream
                                # index-matching tolerates that.
                                continue

                            # Read actual joint positions from the robot
                            try:
                                actual_joint_pos = robot.get_joint_pos()

                                # Store actual joint positions for later use
                                camera_data[camera_name]["joint_positions"].append(
                                    actual_joint_pos
                                )

                                # Compute forward kinematics from actual joint positions
                                # For initial data collection, compute FK in local frames
                                # (we'll recompute after getting bimanual transform)
                                # The Kinematics model is built lazily once and
                                # cached at module level for reuse.
                                global _kinematics_cache
                                if _kinematics_cache is None:
                                    _kinematics_cache = Kinematics(
                                        get_yam_4310_linear_xml_path(),
                                        site_name="grasp_site",
                                    )

                                # Only the first 6 joints feed FK; any extra
                                # values (presumably the gripper) are dropped.
                                robot_pose = _kinematics_cache.fk(actual_joint_pos[:6])
                                camera_data[camera_name]["robot_poses"].append(
                                    robot_pose
                                )
                            except Exception as e:
                                print(
                                    f"    Warning: Failed to get actual robot pose: {e}"
                                )
                                # Store None as placeholder
                                camera_data[camera_name]["robot_poses"].append(None)
                    else:
                        print(f"    ✗ {camera_name}: Board not detected")

                except Exception as e:
                    print(f"    ✗ {camera_name}: Error - {e}")

        print("\n" + "=" * 70)
        print("Data collection complete!")
        return camera_data

    def run_calibration(
        self,
        left_wrist_camera_id: Optional[str] = None,
        right_wrist_camera_id: Optional[str] = None,
        scene_camera_ids: Optional[List[str]] = None,
    ) -> Dict:
        """Run the full calibration workflow

        Args:
            left_wrist_camera_id: Name of left wrist camera (e.g., "left_wrist")
            right_wrist_camera_id: Name of right wrist camera (e.g., "right_wrist")
            scene_camera_ids: Names of all scene cameras (multiple allowed)

        Returns:
            Calibration results dictionary
        """
        try:
            # Load configuration
            self.load_configuration()

            # Determine which cameras to calibrate
            target_cameras = []
            calibrate_left = left_wrist_camera_id is not None
            calibrate_right = right_wrist_camera_id is not None
            calibrate_scene = bool(scene_camera_ids)

            if calibrate_right:
                target_cameras.append(right_wrist_camera_id)
            if calibrate_left:
                target_cameras.append(left_wrist_camera_id)
            if calibrate_scene:
                target_cameras.extend(scene_camera_ids)

            # Check bimanual transform requirements
            global T_RIGHT_BASE_TO_LEFT_BASE
            if (
                calibrate_right
                and not calibrate_left
                and T_RIGHT_BASE_TO_LEFT_BASE is None
            ):
                raise ValueError(
                    "Cannot calibrate right wrist camera without bimanual transform.\n"
                    "Please calibrate both left and right wrist cameras together to compute the transform."
                )

            print("\nCalibration targets:")
            for camera in target_cameras:
                camera_type = "hand-eye" if "wrist" in camera else "scene"
                print(f"  - {camera} ({camera_type})")

            # Initialize hardware
            # Initialize cameras first (quick check before robot initialization)
            self.initialize_cameras(target_cameras)

            # Only initialize robots if we're calibrating wrist cameras
            if calibrate_left or calibrate_right:
                self.initialize_robots(calibrate_left, calibrate_right)
            else:
                print("\nNo wrist cameras selected - skipping robot initialization")

            # Collect data
            camera_data = self.collect_calibration_data()

            # Compute bimanual transform if both arms are calibrated
            bimanual_transform_computed = False
            if calibrate_left and calibrate_right and T_RIGHT_BASE_TO_LEFT_BASE is None:
                bimanual_transform_computed = (
                    self._compute_and_apply_bimanual_transform(
                        camera_data, left_wrist_camera_id, right_wrist_camera_id
                    )
                )

            # Run calibration for each camera
            print("\nComputing calibrations...")
            print("=" * 70)

            results = {
                "version": "1.0",
                "timestamp": datetime.now().isoformat(),
                "coordinate_frame": "left_arm_base",  # All transforms relative to left arm base
                "charuco_config": self.board_config.__dict__,
                "cameras": {},
                "quality_metrics": {},
            }

            # Add bimanual transform if computed
            if bimanual_transform_computed and T_RIGHT_BASE_TO_LEFT_BASE is not None:
                results["bimanual_transform"] = {
                    "right_base_to_left_base": T_RIGHT_BASE_TO_LEFT_BASE.tolist(),
                    "computed_from_calibration": True,
                    "description": "Transform from right arm base frame to left arm base frame",
                }
            elif T_RIGHT_BASE_TO_LEFT_BASE is not None:
                results["bimanual_transform"] = {
                    "right_base_to_left_base": T_RIGHT_BASE_TO_LEFT_BASE.tolist(),
                    "computed_from_calibration": False,
                    "description": "Transform from right arm base frame to left arm base frame (from config)",
                }

            # Process cameras in order: left wrist first (needed for scene camera conversion), then others
            # This ensures left wrist hand-eye calibration is available for scene camera conversion
            cameras_to_process = []
            if left_wrist_camera_id and left_wrist_camera_id in target_cameras:
                cameras_to_process.append(left_wrist_camera_id)
            for camera_name in target_cameras:
                if camera_name != left_wrist_camera_id:
                    cameras_to_process.append(camera_name)

            for camera_name in cameras_to_process:
                print(f"\n{camera_name}:")
                data = camera_data[camera_name]

                if len(data["corners"]) < 3:
                    print(
                        f"  ✗ Insufficient data ({len(data['corners'])} views, need >= 3)"
                    )
                    continue

                # Get factory-calibrated intrinsics from the camera SDK
                cam_type = self.camera_config.get_camera_type(camera_name) or "unknown"
                print(
                    f"  Getting intrinsics from {cam_type} SDK (factory calibration)..."
                )
                camera = self.cameras.get(camera_name)
                if not camera:
                    print("  ✗ Camera object not found")
                    continue

                cam_matrix, dist_coeffs, image_size = camera.get_intrinsics()
                print(
                    f"    ✓ fx={cam_matrix[0, 0]:.1f}, fy={cam_matrix[1, 1]:.1f}, cx={cam_matrix[0, 2]:.1f}, cy={cam_matrix[1, 2]:.1f}"
                )

                intrinsics_result = {
                    "success": True,
                    "camera_matrix": cam_matrix.tolist(),
                    "distortion_coeffs": dist_coeffs.tolist(),
                }

                # Store intrinsics
                camera_result = {
                    "type": "hand_eye" if "wrist" in camera_name else "scene",
                    "serial_number": self.camera_config.get_serial_by_name(camera_name),
                    "intrinsics": {
                        "camera_matrix": intrinsics_result["camera_matrix"],
                        "distortion_coeffs": intrinsics_result["distortion_coeffs"],
                        "image_size": list(image_size),
                        "source": f"{cam_type}_sdk_factory_calibration",
                    },
                    "num_poses_used": len(data["corners"]),
                }

                # Perform extrinsics calibration
                if "wrist" in camera_name:
                    # Hand-eye calibration
                    print("  Computing hand-eye calibration...")

                    # Check if we have robot poses
                    if not data["robot_poses"] or data["robot_poses"][0] is None:
                        print(
                            "  ✗ Forward kinematics not implemented - skipping hand-eye calibration"
                        )
                    else:
                        # Estimate camera poses (board-to-camera)
                        camera_matrix = np.array(intrinsics_result["camera_matrix"])
                        dist_coeffs = np.array(intrinsics_result["distortion_coeffs"])
                        camera_poses = []

                        for corners, ids in zip(data["corners"], data["ids"]):
                            rvec, tvec = self.calibrator.detector.estimate_pose(
                                corners, ids, camera_matrix, dist_coeffs
                            )
                            if rvec is not None:
                                camera_poses.append((rvec, tvec))

                        # Run hand-eye calibration
                        hand_eye_result = self.calibrator.calibrate_hand_eye(
                            data["robot_poses"], camera_poses
                        )

                        if hand_eye_result["success"]:
                            camera_result["hand_eye_calibration"] = hand_eye_result
                            print(
                                f"    ✓ Hand-eye calibration complete (method: {hand_eye_result['method']})"
                            )
                        else:
                            print(
                                f"  ✗ Hand-eye calibration failed: {hand_eye_result.get('error', 'Unknown error')}"
                            )

                else:
                    # Scene camera calibration
                    print("  Computing scene camera extrinsics...")

                    # Estimate camera poses (T_board_to_scene_cam)
                    camera_matrix = np.array(intrinsics_result["camera_matrix"])
                    dist_coeffs = np.array(intrinsics_result["distortion_coeffs"])
                    camera_poses = []

                    for corners, ids in zip(data["corners"], data["ids"]):
                        rvec, tvec = self.calibrator.detector.estimate_pose(
                            corners, ids, camera_matrix, dist_coeffs
                        )
                        if rvec is not None:
                            camera_poses.append((rvec, tvec))

                    # Calibrate scene camera relative to board frame first
                    scene_result = self.calibrator.calibrate_scene_camera(camera_poses)

                    if scene_result["success"]:
                        # Now convert to left arm base frame if we have left wrist camera data
                        # T_scene_cam_to_left_base = T_scene_cam_to_board @ T_board_to_left_base
                        # We can get T_board_to_left_base from the left wrist camera observations

                        if calibrate_left and left_wrist_camera_id:
                            left_data = camera_data.get(left_wrist_camera_id)
                            if left_data and "hand_eye_calibration" in results[
                                "cameras"
                            ].get(left_wrist_camera_id, {}):
                                print(
                                    "    Converting scene camera pose to left arm base frame..."
                                )

                                # Get left hand-eye calibration (T_left_cam_to_left_ee)
                                left_hand_eye = results["cameras"][
                                    left_wrist_camera_id
                                ]["hand_eye_calibration"]
                                T_left_cam_to_left_ee = np.eye(4)
                                T_left_cam_to_left_ee[:3, :3] = np.array(
                                    left_hand_eye["rotation_matrix"]
                                )
                                T_left_cam_to_left_ee[:3, 3] = np.array(
                                    left_hand_eye["translation_vector"]
                                )

                                # Get left intrinsics
                                left_camera = self.cameras.get(left_wrist_camera_id)
                                left_cam_matrix, left_dist_coeffs, _ = (
                                    left_camera.get_intrinsics()
                                )

                                # Estimate board poses from left camera
                                left_board_poses = []
                                for corners, ids in zip(
                                    left_data["corners"], left_data["ids"]
                                ):
                                    rvec, tvec = self.calibrator.detector.estimate_pose(
                                        corners, ids, left_cam_matrix, left_dist_coeffs
                                    )
                                    if rvec is not None:
                                        left_board_poses.append((rvec, tvec))

                                if left_board_poses and left_data["robot_poses"]:
                                    # Compute average T_board_to_left_base from left wrist observations
                                    print(
                                        f"      Computing T_board_to_left_base from {len(left_board_poses)} left wrist observations..."
                                    )
                                    board_to_left_base_transforms = []
                                    for i, (rvec, tvec) in enumerate(left_board_poses):
                                        if (
                                            i >= len(left_data["robot_poses"])
                                            or left_data["robot_poses"][i] is None
                                        ):
                                            continue

                                        # T_board_to_left_cam
                                        R, _ = cv2.Rodrigues(rvec)
                                        T_board_to_left_cam = np.eye(4)
                                        T_board_to_left_cam[:3, :3] = R
                                        T_board_to_left_cam[:3, 3] = tvec.flatten()

                                        # Correct chain: T^base_board = FK @ hand_eye @ ChArUco
                                        T_left_base_to_left_ee = left_data[
                                            "robot_poses"
                                        ][i]

                                        T_board_to_left_base = (
                                            T_left_base_to_left_ee
                                            @ T_left_cam_to_left_ee
                                            @ T_board_to_left_cam
                                        )

                                        board_to_left_base_transforms.append(
                                            T_board_to_left_base
                                        )

                                    if board_to_left_base_transforms:
                                        # Average the transforms
                                        from scipy.spatial.transform import Rotation

                                        rotations = [
                                            Rotation.from_matrix(T[:3, :3])
                                            for T in board_to_left_base_transforms
                                        ]
                                        translations = [
                                            T[:3, 3]
                                            for T in board_to_left_base_transforms
                                        ]

                                        avg_rotation = Rotation.concatenate(
                                            rotations
                                        ).mean()
                                        avg_translation = np.mean(translations, axis=0)

                                        T_board_to_left_base = np.eye(4)
                                        T_board_to_left_base[:3, :3] = (
                                            avg_rotation.as_matrix()
                                        )
                                        T_board_to_left_base[:3, 3] = avg_translation

                                        # Now compute T_scene_cam_to_left_base
                                        T_scene_cam_to_board = np.eye(4)
                                        T_scene_cam_to_board[:3, :3] = np.array(
                                            scene_result["rotation_matrix"]
                                        )
                                        T_scene_cam_to_board[:3, 3] = np.array(
                                            scene_result["translation_vector"]
                                        )

                                        # T_board_to_left_base is T^base_board (transforms board->base)
                                        # T_scene_cam_to_board is T^board_cam (transforms cam->board, from calibrate_scene_camera)
                                        # T^base_cam = T^base_board @ T^board_cam
                                        T_left_base_to_scene_cam = (
                                            T_board_to_left_base @ T_scene_cam_to_board
                                        )

                                        scene_cam_pos_in_left_base = (
                                            T_left_base_to_scene_cam[:3, 3]
                                        )
                                        print(
                                            f"      Scene camera position in left base: {scene_cam_pos_in_left_base}"
                                        )

                                        # Store T^base_cam: scene camera pose in left base frame
                                        # translation_vector = scene camera position in left base frame
                                        scene_result["rotation_matrix"] = (
                                            T_left_base_to_scene_cam[:3, :3].tolist()
                                        )
                                        scene_result["translation_vector"] = (
                                            T_left_base_to_scene_cam[:3, 3].tolist()
                                        )
                                        rvec_new, _ = cv2.Rodrigues(
                                            T_left_base_to_scene_cam[:3, :3]
                                        )
                                        scene_result["rotation_vector"] = (
                                            rvec_new.flatten().tolist()
                                        )
                                        scene_result["reference_frame"] = (
                                            "left_arm_base"
                                        )

                                        print(
                                            "      ✓ Scene camera pose converted to left arm base frame"
                                        )
                                    else:
                                        print(
                                            "      ! Warning: Could not convert to left arm base frame (no valid board observations)"
                                        )
                            else:
                                print(
                                    "      ! Warning: Cannot convert to left arm base frame (left wrist not calibrated)"
                                )
                        else:
                            print(
                                "      ! Warning: Scene camera extrinsics are in board frame (left wrist not calibrated)"
                            )

                        camera_result["extrinsics"] = scene_result
                        print("    ✓ Scene camera calibration complete")
                    else:
                        print("  ✗ Scene camera calibration failed")

                results["cameras"][camera_name] = camera_result

            # Compute quality metrics
            total_poses = len(self.poses_data["poses"])
            poses_with_detections = max(
                len(data["corners"]) for data in camera_data.values()
            )

            results["quality_metrics"] = {
                "overall_success": len(results["cameras"]) > 0,
                "poses_collected": total_poses,
                "poses_with_detections": poses_with_detections,
                "detection_rate": poses_with_detections / total_poses
                if total_poses > 0
                else 0,
            }

            print("\n" + "=" * 70)
            print("Calibration complete!")
            print("\nSummary:")
            print(f"  - Cameras calibrated: {len(results['cameras'])}")
            print(f"  - Poses used: {poses_with_detections}/{total_poses}")
            print(
                f"  - Detection rate: {results['quality_metrics']['detection_rate'] * 100:.1f}%"
            )
            if bimanual_transform_computed:
                print("  - Bimanual transform: Computed from calibration data ✓")
            elif T_RIGHT_BASE_TO_LEFT_BASE is not None:
                print("  - Bimanual transform: Loaded from configuration file")

            # Save results
            save_calibration_results(results, self.output_file)
            print(f"\n✓ Results saved to: {self.output_file}")

            # Store calibration result in DB
            try:
                from raiden.db.database import get_db

                get_db().add_calibration_result(results, self.output_file)
                print("✓ Calibration result recorded in DB")
            except Exception as _db_err:
                print(f"  Warning: could not record calibration in DB: {_db_err}")

            # Cleanup
            print("\nCleaning up...")

            if self.robot_controller:
                # Move the arms to the home positions.
                self.robot_controller.move_to_home_positions(simultaneous=True)

                # Stop control threads without moving to home
                # (Robots will stay in their current position - user can move them manually)
                print("  Stopping robot control threads...")
                self.robot_controller.stop_teleoperation()

                # Give threads time to fully terminate
                time.sleep(0.5)

                # Cleanup robot resources
                self.robot_controller.cleanup()

                # Give robot cleanup time to complete before exit
                time.sleep(0.5)
                print("  ✓ Robot controller cleaned up")
                print(
                    "  Note: Arms remain in current position - manually move to home if needed"
                )

            # Close cameras first (quick and safe)
            for camera in self.cameras.values():
                camera.close()
            print("  ✓ Cameras closed")

            print("\n✓ Cleanup complete")

            return results

        except KeyboardInterrupt:
            print("\n\nCalibration interrupted by user")
            self._move_stop_event.set()
            if self.robot_controller and self.robot_controller.has_robots():
                self.robot_controller.emergency_stop()
            raise

        except Exception as e:
            print(f"\nError during calibration: {e}")
            import traceback

            traceback.print_exc()

            self._move_stop_event.set()
            if self.robot_controller and self.robot_controller.has_robots():
                self.robot_controller.emergency_stop()
            raise

collect_calibration_data()

Move through poses and collect calibration data.

Returns:

Type Description
Dict[str, Dict[str, List]]

Dictionary mapping camera names to their calibration data. Each value is a
dict with keys images, corners, ids, and robot_poses (for hand-eye
calibration).

Source code in raiden/calibration/runner.py
def collect_calibration_data(
    self,
) -> Dict[str, Dict[str, List]]:
    """Move through poses and collect calibration data.

    For every pose in ``self.poses_data["poses"]``: move the follower arms
    (when a robot controller exists), capture one frame per camera, detect
    the ChArUco board, and — for wrist cameras — record the robot's actual
    joint positions and forward-kinematics pose for hand-eye calibration.

    Returns:
        Dictionary mapping camera names to their calibration data.
        Each value is a dict with keys ``images``, ``corners``, ``ids``,
        and ``robot_poses`` (for hand-eye calibration).
    """
    print("\nStarting calibration sequence...")
    print("=" * 70)
    print(
        "Coordinate frame: All transforms will be expressed relative to left arm base"
    )
    print()

    # Initialize data storage. arm_type is inferred from the camera name;
    # cameras that are neither "left" nor "right" (scene cameras) get None.
    camera_data = {
        name: {
            "images": [],
            "corners": [],
            "ids": [],
            "robot_poses": [],
            "joint_positions": [],  # Store joint positions for bimanual transform computation
            "arm_type": "left"
            if "left" in name
            else ("right" if "right" in name else None),
        }
        for name in self.cameras.keys()
    }

    poses = self.poses_data["poses"]

    for i, pose in enumerate(poses):
        print(f"\nPose {i + 1}/{len(poses)}: {pose['name']}")

        # Move robots to pose (only if we have robots)
        if self.robot_controller:
            # Move both arms concurrently so the overall sequence is faster
            # and the arms settle at the same time.
            threads = []
            if self.robot_controller.follower_r and "follower_r" in pose:
                target_r = np.array(pose["follower_r"])
                threads.append(
                    threading.Thread(
                        target=smooth_move_joints,
                        args=(self.robot_controller.follower_r, target_r),
                        kwargs={"stop_event": self._move_stop_event},
                        daemon=True,
                    )
                )
            if self.robot_controller.follower_l and "follower_l" in pose:
                target_l = np.array(pose["follower_l"])
                threads.append(
                    threading.Thread(
                        target=smooth_move_joints,
                        args=(self.robot_controller.follower_l, target_l),
                        kwargs={"stop_event": self._move_stop_event},
                        daemon=True,
                    )
                )

            if threads:
                print(
                    f"  Moving {'both arms' if len(threads) > 1 else 'arm'} simultaneously..."
                )
                for t in threads:
                    t.start()
                for t in threads:
                    t.join()

            # Wait for stabilization (residual vibration would blur frames
            # and bias the board pose estimate)
            print("  Waiting for stabilization...")
            time.sleep(1.0)
        else:
            # No robots - just a short delay for scene camera
            time.sleep(0.5)

        # Capture images and detect board
        print("  Capturing images...")
        for camera_name, camera in self.cameras.items():
            try:
                # Capture frame
                if not camera.grab():
                    raise RuntimeError(f"grab() failed for {camera_name}")
                image = camera.get_frame().color

                # Ensure image is contiguous and has correct dtype
                image = np.ascontiguousarray(image, dtype=np.uint8)

                camera_data[camera_name]["images"].append(image)

                # Detect ChArUco board
                corners, ids = self.calibrator.detector.detect(image)

                if corners is not None and len(corners) > 0:
                    camera_data[camera_name]["corners"].append(corners)
                    camera_data[camera_name]["ids"].append(ids)
                    print(
                        f"    ✓ {camera_name}: ChArUco board detected ({len(corners)} corners)"
                    )

                    # For wrist cameras, store robot pose for hand-eye calibration
                    if "wrist" in camera_name and self.robot_controller:
                        # Get corresponding robot and actual joint positions
                        robot = None
                        if (
                            "right" in camera_name
                            and self.robot_controller.follower_r
                        ):
                            robot = self.robot_controller.follower_r
                        elif (
                            "left" in camera_name
                            and self.robot_controller.follower_l
                        ):
                            robot = self.robot_controller.follower_l
                        else:
                            # FIX: append a None placeholder instead of a
                            # bare `continue` so robot_poses stays
                            # index-aligned with corners/ids — downstream
                            # hand-eye conversion indexes robot_poses by
                            # the board-detection index, so a skipped
                            # append would shift every later entry.
                            camera_data[camera_name]["robot_poses"].append(None)
                            continue

                        # Read actual joint positions from the robot
                        try:
                            actual_joint_pos = robot.get_joint_pos()

                            # Store actual joint positions for later use
                            # NOTE(review): joint_positions is appended
                            # before FK, so if FK fails below it can end
                            # up one longer than the non-None robot_poses
                            # entries — confirm downstream tolerates this.
                            camera_data[camera_name]["joint_positions"].append(
                                actual_joint_pos
                            )

                            # Compute forward kinematics from actual joint positions
                            # For initial data collection, compute FK in local frames
                            # (we'll recompute after getting bimanual transform)
                            global _kinematics_cache
                            if _kinematics_cache is None:
                                _kinematics_cache = Kinematics(
                                    get_yam_4310_linear_xml_path(),
                                    site_name="grasp_site",
                                )

                            # FK uses only the first 6 joints (arm DOF,
                            # excluding gripper)
                            robot_pose = _kinematics_cache.fk(actual_joint_pos[:6])
                            camera_data[camera_name]["robot_poses"].append(
                                robot_pose
                            )
                        except Exception as e:
                            print(
                                f"    Warning: Failed to get actual robot pose: {e}"
                            )
                            # Store None as placeholder
                            camera_data[camera_name]["robot_poses"].append(None)
                else:
                    print(f"    ✗ {camera_name}: Board not detected")

            except Exception as e:
                # Best-effort per camera: one failing camera must not abort
                # the whole collection run.
                print(f"    ✗ {camera_name}: Error - {e}")

    print("\n" + "=" * 70)
    print("Data collection complete!")
    return camera_data

initialize_cameras(camera_names, warmup_frames=10)

Initialize cameras by name using the camera factory.

Source code in raiden/calibration/runner.py
def initialize_cameras(self, camera_names: List[str], warmup_frames: int = 10):
    """Open each named camera via the camera factory, then warm them all up.

    Raises:
        ValueError: If a requested camera name is absent from the config.
    """
    print("\nInitializing cameras...")

    for cam_name in camera_names:
        kind = self.camera_config.get_camera_type(cam_name)
        serial_no = self.camera_config.get_serial_by_name(cam_name)
        if serial_no is None:
            raise ValueError(f"Camera '{cam_name}' not found in config")

        print(f"  - Initializing {cam_name} ({kind}, serial: {serial_no})...")
        cam = self.camera_config.create_camera(cam_name)
        cam.open()
        self.cameras[cam_name] = cam

    # Warm up every camera only after all of them are open, so each sensor
    # gets time to stabilize no matter how many cameras are connected.
    print(f"  Warming up cameras ({warmup_frames} frames)...")
    for _frame_idx in range(warmup_frames):
        for cam in self.cameras.values():
            cam.grab()

    print("✓ Cameras initialized")

initialize_robots(left_wrist, right_wrist)

Initialize robots based on which cameras to calibrate

Source code in raiden/calibration/runner.py
def initialize_robots(self, left_wrist: bool, right_wrist: bool):
    """Create and bring up the follower arms needed for wrist-camera calibration.

    Only followers are instantiated; calibration never needs leader arms.
    """
    controller = RobotController(
        use_right_leader=False,
        use_left_leader=False,
        use_right_follower=right_wrist,
        use_left_follower=left_wrist,
    )
    self.robot_controller = controller

    # Verify the CAN buses are reachable before talking to any motors.
    print("\nChecking CAN interfaces...")
    controller.check_can_interfaces()

    # Bring the arms up under active position control (no gravity comp).
    print("\nInitializing robots...")
    controller.initialize_robots(gravity_comp_mode=False)

    # Start from a known posture; both arms move at the same time.
    controller.move_to_home_positions(simultaneous=True)

load_configuration()

Load camera config and calibration poses

Source code in raiden/calibration/runner.py
def load_configuration(self):
    """Load camera configuration, calibration poses, and the ChArUco board.

    Side effects: populates ``self.camera_config``, ``self.poses_data``,
    ``self.board_config`` and ``self.calibrator``.
    """
    print("\nLoading configuration...")

    # Camera definitions (names, types, serials).
    self.camera_config = CameraConfig(self.camera_config_file)
    camera_count = len(self.camera_config.list_cameras())
    print(f"✓ Camera config loaded: {camera_count} camera(s) configured")

    # Robot poses to visit during data collection.
    self.poses_data = load_calibration_poses(self.poses_file)
    pose_count = len(self.poses_data["poses"])
    print(f"✓ Calibration poses loaded: {pose_count} pose(s)")

    # Board geometry: an explicit CLI override wins over the poses file.
    if self._charuco_config_override is None:
        self.board_config = ChArUcoBoardConfig.from_dict(
            self.poses_data["charuco_config"]
        )
    else:
        self.board_config = self._charuco_config_override
    print("✓ ChArUco board config loaded")

    # Calibration engine bound to this board.
    self.calibrator = CameraCalibrator(self.board_config)

run_calibration(left_wrist_camera_id=None, right_wrist_camera_id=None, scene_camera_ids=None)

Run the full calibration workflow

Parameters:

Name Type Description Default
left_wrist_camera_id (Optional[str]): Name of left wrist camera (e.g., "left_wrist"). Default: None
right_wrist_camera_id (Optional[str]): Name of right wrist camera (e.g., "right_wrist"). Default: None
scene_camera_ids (Optional[List[str]]): Names of all scene cameras (multiple allowed). Default: None

Returns:

Type Description
Dict

Calibration results dictionary

Source code in raiden/calibration/runner.py
 658
 659
 660
 661
 662
 663
 664
 665
 666
 667
 668
 669
 670
 671
 672
 673
 674
 675
 676
 677
 678
 679
 680
 681
 682
 683
 684
 685
 686
 687
 688
 689
 690
 691
 692
 693
 694
 695
 696
 697
 698
 699
 700
 701
 702
 703
 704
 705
 706
 707
 708
 709
 710
 711
 712
 713
 714
 715
 716
 717
 718
 719
 720
 721
 722
 723
 724
 725
 726
 727
 728
 729
 730
 731
 732
 733
 734
 735
 736
 737
 738
 739
 740
 741
 742
 743
 744
 745
 746
 747
 748
 749
 750
 751
 752
 753
 754
 755
 756
 757
 758
 759
 760
 761
 762
 763
 764
 765
 766
 767
 768
 769
 770
 771
 772
 773
 774
 775
 776
 777
 778
 779
 780
 781
 782
 783
 784
 785
 786
 787
 788
 789
 790
 791
 792
 793
 794
 795
 796
 797
 798
 799
 800
 801
 802
 803
 804
 805
 806
 807
 808
 809
 810
 811
 812
 813
 814
 815
 816
 817
 818
 819
 820
 821
 822
 823
 824
 825
 826
 827
 828
 829
 830
 831
 832
 833
 834
 835
 836
 837
 838
 839
 840
 841
 842
 843
 844
 845
 846
 847
 848
 849
 850
 851
 852
 853
 854
 855
 856
 857
 858
 859
 860
 861
 862
 863
 864
 865
 866
 867
 868
 869
 870
 871
 872
 873
 874
 875
 876
 877
 878
 879
 880
 881
 882
 883
 884
 885
 886
 887
 888
 889
 890
 891
 892
 893
 894
 895
 896
 897
 898
 899
 900
 901
 902
 903
 904
 905
 906
 907
 908
 909
 910
 911
 912
 913
 914
 915
 916
 917
 918
 919
 920
 921
 922
 923
 924
 925
 926
 927
 928
 929
 930
 931
 932
 933
 934
 935
 936
 937
 938
 939
 940
 941
 942
 943
 944
 945
 946
 947
 948
 949
 950
 951
 952
 953
 954
 955
 956
 957
 958
 959
 960
 961
 962
 963
 964
 965
 966
 967
 968
 969
 970
 971
 972
 973
 974
 975
 976
 977
 978
 979
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
def run_calibration(
    self,
    left_wrist_camera_id: Optional[str] = None,
    right_wrist_camera_id: Optional[str] = None,
    scene_camera_ids: Optional[List[str]] = None,
) -> Dict:
    """Run the full calibration workflow

    Args:
        left_wrist_camera_id: Name of left wrist camera (e.g., "left_wrist")
        right_wrist_camera_id: Name of right wrist camera (e.g., "right_wrist")
        scene_camera_ids: Names of all scene cameras (multiple allowed)

    Returns:
        Calibration results dictionary
    """
    try:
        # Load configuration
        self.load_configuration()

        # Determine which cameras to calibrate
        target_cameras = []
        calibrate_left = left_wrist_camera_id is not None
        calibrate_right = right_wrist_camera_id is not None
        calibrate_scene = bool(scene_camera_ids)

        if calibrate_right:
            target_cameras.append(right_wrist_camera_id)
        if calibrate_left:
            target_cameras.append(left_wrist_camera_id)
        if calibrate_scene:
            target_cameras.extend(scene_camera_ids)

        # Check bimanual transform requirements
        global T_RIGHT_BASE_TO_LEFT_BASE
        if (
            calibrate_right
            and not calibrate_left
            and T_RIGHT_BASE_TO_LEFT_BASE is None
        ):
            raise ValueError(
                "Cannot calibrate right wrist camera without bimanual transform.\n"
                "Please calibrate both left and right wrist cameras together to compute the transform."
            )

        print("\nCalibration targets:")
        for camera in target_cameras:
            camera_type = "hand-eye" if "wrist" in camera else "scene"
            print(f"  - {camera} ({camera_type})")

        # Initialize hardware
        # Initialize cameras first (quick check before robot initialization)
        self.initialize_cameras(target_cameras)

        # Only initialize robots if we're calibrating wrist cameras
        if calibrate_left or calibrate_right:
            self.initialize_robots(calibrate_left, calibrate_right)
        else:
            print("\nNo wrist cameras selected - skipping robot initialization")

        # Collect data
        camera_data = self.collect_calibration_data()

        # Compute bimanual transform if both arms are calibrated
        bimanual_transform_computed = False
        if calibrate_left and calibrate_right and T_RIGHT_BASE_TO_LEFT_BASE is None:
            bimanual_transform_computed = (
                self._compute_and_apply_bimanual_transform(
                    camera_data, left_wrist_camera_id, right_wrist_camera_id
                )
            )

        # Run calibration for each camera
        print("\nComputing calibrations...")
        print("=" * 70)

        results = {
            "version": "1.0",
            "timestamp": datetime.now().isoformat(),
            "coordinate_frame": "left_arm_base",  # All transforms relative to left arm base
            "charuco_config": self.board_config.__dict__,
            "cameras": {},
            "quality_metrics": {},
        }

        # Add bimanual transform if computed
        if bimanual_transform_computed and T_RIGHT_BASE_TO_LEFT_BASE is not None:
            results["bimanual_transform"] = {
                "right_base_to_left_base": T_RIGHT_BASE_TO_LEFT_BASE.tolist(),
                "computed_from_calibration": True,
                "description": "Transform from right arm base frame to left arm base frame",
            }
        elif T_RIGHT_BASE_TO_LEFT_BASE is not None:
            results["bimanual_transform"] = {
                "right_base_to_left_base": T_RIGHT_BASE_TO_LEFT_BASE.tolist(),
                "computed_from_calibration": False,
                "description": "Transform from right arm base frame to left arm base frame (from config)",
            }

        # Process cameras in order: left wrist first (needed for scene camera conversion), then others
        # This ensures left wrist hand-eye calibration is available for scene camera conversion
        cameras_to_process = []
        if left_wrist_camera_id and left_wrist_camera_id in target_cameras:
            cameras_to_process.append(left_wrist_camera_id)
        for camera_name in target_cameras:
            if camera_name != left_wrist_camera_id:
                cameras_to_process.append(camera_name)

        for camera_name in cameras_to_process:
            print(f"\n{camera_name}:")
            data = camera_data[camera_name]

            if len(data["corners"]) < 3:
                print(
                    f"  ✗ Insufficient data ({len(data['corners'])} views, need >= 3)"
                )
                continue

            # Get factory-calibrated intrinsics from the camera SDK
            cam_type = self.camera_config.get_camera_type(camera_name) or "unknown"
            print(
                f"  Getting intrinsics from {cam_type} SDK (factory calibration)..."
            )
            camera = self.cameras.get(camera_name)
            if not camera:
                print("  ✗ Camera object not found")
                continue

            cam_matrix, dist_coeffs, image_size = camera.get_intrinsics()
            print(
                f"    ✓ fx={cam_matrix[0, 0]:.1f}, fy={cam_matrix[1, 1]:.1f}, cx={cam_matrix[0, 2]:.1f}, cy={cam_matrix[1, 2]:.1f}"
            )

            intrinsics_result = {
                "success": True,
                "camera_matrix": cam_matrix.tolist(),
                "distortion_coeffs": dist_coeffs.tolist(),
            }

            # Store intrinsics
            camera_result = {
                "type": "hand_eye" if "wrist" in camera_name else "scene",
                "serial_number": self.camera_config.get_serial_by_name(camera_name),
                "intrinsics": {
                    "camera_matrix": intrinsics_result["camera_matrix"],
                    "distortion_coeffs": intrinsics_result["distortion_coeffs"],
                    "image_size": list(image_size),
                    "source": f"{cam_type}_sdk_factory_calibration",
                },
                "num_poses_used": len(data["corners"]),
            }

            # Perform extrinsics calibration
            if "wrist" in camera_name:
                # Hand-eye calibration
                print("  Computing hand-eye calibration...")

                # Check if we have robot poses
                if not data["robot_poses"] or data["robot_poses"][0] is None:
                    print(
                        "  ✗ Forward kinematics not implemented - skipping hand-eye calibration"
                    )
                else:
                    # Estimate camera poses (board-to-camera)
                    camera_matrix = np.array(intrinsics_result["camera_matrix"])
                    dist_coeffs = np.array(intrinsics_result["distortion_coeffs"])
                    camera_poses = []

                    for corners, ids in zip(data["corners"], data["ids"]):
                        rvec, tvec = self.calibrator.detector.estimate_pose(
                            corners, ids, camera_matrix, dist_coeffs
                        )
                        if rvec is not None:
                            camera_poses.append((rvec, tvec))

                    # Run hand-eye calibration
                    hand_eye_result = self.calibrator.calibrate_hand_eye(
                        data["robot_poses"], camera_poses
                    )

                    if hand_eye_result["success"]:
                        camera_result["hand_eye_calibration"] = hand_eye_result
                        print(
                            f"    ✓ Hand-eye calibration complete (method: {hand_eye_result['method']})"
                        )
                    else:
                        print(
                            f"  ✗ Hand-eye calibration failed: {hand_eye_result.get('error', 'Unknown error')}"
                        )

            else:
                # Scene camera calibration
                print("  Computing scene camera extrinsics...")

                # Estimate camera poses (T_board_to_scene_cam)
                camera_matrix = np.array(intrinsics_result["camera_matrix"])
                dist_coeffs = np.array(intrinsics_result["distortion_coeffs"])
                camera_poses = []

                for corners, ids in zip(data["corners"], data["ids"]):
                    rvec, tvec = self.calibrator.detector.estimate_pose(
                        corners, ids, camera_matrix, dist_coeffs
                    )
                    if rvec is not None:
                        camera_poses.append((rvec, tvec))

                # Calibrate scene camera relative to board frame first
                scene_result = self.calibrator.calibrate_scene_camera(camera_poses)

                if scene_result["success"]:
                    # Now convert to left arm base frame if we have left wrist camera data
                    # T_scene_cam_to_left_base = T_scene_cam_to_board @ T_board_to_left_base
                    # We can get T_board_to_left_base from the left wrist camera observations

                    if calibrate_left and left_wrist_camera_id:
                        left_data = camera_data.get(left_wrist_camera_id)
                        if left_data and "hand_eye_calibration" in results[
                            "cameras"
                        ].get(left_wrist_camera_id, {}):
                            print(
                                "    Converting scene camera pose to left arm base frame..."
                            )

                            # Get left hand-eye calibration (T_left_cam_to_left_ee)
                            left_hand_eye = results["cameras"][
                                left_wrist_camera_id
                            ]["hand_eye_calibration"]
                            T_left_cam_to_left_ee = np.eye(4)
                            T_left_cam_to_left_ee[:3, :3] = np.array(
                                left_hand_eye["rotation_matrix"]
                            )
                            T_left_cam_to_left_ee[:3, 3] = np.array(
                                left_hand_eye["translation_vector"]
                            )

                            # Get left intrinsics
                            left_camera = self.cameras.get(left_wrist_camera_id)
                            left_cam_matrix, left_dist_coeffs, _ = (
                                left_camera.get_intrinsics()
                            )

                            # Estimate board poses from left camera
                            left_board_poses = []
                            for corners, ids in zip(
                                left_data["corners"], left_data["ids"]
                            ):
                                rvec, tvec = self.calibrator.detector.estimate_pose(
                                    corners, ids, left_cam_matrix, left_dist_coeffs
                                )
                                if rvec is not None:
                                    left_board_poses.append((rvec, tvec))

                            if left_board_poses and left_data["robot_poses"]:
                                # Compute average T_board_to_left_base from left wrist observations
                                print(
                                    f"      Computing T_board_to_left_base from {len(left_board_poses)} left wrist observations..."
                                )
                                board_to_left_base_transforms = []
                                for i, (rvec, tvec) in enumerate(left_board_poses):
                                    if (
                                        i >= len(left_data["robot_poses"])
                                        or left_data["robot_poses"][i] is None
                                    ):
                                        continue

                                    # T_board_to_left_cam
                                    R, _ = cv2.Rodrigues(rvec)
                                    T_board_to_left_cam = np.eye(4)
                                    T_board_to_left_cam[:3, :3] = R
                                    T_board_to_left_cam[:3, 3] = tvec.flatten()

                                    # Correct chain: T^base_board = FK @ hand_eye @ ChArUco
                                    T_left_base_to_left_ee = left_data[
                                        "robot_poses"
                                    ][i]

                                    T_board_to_left_base = (
                                        T_left_base_to_left_ee
                                        @ T_left_cam_to_left_ee
                                        @ T_board_to_left_cam
                                    )

                                    board_to_left_base_transforms.append(
                                        T_board_to_left_base
                                    )

                                if board_to_left_base_transforms:
                                    # Average the transforms
                                    from scipy.spatial.transform import Rotation

                                    rotations = [
                                        Rotation.from_matrix(T[:3, :3])
                                        for T in board_to_left_base_transforms
                                    ]
                                    translations = [
                                        T[:3, 3]
                                        for T in board_to_left_base_transforms
                                    ]

                                    avg_rotation = Rotation.concatenate(
                                        rotations
                                    ).mean()
                                    avg_translation = np.mean(translations, axis=0)

                                    T_board_to_left_base = np.eye(4)
                                    T_board_to_left_base[:3, :3] = (
                                        avg_rotation.as_matrix()
                                    )
                                    T_board_to_left_base[:3, 3] = avg_translation

                                    # Now compute T_scene_cam_to_left_base
                                    T_scene_cam_to_board = np.eye(4)
                                    T_scene_cam_to_board[:3, :3] = np.array(
                                        scene_result["rotation_matrix"]
                                    )
                                    T_scene_cam_to_board[:3, 3] = np.array(
                                        scene_result["translation_vector"]
                                    )

                                    # T_board_to_left_base is T^base_board (transforms board->base)
                                    # T_scene_cam_to_board is T^board_cam (transforms cam->board, from calibrate_scene_camera)
                                    # T^base_cam = T^base_board @ T^board_cam
                                    T_left_base_to_scene_cam = (
                                        T_board_to_left_base @ T_scene_cam_to_board
                                    )

                                    scene_cam_pos_in_left_base = (
                                        T_left_base_to_scene_cam[:3, 3]
                                    )
                                    print(
                                        f"      Scene camera position in left base: {scene_cam_pos_in_left_base}"
                                    )

                                    # Store T^base_cam: scene camera pose in left base frame
                                    # translation_vector = scene camera position in left base frame
                                    scene_result["rotation_matrix"] = (
                                        T_left_base_to_scene_cam[:3, :3].tolist()
                                    )
                                    scene_result["translation_vector"] = (
                                        T_left_base_to_scene_cam[:3, 3].tolist()
                                    )
                                    rvec_new, _ = cv2.Rodrigues(
                                        T_left_base_to_scene_cam[:3, :3]
                                    )
                                    scene_result["rotation_vector"] = (
                                        rvec_new.flatten().tolist()
                                    )
                                    scene_result["reference_frame"] = (
                                        "left_arm_base"
                                    )

                                    print(
                                        "      ✓ Scene camera pose converted to left arm base frame"
                                    )
                                else:
                                    print(
                                        "      ! Warning: Could not convert to left arm base frame (no valid board observations)"
                                    )
                        else:
                            print(
                                "      ! Warning: Cannot convert to left arm base frame (left wrist not calibrated)"
                            )
                    else:
                        print(
                            "      ! Warning: Scene camera extrinsics are in board frame (left wrist not calibrated)"
                        )

                    camera_result["extrinsics"] = scene_result
                    print("    ✓ Scene camera calibration complete")
                else:
                    print("  ✗ Scene camera calibration failed")

            results["cameras"][camera_name] = camera_result

        # Compute quality metrics
        total_poses = len(self.poses_data["poses"])
        poses_with_detections = max(
            len(data["corners"]) for data in camera_data.values()
        )

        results["quality_metrics"] = {
            "overall_success": len(results["cameras"]) > 0,
            "poses_collected": total_poses,
            "poses_with_detections": poses_with_detections,
            "detection_rate": poses_with_detections / total_poses
            if total_poses > 0
            else 0,
        }

        print("\n" + "=" * 70)
        print("Calibration complete!")
        print("\nSummary:")
        print(f"  - Cameras calibrated: {len(results['cameras'])}")
        print(f"  - Poses used: {poses_with_detections}/{total_poses}")
        print(
            f"  - Detection rate: {results['quality_metrics']['detection_rate'] * 100:.1f}%"
        )
        if bimanual_transform_computed:
            print("  - Bimanual transform: Computed from calibration data ✓")
        elif T_RIGHT_BASE_TO_LEFT_BASE is not None:
            print("  - Bimanual transform: Loaded from configuration file")

        # Save results
        save_calibration_results(results, self.output_file)
        print(f"\n✓ Results saved to: {self.output_file}")

        # Store calibration result in DB
        try:
            from raiden.db.database import get_db

            get_db().add_calibration_result(results, self.output_file)
            print("✓ Calibration result recorded in DB")
        except Exception as _db_err:
            print(f"  Warning: could not record calibration in DB: {_db_err}")

        # Cleanup
        print("\nCleaning up...")

        if self.robot_controller:
            # Move the arms to the home positions.
            self.robot_controller.move_to_home_positions(simultaneous=True)

            # Stop control threads without moving to home
            # (Robots will stay in their current position - user can move them manually)
            print("  Stopping robot control threads...")
            self.robot_controller.stop_teleoperation()

            # Give threads time to fully terminate
            time.sleep(0.5)

            # Cleanup robot resources
            self.robot_controller.cleanup()

            # Give robot cleanup time to complete before exit
            time.sleep(0.5)
            print("  ✓ Robot controller cleaned up")
            print(
                "  Note: Arms remain in current position - manually move to home if needed"
            )

        # Close cameras first (quick and safe)
        for camera in self.cameras.values():
            camera.close()
        print("  ✓ Cameras closed")

        print("\n✓ Cleanup complete")

        return results

    except KeyboardInterrupt:
        print("\n\nCalibration interrupted by user")
        self._move_stop_event.set()
        if self.robot_controller and self.robot_controller.has_robots():
            self.robot_controller.emergency_stop()
        raise

    except Exception as e:
        print(f"\nError during calibration: {e}")
        import traceback

        traceback.print_exc()

        self._move_stop_event.set()
        if self.robot_controller and self.robot_controller.has_robots():
            self.robot_controller.emergency_stop()
        raise

compute_bimanual_base_transform_from_calibration(left_arm_poses, right_arm_poses, left_board_poses, right_board_poses, left_hand_eye_result, right_hand_eye_result)

Compute transform from right arm base to left arm base after hand-eye calibrations

Uses hand-eye calibration results for both arms:

1. For each pose, compute the board pose from the left arm chain: left_base -> left_ee -> left_cam -> board
2. For each pose, compute the board pose from the right arm chain: right_base -> right_ee -> right_cam -> board
3. Since the board is fixed, both chains should point to the same location in the world
4. Compute the offset between the bases and average it across poses

Parameters:

Name Type Description Default
left_arm_poses List[ndarray]

List of T_left_base_to_left_ee transforms

required
right_arm_poses List[ndarray]

List of T_right_base_to_right_ee transforms (in right base frame)

required
left_board_poses List[Tuple[ndarray, ndarray]]

List of (rvec, tvec) board observations from left camera

required
right_board_poses List[Tuple[ndarray, ndarray]]

List of (rvec, tvec) board observations from right camera

required
left_hand_eye_result Dict

Hand-eye calibration result for left wrist (T_left_cam_to_left_ee)

required
right_hand_eye_result Dict

Hand-eye calibration result for right wrist (T_right_cam_to_right_ee)

required

Returns:

Name Type Description
T_right_base_to_left_base ndarray

4x4 transformation matrix

Source code in raiden/calibration/runner.py
def compute_bimanual_base_transform_from_calibration(
    left_arm_poses: List[np.ndarray],
    right_arm_poses: List[np.ndarray],
    left_board_poses: List[Tuple[np.ndarray, np.ndarray]],
    right_board_poses: List[Tuple[np.ndarray, np.ndarray]],
    left_hand_eye_result: Dict,
    right_hand_eye_result: Dict,
) -> np.ndarray:
    """Compute transform from right arm base to left arm base after hand-eye calibrations

    Uses hand-eye calibration results for both arms:
    1. For each pose, compute board pose from left arm: left_base -> left_ee -> left_cam -> board
    2. For each pose, compute board pose from right arm: right_base -> right_ee -> right_cam -> board
    3. Since the board is fixed, both chains describe the same physical board pose
    4. Solve for the base-to-base offset at each pose and average across poses

    Args:
        left_arm_poses: List of T_left_base_to_left_ee transforms
        right_arm_poses: List of T_right_base_to_right_ee transforms (in right base frame)
        left_board_poses: List of (rvec, tvec) board observations from left camera
        right_board_poses: List of (rvec, tvec) board observations from right camera
        left_hand_eye_result: Hand-eye calibration result for left wrist (T_left_cam_to_left_ee)
        right_hand_eye_result: Hand-eye calibration result for right wrist (T_right_cam_to_right_ee)

    Returns:
        T_right_base_to_left_base: 4x4 transformation matrix

    Raises:
        ValueError: if either arm contributes no poses or no board observations
    """
    if len(left_arm_poses) < 1 or len(right_arm_poses) < 1:
        raise ValueError("Need at least 1 pose from both arms")

    if len(left_board_poses) < 1 or len(right_board_poses) < 1:
        raise ValueError("Need at least 1 board observation from both cameras")

    from scipy.spatial.transform import Rotation

    def _homogeneous(rotation: np.ndarray, translation: np.ndarray) -> np.ndarray:
        # Assemble a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector.
        T = np.eye(4)
        T[:3, :3] = rotation
        T[:3, 3] = np.asarray(translation).flatten()
        return T

    def _board_to_cam(board_pose: Tuple[np.ndarray, np.ndarray]) -> np.ndarray:
        # Convert a ChArUco (rvec, tvec) observation into T^cam_board
        # (transforms board coordinates into camera coordinates).
        # Rotation.from_rotvec performs the same axis-angle -> matrix
        # conversion as cv2.Rodrigues, and scipy is already used here,
        # so this block has no cv2 dependency.
        rvec, tvec = board_pose
        return _homogeneous(
            Rotation.from_rotvec(np.asarray(rvec).flatten()).as_matrix(), tvec
        )

    # Use only as many pose pairs as every input list can supply.
    num_poses = min(
        len(left_arm_poses),
        len(right_arm_poses),
        len(left_board_poses),
        len(right_board_poses),
    )
    print(
        f"    Using hand-eye calibrations to compute bimanual base transform ({num_poses} poses)..."
    )

    # Extract hand-eye transforms (camera to end-effector)
    T_left_cam_to_left_ee = _homogeneous(
        np.array(left_hand_eye_result["rotation_matrix"]),
        np.array(left_hand_eye_result["translation_vector"]),
    )
    T_right_cam_to_right_ee = _homogeneous(
        np.array(right_hand_eye_result["rotation_matrix"]),
        np.array(right_hand_eye_result["translation_vector"]),
    )

    # Sanity-check the hand-eye rotations: a proper rotation has det == +1.
    det_left = np.linalg.det(T_left_cam_to_left_ee[:3, :3])
    det_right = np.linalg.det(T_right_cam_to_right_ee[:3, :3])
    if abs(det_left - 1.0) > 0.01 or abs(det_right - 1.0) > 0.01:
        print(
            f"    Warning: Rotation matrices are not orthogonal (det_left={det_left:.4f}, det_right={det_right:.4f})"
        )

    # For each pose, chain FK, hand-eye and board observation to express the
    # board in each arm's base frame, then solve for the base-to-base offset.
    transforms = []
    for i in range(num_poses):
        # Correct chain: T^base_board = T^base_ee @ T^ee_cam @ T^cam_board
        # = FK @ hand_eye_result @ ChArUco_result (no inversions needed):
        #   - arm pose        = FK              = T^base_ee   (transforms ee->base)
        #   - hand-eye result = T^ee_cam        (transforms cam->ee)
        #   - board_to_cam    = ChArUco output  = T^cam_board (transforms board->cam)
        T_left_base_to_board = (
            left_arm_poses[i]
            @ T_left_cam_to_left_ee
            @ _board_to_cam(left_board_poses[i])
        )
        T_right_base_to_board = (
            right_arm_poses[i]
            @ T_right_cam_to_right_ee
            @ _board_to_cam(right_board_poses[i])
        )

        # The board is fixed, so:
        #   T^leftbase_board = T^leftbase_rightbase @ T^rightbase_board
        # => T^rightbase_leftbase = T^rightbase_board @ inv(T^leftbase_board)
        # (one inversion, instead of inverting the assembled product again)
        transforms.append(T_right_base_to_board @ np.linalg.inv(T_left_base_to_board))

    # Average the per-pose estimates: quaternion mean for the rotations,
    # arithmetic mean for the translations.
    rotations = [Rotation.from_matrix(T[:3, :3]) for T in transforms]
    translations = [T[:3, 3] for T in transforms]

    avg_rotation = Rotation.concatenate(rotations).mean()
    avg_translation = np.mean(translations, axis=0)

    T_right_base_to_left_base = _homogeneous(avg_rotation.as_matrix(), avg_translation)

    print(
        f"    Bimanual transform (T_right_base_to_left_base) translation: {T_right_base_to_left_base[:3, 3]}"
    )

    return T_right_base_to_left_base

compute_forward_kinematics(joint_positions, arm='left')

Compute forward kinematics for YAM robot arm using i2rt library

Parameters:

Name Type Description Default
joint_positions ndarray

6 joint positions (without gripper)

required
arm str

Which arm ("left" or "right")

'left'

Returns:

Type Description
ndarray

4x4 transformation matrix from the left arm base to the end-effector (grasp_site). All transforms are expressed relative to the left arm base frame.

Source code in raiden/calibration/runner.py
def compute_forward_kinematics(
    joint_positions: np.ndarray, arm: str = "left"
) -> np.ndarray:
    """Compute forward kinematics for YAM robot arm using i2rt library

    Args:
        joint_positions: 6 joint positions (without gripper)
        arm: Which arm ("left" or "right")

    Returns:
        4x4 transformation matrix from left arm base to end-effector (grasp_site)
        All transforms are expressed relative to the left arm base frame.

    Raises:
        ValueError: if joint_positions does not contain exactly 6 values, or
            if arm == "right" and the bimanual base transform has not been
            calibrated yet.
    """
    global _kinematics_cache

    # Validate explicitly: an `assert` would be stripped under `python -O`.
    if len(joint_positions) != 6:
        raise ValueError(
            f"Expected 6 joint positions, got {len(joint_positions)}"
        )

    # The right arm needs the bimanual base transform; fail fast before
    # paying for the (possibly first-time) kinematics construction below.
    if arm == "right" and T_RIGHT_BASE_TO_LEFT_BASE is None:
        raise ValueError(
            "Bimanual transform not available. "
            "Calibrate both left and right wrist cameras together to compute it."
        )

    # Initialize kinematics on first call (cached for subsequent calls)
    if _kinematics_cache is None:
        _kinematics_cache = Kinematics(
            get_yam_4310_linear_xml_path(), site_name="grasp_site"
        )

    # Compute forward kinematics in the arm's own base frame (T_arm_base_to_ee)
    T_arm_base_to_ee = _kinematics_cache.fk(joint_positions)

    if arm == "right":
        # T_left_base_to_ee = T_left_base_to_right_base @ T_right_base_to_ee
        T_left_base_to_right_base = np.linalg.inv(T_RIGHT_BASE_TO_LEFT_BASE)
        return T_left_base_to_right_base @ T_arm_base_to_ee

    # For the left arm, FK is already expressed in the left base frame.
    return T_arm_base_to_ee