import io
import logging
import logging.handlers
import os
import time
from dataclasses import dataclass
from pathlib import Path
from threading import Event
from typing import Any

import torch

from lerobot.configs.types import PolicyFeature
from lerobot.constants import OBS_IMAGES, OBS_STATE
from lerobot.datasets.utils import build_dataset_frame, hw_to_dataset_features
from lerobot.policies import ACTConfig, DiffusionConfig, PI0Config, SmolVLAConfig, VQBeTConfig
from lerobot.robots.robot import Robot
from lerobot.transport import async_inference_pb2
from lerobot.transport.utils import bytes_buffer_size
from lerobot.utils.utils import init_logging

# Type aliases for the async inference pipeline, in order of processing:
Action = torch.Tensor
ActionChunk = torch.Tensor

# raw observations, keyed by the robot's hardware feature names
RawObservation = dict[str, torch.Tensor]

# observations mapped to LeRobot dataset feature names (e.g. observation.state, observation.images.*)
LeRobotObservation = dict[str, torch.Tensor]

# policy-ready observations: batched, on-device tensors keyed by policy feature names
Observation = dict[str, torch.Tensor]


def visualize_action_queue_size(action_queue_size: list[int]) -> None:
    """Plot the size of the action queue over environment steps."""
    # Imported lazily so matplotlib is only required when visualization is requested
    import matplotlib.pyplot as plt

    if not action_queue_size:
        return  # nothing to plot, and max() would fail on an empty list

    fig, ax = plt.subplots()
    ax.set_title("Action Queue Size Over Time")
    ax.set_xlabel("Environment steps")
    ax.set_ylabel("Action Queue Size")
    ax.set_ylim(0, max(action_queue_size) * 1.1)
    ax.grid(True, alpha=0.3)
    ax.plot(range(len(action_queue_size)), action_queue_size)
    plt.show()


def validate_robot_cameras_for_policy(
    lerobot_observation_features: dict[str, dict], policy_image_features: dict[str, PolicyFeature]
) -> None:
    """Check that the robot's camera keys exactly match the image features the policy expects."""
    image_keys = list(filter(is_image_key, lerobot_observation_features))
    assert set(image_keys) == set(policy_image_features.keys()), (
        f"Policy image features must match robot cameras! Received {list(policy_image_features.keys())} != {image_keys}"
    )


def map_robot_keys_to_lerobot_features(robot: Robot) -> dict[str, dict]:
    """Map the robot's hardware observation features to LeRobot dataset features."""
    return hw_to_dataset_features(robot.observation_features, "observation", use_video=False)


def is_image_key(k: str) -> bool:
    """Whether a feature key refers to a camera image (i.e. starts with the OBS_IMAGES prefix)."""
    return k.startswith(OBS_IMAGES)


def resize_robot_observation_image(image: torch.Tensor, resize_dims: tuple[int, int, int]) -> torch.Tensor:
    """Resize a robot camera image from (H, W, C) to the (C, H', W') shape expected by the policy."""
    assert image.ndim == 3, f"Image must be (H, W, C)! Received {image.shape}"

    # (H, W, C) -> (C, H, W), the channel layout policies expect
    image = image.permute(2, 0, 1)
    # resize_dims is (C, H, W); interpolate only needs the spatial dims
    dims = (resize_dims[1], resize_dims[2])

    # interpolate expects a batch dimension
    image_batched = image.unsqueeze(0)

    resized = torch.nn.functional.interpolate(image_batched, size=dims, mode="bilinear", align_corners=False)

    return resized.squeeze(0)
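

# Minimal usage sketch (illustrative, not part of the module's API): resize a dummy
# (H, W, C) float frame to a (C, H, W) shape as given by a policy's image features.
def _example_resize_robot_observation_image() -> torch.Tensor:
    dummy_frame = torch.rand(480, 640, 3)  # (H, W, C), as returned by a robot camera
    return resize_robot_observation_image(dummy_frame, resize_dims=(3, 224, 224))  # -> (3, 224, 224)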


def raw_observation_to_observation(
    raw_observation: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
    device: str,
) -> Observation:
    """Turn a raw robot observation into a batched, on-device observation ready for the policy."""
    observation = prepare_raw_observation(raw_observation, lerobot_features, policy_image_features)

    for k, v in observation.items():
        if isinstance(v, torch.Tensor):
            if "image" in k:
                # Scale to float32 in [0, 1], add a batch dimension, and move to device
                observation[k] = prepare_image(v).unsqueeze(0).to(device)
            else:
                observation[k] = v.to(device)
        else:
            # Non-tensor entries (e.g. the task string) are passed through unchanged
            observation[k] = v

    return observation
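

# Hedged end-to-end sketch of the client-side observation pipeline. Assumes `robot` is a
# connected Robot and `policy_image_features` comes from the policy config's image features;
# the function name is illustrative.
def _example_build_policy_observation(
    robot: Robot, policy_image_features: dict[str, PolicyFeature]
) -> Observation:
    lerobot_features = map_robot_keys_to_lerobot_features(robot)
    validate_robot_cameras_for_policy(lerobot_features, policy_image_features)
    raw_obs = robot.get_observation()  # joint positions + camera frames
    return raw_observation_to_observation(raw_obs, lerobot_features, policy_image_features, device="cpu")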


def prepare_image(image: torch.Tensor) -> torch.Tensor:
    """Minimal preprocessing: turn uint8 images into float32 in [0, 1] and make the tensor memory-contiguous."""
    image = image.type(torch.float32) / 255
    image = image.contiguous()

    return image


def extract_state_from_raw_observation(
    lerobot_obs: LeRobotObservation,
) -> torch.Tensor:
    """Extract the state vector from a LeRobot observation."""
    state = torch.tensor(lerobot_obs[OBS_STATE])

    # Policies expect a batch dimension
    if state.ndim == 1:
        state = state.unsqueeze(0)

    return state


def extract_images_from_raw_observation(
    lerobot_obs: LeRobotObservation,
    camera_key: str,
) -> torch.Tensor:
    """Extract a single camera image from a LeRobot observation."""
    return torch.tensor(lerobot_obs[camera_key])


def make_lerobot_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
) -> LeRobotObservation:
    """Build a LeRobot observation frame from a raw robot observation."""
    return build_dataset_frame(lerobot_features, robot_obs, prefix="observation")


def prepare_raw_observation(
    robot_obs: RawObservation,
    lerobot_features: dict[str, dict],
    policy_image_features: dict[str, PolicyFeature],
) -> Observation:
    """Match keys from the raw robot_obs dict to the keys expected by a given policy (passed as
    policy_image_features)."""
    lerobot_obs = make_lerobot_observation(robot_obs, lerobot_features)

    image_keys = list(filter(is_image_key, lerobot_obs))

    state_dict = {OBS_STATE: extract_state_from_raw_observation(lerobot_obs)}
    image_dict = {
        image_k: extract_images_from_raw_observation(lerobot_obs, image_k) for image_k in image_keys
    }

    # Resize camera images to the dimensions expected by the policy
    image_dict = {
        key: resize_robot_observation_image(image, policy_image_features[key].shape)
        for key, image in image_dict.items()
    }

    # Forward the task description (if any) alongside the state
    if "task" in robot_obs:
        state_dict["task"] = robot_obs["task"]

    return {**state_dict, **image_dict}


def get_logger(name: str, log_to_file: bool = True) -> logging.Logger:
    """
    Get a logger using the standardized logging setup from utils.py.

    Args:
        name: Logger name (e.g., 'policy_server', 'robot_client')
        log_to_file: Whether to also log to a file

    Returns:
        Configured logger instance
    """
    if log_to_file:
        os.makedirs("logs", exist_ok=True)
        log_file = Path(f"logs/{name}_{int(time.time())}.log")
    else:
        log_file = None

    init_logging(log_file=log_file, display_pid=False)

    return logging.getLogger(name)


@dataclass
class TimedData:
    """Base class pairing data with timing information. Subclasses attach the actual
    payload (e.g. an action or an observation).

    Args:
        timestamp: Unix timestamp at the data's creation.
        timestep: The timestep of the data within the control loop.
    """

    timestamp: float
    timestep: int

    def get_timestamp(self):
        return self.timestamp

    def get_timestep(self):
        return self.timestep


@dataclass
class TimedAction(TimedData):
    action: Action

    def get_action(self):
        return self.action


@dataclass
class TimedObservation(TimedData):
    observation: RawObservation
    must_go: bool = False

    def get_observation(self):
        return self.observation
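

# Minimal sketch of wrapping payloads with timing metadata, e.g. actions produced by the
# policy server and observations captured on the robot client (values are illustrative).
def _example_timed_data() -> tuple[TimedAction, TimedObservation]:
    now = time.time()
    timed_action = TimedAction(timestamp=now, timestep=0, action=torch.zeros(6))
    timed_obs = TimedObservation(timestamp=now, timestep=0, observation={}, must_go=True)
    return timed_action, timed_obs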


@dataclass
class FPSTracker:
    """Utility class to track FPS metrics over time."""

    target_fps: float
    first_timestamp: float | None = None
    total_obs_count: int = 0

    def calculate_fps_metrics(self, current_timestamp: float) -> dict[str, float]:
        """Calculate average FPS vs target"""
        self.total_obs_count += 1

        if self.first_timestamp is None:
            self.first_timestamp = current_timestamp

        total_duration = current_timestamp - self.first_timestamp
        # N observations span N - 1 inter-observation intervals
        avg_fps = (self.total_obs_count - 1) / total_duration if total_duration > 1e-6 else 0.0

        return {"avg_fps": avg_fps, "target_fps": self.target_fps}

    def reset(self):
        """Reset the FPS tracker state"""
        self.first_timestamp = None
        self.total_obs_count = 0
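

# Usage sketch: feed observation timestamps to the tracker and compare the achieved rate
# against the target control frequency (timestamps below are illustrative, ~30 Hz).
def _example_fps_tracking() -> None:
    tracker = FPSTracker(target_fps=30.0)
    for ts in (0.0, 0.034, 0.066, 0.101):
        metrics = tracker.calculate_fps_metrics(ts)
    logging.info(f"avg_fps={metrics['avg_fps']:.2f} (target {metrics['target_fps']})")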


@dataclass
class RemotePolicyConfig:
    policy_type: str
    pretrained_name_or_path: str
    lerobot_features: dict[str, PolicyFeature]
    actions_per_chunk: int
    device: str = "cpu"
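

# Illustrative construction of the policy spec a client would send to the server; the
# policy type and checkpoint id below are placeholders, not values from this codebase.
def _example_remote_policy_config(lerobot_features: dict[str, PolicyFeature]) -> RemotePolicyConfig:
    return RemotePolicyConfig(
        policy_type="act",
        pretrained_name_or_path="user/some_act_checkpoint",  # hypothetical Hub id or local path
        lerobot_features=lerobot_features,
        actions_per_chunk=50,
        device="cuda",
    )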


def _compare_observation_states(obs1_state: torch.Tensor, obs2_state: torch.Tensor, atol: float) -> bool:
    """Check if two observation states are similar, under a tolerance threshold"""
    return bool(torch.linalg.norm(obs1_state - obs2_state) < atol)


def observations_similar(
    obs1: TimedObservation, obs2: TimedObservation, lerobot_features: dict[str, dict], atol: float = 1
) -> bool:
    """Check if two observations are similar, under a tolerance threshold. Measures the distance
    between observations as their difference in joint space.

    NOTE(fracapuano): This is a very simple check, and it is enough for the current use case.
    An immediate next step is to use (fast) perceptual difference metrics comparing camera views,
    going beyond this joint-space similarity check.
    """
    obs1_state = extract_state_from_raw_observation(
        make_lerobot_observation(obs1.get_observation(), lerobot_features)
    )
    obs2_state = extract_state_from_raw_observation(
        make_lerobot_observation(obs2.get_observation(), lerobot_features)
    )

    return _compare_observation_states(obs1_state, obs2_state, atol=atol)
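

# Worked example of the joint-space check: two states whose L2 distance is below atol are
# treated as similar (so the newer observation can be skipped by the client).
def _example_state_similarity() -> bool:
    s1 = torch.tensor([[0.0, 0.0, 0.0]])
    s2 = torch.tensor([[0.1, 0.1, 0.1]])  # L2 distance ~= 0.17 < atol = 1.0
    return _compare_observation_states(s1, s2, atol=1.0)  # True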


def send_bytes_in_chunks(
    buffer: bytes,
    message_class: Any,
    log_prefix: str = "",
    silent: bool = True,
    chunk_size: int = 3 * 1024 * 1024,
):
    """Yield `message_class` messages carrying `buffer` in `chunk_size` pieces, tagging each
    chunk with a transfer state (BEGIN/MIDDLE/END) so the receiver can reassemble the stream."""
    buffer = io.BytesIO(buffer)
    size_in_bytes = bytes_buffer_size(buffer)

    sent_bytes = 0

    logging_method = logging.info if not silent else logging.debug
    logging_method(f"{log_prefix} Buffer size {size_in_bytes / 1024 / 1024} MB")

    while sent_bytes < size_in_bytes:
        transfer_state = async_inference_pb2.TransferState.TRANSFER_MIDDLE

        # END takes precedence over BEGIN: a single-chunk transfer is tagged TRANSFER_END
        if sent_bytes + chunk_size >= size_in_bytes:
            transfer_state = async_inference_pb2.TransferState.TRANSFER_END
        elif sent_bytes == 0:
            transfer_state = async_inference_pb2.TransferState.TRANSFER_BEGIN

        size_to_read = min(chunk_size, size_in_bytes - sent_bytes)
        chunk = buffer.read(size_to_read)

        yield message_class(transfer_state=transfer_state, data=chunk)
        sent_bytes += size_to_read
        logging_method(f"{log_prefix} Sent {sent_bytes}/{size_in_bytes} bytes with state {transfer_state}")

    logging_method(f"{log_prefix} Published {sent_bytes / 1024 / 1024} MB")


def receive_bytes_in_chunks(
    iterator, continue_receiving: Event, logger: logging.Logger, log_prefix: str = ""
):
    """Reassemble a chunked byte stream produced by `send_bytes_in_chunks`, returning the complete
    payload once a TRANSFER_END chunk arrives (or None if the receiver is shut down first)."""
    bytes_buffer = io.BytesIO()
    step = 0

    logger.info(f"{log_prefix} Starting receiver")
    for item in iterator:
        logger.debug(f"{log_prefix} Received item")
        if not continue_receiving.is_set():
            logger.info(f"{log_prefix} Shutting down receiver")
            return

        if item.transfer_state == async_inference_pb2.TransferState.TRANSFER_BEGIN:
            # First chunk: reset the buffer before writing
            bytes_buffer.seek(0)
            bytes_buffer.truncate(0)
            bytes_buffer.write(item.data)
            logger.debug(f"{log_prefix} Received data at step 0")

        elif item.transfer_state == async_inference_pb2.TransferState.TRANSFER_MIDDLE:
            bytes_buffer.write(item.data)
            step += 1
            logger.debug(f"{log_prefix} Received data at step {step}")

        elif item.transfer_state == async_inference_pb2.TransferState.TRANSFER_END:
            bytes_buffer.write(item.data)
            logger.debug(f"{log_prefix} Received final chunk, total size {bytes_buffer_size(bytes_buffer)}")

            complete_bytes = bytes_buffer.getvalue()

            # Clear the buffer for a potential next transfer
            bytes_buffer.seek(0)
            bytes_buffer.truncate(0)

            logger.debug(f"{log_prefix} Transfer complete")
            return complete_bytes

        else:
            logger.warning(f"{log_prefix} Received unknown transfer state {item.transfer_state}")
            raise ValueError(f"Received unknown transfer state {item.transfer_state}")
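

# Hedged local round-trip sketch: feed the chunks produced by send_bytes_in_chunks straight
# into receive_bytes_in_chunks, as a gRPC stream would. Assumes the Observation message in
# async_inference.proto carries `transfer_state` and `data` fields, as used above.
def _example_chunked_round_trip(payload: bytes) -> bytes:
    chunks = send_bytes_in_chunks(payload, async_inference_pb2.Observation, log_prefix="[example]")
    keep_receiving = Event()
    keep_receiving.set()
    return receive_bytes_in_chunks(chunks, keep_receiving, logging.getLogger(__name__), log_prefix="[example]")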